makhdoomnaeem committed on
Commit
763f89c
·
verified ·
1 Parent(s): b551050

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -19
app.py CHANGED
@@ -7,6 +7,25 @@ GROQ_API_KEY = "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" # Rep
7
  os.environ["GROQ_API_KEY"] = GROQ_API_KEY
8
  client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  def enforce_word_count_and_humanization(output_text, input_word_count, original_prompt, tone):
11
  """
12
  Ensure output text has a word count equal to or greater than the input word count
@@ -17,21 +36,7 @@ def enforce_word_count_and_humanization(output_text, input_word_count, original_
17
 
18
  # If output meets word count and humanization, return as is
19
  if output_word_count >= input_word_count:
20
- # Perform a second pass for natural flow and humanization
21
- refinement_prompt = (
22
- f"Refine the following text to make it sound more natural, human-like, and engaging in a {tone} tone. "
23
- f"Ensure it reads fluently and avoids AI-detected phrasing: {output_text}"
24
- )
25
- try:
26
- refinement_response = client.chat.completions.create(
27
- messages=[{"role": "user", "content": refinement_prompt}],
28
- model="llama-3.3-70b-versatile", # Replace with the desired model
29
- stream=False,
30
- )
31
- refined_text = refinement_response.choices[0].message.content.strip()
32
- return refined_text
33
- except Exception:
34
- return output_text # Return original output if refinement fails
35
 
36
  # If output is too short, regenerate with expansion
37
  expansion_prompt = (
@@ -41,11 +46,11 @@ def enforce_word_count_and_humanization(output_text, input_word_count, original_
41
  try:
42
  expansion_response = client.chat.completions.create(
43
  messages=[{"role": "user", "content": expansion_prompt}],
44
- model="llama-3.3-70b-versatile", # Replace with the desired model
45
  stream=False,
46
  )
47
  expanded_text = expansion_response.choices[0].message.content.strip()
48
- return expanded_text
49
  except Exception:
50
  return output_text # Return original output in case of error
51
 
@@ -57,7 +62,7 @@ def split_text_into_chunks(text, max_words=500):
57
  return [" ".join(words[i:i + max_words]) for i in range(0, len(words), max_words)]
58
 
59
  # Streamlit App
60
- st.title("Advanced Humanizer & Rephraser App (Groq-powered)")
61
  st.subheader("Powered by Groq and Streamlit")
62
 
63
  # User Input
@@ -99,7 +104,7 @@ if st.button("Generate Output"):
99
  # Call Groq API
100
  chat_completion = client.chat.completions.create(
101
  messages=[{"role": "user", "content": task_prompt}],
102
- model="llama-3.3-70b-versatile", # Replace with the desired model
103
  stream=False,
104
  )
105
 
 
7
  os.environ["GROQ_API_KEY"] = GROQ_API_KEY
8
  client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
9
 
10
+ def refine_humanization(output_text, tone):
11
+ """
12
+ Add a third refinement pass for deeper humanization.
13
+ """
14
+ refinement_prompt = (
15
+ f"Take the following text and refine it to sound completely human-like, smooth, and natural. "
16
+ f"Ensure it flows seamlessly, incorporates subtle nuances like idioms or conversational phrasing, and avoids AI-detected patterns. "
17
+ f"The tone should be {tone}. Refined text: {output_text}"
18
+ )
19
+ try:
20
+ refinement_response = client.chat.completions.create(
21
+ messages=[{"role": "user", "content": refinement_prompt}],
22
+ model="llama-3.3-70b-versatile",
23
+ stream=False,
24
+ )
25
+ return refinement_response.choices[0].message.content.strip()
26
+ except Exception:
27
+ return output_text # Return original output if refinement fails
28
+
29
  def enforce_word_count_and_humanization(output_text, input_word_count, original_prompt, tone):
30
  """
31
  Ensure output text has a word count equal to or greater than the input word count
 
36
 
37
  # If output meets word count and humanization, return as is
38
  if output_word_count >= input_word_count:
39
+ return refine_humanization(output_text, tone)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
  # If output is too short, regenerate with expansion
42
  expansion_prompt = (
 
46
  try:
47
  expansion_response = client.chat.completions.create(
48
  messages=[{"role": "user", "content": expansion_prompt}],
49
+ model="llama-3.3-70b-versatile",
50
  stream=False,
51
  )
52
  expanded_text = expansion_response.choices[0].message.content.strip()
53
+ return refine_humanization(expanded_text, tone)
54
  except Exception:
55
  return output_text # Return original output in case of error
56
 
 
62
  return [" ".join(words[i:i + max_words]) for i in range(0, len(words), max_words)]
63
 
64
  # Streamlit App
65
+ st.title("Enhanced Humanizer & Rephraser App (Groq-powered)")
66
  st.subheader("Powered by Groq and Streamlit")
67
 
68
  # User Input
 
104
  # Call Groq API
105
  chat_completion = client.chat.completions.create(
106
  messages=[{"role": "user", "content": task_prompt}],
107
+ model="llama-3.3-70b-versatile",
108
  stream=False,
109
  )
110