yasserrmd committed on
Commit
a9932e2
·
verified ·
1 Parent(s): a3b8991

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -12
app.py CHANGED
@@ -7,30 +7,50 @@ model_name = "HuggingFaceTB/SmolLM-135M-Instruct"
7
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


def generate_feedback(text):
    """Return model-written feedback for one piece of creative writing.

    The input is wrapped in an instruction prompt, a single continuation is
    sampled from the model, and the text after the final "Feedback:" marker
    is returned.
    """
    prompt = f"Provide constructive feedback on the following creative writing piece:\n\n{text}\n\nFeedback:"

    # Encode the prompt for the model.
    encoded = tokenizer(prompt, return_tensors="pt")

    # Sample one continuation; gradients are not needed at inference time.
    with torch.no_grad():
        generated_ids = model.generate(
            encoded.input_ids, do_sample=True, top_p=0.85, temperature=0.7
        )

    # Drop special tokens, then keep only what follows the last
    # "Feedback:" marker (the prompt itself contains one).
    decoded = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    return decoded.split("Feedback:")[-1].strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
# Components for the single-turn feedback UI.
writing_input = gr.Textbox(
    lines=5,
    label="Your Writing",
    placeholder="Paste a short piece of creative writing here...",
)
feedback_output = gr.Textbox(label="Feedback")

# Wire the feedback function into a one-input, one-output interface.
app = gr.Interface(
    fn=generate_feedback,
    inputs=writing_input,
    outputs=feedback_output,
    title="WriteBetter",
    description="Quick feedback on tone, grammar, and word choice."
)

# Serve the UI.
app.launch()
 
7
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Running record of feedback strings already shown to the user.
feedback_history = []


def generate_unique_feedback(text):
    """Generate feedback on `text` that has not been returned before.

    Samples up to five candidate responses from the model; the first one
    not already present in `feedback_history` is recorded there and
    returned. If every attempt repeats earlier feedback, a fallback
    message is returned instead.
    """
    prompt = f"Provide constructive feedback on the following creative writing piece:\n\n{text}\n\nFeedback:"

    # Encode the prompt once; the same input is reused for every attempt.
    encoded = tokenizer(prompt, return_tensors="pt")

    attempts_remaining = 5
    while attempts_remaining:
        attempts_remaining -= 1

        # Sample a continuation; gradients are not needed at inference time.
        with torch.no_grad():
            generated_ids = model.generate(
                encoded.input_ids,
                max_length=300,
                do_sample=True,
                top_p=0.85,
                temperature=0.7,
            )

        # Keep only the text after the last "Feedback:" marker (the
        # prompt itself contains one).
        decoded = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        candidate = decoded.split("Feedback:")[-1].strip()

        # Only return feedback the user has not seen before.
        if candidate not in feedback_history:
            feedback_history.append(candidate)
            return candidate

    # Every sampled response duplicated earlier feedback.
    return "No new feedback available at this time. Try rephrasing or adding more details to your text."
36
+
37
# Handler for the Gradio reset button.
def reset_history():
    """Forget all previously returned feedback.

    Rebinds the module-level `feedback_history` to a fresh empty list and
    returns a confirmation message for display in the UI.
    """
    global feedback_history
    feedback_history = []
    confirmation = "Feedback history has been reset."
    return confirmation
42
 
43
# Set up the Gradio UI.
#
# BUG FIX: gr.Interface has no `add_component` method, so the previous
# `app = app.add_component(gr.Button(...), reset_history)` line raised
# AttributeError at import time and the app never launched (and the
# rebinding would have clobbered `app` even if it hadn't). Compose the
# feedback form and the reset button inside a gr.Blocks container
# instead, wiring the button to reset_history via Button.click.
with gr.Blocks(title="WriteBetter") as app:
    gr.Markdown("# WriteBetter\nQuick feedback on tone, grammar, and word choice.")

    # Main feedback form: free text in, model feedback out.
    writing_box = gr.Textbox(
        lines=5,
        label="Your Writing",
        placeholder="Paste a short piece of creative writing here...",
    )
    feedback_box = gr.Textbox(label="Feedback")
    submit_button = gr.Button("Get Feedback")
    submit_button.click(fn=generate_unique_feedback, inputs=writing_box, outputs=feedback_box)

    # Reset button clears the feedback-history list; its confirmation
    # message is shown in the status box.
    status_box = gr.Textbox(label="Status", interactive=False)
    reset_button = gr.Button("Reset Feedback History")
    reset_button.click(fn=reset_history, inputs=None, outputs=status_box)

# Launch the app
app.launch()