Go-Raw committed on
Commit
485970c
·
verified ·
1 Parent(s): 301f51a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -15
app.py CHANGED
@@ -2,7 +2,7 @@ import os
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
3
  import gradio as gr
4
 
5
# Load the FLAN-T5 checkpoint and expose a text2text-generation pipeline.
_MODEL_ID = "google/flan-t5-base"
tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(_MODEL_ID)
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
@@ -14,24 +14,31 @@ personas = {
14
  "🟑 Neutral": "Respond with a balanced and unbiased view"
15
  }
16
 
17
# Generate debate response
def generate_debate(topic):
    """Generate one opinion per persona for the given debate topic.

    Parameters
    ----------
    topic : str or None
        User-supplied debate topic.

    Returns
    -------
    dict
        Mapping of persona label -> generated opinion text; empty dict
        when the topic is missing or whitespace-only.
    """
    # Guard clause: don't query the model for a blank topic.
    if not topic or not topic.strip():
        return {}

    responses = {}
    for label, style in personas.items():
        prompt = f"You are a debater. {style}. The topic is: '{topic}'. What is your opinion?"
        # do_sample=True is required for `temperature` to take effect;
        # without it the pipeline decodes greedily and silently ignores it.
        out = pipe(prompt, max_new_tokens=100, temperature=0.7, do_sample=True)[0]["generated_text"]
        responses[label] = out
    # Return after the loop so every persona is collected.
    return responses
 
26
# Gradio Interface
def interface_fn(topic):
    """Render the per-persona debate responses as one Markdown string."""
    sections = []
    for label, text in generate_debate(topic).items():
        sections.append(f"**{label}**: {text}")
    return "\n\n".join(sections)

demo = gr.Interface(
    fn=interface_fn,
    inputs=gr.Textbox(label="Enter a Debate Topic"),
    outputs=gr.Markdown(),
    title="πŸŽ™οΈ Multi-Agent Debate Simulator",
    description="This app simulates a debate from different viewpoints using FLAN-T5 on Hugging Face.",
)

demo.launch()
 
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
3
  import gradio as gr
4
 
5
# Load FLAN-T5 model
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
# Wrap tokenizer + model in a single generation pipeline.
pipe = pipeline(
    "text2text-generation",
    model=model,
    tokenizer=tokenizer,
)
 
14
  "🟑 Neutral": "Respond with a balanced and unbiased view"
15
  }
16
 
17
# Generate responses
def generate_debate(topic):
    """Simulate a debate by querying the model once per persona.

    Parameters
    ----------
    topic : str or None
        User-supplied debate topic; blank or missing input short-circuits
        with a warning instead of calling the model.

    Returns
    -------
    str
        Markdown with one "### <persona>" section per response, or a
        warning message for invalid input.
    """
    # Guard clause: `None` or whitespace-only topics never reach the model.
    if not topic or not topic.strip():
        return "⚠️ Please enter a valid debate topic."

    responses = {}
    for label, style in personas.items():
        try:
            prompt = f"You are a debater. {style}. The topic is: '{topic}'. What is your opinion?"
            # do_sample=True is required for `temperature` to have any effect;
            # the pipeline otherwise decodes greedily and ignores it.
            out = pipe(prompt, max_new_tokens=120, temperature=0.7, do_sample=True)[0]["generated_text"]
            responses[label] = out.strip()
        except Exception as e:
            # One failing persona shouldn't abort the whole debate.
            responses[label] = f"❌ Error generating response: {str(e)}"

    return "\n\n".join(f"### {label}\n{resp}" for label, resp in responses.items())
 
34
# Gradio Interface
_topic_input = gr.Textbox(
    lines=2,
    placeholder="e.g. Should AI replace teachers?",
    label="Enter a Debate Topic",
)
demo = gr.Interface(
    fn=generate_debate,
    inputs=_topic_input,
    outputs=gr.Markdown(),
    title="πŸŽ™οΈ Multi-Agent Debate Simulator",
    description="This app simulates a debate from different viewpoints using FLAN-T5 (Google) on Hugging Face πŸ€— Transformers. Just enter a topic and see how Optimist, Pessimist, and Neutral personas respond!",
)

# Launch the app
demo.launch()