# Hugging Face Space: Multi-Agent Debate Simulator (FLAN-T5-Base)
# --- Model setup -------------------------------------------------------------
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import gradio as gr

# FLAN-T5-Base: small instruction-tuned seq2seq model, CPU-friendly for Spaces.
_MODEL_NAME = "google/flan-t5-base"

_tokenizer = AutoTokenizer.from_pretrained(_MODEL_NAME)
_model = AutoModelForSeq2SeqLM.from_pretrained(_MODEL_NAME)

# Shared generation pipeline; consumed below by generate_debate().
pipe = pipeline("text2text-generation", model=_model, tokenizer=_tokenizer)
# Persona instructions, keyed by the display label that becomes each section
# header in the rendered Markdown. The leading emoji markers were mojibake in
# the original ("π’" etc.) and are restored here to colored-circle emoji —
# NOTE(review): exact original emoji inferred from the mangled bytes; confirm.
personas = {
    "🟢 Optimist": "Give a detailed, optimistic argument with at least two clear benefits and an example.",
    "🔴 Pessimist": "Give a detailed, critical argument highlighting at least two risks or drawbacks and an example.",
    "🟡 Neutral": "Provide a balanced perspective. Start by listing pros, then cons, and conclude with a neutral summary.",
}
def generate_debate(topic):
    """Generate one short argument per persona for *topic*.

    Parameters
    ----------
    topic : str
        Debate topic entered by the user (Gradio Textbox value).

    Returns
    -------
    str
        Markdown with one ``### <label>`` section per persona, separated by
        blank lines, or a prompt to enter a topic when the input is empty.
    """
    topic = (topic or "").strip()
    if not topic:
        # Guard: Gradio may invoke the fn with an empty textbox.
        return "Please enter a debate topic."

    sections = []
    for label, instruction in personas.items():
        prompt = (
            "You are an experienced debater.\n"
            f'Debate Topic: "{topic}"\n'
            f"{instruction}\n"
            "Write at least 3–4 sentences."  # en dash restored from mojibake "3β4"
        )
        # BUG FIX: `temperature` is ignored unless sampling is enabled —
        # without do_sample=True the pipeline decodes greedily, so the
        # intended 0.7 temperature previously had no effect.
        response = pipe(
            prompt,
            max_new_tokens=180,
            do_sample=True,
            temperature=0.7,
        )[0]["generated_text"].strip()
        sections.append(f"### {label}\n{response}")
    return "\n\n".join(sections)
# --- Gradio UI ---------------------------------------------------------------
demo = gr.Interface(
    fn=generate_debate,
    inputs=gr.Textbox(label="Debate Topic"),
    outputs=gr.Markdown(),  # generate_debate returns Markdown-formatted text
    # Title emoji was mojibake ("ποΈ"); restored — TODO confirm original glyph.
    title="🗣️ Multi-Agent Debate Simulator",
    description="Debates with Optimist, Pessimist & Neutral perspectives using FLAN-T5-Base.",
)

if __name__ == "__main__":
    # Guarded launch so the module can be imported (e.g. by a Space runner
    # or tests) without immediately starting the server.
    demo.launch()