Spaces:
Sleeping
Sleeping
File size: 1,541 Bytes
899ccdc 809b35d 899ccdc 09585da 899ccdc 86d3714 899ccdc 809b35d 899ccdc 809b35d 86d3714 c2ebf8a 09585da 5aeb336 899ccdc 5aeb336 899ccdc 5aeb336 899ccdc c2ebf8a daa1921 86d3714 cba3f60 485970c 899ccdc 86d3714 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import gradio as gr
# Load FLAN-T5-Base once at import time and expose a ready-to-use
# text2text-generation pipeline (`pipe`) for the rest of the app.
MODEL_NAME = "google/flan-t5-base"
_tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
_model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
pipe = pipeline("text2text-generation", model=_model, tokenizer=_tokenizer)
# Persona label -> prompt instruction.
# Each key doubles as the Markdown section heading in the generated debate
# (NOTE(review): the leading emoji look mojibake-mangled in this copy of the
# file — confirm against the deployed app before "fixing" them).
personas = {
"π’ Optimist": "Give a detailed, optimistic argument with at least two clear benefits and an example.",
"π΄ Pessimist": "Give a detailed, critical argument highlighting at least two risks or drawbacks and an example.",
"π‘ Neutral": "Provide a balanced perspective. Start by listing pros, then cons, and conclude with a neutral summary."
}
def generate_debate(topic):
    """Generate a three-persona debate on *topic*.

    For each entry in the module-level ``personas`` dict, builds a prompt
    that combines the topic with that persona's instruction, runs it through
    the FLAN-T5 ``pipe``, and returns one Markdown string with a
    ``### <label>`` heading per persona, sections separated by blank lines.

    Args:
        topic: The debate topic, interpolated verbatim into each prompt.

    Returns:
        A Markdown-formatted string containing all persona responses.
    """
    sections = []
    for label, instruction in personas.items():
        prompt = (
            f"You are an experienced debater.\n"
            f"Debate Topic: \"{topic}\"\n"
            f"{instruction}\n"
            f"Write at least 3β4 sentences."
        )
        # BUG FIX: temperature only takes effect when sampling is enabled.
        # Without do_sample=True the pipeline greedy-decodes and silently
        # ignores temperature (newer transformers versions warn about this).
        response = pipe(
            prompt,
            max_new_tokens=180,
            do_sample=True,
            temperature=0.7,
        )[0]['generated_text'].strip()
        sections.append(f"### {label}\n{response}")
    return "\n\n".join(sections)
# Wire the debate generator into a minimal Gradio UI: one topic textbox in,
# one Markdown pane out.
topic_input = gr.Textbox(label="Debate Topic")
debate_output = gr.Markdown()

demo = gr.Interface(
    fn=generate_debate,
    inputs=topic_input,
    outputs=debate_output,
    title="ποΈ Multi-Agent Debate Simulator",
    description="Debates with Optimist, Pessimist & Neutral perspectives using FLAN-T5-Base.",
)

demo.launch()
|