```python
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr

# Load Falcon-7B-Instruct (open and free).
# Note: a 7B model needs roughly 14 GB of memory in half precision,
# so plan for GPU-backed hardware if you want usable latency.
model_name = "tiiuae/falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Define personas
personas = {
    "🟢 Optimist": "Someone who sees the good and hopeful side of any issue.",
    "🔴 Pessimist": "Someone who focuses on the risks, negatives, or problems in any issue.",
    "🟡 Neutral": "Someone who presents a balanced, unbiased view based on logic."
}
# Prompt template
def build_prompt(topic, style):
    return f"""You are a debater. Take this persona: {style}.
Debate Topic: "{topic}"
Provide a thoughtful and opinionated response with reasoning.
Answer:"""
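
# Example (hypothetical topic): build_prompt("Social media does more good than harm",
# personas["🟢 Optimist"]) pins the persona and topic ahead of the "Answer:" cue,
# which debate() below splits on to isolate the model's reply.
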
# Generate responses
def debate(topic):
    results = {}
    for label, persona in personas.items():
        prompt = build_prompt(topic, persona)
        # do_sample=True is required for temperature to actually affect generation
        output = pipe(prompt, max_new_tokens=200, do_sample=True, temperature=0.9)[0]["generated_text"]
        # Keep only the model's continuation after the "Answer:" cue
        answer = output.split("Answer:")[-1].strip()
        results[label] = answer
    return results
# Gradio interface
def run_debate(topic):
    responses = debate(topic)
    return "\n\n".join([f"**{k}**:\n{v}" for k, v in responses.items()])

gr.Interface(
    fn=run_debate,
    inputs=gr.Textbox(label="Enter a Debate Topic"),
    outputs=gr.Markdown(),
    title="🎙️ Multi-Agent Debate Simulator",
    description="Simulates multi-perspective debates using Falcon-7B on Hugging Face 🤗."
).launch()
```
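
To run this on Hugging Face Spaces, save the script as `app.py` and list its dependencies in a `requirements.txt` next to it. A minimal sketch, assuming the standard Gradio Space setup (packages unpinned; `accelerate` is optional and an assumption here, not part of the original script):

```
transformers
torch
gradio
accelerate  # optional: lets you pass device_map="auto" when loading the model
```

On the free CPU tier a 7B model will generate very slowly, so consider GPU hardware or a smaller instruct model while testing the interface.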