Update app.py
app.py CHANGED
@@ -1,45 +1,43 @@
+import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import gradio as gr
 
-# Load
-
-tokenizer = AutoTokenizer.from_pretrained(
-
+# Load Mistral-7B with 8-bit quantization (saves memory!)
+model_id = "mistralai/Mistral-7B-Instruct-v0.1"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    device_map="auto",         # Automatically assign layers to available GPU/CPU
+    load_in_8bit=True,         # Use 8-bit quantization
+    torch_dtype=torch.float16  # Reduce precision to save memory
+)
+
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-#
+# Viewpoints
 personas = {
-    "🟢 Optimist": "
-    "🔴 Pessimist": "
-    "🟡 Neutral": "
+    "🟢 Optimist": "Respond with hope and positivity",
+    "🔴 Pessimist": "Respond critically and negatively",
+    "🟡 Neutral": "Respond with a balanced, unbiased tone"
 }
 
-#
-def
-
-
-
-
-
-
-def debate(topic):
-    results = {}
-    for label, persona in personas.items():
-        prompt = build_prompt(topic, persona)
-        output = pipe(prompt, max_new_tokens=200, temperature=0.9)[0]["generated_text"]
-        answer = output.split("Answer:")[-1].strip()
-        results[label] = answer
-    return results
+# Generate debate
+def generate_debate(topic):
+    responses = {}
+    for label, style in personas.items():
+        prompt = f"[INST] You are a debater. {style}. Topic: '{topic}'. Give a short opinion. [/INST]"
+        result = pipe(prompt, max_new_tokens=150, temperature=0.7, do_sample=True)[0]["generated_text"]
+        responses[label] = result.strip()
+    return "\n\n".join([f"**{k}**: {v}" for k, v in responses.items()])
 
-# Gradio
-
-
-    return "\n\n".join([f"**{k}**:\n{v}" for k, v in responses.items()])
-
-gr.Interface(
-    fn=run_debate,
+# Gradio app
+demo = gr.Interface(
+    fn=generate_debate,
     inputs=gr.Textbox(label="Enter a Debate Topic"),
     outputs=gr.Markdown(),
-    title="
-    description="
-)
+    title="🧠 Multi-Agent Debate Simulator (Mistral 7B)",
+    description="Debate with different perspectives using the Mistral-7B-Instruct model (quantized)."
+)
+
+demo.launch()
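
A note on the quantization flags in the new model load: passing load_in_8bit=True straight to from_pretrained needs the bitsandbytes package (and a CUDA GPU), device_map="auto" needs accelerate, and newer transformers releases deprecate the bare flag in favor of a BitsAndBytesConfig. A minimal sketch of the same load under that newer API, assuming those packages are installed:

# Sketch only: the same 8-bit load expressed via BitsAndBytesConfig,
# which newer transformers versions expect instead of the bare flag.
# Assumes bitsandbytes and accelerate are installed and a CUDA GPU exists.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.1",
    device_map="auto",                                          # spread layers across GPU/CPU
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),  # 8-bit weights via bitsandbytes
    torch_dtype=torch.float16,                                  # half precision for non-quantized modules
)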
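
The hand-written [INST] ... [/INST] wrapper in generate_debate is the instruction format Mistral-7B-Instruct expects. The tokenizer can also build that string from the model's own chat template, which avoids format drift if the model is ever swapped; a sketch, assuming a transformers version that ships apply_chat_template:

# Sketch: derive the prompt from the model's chat template instead of
# hand-writing the [INST] tags (style and topic as in generate_debate).
messages = [{
    "role": "user",
    "content": f"You are a debater. {style}. Topic: '{topic}'. Give a short opinion.",
}]
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,              # return the formatted string, not token ids
    add_generation_prompt=True,  # end where the assistant's answer should start
)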
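
One behavioral detail: the text-generation pipeline returns the prompt plus the completion by default, so responses[label] = result.strip() echoes the whole [INST] prompt into the Markdown output. The pipeline's return_full_text argument turns that off; a sketch of the adjusted call inside the loop:

# Sketch: keep only the newly generated tokens so the prompt is not
# echoed back into the UI (return_full_text is a standard argument of
# the text-generation pipeline).
result = pipe(
    prompt,
    max_new_tokens=150,
    temperature=0.7,
    do_sample=True,
    return_full_text=False,  # drop the prompt from generated_text
)[0]["generated_text"]
responses[label] = result.strip()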