Spaces:
No application file
No application file
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,39 +1,334 @@
|
|
| 1 |
import os
|
| 2 |
import gradio as gr
|
| 3 |
from google import genai
|
|
|
|
|
|
|
|
|
|
| 4 |
|
| 5 |
# --- Load Gemini API key securely ---
|
|
|
|
| 6 |
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
|
| 7 |
if not GEMINI_API_KEY:
|
| 8 |
-
raise ValueError("❌ GEMINI_API_KEY not found.
|
| 9 |
|
| 10 |
-
# --- Initialize the client
|
| 11 |
-
|
|
|
|
| 12 |
|
| 13 |
-
# ---
|
| 14 |
-
|
|
|
|
| 15 |
|
| 16 |
-
|
| 17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
try:
|
| 19 |
-
response =
|
| 20 |
-
|
| 21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
except Exception as e:
|
| 23 |
-
|
| 24 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
|
| 34 |
-
|
| 35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
-
# --- Launch ---
|
| 38 |
if __name__ == "__main__":
|
| 39 |
-
demo.launch()
|
|
|
|
# Standard library
import html
import json
import os
import re
import time

# Third-party
import gradio as gr
from google import genai
| 7 |
|
| 8 |
# --- Load Gemini API key securely ---
# Make sure to set the GEMINI_API_KEY in your Hugging Face Space Secrets
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    # Fail fast at import time: nothing in this app works without a key.
    raise ValueError("❌ GEMINI_API_KEY not found. Please set it in your Hugging Face Space Secrets.")

# --- Initialize the client ---
# The new genai.Client() is the recommended way.
# Shared by every handler below; created once at module load.
client = genai.Client(api_key=GEMINI_API_KEY)
|
| 17 |
|
| 18 |
+
# --- UI Customization: Bright Neon Trippy Blending ---
# Custom CSS injected into gr.Blocks via the `css=` parameter below.
# NOTE(review): indentation inside this CSS string is cosmetic only.
cyberpunk_css = """
@import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap');

:root {
    --neon-pink: #ff00ff;
    --neon-cyan: #00ffff;
    --neon-green: #39ff14;
    --bg-color-1: #1a001a;
    --bg-color-2: #0d001a;
    --border-glow: 0 0 5px var(--neon-cyan), 0 0 10px var(--neon-cyan), 0 0 20px var(--neon-pink), 0 0 30px var(--neon-pink);
    --text-glow: 0 0 3px var(--neon-green);
}

.gradio-container {
    background: linear-gradient(45deg, var(--bg-color-1), var(--bg-color-2));
    font-family: 'Orbitron', sans-serif;
}

h1, h2, .gr-label {
    color: var(--neon-cyan);
    text-shadow: 0 0 5px var(--neon-cyan), 0 0 10px var(--neon-cyan);
    text-align: center;
}

.gr-button {
    background: transparent;
    color: var(--neon-green);
    border: 2px solid var(--neon-green);
    border-radius: 5px;
    box-shadow: 0 0 5px var(--neon-green), 0 0 10px var(--neon-green) inset;
    transition: all 0.3s ease;
}

.gr-button:hover {
    background: var(--neon-green);
    color: var(--bg-color-2);
    box-shadow: 0 0 10px var(--neon-green), 0 0 20px var(--neon-green), 0 0 30px var(--neon-green);
}

.chatbot {
    background-color: rgba(13, 0, 26, 0.8);
    border: 1px solid var(--neon-pink);
    box-shadow: var(--border-glow);
}

.user-message, .bot-message {
    border-radius: 5px;
    padding: 10px;
    box-shadow: 0 0 5px var(--neon-pink) inset;
}

.user-message { border-left: 5px solid var(--neon-cyan); background-color: rgba(0, 255, 255, 0.05); }
.bot-message { border-left: 5px solid var(--neon-pink); background-color: rgba(255, 0, 255, 0.05); }

.gr-dropdown, .gr-radio {
    border: 1px solid var(--neon-cyan) !important;
    background-color: var(--bg-color-2) !important;
    color: var(--neon-cyan) !important;
}

footer { display: none !important; }
"""
|
| 81 |
+
|
| 82 |
+
# --- Helper function to parse LLM lists ---
|
| 83 |
+
def parse_list_from_llm(text_response):
    """Cleans up numbered/bulleted lists from Gemini response.

    Strips leading "1. ", "1) " numbering or "* " bullets from each line
    and drops blank lines, returning a list of clean item strings.
    """
    items = []
    for raw_line in text_response.strip().split('\n'):
        # Remove numbering like "1. ", "1) ", or bullet points "* "
        item = re.sub(r'^\d+[\.\)]\s*|^\*\s*', '', raw_line).strip()
        if item:
            items.append(item)
    return items
|
| 89 |
+
|
| 90 |
+
# --- Core Application Logic ---
|
| 91 |
+
|
| 92 |
+
def generate_topics():
    """Calls Gemini to generate 20 debate topics.

    Returns a dict of Gradio component updates: stores the parsed topics in
    `topics_state`, reveals the topic dropdown, and hides this button. On
    failure it shows a warning and restores the initial state.
    """
    prompt = "Generate a list of exactly 20 specific, debatable topics related to the future of coding, banking, and artificial intelligence. The topics should be concise and provocative. Format as a numbered list."
    try:
        # BUG FIX: the google-genai Client has no `generate_text` method and
        # its responses expose `.text`, not `.result`. Text generation goes
        # through client.models.generate_content. The legacy
        # "models/gemini-pro" name is also retired in this SDK.
        response = client.models.generate_content(
            model="gemini-2.0-flash", contents=prompt
        )
        topics = parse_list_from_llm(response.text)
        # UI Updates: Show next step, hide current
        return {
            topics_state: topics,
            topic_dropdown: gr.Dropdown(choices=topics, value=None, label="Step 2: Select a Topic", visible=True),
            generate_topics_btn: gr.Button(visible=False),
        }
    except Exception as e:
        gr.Warning(f"Error generating topics: {e}")
        return {topics_state: [], topic_dropdown: gr.Dropdown(visible=False), generate_topics_btn: gr.Button(visible=True)}
|
| 107 |
+
|
| 108 |
+
def generate_questions(selected_topic):
    """Calls Gemini to generate 20 questions for the selected topic.

    Returns a dict of component updates: stores the parsed questions in
    `questions_state` and reveals the question dropdown. If no topic is
    selected yet, only hides the question dropdown.
    """
    if not selected_topic:
        return {question_dropdown: gr.Dropdown(value=None, visible=False)}

    prompt = f"For the debate topic '{selected_topic}', generate a list of exactly 20 thought-provoking starting questions or controversial statements to kick off the debate. Format as a numbered list."
    try:
        # BUG FIX: same as generate_topics — use client.models.generate_content
        # (response `.text`), not the non-existent `generate_text`/`.result`,
        # and a current model name instead of the retired "models/gemini-pro".
        response = client.models.generate_content(
            model="gemini-2.0-flash", contents=prompt
        )
        questions = parse_list_from_llm(response.text)
        return {
            questions_state: questions,
            question_dropdown: gr.Dropdown(choices=questions, value=None, label="Step 3: Select a Starting Question", visible=True)
        }
    except Exception as e:
        gr.Warning(f"Error generating questions: {e}")
        return {questions_state: [], question_dropdown: gr.Dropdown(visible=False)}
|
| 124 |
+
|
| 125 |
+
def enable_debate_button(selected_question):
    """Makes the debate button visible once a question is chosen."""
    is_chosen = bool(selected_question)
    return {start_debate_btn: gr.Button(visible=is_chosen)}
|
| 128 |
+
|
| 129 |
+
def run_debate(topic, question, history):
    """Main function to run the 20-turn debate between two AI agents.

    Generator: yields dict updates mapped onto this event's outputs
    (chatbot, debate_log_state, save_row) so the chat renders turn by turn.
    Dict yields may update any subset of the listed outputs.
    """
    if not topic or not question:
        gr.Warning("Topic and Question must be selected before starting.")
        # BUG FIX: a `return value` inside a generator is discarded by
        # Gradio — yield the update instead so the UI actually reflects it.
        yield {chatbot: [], debate_log_state: history}
        return

    # --- Initialize two separate agent chat sessions for this debate ---
    # This maintains their distinct personalities and contexts.
    agent_a_persona = "You are Agent A, a tech optimist and futurist. You see the best in new technology. Your goal is to argue for the positive aspects of the debate topic. Keep your responses concise and impactful."
    agent_b_persona = "You are Agent B, a cautious skeptic and ethicist. You critically examine the potential downsides and risks of new technology. Your goal is to argue for the negative or cautionary aspects of the debate topic. Keep your responses concise and impactful."

    # BUG FIX: genai.Client has no `start_chat` method. In the google-genai
    # SDK a chat session is created via client.chats.create, with the persona
    # supplied as a system instruction in the generation config.
    agent_a_chat = client.chats.create(
        model="gemini-2.0-flash",
        config={"system_instruction": agent_a_persona},
    )
    agent_b_chat = client.chats.create(
        model="gemini-2.0-flash",
        config={"system_instruction": agent_b_persona},
    )

    current_history = [(None, f"**DEBATE TOPIC:** {topic}\n**STARTING QUESTION:** {question}")]
    # BUG FIX: this event declares three outputs, so the original 2-tuple
    # yields would raise an arity error; dict yields are safe.
    yield {chatbot: current_history, debate_log_state: current_history}  # Update UI immediately

    last_response = question

    # The debate loop: 10 rounds, one message per agent = 20 total messages
    for _ in range(10):
        try:
            # --- Agent A's Turn (Optimist) ---
            time.sleep(1)  # Small delay for realism
            prompt_a = f"Based on the last statement ('{last_response}'), provide your optimistic counter-argument or statement."
            response_a = agent_a_chat.send_message(prompt_a)
            current_history.append(("Agent A (Optimist)", response_a.text))
            last_response = response_a.text
            yield {chatbot: current_history, debate_log_state: current_history}

            # --- Agent B's Turn (Skeptic) ---
            time.sleep(1)
            prompt_b = f"Based on Agent A's last statement ('{last_response}'), provide your skeptical counter-argument or cautionary statement."
            response_b = agent_b_chat.send_message(prompt_b)
            current_history.append(("Agent B (Skeptic)", response_b.text))
            last_response = response_b.text
            yield {chatbot: current_history, debate_log_state: current_history}

        except Exception as e:
            error_message = f"⚠️ An error occurred during the debate: {e}"
            current_history.append((None, error_message))
            yield {chatbot: current_history, debate_log_state: current_history}
            break  # Exit loop on error

    current_history.append((None, "--- DEBATE CONCLUDED ---"))

    # Final update showing save buttons
    yield {
        chatbot: current_history,
        debate_log_state: current_history,
        save_row: gr.Row(visible=True)
    }
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def save_as_json(log):
    """Saves the debate log to a JSON file and returns it for download.

    `log` is a list of (speaker, message) pairs; a None speaker is
    recorded as "SYSTEM".
    """
    filepath = "debate_log.json"
    # Format for clarity
    formatted_log = [{"speaker": speaker if speaker else "SYSTEM", "message": message} for speaker, message in log]
    # BUG FIX: force UTF-8 — the log contains emoji/markdown, and the
    # platform default encoding (e.g. cp1252 on Windows) would raise
    # UnicodeEncodeError. ensure_ascii=False keeps the file human-readable.
    with open(filepath, "w", encoding="utf-8") as f:
        json.dump(formatted_log, f, indent=2, ensure_ascii=False)
    return gr.File(value=filepath, label="Download JSON")
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def save_as_html(log):
    """Saves the debate log to a styled HTML file and returns it for download.

    `log` is a list of (speaker, message) pairs; a None/unknown speaker is
    rendered as a SYSTEM message.
    """
    filepath = "debate_log.html"

    # Simple but effective HTML styling
    html_style = """
    <style>
        body { font-family: sans-serif; background-color: #111; color: #eee; }
        .container { max-width: 800px; margin: auto; padding: 20px; }
        .message { margin-bottom: 1em; padding: 1em; border-radius: 8px; }
        .system { background-color: #333; text-align: center; font-weight: bold; }
        .agent-a { background-color: #023020; border-left: 5px solid #39ff14; }
        .agent-b { background-color: #30022b; border-left: 5px solid #ff00ff; }
        .speaker { font-weight: bold; margin-bottom: 0.5em; display: block; }
    </style>
    """

    html_content = f"<html><head><title>Debate Log</title>{html_style}</head><body><div class='container'>"
    html_content += "<h1>Debate Log</h1>"

    for speaker, message in log:
        if speaker == "Agent A (Optimist)":
            css_class = "agent-a"
        elif speaker == "Agent B (Skeptic)":
            css_class = "agent-b"
        else:
            css_class = "system"
            speaker = "SYSTEM"

        # BUG FIX: the original used gr.Markdown(message).value as
        # "sanitization", but that performs no escaping at all — model output
        # containing markup would be injected verbatim. html.escape gives
        # real escaping; then render newlines as <br>.
        safe_message = html.escape(message).replace('\n', '<br>')
        html_content += f'<div class="message {css_class}"><span class="speaker">{speaker}</span>{safe_message}</div>'

    html_content += "</div></body></html>"

    # UTF-8 so emoji / non-ASCII debate text can't crash the save on
    # platforms with a legacy default encoding.
    with open(filepath, "w", encoding="utf-8") as f:
        f.write(html_content)

    return gr.File(value=filepath, label="Download HTML")
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def reset_all():
    """Resets the entire UI to its initial state.

    Clears the chat and all state holders, re-shows the Step 1 button,
    hides the dropdowns / debate / save controls, and empties the
    download slot.
    """
    fresh_topic_dd = gr.Dropdown(value=None, choices=[], label="Step 2: Select a Topic", visible=False)
    fresh_question_dd = gr.Dropdown(value=None, choices=[], label="Step 3: Select a Starting Question", visible=False)
    return {
        chatbot: [],
        debate_log_state: [],
        topics_state: [],
        questions_state: [],
        generate_topics_btn: gr.Button(visible=True),
        topic_dropdown: fresh_topic_dd,
        question_dropdown: fresh_question_dd,
        start_debate_btn: gr.Button(visible=False),
        save_row: gr.Row(visible=False),
        download_file: None,
    }
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
# --- Gradio UI Layout ---
with gr.Blocks(theme=gr.themes.Base(), css=cyberpunk_css, title="AI DEBATE SIMULATOR_v2.5") as demo:
    # State management for data persistence between interactions
    debate_log_state = gr.State([])
    topics_state = gr.State([])
    questions_state = gr.State([])

    gr.Markdown("# 👾 AI DEBATE SIMULATOR_v2.5 👾")
    gr.Markdown("## A 20-Turn War of Wits Between Two AI Agents")

    with gr.Row():
        # Left column: the step-by-step control panel.
        with gr.Column(scale=1):
            gr.Markdown("### **CONTROL PANEL**")
            generate_topics_btn = gr.Button("Step 1: Generate 20 Debate Topics", variant="primary")
            # Dropdowns start hidden; the handlers reveal them step by step.
            topic_dropdown = gr.Dropdown(label="Step 2: Select a Topic", visible=False, interactive=True)
            question_dropdown = gr.Dropdown(label="Step 3: Select a Starting Question", visible=False, interactive=True)
            start_debate_btn = gr.Button("Step 4: START THE DEBATE", visible=False)

            # Save buttons only appear after a debate concludes.
            with gr.Row(visible=False) as save_row:
                save_json_btn = gr.Button("Save as JSON")
                save_html_btn = gr.Button("Save as HTML")

            download_file = gr.File(label="Download Your Log")

            reset_btn = gr.Button("Start New Debate (Reset All)", variant="stop")

        # Right column: the debate transcript.
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                label="Debate Arena",
                bubble_full_width=False,
                avatar_images=(None, "https://i.imgur.com/Q51aD2S.png"),  # Simple bot avatar
                height=700
            )

    # --- Event Handling Logic ---
    # Step 1: fetch topics, reveal the topic dropdown, hide the button.
    generate_topics_btn.click(
        fn=generate_topics,
        inputs=None,
        outputs=[topics_state, topic_dropdown, generate_topics_btn]
    )

    # Step 2: a chosen topic triggers question generation.
    topic_dropdown.change(
        fn=generate_questions,
        inputs=topic_dropdown,
        outputs=[questions_state, question_dropdown]
    )

    # Step 3: a chosen question reveals the debate button.
    question_dropdown.change(
        fn=enable_debate_button,
        inputs=question_dropdown,
        outputs=[start_debate_btn]
    )

    # Step 4: lock the controls, then stream the debate via run_debate
    # (a generator, so the chat updates turn by turn).
    start_debate_btn.click(
        fn=lambda: {
            # Disable controls during debate
            generate_topics_btn: gr.Button(visible=False),
            topic_dropdown: gr.Dropdown(interactive=False),
            question_dropdown: gr.Dropdown(interactive=False),
            start_debate_btn: gr.Button(visible=False),
        },
        inputs=None,
        outputs=[generate_topics_btn, topic_dropdown, question_dropdown, start_debate_btn]
    ).then(
        fn=run_debate,
        inputs=[topic_dropdown, question_dropdown, debate_log_state],
        outputs=[chatbot, debate_log_state, save_row]
    )

    # Export handlers: both write a file and surface it in download_file.
    save_json_btn.click(save_as_json, inputs=debate_log_state, outputs=download_file)
    save_html_btn.click(save_as_html, inputs=debate_log_state, outputs=download_file)

    # Full reset back to Step 1.
    reset_btn.click(
        fn=reset_all,
        inputs=None,
        outputs=[
            chatbot, debate_log_state, topics_state, questions_state,
            generate_topics_btn, topic_dropdown, question_dropdown,
            start_debate_btn, save_row, download_file
        ]
    )
|
| 331 |
|
| 332 |
+
# --- Launch the App ---
if __name__ == "__main__":
    # debug=True surfaces tracebacks in the console while developing.
    demo.launch(debug=True)
|