admin08077 committed on
Commit
361d146
Β·
verified Β·
1 Parent(s): 56c3319

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +313 -147
app.py CHANGED
@@ -1,7 +1,10 @@
1
- import os
2
  import gradio as gr
3
- from google import genai
 
 
4
  import time
 
5
 
6
  # --- Load Gemini API key securely ---
7
  GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
@@ -10,161 +13,324 @@ if not GEMINI_API_KEY:
10
 
11
  client = genai.Client()
12
 
13
- # --- Constants ---
14
- MODEL_NAME = "gemini-2.5-flash"
15
- AVATAR_IMAGES = {
16
- "Agent A (Optimist)": "https://i.imgur.com/2GMV9y7.png",
17
- "Agent B (Skeptic)": "https://i.imgur.com/KzX3T21.png",
18
- "SYSTEM": "https://i.imgur.com/z3uxLzT.png",
 
 
 
 
 
 
 
 
 
 
 
 
19
  }
20
 
21
- # --- UI CSS ---
22
- cyberpunk_css = """
23
- @import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap');
24
- :root {
25
- --neon-pink: #ff00ff; --neon-cyan: #00ffff; --neon-green: #39ff14;
26
- --bg-color-1: #1a001a; --bg-color-2: #0d001a; --bg-color-3: #2a003a;
27
- --border-glow: 0 0 5px var(--neon-cyan), 0 0 10px var(--neon-cyan), 0 0 20px var(--neon-pink), 0 0 30px var(--neon-pink);
28
- --text-shadow: 0 0 3px var(--neon-cyan), 0 0 5px var(--neon-cyan);
 
29
  }
30
- .gradio-container { background: linear-gradient(45deg, var(--bg-color-1), var(--bg-color-2)); font-family: 'Orbitron', sans-serif; }
31
- h1, .gr-label { color: var(--neon-cyan); text-shadow: var(--text-shadow); text-align: center; }
32
- .gr-button { background: transparent; color: var(--neon-green); border: 2px solid var(--neon-green) !important; border-radius: 5px; box-shadow: 0 0 5px var(--neon-green), 0 0 10px var(--neon-green) inset; transition: all 0.3s ease; font-family: 'Orbitron', sans-serif; }
33
- .gr-button:hover { background: var(--neon-green); color: var(--bg-color-2); box-shadow: 0 0 10px var(--neon-green), 0 0 20px var(--neon-green), 0 0 30px var(--neon-green); }
34
- .gr-button:disabled { background: #333 !important; color: #555 !important; border-color: #555 !important; box-shadow: none !important; cursor: not-allowed; }
35
- .chatbot { background-color: rgba(13, 0, 26, 0.8); border: 1px solid var(--neon-pink); box-shadow: var(--border-glow); }
36
- .message-bubble { background: none !important; border: none !important; }
37
- footer { display: none !important; }
38
- """
39
-
40
- def stream_llm_response(model, prompt, history_list, role_name, avatar_url):
41
- """A dedicated generator to stream a response into the history and yield updates."""
42
- history_list.append({"role": "assistant", "name": role_name, "content": "", "avatar_url": avatar_url})
43
- full_response = ""
44
- try:
45
- stream = model.generate_content(prompt, stream=True)
46
- for chunk in stream:
47
- full_response += chunk.text
48
- history_list[-1]["content"] = full_response + "β–Œ"
49
- yield history_list
50
- history_list[-1]["content"] = full_response.strip()
51
- except Exception as e:
52
- history_list[-1]["content"] = f"⚠️ An error occurred: {e}"
53
-
54
- yield history_list
55
- return full_response.strip()
56
 
57
- def run_full_debate_flow(chat_history):
58
- """Handles the entire automated debate, narrating each step to the screen."""
59
-
60
- def add_system_message(text):
61
- """Helper to append a system message and yield the UI update."""
62
- chat_history.append({"role": "user", "name": "SYSTEM", "content": text, "avatar_url": AVATAR_IMAGES["SYSTEM"]})
63
- time.sleep(0.5) # A brief, dramatic pause
64
- return {chatbot: chat_history}
65
-
66
- chat_history.clear()
67
- yield {
68
- start_button: gr.update(interactive=False, value="πŸ”„ Initializing..."),
69
- chatbot: chat_history,
70
- download_button: gr.update(visible=False),
71
- download_file_output: gr.update(value=None, visible=False)
72
- }
73
 
74
- yield add_system_message("Receiving transmission... Booting up debate protocol.")
75
-
76
- try:
77
- gen_model = genai.GenerativeModel(MODEL_NAME)
78
-
79
- yield add_system_message("Contacting orbital AI to generate a fresh topic...")
80
- topic_prompt = "Generate a single, specific, and highly debatable topic about the future of technology, AI, or finance. Output only the topic string, nothing else."
81
- topic = ""
82
- for updated_history in stream_llm_response(gen_model, topic_prompt, chat_history, "SYSTEM", AVATAR_IMAGES["SYSTEM"]):
83
- topic = updated_history[-1]['content'].replace('β–Œ', '')
84
- yield { chatbot: updated_history }
85
- if "⚠️" in topic or not topic: raise ValueError("Topic generation failed.")
86
-
87
- yield add_system_message("Formulating a provocative starting question...")
88
- question_prompt = f"For the debate topic '{topic}', generate one provocative, open-ended starting question. Output only the question string, nothing else."
89
- question = ""
90
- for updated_history in stream_llm_response(gen_model, question_prompt, chat_history, "SYSTEM", AVATAR_IMAGES["SYSTEM"]):
91
- question = updated_history[-1]['content'].replace('β–Œ', '')
92
- yield { chatbot: updated_history }
93
- if "⚠️" in question or not question: raise ValueError("Question generation failed.")
94
-
95
- except Exception as e:
96
- yield add_system_message(f"❌ FATAL ERROR during setup: {e}. Protocol terminated. Please try again.")
97
- yield { start_button: gr.update(interactive=True, value="β–· RE-INITIATE DEBATE PROTOCOL") }
98
- return
99
-
100
- yield add_system_message("Initializing agents... Let the debate begin!")
101
- yield { start_button: gr.update(value="βš”οΈ DEBATE IN PROGRESS...") }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
- agent_a_chat = gen_model.start_chat(history=[{'role': 'user', 'parts': ["You are Agent A, a tech optimist. Argue positively."]}, {'role': 'model', 'parts': ["Understood."]}])
104
- agent_b_chat = gen_model.start_chat(history=[{'role': 'user', 'parts': ["You are Agent B, a skeptic. Argue cautiously."]}, {'role': 'model', 'parts': ["Acknowledged."]}])
105
-
106
- last_response = question
107
- for _ in range(5):
108
- prompt_a = f"Debate Topic: '{topic}'. The previous statement was: '{last_response}'. Provide your optimistic argument."
109
- for updated_history in stream_llm_response(agent_a_chat, prompt_a, chat_history, "Agent A (Optimist)", AVATAR_IMAGES["Agent A (Optimist)"]):
110
- last_response = updated_history[-1]['content'].replace('β–Œ', '')
111
- yield { chatbot: updated_history }
112
-
113
- prompt_b = f"Debate Topic: '{topic}'. Agent A said: '{last_response}'. Provide your skeptical counter-argument."
114
- for updated_history in stream_llm_response(agent_b_chat, prompt_b, chat_history, "Agent B (Skeptic)", AVATAR_IMAGES["Agent B (Skeptic)"]):
115
- last_response = updated_history[-1]['content'].replace('β–Œ', '')
116
- yield { chatbot: updated_history }
117
-
118
- yield add_system_message("--- DEBATE CONCLUDED ---")
119
- yield {
120
- chatbot: chat_history,
121
- start_button: gr.update(interactive=True, value="β–· START NEW DEBATE"),
122
- download_button: gr.update(visible=True)
123
  }
 
 
124
 
125
- def create_transcript(chat_history):
126
- """Formats chat history into a downloadable text file."""
127
- if not chat_history: return None
128
- transcript = "AI DEBATE SIMULATOR TRANSCRIPT\n=================================\n\n"
129
- for message in chat_history:
130
- content = message.get("content", "").replace("β–Œ", "")
131
- transcript += f"[{message.get('name', 'SYSTEM')}]\n{content}\n\n---------------------------------\n\n"
132
- filepath = "debate_transcript.txt"
133
- with open(filepath, "w", encoding="utf-8") as f: f.write(transcript)
134
- return gr.update(value=filepath, visible=True)
135
-
136
- # --- Gradio UI Layout ---
137
- with gr.Blocks(css=cyberpunk_css, theme=gr.themes.Base(), title="AI Debate Simulator v4.2") as demo:
138
- chat_history_state = gr.State([])
139
-
140
- gr.Markdown("# πŸ‘Ύ AI DEBATE SIMULATOR v4.2 πŸ‘Ύ")
141
- gr.Markdown("### An Automated War of Wits, Narrated in Real-Time")
142
-
143
- chatbot = gr.Chatbot(
144
- type="messages",
145
- label="Debate Arena",
146
- height=700,
147
- avatar_images=list(AVATAR_IMAGES.values()),
148
- show_copy_button=True
149
- )
150
-
151
- with gr.Row():
152
- start_button = gr.Button("β–· START NEW DEBATE", variant="primary", scale=3)
153
- download_button = gr.Button("πŸ’Ύ Download Transcript", visible=False, scale=1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
 
155
- download_file_output = gr.File(label="Download", visible=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
 
157
- start_button.click(
158
- fn=run_full_debate_flow,
159
- inputs=[chat_history_state],
160
- outputs=[chatbot, start_button, download_button, download_file_output]
161
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
162
 
163
- download_button.click(
164
- fn=create_transcript,
165
- inputs=[chat_history_state],
166
- outputs=download_file_output
167
- )
168
 
 
169
  if __name__ == "__main__":
170
- demo.queue().launch(debug=True)
 
 
# Standard library imports first, then third-party (PEP 8 grouping).
import os
import random
import time
from textwrap import dedent

# Fixed: `import google as genai` bound the bare `google` namespace package,
# which exposes no Gemini API at all. The service layer below relies on
# genai.configure() / genai.GenerativeModel(), which live in the
# google.generativeai SDK.
import google.generativeai as genai
import gradio as gr

# --- Load Gemini API key securely ---
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")

# NOTE: the former module-level `client = genai.Client()` was removed.
# google.generativeai has no Client class (that belongs to the newer
# `google-genai` SDK), and the object was unused — GeminiService below
# owns all API access.
15
 
# --- 1. CONSTANTS ---
# Mirrors the constant set of the original React app.
GEMINI_TEXT_MODEL = 'gemini-2.5-flash'
# Note: imagen-3.0-generate-002 is not available in the Python SDK yet.
# The app logic handles this limitation gracefully.
GEMINI_IMAGE_MODEL = 'imagen-3.0-generate-002'


class Agent:
    """Namespace of role identifiers used to tag debate participants."""
    SYSTEM = 'SYSTEM'
    OPTIMIST = 'OPTIMIST'
    SKEPTIC = 'SKEPTIC'


SYSTEM_AVATAR = 'https://i.imgur.com/z3uxLzT.png'

# Avatar image URL per participant; the SYSTEM entry reuses SYSTEM_AVATAR.
AGENT_AVATARS = {
    Agent.OPTIMIST: 'https://i.imgur.com/2GMV9y7.png',
    Agent.SKEPTIC: 'https://i.imgur.com/KzX3T21.png',
    Agent.SYSTEM: SYSTEM_AVATAR,
}


def _optimist_turn_prompt(topic, last_response):
    """Build the per-turn prompt for Agent A (the optimist)."""
    return f"Debate Topic: '{topic}'. The last statement was: '{last_response}'. Provide your powerful, optimistic argument."


def _skeptic_turn_prompt(topic, last_response):
    """Build the per-turn prompt for Agent B (the skeptic)."""
    return f"Debate Topic: '{topic}'. Agent A said: '{last_response}'. Provide your insightful, skeptical counter-argument."


# Personality primer plus per-turn prompt builder for each debating agent.
AGENT_PROMPTS = {
    Agent.OPTIMIST: {
        "personality": (
            "You are Agent A, a tech optimist and futurist. You see the "
            "incredible potential and benefits in every new technology. Your "
            "arguments are passionate, forward-thinking, and inspiring. "
            "Respond in 1-2 concise paragraphs."
        ),
        "prompt": _optimist_turn_prompt,
    },
    Agent.SKEPTIC: {
        "personality": (
            "You are Agent B, a cautious skeptic and ethicist. You ground the "
            "conversation by highlighting risks, societal impact, and "
            "unforeseen consequences. Your arguments are critical, grounded, "
            "and raise important questions. Respond in 1-2 concise paragraphs."
        ),
        "prompt": _skeptic_turn_prompt,
    },
}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
def _question_prompt(topic):
    """Build the prompt that asks the model for an opening question on *topic*."""
    return f"For the debate topic '{topic}', create one provocative question to start the debate. Output only the question itself."


# Setup prompts: a fixed topic-generation prompt plus a callable that derives
# the opening-question prompt from the chosen topic.
GENERATION_PROMPTS = {
    "topic": (
        "Generate one specific, debatable topic about the future of AI. "
        "The topic should be a short phrase. Output only the topic itself."
    ),
    "question": _question_prompt,
}

# Placeholder chat content shown before the first debate starts
# (tuples-format history: (user_message, bot_message)).
INITIAL_MESSAGE = [(None, "Awaiting signal to begin debate protocol...")]
# --- 2. JAVASCRIPT FOR TEXT-TO-SPEECH ---
# This JS code runs in the user's browser to enable speech via the
# Web Speech API (window.speechSynthesis). It is passed to Gradio as the
# `js=` argument of an event listener, so it never executes server-side.
# NOTE(review): the outer zero-arg function *returns* the actual
# (history, text, agent) handler; verify that Gradio invokes the returned
# closure with the event inputs as intended — confirm against the Gradio
# js-interop docs for the installed version.
tts_js = dedent("""
    function() {
        let optimistVoice = null;
        let skepticVoice = null;
        let voices = [];

        const OPTIMIST_VOICE_CANDIDATES = ['Google UK English Female', 'Samantha', 'Microsoft Zira - English (United States)', 'Karen', 'en-GB-female'];
        const SKEPTIC_VOICE_CANDIDATES = ['Google UK English Male', 'Daniel', 'Microsoft David - English (United States)', 'Rishi', 'en-GB-male'];

        const getVoices = () => {
            return new Promise((resolve) => {
                const voiceList = window.speechSynthesis.getVoices();
                if (voiceList.length) {
                    resolve(voiceList);
                    return;
                }
                window.speechSynthesis.onvoiceschanged = () => {
                    resolve(window.speechSynthesis.getVoices());
                };
            });
        };

        const selectVoices = async () => {
            voices = await getVoices();
            if (!voices.length) return;

            for (const name of OPTIMIST_VOICE_CANDIDATES) {
                const found = voices.find(v => v.name === name);
                if (found) { optimistVoice = found; break; }
            }
            if (!optimistVoice) {
                optimistVoice = voices.find(v => v.lang.startsWith('en') && /female/i.test(v.name)) || voices.find(v => v.lang.startsWith('en')) || null;
            }

            for (const name of SKEPTIC_VOICE_CANDIDATES) {
                const found = voices.find(v => v.name === name && v !== optimistVoice);
                if (found) { skepticVoice = found; break; }
            }
            if (!skepticVoice) {
                skepticVoice = voices.find(v => v.lang.startsWith('en') && /male/i.test(v.name) && v !== optimistVoice) || voices.find(v => v.lang.startsWith('en') && v !== optimistVoice) || null;
            }
        };

        window.initSpeech = () => {
            if (typeof window === 'undefined' || !('speechSynthesis' in window)) {
                console.warn("Speech Synthesis not supported in this browser.");
                return;
            }
            selectVoices();
            window.speechSynthesis.onvoiceschanged = selectVoices;
        };

        window.speakText = (text, agent) => {
            if (!window.speechSynthesis || !text) return;
            window.speechSynthesis.cancel();
            const utterance = new SpeechSynthesisUtterance(text);
            if (agent === 'OPTIMIST' && optimistVoice) {
                utterance.voice = optimistVoice;
            } else if (agent === 'SKEPTIC' && skepticVoice) {
                utterance.voice = skepticVoice;
            }
            utterance.pitch = 1;
            utterance.rate = 1;
            utterance.volume = 0.9;
            window.speechSynthesis.speak(utterance);
        };

        window.cancelSpeech = () => {
            if (window.speechSynthesis) {
                window.speechSynthesis.cancel();
            }
        };

        return (history, text, agent) => {
            window.speakText(text, agent);
            return history;
        }
    }
    """)
135
 
# --- 3. GEMINI SERVICE ---
class GeminiService:
    """A service class to encapsulate all interactions with the Google Gemini API."""

    def __init__(self):
        """Configure the SDK and create the shared text model.

        Raises:
            RuntimeError: if no API key can be found in the environment.
        """
        # Accept either variable name: the module header loads GEMINI_API_KEY,
        # while earlier revisions of this class required API_KEY. Checking
        # both keeps old deployments working; the falsy test also rejects an
        # empty-string key, which os.environ["API_KEY"] would have accepted.
        api_key = os.environ.get("API_KEY") or os.environ.get("GEMINI_API_KEY")
        if not api_key:
            raise RuntimeError("FATAL: API_KEY environment variable not set. Please add it to your environment.")
        genai.configure(api_key=api_key)
        self.text_model = genai.GenerativeModel(GEMINI_TEXT_MODEL)

    def _generate(self, prompt):
        """Run a one-shot, high-temperature generation and return stripped text."""
        response = self.text_model.generate_content(
            prompt,
            generation_config={"temperature": 1.0},
        )
        return response.text.strip()

    def generate_topic(self):
        """Generate a debate topic."""
        return self._generate(GENERATION_PROMPTS["topic"])

    def generate_question(self, topic):
        """Generate a starting question for the given topic."""
        return self._generate(GENERATION_PROMPTS["question"](topic))

    def create_agent_session(self, agent_type):
        """Create a stateful chat session primed with an agent's personality.

        Args:
            agent_type: one of the Agent.* identifiers keyed in AGENT_PROMPTS.

        Returns:
            A chat session whose send_message() continues the conversation.
        """
        personality = AGENT_PROMPTS[agent_type]["personality"]
        # Seed the chat with a user->model exchange so the personality acts
        # as a de-facto system prompt for every subsequent message.
        return self.text_model.start_chat(
            history=[
                {'role': 'user', 'parts': [personality]},
                {'role': 'model', 'parts': ["Understood. I am ready."]},
            ]
        )

# Instantiate the shared service once at import time. GeminiService.__init__
# raises RuntimeError when the API key is absent, so a misconfigured
# deployment fails fast at startup rather than mid-debate.
gemini_service = GeminiService()
175
+
176
+ # --- 4. GRADIO APP ---
177
+ def create_interface():
178
+ # Gradio CSS for styling
179
+ css = """
180
+ @import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&family=Roboto:wght@400&display=swap');
181
+ body { font-family: 'Roboto', sans-serif; }
182
+ .font-orbitron { font-family: 'Orbitron', sans-serif; }
183
+ .gradio-container { background: linear-gradient(45deg, #0d001a, #1a001a); }
184
+ .chatbot { background-color: rgba(0,0,0,0.2) !important; border: none !important; }
185
+ .chatbot .user-message, .chatbot .bot-message { max-width: 100% !important; }
186
+ .system-message-container { display: flex; align-items: center; justify-content: center; gap: 1rem; margin: 1rem 0; }
187
+ .system-message-line { flex-grow: 1; border-top: 1px dashed rgba(52, 211, 153, 0.3); }
188
+ .system-message-text { color: #34d399; font-family: monospace; font-size: 0.8rem; white-space: pre-wrap; text-align: center; }
189
+ .image-message-container { padding: 0.5rem; border: 1px solid rgba(168, 85, 247, 0.5); background: rgba(168, 85, 247, 0.1); border-radius: 0.5rem; margin: 1.5rem auto; max-width: 80%; }
190
+ .image-message-container img { border-radius: 0.25rem; }
191
+ .image-message-caption { color: #c084fc; font-size: 0.75rem; text-align: center; font-family: monospace; margin-top: 0.5rem; }
192
+ """
193
+
194
+ with gr.Blocks(theme=gr.themes.Base(), css=css) as demo:
195
+ # App State
196
+ topic_state = gr.State("")
197
+ question_state = gr.State("")
198
+
199
+ # Header
200
+ with gr.Row():
201
+ gr.HTML("""
202
+ <div style="text-align: center; padding: 1rem;">
203
+ <h1 class="font-orbitron" style="font-size: 2rem; font-weight: bold; color: #67e8f9; text-shadow: 0 0 5px #00ffff;">AI DEBATE SIMULATOR</h1>
204
+ <h2 class="font-orbitron" style="font-size: 1rem; color: #f472b6; text-shadow: 0 0 5px #ff00ff;">Hardened Edition</h2>
205
+ </div>
206
+ """)
207
+
208
+ # Chat Window
209
+ chatbot = gr.Chatbot(
210
+ label="Debate Arena",
211
+ value=INITIAL_MESSAGE,
212
+ elem_classes=["chatbot"],
213
+ height=600,
214
+ avatar_images=(AGENT_AVATARS[Agent.SKEPTIC], AGENT_AVATARS[Agent.OPTIMIST]),
215
+ show_copy_button=False,
216
+ bubble_full_width=False,
217
+ )
218
+
219
+ # Controls
220
+ start_btn = gr.Button("β–· START NEW DEBATE", variant="primary", elem_classes="font-orbitron")
221
 
222
+ # Hidden components for JS interop
223
+ text_to_speak = gr.Textbox(visible=False)
224
+ agent_to_speak = gr.Textbox(visible=False)
225
+
226
+ def add_system_message(history, text):
227
+ container_html = f"""
228
+ <div class="system-message-container">
229
+ <div class="system-message-line"></div>
230
+ <p class="system-message-text">{text}</p>
231
+ <div class="system-message-line"></div>
232
+ </div>
233
+ """
234
+ history.append((None, container_html))
235
+ time.sleep(0.5)
236
+ return history
237
+
238
+ def run_full_debate_flow():
239
+ yield {start_btn: gr.update(value="πŸ”„ Initializing...", interactive=False), chatbot: []}
240
+
241
+ history = []
242
+ yield { chatbot: history, text_to_speak: "", agent_to_speak: "" }
243
+
244
+ try:
245
+ history = add_system_message(history, "SYSTEM: Receiving transmission... Booting up debate protocol.")
246
+ yield {chatbot: history}
247
 
248
+ history = add_system_message(history, "SYSTEM: Contacting orbital AI to generate a fresh topic...")
249
+ yield {chatbot: history}
250
+ topic = gemini_service.generate_topic()
251
+
252
+ history = add_system_message(history, "SYSTEM: Formulating a provocative starting question...")
253
+ yield {chatbot: history}
254
+ question = gemini_service.generate_question(topic)
255
+
256
+ topic_and_question_text = f"TOPIC: {topic}\\n\\nQUESTION: {question}"
257
+ history = add_system_message(history, topic_and_question_text)
258
+ yield {chatbot: history, topic_state: topic, question_state: question}
259
+
260
+ history = add_system_message(history, "SYSTEM: Initializing agents... Let the debate begin!")
261
+ yield {chatbot: history, start_btn: gr.update(value="βš”οΈ DEBATE IN PROGRESS...")}
262
+
263
+ optimist_session = gemini_service.create_agent_session(Agent.OPTIMIST)
264
+ skeptic_session = gemini_service.create_agent_session(Agent.SKEPTIC)
265
+
266
+ last_response = question
267
+ debate_turns = 3
268
+ image_turn = random.randint(2, debate_turns * 2 - 1)
269
+
270
+ turn_counter = 0
271
+ for _ in range(debate_turns):
272
+ turn_counter += 1
273
+ history.append(("", ""))
274
+ optimist_prompt = AGENT_PROMPTS[Agent.OPTIMIST]['prompt'](topic, last_response)
275
+ stream = optimist_session.send_message(optimist_prompt, stream=True)
276
+ full_response = ""
277
+ for chunk in stream:
278
+ full_response += chunk.text
279
+ history[-1] = (full_response + "β–Œ", None)
280
+ yield {chatbot: history}
281
+ history[-1] = (full_response, None)
282
+ last_response = full_response
283
+ yield {chatbot: history, text_to_speak: full_response, agent_to_speak: Agent.OPTIMIST}
284
+
285
+ if turn_counter == image_turn:
286
+ history = add_system_message(history, "SYSTEM: Visual data generation is not supported in this Python version.\\n(Imagen 3 is not yet available in the Python SDK).")
287
+ yield {chatbot: history}
288
+
289
+ turn_counter += 1
290
+ history.append(("", ""))
291
+ skeptic_prompt = AGENT_PROMPTS[Agent.SKEPTIC]['prompt'](topic, last_response)
292
+ stream = skeptic_session.send_message(skeptic_prompt, stream=True)
293
+ full_response = ""
294
+ for chunk in stream:
295
+ full_response += chunk.text
296
+ history[-1] = (None, full_response + "β–Œ")
297
+ yield {chatbot: history}
298
+ history[-1] = (None, full_response)
299
+ last_response = full_response
300
+ yield {chatbot: history, text_to_speak: full_response, agent_to_speak: Agent.SKEPTIC}
301
+
302
+ if turn_counter == image_turn:
303
+ history = add_system_message(history, "SYSTEM: Visual data generation is not supported in this Python version.\\n(Imagen 3 is not yet available in the Python SDK).")
304
+ yield {chatbot: history}
305
+
306
+ except Exception as e:
307
+ print(f"An error occurred: {e}")
308
+ error_message = f"❌ FATAL ERROR: {e}. Protocol terminated."
309
+ history = add_system_message(history, error_message)
310
+ yield {chatbot: history, start_btn: gr.update(value="β–· RE-INITIATE DEBATE", interactive=True)}
311
+ return
312
+
313
+ history = add_system_message(history, "--- DEBATE CONCLUDED ---")
314
+ yield {chatbot: history, start_btn: gr.update(value="β–· START NEW DEBATE", interactive=True)}
315
+
316
+ start_btn.click(
317
+ fn=run_full_debate_flow,
318
+ outputs=[chatbot, start_btn, topic_state, question_state, text_to_speak, agent_to_speak]
319
+ )
320
+
321
+ agent_to_speak.change(
322
+ fn=None,
323
+ js=tts_js,
324
+ inputs=[chatbot, text_to_speak, agent_to_speak],
325
+ outputs=[chatbot],
326
+ api_name=False
327
+ )
328
+
329
+ demo.load(fn=None, js="() => { window.initSpeech(); }")
330
 
331
+ return demo
 
 
 
 
# --- 5. MAIN EXECUTION BLOCK ---
if __name__ == "__main__":
    # Build the UI, enable the event queue (required for the streaming
    # generator callbacks), and serve on all interfaces at Gradio's
    # conventional port.
    app_interface = create_interface()
    server = app_interface.queue()
    server.launch(debug=True, server_name="0.0.0.0", server_port=7860)