admin08077 committed on
Commit
413fce0
·
verified ·
1 Parent(s): 2034a2f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +186 -173
app.py CHANGED
@@ -1,208 +1,221 @@
1
  import os
2
- import time
3
- import re
4
  import gradio as gr
5
- from google import genai
6
 
7
- # Load API key
 
8
  GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
9
  if not GEMINI_API_KEY:
10
- raise ValueError("❌ GEMINI_API_KEY not found. Please set it in Hugging Face Space Secrets.")
11
 
12
- # Initialize Gemini client
13
  client = genai.Client()
14
 
15
- # Avatars
 
16
  AVATAR_IMAGES = {
17
  "Agent A (Optimist)": "https://i.imgur.com/2GMV9y7.png",
18
  "Agent B (Skeptic)": "https://i.imgur.com/KzX3T21.png",
19
- "SYSTEM": "https://i.imgur.com/z3uxLzT.png"
20
  }
21
 
22
- # Custom CSS
23
  cyberpunk_css = """
24
  @import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap');
25
  :root {
26
- --neon-pink: #ff00ff; --neon-cyan: #00ffff; --neon-green: #39ff14;
27
- --bg-color-1: #1a001a; --bg-color-2: #0d001a;
28
- --border-glow: 0 0 5px var(--neon-cyan), 0 0 10px var(--neon-cyan), 0 0 20px var(--neon-pink), 0 0 30px var(--neon-pink);
 
29
  }
30
  .gradio-container { background: linear-gradient(45deg, var(--bg-color-1), var(--bg-color-2)); font-family: 'Orbitron', sans-serif; }
31
- h1, h2, .gr-label { color: var(--neon-cyan); text-shadow: 0 0 5px var(--neon-cyan), 0 0 10px var(--neon-cyan); text-align: center; }
32
- .gr-button { background: transparent; color: var(--neon-green); border: 2px solid var(--neon-green); border-radius: 5px; box-shadow: 0 0 5px var(--neon-green), 0 0 10px var(--neon-green) inset; transition: all 0.3s ease; }
33
  .gr-button:hover { background: var(--neon-green); color: var(--bg-color-2); box-shadow: 0 0 10px var(--neon-green), 0 0 20px var(--neon-green), 0 0 30px var(--neon-green); }
 
34
  .chatbot { background-color: rgba(13, 0, 26, 0.8); border: 1px solid var(--neon-pink); box-shadow: var(--border-glow); }
35
- .gr-dropdown, .gr-radio { border: 1px solid var(--neon-cyan) !important; background-color: var(--bg-color-2) !important; color: var(--neon-cyan) !important; }
36
  footer { display: none !important; }
37
  """
38
 
39
- # Utils
40
- def parse_list(text_response):
41
- lines = text_response.strip().split('\n')
42
- return [re.sub(r'^\d+[.)]\s*', '', line).strip() for line in lines if line.strip()]
43
 
44
- # Topic Generator
45
- def generate_topics():
46
- prompt = "Generate a list of exactly 20 specific, debatable topics related to the future of coding, banking, and artificial intelligence. Format as a numbered list."
 
47
  try:
48
- chat = client.chats.create(model="gemini-2.5-flash")
49
- response = chat.send_message(prompt)
50
- topics = parse_list(response.text)
51
- return {
52
- topics_state: topics,
53
- topic_dropdown: gr.Dropdown(choices=topics, value=None, label="Step 2: Select a Topic", visible=True),
54
- generate_topics_btn: gr.Button(visible=False),
55
- }
56
  except Exception as e:
57
- gr.Warning(f"Error generating topics: {e}")
58
- return {
59
- topics_state: [],
60
- topic_dropdown: gr.Dropdown(visible=False),
61
- generate_topics_btn: gr.Button(visible=True)
62
- }
63
-
64
- # Question Generator
65
- def generate_questions(selected_topic):
66
- if not selected_topic:
67
- return {question_dropdown: gr.Dropdown(visible=False)}
68
- prompt = f"For the debate topic '{selected_topic}', generate a list of exactly 20 provocative questions to start the debate. Format as a numbered list."
 
 
 
 
 
 
 
 
 
 
 
69
  try:
70
- chat = client.chats.create(model="gemini-2.5-flash")
71
- response = chat.send_message(prompt)
72
- questions = parse_list(response.text)
73
- return {
74
- questions_state: questions,
75
- question_dropdown: gr.Dropdown(choices=questions, value=None, label="Step 3: Select a Starting Question", visible=True)
76
- }
 
 
77
  except Exception as e:
78
- gr.Warning(f"Error generating questions: {e}")
79
- return {
80
- questions_state: [],
81
- question_dropdown: gr.Dropdown(visible=False)
82
- }
83
-
84
- # Debate Enabler
85
- def enable_debate_button(selected_question):
86
- return {start_debate_btn: gr.Button(visible=bool(selected_question))}
87
-
88
- # Debate Runner
89
- def run_debate(topic, question):
90
- if not topic or not question:
91
- gr.Warning("Select a topic and question first.")
92
- return [], []
93
-
94
- agent_a = client.chats.create(model="gemini-2.5-flash")
95
- agent_b = client.chats.create(model="gemini-2.5-flash")
96
-
97
- agent_a.send_message("You are Agent A, a tech optimist and futurist. Defend the topic positively.")
98
- agent_b.send_message("You are Agent B, a skeptic and ethicist. Argue against the topic cautiously.")
99
-
100
- history = [{
101
- "role": "user", "name": "SYSTEM",
102
- "content": f"**DEBATE TOPIC:** {topic}\n\n**STARTING QUESTION:** {question}",
103
- "avatar_url": AVATAR_IMAGES["SYSTEM"]
104
- }]
105
- yield history, history
106
-
107
- last_message = question
108
-
109
- for i in range(5): # 5 rounds = 10 exchanges
110
- try:
111
- time.sleep(0.7)
112
- res_a = agent_a.send_message(f"Based on: '{last_message}', make an optimistic argument.")
113
- history.append({
114
- "role": "assistant", "name": "Agent A (Optimist)",
115
- "content": res_a.text,
116
- "avatar_url": AVATAR_IMAGES["Agent A (Optimist)"]
117
- })
118
- last_message = res_a.text
119
- yield history, history
120
-
121
- time.sleep(0.7)
122
- res_b = agent_b.send_message(f"Respond to Agent A: '{last_message}' with a skeptical argument.")
123
- history.append({
124
- "role": "assistant", "name": "Agent B (Skeptic)",
125
- "content": res_b.text,
126
- "avatar_url": AVATAR_IMAGES["Agent B (Skeptic)"]
127
- })
128
- last_message = res_b.text
129
- yield history, history
130
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
131
  except Exception as e:
132
- history.append({
133
- "role": "user", "name": "SYSTEM",
134
- "content": f"⚠️ An error occurred: {e}",
135
- "avatar_url": AVATAR_IMAGES["SYSTEM"]
136
- })
137
- yield history, history
138
- break
139
-
140
- history.append({
141
- "role": "user", "name": "SYSTEM",
142
- "content": "--- DEBATE CONCLUDED ---",
143
- "avatar_url": AVATAR_IMAGES["SYSTEM"]
144
- })
145
- yield history, history, gr.Row(visible=True)
146
-
147
- # Reset
148
- def reset_all():
149
- return {
150
- chatbox: [], debate_log_state: [], topics_state: [], questions_state: [],
151
- generate_topics_btn: gr.Button(visible=True),
152
- topic_dropdown: gr.Dropdown(value=None, choices=[], visible=False),
153
- question_dropdown: gr.Dropdown(value=None, choices=[], visible=False),
154
- start_debate_btn: gr.Button(visible=False),
155
- save_row: gr.Row(visible=False),
156
- download_file: None,
 
 
157
  }
158
 
159
- # UI
160
- with gr.Blocks(css=cyberpunk_css, title="AI DEBATE SIMULATOR") as demo:
161
- debate_log_state = gr.State([])
162
- topics_state = gr.State([])
163
- questions_state = gr.State([])
164
-
165
- gr.Markdown("# AI DEBATE SIMULATOR")
166
- gr.Markdown("## A War of Wits Between Two AI Agents")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
167
 
168
  with gr.Row():
169
- with gr.Column(scale=1):
170
- generate_topics_btn = gr.Button("Step 1: Generate 20 Debate Topics", variant="primary")
171
- topic_dropdown = gr.Dropdown(label="Step 2: Select a Topic", visible=False)
172
- question_dropdown = gr.Dropdown(label="Step 3: Select a Starting Question", visible=False)
173
- start_debate_btn = gr.Button("Step 4: START THE DEBATE", visible=False)
174
- with gr.Row(visible=False) as save_row:
175
- save_row_button = gr.Button("Download Log")
176
- download_file = gr.File(label="Download File")
177
- reset_btn = gr.Button("Start New Debate", variant="stop")
178
- with gr.Column(scale=3):
179
- chatbox = gr.Chatbot(label="Debate Arena", height=700, type="messages")
180
-
181
- # Button logic
182
- generate_topics_btn.click(fn=generate_topics, inputs=None,
183
- outputs=[topics_state, topic_dropdown, generate_topics_btn])
184
- topic_dropdown.change(fn=generate_questions, inputs=topic_dropdown,
185
- outputs=[questions_state, question_dropdown])
186
- question_dropdown.change(fn=enable_debate_button, inputs=question_dropdown,
187
- outputs=[start_debate_btn])
188
- start_debate_btn.click(
189
- fn=lambda: {
190
- generate_topics_btn: gr.Button(visible=False),
191
- topic_dropdown: gr.Dropdown(interactive=False),
192
- question_dropdown: gr.Dropdown(interactive=False),
193
- start_debate_btn: gr.Button(visible=False),
194
- },
195
- inputs=None,
196
- outputs=[generate_topics_btn, topic_dropdown, question_dropdown, start_debate_btn]
197
- ).then(fn=run_debate, inputs=[topic_dropdown, question_dropdown],
198
- outputs=[chatbox, debate_log_state, save_row])
199
-
200
- reset_btn.click(fn=reset_all, inputs=None,
201
- outputs=[chatbox, debate_log_state, topics_state,
202
- questions_state, generate_topics_btn,
203
- topic_dropdown, question_dropdown,
204
- start_debate_btn, save_row, download_file])
205
-
206
- # Launch app
207
  if __name__ == "__main__":
208
- demo.launch(debug=True, ssr_mode=False)
 
import os
import gradio as gr
from google import generativeai as genai

# --- Load Gemini API key securely ---
# Make sure to set the GEMINI_API_KEY in your Hugging Face Space Secrets
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    raise ValueError("❌ GEMINI_API_KEY not found. Please set it in your Hugging Face Space Secrets.")

# FIX: the rest of this file uses the `google.generativeai` SDK
# (GenerativeModel / start_chat / generate_content(stream=True)), which exposes
# configure(), not Client() — Client() belongs to the separate `google-genai`
# package. The previous `client = genai.Client()` raised AttributeError at
# import time and never handed the API key to the SDK. `client` was unused.
genai.configure(api_key=GEMINI_API_KEY)

# --- Constants ---
MODEL_NAME = "gemini-1.5-flash-latest"
AVATAR_IMAGES = {
    "Agent A (Optimist)": "https://i.imgur.com/2GMV9y7.png",
    "Agent B (Skeptic)": "https://i.imgur.com/KzX3T21.png",
    "SYSTEM": "https://i.imgur.com/z3uxLzT.png",
}

# --- UI CSS ---
cyberpunk_css = """
@import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap');
:root {
    --neon-pink: #ff00ff; --neon-cyan: #00ffff; --neon-green: #39ff14;
    --bg-color-1: #1a001a; --bg-color-2: #0d001a; --bg-color-3: #2a003a;
    --border-glow: 0 0 5px var(--neon-cyan), 0 0 10px var(--neon-cyan), 0 0 20px var(--neon-pink), 0 0 30px var(--neon-pink);
    --text-shadow: 0 0 3px var(--neon-cyan), 0 0 5px var(--neon-cyan);
}
.gradio-container { background: linear-gradient(45deg, var(--bg-color-1), var(--bg-color-2)); font-family: 'Orbitron', sans-serif; }
h1, .gr-label { color: var(--neon-cyan); text-shadow: var(--text-shadow); text-align: center; }
.gr-button { background: transparent; color: var(--neon-green); border: 2px solid var(--neon-green) !important; border-radius: 5px; box-shadow: 0 0 5px var(--neon-green), 0 0 10px var(--neon-green) inset; transition: all 0.3s ease; font-family: 'Orbitron', sans-serif; }
.gr-button:hover { background: var(--neon-green); color: var(--bg-color-2); box-shadow: 0 0 10px var(--neon-green), 0 0 20px var(--neon-green), 0 0 30px var(--neon-green); }
.gr-button:disabled { background: #333 !important; color: #555 !important; border-color: #555 !important; box-shadow: none !important; cursor: not-allowed; }
.chatbot { background-color: rgba(13, 0, 26, 0.8); border: 1px solid var(--neon-pink); box-shadow: var(--border-glow); }
.message-bubble { background: none !important; border: none !important; } /* Clean up message bubbles */
footer { display: none !important; }
"""
 
40
# --- Core Logic Functions ---

def stream_content_to_chat(model, prompt, history_list, role_name, avatar_url):
    """Stream one LLM response into *history_list*, yielding after every chunk.

    A placeholder message for *role_name* is appended first. While tokens
    arrive its "content" is rewritten with a trailing "▌" cursor; when the
    stream ends the cursor is dropped and the text stripped. On failure the
    message body is replaced with a "⚠️"-prefixed error notice (callers look
    for that marker).
    """
    entry = {"role": "assistant", "name": role_name, "content": "", "avatar_url": avatar_url}
    history_list.append(entry)
    accumulated = ""
    try:
        for piece in model.generate_content(prompt, stream=True):
            accumulated += piece.text
            entry["content"] = accumulated + "▌"
            yield history_list
        entry["content"] = accumulated.strip()
        yield history_list
    except Exception as exc:
        entry["content"] = f"⚠️ An error occurred: {exc}"
        yield history_list
    # Generator return value (StopIteration.value); callers iterate and read
    # the history instead, so this is informational only.
    return accumulated.strip()
59
+
60
+ def run_full_debate_flow(chat_history):
61
+ """
62
+ This single, powerful function handles the entire user flow:
63
+ 1. Clears the chat.
64
+ 2. Disables the button.
65
+ 3. Streams the generation of a topic.
66
+ 4. Streams the generation of a question.
67
+ 5. Streams the full debate between two agents.
68
+ 6. Re-enables the button at the end.
69
+ """
70
+ # 1. Clear chat and disable button for a fresh start
71
+ chat_history.clear()
72
+ yield {
73
+ start_button: gr.update(interactive=False, value="🔄 Initializing..."),
74
+ chatbot: chat_history
75
+ }
76
+
77
+ # --- STEP 1: GENERATE TOPIC ---
78
  try:
79
+ gen_model = genai.GenerativeModel(MODEL_NAME)
80
+ topic_prompt = "Generate a single, specific, and highly debatable topic about the future of technology, AI, or finance. Output only the topic string, nothing else."
81
+ topic = ""
82
+ for updated_history in stream_content_to_chat(gen_model, topic_prompt, chat_history, "SYSTEM", AVATAR_IMAGES["SYSTEM"]):
83
+ topic = updated_history[-1]['content'].replace('▌', '').strip()
84
+ yield { chatbot: updated_history }
85
+
86
+ if "⚠️" in topic: raise Exception("Topic generation failed.")
87
+
88
  except Exception as e:
89
+ gr.Error(f"Could not generate a debate topic. Please check your API key and network. Details: {e}")
90
+ yield { start_button: gr.update(interactive=True, value="▷ GENERATE & START NEW DEBATE") }
91
+ return
92
+
93
+ # --- STEP 2: GENERATE QUESTION ---
94
+ try:
95
+ question_prompt = f"For the debate topic '{topic}', generate one provocative, open-ended starting question. Output only the question string, nothing else."
96
+ question = ""
97
+ for updated_history in stream_content_to_chat(gen_model, question_prompt, chat_history, "SYSTEM", AVATAR_IMAGES["SYSTEM"]):
98
+ question = updated_history[-1]['content'].replace('▌', '').strip()
99
+ yield { chatbot: updated_history }
100
+
101
+ if "⚠️" in question: raise Exception("Question generation failed.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
+ except Exception as e:
104
+ gr.Error(f"Could not generate a starting question. Please check your API key and network. Details: {e}")
105
+ yield { start_button: gr.update(interactive=True, value="▷ GENERATE & START NEW DEBATE") }
106
+ return
107
+
108
+ # --- STEP 3: RUN THE DEBATE ---
109
+ yield { start_button: gr.update(value="⚔️ DEBATE IN PROGRESS...") }
110
+
111
+ # Initialize agents with their personalities
112
+ agent_a_chat = gen_model.start_chat(history=[
113
+ {'role': 'user', 'parts': ["You are Agent A, a tech optimist and futurist. Argue passionately for the positive aspects."]},
114
+ {'role': 'model', 'parts': ["I am Agent A, ready to envision a brighter future."]}
115
+ ])
116
+ agent_b_chat = gen_model.start_chat(history=[
117
+ {'role': 'user', 'parts': ["You are Agent B, a cautious skeptic and ethicist. Argue the cautionary side, raising ethical concerns."]},
118
+ {'role': 'model', 'parts': ["I am Agent B, here to ensure we consider the risks."]}
119
+ ])
120
+
121
+ last_response = question
122
+ num_turns = 5
123
+
124
+ for i in range(num_turns):
125
+ # Agent A's Turn (Optimist)
126
+ chat_history.append({"role": "assistant", "name": "Agent A (Optimist)", "content": "", "avatar_url": AVATAR_IMAGES["Agent A (Optimist)"]})
127
+ try:
128
+ stream_a = agent_a_chat.send_message(f"Debate Topic: '{topic}'. The previous statement was: '{last_response}'. Provide your optimistic argument.", stream=True)
129
+ full_response_a = ""
130
+ for chunk in stream_a:
131
+ full_response_a += chunk.text
132
+ chat_history[-1]["content"] = full_response_a + "▌"
133
+ yield { chatbot: chat_history }
134
+ chat_history[-1]["content"] = full_response_a
135
+ last_response = full_response_a
136
+ yield { chatbot: chat_history }
137
  except Exception as e:
138
+ chat_history[-1]['content'] = f"⚠️ Agent A Error: {e}"
139
+ yield { chatbot: chat_history, start_button: gr.update(interactive=True, value="▷ GENERATE & START NEW DEBATE") }
140
+ return
141
+
142
+ # Agent B's Turn (Skeptic)
143
+ chat_history.append({"role": "assistant", "name": "Agent B (Skeptic)", "content": "", "avatar_url": AVATAR_IMAGES["Agent B (Skeptic)"]})
144
+ try:
145
+ stream_b = agent_b_chat.send_message(f"Debate Topic: '{topic}'. Agent A said: '{last_response}'. Provide your skeptical counter-argument.", stream=True)
146
+ full_response_b = ""
147
+ for chunk in stream_b:
148
+ full_response_b += chunk.text
149
+ chat_history[-1]["content"] = full_response_b + ""
150
+ yield { chatbot: chat_history }
151
+ chat_history[-1]["content"] = full_response_b
152
+ last_response = full_response_b
153
+ yield { chatbot: chat_history }
154
+ except Exception as e:
155
+ chat_history[-1]['content'] = f"⚠️ Agent B Error: {e}"
156
+ yield { chatbot: chat_history, start_button: gr.update(interactive=True, value="▷ GENERATE & START NEW DEBATE") }
157
+ return
158
+
159
+ # --- DEBATE CONCLUDED ---
160
+ chat_history.append({"role": "user", "name": "SYSTEM", "content": "--- DEBATE CONCLUDED ---", "avatar_url": AVATAR_IMAGES["SYSTEM"]})
161
+ yield {
162
+ chatbot: chat_history,
163
+ start_button: gr.update(interactive=True, value="▷ GENERATE & START NEW DEBATE"),
164
+ download_button: gr.update(visible=True)
165
  }
166
 
167
def create_transcript(chat_history):
    """Write the chat history to a plain-text transcript and return its path.

    Returns None when there is nothing to save. The streaming cursor "▌" is
    stripped from message bodies; speakers missing a "name" are labeled
    SYSTEM.
    """
    if not chat_history:
        return None

    # Build the sections in a list and join once instead of repeated +=.
    sections = ["AI DEBATE SIMULATOR TRANSCRIPT\n=================================\n\n"]
    for entry in chat_history:
        body = entry.get("content", "").replace("▌", "")
        speaker = entry.get("name", "SYSTEM")
        sections.append(f"[{speaker}]\n{body}\n\n---------------------------------\n\n")

    filepath = "debate_transcript.txt"
    with open(filepath, "w", encoding="utf-8") as f:
        f.write("".join(sections))
    return filepath
180
+
181
# --- Gradio UI Layout ---
with gr.Blocks(css=cyberpunk_css, theme=gr.themes.Base(), title="AI Debate Simulator v4.0") as demo:
    # State to hold the chat history list. This is the single source of truth;
    # run_full_debate_flow mutates it in place and create_transcript reads it.
    chat_history_state = gr.State([])

    gr.Markdown("# 👾 AI DEBATE SIMULATOR v4.0 👾")
    gr.Markdown("### A Fully Automated War of Wits, Streamed in Real-Time")

    chatbot = gr.Chatbot(
        label="Debate Arena",
        height=700,
        # NOTE(review): Gradio's `avatar_images` expects a (user, bot) pair;
        # passing all three avatar URLs may be rejected or silently truncated —
        # confirm against the installed gradio version. The history entries are
        # role/content dicts, which usually also requires type="messages".
        avatar_images=list(AVATAR_IMAGES.values()),
        show_copy_button=True
    )

    with gr.Row():
        start_button = gr.Button("▷ GENERATE & START NEW DEBATE", variant="primary", scale=3)
        download_button = gr.Button("💾 Download Transcript", visible=False, scale=1)

    # Hidden until a transcript has been generated.
    download_file_output = gr.File(label="Download", visible=False)

    # --- Event Listeners ---
    # run_full_debate_flow is a generator; every yielded dict patches only the
    # components it names, all of which must appear in `outputs`.
    start_button.click(
        fn=run_full_debate_flow,
        inputs=[chat_history_state],
        outputs=[chatbot, start_button, download_button]
    )

    # First write the transcript file, then reveal the file component.
    download_button.click(
        fn=create_transcript,
        inputs=[chat_history_state],
        outputs=[download_file_output]
    ).then(
        fn=lambda: gr.update(visible=True), # Show the file component
        outputs=[download_file_output]
    )
217
+
218
+
219
# --- Launch the App ---
if __name__ == "__main__":
    # queue() is required so the generator-based click handler can stream its
    # intermediate yields to the browser instead of only the final state.
    demo.queue().launch(debug=True)