admin08077 committed on
Commit
cf4ab34
·
verified ·
1 Parent(s): e4dd138

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -107
app.py CHANGED
@@ -1,9 +1,9 @@
1
  import os
2
  import gradio as gr
3
  from google import genai
 
4
 
5
  # --- Load Gemini API key securely ---
6
- # Make sure to set the GEMINI_API_KEY in your Hugging Face Space Secrets
7
  GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
8
  if not GEMINI_API_KEY:
9
  raise ValueError("❌ GEMINI_API_KEY not found. Please set it in your Hugging Face Space Secrets.")
@@ -34,13 +34,11 @@ h1, .gr-label { color: var(--neon-cyan); text-shadow: var(--text-shadow); text-a
34
  .gr-button:disabled { background: #333 !important; color: #555 !important; border-color: #555 !important; box-shadow: none !important; cursor: not-allowed; }
35
  .chatbot { background-color: rgba(13, 0, 26, 0.8); border: 1px solid var(--neon-pink); box-shadow: var(--border-glow); }
36
  .message-bubble { background: none !important; border: none !important; } /* Clean up message bubbles */
37
- footer { display: none !important; }
38
  """
39
 
40
- # --- Core Logic Functions ---
41
-
42
- def stream_content_to_chat(model, prompt, history_list, role_name, avatar_url):
43
- """Helper to stream a single LLM response into the chat history."""
44
  history_list.append({"role": "assistant", "name": role_name, "content": "", "avatar_url": avatar_url})
45
  full_response = ""
46
  try:
@@ -50,143 +48,107 @@ def stream_content_to_chat(model, prompt, history_list, role_name, avatar_url):
50
  history_list[-1]["content"] = full_response + "β–Œ"
51
  yield history_list
52
  history_list[-1]["content"] = full_response.strip()
53
- yield history_list
54
  except Exception as e:
55
- error_msg = f"⚠️ An error occurred: {e}"
56
- history_list[-1]["content"] = error_msg
57
- yield history_list
58
  return full_response.strip()
59
 
60
  def run_full_debate_flow(chat_history):
61
- """
62
- This single, powerful function handles the entire user flow:
63
- 1. Clears the chat.
64
- 2. Disables the button.
65
- 3. Streams the generation of a topic.
66
- 4. Streams the generation of a question.
67
- 5. Streams the full debate between two agents.
68
- 6. Re-enables the button at the end.
69
- """
70
  # 1. Clear chat and disable button for a fresh start
71
  chat_history.clear()
72
  yield {
73
  start_button: gr.update(interactive=False, value="πŸ”„ Initializing..."),
74
- chatbot: chat_history
 
 
75
  }
 
 
76
 
77
- # --- STEP 1: GENERATE TOPIC ---
78
  try:
79
  gen_model = genai.GenerativeModel(MODEL_NAME)
 
 
 
80
  topic_prompt = "Generate a single, specific, and highly debatable topic about the future of technology, AI, or finance. Output only the topic string, nothing else."
81
  topic = ""
82
- for updated_history in stream_content_to_chat(gen_model, topic_prompt, chat_history, "SYSTEM", AVATAR_IMAGES["SYSTEM"]):
83
- topic = updated_history[-1]['content'].replace('β–Œ', '').strip()
84
  yield { chatbot: updated_history }
85
-
86
- if "⚠️" in topic: raise Exception("Topic generation failed.")
87
 
88
- except Exception as e:
89
- gr.Error(f"Could not generate a debate topic. Please check your API key and network. Details: {e}")
90
- yield { start_button: gr.update(interactive=True, value="β–· GENERATE & START NEW DEBATE") }
91
- return
92
-
93
- # --- STEP 2: GENERATE QUESTION ---
94
- try:
95
  question_prompt = f"For the debate topic '{topic}', generate one provocative, open-ended starting question. Output only the question string, nothing else."
96
  question = ""
97
- for updated_history in stream_content_to_chat(gen_model, question_prompt, chat_history, "SYSTEM", AVATAR_IMAGES["SYSTEM"]):
98
- question = updated_history[-1]['content'].replace('β–Œ', '').strip()
99
  yield { chatbot: updated_history }
100
-
101
- if "⚠️" in question: raise Exception("Question generation failed.")
102
 
103
  except Exception as e:
104
- gr.Error(f"Could not generate a starting question. Please check your API key and network. Details: {e}")
105
- yield { start_button: gr.update(interactive=True, value="β–· GENERATE & START NEW DEBATE") }
106
  return
107
 
108
- # --- STEP 3: RUN THE DEBATE ---
 
109
  yield { start_button: gr.update(value="βš”οΈ DEBATE IN PROGRESS...") }
110
 
111
- # Initialize agents with their personalities
112
- agent_a_chat = gen_model.start_chat(history=[
113
- {'role': 'user', 'parts': ["You are Agent A, a tech optimist and futurist. Argue passionately for the positive aspects."]},
114
- {'role': 'model', 'parts': ["I am Agent A, ready to envision a brighter future."]}
115
- ])
116
- agent_b_chat = gen_model.start_chat(history=[
117
- {'role': 'user', 'parts': ["You are Agent B, a cautious skeptic and ethicist. Argue the cautionary side, raising ethical concerns."]},
118
- {'role': 'model', 'parts': ["I am Agent B, here to ensure we consider the risks."]}
119
- ])
120
 
121
  last_response = question
122
- num_turns = 5
123
-
124
- for i in range(num_turns):
125
- # Agent A's Turn (Optimist)
126
- chat_history.append({"role": "assistant", "name": "Agent A (Optimist)", "content": "", "avatar_url": AVATAR_IMAGES["Agent A (Optimist)"]})
127
- try:
128
- stream_a = agent_a_chat.send_message(f"Debate Topic: '{topic}'. The previous statement was: '{last_response}'. Provide your optimistic argument.", stream=True)
129
- full_response_a = ""
130
- for chunk in stream_a:
131
- full_response_a += chunk.text
132
- chat_history[-1]["content"] = full_response_a + "β–Œ"
133
- yield { chatbot: chat_history }
134
- chat_history[-1]["content"] = full_response_a
135
- last_response = full_response_a
136
- yield { chatbot: chat_history }
137
- except Exception as e:
138
- chat_history[-1]['content'] = f"⚠️ Agent A Error: {e}"
139
- yield { chatbot: chat_history, start_button: gr.update(interactive=True, value="β–· GENERATE & START NEW DEBATE") }
140
- return
141
 
142
- # Agent B's Turn (Skeptic)
143
- chat_history.append({"role": "assistant", "name": "Agent B (Skeptic)", "content": "", "avatar_url": AVATAR_IMAGES["Agent B (Skeptic)"]})
144
- try:
145
- stream_b = agent_b_chat.send_message(f"Debate Topic: '{topic}'. Agent A said: '{last_response}'. Provide your skeptical counter-argument.", stream=True)
146
- full_response_b = ""
147
- for chunk in stream_b:
148
- full_response_b += chunk.text
149
- chat_history[-1]["content"] = full_response_b + "β–Œ"
150
- yield { chatbot: chat_history }
151
- chat_history[-1]["content"] = full_response_b
152
- last_response = full_response_b
153
- yield { chatbot: chat_history }
154
- except Exception as e:
155
- chat_history[-1]['content'] = f"⚠️ Agent B Error: {e}"
156
- yield { chatbot: chat_history, start_button: gr.update(interactive=True, value="β–· GENERATE & START NEW DEBATE") }
157
- return
158
-
159
- # --- DEBATE CONCLUDED ---
160
- chat_history.append({"role": "user", "name": "SYSTEM", "content": "--- DEBATE CONCLUDED ---", "avatar_url": AVATAR_IMAGES["SYSTEM"]})
161
  yield {
162
  chatbot: chat_history,
163
- start_button: gr.update(interactive=True, value="β–· GENERATE & START NEW DEBATE"),
164
  download_button: gr.update(visible=True)
165
  }
166
 
167
  def create_transcript(chat_history):
168
- """Formats the chat history into a downloadable text file."""
169
- if not chat_history:
170
- return None
171
  transcript = "AI DEBATE SIMULATOR TRANSCRIPT\n=================================\n\n"
172
  for message in chat_history:
173
  content = message.get("content", "").replace("β–Œ", "")
174
  transcript += f"[{message.get('name', 'SYSTEM')}]\n{content}\n\n---------------------------------\n\n"
175
-
176
  filepath = "debate_transcript.txt"
177
- with open(filepath, "w", encoding="utf-8") as f:
178
- f.write(transcript)
179
- return filepath
180
 
181
  # --- Gradio UI Layout ---
182
- with gr.Blocks(css=cyberpunk_css, theme=gr.themes.Base(), title="AI Debate Simulator v4.0") as demo:
183
- # State to hold the chat history list. This is the single source of truth.
184
  chat_history_state = gr.State([])
185
 
186
- gr.Markdown("# πŸ‘Ύ AI DEBATE SIMULATOR v4.0 πŸ‘Ύ")
187
- gr.Markdown("### A Fully Automated War of Wits, Streamed in Real-Time")
188
 
189
  chatbot = gr.Chatbot(
 
190
  label="Debate Arena",
191
  height=700,
192
  avatar_images=list(AVATAR_IMAGES.values()),
@@ -194,28 +156,22 @@ with gr.Blocks(css=cyberpunk_css, theme=gr.themes.Base(), title="AI Debate Simul
194
  )
195
 
196
  with gr.Row():
197
- start_button = gr.Button("β–· GENERATE & START NEW DEBATE", variant="primary", scale=3)
198
  download_button = gr.Button("πŸ’Ύ Download Transcript", visible=False, scale=1)
199
 
200
  download_file_output = gr.File(label="Download", visible=False)
201
 
202
- # --- Event Listeners ---
203
  start_button.click(
204
  fn=run_full_debate_flow,
205
  inputs=[chat_history_state],
206
- outputs=[chatbot, start_button, download_button]
207
  )
208
 
209
  download_button.click(
210
  fn=create_transcript,
211
  inputs=[chat_history_state],
212
- outputs=[download_file_output]
213
- ).then(
214
- fn=lambda: gr.update(visible=True), # Show the file component
215
- outputs=[download_file_output]
216
  )
217
 
218
-
219
- # --- Launch the App ---
220
  if __name__ == "__main__":
221
  demo.queue().launch(debug=True)
 
1
  import os
2
  import gradio as gr
3
  from google import genai
4
+ import time
5
 
6
  # --- Load Gemini API key securely ---
 
7
  GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
8
  if not GEMINI_API_KEY:
9
  raise ValueError("❌ GEMINI_API_KEY not found. Please set it in your Hugging Face Space Secrets.")
 
34
  .gr-button:disabled { background: #333 !important; color: #555 !important; border-color: #555 !important; box-shadow: none !important; cursor: not-allowed; }
35
  .chatbot { background-color: rgba(13, 0, 26, 0.8); border: 1px solid var(--neon-pink); box-shadow: var(--border-glow); }
36
  .message-bubble { background: none !important; border: none !important; } /* Clean up message bubbles */
37
+ footer { display: none !important; }
38
  """
39
 
40
+ def stream_llm_response(model, prompt, history_list, role_name, avatar_url):
41
+ """A dedicated generator to stream a response into the history and yield updates."""
 
 
42
  history_list.append({"role": "assistant", "name": role_name, "content": "", "avatar_url": avatar_url})
43
  full_response = ""
44
  try:
 
48
  history_list[-1]["content"] = full_response + "β–Œ"
49
  yield history_list
50
  history_list[-1]["content"] = full_response.strip()
 
51
  except Exception as e:
52
+ history_list[-1]["content"] = f"⚠️ An error occurred: {e}"
53
+
54
+ yield history_list # Yield the final, clean response
55
  return full_response.strip()
56
 
57
def run_full_debate_flow(chat_history: list):
    """Handles the entire automated debate, narrating each step to the screen.

    Generator used as a Gradio event handler. Each ``yield`` is a dict mapping
    UI components (chatbot, start_button, download_button,
    download_file_output) to their updated values. ``chat_history`` is the
    shared state list and is mutated in place throughout.
    """

    def add_system_message(text):
        """Helper to append a system message and yield the UI update."""
        # NOTE(review): narration uses role "user" while agents use role
        # "assistant" — presumably so SYSTEM lines render on the opposite
        # side of the chat; confirm against the Chatbot rendering.
        chat_history.append({"role": "user", "name": "SYSTEM", "content": text, "avatar_url": AVATAR_IMAGES["SYSTEM"]})
        time.sleep(0.5) # A brief, dramatic pause
        return {chatbot: chat_history}

    # 1. Clear chat and disable button for a fresh start
    chat_history.clear()
    yield {
        start_button: gr.update(interactive=False, value="πŸ”„ Initializing..."),
        chatbot: chat_history,
        download_button: gr.update(visible=False), # Hide download button
        download_file_output: gr.update(value=None, visible=False)
    }

    yield add_system_message("Receiving transmission... Booting up debate protocol.")

    try:
        # NOTE(review): the file imports `from google import genai` (the new
        # google-genai SDK), but `GenerativeModel` belongs to the legacy
        # `google.generativeai` package — confirm which SDK is installed.
        gen_model = genai.GenerativeModel(MODEL_NAME)

        # --- NARRATE & GENERATE TOPIC ---
        yield add_system_message("Contacting orbital AI to generate a fresh topic...")
        topic_prompt = "Generate a single, specific, and highly debatable topic about the future of technology, AI, or finance. Output only the topic string, nothing else."
        topic = ""
        # stream_llm_response yields history snapshots while streaming; the
        # last message carries the partial text plus a "β–Œ" cursor we strip off.
        for updated_history in stream_llm_response(gen_model, topic_prompt, chat_history, "SYSTEM", AVATAR_IMAGES["SYSTEM"]):
            topic = updated_history[-1]['content'].replace('β–Œ', '')
            yield { chatbot: updated_history }
        # "⚠️" is the sentinel stream_llm_response writes into the bubble on error.
        if "⚠️" in topic or not topic: raise ValueError("Topic generation failed.")

        # --- NARRATE & GENERATE QUESTION ---
        yield add_system_message("Formulating a provocative starting question...")
        question_prompt = f"For the debate topic '{topic}', generate one provocative, open-ended starting question. Output only the question string, nothing else."
        question = ""
        for updated_history in stream_llm_response(gen_model, question_prompt, chat_history, "SYSTEM", AVATAR_IMAGES["SYSTEM"]):
            question = updated_history[-1]['content'].replace('β–Œ', '')
            yield { chatbot: updated_history }
        if "⚠️" in question or not question: raise ValueError("Question generation failed.")

    except Exception as e:
        # Setup failed: narrate the error, re-enable the button, and abort.
        yield add_system_message(f"❌ FATAL ERROR during setup: {e}. Protocol terminated. Please try again.")
        yield { start_button: gr.update(interactive=True, value="β–· RE-INITIATE DEBATE PROTOCOL") }
        return

    # --- NARRATE DEBATE START & RUN THE DEBATE ---
    yield add_system_message("Initializing agents... Let the debate begin!")
    yield { start_button: gr.update(value="βš”οΈ DEBATE IN PROGRESS...") }

    # Two chat sessions primed with opposing personas via seeded history.
    agent_a_chat = gen_model.start_chat(history=[{'role': 'user', 'parts': ["You are Agent A, a tech optimist. Argue positively."]}, {'role': 'model', 'parts': ["Understood."]}])
    agent_b_chat = gen_model.start_chat(history=[{'role': 'user', 'parts': ["You are Agent B, a skeptic. Argue cautiously."]}, {'role': 'model', 'parts': ["Acknowledged."]}])

    # Each agent replies to the other's last statement; the starting
    # question seeds the first turn. 5 rounds of A-then-B.
    last_response = question
    for _ in range(5):
        # Agent A
        prompt_a = f"Debate Topic: '{topic}'. The previous statement was: '{last_response}'. Provide your optimistic argument."
        for updated_history in stream_llm_response(agent_a_chat, prompt_a, chat_history, "Agent A (Optimist)", AVATAR_IMAGES["Agent A (Optimist)"]):
            last_response = updated_history[-1]['content'].replace('β–Œ', '')
            yield { chatbot: updated_history }

        # Agent B
        prompt_b = f"Debate Topic: '{topic}'. Agent A said: '{last_response}'. Provide your skeptical counter-argument."
        for updated_history in stream_llm_response(agent_b_chat, prompt_b, chat_history, "Agent B (Skeptic)", AVATAR_IMAGES["Agent B (Skeptic)"]):
            last_response = updated_history[-1]['content'].replace('β–Œ', '')
            yield { chatbot: updated_history }

    # --- CONCLUDE DEBATE ---
    yield add_system_message("--- DEBATE CONCLUDED ---")
    yield {
        chatbot: chat_history,
        start_button: gr.update(interactive=True, value="β–· START NEW DEBATE"),
        download_button: gr.update(visible=True)
    }
131
 
132
def create_transcript(chat_history):
    """Write the debate history to a text file and reveal it for download.

    Returns ``None`` when there is nothing to export; otherwise saves
    ``debate_transcript.txt`` and returns a ``gr.update`` that points the
    file component at it and makes it visible.
    """
    if not chat_history:
        return None

    header = "AI DEBATE SIMULATOR TRANSCRIPT\n=================================\n\n"
    sections = []
    for entry in chat_history:
        speaker = entry.get("name", "SYSTEM")
        body = entry.get("content", "").replace("β–Œ", "")  # drop streaming cursor
        sections.append(f"[{speaker}]\n{body}\n\n---------------------------------\n\n")

    filepath = "debate_transcript.txt"
    with open(filepath, "w", encoding="utf-8") as handle:
        handle.write(header + "".join(sections))
    return gr.update(value=filepath, visible=True)
 
142
 
143
  # --- Gradio UI Layout ---
144
+ with gr.Blocks(css=cyberpunk_css, theme=gr.themes.Base(), title="AI Debate Simulator v4.2") as demo:
 
145
  chat_history_state = gr.State([])
146
 
147
+ gr.Markdown("# πŸ‘Ύ AI DEBATE SIMULATOR v4.2 πŸ‘Ύ")
148
+ gr.Markdown("### An Automated War of Wits, Narrated in Real-Time")
149
 
150
  chatbot = gr.Chatbot(
151
+ type="messages",
152
  label="Debate Arena",
153
  height=700,
154
  avatar_images=list(AVATAR_IMAGES.values()),
 
156
  )
157
 
158
  with gr.Row():
159
+ start_button = gr.Button("β–· START NEW DEBATE", variant="primary", scale=3)
160
  download_button = gr.Button("πŸ’Ύ Download Transcript", visible=False, scale=1)
161
 
162
  download_file_output = gr.File(label="Download", visible=False)
163
 
 
164
  start_button.click(
165
  fn=run_full_debate_flow,
166
  inputs=[chat_history_state],
167
+ outputs=[chatbot, start_button, download_button, download_file_output]
168
  )
169
 
170
  download_button.click(
171
  fn=create_transcript,
172
  inputs=[chat_history_state],
173
+ outputs=download_file_output
 
 
 
174
  )
175
 
 
 
176
# Run only when executed directly (not on import).
if __name__ == "__main__":
    # queue() enables streaming of the generator handlers' yields to the UI.
    demo.queue().launch(debug=True)