Update app.py
app.py CHANGED

@@ -1,6 +1,6 @@
 import os
 import gradio as gr
-from google import genai
+from google import generativeai as genai
 import time
 
 # --- Load Gemini API key securely ---
@@ -8,7 +8,7 @@ GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
 if not GEMINI_API_KEY:
     raise ValueError("❌ GEMINI_API_KEY not found. Please set it in your Hugging Face Space Secrets.")
 
-
+genai.configure(api_key=GEMINI_API_KEY)
 
 # --- Constants ---
 MODEL_NAME = "gemini-1.5-flash-latest"
@@ -33,8 +33,8 @@ h1, .gr-label { color: var(--neon-cyan); text-shadow: var(--text-shadow); text-a
 .gr-button:hover { background: var(--neon-green); color: var(--bg-color-2); box-shadow: 0 0 10px var(--neon-green), 0 0 20px var(--neon-green), 0 0 30px var(--neon-green); }
 .gr-button:disabled { background: #333 !important; color: #555 !important; border-color: #555 !important; box-shadow: none !important; cursor: not-allowed; }
 .chatbot { background-color: rgba(13, 0, 26, 0.8); border: 1px solid var(--neon-pink); box-shadow: var(--border-glow); }
-.message-bubble { background: none !important; border: none !important; }
-footer { display: none !
+.message-bubble { background: none !important; border: none !important; }
+footer { display: none !important; }
 """
 
 def stream_llm_response(model, prompt, history_list, role_name, avatar_url):
@@ -51,7 +51,7 @@ def stream_llm_response(model, prompt, history_list, role_name, avatar_url):
     except Exception as e:
         history_list[-1]["content"] = f"⚠️ An error occurred: {e}"
 
-    yield history_list
+    yield history_list
     return full_response.strip()
 
 def run_full_debate_flow(chat_history):
@@ -63,12 +63,11 @@ def run_full_debate_flow(chat_history):
        time.sleep(0.5) # A brief, dramatic pause
        return {chatbot: chat_history}
 
-    # 1. Clear chat and disable button for a fresh start
    chat_history.clear()
    yield {
        start_button: gr.update(interactive=False, value="🚀 Initializing..."),
        chatbot: chat_history,
-       download_button: gr.update(visible=False),
+       download_button: gr.update(visible=False),
        download_file_output: gr.update(value=None, visible=False)
    }
 
@@ -77,7 +76,6 @@ def run_full_debate_flow(chat_history):
    try:
        gen_model = genai.GenerativeModel(MODEL_NAME)
 
-        # --- NARRATE & GENERATE TOPIC ---
        yield add_system_message("Contacting orbital AI to generate a fresh topic...")
        topic_prompt = "Generate a single, specific, and highly debatable topic about the future of technology, AI, or finance. Output only the topic string, nothing else."
        topic = ""
@@ -86,7 +84,6 @@ def run_full_debate_flow(chat_history):
            yield { chatbot: updated_history }
        if "⚠️" in topic or not topic: raise ValueError("Topic generation failed.")
 
-        # --- NARRATE & GENERATE QUESTION ---
        yield add_system_message("Formulating a provocative starting question...")
        question_prompt = f"For the debate topic '{topic}', generate one provocative, open-ended starting question. Output only the question string, nothing else."
        question = ""
@@ -100,7 +97,6 @@ def run_full_debate_flow(chat_history):
            yield { start_button: gr.update(interactive=True, value="▷ RE-INITIATE DEBATE PROTOCOL") }
            return
 
-        # --- NARRATE DEBATE START & RUN THE DEBATE ---
        yield add_system_message("Initializing agents... Let the debate begin!")
        yield { start_button: gr.update(value="⚔️ DEBATE IN PROGRESS...") }
 
@@ -109,19 +105,16 @@ def run_full_debate_flow(chat_history):
 
        last_response = question
        for _ in range(5):
-            # Agent A
            prompt_a = f"Debate Topic: '{topic}'. The previous statement was: '{last_response}'. Provide your optimistic argument."
            for updated_history in stream_llm_response(agent_a_chat, prompt_a, chat_history, "Agent A (Optimist)", AVATAR_IMAGES["Agent A (Optimist)"]):
                last_response = updated_history[-1]['content'].replace('▌', '')
                yield { chatbot: updated_history }
 
-            # Agent B
            prompt_b = f"Debate Topic: '{topic}'. Agent A said: '{last_response}'. Provide your skeptical counter-argument."
            for updated_history in stream_llm_response(agent_b_chat, prompt_b, chat_history, "Agent B (Skeptic)", AVATAR_IMAGES["Agent B (Skeptic)"]):
                last_response = updated_history[-1]['content'].replace('▌', '')
                yield { chatbot: updated_history }
 
-        # --- CONCLUDE DEBATE ---
        yield add_system_message("--- DEBATE CONCLUDED ---")
        yield {
            chatbot: chat_history,