Spaces:
Paused
Paused
thinking mode
Browse files
app.py
CHANGED
|
@@ -177,110 +177,24 @@
|
|
| 177 |
# if __name__ == "__main__":
|
| 178 |
# demo.launch(debug=True)
|
| 179 |
|
| 180 |
-
|
| 181 |
import gradio as gr
|
| 182 |
-
|
| 183 |
-
import os
|
| 184 |
from datetime import datetime
|
| 185 |
-
import json
|
| 186 |
-
|
| 187 |
-
# --- Global Variables and Initial Setup ---
|
| 188 |
-
stop_flag = {"stop": False}
|
| 189 |
|
| 190 |
-
#
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
# --- Core Functions ---
|
| 194 |
|
| 195 |
-
|
| 196 |
-
"""
|
| 197 |
-
Main chat function that handles user messages, streams responses,
|
| 198 |
-
and provides reasoning.
|
| 199 |
-
"""
|
| 200 |
-
global stop_flag, REASONING_CONTEXT
|
| 201 |
-
stop_flag["stop"] = False
|
| 202 |
-
|
| 203 |
-
# 1. Indicate thinking and reset UI elements
|
| 204 |
-
yield (
|
| 205 |
-
history, # chatbot
|
| 206 |
-
gr.update(visible=True), # thinking_indicator
|
| 207 |
-
gr.update(visible=False, value=""), # reasoning_accordion
|
| 208 |
-
gr.update(visible=True), # stop_btn
|
| 209 |
-
gr.update(interactive=False) # send_btn
|
| 210 |
-
)
|
| 211 |
-
|
| 212 |
-
# --- This is where you would add your retrieval logic ---
|
| 213 |
-
# For demonstration, we'll create mock reasoning context.
|
| 214 |
-
# In a real app, you would retrieve this from your RAG pipeline.
|
| 215 |
-
REASONING_CONTEXT = (
|
| 216 |
-
"### Retrieved Context\n\n"
|
| 217 |
-
"1. **Document:** `policy_document_A.pdf` (Page 17)\n"
|
| 218 |
-
" * **Content:** 'The primary objective for renewable energy is to increase its share in the national grid by 20% by the year 2030.'\n"
|
| 219 |
-
"2. **Document:** `strategy_paper_B.pdf` (Page 5)\n"
|
| 220 |
-
" * **Content:** 'Energy efficiency measures will be enforced through new building codes and industrial standards.'\n"
|
| 221 |
-
)
|
| 222 |
-
# --- End of retrieval logic section ---
|
| 223 |
-
|
| 224 |
-
# 2. Show the reasoning and hide the thinking indicator
|
| 225 |
-
yield (
|
| 226 |
-
history, # chatbot
|
| 227 |
-
gr.update(visible=False), # thinking_indicator
|
| 228 |
-
gr.update(visible=True, value=REASONING_CONTEXT), # reasoning_accordion
|
| 229 |
-
gr.update(visible=True), # stop_btn
|
| 230 |
-
gr.update(interactive=False) # send_btn
|
| 231 |
-
)
|
| 232 |
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
# 3. Stream the final answer
|
| 236 |
-
try:
|
| 237 |
-
# In a real app, you would pass the retrieved context into your prompt
|
| 238 |
-
response = openai.ChatCompletion.create(
|
| 239 |
-
model="gpt-3.5-turbo",
|
| 240 |
-
messages=[{"role": "user", "content": f"Based on the context, answer this question: {message}"}],
|
| 241 |
-
stream=True,
|
| 242 |
-
# api_base="your_api_base", # Add your endpoint if needed
|
| 243 |
-
# api_key="your_api_key" # Add your key if needed
|
| 244 |
-
)
|
| 245 |
-
|
| 246 |
-
for chunk in response:
|
| 247 |
-
if stop_flag["stop"]:
|
| 248 |
-
break
|
| 249 |
-
if hasattr(chunk.choices[0].delta, 'content'):
|
| 250 |
-
content = chunk.choices[0].delta.content
|
| 251 |
-
if content:
|
| 252 |
-
history[-1][1] += content
|
| 253 |
-
yield (
|
| 254 |
-
history, # chatbot
|
| 255 |
-
gr.update(visible=False), # thinking_indicator
|
| 256 |
-
gr.update(visible=True, value=REASONING_CONTEXT), # reasoning_accordion
|
| 257 |
-
gr.update(visible=True), # stop_btn
|
| 258 |
-
gr.update(interactive=False) # send_btn
|
| 259 |
-
)
|
| 260 |
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
yield (
|
| 264 |
-
history, # chatbot
|
| 265 |
-
gr.update(visible=False), # thinking_indicator
|
| 266 |
-
gr.update(visible=True, value=REASONING_CONTEXT), # reasoning_accordion
|
| 267 |
-
gr.update(visible=False), # stop_btn
|
| 268 |
-
gr.update(interactive=True) # send_btn
|
| 269 |
-
)
|
| 270 |
-
finally:
|
| 271 |
-
# 4. Clean up UI after streaming is finished or stopped
|
| 272 |
-
stop_flag["stop"] = False
|
| 273 |
-
yield (
|
| 274 |
-
history, # chatbot
|
| 275 |
-
gr.update(visible=False), # thinking_indicator
|
| 276 |
-
gr.update(visible=True, value=REASONING_CONTEXT), # reasoning_accordion
|
| 277 |
-
gr.update(visible=False), # stop_btn
|
| 278 |
-
gr.update(interactive=True) # send_btn
|
| 279 |
-
)
|
| 280 |
|
| 281 |
def stop_streaming():
|
| 282 |
-
"""Sets the
|
| 283 |
-
global stop_flag
|
| 284 |
stop_flag["stop"] = True
|
| 285 |
|
| 286 |
def export_chat(history):
|
|
@@ -303,13 +217,94 @@ def export_chat(history):
|
|
| 303 |
formatted_chat += "="*20 + "\n\n"
|
| 304 |
formatted_chat += REASONING_CONTEXT
|
| 305 |
|
| 306 |
-
# Create a temporary file that Gradio can serve
|
| 307 |
with open(filename, "w", encoding="utf-8") as f:
|
| 308 |
f.write(formatted_chat)
|
| 309 |
|
| 310 |
return filename
|
| 311 |
|
| 312 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 313 |
|
| 314 |
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="gray")) as demo:
|
| 315 |
gr.Markdown("# 🤖 Policy-Agent Chatbot\nAsk me about policies. I'll stream responses and show my reasoning.")
|
|
@@ -342,25 +337,24 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="gray")) a
|
|
| 342 |
stop_btn = gr.Button("⛔ Stop Streaming", visible=False)
|
| 343 |
clear_btn = gr.Button("Clear Chat")
|
| 344 |
|
| 345 |
-
#
|
| 346 |
-
|
| 347 |
-
|
|
|
|
|
|
|
| 348 |
chat_outputs = [chatbot, thinking_indicator, reasoning_accordion, stop_btn, send_btn]
|
| 349 |
|
| 350 |
-
|
| 351 |
-
|
|
|
|
| 352 |
|
| 353 |
-
#
|
| 354 |
msg.submit(lambda: "", None, msg, queue=False)
|
| 355 |
send_btn.click(lambda: "", None, msg, queue=False)
|
| 356 |
|
| 357 |
-
# Button actions
|
| 358 |
stop_btn.click(stop_streaming, None, None, queue=False)
|
| 359 |
clear_btn.click(lambda: ([], "", None), None, [chatbot, reasoning_accordion, export_file], queue=False)
|
| 360 |
export_btn.click(export_chat, chatbot, export_file)
|
| 361 |
|
| 362 |
if __name__ == "__main__":
|
| 363 |
-
|
| 364 |
-
# For example: os.environ["OPENAI_API_KEY"] = "your_key_here"
|
| 365 |
-
# openai.api_base = "your_api_base_here_if_needed"
|
| 366 |
-
demo.launch(debug=True)
|
|
|
|
| 177 |
# if __name__ == "__main__":
|
| 178 |
# demo.launch(debug=True)
|
| 179 |
|
|
|
|
| 180 |
import os
from datetime import datetime

import gradio as gr
from openai import OpenAI

# --- Agent configuration ---
# SECURITY: the endpoint/key were previously hardcoded in source (and this key
# has been published, so it should be rotated). Prefer supplying them via the
# AGENT_ENDPOINT / AGENT_ACCESS_KEY environment variables; the hardcoded values
# remain only as a backward-compatible fallback.
agent_endpoint = os.environ.get(
    "AGENT_ENDPOINT",
    "https://q77iuwf7ncfemoonbzon2iyd.agents.do-ai.run/api/v1/",
)
agent_access_key = os.environ.get(
    "AGENT_ACCESS_KEY",
    "CzIwmTIDFNWRRIHvxVNzKWztq8rn5S5w",  # TODO: rotate this exposed key
)

# OpenAI-compatible client pointed at the DigitalOcean agent endpoint.
client = OpenAI(base_url=agent_endpoint, api_key=agent_access_key)

# Global flag to control streaming. A mutable dict (rather than a bare bool)
# so the stop_streaming() handler and the policy_chat() generator observe the
# same object.
stop_flag = {"stop": False}

# Last retrieved reasoning context; module-level so export_chat() can append
# it to the exported transcript.
REASONING_CONTEXT = ""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 195 |
|
| 196 |
def stop_streaming():
    """Request that the in-flight streaming response be interrupted."""
    # The streaming loop in policy_chat polls this shared flag between chunks.
    stop_flag.update(stop=True)
|
| 199 |
|
| 200 |
def export_chat(history):
|
|
|
|
| 217 |
formatted_chat += "="*20 + "\n\n"
|
| 218 |
formatted_chat += REASONING_CONTEXT
|
| 219 |
|
|
|
|
| 220 |
with open(filename, "w", encoding="utf-8") as f:
|
| 221 |
f.write(formatted_chat)
|
| 222 |
|
| 223 |
return filename
|
| 224 |
|
| 225 |
+
def policy_chat(message, history):
    """
    Stream a chat answer for `message`, yielding incremental Gradio updates.

    Yields dicts keyed by the UI components defined in the Blocks context
    (chatbot, thinking_indicator, reasoning_accordion, stop_btn, send_btn):
      1. show the "thinking" indicator and the stop button,
      2. reveal the retrieved reasoning context,
      3. stream the assistant's reply chunk-by-chunk into the chat history,
      4. (finally) restore the UI whether streaming finished, errored,
         or was stopped by the user.

    Args:
        message: The user's new question.
        history: Chat history as a list of [user, assistant] pairs.
    """
    global REASONING_CONTEXT
    # Reset stop flag at the beginning of a new request
    stop_flag["stop"] = False

    # 1. Show thinking indicator and stop button
    yield {
        chatbot: history,
        thinking_indicator: gr.update(visible=True),
        reasoning_accordion: gr.update(visible=False, value=""),
        stop_btn: gr.update(visible=True),
        send_btn: gr.update(interactive=False)
    }

    # Add user message and empty assistant response to history
    history = history + [[message, ""]]
    yield {chatbot: history, thinking_indicator: gr.update(visible=True)}

    try:
        # 2. Simulate reasoning (in a real app, this would be your retrieval logic)
        REASONING_CONTEXT = (
            "### Retrieved Context\n\n"
            "1. **Document:** `policy_document_A.pdf` (Page 17)\n"
            " - **Content:** 'The primary objective for renewable energy is to increase its share in the national grid by 20% by the year 2030.'\n"
            "2. **Document:** `strategy_paper_B.pdf` (Page 5)\n"
            " - **Content:** 'Energy efficiency measures will be enforced through new building codes and industrial standards.'\n"
        )

        # Show reasoning and hide thinking indicator
        yield {
            chatbot: history,
            thinking_indicator: gr.update(visible=False),
            reasoning_accordion: gr.update(visible=True, value=REASONING_CONTEXT),
            stop_btn: gr.update(visible=True),
            send_btn: gr.update(interactive=False)
        }

        # Create streaming chat completion.
        # BUG FIX: the previous version chose user OR assistant based on the
        # *pair* index (`i % 2`), silently dropping half of every prior
        # exchange. Each [user, assistant] pair must contribute two messages.
        stream = client.chat.completions.create(
            model="n/a",  # agent handles routing
            messages=[
                {"role": "system", "content": "The data must be returned verbatim. Please be quite detailed and include all information. You are new to the analysis of policy documents, hence you need to be objective in retrieving information, and it is not expected that you will analyse and interpret the information."},
                *[turn
                  for user_turn, assistant_turn in history[:-1]  # exclude current exchange
                  for turn in ({"role": "user", "content": user_turn},
                               {"role": "assistant", "content": assistant_turn})],
                {"role": "user", "content": message},
            ],
            extra_body={"include_retrieval_info": True},
            stream=True,
        )

        # Stream response content in real-time
        response_text = ""
        for chunk in stream:
            # Check if the stop flag has been set
            if stop_flag["stop"]:
                response_text += "\n\n⛔ **Streaming stopped by user.**"
                history[-1][1] = response_text
                yield {chatbot: history}
                break  # Exit the loop

            delta = chunk.choices[0].delta
            if delta and delta.content:
                response_text += delta.content
                # Update the last message in history with streaming content
                history[-1][1] = response_text
                yield {chatbot: history}

    except Exception as e:
        # Surface the failure inside the chat instead of crashing the UI.
        history[-1][1] = f"⚠️ Error: {str(e)}"
        yield {chatbot: history}

    finally:
        # Hide the stop button and re-enable send button once streaming is complete or stopped
        yield {
            stop_btn: gr.update(visible=False),
            send_btn: gr.update(interactive=True),
            thinking_indicator: gr.update(visible=False)
        }
|
| 307 |
+
|
| 308 |
|
| 309 |
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="gray")) as demo:
|
| 310 |
gr.Markdown("# 🤖 Policy-Agent Chatbot\nAsk me about policies. I'll stream responses and show my reasoning.")
|
|
|
|
| 337 |
stop_btn = gr.Button("⛔ Stop Streaming", visible=False)
|
| 338 |
clear_btn = gr.Button("Clear Chat")
|
| 339 |
|
| 340 |
+
# Define event handlers
|
| 341 |
+
def on_submit(message, history):
|
| 342 |
+
yield from policy_chat(message, history)
|
| 343 |
+
|
| 344 |
+
# Combine all UI updates into outputs list
|
| 345 |
    # Every component a policy_chat yield may update.
    chat_outputs = [chatbot, thinking_indicator, reasoning_accordion, stop_btn, send_btn]

    # Wire up components: both Enter-in-textbox and the Send button stream
    # through on_submit, which drives policy_chat's generator.
    msg.submit(on_submit, [msg, chatbot], chat_outputs)
    send_btn.click(on_submit, [msg, chatbot], chat_outputs)

    # After submission, clear the textbox (queue=False so it clears
    # immediately instead of waiting behind the streaming event).
    msg.submit(lambda: "", None, msg, queue=False)
    send_btn.click(lambda: "", None, msg, queue=False)

    # Stop / clear / export actions.
    stop_btn.click(stop_streaming, None, None, queue=False)
    clear_btn.click(lambda: ([], "", None), None, [chatbot, reasoning_accordion, export_file], queue=False)
    export_btn.click(export_chat, chatbot, export_file)

if __name__ == "__main__":
    demo.launch(debug=True, height=800)
|
|
|
|
|
|
|
|
|