app.py CHANGED
@@ -1,3 +1,182 @@
+# # import gradio as gr
+# # from openai import OpenAI
+
+# # # 🔹 Configure your agent
+# # agent_endpoint = "https://q77iuwf7ncfemoonbzon2iyd.agents.do-ai.run/api/v1/"
+# # agent_access_key = "CzIwmTIDFNWRRIHvxVNzKWztq8rn5S5w"
+
+# # client = OpenAI(base_url=agent_endpoint, api_key=agent_access_key)
+
+# # # Parameters
+# # DEFAULT_RETRIEVAL_RUNS = 3  # adjustable in UI
+
+# # def policy_chat(message, history, retrieval_runs=DEFAULT_RETRIEVAL_RUNS):
+# #     """
+# #     Chatbot with streaming + multiple retrieval runs.
+# #     """
+# #     # Show "processing" placeholder
+# #     history = history + [[message, "Processing..."]]
+# #     yield history, history
+
+# #     aggregated_responses = []
+
+# #     for run in range(retrieval_runs):
+# #         try:
+# #             stream = client.chat.completions.create(
+# #                 model="n/a",  # agent handles routing
+# #                 messages=[
+# #                     {"role": "system", "content": "The data must be returned verbatim. Please be quite detailed and include all information. You are new to the analysis of policy documents, hence you need to be objective in retrieving information, and it is not expected that you will analyse and interpret the information."},
+# #                     *[{"role": "user", "content": u} if i % 2 == 0 else {"role": "assistant", "content": b}
+# #                       for i, (u, b) in enumerate(history[:-1])],  # exclude placeholder
+# #                     {"role": "user", "content": message},
+# #                 ],
+# #                 extra_body={"include_retrieval_info": True},
+# #                 stream=True,
+# #             )
+
+# #             response_text = ""
+# #             for chunk in stream:
+# #                 delta = chunk.choices[0].delta
+# #                 if delta and delta.content:  # delta.content is a string or None
+# #                     response_text += delta.content
+# #                     # Stream update to Gradio UI
+# #                     history[-1][1] = response_text
+# #                     yield history, history
+
+# #             aggregated_responses.append(response_text or "⚠️ Empty response")
+
+# #         except Exception as e:
+# #             aggregated_responses.append(f"⚠️ Error during run {run+1}: {str(e)}")
+
+# #     # 🔹 Choose the "best" response (longest for now)
+# #     best_response = max(aggregated_responses, key=len, default="⚠️ No response")
+# #     history[-1][1] = best_response
+# #     yield history, history
+
+
+# # with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="gray")) as demo:
+# #     gr.Markdown("# 🤖 Policy-Agent Chatbot\nAsk me about policies. I'll query the knowledge base multiple times to retrieve the best answer for you!")
+
+# #     popup_widget_html = '''<style>
+# #     .policy-agent-popup-container { position:fixed; bottom:16px; right:16px; z-index:9999; }
+# #     </style>
+# #     <div class="policy-agent-popup-container">
+# #       <script async
+# #         src="https://q77iuwf7ncfemoonbzon2iyd.agents.do-ai.run/static/chatbot/widget.js"
+# #         data-agent-id="fcad9141-8590-11f0-b074-4e013e2ddde4"
+# #         data-chatbot-id="oTQKgtWMkQLbLVw7CIHkbxw25Pu9jekn"
+# #         data-name="policy-agent Chatbot"
+# #         data-primary-color="#031B4E"
+# #         data-secondary-color="#E5E8ED"
+# #         data-button-background-color="#0061EB"
+# #         data-starting-message="Hello! I am your policy analysis bot made to help you comb through the policies."
+# #         data-logo="/static/chatbot/icons/default-agent.svg">
+# #       </script>
+# #     </div>'''
+# #     gr.HTML(popup_widget_html)
+
+# #     chatbot = gr.Chatbot(height=500)
+# #     msg = gr.Textbox(placeholder="Type your question...")
+# #     retrieval_slider = gr.Slider(1, 10, value=DEFAULT_RETRIEVAL_RUNS, step=1, label="Number of retrieval runs")
+# #     clear = gr.Button("Clear Chat")
+
+# #     msg.submit(policy_chat, [msg, chatbot, retrieval_slider], [chatbot, chatbot])
+# #     msg.submit(lambda: "", None, msg)  # clear textbox
+# #     clear.click(lambda: None, None, chatbot, queue=False)
+
+# # if __name__ == "__main__":
+# #     demo.launch(debug=True)
+
+# import gradio as gr
+# from openai import OpenAI
+
+# # 🔹 Configure your agent
+# agent_endpoint = "https://q77iuwf7ncfemoonbzon2iyd.agents.do-ai.run/api/v1/"
+# agent_access_key = "CzIwmTIDFNWRRIHvxVNzKWztq8rn5S5w"
+
+# client = OpenAI(base_url=agent_endpoint, api_key=agent_access_key)
+
+# # Parameters
+# DEFAULT_RETRIEVAL_RUNS = 3  # adjustable in UI
+# stop_flag = {"stop": False}  # global flag for stopping runs
+
+# def stop_runs():
+#     """Set stop flag to True to interrupt current processing."""
+#     stop_flag["stop"] = True
+#     return "⛔ Retrieval stopped by user."
+
+
+# def policy_chat(message, history, retrieval_runs=DEFAULT_RETRIEVAL_RUNS):
+#     """
+#     Runs multiple retrievals, stores responses, and returns the best one.
+#     """
+#     # Reset stop flag
+#     stop_flag["stop"] = False
+
+#     # Show "processing" placeholder in UI
+#     history = history + [[message, "Processing..."]]
+#     yield history, history
+
+#     aggregated_responses = []
+
+#     for run in range(retrieval_runs):
+#         if stop_flag["stop"]:
+#             history[-1][1] = "⛔ Stopped before completion."
+#             yield history, history
+#             return
+
+#         try:
+#             # Stream response but collect full text silently
+#             stream = client.chat.completions.create(
+#                 model="n/a",  # agent handles routing
+#                 messages=[
+#                     {"role": "system", "content": "The data must be returned verbatim. Please be quite detailed and include all information. You are new to the analysis of policy documents, hence you need to be objective in retrieving information, and it is not expected that you will analyse and interpret the information."},
+#                     *[{"role": "user", "content": u} if i % 2 == 0 else {"role": "assistant", "content": b}
+#                       for i, (u, b) in enumerate(history[:-1])],  # exclude placeholder
+#                     {"role": "user", "content": message},
+#                 ],
+#                 extra_body={"include_retrieval_info": True},
+#                 stream=True,
+#             )
+
+#             response_text = ""
+#             for chunk in stream:
+#                 delta = chunk.choices[0].delta
+#                 if delta and delta.content:
+#                     response_text += delta.content
+
+#             aggregated_responses.append(response_text or "⚠️ Empty response")
+
+#         except Exception as e:
+#             aggregated_responses.append(f"⚠️ Error during run {run+1}: {str(e)}")
+
+#     # --- Selection logic ---
+#     # For now, pick the "best" as the longest response
+#     best_response = max(aggregated_responses, key=len, default="⚠️ No response")
+
+#     # Replace "Processing..." with final answer
+#     history[-1][1] = best_response
+#     yield history, history
+
+
+# with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="gray")) as demo:
+#     gr.Markdown("# 🤖 Policy-Agent Chatbot\nAsk me about policies. I'll query the knowledge base multiple times!")
+
+#     chatbot = gr.Chatbot(height=500)
+#     msg = gr.Textbox(placeholder="Type your question...")
+#     retrieval_slider = gr.Slider(1, 10, value=DEFAULT_RETRIEVAL_RUNS, step=1, label="Number of retrieval runs")
+#     with gr.Row():
+#         clear = gr.Button("Clear Chat")
+#         stop = gr.Button("⛔ Stop")
+
+#     msg.submit(policy_chat, [msg, chatbot, retrieval_slider], [chatbot, chatbot])
+#     msg.submit(lambda: "", None, msg)  # clear textbox
+#     clear.click(lambda: None, None, chatbot, queue=False)
+#     stop.click(stop_runs, None, chatbot, queue=False)
+
+# if __name__ == "__main__":
+#     demo.launch(debug=True)
+
 import gradio as gr
 from openai import OpenAI
 from datetime import datetime
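The two commented-out blocks kept above are earlier iterations of the app; both implement the same selection idea: query the agent several times and keep the longest non-empty answer. A minimal, Gradio-free sketch of that pattern (best_of_n and ask are illustrative names, not part of app.py):

# Sketch of the selection pattern used by the commented-out versions above:
# run the query several times and keep the longest non-empty answer.
# `ask` is a placeholder for any callable that returns a response string.
def best_of_n(ask, prompt: str, runs: int = 3) -> str:
    responses = []
    for run in range(runs):
        try:
            responses.append(ask(prompt) or "⚠️ Empty response")
        except Exception as e:
            responses.append(f"⚠️ Error during run {run + 1}: {e}")
    # Longest response wins, mirroring max(aggregated_responses, key=len)
    return max(responses, key=len, default="⚠️ No response")

if __name__ == "__main__":
    print(best_of_n(lambda p: p.upper(), "hello", runs=2))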
@@ -62,7 +241,13 @@ def policy_chat(message, history):
 
     # Add user message and empty assistant response to history
     history = history + [[message, ""]]
-    yield {
+    yield {
+        chatbot: history,
+        thinking_indicator: gr.update(visible=True),
+        reasoning_accordion: gr.update(visible=False, value=""),
+        stop_btn: gr.update(visible=True),
+        send_btn: gr.update(interactive=False)
+    }
 
     try:
         # 2. Simulate reasoning (this would be your retrieval logic integrated with DigitalOcean agent)
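From this hunk on, policy_chat yields dictionaries keyed by component. In Gradio, a generator event handler may yield {component: value} to update several outputs at once, and gr.update(...) changes properties such as visibility without replacing the component; every key in the dict must also appear in the event's outputs list. A minimal sketch of the pattern, assuming a recent Gradio and with illustrative component names:

import time
import gradio as gr

with gr.Blocks() as demo:
    status = gr.Markdown("Idle", visible=True)
    box = gr.Textbox(label="Result")
    go = gr.Button("Go")

    def run():
        # Dict-style yield: only the named components change.
        yield {status: gr.update(value="Working...", visible=True)}
        time.sleep(1)  # stand-in for the streaming call
        yield {status: gr.update(visible=False), box: "done"}

    # Every dict key yielded above must appear in `outputs`.
    go.click(run, inputs=None, outputs=[status, box])

if __name__ == "__main__":
    demo.launch()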
@@ -78,7 +263,7 @@ def policy_chat(message, history):
         yield {
             chatbot: history,
             thinking_indicator: gr.update(visible=False),
-            reasoning_accordion: gr.update(visible=
+            reasoning_accordion: gr.update(visible=True, value=REASONING_CONTEXT),
             stop_btn: gr.update(visible=True),
             send_btn: gr.update(interactive=False)
         }
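This hunk reveals the reasoning panel and fills it with REASONING_CONTEXT, a constant defined elsewhere in app.py and not shown in this diff. Since reasoning_accordion is updated with both visible= and value=, it is presumably a Markdown or Textbox placed inside an accordion rather than the Accordion itself; a plausible wiring, sketched under that assumption:

import gradio as gr

REASONING_CONTEXT = "Retrieved passages would be summarised here."  # stand-in value

with gr.Blocks() as demo:
    with gr.Accordion("Reasoning", open=False):
        # `reasoning_accordion` in the diff takes visible=/value= updates,
        # which fits a Markdown (or Textbox) component like this one.
        reasoning_md = gr.Markdown(visible=False)
    btn = gr.Button("Reveal")

    def reveal():
        return gr.update(visible=True, value=REASONING_CONTEXT)

    btn.click(reveal, None, reasoning_md)

if __name__ == "__main__":
    demo.launch()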
@@ -103,7 +288,13 @@ def policy_chat(message, history):
             if stop_flag["stop"]:
                 response_text += "\n\n⛔ **Streaming stopped by user.**"
                 history[-1][1] = response_text
-                yield {
+                yield {
+                    chatbot: history,
+                    thinking_indicator: gr.update(visible=False),
+                    reasoning_accordion: gr.update(visible=True, value=REASONING_CONTEXT),
+                    stop_btn: gr.update(visible=True),
+                    send_btn: gr.update(interactive=False)
+                }
                 break  # Exit the loop
 
             delta = chunk.choices[0].delta
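The stop check works because stop_flag is a module-level mutable dict that the Stop button's handler flips to True; the streaming loop polls it once per chunk, so cancellation takes effect at the next chunk boundary rather than immediately. The same cooperative-cancellation idea, reduced to a runnable sketch without Gradio or the agent client:

# Cooperative cancellation: the producer polls a shared flag between
# chunks, so a stop request takes effect at the next chunk boundary.
import threading
import time

stop_flag = {"stop": False}  # same mutable-dict trick as in app.py

def fake_stream():
    for word in ["policy", "text", "streams", "slowly"]:
        time.sleep(0.2)
        yield word

def consume():
    text = ""
    for chunk in fake_stream():
        if stop_flag["stop"]:
            text += " ⛔ stopped"
            break
        text += " " + chunk
    print(text)

t = threading.Thread(target=consume)
t.start()
time.sleep(0.3)
stop_flag["stop"] = True  # what the Stop button handler does
t.join()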
@@ -111,19 +302,33 @@ def policy_chat(message, history):
                 response_text += delta.content
                 # Update the last message in history with streaming content
                 history[-1][1] = response_text
-                yield {
+                yield {
+                    chatbot: history,
+                    thinking_indicator: gr.update(visible=False),
+                    reasoning_accordion: gr.update(visible=True, value=REASONING_CONTEXT),
+                    stop_btn: gr.update(visible=True),
+                    send_btn: gr.update(interactive=False)
+                }
 
     except Exception as e:
         # Handle errors by updating the assistant message
         history[-1][1] = f"⚠️ Error: {str(e)}"
-        yield {
+        yield {
+            chatbot: history,
+            thinking_indicator: gr.update(visible=False),
+            reasoning_accordion: gr.update(visible=True, value=REASONING_CONTEXT),
+            stop_btn: gr.update(visible=False),
+            send_btn: gr.update(interactive=True)
+        }
 
     finally:
         # Hide the stop button and re-enable send button once streaming is complete or stopped
         yield {
+            chatbot: history,
+            thinking_indicator: gr.update(visible=False),
+            reasoning_accordion: gr.update(visible=True, value=REASONING_CONTEXT),
             stop_btn: gr.update(visible=False),
-            send_btn: gr.update(interactive=True)
-            thinking_indicator: gr.update(visible=False)
+            send_btn: gr.update(interactive=True)
         }
 
 
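The finally block guarantees one last yield that restores the UI (spinner hidden, Stop hidden, Send re-enabled) whether the stream completed, raised, or was stopped; yield inside finally is legal in a generator, and the cleanup update is simply the last value the consumer receives. The same shape in miniature, with strings standing in for the component updates:

def guarded_stream(chunks):
    # Generator mirroring the try/except/finally shape of policy_chat:
    # stream updates, convert errors into a message, always clean up.
    try:
        text = ""
        for chunk in chunks:
            text += chunk
            yield f"partial: {text}"
    except Exception as e:
        yield f"⚠️ Error: {e}"
    finally:
        yield "cleanup: spinner hidden, send re-enabled"

for update in guarded_stream(["a", "b", "c"]):
    print(update)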