kaburia committed on
Commit
dbcb597
·
1 Parent(s): eefe030

thinking mode

Browse files
Files changed (1) hide show
  1. app.py +4 -182
app.py CHANGED
@@ -1,181 +1,3 @@
1
- # # import gradio as gr
2
- # # from openai import OpenAI
3
-
4
- # # # 🔹 Configure your agent
5
- # # agent_endpoint = "https://q77iuwf7ncfemoonbzon2iyd.agents.do-ai.run/api/v1/"
6
- # # agent_access_key = "CzIwmTIDFNWRRIHvxVNzKWztq8rn5S5w"
7
-
8
- # # client = OpenAI(base_url=agent_endpoint, api_key=agent_access_key)
9
-
10
- # # # Parameters
11
- # # DEFAULT_RETRIEVAL_RUNS = 3 # adjustable in UI
12
-
13
- # # def policy_chat(message, history, retrieval_runs=DEFAULT_RETRIEVAL_RUNS):
14
- # # """
15
- # # Chatbot with streaming + multiple retrieval runs.
16
- # # """
17
- # # # Show "processing" placeholder
18
- # # history = history + [[message, "Processing..."]]
19
- # # yield history, history
20
-
21
- # # aggregated_responses = []
22
-
23
- # # for run in range(retrieval_runs):
24
- # # try:
25
- # # stream = client.chat.completions.create(
26
- # # model="n/a", # agent handles routing
27
- # # messages=[
28
- # # {"role": "system", "content": "The data must be returned verbatim. Please be quite detailed and include all information. You are new to the analysis of policy documents, hence you need to be objective in retrieving information, and it is not expected that you will analyse and interpret the information."},
29
- # # *[{"role": "user", "content": u} if i % 2 == 0 else {"role": "assistant", "content": b}
30
- # # for i, (u, b) in enumerate(history[:-1])], # exclude placeholder
31
- # # {"role": "user", "content": message},
32
- # # ],
33
- # # extra_body={"include_retrieval_info": True},
34
- # # stream=True,
35
- # # )
36
-
37
- # # response_text = ""
38
- # # for chunk in stream:
39
- # # delta = chunk.choices[0].delta
40
- # # if delta and delta.content: # delta.content is a string or None
41
- # # response_text += delta.content
42
- # # # Stream update to Gradio UI
43
- # # history[-1][1] = response_text
44
- # # yield history, history
45
-
46
- # # aggregated_responses.append(response_text or "⚠️ Empty response")
47
-
48
- # # except Exception as e:
49
- # # aggregated_responses.append(f"⚠️ Error during run {run+1}: {str(e)}")
50
-
51
- # # # 🔹 Choose the “best” response (longest for now)
52
- # # best_response = max(aggregated_responses, key=len, default="⚠️ No response")
53
- # # history[-1][1] = best_response
54
- # # yield history, history
55
-
56
-
57
- # # with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="gray")) as demo:
58
- # # gr.Markdown("# 🤖 Policy-Agent Chatbot\nAsk me about policies. I’ll query the knowledge base multiple times to retrieve the best answer for you!")
59
-
60
- # # popup_widget_html = '''<style>
61
- # # .policy-agent-popup-container { position:fixed; bottom:16px; right:16px; z-index:9999; }
62
- # # </style>
63
- # # <div class="policy-agent-popup-container">
64
- # # <script async
65
- # # src="https://q77iuwf7ncfemoonbzon2iyd.agents.do-ai.run/static/chatbot/widget.js"
66
- # # data-agent-id="fcad9141-8590-11f0-b074-4e013e2ddde4"
67
- # # data-chatbot-id="oTQKgtWMkQLbLVw7CIHkbxw25Pu9jekn"
68
- # # data-name="policy-agent Chatbot"
69
- # # data-primary-color="#031B4E"
70
- # # data-secondary-color="#E5E8ED"
71
- # # data-button-background-color="#0061EB"
72
- # # data-starting-message="Hello! I am your policy analysis bot made to help you comb through the policies."
73
- # # data-logo="/static/chatbot/icons/default-agent.svg">
74
- # # </script>
75
- # # </div>'''
76
- # # gr.HTML(popup_widget_html)
77
-
78
- # # chatbot = gr.Chatbot(height=500)
79
- # # msg = gr.Textbox(placeholder="Type your question...")
80
- # # retrieval_slider = gr.Slider(1, 10, value=DEFAULT_RETRIEVAL_RUNS, step=1, label="Number of retrieval runs")
81
- # # clear = gr.Button("Clear Chat")
82
-
83
- # # msg.submit(policy_chat, [msg, chatbot, retrieval_slider], [chatbot, chatbot])
84
- # # msg.submit(lambda: "", None, msg) # clear textbox
85
- # # clear.click(lambda: None, None, chatbot, queue=False)
86
-
87
- # # if __name__ == "__main__":
88
- # # demo.launch(debug=True)
89
-
90
- # import gradio as gr
91
- # from openai import OpenAI
92
-
93
- # # 🔹 Configure your agent
94
- # agent_endpoint = "https://q77iuwf7ncfemoonbzon2iyd.agents.do-ai.run/api/v1/"
95
- # agent_access_key = "CzIwmTIDFNWRRIHvxVNzKWztq8rn5S5w"
96
-
97
- # client = OpenAI(base_url=agent_endpoint, api_key=agent_access_key)
98
-
99
- # # Parameters
100
- # DEFAULT_RETRIEVAL_RUNS = 3 # adjustable in UI
101
- # stop_flag = {"stop": False} # global flag for stopping runs
102
-
103
- # def stop_runs():
104
- # """Set stop flag to True to interrupt current processing."""
105
- # stop_flag["stop"] = True
106
- # return "⛔ Retrieval stopped by user."
107
-
108
-
109
- # def policy_chat(message, history, retrieval_runs=DEFAULT_RETRIEVAL_RUNS):
110
- # """
111
- # Runs multiple retrievals, stores responses, and returns the best one.
112
- # """
113
- # # Reset stop flag
114
- # stop_flag["stop"] = False
115
-
116
- # # Show "processing" placeholder in UI
117
- # history = history + [[message, "Processing..."]]
118
- # yield history, history
119
-
120
- # aggregated_responses = []
121
-
122
- # for run in range(retrieval_runs):
123
- # if stop_flag["stop"]:
124
- # history[-1][1] = "⛔ Stopped before completion."
125
- # yield history, history
126
- # return
127
-
128
- # try:
129
- # # Stream response but collect full text silently
130
- # stream = client.chat.completions.create(
131
- # model="n/a", # agent handles routing
132
- # messages=[
133
- # {"role": "system", "content": "The data must be returned verbatim. Please be quite detailed and include all information. You are new to the analysis of policy documents, hence you need to be objective in retrieving information, and it is not expected that you will analyse and interpret the information."},
134
- # *[{"role": "user", "content": u} if i % 2 == 0 else {"role": "assistant", "content": b}
135
- # for i, (u, b) in enumerate(history[:-1])], # exclude placeholder
136
- # {"role": "user", "content": message},
137
- # ],
138
- # extra_body={"include_retrieval_info": True},
139
- # stream=True,
140
- # )
141
-
142
- # response_text = ""
143
- # for chunk in stream:
144
- # delta = chunk.choices[0].delta
145
- # if delta and delta.content:
146
- # response_text += delta.content
147
-
148
- # aggregated_responses.append(response_text or "⚠️ Empty response")
149
-
150
- # except Exception as e:
151
- # aggregated_responses.append(f"⚠️ Error during run {run+1}: {str(e)}")
152
-
153
- # # --- Selection logic ---
154
- # # For now, pick the "best" as the longest response
155
- # best_response = max(aggregated_responses, key=len, default="⚠️ No response")
156
-
157
- # # Replace "Processing..." with final answer
158
- # history[-1][1] = best_response
159
- # yield history, history
160
-
161
-
162
- # with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="gray")) as demo:
163
- # gr.Markdown("# 🤖 Policy-Agent Chatbot\nAsk me about policies. I’ll query the knowledge base multiple times!")
164
-
165
- # chatbot = gr.Chatbot(height=500)
166
- # msg = gr.Textbox(placeholder="Type your question...")
167
- # retrieval_slider = gr.Slider(1, 10, value=DEFAULT_RETRIEVAL_RUNS, step=1, label="Number of retrieval runs")
168
- # with gr.Row():
169
- # clear = gr.Button("Clear Chat")
170
- # stop = gr.Button("⛔ Stop")
171
-
172
- # msg.submit(policy_chat, [msg, chatbot, retrieval_slider], [chatbot, chatbot])
173
- # msg.submit(lambda: "", None, msg) # clear textbox
174
- # clear.click(lambda: None, None, chatbot, queue=False)
175
- # stop.click(stop_runs, None, chatbot, queue=False)
176
-
177
- # if __name__ == "__main__":
178
- # demo.launch(debug=True)
179
  import gradio as gr
180
  from openai import OpenAI
181
  from datetime import datetime
@@ -243,7 +65,7 @@ def policy_chat(message, history):
243
  yield {chatbot: history, thinking_indicator: gr.update(visible=True)}
244
 
245
  try:
246
- # 2. Simulate reasoning (in real app, this would be your retrieval logic)
247
  REASONING_CONTEXT = (
248
  "### Retrieved Context\n\n"
249
  "1. **Document:** `policy_document_A.pdf` (Page 17)\n"
@@ -256,14 +78,14 @@ def policy_chat(message, history):
256
  yield {
257
  chatbot: history,
258
  thinking_indicator: gr.update(visible=False),
259
- reasoning_accordion: gr.update(visible=True, value=REASONING_CONTEXT),
260
  stop_btn: gr.update(visible=True),
261
  send_btn: gr.update(interactive=False)
262
  }
263
 
264
- # Create streaming chat completion
265
  stream = client.chat.completions.create(
266
- model="n/a", # agent handles routing
267
  messages=[
268
  {"role": "system", "content": "The data must be returned verbatim. Please be quite detailed and include all information. You are new to the analysis of policy documents, hence you need to be objective in retrieving information, and it is not expected that you will analyse and interpret the information."},
269
  *[{"role": "user", "content": u} if i % 2 == 0 else {"role": "assistant", "content": b}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  from openai import OpenAI
3
  from datetime import datetime
 
65
  yield {chatbot: history, thinking_indicator: gr.update(visible=True)}
66
 
67
  try:
68
+ # 2. Simulate reasoning (this would be your retrieval logic integrated with DigitalOcean agent)
69
  REASONING_CONTEXT = (
70
  "### Retrieved Context\n\n"
71
  "1. **Document:** `policy_document_A.pdf` (Page 17)\n"
 
78
  yield {
79
  chatbot: history,
80
  thinking_indicator: gr.update(visible=False),
81
+ reasoning_accordion: gr.update(visible=False, value=REASONING_CONTEXT),
82
  stop_btn: gr.update(visible=True),
83
  send_btn: gr.update(interactive=False)
84
  }
85
 
86
+ # Create streaming chat completion using DigitalOcean agent
87
  stream = client.chat.completions.create(
88
+ model="n/a", # DigitalOcean agent handles routing
89
  messages=[
90
  {"role": "system", "content": "The data must be returned verbatim. Please be quite detailed and include all information. You are new to the analysis of policy documents, hence you need to be objective in retrieving information, and it is not expected that you will analyse and interpret the information."},
91
  *[{"role": "user", "content": u} if i % 2 == 0 else {"role": "assistant", "content": b}