HAMMALE committed on
Commit
68be27c
·
verified ·
1 Parent(s): e2ae385

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -141
app.py CHANGED
@@ -41,10 +41,6 @@ def wikipedia_search(query: str) -> str:
41
  wikipedia.set_lang("en")
42
  summary = wikipedia.summary(query, sentences=3, auto_suggest=True)
43
  return f"Wikipedia: {summary}"
44
- except wikipedia.exceptions.DisambiguationError as e:
45
- return f"Wikipedia: Multiple results found. Options: {', '.join(e.options[:5])}"
46
- except wikipedia.exceptions.PageError:
47
- return f"Wikipedia: No page found for '{query}'."
48
  except Exception as e:
49
  return f"Wikipedia error: {str(e)}"
50
 
@@ -123,7 +119,7 @@ def download_and_load_model(progress=gr.Progress()):
123
  progress(0.95, desc="Finalizing...")
124
  model_loaded = True
125
  progress(1.0, desc="Model loaded!")
126
- return f"Model loaded successfully!"
127
  except Exception as e:
128
  return f"Error: {str(e)}"
129
 
@@ -151,96 +147,35 @@ def call_llm(messages: List[Dict], max_tokens: int = 500) -> str:
151
  except Exception as e:
152
  return f"Error: {str(e)}"
153
 
154
- def chat_think_only(message, history, mode):
155
- """Think-Only mode: Pure reasoning without tools"""
156
-
157
- system_prompt = """You are a helpful AI assistant that thinks step-by-step. For each question:
158
- 1. Think through the problem carefully
159
- 2. Show your reasoning process
160
- 3. Provide a clear answer
161
-
162
- Always respond conversationally and naturally."""
163
-
164
- messages = [{"role": "system", "content": system_prompt}]
165
-
166
- for user_msg, assistant_msg in history:
167
- messages.append({"role": "user", "content": user_msg})
168
- messages.append({"role": "assistant", "content": assistant_msg})
169
-
170
- messages.append({"role": "user", "content": message})
171
-
172
- response = call_llm(messages, max_tokens=800)
173
- return response
174
-
175
- def chat_act_only(message, history, mode):
176
- """Act-Only mode: Use tools to answer"""
177
 
178
  if not model_loaded:
179
- return "Error: Model not loaded."
180
 
181
- system_prompt = f"""You are a helpful AI assistant with access to tools. Use tools to find accurate information.
 
 
 
 
182
 
183
  Available tools:
184
  {get_tool_descriptions()}
185
 
186
- When you need to use a tool, respond with:
187
  Action: tool_name
188
- Action Input: your input
189
-
190
- After getting results, provide a natural answer to the user."""
191
-
192
- messages = [{"role": "system", "content": system_prompt}]
193
-
194
- for user_msg, assistant_msg in history:
195
- messages.append({"role": "user", "content": user_msg})
196
- messages.append({"role": "assistant", "content": assistant_msg})
197
-
198
- messages.append({"role": "user", "content": message})
199
-
200
- max_iterations = 3
201
- response_parts = []
202
-
203
- for iteration in range(max_iterations):
204
- response = call_llm(messages, max_tokens=400)
205
 
206
- action_name, action_input = parse_action(response)
207
-
208
- if action_name and action_input:
209
- response_parts.append(f"🔧 Using tool: **{action_name}**")
210
- observation = call_tool(action_name, action_input)
211
- response_parts.append(f"📊 Result: {observation}\n")
212
-
213
- messages.append({"role": "assistant", "content": response})
214
- messages.append({"role": "user", "content": f"Tool result: {observation}\n\nNow provide a natural answer to the user based on this information."})
215
- else:
216
- response_parts.append(response)
217
- break
218
-
219
- return "\n\n".join(response_parts)
220
-
221
- def chat_react(message, history, mode):
222
- """ReAct mode: Interleaved thinking and tool use"""
223
-
224
- if not model_loaded:
225
- return "Error: Model not loaded."
226
-
227
- system_prompt = f"""You are a helpful AI assistant that thinks and uses tools.
228
 
229
  Available tools:
230
  {get_tool_descriptions()}
231
 
232
- For each question:
233
- 1. Think about what you need to do
234
- 2. Use tools when you need information
235
- 3. Think about the results
236
- 4. Provide a clear answer
237
-
238
- Format for tool use:
239
- Thought: [your reasoning]
240
  Action: tool_name
241
- Action Input: input
242
-
243
- After tool results, think again and either use another tool or provide the final answer."""
244
 
245
  messages = [{"role": "system", "content": system_prompt}]
246
 
@@ -250,102 +185,81 @@ After tool results, think again and either use another tool or provide the final
250
 
251
  messages.append({"role": "user", "content": message})
252
 
253
- max_iterations = 3
254
  response_parts = []
 
255
 
256
  for iteration in range(max_iterations):
257
  response = call_llm(messages, max_tokens=400)
258
 
259
- thoughts = re.findall(r'Thought:\s*(.+?)(?=\n|Action:|$)', response, re.IGNORECASE | re.DOTALL)
260
- for thought in thoughts:
261
- response_parts.append(f"💭 **Thought:** {thought.strip()}")
 
262
 
263
  action_name, action_input = parse_action(response)
264
 
265
- if action_name and action_input:
266
- response_parts.append(f"🔧 **Action:** {action_name} with input: {action_input}")
267
  observation = call_tool(action_name, action_input)
268
- response_parts.append(f"📊 **Result:** {observation}\n")
269
 
270
  messages.append({"role": "assistant", "content": response})
271
- messages.append({"role": "user", "content": f"Observation: {observation}\n\nContinue thinking or provide final answer."})
272
  else:
273
- if not thoughts:
274
- response_parts.append(response)
275
  break
276
 
277
- return "\n\n".join(response_parts)
278
-
279
- def chat_response(message, history, mode):
280
- """Route to appropriate mode"""
281
- if mode == "Think-Only":
282
- return chat_think_only(message, history, mode)
283
- elif mode == "Act-Only":
284
- return chat_act_only(message, history, mode)
285
- elif mode == "ReAct":
286
- return chat_react(message, history, mode)
287
- else:
288
- return "Please select a reasoning mode."
289
 
290
- with gr.Blocks(title="LLM Reasoning Chat", theme=gr.themes.Soft()) as demo:
291
  gr.Markdown("""
292
- # 🤖 LLM Reasoning Chat
293
 
294
- **Model:** openai/gpt-oss-20b | **Tools:** DuckDuckGo, Wikipedia, Weather, Calculator, Python
295
 
296
- Select a reasoning mode and start chatting!
 
 
 
297
  """)
298
 
299
  with gr.Row():
300
- load_btn = gr.Button("🚀 Load Model", variant="primary", size="lg")
301
- status = gr.Textbox(label="Status", value="Click 'Load Model' to start", interactive=False, scale=2)
302
 
303
- with gr.Row():
304
- mode_selector = gr.Radio(
305
- choices=["Think-Only", "Act-Only", "ReAct"],
306
- value="ReAct",
307
- label="Reasoning Mode",
308
- info="Think-Only: Pure reasoning | Act-Only: Tools only | ReAct: Think + Tools"
309
- )
310
-
311
- chatbot = gr.Chatbot(
312
- label="Chat",
313
- height=500,
314
- show_label=True,
315
- avatar_images=(None, "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.png")
316
  )
317
 
 
 
318
  with gr.Row():
319
- msg = gr.Textbox(
320
- label="Message",
321
- placeholder="Ask me anything...",
322
- scale=4
323
- )
324
  submit = gr.Button("Send", variant="primary", scale=1)
325
 
 
 
326
  gr.Examples(
327
  examples=[
328
- "What is the capital of France and what's the weather there?",
329
- "Who wrote 'To Kill a Mockingbird' and when was it published?",
330
- "Calculate the compound interest on $1000 at 5% for 3 years",
331
- "What is the population of Tokyo?",
332
- "Explain quantum mechanics in simple terms",
333
  ],
334
- inputs=msg,
335
- label="Example Questions"
336
  )
337
 
338
- clear = gr.Button("🗑️ Clear Chat")
339
-
340
  load_btn.click(fn=download_and_load_model, outputs=status)
341
 
342
- msg.submit(fn=chat_response, inputs=[msg, chatbot, mode_selector], outputs=chatbot)
343
- submit.click(fn=chat_response, inputs=[msg, chatbot, mode_selector], outputs=chatbot)
344
 
345
- msg.submit(lambda: "", None, msg)
346
- submit.click(lambda: "", None, msg)
347
 
348
- clear.click(lambda: None, None, chatbot)
349
 
350
  if __name__ == "__main__":
351
  demo.launch(share=True)
 
41
  wikipedia.set_lang("en")
42
  summary = wikipedia.summary(query, sentences=3, auto_suggest=True)
43
  return f"Wikipedia: {summary}"
 
 
 
 
44
  except Exception as e:
45
  return f"Wikipedia error: {str(e)}"
46
 
 
119
  progress(0.95, desc="Finalizing...")
120
  model_loaded = True
121
  progress(1.0, desc="Model loaded!")
122
+ return "Model loaded successfully!"
123
  except Exception as e:
124
  return f"Error: {str(e)}"
125
 
 
147
  except Exception as e:
148
  return f"Error: {str(e)}"
149
 
150
+ def chat_function(message, history, mode):
151
+ """Main chat function"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
152
 
153
  if not model_loaded:
154
+ return history + [[message, "Error: Model not loaded. Please click 'Load Model' button first."]]
155
 
156
+ if mode == "Think-Only":
157
+ system_prompt = "You are a helpful AI assistant. Think step-by-step and provide clear answers."
158
+
159
+ elif mode == "Act-Only":
160
+ system_prompt = f"""You are a helpful AI assistant with tools.
161
 
162
  Available tools:
163
  {get_tool_descriptions()}
164
 
165
+ Use tools when needed. Format:
166
  Action: tool_name
167
+ Action Input: input"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
 
169
+ else:
170
+ system_prompt = f"""You are a helpful AI assistant with tools.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
171
 
172
  Available tools:
173
  {get_tool_descriptions()}
174
 
175
+ Think step-by-step and use tools when needed. Format:
176
+ Thought: [reasoning]
 
 
 
 
 
 
177
  Action: tool_name
178
+ Action Input: input"""
 
 
179
 
180
  messages = [{"role": "system", "content": system_prompt}]
181
 
 
185
 
186
  messages.append({"role": "user", "content": message})
187
 
 
188
  response_parts = []
189
+ max_iterations = 3
190
 
191
  for iteration in range(max_iterations):
192
  response = call_llm(messages, max_tokens=400)
193
 
194
+ if mode == "ReAct":
195
+ thoughts = re.findall(r'Thought:\s*(.+?)(?=\nAction:|$)', response, re.IGNORECASE | re.DOTALL)
196
+ for thought in thoughts:
197
+ response_parts.append(f"💭 {thought.strip()}")
198
 
199
  action_name, action_input = parse_action(response)
200
 
201
+ if action_name and action_input and mode in ["Act-Only", "ReAct"]:
202
+ response_parts.append(f"🔧 Using: {action_name}")
203
  observation = call_tool(action_name, action_input)
204
+ response_parts.append(f"📊 {observation}")
205
 
206
  messages.append({"role": "assistant", "content": response})
207
+ messages.append({"role": "user", "content": f"Result: {observation}\n\nProvide final answer."})
208
  else:
209
+ response_parts.append(response)
 
210
  break
211
 
212
+ final_response = "\n\n".join(response_parts)
213
+ return history + [[message, final_response]]
 
 
 
 
 
 
 
 
 
 
214
 
215
+ with gr.Blocks(title="LLM Reasoning Chat") as demo:
216
  gr.Markdown("""
217
+ # LLM Reasoning Chat
218
 
219
+ **Model:** openai/gpt-oss-20b
220
 
221
+ **Modes:**
222
+ - **Think-Only**: Pure reasoning
223
+ - **Act-Only**: Uses tools
224
+ - **ReAct**: Thinks and uses tools
225
  """)
226
 
227
  with gr.Row():
228
+ load_btn = gr.Button("Load Model", variant="primary", size="lg")
229
+ status = gr.Textbox(label="Status", value="Click 'Load Model'", interactive=False)
230
 
231
+ mode_selector = gr.Radio(
232
+ choices=["Think-Only", "Act-Only", "ReAct"],
233
+ value="ReAct",
234
+ label="Reasoning Mode"
 
 
 
 
 
 
 
 
 
235
  )
236
 
237
+ chatbot = gr.Chatbot(label="Chat", height=400)
238
+
239
  with gr.Row():
240
+ msg = gr.Textbox(label="Message", placeholder="Ask anything...", scale=4)
 
 
 
 
241
  submit = gr.Button("Send", variant="primary", scale=1)
242
 
243
+ clear = gr.Button("Clear Chat")
244
+
245
  gr.Examples(
246
  examples=[
247
+ "What is the capital of France and its weather?",
248
+ "Calculate: 1000 * (1.05 ** 3)",
249
+ "Who wrote To Kill a Mockingbird?",
 
 
250
  ],
251
+ inputs=msg
 
252
  )
253
 
 
 
254
  load_btn.click(fn=download_and_load_model, outputs=status)
255
 
256
+ submit.click(fn=chat_function, inputs=[msg, chatbot, mode_selector], outputs=chatbot)
257
+ msg.submit(fn=chat_function, inputs=[msg, chatbot, mode_selector], outputs=chatbot)
258
 
259
+ submit.click(lambda: "", outputs=msg)
260
+ msg.submit(lambda: "", outputs=msg)
261
 
262
+ clear.click(lambda: [], outputs=chatbot)
263
 
264
  if __name__ == "__main__":
265
  demo.launch(share=True)