HAMMALE committed on
Commit
6b5b0cd
·
verified ·
1 Parent(s): 59879f3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +103 -292
app.py CHANGED
@@ -8,8 +8,8 @@ from datetime import datetime
8
  import ast
9
  import operator as op
10
  import wikipedia
 
11
  import torch
12
- from transformers import AutoTokenizer, AutoModelForCausalLM
13
 
14
  class Tool:
15
  def __init__(self, name: str, description: str, func):
@@ -21,73 +21,45 @@ class Tool:
21
  return self.func(*args, **kwargs)
22
 
23
  def duckduckgo_search(query: str) -> str:
24
- """Search DuckDuckGo for information."""
25
  try:
26
  url = "https://api.duckduckgo.com/"
27
- params = {
28
- 'q': query,
29
- 'format': 'json',
30
- 'no_html': 1,
31
- 'skip_disambig': 1
32
- }
33
  response = requests.get(url, params=params, timeout=10)
34
  data = response.json()
35
 
36
  if data.get('Abstract'):
37
  return f"Search result: {data['Abstract']}"
38
  elif data.get('RelatedTopics') and len(data['RelatedTopics']) > 0:
39
- results = []
40
- for topic in data['RelatedTopics'][:3]:
41
- if 'Text' in topic:
42
- results.append(topic['Text'])
43
  return f"Search results: {' | '.join(results)}" if results else "No results found."
44
- else:
45
- return "No results found."
46
  except Exception as e:
47
  return f"Search error: {str(e)}"
48
 
49
  def wikipedia_search(query: str) -> str:
50
- """Search Wikipedia for information."""
51
  try:
52
  wikipedia.set_lang("en")
53
  summary = wikipedia.summary(query, sentences=3, auto_suggest=True)
54
  return f"Wikipedia: {summary}"
55
  except wikipedia.exceptions.DisambiguationError as e:
56
- return f"Wikipedia: Multiple results found. Please be more specific. Options: {', '.join(e.options[:5])}"
57
  except wikipedia.exceptions.PageError:
58
  return f"Wikipedia: No page found for '{query}'."
59
  except Exception as e:
60
  return f"Wikipedia error: {str(e)}"
61
 
62
  def get_weather(location: str) -> str:
63
- """Get current weather for a location using wttr.in."""
64
  try:
65
  url = f"https://wttr.in/{location}?format=j1"
66
  response = requests.get(url, timeout=10)
67
  data = response.json()
68
-
69
  current = data['current_condition'][0]
70
- temp_c = current['temp_C']
71
- temp_f = current['temp_F']
72
- desc = current['weatherDesc'][0]['value']
73
- humidity = current['humidity']
74
- wind_speed = current['windspeedKmph']
75
-
76
- return f"Weather in {location}: {desc}, {temp_c}°C ({temp_f}°F), Humidity: {humidity}%, Wind: {wind_speed} km/h"
77
  except Exception as e:
78
  return f"Weather error: {str(e)}"
79
 
80
  def calculate(expression: str) -> str:
81
- """Safely evaluate mathematical expressions."""
82
- operators = {
83
- ast.Add: op.add,
84
- ast.Sub: op.sub,
85
- ast.Mult: op.mul,
86
- ast.Div: op.truediv,
87
- ast.Pow: op.pow,
88
- ast.USub: op.neg,
89
- ast.Mod: op.mod,
90
- }
91
 
92
  def eval_expr(node):
93
  if isinstance(node, ast.Num):
@@ -96,350 +68,212 @@ def calculate(expression: str) -> str:
96
  return operators[type(node.op)](eval_expr(node.left), eval_expr(node.right))
97
  elif isinstance(node, ast.UnaryOp):
98
  return operators[type(node.op)](eval_expr(node.operand))
99
- elif isinstance(node, ast.Call):
100
- if node.func.id == 'abs':
101
- return abs(eval_expr(node.args[0]))
102
- elif node.func.id == 'round':
103
- return round(eval_expr(node.args[0]))
104
- else:
105
- raise TypeError(node)
106
 
107
  try:
108
- expression = expression.strip()
109
- node = ast.parse(expression, mode='eval')
110
- result = eval_expr(node.body)
111
  return f"Result: {result}"
112
  except Exception as e:
113
- return f"Calculation error: {str(e)}. Please use basic arithmetic operators (+, -, *, /, **, %)."
114
 
115
  def python_repl(code: str) -> str:
116
- """Execute safe Python code (limited to basic operations)."""
117
  try:
118
- safe_builtins = {
119
- 'abs': abs, 'round': round, 'min': min, 'max': max,
120
- 'sum': sum, 'len': len, 'range': range, 'list': list,
121
- 'dict': dict, 'str': str, 'int': int, 'float': float,
122
- 'print': print, 'enumerate': enumerate, 'zip': zip,
123
- 'sorted': sorted, 'reversed': reversed,
124
- }
125
-
126
  namespace = {'__builtins__': safe_builtins}
127
 
128
  from io import StringIO
129
  import sys
130
  old_stdout = sys.stdout
131
  sys.stdout = StringIO()
132
-
133
  exec(code, namespace)
134
-
135
  output = sys.stdout.getvalue()
136
  sys.stdout = old_stdout
137
 
138
  result_vars = {k: v for k, v in namespace.items() if k != '__builtins__' and not k.startswith('_')}
139
-
140
- result = output if output else str(result_vars) if result_vars else "Code executed successfully (no output)"
141
- return f"Python output: {result}"
142
  except Exception as e:
143
  return f"Python error: {str(e)}"
144
 
145
  TOOLS = [
146
- Tool(
147
- name="duckduckgo_search",
148
- description="Search the web using DuckDuckGo. Use this when you need current information or facts. Input should be a search query string.",
149
- func=duckduckgo_search
150
- ),
151
- Tool(
152
- name="wikipedia_search",
153
- description="Search Wikipedia for detailed information about topics, people, places, etc. Input should be a search query string.",
154
- func=wikipedia_search
155
- ),
156
- Tool(
157
- name="get_weather",
158
- description="Get current weather information for a location. Input should be a city name or location string.",
159
- func=get_weather
160
- ),
161
- Tool(
162
- name="calculate",
163
- description="Perform mathematical calculations. Input should be a mathematical expression like '5 + 3 * 2' or '2 ** 10'.",
164
- func=calculate
165
- ),
166
- Tool(
167
- name="python_repl",
168
- description="Execute Python code for data processing or calculations. Input should be valid Python code. Only basic operations are allowed.",
169
- func=python_repl
170
- ),
171
  ]
172
 
173
  MODEL_NAME = "openai/gpt-oss-20b"
174
- model = None
175
- tokenizer = None
176
  model_loaded = False
177
 
178
  def download_and_load_model(progress=gr.Progress()):
179
- """Download and load the model."""
180
- global model, tokenizer, model_loaded
181
 
182
  try:
183
- progress(0, desc="Starting model download...")
 
184
 
185
- progress(0.3, desc="Downloading tokenizer...")
186
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
187
-
188
- progress(0.5, desc="Downloading model (this may take several minutes)...")
189
- model = AutoModelForCausalLM.from_pretrained(
190
- MODEL_NAME,
191
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
192
- device_map="auto" if torch.cuda.is_available() else None,
193
- low_cpu_mem_usage=True
194
  )
195
 
196
  progress(0.95, desc="Finalizing...")
197
  model_loaded = True
198
-
199
- progress(1.0, desc="Model loaded successfully!")
200
  return f"Model '{MODEL_NAME}' loaded successfully!"
201
-
202
  except Exception as e:
203
- return f"Error loading model: {str(e)}"
204
 
205
  def get_tool_descriptions() -> str:
206
- descriptions = []
207
- for tool in TOOLS:
208
- descriptions.append(f"- {tool.name}: {tool.description}")
209
- return "\n".join(descriptions)
210
-
211
- THINK_ONLY_PROMPT = """You are a helpful AI assistant. You solve problems by thinking through them step-by-step.
212
 
213
- For each question:
214
- 1. Think through the problem carefully in your internal monologue
215
- 2. Show your reasoning process using "Thought: ..." format
216
- 3. Provide a final answer using "Answer: ..." format
217
 
218
- You do NOT have access to any tools. Rely only on your knowledge and reasoning.
 
 
219
 
220
  Question: {question}
221
 
222
- Let's think step by step:"""
223
 
224
- ACT_ONLY_PROMPT = """You are a helpful AI assistant with access to tools. You solve problems by using tools.
225
 
226
  Available tools:
227
  {tools}
228
 
229
- For each question, you must use tools to find information. Do NOT think or reason - just use tools.
230
-
231
- Format your response as:
232
  Action: tool_name
233
- Action Input: input_for_tool
234
-
235
- After receiving the observation, you can call another tool or provide the final answer:
236
- Answer: your final answer
237
 
238
  Question: {question}
239
 
240
  Action:"""
241
 
242
- REACT_PROMPT = """You are a helpful AI assistant that can think and use tools. You solve problems by alternating between Thought, Action, and Observation.
243
 
244
  Available tools:
245
  {tools}
246
 
247
- For each question, follow this pattern:
248
- Thought: Think about what you need to do next
249
  Action: tool_name
250
- Action Input: input_for_tool
251
- Observation: [tool result will be provided]
252
- ... (repeat Thought/Action/Observation as needed)
253
- Thought: I now know the final answer
254
- Answer: your final answer
255
 
256
  Question: {question}
257
 
258
  Thought:"""
259
 
260
  def parse_action(text: str) -> tuple:
261
- """Parse action and action input from model output."""
262
- action_pattern = r'Action:\s*(\w+)'
263
- input_pattern = r'Action Input:\s*(.+?)(?=\n(?:Thought:|Action:|Answer:|$))'
264
-
265
- action_match = re.search(action_pattern, text, re.IGNORECASE)
266
- input_match = re.search(input_pattern, text, re.IGNORECASE | re.DOTALL)
267
-
268
- if action_match and input_match:
269
- action_name = action_match.group(1).strip()
270
- action_input = input_match.group(1).strip()
271
- return action_name, action_input
272
- return None, None
273
 
274
  def call_tool(tool_name: str, tool_input: str) -> str:
275
- """Call a tool by name."""
276
  for tool in TOOLS:
277
  if tool.name.lower() == tool_name.lower():
278
  return tool(tool_input)
279
- return f"Error: Tool '{tool_name}' not found. Available tools: {', '.join([t.name for t in TOOLS])}"
280
 
281
- def call_llm(prompt: str, temperature: float = 0.7, max_tokens: int = 500) -> str:
282
- """Call the local LLM."""
283
- global model, tokenizer, model_loaded
284
-
285
  if not model_loaded:
286
- return "Error: Model not loaded. Please click 'Download & Load Model' first."
287
 
288
  try:
289
  messages = [{"role": "user", "content": prompt}]
290
-
291
- inputs = tokenizer.apply_chat_template(
292
- messages,
293
- add_generation_prompt=True,
294
- tokenize=True,
295
- return_dict=True,
296
- return_tensors="pt",
297
- ).to(model.device)
298
-
299
- with torch.no_grad():
300
- outputs = model.generate(
301
- **inputs,
302
- max_new_tokens=max_tokens,
303
- temperature=temperature,
304
- do_sample=True,
305
- top_p=0.9
306
- )
307
-
308
- response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
309
- return response.strip()
310
-
311
  except Exception as e:
312
- return f"Error calling model: {str(e)}"
313
 
314
  def think_only_mode(question: str) -> str:
315
- """Think-Only mode: Chain-of-Thought only, no tools."""
316
  if not model_loaded:
317
- return "Error: Model not loaded. Please click 'Download & Load Model' first."
318
 
319
- prompt = THINK_ONLY_PROMPT.format(question=question)
 
320
 
321
- output = "**Mode: Think-Only (Chain-of-Thought)**\n\n"
322
- output += "Generating thoughts...\n\n"
323
-
324
- response = call_llm(prompt, temperature=0.7, max_tokens=800)
325
-
326
- lines = response.split('\n')
327
- for line in lines:
328
  if line.strip():
329
- if line.strip().startswith('Thought:'):
330
- output += f"**{line.strip()}**\n\n"
331
- elif line.strip().startswith('Answer:'):
332
- output += f"**{line.strip()}**\n\n"
333
- else:
334
- output += f"{line}\n\n"
335
 
336
- output += "\n---\n**Mode completed**\n"
337
- return output
338
 
339
  def act_only_mode(question: str, max_iterations: int = 5) -> str:
340
- """Act-Only mode: Tool use only, no explicit thinking."""
341
  if not model_loaded:
342
- return "Error: Model not loaded. Please click 'Download & Load Model' first."
343
-
344
- tool_descriptions = get_tool_descriptions()
345
- prompt = ACT_ONLY_PROMPT.format(question=question, tools=tool_descriptions)
346
 
347
- output = "**Mode: Act-Only (Tool Use Only)**\n\n"
 
348
 
349
- iteration = 0
350
- conversation_history = prompt
351
-
352
- while iteration < max_iterations:
353
- iteration += 1
354
-
355
- response = call_llm(conversation_history, temperature=0.5, max_tokens=300)
356
 
357
  if 'Answer:' in response:
358
- answer_match = re.search(r'Answer:\s*(.+)', response, re.IGNORECASE | re.DOTALL)
359
- if answer_match:
360
- output += f"**Answer:** {answer_match.group(1).strip()}\n\n"
361
  break
362
 
363
  action_name, action_input = parse_action(response)
364
-
365
  if action_name and action_input:
366
- output += f"**Action:** {action_name}\n"
367
- output += f"**Action Input:** {action_input}\n\n"
368
-
369
  observation = call_tool(action_name, action_input)
370
  output += f"**Observation:** {observation}\n\n"
371
-
372
- conversation_history += f"\n{response}\nObservation: {observation}\n\nContinue with another action or provide the final answer.\n"
373
  else:
374
- output += f"Could not parse action from response. Response: {response}\n\n"
375
  break
376
 
377
- if iteration >= max_iterations:
378
- output += "**Reached maximum iterations.**\n\n"
379
-
380
- output += "\n---\n**Mode completed**\n"
381
- return output
382
 
383
  def react_mode(question: str, max_iterations: int = 5) -> str:
384
- """ReAct mode: Interleaving Thought, Action, Observation."""
385
  if not model_loaded:
386
- return "Error: Model not loaded. Please click 'Download & Load Model' first."
387
-
388
- tool_descriptions = get_tool_descriptions()
389
- prompt = REACT_PROMPT.format(question=question, tools=tool_descriptions)
390
 
391
- output = "**Mode: ReAct (Thought + Action + Observation)**\n\n"
 
392
 
393
- iteration = 0
394
- conversation_history = prompt
395
-
396
- while iteration < max_iterations:
397
- iteration += 1
398
-
399
- response = call_llm(conversation_history, temperature=0.7, max_tokens=400)
400
 
401
- thought_matches = re.findall(r'Thought:\s*(.+?)(?=\n(?:Action:|Answer:|$))', response, re.IGNORECASE | re.DOTALL)
402
- for thought in thought_matches:
403
  output += f"**Thought:** {thought.strip()}\n\n"
404
 
405
  if 'Answer:' in response:
406
- answer_match = re.search(r'Answer:\s*(.+)', response, re.IGNORECASE | re.DOTALL)
407
- if answer_match:
408
- output += f"**Answer:** {answer_match.group(1).strip()}\n\n"
409
  break
410
 
411
  action_name, action_input = parse_action(response)
412
-
413
  if action_name and action_input:
414
- output += f"**Action:** {action_name}\n"
415
- output += f"**Action Input:** {action_input}\n\n"
416
-
417
  observation = call_tool(action_name, action_input)
418
  output += f"**Observation:** {observation}\n\n"
419
-
420
- conversation_history += f"\n{response}\nObservation: {observation}\n\nThought:"
421
  else:
422
  if 'Answer:' not in response:
423
- output += f"No action found. Response: {response}\n\n"
424
  break
425
 
426
- if iteration >= max_iterations:
427
- output += "**Reached maximum iterations.**\n\n"
428
-
429
- output += "\n---\n**Mode completed**\n"
430
- return output
431
 
432
  EXAMPLES = [
433
- "What is the capital of France and what's the current weather there?",
434
- "Who wrote 'To Kill a Mockingbird' and when was it published?",
435
- "Calculate the compound interest on $1000 at 5% annual rate for 3 years using the formula A = P(1 + r)^t",
436
- "What is the population of Tokyo and how does it compare to New York City?",
437
- "If I have a list of numbers [15, 23, 8, 42, 16], what is the average and which number is closest to it?",
438
- "What are the main causes of climate change according to scientific consensus?",
439
  ]
440
 
441
  def run_comparison(question: str, mode: str):
442
- """Run the selected mode(s)."""
443
  if mode == "Think-Only":
444
  return think_only_mode(question), "", ""
445
  elif mode == "Act-Only":
@@ -448,57 +282,34 @@ def run_comparison(question: str, mode: str):
448
  return "", "", react_mode(question)
449
  elif mode == "All (Compare)":
450
  return think_only_mode(question), act_only_mode(question), react_mode(question)
451
- else:
452
- return "Invalid mode selected.", "", ""
453
 
454
- with gr.Blocks(title="LLM Reasoning Modes Comparison") as demo:
 
455
 
456
  with gr.Row():
457
  download_btn = gr.Button("Download & Load Model", variant="primary", size="lg")
458
- model_status = gr.Textbox(label="Model Status", value="Model not loaded. Click to download openai/gpt-oss-20b", interactive=False)
459
 
460
  with gr.Row():
461
  with gr.Column(scale=3):
462
- question_input = gr.Textbox(
463
- label="Enter your question",
464
- placeholder="Ask a question that might require tools or reasoning...",
465
- lines=3
466
- )
467
- mode_dropdown = gr.Dropdown(
468
- choices=["Think-Only", "Act-Only", "ReAct", "All (Compare)"],
469
- value="All (Compare)",
470
- label="Select Mode"
471
- )
472
  submit_btn = gr.Button("Run", variant="primary", size="lg")
473
-
474
  with gr.Column(scale=1):
475
- gr.Markdown("**Example Questions**")
476
- for idx, example in enumerate(EXAMPLES):
477
- gr.Button(f"Example {idx+1}", size="sm").click(
478
- fn=lambda ex=example: ex,
479
- outputs=question_input
480
- )
481
 
482
  gr.Markdown("---")
483
 
484
  with gr.Row():
485
- with gr.Column():
486
- think_output = gr.Markdown(label="Think-Only Output")
487
- with gr.Column():
488
- act_output = gr.Markdown(label="Act-Only Output")
489
- with gr.Column():
490
- react_output = gr.Markdown(label="ReAct Output")
491
-
492
- download_btn.click(
493
- fn=download_and_load_model,
494
- outputs=model_status
495
- )
496
 
497
- submit_btn.click(
498
- fn=run_comparison,
499
- inputs=[question_input, mode_dropdown],
500
- outputs=[think_output, act_output, react_output]
501
- )
502
 
503
  if __name__ == "__main__":
504
  demo.launch(share=True)
 
8
  import ast
9
  import operator as op
10
  import wikipedia
11
+ from transformers import pipeline
12
  import torch
 
13
 
14
class Tool:
    """A named, documented callable wrapper used by the agent loop.

    Attributes:
        name: identifier the model refers to in `Action:` lines.
        description: one-line usage hint shown in the prompt.
        func: the underlying callable invoked with the action input.
    """

    def __init__(self, name: str, description: str, func):
        self.name = name
        self.description = description
        self.func = func

    def __call__(self, *args, **kwargs):
        # Calling the Tool instance simply forwards to the wrapped function.
        return self.func(*args, **kwargs)
22
 
23
def duckduckgo_search(query: str) -> str:
    """Query the DuckDuckGo Instant Answer API and return a short text summary."""
    try:
        endpoint = "https://api.duckduckgo.com/"
        params = {'q': query, 'format': 'json', 'no_html': 1, 'skip_disambig': 1}
        resp = requests.get(endpoint, params=params, timeout=10)
        payload = resp.json()

        # Prefer the abstract; fall back to up to three related topics.
        abstract = payload.get('Abstract')
        if abstract:
            return f"Search result: {abstract}"

        topics = payload.get('RelatedTopics')
        if topics and len(topics) > 0:
            results = [entry['Text'] for entry in topics[:3] if 'Text' in entry]
            return f"Search results: {' | '.join(results)}" if results else "No results found."
        return "No results found."
    except Exception as e:
        # Network/JSON failures are reported as text so the agent loop keeps going.
        return f"Search error: {str(e)}"
38
 
39
def wikipedia_search(query: str) -> str:
    """Look up *query* on English Wikipedia and return a three-sentence summary."""
    try:
        wikipedia.set_lang("en")
        text = wikipedia.summary(query, sentences=3, auto_suggest=True)
        return f"Wikipedia: {text}"
    except wikipedia.exceptions.DisambiguationError as e:
        # Ambiguous title: surface the first few candidate pages.
        return f"Wikipedia: Multiple results found. Options: {', '.join(e.options[:5])}"
    except wikipedia.exceptions.PageError:
        return f"Wikipedia: No page found for '{query}'."
    except Exception as e:
        return f"Wikipedia error: {str(e)}"
50
 
51
def get_weather(location: str) -> str:
    """Fetch current conditions for *location* from wttr.in (JSON endpoint)."""
    try:
        resp = requests.get(f"https://wttr.in/{location}?format=j1", timeout=10)
        payload = resp.json()
        current = payload['current_condition'][0]
        return f"Weather in {location}: {current['weatherDesc'][0]['value']}, {current['temp_C']}°C, Humidity: {current['humidity']}%"
    except Exception as e:
        return f"Weather error: {str(e)}"
60
 
61
def calculate(expression: str) -> str:
    """Safely evaluate a basic arithmetic expression.

    Supports +, -, *, /, **, % and unary minus via a whitelisted AST walk;
    no names, calls, or attribute access are ever evaluated.

    Returns:
        "Result: <value>" on success, "Calculation error: <msg>" otherwise.
    """
    operators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
                 ast.Div: op.truediv, ast.Pow: op.pow, ast.USub: op.neg,
                 ast.Mod: op.mod}

    def eval_expr(node):
        # ast.Constant replaces the deprecated ast.Num (removed in Python 3.12);
        # only bare int/float literals are accepted.
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        elif isinstance(node, ast.BinOp):
            return operators[type(node.op)](eval_expr(node.left), eval_expr(node.right))
        elif isinstance(node, ast.UnaryOp):
            return operators[type(node.op)](eval_expr(node.operand))
        # Anything else (names, calls, strings, ...) is rejected.
        raise TypeError(node)

    try:
        result = eval_expr(ast.parse(expression.strip(), mode='eval').body)
        return f"Result: {result}"
    except Exception as e:
        return f"Calculation error: {str(e)}"
78
 
79
def python_repl(code: str) -> str:
    """Execute Python code with a restricted builtin set, capturing stdout.

    NOTE(review): exec() of model-supplied code is only lightly sandboxed by
    swapping __builtins__ — this blocks casual imports but is not a security
    boundary.

    Returns:
        "Python output: <stdout or resulting vars>" or "Python error: <msg>".
    """
    try:
        safe_builtins = {'abs': abs, 'round': round, 'min': min, 'max': max, 'sum': sum, 'len': len, 'range': range, 'list': list, 'dict': dict, 'str': str, 'int': int, 'float': float, 'print': print}
        namespace = {'__builtins__': safe_builtins}

        from io import StringIO
        import sys
        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            exec(code, namespace)
            output = sys.stdout.getvalue()
        finally:
            # Bug fix: always restore stdout, even when exec() raises —
            # previously an exception left sys.stdout redirected forever.
            sys.stdout = old_stdout

        # Fall back to showing user-defined variables when nothing was printed.
        result_vars = {k: v for k, v in namespace.items() if k != '__builtins__' and not k.startswith('_')}
        return f"Python output: {output if output else (str(result_vars) if result_vars else 'Code executed')}"
    except Exception as e:
        return f"Python error: {str(e)}"
96
 
97
# Registry of tools exposed to the Act-Only and ReAct agent loops.
# Each entry pairs the name the model must emit with a one-line usage hint.
TOOLS = [
    Tool("duckduckgo_search", "Search the web. Input: search query.", duckduckgo_search),
    Tool("wikipedia_search", "Search Wikipedia. Input: search query.", wikipedia_search),
    Tool("get_weather", "Get weather for location. Input: city name.", get_weather),
    Tool("calculate", "Calculate math expression. Input: expression.", calculate),
    Tool("python_repl", "Execute Python code. Input: code.", python_repl),
]
104
 
105
# Hugging Face model id, loaded lazily by download_and_load_model().
MODEL_NAME = "openai/gpt-oss-20b"
pipe = None  # transformers text-generation pipeline, populated after loading
model_loaded = False  # guards every model call until loading has succeeded
108
 
109
def download_and_load_model(progress=gr.Progress()):
    """Download MODEL_NAME into a text-generation pipeline, reporting progress.

    Sets the module globals `pipe` and `model_loaded` on success and returns a
    human-readable status string for the UI textbox.
    """
    global pipe, model_loaded

    try:
        progress(0, desc="Downloading model...")
        progress(0.5, desc="Loading model (this may take several minutes)...")

        # device_map/torch_dtype "auto" lets transformers pick GPU + fp16 when available.
        pipe = pipeline(
            "text-generation",
            model=MODEL_NAME,
            torch_dtype="auto",
            device_map="auto",
        )

        progress(0.95, desc="Finalizing...")
        model_loaded = True
        progress(1.0, desc="Model loaded!")
        return f"Model '{MODEL_NAME}' loaded successfully!"
    except Exception as e:
        return f"Error: {str(e)}"
129
 
130
def get_tool_descriptions() -> str:
    """Render one '- name: description' line per registered tool, for prompts."""
    lines = [f"- {tool.name}: {tool.description}" for tool in TOOLS]
    return "\n".join(lines)
 
 
 
 
 
132
 
133
+ THINK_ONLY_PROMPT = """You are a helpful AI assistant. Solve problems step-by-step.
 
 
 
134
 
135
+ Format:
136
+ Thought: your reasoning
137
+ Answer: your final answer
138
 
139
  Question: {question}
140
 
141
+ Think step by step:"""
142
 
143
+ ACT_ONLY_PROMPT = """You are a helpful AI assistant with tools.
144
 
145
  Available tools:
146
  {tools}
147
 
148
+ Format:
 
 
149
  Action: tool_name
150
+ Action Input: input
 
 
 
151
 
152
  Question: {question}
153
 
154
  Action:"""
155
 
156
+ REACT_PROMPT = """You are a helpful AI assistant with tools.
157
 
158
  Available tools:
159
  {tools}
160
 
161
+ Pattern:
162
+ Thought: what to do next
163
  Action: tool_name
164
+ Action Input: input
165
+ Observation: [result]
166
+ ... repeat as needed
167
+ Thought: I know the answer
168
+ Answer: final answer
169
 
170
  Question: {question}
171
 
172
  Thought:"""
173
 
174
  def parse_action(text: str) -> tuple:
175
+ action_match = re.search(r'Action:\s*(\w+)', text, re.IGNORECASE)
176
+ input_match = re.search(r'Action Input:\s*(.+?)(?=\n(?:Thought:|Action:|Answer:|$))', text, re.IGNORECASE | re.DOTALL)
177
+ return (action_match.group(1).strip(), input_match.group(1).strip()) if action_match and input_match else (None, None)
 
 
 
 
 
 
 
 
 
178
 
179
def call_tool(tool_name: str, tool_input: str) -> str:
    """Dispatch *tool_input* to the registered tool whose name matches (case-insensitive)."""
    wanted = tool_name.lower()
    for tool in TOOLS:
        if tool.name.lower() == wanted:
            return tool(tool_input)
    return f"Error: Tool '{tool_name}' not found."
184
 
185
def call_llm(prompt: str, max_tokens: int = 500) -> str:
    """Send a single-turn chat prompt to the loaded pipeline and return the reply text."""
    if not model_loaded:
        return "Error: Model not loaded."

    try:
        chat = [{"role": "user", "content": prompt}]
        result = pipe(chat, max_new_tokens=max_tokens)
        # The chat pipeline returns the whole conversation; the final message
        # is the assistant's reply.
        return result[0]["generated_text"][-1]["content"]
    except Exception as e:
        return f"Error: {str(e)}"
195
 
196
def think_only_mode(question: str) -> str:
    """Chain-of-thought only: one LLM call, no tools; bolds Thought:/Answer: lines."""
    if not model_loaded:
        return "Error: Model not loaded."

    output = "**Mode: Think-Only**\n\n"
    response = call_llm(THINK_ONLY_PROMPT.format(question=question), max_tokens=800)

    for raw_line in response.split('\n'):
        stripped = raw_line.strip()
        if not stripped:
            continue  # drop blank lines entirely
        if stripped.startswith(('Thought:', 'Answer:')):
            output += f"**{stripped}**\n\n"
        else:
            output += f"{raw_line}\n\n"

    return output + "\n---\n"
 
208
 
209
def act_only_mode(question: str, max_iterations: int = 5) -> str:
    """Tool-use loop without explicit reasoning: Action -> Observation until Answer."""
    if not model_loaded:
        return "Error: Model not loaded."

    output = "**Mode: Act-Only**\n\n"
    conversation = ACT_ONLY_PROMPT.format(question=question, tools=get_tool_descriptions())

    for _ in range(max_iterations):
        response = call_llm(conversation, max_tokens=300)

        # An Answer: marker terminates the loop regardless of parse success.
        if 'Answer:' in response:
            answer = re.search(r'Answer:\s*(.+)', response, re.IGNORECASE | re.DOTALL)
            if answer:
                output += f"**Answer:** {answer.group(1).strip()}\n\n"
            break

        action_name, action_input = parse_action(response)
        if action_name and action_input:
            output += f"**Action:** {action_name}\n**Input:** {action_input}\n\n"
            observation = call_tool(action_name, action_input)
            output += f"**Observation:** {observation}\n\n"
            # Feed the tool result back so the next call can continue or answer.
            conversation += f"\n{response}\nObservation: {observation}\n\n"
        else:
            output += f"Could not parse action.\n\n"
            break

    return output + "\n---\n"
 
 
 
 
236
 
237
def react_mode(question: str, max_iterations: int = 5) -> str:
    """ReAct loop: interleaves Thought, Action, and Observation until Answer."""
    if not model_loaded:
        return "Error: Model not loaded."

    output = "**Mode: ReAct**\n\n"
    conversation = REACT_PROMPT.format(question=question, tools=get_tool_descriptions())

    for _ in range(max_iterations):
        response = call_llm(conversation, max_tokens=400)

        # Surface every Thought: segment before deciding what to do next.
        thoughts = re.findall(r'Thought:\s*(.+?)(?=\n(?:Action:|Answer:|$))', response, re.IGNORECASE | re.DOTALL)
        for thought in thoughts:
            output += f"**Thought:** {thought.strip()}\n\n"

        if 'Answer:' in response:
            answer = re.search(r'Answer:\s*(.+)', response, re.IGNORECASE | re.DOTALL)
            if answer:
                output += f"**Answer:** {answer.group(1).strip()}\n\n"
            break

        action_name, action_input = parse_action(response)
        if action_name and action_input:
            output += f"**Action:** {action_name}\n**Input:** {action_input}\n\n"
            observation = call_tool(action_name, action_input)
            output += f"**Observation:** {observation}\n\n"
            # Append the turn and re-seed with "Thought:" to keep the pattern going.
            conversation += f"\n{response}\nObservation: {observation}\n\nThought:"
        else:
            if 'Answer:' not in response:
                output += "No action found.\n\n"
            break

    return output + "\n---\n"
 
 
 
 
268
 
269
# Canned questions wired to the example buttons in the UI below.
EXAMPLES = [
    "What is the capital of France and its weather?",
    "Who wrote 'To Kill a Mockingbird'?",
    "Calculate: 1000 * (1.05 ** 3)",
    "What is Tokyo's population?",
]
275
 
276
def run_comparison(question: str, mode: str):
    """Run the selected mode(s); returns the (think, act, react) markdown triple."""
    if mode == "Think-Only":
        return think_only_mode(question), "", ""
    elif mode == "Act-Only":
        return "", act_only_mode(question), ""
    elif mode == "ReAct":
        return "", "", react_mode(question)
    elif mode == "All (Compare)":
        return think_only_mode(question), act_only_mode(question), react_mode(question)
    return "Invalid mode.", "", ""
 
286
 
287
# --- Gradio UI: model loader, question/mode inputs, and three output panes ---
with gr.Blocks(title="LLM Reasoning Modes") as demo:
    gr.Markdown("# LLM Reasoning Modes Comparison\n\n**Model:** openai/gpt-oss-20b\n\n**Tools:** DuckDuckGo | Wikipedia | Weather | Calculator | Python")

    with gr.Row():
        download_btn = gr.Button("Download & Load Model", variant="primary", size="lg")
        model_status = gr.Textbox(label="Status", value="Click to download", interactive=False)

    with gr.Row():
        with gr.Column(scale=3):
            question_input = gr.Textbox(label="Question", lines=3)
            mode_dropdown = gr.Dropdown(choices=["Think-Only", "Act-Only", "ReAct", "All (Compare)"], value="All (Compare)", label="Mode")
            submit_btn = gr.Button("Run", variant="primary", size="lg")
        with gr.Column(scale=1):
            gr.Markdown("**Examples**")
            for i, example in enumerate(EXAMPLES):
                # Default-arg binding (e=example) avoids the late-binding closure pitfall.
                gr.Button(f"Ex {i+1}", size="sm").click(fn=lambda e=example: e, outputs=question_input)

    gr.Markdown("---")

    with gr.Row():
        think_output = gr.Markdown(label="Think-Only")
        act_output = gr.Markdown(label="Act-Only")
        react_output = gr.Markdown(label="ReAct")

    download_btn.click(fn=download_and_load_model, outputs=model_status)
    submit_btn.click(fn=run_comparison, inputs=[question_input, mode_dropdown], outputs=[think_output, act_output, react_output])


if __name__ == "__main__":
    demo.launch(share=True)