import re
import gradio as gr
from typing import Generator
import requests
import ast
import operator as op
import wikipedia
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

class Tool:
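    """Lightweight wrapper pairing a tool's name and description with its callable."""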
    def __init__(self, name: str, description: str, func):
        self.name = name
        self.description = description
        self.func = func
    
    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)

def duckduckgo_search(query: str) -> str:
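    """Query the DuckDuckGo Instant Answer API and return the abstract or up to three related-topic snippets."""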
    try:
        url = "https://api.duckduckgo.com/"
        params = {'q': query, 'format': 'json', 'no_html': 1, 'skip_disambig': 1}
        response = requests.get(url, params=params, timeout=10)
        data = response.json()
        
        if data.get('Abstract'):
            return f"Search result: {data['Abstract']}"
        elif data.get('RelatedTopics') and len(data['RelatedTopics']) > 0:
            results = [topic['Text'] for topic in data['RelatedTopics'][:3] if 'Text' in topic]
            return f"Search results: {' | '.join(results)}" if results else "No results found."
        return "No results found."
    except Exception as e:
        return f"Search error: {str(e)}"

def wikipedia_search(query: str) -> str:
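    """Return a three-sentence English Wikipedia summary for the query, handling disambiguation and missing pages."""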
    try:
        wikipedia.set_lang("en")
        summary = wikipedia.summary(query, sentences=3, auto_suggest=True)
        return f"Wikipedia: {summary}"
    except wikipedia.exceptions.DisambiguationError as e:
        return f"Wikipedia: Multiple results found. Options: {', '.join(e.options[:5])}"
    except wikipedia.exceptions.PageError:
        return f"Wikipedia: No page found for '{query}'."
    except Exception as e:
        return f"Wikipedia error: {str(e)}"

def get_weather(location: str) -> str:
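    """Fetch current conditions for a location from wttr.in's JSON endpoint."""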
    try:
        url = f"https://wttr.in/{location}?format=j1"
        response = requests.get(url, timeout=10)
        data = response.json()
        current = data['current_condition'][0]
        return f"Weather in {location}: {current['weatherDesc'][0]['value']}, {current['temp_C']}°C, Humidity: {current['humidity']}%"
    except Exception as e:
        return f"Weather error: {str(e)}"

def calculate(expression: str) -> str:
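    """Safely evaluate a basic arithmetic expression by walking its AST (no names, calls, or attribute access)."""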
    operators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul, ast.Div: op.truediv, ast.Pow: op.pow, ast.USub: op.neg, ast.Mod: op.mod}
    
    def eval_expr(node):
        # ast.Constant replaces ast.Num, which was deprecated in Python 3.8 and removed in 3.12
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        elif isinstance(node, ast.BinOp):
            return operators[type(node.op)](eval_expr(node.left), eval_expr(node.right))
        elif isinstance(node, ast.UnaryOp):
            return operators[type(node.op)](eval_expr(node.operand))
        raise TypeError(node)
    
    try:
        result = eval_expr(ast.parse(expression.strip(), mode='eval').body)
        return f"Result: {result}"
    except Exception as e:
        return f"Calculation error: {str(e)}"

def python_repl(code: str) -> str:
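    """Run a code snippet with a restricted set of builtins and return captured stdout or the resulting variables."""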
    try:
        safe_builtins = {'abs': abs, 'round': round, 'min': min, 'max': max, 'sum': sum, 'len': len, 'range': range, 'list': list, 'dict': dict, 'str': str, 'int': int, 'float': float, 'print': print}
        namespace = {'__builtins__': safe_builtins}
        
        from io import StringIO
        import sys
        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            exec(code, namespace)
            output = sys.stdout.getvalue()
        finally:
            # Restore stdout even if exec() raises, so the redirect never leaks
            sys.stdout = old_stdout
        
        result_vars = {k: v for k, v in namespace.items() if k != '__builtins__' and not k.startswith('_')}
        return f"Python output: {output if output else (str(result_vars) if result_vars else 'Code executed')}"
    except Exception as e:
        return f"Python error: {str(e)}"

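# Tools exposed to the agent; their names and descriptions are injected into the Act-Only and ReAct prompts.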
TOOLS = [
    Tool("duckduckgo_search", "Search the web. Input: search query.", duckduckgo_search),
    Tool("wikipedia_search", "Search Wikipedia. Input: search query.", wikipedia_search),
    Tool("get_weather", "Get weather for location. Input: city name.", get_weather),
    Tool("calculate", "Calculate math expression. Input: expression.", calculate),
    Tool("python_repl", "Execute Python code. Input: code.", python_repl),
]

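# The model is loaded lazily via the "Download & Load Model" button; these globals hold the loaded state.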
MODEL_NAME = "openai/gpt-oss-20b"
model = None
tokenizer = None
model_loaded = False

def download_and_load_model(progress=gr.Progress()):
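    """Download (or load from cache) the tokenizer and model and store them in the module-level globals."""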
    global model, tokenizer, model_loaded
    
    try:
        progress(0, desc="Downloading tokenizer...")
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
        
        progress(0.4, desc="Downloading model (this may take several minutes)...")
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
            torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
            device_map="auto",
            low_cpu_mem_usage=True,
        )
        
        progress(0.95, desc="Finalizing...")
        model_loaded = True
        progress(1.0, desc="Model loaded!")
        return f"Model '{MODEL_NAME}' loaded successfully!"
    except Exception as e:
        return f"Error: {str(e)}"

def get_tool_descriptions() -> str:
    return "\n".join([f"- {tool.name}: {tool.description}" for tool in TOOLS])

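# Think-Only prompt: pure chain-of-thought reasoning with no tool access.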
THINK_ONLY_PROMPT = """You are an expert problem solver. Use your knowledge and reasoning to answer questions.

You must show your complete reasoning process using this format:
Thought: [Explain what you're thinking and why]
Thought: [Continue your reasoning, breaking down the problem]
Thought: [Build toward the solution step by step]
Answer: [Your final, complete answer]

Important: 
- Show multiple thought steps
- Break down complex problems
- Explain your reasoning clearly
- Only provide the Answer when you're certain

Question: {question}

Let me think through this step by step:

Thought:"""

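# Act-Only prompt: the model emits Action / Action Input pairs without explicit reasoning steps.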
ACT_ONLY_PROMPT = """You are an AI agent with access to external tools. You MUST use tools to find information.

Available tools:
{tools}

You MUST respond ONLY with actions - no thinking out loud:
Action: [exact tool name]
Action Input: [specific input for the tool]

After receiving the Observation, you can:
- Call another tool if you need more information
- Provide the final Answer when you have enough information

Format:
Action: tool_name
Action Input: input_string

Then after observation:
Action: another_tool
Action Input: another_input

OR
Answer: [final answer based on observations]

Question: {question}

Action:"""

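# ReAct prompt: the model interleaves Thought, Action, Action Input, and Observation steps before answering.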
REACT_PROMPT = """You are an expert AI agent that combines reasoning with tool usage (ReAct paradigm).

Available tools:
{tools}

You MUST alternate between thinking and acting:

1. Thought: [Reason about what information you need and which tool to use]
2. Action: [exact tool name]
3. Action Input: [specific input]
4. Observation: [tool result - will be provided to you]
5. Thought: [Analyze the observation and decide next steps]
6. Repeat 2-5 until you have enough information
7. Thought: [Final reasoning with all gathered information]
8. Answer: [Complete final answer]

Rules:
- ALWAYS start with a Thought explaining your strategy
- After each Observation, think about what you learned
- Use multiple tools if needed
- Only give Answer when you have sufficient information
- Be specific in your Action Inputs

Question: {question}

Thought:"""

def parse_action(text: str) -> tuple:
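    """Extract the tool name and its input from 'Action:' / 'Action Input:' lines, or return (None, None)."""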
    action_match = re.search(r'Action:\s*(\w+)', text, re.IGNORECASE)
    input_match = re.search(r'Action Input:\s*(.+?)(?=\n(?:Thought:|Action:|Answer:)|$)', text, re.IGNORECASE | re.DOTALL)
    return (action_match.group(1).strip(), input_match.group(1).strip()) if action_match and input_match else (None, None)

def call_tool(tool_name: str, tool_input: str) -> str:
    for tool in TOOLS:
        if tool.name.lower() == tool_name.lower():
            return tool(tool_input)
    return f"Error: Tool '{tool_name}' not found."

def call_llm(prompt: str, max_tokens: int = 500) -> str:
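    """Generate a completion for the prompt and return only the newly generated text (prompt truncated to 2048 tokens)."""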
    if not model_loaded:
        return "Error: Model not loaded."
    
    try:
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)
        
        if torch.cuda.is_available():
            inputs = {k: v.to(model.device) for k, v in inputs.items()}
        
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                temperature=0.7,
                do_sample=True,
                top_p=0.9,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )
        
        response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
        return response.strip()
    except Exception as e:
        return f"Error during generation: {str(e)}"

def think_only_mode(question: str) -> Generator[str, None, None]:
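    """Stream a chain-of-thought answer: reasoning steps only, no tool calls."""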
    if not model_loaded:
        yield "❌ **Error: Model not loaded. Click 'Download & Load Model' first.**\n\n"
        return
    
    yield "🧠 **Mode: Think-Only (Chain-of-Thought)**\n\n"
    yield "💭 Generating reasoning steps...\n\n"
    
    response = call_llm(THINK_ONLY_PROMPT.format(question=question), max_tokens=800)
    
    if response.startswith("Error"):
        yield f"❌ {response}\n\n"
        return
    
    for line in response.split('\n'):
        if line.strip():
            if line.strip().startswith('Thought:'):
                yield f"💭 **{line.strip()}**\n\n"
            elif line.strip().startswith('Answer:'):
                yield f"✅ **{line.strip()}**\n\n"
            else:
                yield f"{line}\n\n"
    
    yield "\n---\n✓ **Completed**\n"

def act_only_mode(question: str, max_iterations: int = 5) -> Generator[str, None, None]:
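    """Stream an act-only loop: parse Action / Action Input, run the tool, feed the Observation back, repeat."""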
    if not model_loaded:
        yield "❌ **Error: Model not loaded. Click 'Download & Load Model' first.**\n\n"
        return
    
    yield "🔧 **Mode: Act-Only (Tool Use Only)**\n\n"
    conversation = ACT_ONLY_PROMPT.format(question=question, tools=get_tool_descriptions())
    
    for iteration in range(max_iterations):
        yield f"🔄 **Iteration {iteration + 1}**\n\n"
        
        response = call_llm(conversation, max_tokens=300)
        
        if response.startswith("Error"):
            yield f"❌ {response}\n\n"
            return
        
        if 'Answer:' in response:
            match = re.search(r'Answer:\s*(.+)', response, re.IGNORECASE | re.DOTALL)
            if match:
                yield f"✅ **Answer:** {match.group(1).strip()}\n\n"
                break
        
        action_name, action_input = parse_action(response)
        if action_name and action_input:
            yield f"🔧 **Action:** `{action_name}`\n"
            yield f"📝 **Action Input:** {action_input}\n\n"
            
            yield f"⏳ Executing tool...\n\n"
            observation = call_tool(action_name, action_input)
            
            yield f"👁️ **Observation:** {observation}\n\n"
            conversation += f"\n{response}\nObservation: {observation}\n\n"
        else:
            yield f"⚠️ No valid action found. Response: {response}\n\n"
            break
    
    yield "\n---\n✓ **Completed**\n"

def react_mode(question: str, max_iterations: int = 5) -> Generator[str, None, None]:
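    """Stream a ReAct loop: surface Thoughts, execute Actions, append Observations until an Answer or the iteration cap."""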
    if not model_loaded:
        yield "❌ **Error: Model not loaded. Click 'Download & Load Model' first.**\n\n"
        return
    
    yield "🤖 **Mode: ReAct (Reasoning + Acting)**\n\n"
    conversation = REACT_PROMPT.format(question=question, tools=get_tool_descriptions())
    
    for iteration in range(max_iterations):
        yield f"🔄 **Step {iteration + 1}**\n\n"
        
        response = call_llm(conversation, max_tokens=400)
        
        if response.startswith("Error"):
            yield f"❌ {response}\n\n"
            return
        
        # Extract and display thoughts
        for thought in re.findall(r'Thought:\s*(.+?)(?=\n(?:Action:|Answer:)|$)', response, re.IGNORECASE | re.DOTALL):
            yield f"💭 **Thought:** {thought.strip()}\n\n"
        
        # Check for final answer
        if 'Answer:' in response:
            match = re.search(r'Answer:\s*(.+)', response, re.IGNORECASE | re.DOTALL)
            if match:
                yield f"✅ **Answer:** {match.group(1).strip()}\n\n"
                break
        
        # Parse and execute action
        action_name, action_input = parse_action(response)
        if action_name and action_input:
            yield f"🔧 **Action:** `{action_name}`\n"
            yield f"📝 **Action Input:** {action_input}\n\n"
            
            yield f"⏳ Executing tool...\n\n"
            observation = call_tool(action_name, action_input)
            
            yield f"👁️ **Observation:** {observation}\n\n"
            conversation += f"\n{response}\nObservation: {observation}\n\nThought:"
        else:
            if 'Answer:' not in response:
                yield f"⚠️ No action found. Response: {response}\n\n"
            break
    
    yield "\n---\n✓ **Completed**\n"

EXAMPLES = [
    "What is 25 * 47?",
    "What is the weather in Paris?",
    "Who wrote 1984?",
    "Calculate: 100 + 200",
]

def run_comparison(question: str, mode: str):
    """Run selected mode with real-time streaming."""
    if not question.strip():
        yield "Please enter a question.", "", ""
        return
    
    if mode == "Think-Only":
        think_out = ""
        for chunk in think_only_mode(question):
            think_out += chunk
            yield think_out, "", ""
    
    elif mode == "Act-Only":
        act_out = ""
        for chunk in act_only_mode(question):
            act_out += chunk
            yield "", act_out, ""
    
    elif mode == "ReAct":
        react_out = ""
        for chunk in react_mode(question):
            react_out += chunk
            yield "", "", react_out
    
    else:
        yield "Invalid mode selected.", "", ""

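# Gradio UI: model loader, question/mode inputs, example shortcut buttons, and one output pane per mode.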
with gr.Blocks(title="LLM Reasoning Modes") as demo:
    gr.Markdown("# LLM Reasoning Modes Comparison\n\n**Model:** openai/gpt-oss-20b\n\n**Tools:** DuckDuckGo | Wikipedia | Weather | Calculator | Python")
    
    with gr.Row():
        download_btn = gr.Button("Download & Load Model", variant="primary", size="lg")
        model_status = gr.Textbox(label="Status", value="Click to download", interactive=False)
    
    with gr.Row():
        with gr.Column(scale=3):
            question_input = gr.Textbox(label="Question", lines=3)
            mode_dropdown = gr.Dropdown(choices=["Think-Only", "Act-Only", "ReAct"], value="ReAct", label="Mode")
            submit_btn = gr.Button("Run", variant="primary", size="lg")
        with gr.Column(scale=1):
            gr.Markdown("**Examples**")
            for idx, ex in enumerate(EXAMPLES):
                gr.Button(f"Ex {idx+1}", size="sm").click(fn=lambda e=ex: e, outputs=question_input)
    
    gr.Markdown("---")
    
    with gr.Row():
        think_output = gr.Markdown(label="Think-Only")
        act_output = gr.Markdown(label="Act-Only")
        react_output = gr.Markdown(label="ReAct")
    
    download_btn.click(fn=download_and_load_model, outputs=model_status)
    submit_btn.click(fn=run_comparison, inputs=[question_input, mode_dropdown], outputs=[think_output, act_output, react_output])

if __name__ == "__main__":
    demo.launch(share=True)