Humanlearning commited on
Commit
3708220
·
1 Parent(s): 0e6f333

+ updated research tool

Browse files
__pycache__/langgraph_tools.cpython-310.pyc ADDED
Binary file (6.84 kB). View file
 
__pycache__/langgraph_tools.cpython-313.pyc ADDED
Binary file (10.3 kB). View file
 
__pycache__/memory_system.cpython-310.pyc ADDED
Binary file (5.31 kB). View file
 
__pycache__/observability.cpython-310.pyc CHANGED
Binary files a/__pycache__/observability.cpython-310.pyc and b/__pycache__/observability.cpython-310.pyc differ
 
__pycache__/tools.cpython-310.pyc ADDED
Binary file (3.72 kB). View file
 
agents/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (738 Bytes). View file
 
agents/__pycache__/answer_formatter.cpython-310.pyc CHANGED
Binary files a/agents/__pycache__/answer_formatter.cpython-310.pyc and b/agents/__pycache__/answer_formatter.cpython-310.pyc differ
 
agents/__pycache__/code_agent.cpython-310.pyc CHANGED
Binary files a/agents/__pycache__/code_agent.cpython-310.pyc and b/agents/__pycache__/code_agent.cpython-310.pyc differ
 
agents/__pycache__/code_agent.cpython-313.pyc CHANGED
Binary files a/agents/__pycache__/code_agent.cpython-313.pyc and b/agents/__pycache__/code_agent.cpython-313.pyc differ
 
agents/__pycache__/lead_agent.cpython-310.pyc CHANGED
Binary files a/agents/__pycache__/lead_agent.cpython-310.pyc and b/agents/__pycache__/lead_agent.cpython-310.pyc differ
 
agents/__pycache__/research_agent.cpython-310.pyc CHANGED
Binary files a/agents/__pycache__/research_agent.cpython-310.pyc and b/agents/__pycache__/research_agent.cpython-310.pyc differ
 
agents/__pycache__/research_agent.cpython-313.pyc CHANGED
Binary files a/agents/__pycache__/research_agent.cpython-313.pyc and b/agents/__pycache__/research_agent.cpython-313.pyc differ
 
agents/code_agent.py CHANGED
@@ -16,196 +16,97 @@ from typing import Dict, Any, List
16
  from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage
17
  from langgraph.types import Command
18
  from langchain_groq import ChatGroq
19
- from langchain_core.tools import Tool
20
  from observability import agent_span, tool_span
21
  from dotenv import load_dotenv
22
 
23
- # Import calculator tools from the existing tools.py
24
- from tools import get_calculator_tool, get_hub_stats_tool
25
 
26
  load_dotenv("env.local")
27
 
28
 
29
- def create_code_tools() -> List[Tool]:
30
- """Create LangChain-compatible computational tools"""
31
- tools = []
32
-
33
- # Mathematical calculator tools
34
- def multiply_func(a: float, b: float) -> str:
35
- """Multiply two numbers"""
36
- try:
37
- result = a * b
38
- return f"{a} × {b} = {result}"
39
- except Exception as e:
40
- return f"Error: {str(e)}"
41
-
42
- def add_func(a: float, b: float) -> str:
43
- """Add two numbers"""
44
- try:
45
- result = a + b
46
- return f"{a} + {b} = {result}"
47
- except Exception as e:
48
- return f"Error: {str(e)}"
49
-
50
- def subtract_func(a: float, b: float) -> str:
51
- """Subtract two numbers"""
52
- try:
53
- result = a - b
54
- return f"{a} - {b} = {result}"
55
- except Exception as e:
56
- return f"Error: {str(e)}"
57
-
58
- def divide_func(a: float, b: float) -> str:
59
- """Divide two numbers"""
60
- try:
61
- if b == 0:
62
- return "Error: Cannot divide by zero"
63
- result = a / b
64
- return f"{a} ÷ {b} = {result}"
65
- except Exception as e:
66
- return f"Error: {str(e)}"
67
-
68
- def modulus_func(a: int, b: int) -> str:
69
- """Get the modulus of two numbers"""
70
- try:
71
- if b == 0:
72
- return "Error: Cannot modulo by zero"
73
- result = a % b
74
- return f"{a} mod {b} = {result}"
75
- except Exception as e:
76
- return f"Error: {str(e)}"
77
 
78
- # Create calculator tools
79
- calc_tools = [
80
- Tool(name="multiply", description="Multiply two numbers. Use format: multiply(a, b)", func=lambda input_str: multiply_func(*map(float, input_str.split(',')))),
81
- Tool(name="add", description="Add two numbers. Use format: add(a, b)", func=lambda input_str: add_func(*map(float, input_str.split(',')))),
82
- Tool(name="subtract", description="Subtract two numbers. Use format: subtract(a, b)", func=lambda input_str: subtract_func(*map(float, input_str.split(',')))),
83
- Tool(name="divide", description="Divide two numbers. Use format: divide(a, b)", func=lambda input_str: divide_func(*map(float, input_str.split(',')))),
84
- Tool(name="modulus", description="Get modulus of two integers. Use format: modulus(a, b)", func=lambda input_str: modulus_func(*map(int, input_str.split(',')))),
85
- ]
 
86
 
87
- tools.extend(calc_tools)
88
- print(f"✅ Added {len(calc_tools)} calculator tools")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
 
90
- # Hub stats tool
91
  try:
92
- from tools import get_hub_stats
93
-
94
- def hub_stats_func(author: str) -> str:
95
- """Get Hugging Face Hub statistics for an author"""
96
- try:
97
- return get_hub_stats(author)
98
- except Exception as e:
99
- return f"Hub stats error: {str(e)}"
100
 
101
- hub_tool = Tool(
102
- name="hub_stats",
103
- description="Get statistics for Hugging Face Hub models by author",
104
- func=hub_stats_func
105
- )
106
- tools.append(hub_tool)
107
- print("✅ Added Hub stats tool")
108
- except Exception as e:
109
- print(f"⚠️ Could not load Hub stats tool: {e}")
110
-
111
- # Python execution tool
112
- python_tool = create_python_execution_tool()
113
- tools.append(python_tool)
114
- print("✅ Added Python execution tool")
115
 
116
- print(f"🔧 Code Agent loaded {len(tools)} tools")
117
- return tools
118
-
119
-
120
- def create_python_execution_tool() -> Tool:
121
- """Create a tool for executing Python code safely"""
122
-
123
- def execute_python_code(code: str) -> str:
124
- """
125
- Execute Python code in a controlled environment.
126
-
127
- Args:
128
- code: Python code to execute
129
-
130
- Returns:
131
- String containing the output or error message
132
- """
133
- # Create a string buffer to capture output
134
- output_buffer = io.StringIO()
135
- error_buffer = io.StringIO()
136
-
137
- # Prepare a safe execution environment
138
- safe_globals = {
139
- '__builtins__': {
140
- 'print': lambda *args, **kwargs: print(*args, file=output_buffer, **kwargs),
141
- 'len': len,
142
- 'str': str,
143
- 'int': int,
144
- 'float': float,
145
- 'list': list,
146
- 'dict': dict,
147
- 'set': set,
148
- 'tuple': tuple,
149
- 'range': range,
150
- 'sum': sum,
151
- 'max': max,
152
- 'min': min,
153
- 'abs': abs,
154
- 'round': round,
155
- 'sorted': sorted,
156
- 'enumerate': enumerate,
157
- 'zip': zip,
158
- 'map': map,
159
- 'filter': filter,
160
- }
161
- }
162
 
163
- # Allow common safe modules
164
- try:
165
- import math
166
- import statistics
167
- import datetime
168
- import json
169
- import re
170
-
171
- safe_globals.update({
172
- 'math': math,
173
- 'statistics': statistics,
174
- 'datetime': datetime,
175
- 'json': json,
176
- 're': re,
177
- })
178
- except ImportError:
179
- pass
180
 
181
- try:
182
- # Execute the code
183
- with contextlib.redirect_stdout(output_buffer), \
184
- contextlib.redirect_stderr(error_buffer):
185
- exec(code, safe_globals)
186
-
187
- # Get the output
188
- output = output_buffer.getvalue()
189
- error = error_buffer.getvalue()
190
 
191
- if error:
192
- return f"Error: {error}"
193
- elif output:
194
- return output.strip()
195
- else:
196
- return "Code executed successfully (no output)"
197
-
198
- except Exception as e:
199
- return f"Execution error: {str(e)}"
200
- finally:
201
- output_buffer.close()
202
- error_buffer.close()
203
-
204
- return Tool(
205
- name="python_execution",
206
- description="Execute Python code for calculations and data processing. Use for complex computations, data analysis, or when calculator tools are insufficient.",
207
- func=execute_python_code
208
- )
209
 
210
 
211
  def load_code_prompt() -> str:
@@ -226,6 +127,7 @@ Your goals:
226
  When handling computational tasks:
227
  - Use calculator tools for basic arithmetic operations
228
  - Use Python execution for complex calculations, data processing, or multi-step computations
 
229
  - Show your work and intermediate steps
230
  - Verify results when possible
231
  - Handle edge cases and potential errors
@@ -233,7 +135,7 @@ When handling computational tasks:
233
  Available tools:
234
  - Calculator tools: add, subtract, multiply, divide, modulus
235
  - Python execution: for complex computations and data analysis
236
- - Hub stats tool: for Hugging Face model information
237
 
238
  Format your response as:
239
  ### Computational Analysis
@@ -249,7 +151,7 @@ Format your response as:
249
 
250
  def code_agent(state: Dict[str, Any]) -> Command:
251
  """
252
- Code Agent node that handles computational tasks and code execution.
253
 
254
  Returns Command with computational results appended to code_outputs.
255
  """
@@ -267,17 +169,18 @@ def code_agent(state: Dict[str, Any]) -> Command:
267
  max_tokens=2048
268
  )
269
 
270
- # Get computational tools
271
- tools = create_code_tools()
 
272
 
273
  # Bind tools to LLM
274
- llm_with_tools = llm.bind_tools(tools)
275
 
276
  # Create agent span for tracing
277
  with agent_span(
278
  "code",
279
  metadata={
280
- "tools_available": len(tools),
281
  "research_context_length": len(state.get("research_notes", "")),
282
  "user_id": state.get("user_id", "unknown"),
283
  "session_id": state.get("session_id", "unknown")
@@ -324,34 +227,25 @@ Please perform all necessary calculations to help answer this question.
324
  # Get computational response
325
  response = llm_with_tools.invoke(code_messages)
326
 
327
- # Process the response - handle both tool calls and direct responses
328
  computation_results = []
329
-
330
- # Check if the LLM wants to use tools
331
  if hasattr(response, 'tool_calls') and response.tool_calls:
332
  print(f"🛠️ Executing {len(response.tool_calls)} computational operations")
333
 
334
- # Execute tool calls and collect results
335
  for tool_call in response.tool_calls:
336
  try:
337
  # Find the tool by name
338
- tool = next((t for t in tools if t.name == tool_call['name']), None)
339
  if tool:
340
- # Handle different argument formats
341
- args = tool_call.get('args', {})
342
- if isinstance(args, dict):
343
- # Convert dict args to string for simple tools
344
- if len(args) == 1:
345
- arg_value = list(args.values())[0]
346
- else:
347
- arg_value = ','.join(str(v) for v in args.values())
348
- else:
349
- arg_value = str(args)
350
-
351
- result = tool.func(arg_value)
352
- computation_results.append(f"**{tool.name}**: {result}")
353
  else:
354
  computation_results.append(f"**{tool_call['name']}**: Tool not found")
 
355
  except Exception as e:
356
  print(f"⚠️ Tool {tool_call.get('name', 'unknown')} failed: {e}")
357
  computation_results.append(f"**{tool_call.get('name', 'unknown')}**: Error - {str(e)}")
@@ -359,50 +253,52 @@ Please perform all necessary calculations to help answer this question.
359
  # Compile computational results
360
  if computation_results:
361
  computational_findings = "\n\n".join(computation_results)
362
- else:
363
- # No tools used or tool calls failed, analyze if computation is needed
364
- computational_findings = response.content if hasattr(response, 'content') else str(response)
365
 
366
- # If the response looks like it should have used tools but didn't, try direct calculation
367
- if any(op in user_query.lower() for op in ['+', '-', '*', '/', 'calculate', 'compute', 'multiply', 'add', 'subtract', 'divide']):
368
- print("🔧 Attempting direct calculation...")
369
-
370
- # Try to extract and solve simple mathematical expressions
371
- import re
372
-
373
- # Look for simple math expressions
374
- math_patterns = [
375
- r'(\d+)\s*\+\s*(\d+)', # addition
376
- r'(\d+)\s*\*\s*(\d+)', # multiplication
377
- r'(\d+)\s*-\s*(\d+)', # subtraction
378
- r'(\d+)\s*/\s*(\d+)', # division
379
- ]
380
-
381
- for pattern in math_patterns:
382
- matches = re.findall(pattern, user_query)
383
- if matches:
384
- for match in matches:
385
- a, b = int(match[0]), int(match[1])
386
- if '+' in user_query:
387
- result = a + b
388
- computational_findings += f"\n\nDirect calculation: {a} + {b} = {result}"
389
- elif '*' in user_query:
390
- result = a * b
391
- computational_findings += f"\n\nDirect calculation: {a} × {b} = {result}"
392
- elif '-' in user_query:
393
- result = a - b
394
- computational_findings += f"\n\nDirect calculation: {a} - {b} = {result}"
395
- elif '/' in user_query:
396
- result = a / b
397
- computational_findings += f"\n\nDirect calculation: {a} ÷ {b} = {result}"
398
- break
399
-
400
- # Format computational results
401
- formatted_results = f"""
402
  ### Computational Analysis {state.get('loop_counter', 0) + 1}
403
 
 
 
 
404
  {computational_findings}
405
 
 
 
 
 
 
 
 
 
 
 
406
  ---
407
  """
408
 
@@ -412,6 +308,7 @@ Please perform all necessary calculations to help answer this question.
412
  if span:
413
  span.update_trace(metadata={
414
  "computation_length": len(formatted_results),
 
415
  "results_preview": formatted_results[:300] + "..."
416
  })
417
 
 
16
  from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage
17
  from langgraph.types import Command
18
  from langchain_groq import ChatGroq
19
+ from langchain_core.tools import BaseTool, tool
20
  from observability import agent_span, tool_span
21
  from dotenv import load_dotenv
22
 
23
+ # Import LangChain-compatible tools
24
+ from langgraph_tools import get_code_tools
25
 
26
  load_dotenv("env.local")
27
 
28
 
29
+ @tool("python_execution")
30
+ def python_execution_tool(code: str) -> str:
31
+ """
32
+ Execute Python code in a controlled environment.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
+ Args:
35
+ code: Python code to execute
36
+
37
+ Returns:
38
+ String containing the output or error message
39
+ """
40
+ # Create a string buffer to capture output
41
+ output_buffer = io.StringIO()
42
+ error_buffer = io.StringIO()
43
 
44
+ # Prepare a safe execution environment
45
+ safe_globals = {
46
+ '__builtins__': {
47
+ 'print': lambda *args, **kwargs: print(*args, file=output_buffer, **kwargs),
48
+ 'len': len,
49
+ 'str': str,
50
+ 'int': int,
51
+ 'float': float,
52
+ 'list': list,
53
+ 'dict': dict,
54
+ 'set': set,
55
+ 'tuple': tuple,
56
+ 'range': range,
57
+ 'sum': sum,
58
+ 'max': max,
59
+ 'min': min,
60
+ 'abs': abs,
61
+ 'round': round,
62
+ 'sorted': sorted,
63
+ 'enumerate': enumerate,
64
+ 'zip': zip,
65
+ 'map': map,
66
+ 'filter': filter,
67
+ }
68
+ }
69
 
70
+ # Allow common safe modules
71
  try:
72
+ import math
73
+ import statistics
74
+ import datetime
75
+ import json
76
+ import re
 
 
 
77
 
78
+ safe_globals.update({
79
+ 'math': math,
80
+ 'statistics': statistics,
81
+ 'datetime': datetime,
82
+ 'json': json,
83
+ 're': re,
84
+ })
85
+ except ImportError:
86
+ pass
 
 
 
 
 
87
 
88
+ try:
89
+ # Execute the code
90
+ with contextlib.redirect_stdout(output_buffer), \
91
+ contextlib.redirect_stderr(error_buffer):
92
+ exec(code, safe_globals)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
+ # Get the output
95
+ output = output_buffer.getvalue()
96
+ error = error_buffer.getvalue()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
 
98
+ if error:
99
+ return f"Error: {error}"
100
+ elif output:
101
+ return output.strip()
102
+ else:
103
+ return "Code executed successfully (no output)"
 
 
 
104
 
105
+ except Exception as e:
106
+ return f"Execution error: {str(e)}"
107
+ finally:
108
+ output_buffer.close()
109
+ error_buffer.close()
 
 
 
 
 
 
 
 
 
 
 
 
 
110
 
111
 
112
  def load_code_prompt() -> str:
 
127
  When handling computational tasks:
128
  - Use calculator tools for basic arithmetic operations
129
  - Use Python execution for complex calculations, data processing, or multi-step computations
130
+ - Use Hugging Face Hub stats for model information
131
  - Show your work and intermediate steps
132
  - Verify results when possible
133
  - Handle edge cases and potential errors
 
135
  Available tools:
136
  - Calculator tools: add, subtract, multiply, divide, modulus
137
  - Python execution: for complex computations and data analysis
138
+ - Hugging Face Hub stats: for model information
139
 
140
  Format your response as:
141
  ### Computational Analysis
 
151
 
152
  def code_agent(state: Dict[str, Any]) -> Command:
153
  """
154
+ Code Agent node that handles computational tasks using LangChain tools.
155
 
156
  Returns Command with computational results appended to code_outputs.
157
  """
 
169
  max_tokens=2048
170
  )
171
 
172
+ # Get computational tools (calculator tools + hub stats + python execution)
173
+ code_tools = get_code_tools()
174
+ code_tools.append(python_execution_tool) # Add Python execution tool
175
 
176
  # Bind tools to LLM
177
+ llm_with_tools = llm.bind_tools(code_tools)
178
 
179
  # Create agent span for tracing
180
  with agent_span(
181
  "code",
182
  metadata={
183
+ "tools_available": len(code_tools),
184
  "research_context_length": len(state.get("research_notes", "")),
185
  "user_id": state.get("user_id", "unknown"),
186
  "session_id": state.get("session_id", "unknown")
 
227
  # Get computational response
228
  response = llm_with_tools.invoke(code_messages)
229
 
230
+ # Process tool calls if any
231
  computation_results = []
 
 
232
  if hasattr(response, 'tool_calls') and response.tool_calls:
233
  print(f"🛠️ Executing {len(response.tool_calls)} computational operations")
234
 
 
235
  for tool_call in response.tool_calls:
236
  try:
237
  # Find the tool by name
238
+ tool = next((t for t in code_tools if t.name == tool_call['name']), None)
239
  if tool:
240
+ # Execute the tool
241
+ with tool_span(tool.name, metadata={"args": tool_call.get('args', {})}) as tool_span_ctx:
242
+ result = tool.invoke(tool_call.get('args', {}))
243
+ computation_results.append(f"**{tool.name}**: {result}")
244
+ if tool_span_ctx:
245
+ tool_span_ctx.update_trace(output={"result": str(result)[:200] + "..."})
 
 
 
 
 
 
 
246
  else:
247
  computation_results.append(f"**{tool_call['name']}**: Tool not found")
248
+
249
  except Exception as e:
250
  print(f"⚠️ Tool {tool_call.get('name', 'unknown')} failed: {e}")
251
  computation_results.append(f"**{tool_call.get('name', 'unknown')}**: Error - {str(e)}")
 
253
  # Compile computational results
254
  if computation_results:
255
  computational_findings = "\n\n".join(computation_results)
 
 
 
256
 
257
+ # Ask LLM to analyze the computational results
258
+ analysis_request = f"""
259
+ Based on the computational results below, provide a structured analysis:
260
+
261
+ Original Question: {user_query}
262
+
263
+ Computational Results:
264
+ {computational_findings}
265
+
266
+ Please analyze these results and provide:
267
+ 1. Summary of calculations performed
268
+ 2. Key numerical findings
269
+ 3. Interpretation of results
270
+ 4. How these results help answer the original question
271
+
272
+ Structure your response clearly.
273
+ """
274
+
275
+ analysis_messages = [
276
+ SystemMessage(content=code_prompt),
277
+ HumanMessage(content=analysis_request)
278
+ ]
279
+
280
+ analysis_response = llm.invoke(analysis_messages)
281
+ analysis_content = analysis_response.content if hasattr(analysis_response, 'content') else str(analysis_response)
282
+
283
+ # Format final computational results
284
+ formatted_results = f"""
 
 
 
 
 
 
 
 
285
  ### Computational Analysis {state.get('loop_counter', 0) + 1}
286
 
287
+ {analysis_content}
288
+
289
+ ### Tool Results
290
  {computational_findings}
291
 
292
+ ---
293
+ """
294
+ else:
295
+ # No tools were called, use the LLM response directly
296
+ response_content = response.content if hasattr(response, 'content') else str(response)
297
+ formatted_results = f"""
298
+ ### Computational Analysis {state.get('loop_counter', 0) + 1}
299
+
300
+ {response_content}
301
+
302
  ---
303
  """
304
 
 
308
  if span:
309
  span.update_trace(metadata={
310
  "computation_length": len(formatted_results),
311
+ "tools_used": len(computation_results),
312
  "results_preview": formatted_results[:300] + "..."
313
  })
314
 
agents/research_agent.py CHANGED
@@ -13,112 +13,16 @@ from typing import Dict, Any, List
13
  from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage
14
  from langgraph.types import Command
15
  from langchain_groq import ChatGroq
16
- from langchain_core.tools import Tool
17
  from observability import agent_span, tool_span
18
  from dotenv import load_dotenv
19
 
20
- # Import tools from the existing tools.py
21
- from tools import (
22
- get_tavily_tool,
23
- get_wikipedia_tool,
24
- get_arxiv_tool,
25
- get_wikipedia_reader,
26
- get_arxiv_reader
27
- )
28
 
29
  load_dotenv("env.local")
30
 
31
 
32
- def create_research_functions() -> List[callable]:
33
- """Create simple research functions that can be called directly"""
34
-
35
- def web_search(query: str) -> str:
36
- """Search the web using Tavily API"""
37
- try:
38
- with tool_span("tavily_search", metadata={"query": query}):
39
- from tools import get_tavily_tool
40
- tavily_spec = get_tavily_tool()
41
- if tavily_spec:
42
- tavily_tools = tavily_spec.to_tool_list()
43
- if tavily_tools and len(tavily_tools) > 0:
44
- # Try different parameter formats for Tavily
45
- try:
46
- result = tavily_tools[0].call({"input": query})
47
- except Exception:
48
- try:
49
- result = tavily_tools[0].call({"query": query})
50
- except Exception:
51
- result = tavily_tools[0].call(query)
52
- return f"Web Search Results for '{query}':\n{str(result)}"
53
- return "Web search tool not available"
54
- except Exception as e:
55
- print(f"Web search failed: {e}")
56
- return f"Web search not available: {str(e)}"
57
-
58
- def wikipedia_search(query: str) -> str:
59
- """Search Wikipedia for information"""
60
- try:
61
- with tool_span("wikipedia_search", metadata={"query": query}):
62
- from tools import get_wikipedia_tool
63
- wiki_tool = get_wikipedia_tool()
64
- if wiki_tool:
65
- # Try different parameter formats for Wikipedia
66
- try:
67
- result = wiki_tool.call({"query_str": query})
68
- except Exception:
69
- try:
70
- result = wiki_tool.call({"input": query})
71
- except Exception:
72
- try:
73
- result = wiki_tool.call({"query": query})
74
- except Exception:
75
- result = wiki_tool.call(query)
76
- return f"Wikipedia Results for '{query}':\n{str(result)}"
77
- return "Wikipedia tool not available"
78
- except Exception as e:
79
- print(f"Wikipedia search failed: {e}")
80
- return f"Wikipedia search not available: {str(e)}"
81
-
82
- def arxiv_search(query: str) -> str:
83
- """Search ArXiv for academic papers"""
84
- try:
85
- with tool_span("arxiv_search", metadata={"query": query}):
86
- from tools import get_arxiv_tool
87
- arxiv_tool = get_arxiv_tool()
88
- if arxiv_tool:
89
- # Try different parameter formats for ArXiv
90
- try:
91
- result = arxiv_tool.call({"query_str": query})
92
- except Exception:
93
- try:
94
- result = arxiv_tool.call({"input": query})
95
- except Exception:
96
- try:
97
- result = arxiv_tool.call({"query": query})
98
- except Exception:
99
- result = arxiv_tool.call(query)
100
- return f"ArXiv Results for '{query}':\n{str(result)}"
101
- return "ArXiv tool not available"
102
- except Exception as e:
103
- print(f"ArXiv search failed: {e}")
104
- return f"ArXiv search not available: {str(e)}"
105
-
106
- # Add a simple fallback research function that doesn't use external APIs
107
- def fallback_research(query: str) -> str:
108
- """Provide basic context when external tools fail"""
109
- return f"""
110
- Fallback Research for '{query}':
111
-
112
- This research uses general knowledge available in the system.
113
- For comprehensive research, external API access (Tavily, Wikipedia) would be needed.
114
-
115
- Basic information may be available through the language model's training data,
116
- but current information would require working API connections.
117
- """
118
-
119
- return [web_search, wikipedia_search, arxiv_search, fallback_research]
120
-
121
-
122
  def load_research_prompt() -> str:
123
  """Load the research-specific prompt"""
124
  try:
@@ -141,70 +45,27 @@ When researching:
141
  - Cross-reference information across sources
142
  - Note any conflicting information found
143
 
144
- Important: You have access to research functions, but you cannot call them directly.
145
- Instead, specify what searches you would like to perform and the system will execute them for you.
146
-
147
  Format your response as:
148
  ### Research Strategy
149
  [Describe what searches are needed]
150
 
151
- ### Analysis
152
- [Analyze the information once gathered]
153
 
154
  ### Key Facts
155
  - Fact 1
156
  - Fact 2
157
  - Fact 3
158
 
159
- ### Citations
160
- - Citation 1
161
- - Citation 2
162
  """
163
 
164
 
165
- def perform_research_searches(query: str, research_functions: List[callable]) -> str:
166
- """
167
- Intelligently perform research searches based on the query
168
- """
169
- results = []
170
-
171
- # Always try web search first for current info
172
- web_search, wikipedia_search, arxiv_search, fallback_research = research_functions
173
-
174
- print("🌐 Performing web search...")
175
- web_result = web_search(query)
176
- results.append(web_result)
177
-
178
- # Try Wikipedia for encyclopedic info
179
- print("📚 Performing Wikipedia search...")
180
- wiki_result = wikipedia_search(query)
181
- results.append(wiki_result)
182
-
183
- # For academic/technical queries, try ArXiv
184
- academic_keywords = ['research', 'study', 'paper', 'algorithm', 'model', 'theory', 'analysis']
185
- if any(keyword in query.lower() for keyword in academic_keywords):
186
- print("🎓 Performing ArXiv search...")
187
- arxiv_result = arxiv_search(query)
188
- results.append(arxiv_result)
189
-
190
- # Check if we got meaningful results, if not, use fallback
191
- meaningful_results = []
192
- for result in results:
193
- if result and not ("not available" in result or "error" in result.lower()):
194
- meaningful_results.append(result)
195
-
196
- # If no meaningful results, add fallback research
197
- if not meaningful_results:
198
- print("🔄 Using fallback research...")
199
- fallback_result = fallback_research(query)
200
- results.append(fallback_result)
201
-
202
- return "\n\n---\n\n".join(results)
203
-
204
-
205
  def research_agent(state: Dict[str, Any]) -> Command:
206
  """
207
- Research Agent node that gathers information using available tools.
208
 
209
  Returns Command with research results appended to research_notes.
210
  """
@@ -215,21 +76,24 @@ def research_agent(state: Dict[str, Any]) -> Command:
215
  # Get research prompt
216
  research_prompt = load_research_prompt()
217
 
218
- # Initialize LLM without tool binding (we'll call tools manually)
219
  llm = ChatGroq(
220
  model="llama-3.3-70b-versatile",
221
  temperature=0.3, # Slightly higher for research creativity
222
  max_tokens=2048
223
  )
224
 
225
- # Get research functions
226
- research_functions = create_research_functions()
 
 
 
227
 
228
  # Create agent span for tracing
229
  with agent_span(
230
  "research",
231
  metadata={
232
- "tools_available": len(research_functions),
233
  "user_id": state.get("user_id", "unknown"),
234
  "session_id": state.get("session_id", "unknown")
235
  }
@@ -243,18 +107,115 @@ def research_agent(state: Dict[str, Any]) -> Command:
243
  user_query = msg.content
244
  break
245
 
246
- # Perform actual research searches
247
- print(f"🔍 Researching: {user_query}")
248
- research_raw_results = perform_research_searches(user_query, research_functions)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
249
 
250
- # Now ask LLM to analyze and structure the results
251
- analysis_request = f"""
252
- Based on the research results below, provide a structured analysis to help answer the user's question.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
253
 
254
  Original Question: {user_query}
255
 
256
  Research Results:
257
- {research_raw_results}
258
 
259
  Current research status: {len(state.get('research_notes', ''))} characters already gathered
260
 
@@ -267,25 +228,33 @@ Instructions:
267
 
268
  Please provide a comprehensive analysis of the research findings.
269
  """
270
-
271
- # Create messages for analysis
272
- analysis_messages = [
273
- SystemMessage(content=research_prompt),
274
- HumanMessage(content=analysis_request)
275
- ]
276
-
277
- # Get analysis response
278
- response = llm.invoke(analysis_messages)
279
- analysis_content = response.content if hasattr(response, 'content') else str(response)
280
-
281
- # Format research results
282
- formatted_results = f"""
283
  ### Research Iteration {state.get('loop_counter', 0) + 1}
284
 
285
  {analysis_content}
286
 
287
- ### Raw Search Results
288
- {research_raw_results}
 
 
 
 
 
 
 
 
 
 
289
 
290
  ---
291
  """
@@ -296,6 +265,7 @@ Please provide a comprehensive analysis of the research findings.
296
  if span:
297
  span.update_trace(output={
298
  "research_length": len(formatted_results),
 
299
  "findings_preview": formatted_results[:300] + "..."
300
  })
301
 
 
13
  from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage
14
  from langgraph.types import Command
15
  from langchain_groq import ChatGroq
16
+ from langchain_core.tools import BaseTool
17
  from observability import agent_span, tool_span
18
  from dotenv import load_dotenv
19
 
20
+ # Import LangChain-compatible tools
21
+ from langgraph_tools import get_research_tools
 
 
 
 
 
 
22
 
23
  load_dotenv("env.local")
24
 
25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  def load_research_prompt() -> str:
27
  """Load the research-specific prompt"""
28
  try:
 
45
  - Cross-reference information across sources
46
  - Note any conflicting information found
47
 
 
 
 
48
  Format your response as:
49
  ### Research Strategy
50
  [Describe what searches are needed]
51
 
52
+ ### Findings
53
+ [Key information discovered]
54
 
55
  ### Key Facts
56
  - Fact 1
57
  - Fact 2
58
  - Fact 3
59
 
60
+ ### Sources
61
+ - Source 1
62
+ - Source 2
63
  """
64
 
65
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  def research_agent(state: Dict[str, Any]) -> Command:
67
  """
68
+ Research Agent node that gathers information using LangChain tools.
69
 
70
  Returns Command with research results appended to research_notes.
71
  """
 
76
  # Get research prompt
77
  research_prompt = load_research_prompt()
78
 
79
+ # Initialize LLM with tool binding
80
  llm = ChatGroq(
81
  model="llama-3.3-70b-versatile",
82
  temperature=0.3, # Slightly higher for research creativity
83
  max_tokens=2048
84
  )
85
 
86
+ # Get LangChain-compatible research tools
87
+ research_tools = get_research_tools()
88
+
89
+ # Bind tools to LLM for function calling
90
+ llm_with_tools = llm.bind_tools(research_tools)
91
 
92
  # Create agent span for tracing
93
  with agent_span(
94
  "research",
95
  metadata={
96
+ "tools_available": len(research_tools),
97
  "user_id": state.get("user_id", "unknown"),
98
  "session_id": state.get("session_id", "unknown")
99
  }
 
107
  user_query = msg.content
108
  break
109
 
110
+ # Build research request
111
+ research_request = f"""
112
+ You must research the following question using the available tools. Do not answer from memory alone.
113
+
114
+ Question: {user_query}
115
+
116
+ Current research status: {len(state.get('research_notes', ''))} characters already gathered
117
+
118
+ CRITICAL: You MUST use the available research tools to gather information. Do not provide an answer without using tools.
119
+
120
+ Available tools:
121
+ - tavily_search_results_json: For current web information
122
+ - wikipedia_search: For encyclopedic knowledge
123
+ - arxiv_search: For academic papers
124
+
125
+ Instructions:
126
+ 1. ALWAYS use tavily_search_results_json for current information
127
+ 2. Use wikipedia_search for general knowledge topics
128
+ 3. Use arxiv_search for academic/technical topics if relevant
129
+ 4. You must call at least one tool - preferably multiple tools
130
+ 5. Analyze and synthesize the information from the tools
131
+ 6. Provide structured findings with sources
132
+
133
+ Start by calling the appropriate research tools to gather information about this question.
134
+ """
135
 
136
+ # Create messages for research
137
+ research_messages = [
138
+ SystemMessage(content=research_prompt),
139
+ HumanMessage(content=research_request)
140
+ ]
141
+
142
+ # Get research response with tool calls
143
+ response = llm_with_tools.invoke(research_messages)
144
+
145
+ # Debug: Check if tools were called
146
+ print(f"🔍 Research response type: {type(response)}")
147
+ print(f"🔍 Has tool_calls attribute: {hasattr(response, 'tool_calls')}")
148
+ if hasattr(response, 'tool_calls'):
149
+ print(f"🔍 Tool calls: {response.tool_calls}")
150
+ else:
151
+ print(f"🔍 Response content preview: {str(response)[:200]}...")
152
+
153
+ # Process tool calls if any
154
+ tool_results = []
155
+ if hasattr(response, 'tool_calls') and response.tool_calls:
156
+ print(f"🛠️ Executing {len(response.tool_calls)} research tools")
157
+
158
+ for tool_call in response.tool_calls:
159
+ try:
160
+ # Find the tool by name
161
+ tool = next((t for t in research_tools if t.name == tool_call['name']), None)
162
+ if tool:
163
+ # Execute the tool
164
+ with tool_span(tool.name, metadata={"args": tool_call.get('args', {})}) as tool_span_ctx:
165
+ result = tool.invoke(tool_call.get('args', {}))
166
+ tool_results.append(f"**{tool.name}**: {result}")
167
+ if tool_span_ctx:
168
+ tool_span_ctx.update_trace(output={"result": str(result)[:200] + "..."})
169
+ else:
170
+ tool_results.append(f"**{tool_call['name']}**: Tool not found")
171
+
172
+ except Exception as e:
173
+ print(f"⚠️ Tool {tool_call.get('name', 'unknown')} failed: {e}")
174
+ tool_results.append(f"**{tool_call.get('name', 'unknown')}**: Error - {str(e)}")
175
+ else:
176
+ print("⚠️ No tool calls detected - LLM did not choose to use any tools")
177
+ # Force tool usage for research questions
178
+ print("🔧 Forcing tool usage for research...")
179
+
180
+ # Manually call appropriate tools based on query
181
+ forced_tools = []
182
+
183
+ # Always try web search for current info
184
+ tavily_tool = next((t for t in research_tools if t.name == "tavily_search_results_json"), None)
185
+ if tavily_tool:
186
+ try:
187
+ print("🌐 Forcing Tavily web search...")
188
+ result = tavily_tool.invoke({"query": user_query})
189
+ forced_tools.append(f"**tavily_search_results_json (forced)**: {result}")
190
+ except Exception as e:
191
+ print(f"⚠️ Forced Tavily search failed: {e}")
192
+ forced_tools.append(f"**tavily_search_results_json (forced)**: Error - {str(e)}")
193
+
194
+ # Try Wikipedia for general knowledge
195
+ wiki_tool = next((t for t in research_tools if t.name == "wikipedia_search"), None)
196
+ if wiki_tool:
197
+ try:
198
+ print("📚 Forcing Wikipedia search...")
199
+ result = wiki_tool.invoke({"query": user_query})
200
+ forced_tools.append(f"**wikipedia_search (forced)**: {result}")
201
+ except Exception as e:
202
+ print(f"⚠️ Forced Wikipedia search failed: {e}")
203
+ forced_tools.append(f"**wikipedia_search (forced)**: Error - {str(e)}")
204
+
205
+ tool_results = forced_tools
206
+
207
+ # Compile research findings
208
+ if tool_results:
209
+ research_findings = "\n\n".join(tool_results)
210
+
211
+ # Ask LLM to analyze the tool results
212
+ analysis_request = f"""
213
+ Based on the research results below, provide a structured analysis:
214
 
215
  Original Question: {user_query}
216
 
217
  Research Results:
218
+ {research_findings}
219
 
220
  Current research status: {len(state.get('research_notes', ''))} characters already gathered
221
 
 
228
 
229
  Please provide a comprehensive analysis of the research findings.
230
  """
231
+
232
+ analysis_messages = [
233
+ SystemMessage(content=research_prompt),
234
+ HumanMessage(content=analysis_request)
235
+ ]
236
+
237
+ analysis_response = llm.invoke(analysis_messages)
238
+ analysis_content = analysis_response.content if hasattr(analysis_response, 'content') else str(analysis_response)
239
+
240
+ # Format final research results
241
+ formatted_results = f"""
 
 
242
  ### Research Iteration {state.get('loop_counter', 0) + 1}
243
 
244
  {analysis_content}
245
 
246
+ ### Raw Tool Results
247
+ {research_findings}
248
+
249
+ ---
250
+ """
251
+ else:
252
+ # No tools were called, use the LLM response directly
253
+ response_content = response.content if hasattr(response, 'content') else str(response)
254
+ formatted_results = f"""
255
+ ### Research Iteration {state.get('loop_counter', 0) + 1}
256
+
257
+ {response_content}
258
 
259
  ---
260
  """
 
265
  if span:
266
  span.update_trace(output={
267
  "research_length": len(formatted_results),
268
+ "tools_used": len(tool_results),
269
  "findings_preview": formatted_results[:300] + "..."
270
  })
271
 
langgraph_tools.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ LangChain-compatible tools for the LangGraph multi-agent system
3
+
4
+ This module provides LangChain tools that work properly with LangGraph agents,
5
+ replacing the LlamaIndex tools with native LangChain implementations.
6
+ """
7
+
8
+ import os
9
+ import wikipedia
10
+ import arxiv
11
+ from typing import List, Optional, Type
12
+ from langchain_core.tools import BaseTool, tool
13
+ from pydantic import BaseModel, Field
14
+ from langchain_tavily import TavilySearch
15
+ from huggingface_hub import list_models
16
+ from observability import tool_span
17
+
18
+
19
+ # Pydantic schemas for tool inputs
20
class WikipediaSearchInput(BaseModel):
    """Input for Wikipedia search tool."""
    # Free-text query; forwarded verbatim to wikipedia.search() by the tool.
    query: str = Field(description="The search query for Wikipedia")
23
+
24
+
25
class ArxivSearchInput(BaseModel):
    """Input for ArXiv search tool."""
    # Free-text query; forwarded verbatim to arxiv.Search() by the tool.
    query: str = Field(description="The search query for ArXiv papers")
28
+
29
+
30
class HubStatsInput(BaseModel):
    """Input for Hugging Face Hub stats tool."""
    # Hub namespace (user or org) passed as the `author` filter to list_models().
    author: str = Field(description="The author/organization name on Hugging Face Hub")
33
+
34
+
35
+ # LangChain-compatible tool implementations
36
+
37
@tool("wikipedia_search", args_schema=WikipediaSearchInput)
def wikipedia_search_tool(query: str) -> str:
    """Search Wikipedia for information about a topic.

    Tries up to three search hits, resolving disambiguation pages to their
    first suggested option, and returns the first page's title, URL and a
    summary truncated to 1000 characters.  All failures are reported as a
    string (never raised) so the agent loop keeps running.
    """
    try:
        with tool_span("wikipedia_search", metadata={"query": query}):
            search_results = wikipedia.search(query, results=3)
            if not search_results:
                return f"No Wikipedia results found for '{query}'"

            for page_title in search_results:
                try:
                    page = wikipedia.page(page_title)
                except wikipedia.exceptions.DisambiguationError as e:
                    # Ambiguous title: fall back to the first suggested option.
                    try:
                        page = wikipedia.page(e.options[0])
                    except Exception:
                        continue
                except Exception:
                    # PageError, network hiccup, etc. -- try the next hit.
                    continue

                summary = page.summary
                if len(summary) > 1000:
                    summary = summary[:1000] + "..."
                return f"Wikipedia: {page.title}\n\nURL: {page.url}\n\nSummary:\n{summary}"

            return f"Could not retrieve Wikipedia content for '{query}'"

    except Exception as e:
        # Boundary handler: the tool contract is "always return a string".
        return f"Wikipedia search failed: {str(e)}"
80
+
81
+
82
@tool("arxiv_search", args_schema=ArxivSearchInput)
def arxiv_search_tool(query: str) -> str:
    """Search ArXiv for academic papers.

    Returns up to 3 relevance-sorted papers (title, authors, date, URL,
    500-char abstract) as a single formatted string; failures are reported
    as a string rather than raised.
    """
    try:
        with tool_span("arxiv_search", metadata={"query": query}):
            search = arxiv.Search(
                query=query,
                max_results=3,
                sort_by=arxiv.SortCriterion.Relevance,
            )

            # arxiv>=2.0 deprecates Search.results(); requests go through a Client.
            client = arxiv.Client()
            results = []
            for paper in client.results(search):
                results.append(
                    f"""Title: {paper.title}
Authors: {', '.join(author.name for author in paper.authors)}
Published: {paper.published.strftime('%Y-%m-%d')}
URL: {paper.entry_id}
Summary: {paper.summary[:500]}..."""
                )

            if results:
                return f"ArXiv Search Results for '{query}':\n\n" + "\n\n---\n\n".join(results)
            return f"No ArXiv papers found for '{query}'"

    except Exception as e:
        return f"ArXiv search failed: {str(e)}"
110
+
111
+
112
@tool("huggingface_hub_stats", args_schema=HubStatsInput)
def huggingface_hub_stats_tool(author: str) -> str:
    """Get statistics for a Hugging Face Hub author.

    Lists the author's top 5 models by downloads and highlights the most
    popular one.  Failures are returned as a string, never raised.
    """
    try:
        with tool_span("huggingface_hub_stats", metadata={"author": author}):
            models = list(list_models(author=author, sort="downloads", direction=-1, limit=5))
            if not models:
                return f"No models found for author '{author}'"

            lines = []
            for i, model in enumerate(models, 1):
                # `downloads` can be None when the Hub response omits the field;
                # treat that as 0 so the thousands-separator format never crashes.
                downloads = model.downloads or 0
                lines.append(f"{i}. {model.id} - {downloads:,} downloads")

            top_model = models[0]
            summary = f"Top 5 models by {author}:\n" + "\n".join(lines)
            summary += f"\n\nMost popular: {top_model.id} with {top_model.downloads or 0:,} downloads"
            return summary

    except Exception as e:
        return f"Hub stats error: {str(e)}"
132
+
133
+
134
def get_tavily_search_tool() -> TavilySearch:
    """Get the Tavily search tool from LangChain community.

    Returns a configured ``TavilySearch`` instance for the research agent.
    """
    return TavilySearch(
        # langchain-tavily declares the key field as `tavily_api_key` (it also
        # falls back to the TAVILY_API_KEY env var on its own); `api_key` is not
        # a declared field on the model.
        tavily_api_key=os.getenv("TAVILY_API_KEY"),
        max_results=6,
        include_answer=True,
        include_raw_content=True,
        description="Search the web for current information and facts",
        # NOTE(review): research_agent (this repo) looks the tool up by the
        # legacy name "tavily_search_results_json", while TavilySearch defaults
        # to "tavily_search" -- pin the name so those lookups actually match.
        name="tavily_search_results_json",
    )
143
+
144
+
145
def get_calculator_tools() -> List[BaseTool]:
    """Get calculator tools as LangChain tools."""

    # Each helper is decorated in place; names and descriptions are the
    # LLM-visible contract, so they are kept exactly as published.

    @tool("add")
    def add(a: float, b: float) -> float:
        """Add two numbers."""
        return a + b

    @tool("subtract")
    def subtract(a: float, b: float) -> float:
        """Subtract two numbers."""
        return a - b

    @tool("multiply")
    def multiply(a: float, b: float) -> float:
        """Multiply two numbers."""
        return a * b

    @tool("divide")
    def divide(a: float, b: float) -> float:
        """Divide two numbers."""
        if b != 0:
            return a / b
        raise ValueError("Cannot divide by zero")

    @tool("modulus")
    def modulus(a: int, b: int) -> int:
        """Get the modulus of two integers."""
        if b != 0:
            return a % b
        raise ValueError("Cannot modulo by zero")

    # Preserve the published ordering of the toolset.
    return [multiply, add, subtract, divide, modulus]
178
+
179
+
180
def get_research_tools() -> List[BaseTool]:
    """Get all research tools for the research agent."""
    # Web search first, then encyclopedic and academic lookups.
    return [
        get_tavily_search_tool(),
        wikipedia_search_tool,
        arxiv_search_tool,
    ]
188
+
189
+
190
def get_code_tools() -> List[BaseTool]:
    """Get all code/computation tools for the code agent."""
    # Arithmetic helpers plus the Hugging Face Hub statistics lookup.
    return [*get_calculator_tools(), huggingface_hub_stats_tool]
195
+
196
+
197
def get_all_tools() -> List[BaseTool]:
    """Get all available tools."""
    return [*get_research_tools(), *get_code_tools()]
pyproject.toml CHANGED
@@ -35,4 +35,7 @@ dependencies = [
35
  "supabase>=2.15.3",
36
  "wikipedia>=1.4.0",
37
  "datasets>=2.19.1",
 
 
 
38
  ]
 
35
  "supabase>=2.15.3",
36
  "wikipedia>=1.4.0",
37
  "datasets>=2.19.1",
38
+ "arxiv>=2.2.0",
39
+ "langchain-tavily>=0.2.4",
40
+ "python-dotenv>=1.1.0",
41
  ]
test_langgraph_system.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Test script for the LangGraph multi-agent system with LangChain tools
4
+ """
5
+
6
+ import asyncio
7
+ import os
8
+ from dotenv import load_dotenv
9
+
10
+ # Load environment variables
11
+ load_dotenv("env.local")
12
+
13
async def test_langgraph_system():
    """Test the LangGraph system with a simple question"""

    print("🔧 Testing LangGraph System with LangChain Tools")
    print("=" * 60)

    try:
        from langgraph_agent_system import run_agent_system

        question = "What is 25 + 17?"
        print(f"📝 Test Query: {question}")
        print("-" * 40)

        # Drive the full multi-agent graph end to end.
        answer = await run_agent_system(
            query=question,
            user_id="test_user",
            session_id="test_session",
            max_iterations=2,
        )

        print("\n📊 Final Result:")
        print(answer)
        print("\n✅ Test completed successfully!")

    except Exception as e:
        print(f"❌ Test failed: {e}")
        import traceback
        traceback.print_exc()
44
+
45
+
46
async def test_research_tools():
    """Test the research tools separately"""

    print("\n🔍 Testing Research Tools")
    print("=" * 40)

    try:
        from langgraph_tools import get_research_tools

        tools = get_research_tools()
        print(f"✅ Loaded {len(tools)} research tools:")
        for tool in tools:
            print(f" - {tool.name}: {tool.description}")

        # Exercise the Wikipedia tool when it is registered.
        wiki_tool = None
        for candidate in tools:
            if candidate.name == "wikipedia_search":
                wiki_tool = candidate
                break
        if wiki_tool is not None:
            print("\n📚 Testing Wikipedia search...")
            outcome = wiki_tool.invoke({"query": "Python programming"})
            text = str(outcome)
            print(f"Wikipedia result length: {len(text)} characters")
            print(f"Preview: {text[:200]}...")

    except Exception as e:
        print(f"❌ Research tools test failed: {e}")
71
+
72
+
73
async def test_code_tools():
    """Test the code tools separately"""

    print("\n🧮 Testing Code Tools")
    print("=" * 40)

    try:
        from langgraph_tools import get_code_tools

        tools = get_code_tools()
        print(f"✅ Loaded {len(tools)} code tools:")
        for tool in tools:
            print(f" - {tool.name}: {tool.description}")

        # Smoke-test the addition tool if present.
        add_tool = None
        for candidate in tools:
            if candidate.name == "add":
                add_tool = candidate
                break
        if add_tool is not None:
            print("\n➕ Testing addition...")
            total = add_tool.invoke({"a": 25, "b": 17})
            print(f"Addition result: {total}")

    except Exception as e:
        print(f"❌ Code tools test failed: {e}")
97
+
98
+
99
if __name__ == "__main__":
    async def _run_all() -> None:
        # Run the suites in dependency order: tools first, then the full graph.
        for suite in (test_research_tools, test_code_tools, test_langgraph_system):
            await suite()

    asyncio.run(_run_all())
uv.lock CHANGED
@@ -528,6 +528,7 @@ name = "final-assignment-template"
528
  version = "0.1.0"
529
  source = { virtual = "." }
530
  dependencies = [
 
531
  { name = "datasets" },
532
  { name = "dotenv" },
533
  { name = "gradio" },
@@ -541,6 +542,7 @@ dependencies = [
541
  { name = "langchain-groq" },
542
  { name = "langchain-huggingface" },
543
  { name = "langchain-openai" },
 
544
  { name = "langfuse" },
545
  { name = "langgraph" },
546
  { name = "langgraph-checkpoint" },
@@ -554,6 +556,7 @@ dependencies = [
554
  { name = "llama-index-tools-tavily-research" },
555
  { name = "opencv-python" },
556
  { name = "pandas" },
 
557
  { name = "rich" },
558
  { name = "sentence-transformers" },
559
  { name = "supabase" },
@@ -562,6 +565,7 @@ dependencies = [
562
 
563
  [package.metadata]
564
  requires-dist = [
 
565
  { name = "datasets", specifier = ">=2.19.1" },
566
  { name = "dotenv", specifier = ">=0.9.9" },
567
  { name = "gradio", specifier = ">=5.34.1" },
@@ -575,6 +579,7 @@ requires-dist = [
575
  { name = "langchain-groq", specifier = ">=0.3.2" },
576
  { name = "langchain-huggingface", specifier = ">=0.3.0" },
577
  { name = "langchain-openai", specifier = ">=0.3.24" },
 
578
  { name = "langfuse", specifier = ">=3.0.0" },
579
  { name = "langgraph", specifier = ">=0.4.8" },
580
  { name = "langgraph-checkpoint", specifier = ">=2.1.0" },
@@ -588,6 +593,7 @@ requires-dist = [
588
  { name = "llama-index-tools-tavily-research", specifier = ">=0.3.0" },
589
  { name = "opencv-python", specifier = ">=4.11.0.86" },
590
  { name = "pandas", specifier = ">=2.2.3" },
 
591
  { name = "rich", specifier = ">=14.0.0" },
592
  { name = "sentence-transformers", specifier = ">=4.1.0" },
593
  { name = "supabase", specifier = ">=2.15.3" },
@@ -1348,6 +1354,22 @@ wheels = [
1348
  { url = "https://files.pythonhosted.org/packages/fc/9b/b8f86d78dbc651decd684ab938a1340e1ad3ba1dbcef805e12db65dee0ba/langchain_openai-0.3.24-py3-none-any.whl", hash = "sha256:3db7bb2964f86636276a8f4bbed4514daf13865b80896e547ff7ea13ce98e593", size = 68950 },
1349
  ]
1350
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1351
  [[package]]
1352
  name = "langchain-text-splitters"
1353
  version = "0.3.8"
@@ -1912,6 +1934,26 @@ wheels = [
1912
  { url = "https://files.pythonhosted.org/packages/da/d9/f7f9379981e39b8c2511c9e0326d212accacb82f12fbfdc1aa2ce2a7b2b6/multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3", size = 133351 },
1913
  ]
1914
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1915
  [[package]]
1916
  name = "mypy-extensions"
1917
  version = "1.1.0"
@@ -2348,6 +2390,15 @@ wheels = [
2348
  { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 },
2349
  ]
2350
 
 
 
 
 
 
 
 
 
 
2351
  [[package]]
2352
  name = "pexpect"
2353
  version = "4.9.0"
 
528
  version = "0.1.0"
529
  source = { virtual = "." }
530
  dependencies = [
531
+ { name = "arxiv" },
532
  { name = "datasets" },
533
  { name = "dotenv" },
534
  { name = "gradio" },
 
542
  { name = "langchain-groq" },
543
  { name = "langchain-huggingface" },
544
  { name = "langchain-openai" },
545
+ { name = "langchain-tavily" },
546
  { name = "langfuse" },
547
  { name = "langgraph" },
548
  { name = "langgraph-checkpoint" },
 
556
  { name = "llama-index-tools-tavily-research" },
557
  { name = "opencv-python" },
558
  { name = "pandas" },
559
+ { name = "python-dotenv" },
560
  { name = "rich" },
561
  { name = "sentence-transformers" },
562
  { name = "supabase" },
 
565
 
566
  [package.metadata]
567
  requires-dist = [
568
+ { name = "arxiv", specifier = ">=2.2.0" },
569
  { name = "datasets", specifier = ">=2.19.1" },
570
  { name = "dotenv", specifier = ">=0.9.9" },
571
  { name = "gradio", specifier = ">=5.34.1" },
 
579
  { name = "langchain-groq", specifier = ">=0.3.2" },
580
  { name = "langchain-huggingface", specifier = ">=0.3.0" },
581
  { name = "langchain-openai", specifier = ">=0.3.24" },
582
+ { name = "langchain-tavily", specifier = ">=0.2.4" },
583
  { name = "langfuse", specifier = ">=3.0.0" },
584
  { name = "langgraph", specifier = ">=0.4.8" },
585
  { name = "langgraph-checkpoint", specifier = ">=2.1.0" },
 
593
  { name = "llama-index-tools-tavily-research", specifier = ">=0.3.0" },
594
  { name = "opencv-python", specifier = ">=4.11.0.86" },
595
  { name = "pandas", specifier = ">=2.2.3" },
596
+ { name = "python-dotenv", specifier = ">=1.1.0" },
597
  { name = "rich", specifier = ">=14.0.0" },
598
  { name = "sentence-transformers", specifier = ">=4.1.0" },
599
  { name = "supabase", specifier = ">=2.15.3" },
 
1354
  { url = "https://files.pythonhosted.org/packages/fc/9b/b8f86d78dbc651decd684ab938a1340e1ad3ba1dbcef805e12db65dee0ba/langchain_openai-0.3.24-py3-none-any.whl", hash = "sha256:3db7bb2964f86636276a8f4bbed4514daf13865b80896e547ff7ea13ce98e593", size = 68950 },
1355
  ]
1356
 
1357
+ [[package]]
1358
+ name = "langchain-tavily"
1359
+ version = "0.2.4"
1360
+ source = { registry = "https://pypi.org/simple" }
1361
+ dependencies = [
1362
+ { name = "aiohttp" },
1363
+ { name = "langchain" },
1364
+ { name = "langchain-core" },
1365
+ { name = "mypy" },
1366
+ { name = "requests" },
1367
+ ]
1368
+ sdist = { url = "https://files.pythonhosted.org/packages/15/df/faf9b205c93a048b96d013a88623bf31f974cf8d2785fc877d239861f1e8/langchain_tavily-0.2.4.tar.gz", hash = "sha256:68281a47e2e45e857a0d8087478f2638ea7e7c3a8a61a00cc0a2e3b7541240ea", size = 20722 }
1369
+ wheels = [
1370
+ { url = "https://files.pythonhosted.org/packages/61/10/9c7fc924a4a099840d03cf7b58da337f9ca54949bb0cfd26fe5320959c6c/langchain_tavily-0.2.4-py3-none-any.whl", hash = "sha256:36a80f2dd331cd68e26f4f6e6c3602e2615df4da21da05d787b904f7a564d487", size = 24465 },
1371
+ ]
1372
+
1373
  [[package]]
1374
  name = "langchain-text-splitters"
1375
  version = "0.3.8"
 
1934
  { url = "https://files.pythonhosted.org/packages/da/d9/f7f9379981e39b8c2511c9e0326d212accacb82f12fbfdc1aa2ce2a7b2b6/multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3", size = 133351 },
1935
  ]
1936
 
1937
+ [[package]]
1938
+ name = "mypy"
1939
+ version = "1.16.1"
1940
+ source = { registry = "https://pypi.org/simple" }
1941
+ dependencies = [
1942
+ { name = "mypy-extensions" },
1943
+ { name = "pathspec" },
1944
+ { name = "typing-extensions" },
1945
+ ]
1946
+ sdist = { url = "https://files.pythonhosted.org/packages/81/69/92c7fa98112e4d9eb075a239caa4ef4649ad7d441545ccffbd5e34607cbb/mypy-1.16.1.tar.gz", hash = "sha256:6bd00a0a2094841c5e47e7374bb42b83d64c527a502e3334e1173a0c24437bab", size = 3324747 }
1947
+ wheels = [
1948
+ { url = "https://files.pythonhosted.org/packages/28/e3/96964af4a75a949e67df4b95318fe2b7427ac8189bbc3ef28f92a1c5bc56/mypy-1.16.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ddc91eb318c8751c69ddb200a5937f1232ee8efb4e64e9f4bc475a33719de438", size = 11063480 },
1949
+ { url = "https://files.pythonhosted.org/packages/f5/4d/cd1a42b8e5be278fab7010fb289d9307a63e07153f0ae1510a3d7b703193/mypy-1.16.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:87ff2c13d58bdc4bbe7dc0dedfe622c0f04e2cb2a492269f3b418df2de05c536", size = 10090538 },
1950
+ { url = "https://files.pythonhosted.org/packages/c9/4f/c3c6b4b66374b5f68bab07c8cabd63a049ff69796b844bc759a0ca99bb2a/mypy-1.16.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a7cfb0fe29fe5a9841b7c8ee6dffb52382c45acdf68f032145b75620acfbd6f", size = 11836839 },
1951
+ { url = "https://files.pythonhosted.org/packages/b4/7e/81ca3b074021ad9775e5cb97ebe0089c0f13684b066a750b7dc208438403/mypy-1.16.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:051e1677689c9d9578b9c7f4d206d763f9bbd95723cd1416fad50db49d52f359", size = 12715634 },
1952
+ { url = "https://files.pythonhosted.org/packages/e9/95/bdd40c8be346fa4c70edb4081d727a54d0a05382d84966869738cfa8a497/mypy-1.16.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d5d2309511cc56c021b4b4e462907c2b12f669b2dbeb68300110ec27723971be", size = 12895584 },
1953
+ { url = "https://files.pythonhosted.org/packages/5a/fd/d486a0827a1c597b3b48b1bdef47228a6e9ee8102ab8c28f944cb83b65dc/mypy-1.16.1-cp313-cp313-win_amd64.whl", hash = "sha256:4f58ac32771341e38a853c5d0ec0dfe27e18e27da9cdb8bbc882d2249c71a3ee", size = 9573886 },
1954
+ { url = "https://files.pythonhosted.org/packages/cf/d3/53e684e78e07c1a2bf7105715e5edd09ce951fc3f47cf9ed095ec1b7a037/mypy-1.16.1-py3-none-any.whl", hash = "sha256:5fc2ac4027d0ef28d6ba69a0343737a23c4d1b83672bf38d1fe237bdc0643b37", size = 2265923 },
1955
+ ]
1956
+
1957
  [[package]]
1958
  name = "mypy-extensions"
1959
  version = "1.1.0"
 
2390
  { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 },
2391
  ]
2392
 
2393
+ [[package]]
2394
+ name = "pathspec"
2395
+ version = "0.12.1"
2396
+ source = { registry = "https://pypi.org/simple" }
2397
+ sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043 }
2398
+ wheels = [
2399
+ { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191 },
2400
+ ]
2401
+
2402
  [[package]]
2403
  name = "pexpect"
2404
  version = "4.9.0"