File size: 10,991 Bytes
f844f16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3708220
f844f16
 
 
3708220
 
f844f16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
848db06
 
 
3708220
 
f844f16
 
 
848db06
f844f16
 
3708220
 
 
f844f16
 
 
 
 
3708220
f844f16
 
 
 
 
 
 
 
 
 
3708220
f844f16
 
 
 
 
 
3708220
 
 
 
 
f844f16
 
 
 
 
3708220
f844f16
 
 
 
 
 
 
 
 
 
 
 
 
3708220
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
848db06
3708220
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
848db06
 
f844f16
848db06
3708220
f844f16
 
 
 
848db06
 
 
 
 
f844f16
848db06
f844f16
3708220
 
 
 
 
 
 
 
 
 
 
f844f16
 
848db06
 
3708220
 
 
 
 
 
 
 
 
 
 
 
f844f16
 
 
 
 
 
 
 
 
 
3708220
f844f16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
"""
Research Agent - Information gathering and research tasks

The Research Agent is responsible for:
1. Gathering information from multiple sources (web, Wikipedia, arXiv)
2. Searching for relevant context and facts
3. Compiling research results in a structured format
4. Returning citations and source information
"""

import os
from typing import Dict, Any, List
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage
from langgraph.types import Command
from langchain_groq import ChatGroq
from langchain_core.tools import BaseTool
from observability import agent_span, tool_span
from dotenv import load_dotenv

# Import LangChain-compatible tools
from langgraph_tools import get_research_tools

load_dotenv("env.local")


def load_research_prompt() -> str:
    """Load the research system prompt from disk, with a built-in fallback.

    Reads ``archive/prompts/retrieval_prompt.txt`` as UTF-8. If the file is
    missing, returns a hard-coded default prompt so the agent still works
    without the archive directory present.

    Returns:
        The prompt text to use as the research agent's system message.
    """
    try:
        # Explicit encoding avoids platform-dependent default-codec surprises
        # (e.g. cp1252 on Windows mangling non-ASCII prompt text).
        with open("archive/prompts/retrieval_prompt.txt", "r", encoding="utf-8") as f:
            return f.read()
    except FileNotFoundError:
        return """
You are a research specialist focused on gathering accurate information.

Your goals:
1. Search for factual, current, and relevant information
2. Use multiple sources to verify facts
3. Provide clear citations and sources
4. Structure findings in an organized manner

When researching:
- Use web search for current information and facts
- Use Wikipedia for encyclopedic knowledge  
- Use ArXiv for academic and technical topics
- Cross-reference information across sources
- Note any conflicting information found

Format your response as:
### Research Strategy
[Describe what searches are needed]

### Findings
[Key information discovered]

### Key Facts
- Fact 1
- Fact 2  
- Fact 3

### Sources
- Source 1
- Source 2
"""


def _extract_user_query(messages: List[BaseMessage]) -> str:
    """Return the content of the first HumanMessage in *messages*, or ''."""
    for msg in messages:
        if isinstance(msg, HumanMessage):
            return msg.content
    return ""


def _execute_tool_calls(response: Any, research_tools: List[BaseTool]) -> List[str]:
    """Execute each tool call requested by the LLM; return formatted results.

    Failures are captured as error strings rather than raised, so one bad
    tool call does not abort the whole research pass.
    """
    tool_results: List[str] = []
    for tool_call in response.tool_calls:
        try:
            # Find the tool by name
            tool = next((t for t in research_tools if t.name == tool_call['name']), None)
            if tool:
                # Execute the tool inside a tracing span
                with tool_span(tool.name, metadata={"args": tool_call.get('args', {})}) as tool_span_ctx:
                    result = tool.invoke(tool_call.get('args', {}))
                    tool_results.append(f"**{tool.name}**: {result}")
                    if tool_span_ctx:
                        tool_span_ctx.update_trace(output={"result": str(result)[:200] + "..."})
            else:
                tool_results.append(f"**{tool_call['name']}**: Tool not found")

        except Exception as e:
            print(f"⚠️  Tool {tool_call.get('name', 'unknown')} failed: {e}")
            tool_results.append(f"**{tool_call.get('name', 'unknown')}**: Error - {str(e)}")
    return tool_results


def _force_research(research_tools: List[BaseTool], user_query: str) -> List[str]:
    """Fallback: directly invoke web/Wikipedia search when the LLM made no tool calls.

    Consolidates the previously duplicated per-tool fallback blocks into one
    data-driven loop; the emitted strings are unchanged.
    """
    forced_tools: List[str] = []
    # (tool name, human-readable label, progress message) pairs, tried in order.
    fallback_plan = [
        ("tavily_search_results_json", "Tavily", "🌐 Forcing Tavily web search..."),
        ("wikipedia_search", "Wikipedia", "πŸ“š Forcing Wikipedia search..."),
    ]
    for tool_name, label, progress_msg in fallback_plan:
        tool = next((t for t in research_tools if t.name == tool_name), None)
        if tool is None:
            continue
        try:
            print(progress_msg)
            result = tool.invoke({"query": user_query})
            forced_tools.append(f"**{tool_name} (forced)**: {result}")
        except Exception as e:
            print(f"⚠️  Forced {label} search failed: {e}")
            forced_tools.append(f"**{tool_name} (forced)**: Error - {str(e)}")
    return forced_tools


def research_agent(state: Dict[str, Any]) -> Command:
    """
    Research Agent node that gathers information using LangChain tools.

    Flow:
    1. Ask a tool-bound LLM to research the user's question.
    2. Execute any tool calls it makes; if it makes none, force web and
       Wikipedia searches directly (see _force_research).
    3. Have the LLM analyze the tool output into structured findings.

    Returns Command routing back to "lead" with the findings in
    research_notes; on any error, returns a Command carrying an error note
    instead of raising, so the graph keeps running.
    """

    print("πŸ” Research Agent: Gathering information...")

    try:
        # Get research prompt
        research_prompt = load_research_prompt()

        # Initialize LLM with tool binding
        llm = ChatGroq(
            model="llama-3.3-70b-versatile",
            temperature=0.3,  # Slightly higher for research creativity
            max_tokens=2048
        )

        # Get LangChain-compatible research tools
        research_tools = get_research_tools()

        # Bind tools to LLM for function calling
        llm_with_tools = llm.bind_tools(research_tools)

        # Create agent span for tracing
        with agent_span(
            "research",
            metadata={
                "tools_available": len(research_tools),
                "user_id": state.get("user_id", "unknown"),
                "session_id": state.get("session_id", "unknown")
            }
        ) as span:

            # Extract user query from conversation history
            user_query = _extract_user_query(state.get("messages", []))

            # Build research request
            research_request = f"""
You must research the following question using the available tools. Do not answer from memory alone.

Question: {user_query}

Current research status: {len(state.get('research_notes', ''))} characters already gathered

CRITICAL: You MUST use the available research tools to gather information. Do not provide an answer without using tools.

Available tools:
- tavily_search_results_json: For current web information
- wikipedia_search: For encyclopedic knowledge  
- arxiv_search: For academic papers

Instructions:
1. ALWAYS use tavily_search_results_json for current information
2. Use wikipedia_search for general knowledge topics
3. Use arxiv_search for academic/technical topics if relevant
4. You must call at least one tool - preferably multiple tools
5. Analyze and synthesize the information from the tools
6. Provide structured findings with sources

Start by calling the appropriate research tools to gather information about this question.
"""

            # Create messages for research
            research_messages = [
                SystemMessage(content=research_prompt),
                HumanMessage(content=research_request)
            ]

            # Get research response with tool calls
            response = llm_with_tools.invoke(research_messages)

            # Debug: Check if tools were called
            print(f"πŸ” Research response type: {type(response)}")
            print(f"πŸ” Has tool_calls attribute: {hasattr(response, 'tool_calls')}")
            if hasattr(response, 'tool_calls'):
                print(f"πŸ” Tool calls: {response.tool_calls}")
            else:
                print(f"πŸ” Response content preview: {str(response)[:200]}...")

            # Execute requested tools, or fall back to forced searches
            if hasattr(response, 'tool_calls') and response.tool_calls:
                print(f"πŸ› οΈ  Executing {len(response.tool_calls)} research tools")
                tool_results = _execute_tool_calls(response, research_tools)
            else:
                print("⚠️  No tool calls detected - LLM did not choose to use any tools")
                # Force tool usage for research questions
                print("πŸ”§ Forcing tool usage for research...")
                tool_results = _force_research(research_tools, user_query)

            # Compile research findings
            if tool_results:
                research_findings = "\n\n".join(tool_results)

                # Ask LLM (without tools bound) to analyze the tool results
                analysis_request = f"""
Based on the research results below, provide a structured analysis:

Original Question: {user_query}

Research Results:
{research_findings}

Current research status: {len(state.get('research_notes', ''))} characters already gathered

Instructions:
1. Analyze the search results for relevant information
2. Extract key facts that help answer the question
3. Note any important details or findings
4. Identify if additional specific searches might be needed
5. Structure your findings clearly with citations

Please provide a comprehensive analysis of the research findings.
"""

                analysis_messages = [
                    SystemMessage(content=research_prompt),
                    HumanMessage(content=analysis_request)
                ]

                analysis_response = llm.invoke(analysis_messages)
                analysis_content = analysis_response.content if hasattr(analysis_response, 'content') else str(analysis_response)

                # Format final research results
                formatted_results = f"""
### Research Iteration {state.get('loop_counter', 0) + 1}

{analysis_content}

### Raw Tool Results
{research_findings}

---
"""
            else:
                # No tools were called, use the LLM response directly
                response_content = response.content if hasattr(response, 'content') else str(response)
                formatted_results = f"""
### Research Iteration {state.get('loop_counter', 0) + 1}

{response_content}

---
"""

            print(f"πŸ“ Research Agent: Gathered {len(formatted_results)} characters")

            # Update trace
            if span:
                span.update_trace(output={
                    "research_length": len(formatted_results),
                    "tools_used": len(tool_results),
                    "findings_preview": formatted_results[:300] + "..."
                })

            # Return command to proceed back to lead agent for decision
            return Command(
                goto="lead",
                update={
                    "research_notes": formatted_results
                }
            )

    except Exception as e:
        print(f"❌ Research Agent Error: {e}")

        # Return with error information so the graph keeps running
        error_result = f"""
### Research Error
An error occurred during research: {str(e)}

"""
        return Command(
            goto="lead",
            update={
                "research_notes": error_result
            }
        )