File size: 2,903 Bytes
fe36046
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
"""Plan Node - Initial ReAct planning loop"""
from typing import Dict, Any
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_groq import ChatGroq
from src.tracing import get_langfuse_callback_handler


def load_system_prompt(path: str = "./prompts/system_prompt.txt") -> str:
    """Load the system prompt text from *path*.

    Args:
        path: Location of the prompt file. Defaults to the project's
            standard prompt location, so existing callers are unaffected.

    Returns:
        The file contents with surrounding whitespace stripped, or a
        generic fallback prompt when the file is missing or unreadable.
    """
    try:
        with open(path, "r", encoding="utf-8") as f:
            return f.read().strip()
    except OSError:
        # A missing or unreadable prompt file must not crash planning;
        # fall back to a generic assistant prompt instead.
        return "You are a helpful assistant tasked with answering GAIA benchmark questions."


def plan_node(state: Dict[str, Any]) -> Dict[str, Any]:
    """
    Initial planning node: prepends the system prompt to the conversation,
    asks the LLM for a brief analysis/plan of the query, and marks the
    state ready for agent routing.

    Args:
        state: Graph state dict. Reads the "messages" list (LangChain
            message objects exposing a ``.type`` attribute).

    Returns:
        Partial state update containing the planned "messages" list plus
        ``plan_complete=True`` and ``current_step="routing"``. On any
        error, returns a best-effort fallback state so routing can
        still proceed.
    """
    print("Plan Node: Processing query")

    try:
        # Get the system prompt
        system_prompt = load_system_prompt()

        # LLM used only for the initial analysis/plan step.
        llm = ChatGroq(model="qwen-qwq-32b", temperature=0.1)

        # Optional Langfuse tracing; a missing handler means no callbacks.
        callback_handler = get_langfuse_callback_handler()
        callbacks = [callback_handler] if callback_handler else []

        messages = state.get("messages", [])
        if not messages:
            # Nothing to plan for yet — seed the state with the system prompt.
            return {"messages": [SystemMessage(content=system_prompt)]}

        # Exactly one system message, placed first; drop any system
        # messages already present to avoid duplicates.
        plan_messages = [SystemMessage(content=system_prompt)]
        plan_messages.extend(msg for msg in messages if msg.type != "system")

        planning_instruction = """
        Analyze this query and prepare a plan for answering it. Consider:
        1. What type of information or processing is needed?
        2. What tools or agents would be most appropriate?
        3. What is the expected output format?
        
        Provide a brief analysis and initial plan.
        """

        # Only ask the LLM for a plan when the latest turn is from the user.
        if plan_messages and plan_messages[-1].type == "human":
            analysis_messages = plan_messages + [HumanMessage(content=planning_instruction)]
            response = llm.invoke(analysis_messages, config={"callbacks": callbacks})
            plan_messages.append(response)

        return {
            "messages": plan_messages,
            "plan_complete": True,
            "current_step": "routing",
        }

    except Exception as e:
        # Best-effort fallback: keep the pipeline moving with a single
        # system prompt plus the original messages. Filter out any
        # pre-existing system messages here too, matching the happy path
        # above, so the fallback cannot emit duplicate system prompts.
        print(f"Plan Node Error: {e}")
        system_prompt = load_system_prompt()
        fallback_messages = [SystemMessage(content=system_prompt)] + [
            msg for msg in state.get("messages", []) if msg.type != "system"
        ]
        return {
            "messages": fallback_messages,
            "plan_complete": True,
            "current_step": "routing",
        }