File size: 4,920 Bytes
2e38934
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a605490
2e38934
a605490
2e38934
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
from typing import Literal

from langchain.chat_models import init_chat_model
from langchain.tools import tool
from langchain_core.messages import SystemMessage, ToolMessage, filter_messages, HumanMessage
from langgraph.graph import StateGraph, START, END

from core.state import MathAgentState, MathAgentOutputState
from tools.python_executor import execute_python_code
from tools.think_tool import think_tool
from utils.prompt_manager import prompt_mgmt

# ===== CONFIGURATION =====

# Set up tools and model binding
# Tool registry: tool_node dispatches model-emitted tool calls by name,
# so every tool the model may request must appear here.
tools = [execute_python_code, think_tool]
tools_by_name = {tool.name: tool for tool in tools}

# Initialize models
# Primary solver model; bound to the tool set so it can emit tool calls.
math_model = init_chat_model(model="openai:gpt-5")
model_with_tools = math_model.bind_tools(tools)
# NOTE(review): summarization_model and compress_model are initialized but
# not referenced anywhere in this file — presumably reserved for future
# summarization/compression steps; confirm before removing.
summarization_model = init_chat_model(model="openai:gpt-4.1-mini")
compress_model = init_chat_model(model="openai:gpt-4.1",
                                 max_tokens=32000)


def llm_call(state: MathAgentState):
    """Run the tool-enabled math model over the conversation so far.

    Renders the "math_agent_base_system" prompt template, prepends it as a
    SystemMessage to the accumulated message history, and invokes the
    tool-bound model. The model either emits tool calls (continuing the
    solving loop) or a plain final answer.

    Returns:
        State update appending the model's response message.
    """
    system_prompt = prompt_mgmt.render_template("math_agent_base_system", {})
    conversation = [SystemMessage(content=system_prompt), *state["messages"]]
    response = model_with_tools.invoke(conversation)
    return {"messages": [response]}


def tool_node(state: MathAgentState):
    """Execute every tool call requested by the most recent LLM message.

    Each call is dispatched through ``tools_by_name`` with its supplied
    arguments, and the result is wrapped in a ToolMessage linked back to
    its originating tool_call_id so the model can match request to result.

    Returns:
        State update appending one ToolMessage per executed tool call,
        in the order the calls were made.
    """
    results = []
    for call in state["messages"][-1].tool_calls:
        output = tools_by_name[call["name"]].invoke(call["args"])
        results.append(
            ToolMessage(
                content=output,
                name=call["name"],
                tool_call_id=call["id"],
            )
        )
    return {"messages": results}


def compress_research(state: MathAgentState) -> dict:
    """Collect the agent's final answer and its raw working notes.

    Despite the name, no LLM compression happens here: the content of the
    last message (the model's final, tool-call-free response) is used
    verbatim as the "compressed" research, and the contents of all tool
    and AI messages are joined into a single raw-notes string.

    The original ``state.get("messages", [])[-1]`` only looked defensive —
    indexing ``[-1]`` on the empty default still raised IndexError — so an
    explicit empty-history guard is added here.

    Returns:
        dict with "compressed_research" (final answer text) and
        "raw_notes" (single-element list of joined tool/AI contents).
    """
    messages = state.get("messages", [])
    if not messages:
        # Nothing to summarize — return empty results instead of crashing.
        return {"compressed_research": "", "raw_notes": [""]}

    # Extract raw notes from tool and AI messages
    raw_notes = [
        str(m.content)
        for m in filter_messages(messages, include_types=["tool", "ai"])
    ]

    return {
        "compressed_research": str(messages[-1].content),
        "raw_notes": ["\n".join(raw_notes)],
    }


def should_continue(state: MathAgentState) -> Literal["tool_node", "compress_research"]:
    """Route after the LLM call: keep solving or wrap up.

    Inspects the latest message: pending tool calls mean the solving loop
    continues with tool execution; no tool calls means the model produced
    its final answer and control moves to the compression step.

    Returns:
        "tool_node" when the last message carries tool calls,
        "compress_research" otherwise.
    """
    latest = state["messages"][-1]
    return "tool_node" if latest.tool_calls else "compress_research"


# ===== GRAPH CONSTRUCTION =====

# Build the agent workflow: an LLM/tool loop that ends with a compression
# step. Output is narrowed to MathAgentOutputState for callers.
agent_builder = StateGraph(MathAgentState, output_schema=MathAgentOutputState)

# Add nodes to the graph
agent_builder.add_node("llm_call", llm_call)
agent_builder.add_node("tool_node", tool_node)
agent_builder.add_node("compress_research", compress_research)

# Add edges to connect nodes
agent_builder.add_edge(START, "llm_call")
# After each LLM turn, should_continue routes on whether tool calls exist.
agent_builder.add_conditional_edges(
    "llm_call",
    should_continue,
    {
        "tool_node": "tool_node",  # Continue research loop
        "compress_research": "compress_research",  # Provide final answer
    },
)
agent_builder.add_edge("tool_node", "llm_call")  # Loop back for more research
agent_builder.add_edge("compress_research", END)

# Compile the agent
math_agent = agent_builder.compile()


@tool
def math_tool(problem: str):
    """
    Tool for solving a mathematical problem
    :param problem: The problem to be solved
    :return: the solution to the given problem
    """
    # Seed the sub-agent's state with the problem both as the first chat
    # message and as the dedicated "question" field.
    initial_state = {
        "messages": [HumanMessage(content=problem)],
        "question": problem,
    }
    result = math_agent.invoke(initial_state)
    # Return a plain string: the prebuilt create_react_agent (supervisor)
    # wraps string tool results in a ToolMessage automatically.
    return result["compressed_research"]