Commit
·
2e38934
1
Parent(s):
0f45d0b
Added math sub agent
Browse files- config/prompts.yaml +51 -6
- core/state.py +29 -0
- nodes/nodes.py +3 -1
- tools/math_agent.py +163 -0
- tools/think_tool.py +29 -0
- utils/prompt_manager.py +1 -0
config/prompts.yaml
CHANGED
|
@@ -2,8 +2,12 @@ prompts:
|
|
| 2 |
base_system:
|
| 3 |
content: |
|
| 4 |
You are a general AI assistant tasked with answering complex questions.
|
| 5 |
-
|
| 6 |
Make sure you think step by step in order to answer the given question.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
|
| 8 |
{{attachment}}
|
| 9 |
|
|
@@ -12,8 +16,10 @@ prompts:
|
|
| 12 |
{{summary}}
|
| 13 |
</summary>
|
| 14 |
|
|
|
|
|
|
|
| 15 |
Include citations for all the information you retrieve, ensuring you know exactly where the data comes from.
|
| 16 |
-
If you have the information inside your knowledge, still call a tool in order to confirm it.
|
| 17 |
|
| 18 |
**Guidelines for Conducting Research:**
|
| 19 |
|
|
@@ -61,11 +67,10 @@ prompts:
|
|
| 61 |
|
| 62 |
If the value of chunked_last_tool_call is true, this means that the last tool execution returns a result formed from the concatenation
|
| 63 |
of multiple chunks.
|
| 64 |
-
Current value of the chunked_last_tool_call is {{chunked_last_tool_call}}
|
| 65 |
-
|
| 66 |
If you generate python code make sure you print the value of the variable you are interested in.
|
| 67 |
type: base_system
|
| 68 |
-
variables: ["summary", "chunked_last_tool_call", "attachment"]
|
| 69 |
version: 1.0
|
| 70 |
description: "Core system prompt for all interactions"
|
| 71 |
final_answer_processor:
|
|
@@ -100,4 +105,44 @@ prompts:
|
|
| 100 |
type: tool
|
| 101 |
variables: []
|
| 102 |
version: 1.0
|
| 103 |
-
description: "Prompt for audio tool"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
base_system:
|
| 3 |
content: |
|
| 4 |
You are a general AI assistant tasked with answering complex questions.
|
|
|
|
| 5 |
Make sure you think step by step in order to answer the given question.
|
| 6 |
+
|
| 7 |
+
Here is the question you received:
|
| 8 |
+
<question>
|
| 9 |
+
{{question}}
|
| 10 |
+
</question>
|
| 11 |
|
| 12 |
{{attachment}}
|
| 13 |
|
|
|
|
| 16 |
{{summary}}
|
| 17 |
</summary>
|
| 18 |
|
| 19 |
+
For mathematical questions or problems delegate them to the math_tool.
|
| 20 |
+
|
| 21 |
Include citations for all the information you retrieve, ensuring you know exactly where the data comes from.
|
| 22 |
+
If you have the information inside your knowledge, still call a tool in order to confirm it.
|
| 23 |
|
| 24 |
**Guidelines for Conducting Research:**
|
| 25 |
|
|
|
|
| 67 |
|
| 68 |
If the value of chunked_last_tool_call is true, this means that the last tool execution returns a result formed from the concatenation
|
| 69 |
of multiple chunks.
|
| 70 |
+
Current value of the chunked_last_tool_call is {{chunked_last_tool_call}}
|
|
|
|
| 71 |
If you generate python code make sure you print the value of the variable you are interested in.
|
| 72 |
type: base_system
|
| 73 |
+
variables: ["summary", "chunked_last_tool_call", "attachment", "question"]
|
| 74 |
version: 1.0
|
| 75 |
description: "Core system prompt for all interactions"
|
| 76 |
final_answer_processor:
|
|
|
|
| 105 |
type: tool
|
| 106 |
variables: []
|
| 107 |
version: 1.0
|
| 108 |
+
description: "Prompt for audio tool"
|
| 109 |
+
math_agent_base_system:
|
| 110 |
+
content: |
|
| 111 |
+
You are a math assistant working on the user's input problem.
|
| 112 |
+
|
| 113 |
+
<Task>
|
| 114 |
+
Your job is to use tools to gather information about the user's input topic.
|
| 115 |
+
You can use any of the tools provided to you to find resources that can help answer the research question. You can call these tools in series or in parallel, your research is conducted in a tool-calling loop.
|
| 116 |
+
</Task>
|
| 117 |
+
|
| 118 |
+
<Available Tools>
|
| 119 |
+
You have access to two main tools:
|
| 120 |
+
1. **execute_python_code**: For executing python code
|
| 121 |
+
2. **think_tool**: For reflection and strategic planning during problem solving
|
| 122 |
+
|
| 123 |
+
**CRITICAL: Use think_tool after each tool call to reflect on results and plan next steps**
|
| 124 |
+
If you generate python code make sure you print the value of the variable you are interested in.
|
| 125 |
+
</Available Tools>
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
<Hard Limits>
|
| 129 |
+
**Tool Call Budgets** (Prevent excessive searching):
|
| 130 |
+
- **Simple queries**: Use 2-3 tool calls maximum
|
| 131 |
+
- **Complex queries**: Use up to 5 tool calls maximum
|
| 132 |
+
- **Always stop**: After 5 python tool calls if you cannot find the right sources
|
| 133 |
+
|
| 134 |
+
**Stop Immediately When**:
|
| 135 |
+
- You can answer the user's question comprehensively
|
| 136 |
+
</Hard Limits>
|
| 137 |
+
|
| 138 |
+
<Show Your Thinking>
|
| 139 |
+
After each tool call, use think_tool to analyze the results:
|
| 140 |
+
- What key information did I find?
|
| 141 |
+
- What's missing?
|
| 142 |
+
- Do I have enough to answer the question comprehensively?
|
| 143 |
+
- Should I search more or provide my answer?
|
| 144 |
+
</Show Your Thinking>
|
| 145 |
+
type: sub_agent
|
| 146 |
+
variables: []
|
| 147 |
+
version: 1.0
|
| 148 |
+
description: "Core system prompt for the math agent"
|
core/state.py
CHANGED
|
@@ -1,4 +1,8 @@
|
|
| 1 |
from langgraph.graph import MessagesState
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
|
| 4 |
class State(MessagesState):
|
|
@@ -6,3 +10,28 @@ class State(MessagesState):
|
|
| 6 |
question: str
|
| 7 |
chunked_last_tool_call: bool
|
| 8 |
file_reference: str # Attachment file reference: a path, URL, or unique ID
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
from langgraph.graph import MessagesState
|
| 2 |
+
import operator
|
| 3 |
+
from typing_extensions import TypedDict, Annotated, List, Sequence
|
| 4 |
+
from langchain_core.messages import BaseMessage
|
| 5 |
+
from langgraph.graph.message import add_messages
|
| 6 |
|
| 7 |
|
| 8 |
class State(MessagesState):
|
|
|
|
| 10 |
question: str
|
| 11 |
chunked_last_tool_call: bool
|
| 12 |
file_reference: str # Attachment file reference: a path, URL, or unique ID
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class MathAgentState(MessagesState):
    """
    State for the math agent containing message history and solving metadata.

    This state tracks the agent's conversation, an iteration count for
    limiting tool calls, the question being solved, compressed findings,
    and raw notes for detailed analysis.
    """
    # Number of tool-calling loop iterations performed so far.
    tool_call_iterations: int
    # The math problem handed to the sub-agent.
    question: str
    # Concise summary of the solving process / final answer.
    compressed_research: str
    # Accumulated note strings; operator.add merges updates across graph steps.
    raw_notes: Annotated[List[str], operator.add]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class MathAgentOutputState(MessagesState):
    """
    Output state for the math agent containing final results.

    This represents the final output of the solving process: the compressed
    summary plus raw notes gathered while solving.
    """
    # Final compressed summary returned to the caller.
    compressed_research: str
    # Raw note strings from the solving trace; operator.add merges updates.
    raw_notes: Annotated[List[str], operator.add]
|
nodes/nodes.py
CHANGED
|
@@ -10,6 +10,7 @@ from core.state import State
|
|
| 10 |
from nodes.chunking_node import OversizedContentHandler
|
| 11 |
from tools.audio_tool import query_audio
|
| 12 |
from tools.excel_tool import query_excel_file
|
|
|
|
| 13 |
from tools.python_executor import execute_python_code
|
| 14 |
from tools.tavily_tools import llm_tools
|
| 15 |
from utils.prompt_manager import prompt_mgmt
|
|
@@ -19,6 +20,7 @@ response_processing_model = ChatOpenAI(model="gpt-4.1-mini")
|
|
| 19 |
llm_tools.append(query_audio)
|
| 20 |
llm_tools.append(query_excel_file)
|
| 21 |
llm_tools.append(execute_python_code)
|
|
|
|
| 22 |
model = model.bind_tools(llm_tools, parallel_tool_calls=False)
|
| 23 |
|
| 24 |
|
|
@@ -56,7 +58,7 @@ def assistant(state: State):
|
|
| 56 |
if file_reference:
|
| 57 |
attachment = f" you have access to the file with the following reference {file_reference}"
|
| 58 |
prompt_params = {"summary": summary, "chunked_last_tool_call": state.get("chunked_last_tool_call", False),
|
| 59 |
-
"attachment": attachment}
|
| 60 |
sys_msg = SystemMessage(content=prompt_mgmt.render_template("base_system", prompt_params))
|
| 61 |
try:
|
| 62 |
response = model.invoke([sys_msg] + state["messages"])
|
|
|
|
| 10 |
from nodes.chunking_node import OversizedContentHandler
|
| 11 |
from tools.audio_tool import query_audio
|
| 12 |
from tools.excel_tool import query_excel_file
|
| 13 |
+
from tools.math_agent import math_tool
|
| 14 |
from tools.python_executor import execute_python_code
|
| 15 |
from tools.tavily_tools import llm_tools
|
| 16 |
from utils.prompt_manager import prompt_mgmt
|
|
|
|
| 20 |
llm_tools.append(query_audio)
|
| 21 |
llm_tools.append(query_excel_file)
|
| 22 |
llm_tools.append(execute_python_code)
|
| 23 |
+
llm_tools.append(math_tool)
|
| 24 |
model = model.bind_tools(llm_tools, parallel_tool_calls=False)
|
| 25 |
|
| 26 |
|
|
|
|
| 58 |
if file_reference:
|
| 59 |
attachment = f" you have access to the file with the following reference {file_reference}"
|
| 60 |
prompt_params = {"summary": summary, "chunked_last_tool_call": state.get("chunked_last_tool_call", False),
|
| 61 |
+
"attachment": attachment, "question": question}
|
| 62 |
sys_msg = SystemMessage(content=prompt_mgmt.render_template("base_system", prompt_params))
|
| 63 |
try:
|
| 64 |
response = model.invoke([sys_msg] + state["messages"])
|
tools/math_agent.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Research Agent Implementation.
|
| 2 |
+
|
| 3 |
+
This module implements a research agent that can perform iterative web searches
|
| 4 |
+
and synthesis to answer complex research questions.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Literal
|
| 8 |
+
|
| 9 |
+
from langchain.chat_models import init_chat_model
|
| 10 |
+
from langchain.tools import tool
|
| 11 |
+
from langchain_core.messages import SystemMessage, ToolMessage, filter_messages, HumanMessage
|
| 12 |
+
from langgraph.graph import StateGraph, START, END
|
| 13 |
+
|
| 14 |
+
from core.state import MathAgentState, MathAgentOutputState
|
| 15 |
+
from tools.python_executor import execute_python_code
|
| 16 |
+
from tools.think_tool import think_tool
|
| 17 |
+
from utils.prompt_manager import prompt_mgmt
|
| 18 |
+
|
| 19 |
+
# ===== CONFIGURATION =====
|
| 20 |
+
|
| 21 |
+
# Set up tools and model binding
|
| 22 |
+
tools = [execute_python_code, think_tool]
|
| 23 |
+
tools_by_name = {tool.name: tool for tool in tools}
|
| 24 |
+
|
| 25 |
+
# Initialize models
|
| 26 |
+
math_model = init_chat_model(model="openai:gpt-5")
|
| 27 |
+
model_with_tools = math_model.bind_tools(tools)
|
| 28 |
+
summarization_model = init_chat_model(model="openai:gpt-4.1-mini")
|
| 29 |
+
compress_model = init_chat_model(model="openai:gpt-4.1",
|
| 30 |
+
max_tokens=32000)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# ===== AGENT NODES =====
|
| 34 |
+
|
| 35 |
+
def llm_call(state: MathAgentState):
    """Run the model on the conversation so far and record its reply.

    The model either emits tool calls (to keep working) or a plain
    answer (to finish); the routing decision happens in should_continue.

    Args:
        state: Current agent state; only ``state["messages"]`` is read.

    Returns:
        dict: Partial state update with the model response under "messages".
    """
    system_prompt = prompt_mgmt.render_template("math_agent_base_system", {})
    conversation = [SystemMessage(content=system_prompt)] + state["messages"]
    response = model_with_tools.invoke(conversation)
    return {"messages": [response]}
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def tool_node(state: MathAgentState):
    """Execute all tool calls from the previous LLM response.

    Looks up each requested tool by name, invokes it with the supplied
    arguments, and wraps the result in a ToolMessage tied back to the
    originating tool_call id.

    Args:
        state: Current agent state; the last message must be an AI message
            carrying a ``tool_calls`` list.

    Returns:
        dict: Partial state update with one ToolMessage per tool call.
    """
    tool_calls = state["messages"][-1].tool_calls

    # Single pass: invoke each tool and build its ToolMessage together, so
    # observation and call id stay paired by construction. The local is
    # named selected_tool (not `tool`) to avoid shadowing the imported
    # langchain `tool` decorator.
    tool_outputs = []
    for tool_call in tool_calls:
        selected_tool = tools_by_name[tool_call["name"]]
        observation = selected_tool.invoke(tool_call["args"])
        tool_outputs.append(
            ToolMessage(
                content=observation,
                name=tool_call["name"],
                tool_call_id=tool_call["id"],
            )
        )

    return {"messages": tool_outputs}
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def compress_research(state: MathAgentState) -> dict:
    """Compress the solving trace into a concise final summary.

    The last message (the model's final answer) becomes the compressed
    result, while all AI and tool message contents are joined into raw
    notes for detailed later analysis.

    Args:
        state: Final agent state after the tool-calling loop ends.

    Returns:
        dict: ``compressed_research`` (final answer text) and ``raw_notes``
        (a single joined string in a list, matching the add reducer).
    """
    messages = state.get("messages", [])
    if not messages:
        # Defensive: the original indexed [-1] on the empty-list default and
        # would raise IndexError; return an explicit empty result instead.
        return {"compressed_research": "", "raw_notes": []}

    last_message = messages[-1]

    # Extract raw notes from tool and AI messages
    raw_notes = [
        str(m.content) for m in filter_messages(
            messages,
            include_types=["tool", "ai"]
        )
    ]

    return {
        "compressed_research": str(last_message.content),
        "raw_notes": ["\n".join(raw_notes)]
    }
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
# ===== ROUTING LOGIC =====
|
| 104 |
+
|
| 105 |
+
def should_continue(state: MathAgentState) -> Literal["tool_node", "compress_research"]:
    """Route after an LLM turn: keep working or wrap up.

    If the most recent message carries tool calls, the loop continues with
    tool execution; otherwise the model has produced its final answer.

    Returns:
        "tool_node": Continue to tool execution
        "compress_research": Stop and compress research
    """
    last_message = state["messages"][-1]
    return "tool_node" if last_message.tool_calls else "compress_research"
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
# ===== GRAPH CONSTRUCTION =====
|
| 126 |
+
|
| 127 |
+
# Build the agent workflow
|
| 128 |
+
agent_builder = StateGraph(MathAgentState, output_schema=MathAgentOutputState)
|
| 129 |
+
|
| 130 |
+
# Add nodes to the graph
|
| 131 |
+
agent_builder.add_node("llm_call", llm_call)
|
| 132 |
+
agent_builder.add_node("tool_node", tool_node)
|
| 133 |
+
agent_builder.add_node("compress_research", compress_research)
|
| 134 |
+
|
| 135 |
+
# Add edges to connect nodes
|
| 136 |
+
agent_builder.add_edge(START, "llm_call")
|
| 137 |
+
agent_builder.add_conditional_edges(
|
| 138 |
+
"llm_call",
|
| 139 |
+
should_continue,
|
| 140 |
+
{
|
| 141 |
+
"tool_node": "tool_node", # Continue research loop
|
| 142 |
+
"compress_research": "compress_research", # Provide final answer
|
| 143 |
+
},
|
| 144 |
+
)
|
| 145 |
+
agent_builder.add_edge("tool_node", "llm_call") # Loop back for more research
|
| 146 |
+
agent_builder.add_edge("compress_research", END)
|
| 147 |
+
|
| 148 |
+
# Compile the agent
|
| 149 |
+
math_agent = agent_builder.compile()
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
@tool
def math_tool(problem: str) -> str:
    """
    Tool for solving a mathematical problem
    :param problem: The problem to be solved
    :return: the solution to the given problem
    """
    # NOTE: this docstring doubles as the tool description shown to the
    # supervisor LLM, so it stays focused on when to call the tool.
    # Run the compiled sub-agent graph to completion on the problem; the
    # question field mirrors the message so the state carries both.
    response = math_agent.invoke({"messages": [HumanMessage(content=problem)], "question": problem})
    # return the LLM response as a string (expected tool response format)
    # this will be automatically turned to ToolMessage
    # by the supervisor's tool-execution step
    return response['compressed_research']
|
tools/think_tool.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.tools import tool
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
@tool(parse_docstring=True)
def think_tool(reflection: str) -> str:
    """Tool for strategic reflection on research progress and decision-making.

    Use this tool after each search to analyze results and plan next steps systematically.
    This creates a deliberate pause in the research workflow for quality decision-making.

    When to use:
    - After receiving search results: What key information did I find?
    - Before deciding next steps: Do I have enough to answer comprehensively?
    - When assessing research gaps: What specific information am I still missing?
    - Before concluding research: Can I provide a complete answer now?

    Reflection should address:
    1. Analysis of current findings - What concrete information have I gathered?
    2. Gap assessment - What crucial information is still missing?
    3. Quality evaluation - Do I have sufficient evidence/examples for a good answer?
    4. Strategic decision - Should I continue searching or provide my answer?

    Args:
        reflection: Your detailed reflection on research progress, findings, gaps, and next steps

    Returns:
        Confirmation that reflection was recorded for decision-making
    """
    # Echo the reflection back; it lands in the message history as a
    # ToolMessage, giving the model a recorded pause for deliberation.
    # (parse_docstring=True parses the Args section above into the schema.)
    return f"Reflection recorded: {reflection}"
|
utils/prompt_manager.py
CHANGED
|
@@ -14,6 +14,7 @@ class PromptType(Enum):
|
|
| 14 |
ANSWER_REFINEMENT = "answer_refinement"
|
| 15 |
MEMORY_OPTIMIZATION = "memory_optimization"
|
| 16 |
TOOL = "tool"
|
|
|
|
| 17 |
|
| 18 |
|
| 19 |
@dataclass
|
|
|
|
| 14 |
ANSWER_REFINEMENT = "answer_refinement"
|
| 15 |
MEMORY_OPTIMIZATION = "memory_optimization"
|
| 16 |
TOOL = "tool"
|
| 17 |
+
SUB_AGENT = "sub_agent"
|
| 18 |
|
| 19 |
|
| 20 |
@dataclass
|