Paperbag committed on
Commit
ce86dad
·
1 Parent(s): d3a1a1a

Refactor answer_message function to prepend a SystemMessage for context, handle tool calls more robustly, and ensure final responses are generated without tool invocation. This enhances the overall response accuracy and flow.

Browse files
Files changed (2) hide show
  1. __pycache__/agent.cpython-39.pyc +0 -0
  2. agent.py +49 -33
__pycache__/agent.cpython-39.pyc CHANGED
Binary files a/__pycache__/agent.cpython-39.pyc and b/__pycache__/agent.cpython-39.pyc differ
 
agent.py CHANGED
@@ -3,7 +3,7 @@ from typing import TypedDict, List, Dict, Any, Optional, Union
3
  from langchain_core import tools
4
  from langgraph.graph import StateGraph, START, END
5
  from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFacePipeline
6
- from langchain_core.messages import HumanMessage, AIMessage
7
  from langchain_core.tools import tool
8
  from ddgs import DDGS
9
  from dotenv import load_dotenv
@@ -50,7 +50,7 @@ def web_search(keywords: str, max_results:int = 5) -> str:
50
 
51
 
52
  class AgentState(TypedDict):
53
- messages: List[Union[HumanMessage, AIMessage]]
54
 
55
 
56
  def read_message(state: AgentState) -> AgentState:
@@ -82,39 +82,55 @@ model_with_tools = model.bind_tools(tools)
82
 
83
  def answer_message(state: AgentState) -> AgentState:
84
  messages = state["messages"]
85
- prompt = f"""
86
- You are a GAIA question answering expert.
87
- Your task is to provide an answer to a question.
88
- Think carefully before answering the question.
89
- Do not include any thought process before answering the question, and only response exactly what was being asked of you.
90
- If you are not able to provide an answer, please state the limitation that you're facing instead.
91
-
92
- Example question: How many hours are there in a day?
93
- Response: 24
94
-
95
- Here is the question:
96
- {messages}
97
-
98
- """
99
- ai_msg = model_with_tools.invoke(prompt)
 
 
 
100
  messages.append(ai_msg)
101
 
102
- # Step 2: Execute tools and collect results
103
- for tool_call in ai_msg.tool_calls:
104
- # Execute the tool with the generated arguments
105
- name = tool_call['name']
106
- args = tool_call['args']
107
- tool = tools_by_name[name]
108
- tool_result = tool.invoke(args)
109
- # prompt.append(tool_result)
110
-
111
- # Step 3: Pass results back to model for final response
112
- print(f"Messages: {messages}")
113
- final_response = model_with_tools.invoke(prompt + tool_result)
114
- print(f"Final response: {final_response}")
115
-
116
- # Append the model's answer to the messages list
117
- return {"messages": [final_response]}
 
 
 
 
 
 
 
 
 
 
 
 
 
118
 
119
 
120
 
 
3
  from langchain_core import tools
4
  from langgraph.graph import StateGraph, START, END
5
  from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFacePipeline
6
+ from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, ToolMessage, BaseMessage
7
  from langchain_core.tools import tool
8
  from ddgs import DDGS
9
  from dotenv import load_dotenv
 
50
 
51
 
52
  class AgentState(TypedDict):
53
+ messages: List[BaseMessage]
54
 
55
 
56
  def read_message(state: AgentState) -> AgentState:
 
82
 
83
  def answer_message(state: AgentState) -> AgentState:
84
  messages = state["messages"]
85
+
86
+ # Prepend system message if not already there
87
+ if not messages or not isinstance(messages[0], SystemMessage):
88
+ system_message = SystemMessage(
89
+ content=(
90
+ "You are a GAIA question answering expert. "
91
+ "Your task is to provide an answer to a question. "
92
+ "Think carefully before answering the question. "
93
+ "Do not include any thought process before answering the question, "
94
+ "and only respond exactly what was being asked of you. "
95
+ "If you are not able to provide an answer, use the available tools and "
96
+ "state the limitation that you're facing instead."
97
+ )
98
+ )
99
+ messages = [system_message] + messages
100
+
101
+ # Step 1: First call (tools enabled). Often returns tool_calls with empty content.
102
+ ai_msg = model_with_tools.invoke(messages)
103
  messages.append(ai_msg)
104
 
105
+ tool_calls = getattr(ai_msg, "tool_calls", None) or []
106
+ if not tool_calls:
107
+ # Model answered directly
108
+ return {"messages": messages}
109
+
110
+ # Step 2: Execute tools and add ToolMessages
111
+ for tool_call in tool_calls:
112
+ name = tool_call["name"]
113
+ args = tool_call["args"]
114
+ tool_obj = tools_by_name[name]
115
+ tool_result = tool_obj.invoke(args)
116
+ messages.append(
117
+ ToolMessage(
118
+ content=str(tool_result),
119
+ tool_call_id=tool_call["id"],
120
+ )
121
+ )
122
+
123
+ # Step 3: Force final answer with tools DISABLED.
124
+ # Some HF chat models keep emitting tool_calls forever; calling the base model avoids blank .content.
125
+ final_instruction = HumanMessage(
126
+ content=(
127
+ "Using the tool results above, provide the FINAL answer now. "
128
+ "Do not call any tools. Respond with only the answer."
129
+ )
130
+ )
131
+ final_response = model.invoke(messages + [final_instruction])
132
+ messages.append(final_response)
133
+ return {"messages": messages}
134
 
135
 
136