Paperbag committed on
Commit
685b36d
·
1 Parent(s): ce86dad

fixing empty output

Browse files
Files changed (2) hide show
  1. __pycache__/agent.cpython-39.pyc +0 -0
  2. agent.py +34 -43
__pycache__/agent.cpython-39.pyc CHANGED
Binary files a/__pycache__/agent.cpython-39.pyc and b/__pycache__/agent.cpython-39.pyc differ
 
agent.py CHANGED
@@ -3,7 +3,7 @@ from typing import TypedDict, List, Dict, Any, Optional, Union
3
  from langchain_core import tools
4
  from langgraph.graph import StateGraph, START, END
5
  from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFacePipeline
6
- from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, ToolMessage, BaseMessage
7
  from langchain_core.tools import tool
8
  from ddgs import DDGS
9
  from dotenv import load_dotenv
@@ -50,7 +50,7 @@ def web_search(keywords: str, max_results:int = 5) -> str:
50
 
51
 
52
  class AgentState(TypedDict):
53
- messages: List[BaseMessage]
54
 
55
 
56
  def read_message(state: AgentState) -> AgentState:
@@ -82,55 +82,46 @@ model_with_tools = model.bind_tools(tools)
82
 
83
  def answer_message(state: AgentState) -> AgentState:
84
  messages = state["messages"]
85
-
86
- # Prepend system message if not already there
87
- if not messages or not isinstance(messages[0], SystemMessage):
88
- system_message = SystemMessage(
89
- content=(
90
- "You are a GAIA question answering expert. "
91
- "Your task is to provide an answer to a question. "
92
- "Think carefully before answering the question. "
93
- "Do not include any thought process before answering the question, "
94
- "and only respond exactly what was being asked of you. "
95
- "If you are not able to provide an answer, use the available tools and "
96
- "state the limitation that you're facing instead."
97
- )
98
- )
99
- messages = [system_message] + messages
100
-
101
- # Step 1: First call (tools enabled). Often returns tool_calls with empty content.
102
  ai_msg = model_with_tools.invoke(messages)
103
  messages.append(ai_msg)
104
 
105
- tool_calls = getattr(ai_msg, "tool_calls", None) or []
106
- if not tool_calls:
107
- # Model answered directly
108
- return {"messages": messages}
109
-
110
- # Step 2: Execute tools and add ToolMessages
111
- for tool_call in tool_calls:
112
- name = tool_call["name"]
113
- args = tool_call["args"]
114
- tool_obj = tools_by_name[name]
115
- tool_result = tool_obj.invoke(args)
116
- messages.append(
117
- ToolMessage(
118
- content=str(tool_result),
119
- tool_call_id=tool_call["id"],
120
- )
121
- )
122
-
123
- # Step 3: Force final answer with tools DISABLED.
124
- # Some HF chat models keep emitting tool_calls forever; calling the base model avoids blank .content.
125
  final_instruction = HumanMessage(
126
- content=(
127
  "Using the tool results above, provide the FINAL answer now. "
128
  "Do not call any tools. Respond with only the answer."
129
  )
130
  )
131
- final_response = model.invoke(messages + [final_instruction])
132
- messages.append(final_response)
133
- return {"messages": messages}
 
 
 
 
 
 
 
 
134
 
135
 
136
 
 
3
  from langchain_core import tools
4
  from langgraph.graph import StateGraph, START, END
5
  from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFacePipeline
6
+ from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
7
  from langchain_core.tools import tool
8
  from ddgs import DDGS
9
  from dotenv import load_dotenv
 
50
 
51
 
52
  class AgentState(TypedDict):
53
+ messages: List[Union[HumanMessage, AIMessage]]
54
 
55
 
56
  def read_message(state: AgentState) -> AgentState:
 
82
 
83
  def answer_message(state: AgentState) -> AgentState:
84
  messages = state["messages"]
85
+ prompt = [SystemMessage(f"""
86
+ You are a GAIA question answering expert.
87
+ Your task is to provide an answer to a question.
88
+ Think carefully before answering the question.
89
+ Do not include any thought process before answering the question, and only response exactly what was being asked of you.
90
+ If you are not able to provide an answer, use tools or state the limitation that you're facing instead.
91
+
92
+ Example question: How many hours are there in a day?
93
+ Response: 24
94
+ """)]
95
+ messages = prompt + messages
 
 
 
 
 
 
96
  ai_msg = model_with_tools.invoke(messages)
97
  messages.append(ai_msg)
98
 
99
+ # Step 2: Execute tools and collect results
100
+ for tool_call in ai_msg.tool_calls:
101
+ # Execute the tool with the generated arguments
102
+ name = tool_call['name']
103
+ args = tool_call['args']
104
+ tool = tools_by_name[name]
105
+ tool_result = tool.invoke(args)
106
+ messages.append(tool_result)
107
+
 
 
 
 
 
 
 
 
 
 
 
108
  final_instruction = HumanMessage(
109
+ content=(
110
  "Using the tool results above, provide the FINAL answer now. "
111
  "Do not call any tools. Respond with only the answer."
112
  )
113
  )
114
+ messages.append(final_instruction)
115
+
116
+ final_response = model_with_tools.invoke(messages)
117
+
118
+ # Step 3: Pass results back to model for final response
119
+ print(f"Messages: {messages}")
120
+ # final_response = model_with_tools.invoke(messages)
121
+ print(f"Final response: {final_response}")
122
+
123
+ # Append the model's answer to the messages list
124
+ return {"messages": [final_response]}
125
 
126
 
127