Paperbag committed on
Commit
45386f2
·
1 Parent(s): ab4e631

feat: Increase processed questions in `app copy.py` and use `ToolMessage` for tool call results in `agent.py`.

Browse files
Files changed (4) hide show
  1. __pycache__/agent.cpython-39.pyc +0 -0
  2. agent.py +5 -3
  3. app copy.py +1 -1
  4. out.log +0 -0
__pycache__/agent.cpython-39.pyc CHANGED
Binary files a/__pycache__/agent.cpython-39.pyc and b/__pycache__/agent.cpython-39.pyc differ
 
agent.py CHANGED
@@ -6,7 +6,7 @@ from typing import TypedDict, List, Dict, Any, Optional, Union
6
  from langchain_core import tools
7
  from langgraph.graph import StateGraph, START, END
8
  from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFacePipeline
9
- from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
10
  from langchain_core.tools import tool
11
  from langchain_community.document_loaders import WikipediaLoader
12
  from ddgs import DDGS
@@ -374,14 +374,16 @@ def answer_message(state: AgentState) -> AgentState:
374
  for tool_call in tool_calls:
375
  name = tool_call["name"]
376
  args = tool_call["args"]
 
377
  print(f"Calling tool: {name} with args: {args}")
378
  try:
379
  tool = tools_by_name[name]
380
  tool_result = tool.invoke(args)
381
  except Exception as e:
382
  tool_result = f"Error executing tool {name}: {str(e)}"
383
-
384
- messages.append(HumanMessage(content=f"Tool result ({name}):\n{tool_result}"))
 
385
 
386
  # If we exhausted all steps without an answer, force a draft response
387
  if draft_response is None:
 
6
  from langchain_core import tools
7
  from langgraph.graph import StateGraph, START, END
8
  from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFacePipeline
9
+ from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, ToolMessage
10
  from langchain_core.tools import tool
11
  from langchain_community.document_loaders import WikipediaLoader
12
  from ddgs import DDGS
 
374
  for tool_call in tool_calls:
375
  name = tool_call["name"]
376
  args = tool_call["args"]
377
+ tool_call_id = tool_call.get("id")
378
  print(f"Calling tool: {name} with args: {args}")
379
  try:
380
  tool = tools_by_name[name]
381
  tool_result = tool.invoke(args)
382
  except Exception as e:
383
  tool_result = f"Error executing tool {name}: {str(e)}"
384
+
385
+ # Using ToolMessage allows the model to map the result back perfectly to its request
386
+ messages.append(ToolMessage(content=str(tool_result), tool_call_id=tool_call_id, name=name))
387
 
388
  # If we exhausted all steps without an answer, force a draft response
389
  if draft_response is None:
app copy.py CHANGED
@@ -57,7 +57,7 @@ questions_url = f"{DEFAULT_API_URL}/questions"
57
  response = requests.get(questions_url, timeout=15)
58
  response.raise_for_status()
59
  questions_data = response.json()
60
- for item in questions_data[3:4]:
61
  question_text = item.get("question")
62
  if question_text is None:
63
  continue
 
57
  response = requests.get(questions_url, timeout=15)
58
  response.raise_for_status()
59
  questions_data = response.json()
60
+ for item in questions_data[:5]:
61
  question_text = item.get("question")
62
  if question_text is None:
63
  continue
out.log DELETED
Binary file (8.69 kB)