ameglei-external committed on
Commit
1a6c543
·
verified ·
1 Parent(s): 3881dff

Trying to set up LangGraph to add messages every time, not overwrite them

Browse files
Files changed (1) hide show
  1. app.py +23 -24
app.py CHANGED
@@ -1,5 +1,6 @@
1
  import os
2
  from typing import TypedDict, List, Dict, Any, Optional, Tuple
 
3
 
4
  import gradio as gr
5
  import requests
@@ -12,7 +13,7 @@ from langgraph.prebuilt import ToolNode, tools_condition
12
 
13
  from langchain_openai import ChatOpenAI
14
  from langchain_community.tools import DuckDuckGoSearchRun
15
- from langchain_core.messages import SystemMessage, HumanMessage
16
 
17
 
18
  # --- Constants ---
@@ -20,7 +21,7 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
20
 
21
  class State(TypedDict):
22
  question: str
23
- messages: List[Dict[str, Any]]
24
 
25
 
26
  class BasicAgent:
@@ -45,7 +46,24 @@ class BasicAgent:
45
 
46
  def __call__(self, question: str) -> Tuple[str, List[Dict[str, Any]]]:
47
  print(f"Agent received question: {question}")
48
- state = State(question=question, messages=[HumanMessage(content=question)])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  result = self.compiled_graph.invoke(state)
50
  final_answer = result["messages"][-1].content
51
  print(f"Final Answer: {final_answer}")
@@ -53,31 +71,12 @@ class BasicAgent:
53
 
54
  def assistant(self, state: State):
55
  print("Assistant invoked. State:", state)
56
- messages = state.get("messages", [])
57
-
58
- # Add system message only once
59
- if not any(isinstance(m, SystemMessage) for m in messages):
60
- tool_doc = """
61
- search_tool(question: str, max_length: int = 2048) -> str:
62
- Search info on the web.
63
- Args:
64
- question: Question string
65
- max_length: maximum characters in the output
66
- Returns:
67
- A single string containing the info from the web.
68
- """
69
- sys_msg = SystemMessage(
70
- content=f"""You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. You can use provided tools:\n{tool_doc}"""
71
- )
72
- messages = [sys_msg] + messages
73
-
74
- # Invoke model with tools (LangGraph handles tool routing)
75
  response = self.model_with_tools.invoke(messages)
76
  print("Assistant response:", response)
77
 
78
  return {
79
- "question": state["question"],
80
- "messages": add_messages(messages, [response]),
81
  }
82
 
83
  def search_tool(self, question: str, max_length: int = 2048) -> str:
 
1
  import os
2
  from typing import TypedDict, List, Dict, Any, Optional, Tuple
3
+ from typing_extensions import Annotated
4
 
5
  import gradio as gr
6
  import requests
 
13
 
14
  from langchain_openai import ChatOpenAI
15
  from langchain_community.tools import DuckDuckGoSearchRun
16
+ from langchain_core.messages import SystemMessage, HumanMessage, AnyMessage
17
 
18
 
19
  # --- Constants ---
 
21
 
22
  class State(TypedDict):
23
  question: str
24
+ messages: Annotated[List[AnyMessage], add_messages]
25
 
26
 
27
  class BasicAgent:
 
46
 
47
  def __call__(self, question: str) -> Tuple[str, List[Dict[str, Any]]]:
48
  print(f"Agent received question: {question}")
49
+
50
+ tool_doc = """
51
+ search_tool(question: str, max_length: int = 2048) -> str:
52
+ Search info on the web.
53
+ Args:
54
+ question: Question string
55
+ max_length: maximum characters in the output
56
+ Returns:
57
+ A single string containing the info from the web.
58
+ """
59
+ sys_msg = SystemMessage(
60
+ content=f"""You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. You can use provided tools:\n{tool_doc}"""
61
+ )
62
+
63
+ state = State(
64
+ question=question,
65
+ messages=[sys_msg, HumanMessage(content=question)]
66
+ )
67
  result = self.compiled_graph.invoke(state)
68
  final_answer = result["messages"][-1].content
69
  print(f"Final Answer: {final_answer}")
 
71
 
72
  def assistant(self, state: State):
73
  print("Assistant invoked. State:", state)
74
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
  response = self.model_with_tools.invoke(messages)
76
  print("Assistant response:", response)
77
 
78
  return {
79
+ "messages": [response]
 
80
  }
81
 
82
  def search_tool(self, question: str, max_length: int = 2048) -> str: