Try to use proper tool doc
Browse files
app.py
CHANGED
|
@@ -6,171 +6,86 @@ import requests
|
|
| 6 |
import inspect
|
| 7 |
import pandas as pd
|
| 8 |
|
| 9 |
-
from langgraph.graph import StateGraph, START
|
| 10 |
from langgraph.graph.message import add_messages
|
| 11 |
from langgraph.prebuilt import ToolNode, tools_condition
|
| 12 |
|
| 13 |
from langchain_openai import ChatOpenAI
|
| 14 |
from langchain_community.tools import DuckDuckGoSearchRun
|
| 15 |
-
from langchain_core.messages import
|
|
|
|
| 16 |
|
| 17 |
-
# (Keep Constants as is)
|
| 18 |
# --- Constants ---
|
| 19 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
| 20 |
|
| 21 |
-
|
| 22 |
class State(TypedDict):
|
| 23 |
question: str
|
| 24 |
messages: List[Dict[str, Any]]
|
| 25 |
|
| 26 |
|
| 27 |
-
# --- Basic Agent Definition ---
|
| 28 |
-
# ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
|
| 29 |
class BasicAgent:
|
| 30 |
-
|
| 31 |
def __init__(self):
|
| 32 |
-
|
| 33 |
-
self.tools = [
|
| 34 |
-
self.search_tool,
|
| 35 |
-
# self.image_analyze_tool,
|
| 36 |
-
# self.file_operating_tool
|
| 37 |
-
]
|
| 38 |
|
|
|
|
| 39 |
self.model = ChatOpenAI(model="gpt-4o", temperature=0)
|
| 40 |
self.model_with_tools = self.model.bind_tools(self.tools, parallel_tool_calls=False)
|
| 41 |
|
|
|
|
| 42 |
self.graph = StateGraph(State)
|
| 43 |
self.graph.add_node("assistant", self.assistant)
|
| 44 |
self.graph.add_node("tools", ToolNode(self.tools))
|
| 45 |
|
| 46 |
-
# Start the edges
|
| 47 |
self.graph.add_edge(START, "assistant")
|
| 48 |
-
self.graph.add_conditional_edges(
|
| 49 |
-
|
| 50 |
-
tools_condition
|
| 51 |
-
)
|
| 52 |
-
self.graph.add_edge("tools", "assistant")
|
| 53 |
-
|
| 54 |
-
# Compile the graph
|
| 55 |
-
self.compiled_graph = self.graph.compile()
|
| 56 |
|
|
|
|
| 57 |
print("BasicAgent initialized.")
|
| 58 |
|
| 59 |
-
def __call__(self, question: str) -> Tuple[str,
|
| 60 |
-
print(f"Agent received question
|
| 61 |
state = State(question=question, messages=[HumanMessage(content=question)])
|
| 62 |
result = self.compiled_graph.invoke(state)
|
| 63 |
final_answer = result["messages"][-1].content
|
| 64 |
-
print(f"
|
| 65 |
return final_answer, result["messages"]
|
| 66 |
|
| 67 |
-
# def assistant(self, state: State):
|
| 68 |
-
# print("Assistant call state:", state)
|
| 69 |
-
# messages = state.get("messages", [])
|
| 70 |
-
|
| 71 |
-
# # Add system message only once
|
| 72 |
-
# if not any(isinstance(m, SystemMessage) for m in messages):
|
| 73 |
-
# textual_description_of_tool="""
|
| 74 |
-
# search_tool(question: str, max_length: int = 2048) -> str:
|
| 75 |
-
# Search info in the web.
|
| 76 |
-
# Call example:
|
| 77 |
-
|
| 78 |
-
# self.search_tool("Who won the election 2008 in the USA?", max_length = 4096)
|
| 79 |
-
|
| 80 |
-
# Args:
|
| 81 |
-
# question: Question string
|
| 82 |
-
# max_length: maximum chars in the output (if exceeded the first "max_length" characters will be taken)
|
| 83 |
-
|
| 84 |
-
# Returns:
|
| 85 |
-
# A single string containing the info from the web.
|
| 86 |
-
|
| 87 |
-
# """
|
| 88 |
-
|
| 89 |
-
# sys_msg = SystemMessage(
|
| 90 |
-
# content=f"""You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
|
| 91 |
-
# You can use provided tools:\n{textual_description_of_tool}"""
|
| 92 |
-
# )
|
| 93 |
-
# messages = [sys_msg] + messages
|
| 94 |
-
|
| 95 |
-
# print("Calling model with tools invoke...")
|
| 96 |
-
# new_msg = self.model_with_tools.invoke(messages)
|
| 97 |
-
# print("Calling model with tools invoke finished, result:", new_msg)
|
| 98 |
-
|
| 99 |
-
# # Detect tool call
|
| 100 |
-
# if "tool_calls" in new_msg.additional_kwargs:
|
| 101 |
-
# tool_call = new_msg.additional_kwargs["tool_calls"][0]
|
| 102 |
-
# tool_response = self.search_tool(**eval(tool_call["function"]["arguments"]))
|
| 103 |
-
|
| 104 |
-
# tool_msg = ToolMessage(
|
| 105 |
-
# tool_call_id=tool_call["id"],
|
| 106 |
-
# content=tool_response,
|
| 107 |
-
# name=tool_call["function"]["name"]
|
| 108 |
-
# )
|
| 109 |
-
|
| 110 |
-
# return {
|
| 111 |
-
# "question": state["question"],
|
| 112 |
-
# "messages": messages + [new_msg, tool_msg],
|
| 113 |
-
# }
|
| 114 |
-
|
| 115 |
-
# # new_messages = add_messages(messages, [new_msg])
|
| 116 |
-
# return {
|
| 117 |
-
# "question": state["question"],
|
| 118 |
-
# "messages": messages + [new_msg],
|
| 119 |
-
# }
|
| 120 |
-
|
| 121 |
def assistant(self, state: State):
|
| 122 |
-
print("Assistant
|
| 123 |
messages = state.get("messages", [])
|
| 124 |
-
|
| 125 |
# Add system message only once
|
| 126 |
if not any(isinstance(m, SystemMessage) for m in messages):
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
question: Question string
|
| 136 |
-
max_length: maximum chars in the output (if exceeded, the first "max_length" characters will be taken)
|
| 137 |
-
|
| 138 |
-
Returns:
|
| 139 |
-
A single string containing the info from the web.
|
| 140 |
"""
|
| 141 |
-
|
| 142 |
sys_msg = SystemMessage(
|
| 143 |
-
content=f"""You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
|
| 144 |
-
You can use provided tools:\n{textual_description_of_tool}"""
|
| 145 |
)
|
| 146 |
messages = [sys_msg] + messages
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
print("
|
| 151 |
-
|
| 152 |
-
# Let LangGraph handle tool routing
|
| 153 |
return {
|
| 154 |
"question": state["question"],
|
| 155 |
-
"messages": add_messages(messages, [
|
| 156 |
}
|
| 157 |
|
| 158 |
-
|
| 159 |
def search_tool(self, question: str, max_length: int = 2048) -> str:
|
| 160 |
-
""
|
| 161 |
-
Search info in the web.
|
| 162 |
-
|
| 163 |
-
Args:
|
| 164 |
-
question: Question string
|
| 165 |
-
max_length: Maximum length of response (default 2048 chars)
|
| 166 |
-
|
| 167 |
-
Returns:
|
| 168 |
-
A single string containing the info from the web.
|
| 169 |
-
"""
|
| 170 |
search = DuckDuckGoSearchRun()
|
| 171 |
-
|
| 172 |
-
print("
|
| 173 |
-
return
|
| 174 |
|
| 175 |
|
| 176 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
|
|
| 6 |
import inspect
|
| 7 |
import pandas as pd
|
| 8 |
|
| 9 |
+
from langgraph.graph import StateGraph, START
|
| 10 |
from langgraph.graph.message import add_messages
|
| 11 |
from langgraph.prebuilt import ToolNode, tools_condition
|
| 12 |
|
| 13 |
from langchain_openai import ChatOpenAI
|
| 14 |
from langchain_community.tools import DuckDuckGoSearchRun
|
| 15 |
+
from langchain_core.messages import SystemMessage, HumanMessage
|
| 16 |
+
|
| 17 |
|
|
|
|
| 18 |
# --- Constants ---
# Base URL of the HF Agents-course unit-4 scoring service.
# NOTE(review): presumably the submission endpoint consumed by
# run_and_submit_all — confirm against that function's body.
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
| 20 |
|
|
|
|
| 21 |
class State(TypedDict):
    """Shared LangGraph state: the original user question plus the running
    message history accumulated across assistant/tool nodes."""
    # Original user question, carried unchanged through every node.
    question: str
    # Message history. NOTE(review): the annotation says Dict, but the code
    # actually stores LangChain message objects (HumanMessage, SystemMessage,
    # AIMessage, ToolMessage) — annotation looks inaccurate; confirm before
    # tightening it (an add_messages-reducer annotation would change graph
    # merge behavior).
    messages: List[Dict[str, Any]]
|
| 24 |
|
| 25 |
|
|
|
|
|
|
|
| 26 |
class BasicAgent:
    """Minimal LangGraph agent: a GPT-4o "assistant" node that can call a
    web-search tool, looping assistant -> tools -> assistant until the model
    stops requesting tool calls, then returning the final answer."""

    def __init__(self):
        # Tools exposed to the model. Each tool callable must carry a
        # docstring: bind_tools / ToolNode derive the tool's description
        # (and help the model decide when to call it) from that docstring.
        self.tools = [self.search_tool]

        # Chat model with tool support; temperature 0 for determinism,
        # one tool call at a time.
        self.model = ChatOpenAI(model="gpt-4o", temperature=0)
        self.model_with_tools = self.model.bind_tools(self.tools, parallel_tool_calls=False)

        # LangGraph: "assistant" decides, "tools" executes requested calls.
        self.graph = StateGraph(State)
        self.graph.add_node("assistant", self.assistant)
        self.graph.add_node("tools", ToolNode(self.tools))

        self.graph.add_edge(START, "assistant")
        # Route to "tools" when the last AI message contains tool calls,
        # otherwise end the run.
        self.graph.add_conditional_edges("assistant", tools_condition)
        self.graph.add_edge("tools", "assistant")  # feed tool output back to the model

        self.compiled_graph = self.graph.compile()
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> Tuple[str, List[Dict[str, Any]]]:
        """Run the compiled graph on *question*.

        Args:
            question: The user question to answer.

        Returns:
            (final_answer, messages): the text content of the last message
            produced by the graph, and the full accumulated message history.
        """
        print(f"Agent received question: {question}")
        state = State(question=question, messages=[HumanMessage(content=question)])
        result = self.compiled_graph.invoke(state)
        final_answer = result["messages"][-1].content
        print(f"Final Answer: {final_answer}")
        return final_answer, result["messages"]

    def assistant(self, state: State):
        """Graph node: prepend the system prompt exactly once, then invoke
        the tool-bound model and append its response to the history."""
        print("Assistant invoked.")
        messages = state.get("messages", [])

        # Add system message only once (re-entered after every tool loop).
        if not any(isinstance(m, SystemMessage) for m in messages):
            tool_doc = """
            search_tool(question: str, max_length: int = 2048) -> str:
                Search info on the web.

                Args:
                    question: Question string
                    max_length: maximum characters in the output

                Returns:
                    A single string containing the info from the web.
            """
            sys_msg = SystemMessage(
                content=f"""You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. You can use provided tools:\n{tool_doc}"""
            )
            messages = [sys_msg] + messages

        # Invoke model with tools; LangGraph handles tool routing via
        # tools_condition, so we only return the updated message list here.
        response = self.model_with_tools.invoke(messages)
        print("Assistant response:", response)

        return {
            "question": state["question"],
            "messages": add_messages(messages, [response]),
        }

    def search_tool(self, question: str, max_length: int = 2048) -> str:
        """Search info on the web.

        Args:
            question: Question string
            max_length: Maximum length of response (default 2048 chars)

        Returns:
            A single string containing the info from the web, truncated to
            at most max_length characters.
        """
        print(f"Calling search tool with: {question}")
        search = DuckDuckGoSearchRun()
        result = search.invoke(question)
        print("Tool result:", result)
        return result[:max_length]
|
| 89 |
|
| 90 |
|
| 91 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|