Spaces:
Sleeping
Sleeping
Update agent.py
Browse files
agent.py
CHANGED
|
@@ -1,258 +1,258 @@
|
|
| 1 |
-
########## Imports ############
|
| 2 |
-
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 3 |
-
import os
|
| 4 |
-
from typing import TypedDict, List, Dict, Any, Optional
|
| 5 |
-
from langgraph.graph import StateGraph, START, END
|
| 6 |
-
from langchain_openai import ChatOpenAI
|
| 7 |
-
from langchain_core.messages import HumanMessage
|
| 8 |
-
|
| 9 |
-
from langchain_community.tools import WikipediaQueryRun
|
| 10 |
-
from langchain_community.utilities import WikipediaAPIWrapper
|
| 11 |
-
import string
|
| 12 |
-
|
| 13 |
-
from langchain_experimental.tools import PythonREPLTool
|
| 14 |
-
import ast, json
|
| 15 |
-
|
| 16 |
-
from langchain_community.tools import DuckDuckGoSearchRun
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
########## State ############
|
| 20 |
-
class InfoState(TypedDict):
|
| 21 |
-
question: str
|
| 22 |
-
answer_type: Optional[str] # WebInfo - WIKI - MATH
|
| 23 |
-
answer_code : Optional[str]
|
| 24 |
-
main_parts: Optional[List[str]]
|
| 25 |
-
tool_answer : Optional[list[str]]
|
| 26 |
-
final_answer : Optional[str]
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
######### Nodes ############
|
| 31 |
-
def get_wiki_relate(state: InfoState) -> InfoState:
|
| 32 |
-
"""
|
| 33 |
-
Tool to Get the wikipedia info from keywords extracted from preprocessing at main_parts.
|
| 34 |
-
|
| 35 |
-
Uses: Wikipedia API
|
| 36 |
-
Returns: tool_answer (summary)
|
| 37 |
-
"""
|
| 38 |
-
print("Using Wikipedia...")
|
| 39 |
-
# Create the Wikipedia utility
|
| 40 |
-
wiki = WikipediaAPIWrapper(
|
| 41 |
-
lang="en", # Wikipedia language
|
| 42 |
-
top_k_results=1, # how many results to fetch
|
| 43 |
-
doc_content_chars_max=2000
|
| 44 |
-
)
|
| 45 |
-
|
| 46 |
-
# Make a tool from it
|
| 47 |
-
wiki_tool = WikipediaQueryRun(api_wrapper=wiki)
|
| 48 |
-
|
| 49 |
-
try:
|
| 50 |
-
wiki_answer = wiki_tool.run(" ".join(state["main_parts"]) + " full wikipedia article about this topic")
|
| 51 |
-
state['tool_answer'] = wiki_answer
|
| 52 |
-
return state
|
| 53 |
-
except Exception as e:
|
| 54 |
-
print("Rate limit Exception")
|
| 55 |
-
state['tool_answer'] = ""
|
| 56 |
-
return state
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
def execute_code(state: InfoState) -> InfoState :
|
| 60 |
-
"""Tool to calculate any math using python code or get current date time."""
|
| 61 |
-
print("Execut Code...")
|
| 62 |
-
python_tool = PythonREPLTool()
|
| 63 |
-
code = state["answer_code"]
|
| 64 |
-
state["tool_answer"]=python_tool.run(code)
|
| 65 |
-
return state
|
| 66 |
-
|
| 67 |
-
def get_code(state:InfoState) -> InfoState:
|
| 68 |
-
"""From prompt get the code to run."""
|
| 69 |
-
print("Getting Code (Gemini)...")
|
| 70 |
-
prompt = (
|
| 71 |
-
f"You are a strict code generator. "
|
| 72 |
-
f"Given the question: '{state['question']}', "
|
| 73 |
-
f"return ONLY valid Python code that computes the answer IF the question is about math, date, or time. "
|
| 74 |
-
f"Otherwise, return exactly: print('not valid')\n\n"
|
| 75 |
-
f"Rules:\n"
|
| 76 |
-
f"- Output ONLY the code or print('not valid')\n"
|
| 77 |
-
f"- No explanations, no markdown, no extra text\n"
|
| 78 |
-
f"- No quotes around the code\n"
|
| 79 |
-
f"- Use print() to show the result\n"
|
| 80 |
-
f"- Import modules only if needed (e.g. datetime, math)"
|
| 81 |
-
)
|
| 82 |
-
|
| 83 |
-
# 2️⃣ Call Gemini
|
| 84 |
-
model = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
|
| 85 |
-
response = model.invoke([HumanMessage(content=prompt)]).content.strip()
|
| 86 |
-
|
| 87 |
-
state["answer_code"] = response
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
return state
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
def preprocess_text(state: dict) -> InfoState:
|
| 95 |
-
|
| 96 |
-
"""
|
| 97 |
-
Preprocess text to get the keywords to help get results directly from wikipedia.
|
| 98 |
-
|
| 99 |
-
Input: raw question
|
| 100 |
-
Output: main_parts (list of keywords)
|
| 101 |
-
"""
|
| 102 |
-
print("Preprocess text (Gemini)...")
|
| 103 |
-
# 1️⃣ Prepare the prompt
|
| 104 |
-
prompt = (
|
| 105 |
-
"We want to find the best-matching English Wikipedia pages for a factual question, "
|
| 106 |
-
"so we must extract only the essential topic names or entities that Wikipedia likely has pages for. "
|
| 107 |
-
"These should include the main subject (e.g., a person, event, place, or concept) and any directly relevant subtopic "
|
| 108 |
-
"(like 'Discography', 'Career', or 'History') if they help narrow the search.\n\n"
|
| 109 |
-
|
| 110 |
-
"Rules:\n"
|
| 111 |
-
"- Output 1 to 3 items maximum.\n"
|
| 112 |
-
"- Use English Wikipedia title format (capitalize each main word).\n"
|
| 113 |
-
"- Translate non-English names or terms to English.\n"
|
| 114 |
-
"- Exclude question words, pronouns, and filler terms.\n"
|
| 115 |
-
"- Fix spelling errors if necessary.\n"
|
| 116 |
-
"- Prefer specific Wikipedia topics over vague ones.\n\n"
|
| 117 |
-
|
| 118 |
-
"Example:\n"
|
| 119 |
-
"Q: 'Who built the Eiffel Tower?'\n"
|
| 120 |
-
"A: [\"Eiffel Tower\", \"Gustave Eiffel\"]\n\n"
|
| 121 |
-
f"Question: '{state['question']}'\n\n"
|
| 122 |
-
"Output ONLY a valid JSON list as described — no explanations, markdown, or extra formatting."
|
| 123 |
-
)
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
# 2️⃣ Call Gemini
|
| 127 |
-
model = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
|
| 128 |
-
response = model.invoke([HumanMessage(content=prompt)]).content.strip()
|
| 129 |
-
|
| 130 |
-
# 3️⃣ Try to safely parse
|
| 131 |
-
try:
|
| 132 |
-
# First, try JSON
|
| 133 |
-
state["main_parts"] = json.loads(response)
|
| 134 |
-
except json.JSONDecodeError:
|
| 135 |
-
try:
|
| 136 |
-
# If not JSON, try Python literal
|
| 137 |
-
state["main_parts"] = ast.literal_eval(response)
|
| 138 |
-
except Exception:
|
| 139 |
-
# If both fail, store fallback info
|
| 140 |
-
print("⚠️ Model returned invalid content:", response)
|
| 141 |
-
state["main_parts"] = []
|
| 142 |
-
|
| 143 |
-
return state
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
def get_answer(state: InfoState) -> InfoState :
|
| 149 |
-
"""
|
| 150 |
-
Final Node that returns the final answer organized.
|
| 151 |
-
|
| 152 |
-
Combines: tool_answer → final_answer
|
| 153 |
-
"""
|
| 154 |
-
print("Getting Answer (Gemini)...")
|
| 155 |
-
|
| 156 |
-
prompt = (
|
| 157 |
-
"You are a knowledgeable assistant that answers questions based on context and common factual knowledge.\n"
|
| 158 |
-
"Use the context first, but if it clearly lacks the needed details, you may rely on well-known public facts "
|
| 159 |
-
"(such as from Wikipedia) that logically complete the answer.\n\n"
|
| 160 |
-
f"Question: {state['question']}\n"
|
| 161 |
-
f"Context:\n{state.get('tool_answer')}\n\n"
|
| 162 |
-
"Instructions:\n"
|
| 163 |
-
"- Focus on producing one short factual answer.\n"
|
| 164 |
-
"- Do not include tool names, prefixes, or metadata.\n"
|
| 165 |
-
"- If the context contains partial hints, you can infer the answer from general knowledge of the same topic.\n"
|
| 166 |
-
"- If absolutely nothing is relevant, reply: I don't know.\n\n"
|
| 167 |
-
"Final Answer:"
|
| 168 |
-
)
|
| 169 |
-
|
| 170 |
-
model = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
|
| 171 |
-
state["final_answer"] = (model.invoke([HumanMessage(content=prompt)]).content)
|
| 172 |
-
|
| 173 |
-
return state
|
| 174 |
-
|
| 175 |
-
def get_type(state: InfoState) -> InfoState:
|
| 176 |
-
"""Choose which tool to use based on question type (WIKI, SEARCH, CODE)."""
|
| 177 |
-
print("Getting Type (Gemini)...")
|
| 178 |
-
|
| 179 |
-
prompt = "According to the Question " +state["question"] + " Select the best tool to answer WIKI if it's informatative or science question, WebInfo if it need up to data news,MATH if math or date or time,LLM if other or have link You're very serious,just give one word from given"
|
| 180 |
-
model = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
|
| 181 |
-
state["answer_type"] = (model.invoke([HumanMessage(content=prompt)]).content)
|
| 182 |
-
|
| 183 |
-
return state
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
def get_search_results(state: InfoState) -> InfoState:
|
| 189 |
-
"""Tool to search web for results using DuckDuckGo."""
|
| 190 |
-
print("Searching...")
|
| 191 |
-
|
| 192 |
-
search = DuckDuckGoSearchRun()
|
| 193 |
-
|
| 194 |
-
try:
|
| 195 |
-
state['tool_answer'] = search.run(state["question"]) #" " .join(state["main_parts"]))
|
| 196 |
-
return state
|
| 197 |
-
except Exception:
|
| 198 |
-
state['tool_answer'] = ""
|
| 199 |
-
return state
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
def route(state: InfoState):
|
| 203 |
-
print(state["answer_type"])
|
| 204 |
-
return state["answer_type"]
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
################# Graph ################
|
| 209 |
-
def get_graph():
|
| 210 |
-
graph = StateGraph(InfoState)
|
| 211 |
-
|
| 212 |
-
# Add nodes
|
| 213 |
-
#graph.add_node("get_wiki_relate", get_wiki_relate)
|
| 214 |
-
graph.add_node("preprocess_text", preprocess_text)
|
| 215 |
-
graph.add_node("get_answer", get_answer)
|
| 216 |
-
graph.add_node("get_type", get_type)
|
| 217 |
-
graph.add_node("get_search_results", get_search_results)
|
| 218 |
-
graph.add_node("execute_code", execute_code)
|
| 219 |
-
graph.add_node("get_code", get_code)
|
| 220 |
-
|
| 221 |
-
# Add edges
|
| 222 |
-
graph.add_edge(START, "preprocess_text")
|
| 223 |
-
graph.add_edge("preprocess_text", "get_type")
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
# Add conditional edges
|
| 227 |
-
graph.add_conditional_edges(
|
| 228 |
-
"get_type",
|
| 229 |
-
route,
|
| 230 |
-
{
|
| 231 |
-
"WebInfo": "get_search_results",
|
| 232 |
-
"WIKI": "get_search_results",#"get_wiki_relate",
|
| 233 |
-
"MATH": "get_code",
|
| 234 |
-
"LLM": "get_answer"
|
| 235 |
-
}
|
| 236 |
-
)
|
| 237 |
-
|
| 238 |
-
# Add final edges
|
| 239 |
-
graph.add_edge("get_search_results", "get_answer")
|
| 240 |
-
#graph.add_edge("get_wiki_relate", "get_answer")
|
| 241 |
-
|
| 242 |
-
graph.add_edge("get_code", "execute_code")
|
| 243 |
-
graph.add_edge("execute_code", "get_answer")
|
| 244 |
-
|
| 245 |
-
graph.add_edge("get_answer", END)
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
# Compile the graph
|
| 249 |
-
compiled_graph = graph.compile()
|
| 250 |
-
return compiled_graph
|
| 251 |
-
|
| 252 |
-
def ask(compiled_graph,question):
|
| 253 |
-
legitimate_result = compiled_graph.invoke({
|
| 254 |
-
"question": question,
|
| 255 |
-
|
| 256 |
-
})
|
| 257 |
-
|
| 258 |
-
return legitimate_result['final_answer']
|
|
|
|
| 1 |
+
########## Imports ############
|
| 2 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 3 |
+
import os
|
| 4 |
+
from typing import TypedDict, List, Dict, Any, Optional
|
| 5 |
+
from langgraph.graph import StateGraph, START, END
|
| 6 |
+
from langchain_openai import ChatOpenAI
|
| 7 |
+
from langchain_core.messages import HumanMessage
|
| 8 |
+
|
| 9 |
+
from langchain_community.tools import WikipediaQueryRun
|
| 10 |
+
from langchain_community.utilities import WikipediaAPIWrapper
|
| 11 |
+
import string
|
| 12 |
+
|
| 13 |
+
from langchain_experimental.tools import PythonREPLTool
|
| 14 |
+
import ast, json
|
| 15 |
+
|
| 16 |
+
from langchain_community.tools import DuckDuckGoSearchRun
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
########## State ############
|
| 20 |
+
class InfoState(TypedDict):
    """Shared state passed between the LangGraph nodes.

    Only ``question`` is supplied by the caller; every other key is
    filled in progressively by the graph's nodes, hence ``Optional``.
    """

    question: str                     # raw user question (input)
    answer_type: Optional[str]        # routing label: WebInfo / WIKI / MATH / LLM
    answer_code: Optional[str]        # Python code generated for MATH questions
    main_parts: Optional[List[str]]   # keywords extracted by preprocess_text
    tool_answer: Optional[str]        # raw tool output (nodes store a plain string)
    final_answer: Optional[str]       # polished answer returned to the caller
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
######### Nodes ############
|
| 31 |
+
def get_wiki_relate(state: InfoState) -> InfoState:
    """Look up the question's keywords on Wikipedia.

    Reads ``state["main_parts"]`` (keywords produced by ``preprocess_text``)
    and stores the article summary in ``state["tool_answer"]``.  On any
    lookup failure the tool answer becomes an empty string so the graph can
    still continue to ``get_answer``.
    """
    print("Using Wikipedia...")

    # Wikipedia utility: English, single best match, capped summary length.
    wiki = WikipediaAPIWrapper(
        lang="en",
        top_k_results=1,
        doc_content_chars_max=2000,
    )
    wiki_tool = WikipediaQueryRun(api_wrapper=wiki)

    # Guard: preprocess_text may have failed and left main_parts empty/None.
    keywords = state.get("main_parts") or []
    query = " ".join(keywords) + " full wikipedia article about this topic"

    try:
        state["tool_answer"] = wiki_tool.run(query)
    except Exception as exc:
        # Typically a rate limit or network error; log the actual cause
        # (the old hard-coded "Rate limit Exception" hid real failures)
        # and degrade gracefully instead of crashing the graph.
        print(f"Wikipedia lookup failed: {exc}")
        state["tool_answer"] = ""
    return state
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def execute_code(state: InfoState) -> InfoState:
    """Run the generated Python code and capture its printed output.

    Executes ``state["answer_code"]`` (produced by ``get_code``) in a
    Python REPL and stores whatever it prints in ``state["tool_answer"]``.

    SECURITY NOTE: PythonREPLTool executes arbitrary LLM-generated code
    with no sandboxing — acceptable for a demo, not for untrusted input.
    """
    print("Executing Code...")
    python_tool = PythonREPLTool()
    # Guard: get_code may have failed to populate the state; fall back to
    # the same "not valid" marker get_code itself emits for non-math input.
    code = state.get("answer_code") or "print('not valid')"
    state["tool_answer"] = python_tool.run(code)
    return state
|
| 66 |
+
|
| 67 |
+
def get_code(state: InfoState) -> InfoState:
    """Ask Gemini for Python code that answers a math/date/time question.

    Stores the generated code in ``state["answer_code"]``.  Markdown code
    fences are stripped defensively, because models often wrap output in
    ```python ... ``` despite being told not to — unstripped fences would
    make the downstream REPL execution fail.
    """
    print("Getting Code (Gemini)...")
    prompt = (
        f"You are a strict code generator. "
        f"Given the question: '{state['question']}', "
        f"return ONLY valid Python code that computes the answer IF the question is about math, date, or time. "
        f"Otherwise, return exactly: print('not valid')\n\n"
        f"Rules:\n"
        f"- Output ONLY the code or print('not valid')\n"
        f"- No explanations, no markdown, no extra text\n"
        f"- No quotes around the code\n"
        f"- Use print() to show the result\n"
        f"- Import modules only if needed (e.g. datetime, math)"
    )

    model = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
    response = model.invoke([HumanMessage(content=prompt)]).content.strip()

    # Strip markdown fences the model may add despite the instructions.
    if response.startswith("```"):
        response = response.strip("`")
        if response.startswith("python"):
            response = response[len("python"):]
        response = response.strip()

    state["answer_code"] = response
    return state
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def preprocess_text(state: InfoState) -> InfoState:
    """Extract Wikipedia-friendly keywords from the raw question.

    Asks Gemini for a JSON list of 1-3 topic names and stores it in
    ``state["main_parts"]``.  Falls back to an empty list whenever the
    model output cannot be parsed as a list of strings, so downstream
    nodes never receive a dict/str/number masquerading as keywords.
    """
    print("Preprocess text (Gemini)...")
    # Constrain the model to a short JSON list of Wikipedia titles.
    prompt = (
        "We want to find the best-matching English Wikipedia pages for a factual question, "
        "so we must extract only the essential topic names or entities that Wikipedia likely has pages for. "
        "These should include the main subject (e.g., a person, event, place, or concept) and any directly relevant subtopic "
        "(like 'Discography', 'Career', or 'History') if they help narrow the search.\n\n"
        "Rules:\n"
        "- Output 1 to 3 items maximum.\n"
        "- Use English Wikipedia title format (capitalize each main word).\n"
        "- Translate non-English names or terms to English.\n"
        "- Exclude question words, pronouns, and filler terms.\n"
        "- Fix spelling errors if necessary.\n"
        "- Prefer specific Wikipedia topics over vague ones.\n\n"
        "Example:\n"
        "Q: 'Who built the Eiffel Tower?'\n"
        "A: [\"Eiffel Tower\", \"Gustave Eiffel\"]\n\n"
        f"Question: '{state['question']}'\n\n"
        "Output ONLY a valid JSON list as described — no explanations, markdown, or extra formatting."
    )

    model = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
    response = model.invoke([HumanMessage(content=prompt)]).content.strip()

    # Parse defensively: JSON first, then a Python literal, then give up.
    parsed = None
    try:
        parsed = json.loads(response)
    except json.JSONDecodeError:
        try:
            parsed = ast.literal_eval(response)
        except (ValueError, SyntaxError):
            print("⚠️ Model returned invalid content:", response)

    # Only accept a list of strings; anything else becomes the empty fallback.
    if isinstance(parsed, list) and all(isinstance(p, str) for p in parsed):
        state["main_parts"] = parsed
    else:
        if parsed is not None:
            print("⚠️ Model returned a non-list result:", response)
        state["main_parts"] = []

    return state
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def get_answer(state: InfoState) -> InfoState:
    """Produce the final answer from the question plus any tool context.

    Combines ``state["tool_answer"]`` (which is missing on the direct-LLM
    route) with Gemini's general knowledge and stores the result in
    ``state["final_answer"]``.
    """
    print("Getting Answer (Gemini)...")

    # Avoid injecting the literal string "None" into the prompt when no
    # tool node ran before this one (the direct LLM route).
    context = state.get("tool_answer") or ""

    prompt = (
        "You are a knowledgeable assistant that answers questions based on context and common factual knowledge.\n"
        "Use the context first, but if it clearly lacks the needed details, you may rely on well-known public facts "
        "(such as from Wikipedia) that logically complete the answer.\n\n"
        f"Question: {state['question']}\n"
        f"Context:\n{context}\n\n"
        "Instructions:\n"
        "- Focus on producing one short factual answer.\n"
        "- Do not include tool names, prefixes, or metadata.\n"
        "- If the context contains partial hints, you can infer the answer from general knowledge of the same topic.\n"
        "- If absolutely nothing is relevant, reply: I don't know.\n\n"
        "Final Answer:"
    )

    model = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
    state["final_answer"] = model.invoke([HumanMessage(content=prompt)]).content
    return state
|
| 174 |
+
|
| 175 |
+
def get_type(state: InfoState) -> InfoState:
    """Classify the question so the router can pick a tool.

    Stores one of "WIKI", "WebInfo", "MATH" or "LLM" in
    ``state["answer_type"]``.  The model reply is normalised and validated
    because ``route`` uses it as an exact key in the conditional-edge map —
    an unexpected label (extra punctuation, a full sentence) would
    otherwise crash the graph at routing time.
    """
    print("Getting Type (Gemini)...")

    prompt = (
        "According to the Question " + state["question"] +
        " Select the best tool to answer: WIKI if it's an informative or science question, "
        "WebInfo if it needs up-to-date news, MATH if it is about math or date or time, "
        "LLM if anything else or it contains a link. "
        "You're very serious, just give one word from the given options."
    )
    model = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
    raw = model.invoke([HumanMessage(content=prompt)]).content.strip()

    # Keep only a known label; fall back to the generic LLM route so the
    # conditional edges always find a matching key.
    valid = {"WIKI", "WebInfo", "MATH", "LLM"}
    label = raw.strip(string.punctuation + " \n")
    state["answer_type"] = label if label in valid else "LLM"
    return state
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def get_search_results(state: InfoState) -> InfoState:
    """Search the web with DuckDuckGo for up-to-date information.

    Stores the raw search snippets in ``state["tool_answer"]``; on any
    failure (rate limit, network) stores an empty string so ``get_answer``
    can still run on the model's own knowledge.
    """
    print("Searching...")

    search = DuckDuckGoSearchRun()
    try:
        # Search with the full question; the extracted keywords
        # (state["main_parts"]) proved too lossy for web search.
        state["tool_answer"] = search.run(state["question"])
    except Exception as exc:
        # Log the cause instead of swallowing it silently.
        print(f"Search failed: {exc}")
        state["tool_answer"] = ""
    return state
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def route(state: InfoState):
    """Return the routing label chosen by ``get_type``.

    Used as the condition function for the graph's conditional edges, so
    the returned value must match a key of the edge mapping.
    """
    label = state["answer_type"]
    print(label)
    return label
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
################# Graph ################
|
| 209 |
+
def get_graph():
    """Build and compile the question-answering LangGraph workflow.

    Flow: preprocess_text -> get_type -> (web search | code generation |
    direct LLM) -> get_answer.  Returns the compiled graph, ready to
    ``invoke``.
    """
    builder = StateGraph(InfoState)

    # Register every node once; the wiki node is currently disabled in
    # favour of web search.
    # builder.add_node("get_wiki_relate", get_wiki_relate)
    for node_name, node_fn in (
        ("preprocess_text", preprocess_text),
        ("get_answer", get_answer),
        ("get_type", get_type),
        ("get_search_results", get_search_results),
        ("execute_code", execute_code),
        ("get_code", get_code),
    ):
        builder.add_node(node_name, node_fn)

    # Linear prefix: extract keywords, then classify the question.
    builder.add_edge(START, "preprocess_text")
    builder.add_edge("preprocess_text", "get_type")

    # Fan out according to the label returned by route().
    builder.add_conditional_edges(
        "get_type",
        route,
        {
            "WebInfo": "get_search_results",
            "WIKI": "get_search_results",  # "get_wiki_relate",
            "MATH": "get_code",
            "LLM": "get_answer",
        },
    )

    # Converge every branch on the final answer node.
    builder.add_edge("get_search_results", "get_answer")
    # builder.add_edge("get_wiki_relate", "get_answer")
    builder.add_edge("get_code", "execute_code")
    builder.add_edge("execute_code", "get_answer")
    builder.add_edge("get_answer", END)

    return builder.compile()
|
| 251 |
+
|
| 252 |
+
def ask(compiled_graph, question):
    """Run one question through the compiled graph and return its answer."""
    legitimate_result = compiled_graph.invoke({"question": question})
    return legitimate_result['final_answer']
|