Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,10 +1,8 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
from
|
| 4 |
-
import base64
|
| 5 |
-
|
| 6 |
-
import requests
|
| 7 |
|
|
|
|
| 8 |
from langgraph.graph import StateGraph, START, END
|
| 9 |
from typing import Annotated, Literal
|
| 10 |
from typing_extensions import TypedDict
|
|
@@ -19,28 +17,20 @@ from langchain_experimental.utilities import PythonREPL
|
|
| 19 |
class SmallerQuestions(BaseModel):
|
| 20 |
questions: list[str]
|
| 21 |
|
| 22 |
-
class Question(BaseModel):
|
| 23 |
-
question: str
|
| 24 |
-
nature: str
|
| 25 |
-
context: str
|
| 26 |
-
middle_answer: Annotated[list, operator.add]
|
| 27 |
-
|
| 28 |
class Nature(BaseModel):
|
| 29 |
nature: Literal["research", "computation", "interpretation"]
|
| 30 |
|
| 31 |
-
class SearchableQuestion(BaseModel):
|
| 32 |
-
question: str
|
| 33 |
-
key_words: list[str]
|
| 34 |
-
context: str
|
| 35 |
-
|
| 36 |
-
class Code(BaseModel):
|
| 37 |
-
code: str
|
| 38 |
-
|
| 39 |
class OverallState(TypedDict):
|
| 40 |
-
|
| 41 |
-
smaller_questions: list[str]
|
| 42 |
-
|
| 43 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
|
| 45 |
#Nodes
|
| 46 |
|
|
@@ -48,83 +38,87 @@ def BreakQuestion(state: OverallState):
|
|
| 48 |
prompt = """Task:
|
| 49 |
Break the following question down into a maximum of 7 smaller questions.
|
| 50 |
Your response must be a list in which each item corresponds to one smaller question.
|
| 51 |
-
Beware that the answer to each of the questions you list should lead to the next question without holdbacks.
|
| 52 |
|
| 53 |
Question:
|
| 54 |
-
""" + state["
|
| 55 |
response = llm.with_structured_output(SmallerQuestions).invoke(prompt)
|
| 56 |
-
return {smaller_questions: response.questions
|
|
|
|
| 57 |
|
| 58 |
-
def DecideNature(state:
|
| 59 |
prompt = """Task:
|
| 60 |
-
The following question is
|
| 61 |
-
Computation is a question that requires a
|
| 62 |
Decide which of those is the best approach and return only the string corresponding to the one you choose, and nothing else.
|
| 63 |
Question:
|
| 64 |
-
""" + state["
|
| 65 |
response = llm.with_structured_output(Nature).invoke(prompt)
|
| 66 |
-
return {nature: response.nature}
|
| 67 |
|
| 68 |
-
def KeyWords(state:
|
| 69 |
prompt = """Task:
|
| 70 |
-
Gather the
|
| 71 |
Question:
|
| 72 |
-
""" + state["
|
| 73 |
-
response = llm.
|
| 74 |
-
return {key_words: response.
|
| 75 |
|
| 76 |
-
def WikiSearch(state:
|
| 77 |
wikipedia_api = WikipediaAPIWrapper(top_k_results=1,doc_content_chars_max=1000)
|
| 78 |
wikipedia = WikipediaQueryRun(api_wrapper=wikipedia_api)
|
| 79 |
-
result = wikipedia.run({query: state["key_words"]})
|
| 80 |
-
return {
|
| 81 |
|
| 82 |
-
def WebSearch(state:
|
| 83 |
search_tool = DuckDuckGoSearchRun()
|
| 84 |
result = search_tool.invoke(state["key_words"])
|
| 85 |
-
return {
|
| 86 |
|
| 87 |
-
def ContextAnswer(state:
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
Context:
|
| 91 |
-
""" + state["context"] + "Question: " + state["question"]
|
| 92 |
-
response = llm.invoke(prompt)
|
| 93 |
-
return {middle_answer: [response]}
|
| 94 |
|
| 95 |
-
def Computation(state:
|
| 96 |
python_repl = PythonREPL()
|
| 97 |
prompt = """Task:
|
| 98 |
The following question requires a computation to be answered. Create a code that solves the problem.
|
| 99 |
Your response must be only the code that solves the problem and nothing else.
|
| 100 |
Question:
|
| 101 |
-
""" + state["
|
| 102 |
-
response = llm.
|
| 103 |
-
code_result = python_repl.run(response.
|
| 104 |
-
structured_answer = "The result of the following problem is " + code_result + "Problem: " + state["
|
| 105 |
-
return {
|
| 106 |
|
| 107 |
-
def Reasoning(state:
|
| 108 |
prompt = """Task:
|
| 109 |
-
Answer the following question using your reasoning. Your response must
|
| 110 |
Question:
|
| 111 |
-
""" + state["
|
| 112 |
response = llm.invoke(prompt)
|
| 113 |
-
return {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 114 |
|
| 115 |
def FinalAnswer(state: OverallState):
|
| 116 |
-
middle_answers = "\n\n".join(state["
|
| 117 |
prompt = """Task:
|
| 118 |
You will receive a question and some information from which you must be able to solve the question completely.
|
| 119 |
Do your best to keep your response as close to the given information as possible. Your response should contain only the final answer to the question and nothing else.
|
| 120 |
Question:
|
| 121 |
-
""" + state["
|
| 122 |
response = llm.invoke(prompt)
|
| 123 |
-
return {answer: response}
|
| 124 |
|
| 125 |
#Edges
|
| 126 |
|
| 127 |
-
def Nature(state:
|
| 128 |
if state["nature"] == "computation":
|
| 129 |
return Send("computation", state)
|
| 130 |
elif state["nature"] == "research":
|
|
@@ -132,9 +126,27 @@ def Nature(state: Question):
|
|
| 132 |
else:
|
| 133 |
return Send("interpretation", state)
|
| 134 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 135 |
|
| 136 |
builder = StateGraph(OverallState)
|
| 137 |
builder.add_node("break_question", BreakQuestion)
|
|
|
|
| 138 |
builder.add_node("define_nature", DecideNature)
|
| 139 |
builder.add_node("key_words", KeyWords)
|
| 140 |
builder.add_node("wiki_search", WikiSearch)
|
|
@@ -146,11 +158,13 @@ builder.add_node("final_answer", FinalAnswer)
|
|
| 146 |
builder.add_edge(START, "break_question")
|
| 147 |
builder.add_conditional_edges("define_nature", Nature)
|
| 148 |
builder.add_edge("key_words", "wiki_search")
|
| 149 |
-
builder.add_edge("
|
| 150 |
builder.add_edge("web_search", "answer")
|
| 151 |
-
builder.add_edge("
|
| 152 |
-
builder.add_edge("
|
| 153 |
-
builder.add_edge("
|
|
|
|
|
|
|
| 154 |
builder.add_edge("final_answer", END)
|
| 155 |
|
| 156 |
graph = builder.compile()
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from dotenv import load_dotenv
|
| 3 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
|
|
|
|
|
|
|
|
|
| 4 |
|
| 5 |
+
import operator
|
| 6 |
from langgraph.graph import StateGraph, START, END
|
| 7 |
from typing import Annotated, Literal
|
| 8 |
from typing_extensions import TypedDict
|
|
|
|
| 17 |
# Structured-output schema used by BreakQuestion: the decomposition of the
# user's question. (Comment kept outside the class so no docstring leaks into
# the schema description sent to the model.)
class SmallerQuestions(BaseModel):
    questions: list[str]  # ordered sub-questions, at most 7 per the prompt
|
| 19 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
# Structured-output schema used by DecideNature: which approach answers the
# current sub-question.
# NOTE(review): this class is shadowed by the edge function `def Nature(...)`
# defined further down; by the time DecideNature executes, the module-level
# name `Nature` refers to that function, so `llm.with_structured_output(Nature)`
# receives the wrong object. One of the two should be renamed.
class Nature(BaseModel):
    nature: Literal["research", "computation", "interpretation"]
|
| 22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
class OverallState(TypedDict):
    # Shared LangGraph state.
    # Fix: PEP 589 forbids default values on TypedDict fields — the previous
    # `= ""` / `= []` assignments are not honored at runtime and are rejected
    # by type checkers, so they were removed.
    initial_question: str          # the user's original question
    smaller_questions: list[str]   # decomposition produced by BreakQuestion
    current_question: str          # sub-question currently being processed
    context: str                   # research context for the current question
    key_words: str                 # search keywords extracted by KeyWords
    wiki_results: str              # raw Wikipedia search output
    web_results: str               # raw DuckDuckGo search output
    nature: Literal["research", "computation", "interpretation"]
    # Reducer channel: node returns are concatenated across sub-questions.
    middle_answers: Annotated[list, operator.add]
    answer: str                    # final synthesized answer
|
| 34 |
|
| 35 |
#Nodes
|
| 36 |
|
|
|
|
| 38 |
def BreakQuestion(state: OverallState):
    # Node: decompose the user's question into at most 7 ordered sub-questions
    # and position the cursor on the first one.
    prompt = """Task:
Break the following question down into a maximum of 7 smaller questions.
Your response must be a list in which each item corresponds to one smaller question.
Beware that the answer to each one of the questions you list should lead to the next question without holdbacks.

Question:
""" + state["initial_question"]
    response = llm.with_structured_output(SmallerQuestions).invoke(prompt)
    # NOTE(review): assumes the model returned at least one question; an empty
    # list would raise IndexError on the subscript below.
    return {"smaller_questions": response.questions,
            "current_question": response.questions[0]}
|
| 48 |
|
| 49 |
+
def DecideNature(state: OverallState, *, _nature_schema=Nature):
    """Node: classify the current sub-question as research / computation /
    interpretation and store the verdict under state["nature"].

    Fix: `_nature_schema` is bound at definition time, while the name `Nature`
    still refers to the pydantic model declared above. The module later defines
    an *edge function* also named `Nature`, which shadows the model; without
    the early binding, `with_structured_output(Nature)` would receive that
    function at call time and fail. Keyword-only with a default, so callers
    (LangGraph) are unaffected.
    """
    prompt = """Task:
The following question is answerable via one of the following three approaches: "computation", "research" or "interpretation".
Computation is a question that requires a Python interpreter; research is a question that requires a web search in sites like Wikipedia; interpretation is a question that needs only reasoning to be answered.
Decide which of those is the best approach and return only the string corresponding to the one you choose, and nothing else.
Question:
""" + state["current_question"]
    response = llm.with_structured_output(_nature_schema).invoke(prompt)
    return {"nature": response.nature}
|
| 58 |
|
| 59 |
+
def KeyWords(state: OverallState):
    # Node: extract search keywords from the current sub-question; feeds both
    # the Wikipedia and DuckDuckGo search nodes.
    prompt = """Task:
Gather the keywords from the following question to perform a search. Your response must return only keywords and nothing else.
Question:
""" + state["current_question"]
    response = llm.invoke(prompt)
    return {"key_words": response.content}
|
| 66 |
|
| 67 |
+
def WikiSearch(state: OverallState):
    # Node: query Wikipedia with the extracted keywords (top result only,
    # truncated to 1000 characters to keep the prompt small).
    wikipedia_api = WikipediaAPIWrapper(top_k_results=1,doc_content_chars_max=1000)
    wikipedia = WikipediaQueryRun(api_wrapper=wikipedia_api)
    result = wikipedia.run({"query": state["key_words"]})
    return {"wiki_results": result}
|
| 72 |
|
| 73 |
+
def WebSearch(state: OverallState):
    # Node: run a DuckDuckGo web search with the extracted keywords.
    search_tool = DuckDuckGoSearchRun()
    result = search_tool.invoke(state["key_words"])
    return {"web_results": result}
|
| 77 |
|
| 78 |
+
def ContextAnswer(state: OverallState):
    """Node ("answer"): merge the search results into a context block and
    answer the current sub-question from it.

    Fix: the previous version only assembled `context` and returned it — the
    research branch never appended to `middle_answers`, so researched
    sub-questions were silently dropped from the final synthesis (and
    `context` itself is cleared again by AdvanceToNextQuestion). The LLM
    answer step is restored here.
    """
    context = "Wiki results: " + state.get("wiki_results", "No wiki results found") + "\n\n Web results: " + state.get("web_results", "No web results found")
    prompt = """Task:
Answer the following question using only the information in the given context. Your response must contain only the answer to the question and nothing else.
Context:
""" + context + "\nQuestion: " + state["current_question"]
    response = llm.invoke(prompt)
    return {"context": context, "middle_answers": [response.content]}
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
|
| 82 |
+
def Computation(state: OverallState):
    """Node: answer a computational sub-question by generating Python code
    and executing it in a REPL.

    Fixes: (1) PythonREPL.run returns the *captured stdout* of the snippet,
    so code that merely computes a value yields an empty result — the prompt
    now instructs the model to print the final result; (2) a separator was
    missing before "Problem:" in the structured answer.

    NOTE(review): PythonREPL executes LLM-generated code with no sandboxing —
    acceptable only for trusted deployments.
    """
    python_repl = PythonREPL()
    prompt = """Task:
The following question requires a computation to be answered. Create a code that solves the problem.
Your response must be only the code that solves the problem and nothing else. Make sure the code prints the final result.
Question:
""" + state["current_question"]
    response = llm.invoke(prompt)
    code_result = python_repl.run(response.content)
    structured_answer = "The result of the following problem is " + code_result + "\nProblem: " + state["current_question"]
    return {"middle_answers": [structured_answer]}
|
| 93 |
|
| 94 |
+
def Reasoning(state: OverallState):
    # Node ("interpretation"): answer the sub-question with the LLM's own
    # reasoning — no external tools involved.
    prompt = """Task:
Answer the following question using your reasoning. Your response must contain only the answer to the question and nothing else.
Question:
""" + state["current_question"]
    response = llm.invoke(prompt)
    return {"middle_answers": [response.content]}
|
| 101 |
+
|
| 102 |
+
def AdvanceToNextQuestion(state: OverallState):
    """Node: move `current_question` to the next sub-question and clear the
    research context; no-op (empty update) when already on the last one.

    NOTE(review): list.index() assumes the sub-questions are unique —
    duplicates would pin the cursor to the first occurrence.
    """
    questions = state["smaller_questions"]
    position = questions.index(state["current_question"])
    if position >= len(questions) - 1:
        return {}
    return {"current_question": questions[position + 1],
            "context": ""}
|
| 108 |
|
| 109 |
def FinalAnswer(state: OverallState):
    # Node: synthesize the final answer from the accumulated per-question
    # answers and the original question.
    middle_answers = "\n\n".join(state["middle_answers"])
    prompt = """Task:
You will receive a question and some information from which you must be able to solve the question completely.
Do your best to keep your response as close to the given information as possible. Your response should contain only the final answer to the question and nothing else.
Question:
""" + state["initial_question"] + "Context: " + middle_answers
    response = llm.invoke(prompt)
    return {"answer": response.content}
|
| 118 |
|
| 119 |
#Edges
|
| 120 |
|
| 121 |
+
def Nature(state: OverallState):
|
| 122 |
if state["nature"] == "computation":
|
| 123 |
return Send("computation", state)
|
| 124 |
elif state["nature"] == "research":
|
|
|
|
| 126 |
else:
|
| 127 |
return Send("interpretation", state)
|
| 128 |
|
| 129 |
+
def NextQuestion(state: OverallState):
    """Conditional edge after "advance": stop once the cursor sits on the
    last sub-question, otherwise loop back to nature classification.

    NOTE(review): AdvanceToNextQuestion has already moved the cursor before
    this runs, so the graph routes to final_answer as soon as the *last*
    sub-question becomes current — that last sub-question is never actually
    answered. Confirm whether this is intended.
    """
    last_question = state["smaller_questions"][-1]
    if state["current_question"] != last_question:
        return "define_nature"
    return "final_answer"
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
# Pull GOOGLE_API_KEY (and any other settings) from a local .env file.
load_dotenv()

# Shared chat model used by every node above (bound at call time, so defining
# it after the node functions is fine as long as it precedes graph execution).
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro", # or "gemini-1.5-flash" for faster/cheaper
    temperature=0,          # deterministic output for routing/structured calls
    max_tokens=1024,
    timeout=30,
    max_retries=2,
    google_api_key=os.getenv("GOOGLE_API_KEY")
)
|
| 146 |
|
| 147 |
# Assemble the LangGraph state machine over the shared OverallState.
builder = StateGraph(OverallState)
builder.add_node("break_question", BreakQuestion)
builder.add_node("advance", AdvanceToNextQuestion)
builder.add_node("define_nature", DecideNature)
builder.add_node("key_words", KeyWords)
builder.add_node("wiki_search", WikiSearch)
|
|
|
|
| 158 |
builder.add_edge(START, "break_question")
# Fix: "break_question" had no outgoing edge, so the run ended right after
# decomposing the question; route it into nature classification.
builder.add_edge("break_question", "define_nature")
# Route each sub-question by its decided nature (Send targets one of
# "computation", the research chain, or "interpretation").
builder.add_conditional_edges("define_nature", Nature)
# Research branch: fan out to both search tools, then join at "answer".
builder.add_edge("key_words", "wiki_search")
builder.add_edge("key_words", "web_search")
builder.add_edge("web_search", "answer")
builder.add_edge("wiki_search", "answer")
# All three branches converge on the cursor-advancing node.
builder.add_edge("answer", "advance")
builder.add_edge("computation", "advance")
builder.add_edge("interpretation", "advance")
# Loop back for the next sub-question, or synthesize the final answer.
builder.add_conditional_edges("advance", NextQuestion)
builder.add_edge("final_answer", END)

graph = builder.compile()
|