File size: 5,438 Bytes
2f9e3a2 b4cd776 e38f8e4 b97774a 2f9e3a2 c285622 2f9e3a2 c285622 b4cd776 2f9e3a2 c285622 b97774a 2f9e3a2 5d8f022 b4cd776 b97774a 2f9e3a2 c285622 2f9e3a2 c285622 2f9e3a2 c285622 b97774a 2f9e3a2 b97774a 2f9e3a2 b97774a 5d8f022 2f9e3a2 c285622 b97774a b4cd776 b97774a c285622 b97774a b4cd776 b97774a b4cd776 b97774a 2f9e3a2 b4cd776 2f9e3a2 b4cd776 c285622 2f9e3a2 c285622 b4cd776 c285622 2f9e3a2 b4cd776 c285622 b4cd776 c285622 b4cd776 2f9e3a2 b4cd776 2f9e3a2 c285622 b4cd776 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 |
from models.models import groq_model, anthropic_model
from tools import taivily_search, serper_search, execute_code, get_youtube_transcript, execute_python_file_url
from langgraph.graph import StateGraph, START, END
from langchain_core.messages import SystemMessage, AIMessage, ToolMessage
from typing import List, TypedDict
from langgraph.prebuilt import ToolNode
# Tool belt bound to the agent model: two web-search backends, YouTube
# transcript retrieval, and code execution (inline snippet or remote file).
# NOTE(review): "taivily_search" looks like a typo for "tavily_search" —
# it must match the name exported by tools.py; confirm there before renaming.
tools = [
    taivily_search,
    serper_search,
    get_youtube_transcript,
    execute_code,
    execute_python_file_url
]
class EvaluationState(TypedDict):
    """Shared state threaded through every node of the evaluation graph."""
    # Running conversation history (AI, tool and system messages).
    messages: List
    # The question to be answered.
    question: str
    # Raw model answer, later normalized by `map_answer`.
    answer: str
    # Context gathered by the tools, fed to the answering model.
    external_information: str
    # Set by `evaluator`: the gathered context suffices to answer.
    has_enough_information: bool
    # Set by `validator`: the answer meets the format requirements.
    is_valid_answer: bool
    # Visit counts per router ("validator", "iteration") used to bound loops.
    step_counter: dict[str, int]
# Groq model with the tool schemas attached so it can emit tool calls.
bounded_model_groq = groq_model.bind_tools(tools)
def call_node(state: EvaluationState):
    """Invoke the tool-bound Groq model on the conversation so far.

    The model's reply (which may contain tool calls) is appended to the
    message history; the mutated state is returned.
    """
    reply = bounded_model_groq.invoke(state["messages"])
    state["messages"].append(reply)
    return state
# Prebuilt LangGraph node that executes the tool calls found in the last AI message.
tool_node = ToolNode(tools)
def answer_question(state: EvaluationState):
    """Answer the question from the gathered context.

    Sends the question plus the collected external information to the
    Anthropic model under a strict answer-format instruction, records the
    reply in the history, and stores it in ``answer``.
    """
    question = state["question"]
    external_information = state["external_information"]
    prompt = f"""## Instruction \n I will ask you a question. Report your thoughts, and finish with only YOUR FINAL ANSWER.
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
## Question
{question}
## Relevant information
{external_information}
## answer"""
    reply = anthropic_model.invoke(prompt)
    state["messages"].append(AIMessage(content=reply.content))
    state["answer"] = reply.content
    return state
def map_answer(state: EvaluationState):
    """Normalize the raw answer into the final-answer format.

    Returns a partial state update containing only the mapped ``answer``.
    """
    raw_answer = state["answer"]
    prompt = f"""## Instruction
map the answer to the final answer. The final answer should be a number, string or a list of numbers and/or strings. Remove quotes.
## Answer
{raw_answer}
## Final answer"""
    mapped = anthropic_model.invoke(prompt)
    return {"answer": mapped.content}
def validator(state: EvaluationState):
    """Check that the mapped answer satisfies the format requirements.

    Asks the Anthropic model for a yes/no verdict, stores the boolean in
    ``is_valid_answer``, and appends the verdict to the message history.
    """
    answer = state["answer"]
    result = anthropic_model.invoke(f"Validate if the answer fits the next requirements: \n\n{answer}\n\nThe answer should be a number, string or a list of numbers and/or strings. If the answer fits the requirements, return just 'yes', otherwise return 'no'.")
    # Fix: the previous exact `startswith("yes")` missed replies such as
    # "Yes" or " yes." — normalize whitespace and case before checking.
    state["is_valid_answer"] = result.content.strip().lower().startswith("yes")
    state["messages"].append(SystemMessage(content=f"Validator: {result.content}"))
    return state
def route_validator(state):
    """Route after validation: retry the mapping, or stop.

    Allows at most two retries of ``map_answer``; afterwards the graph
    ends regardless of validity.
    """
    counter = state["step_counter"]
    visits = counter.get("validator", 0) + 1
    counter["visits"] = visits  # placeholder; real key set below
    counter["validator"] = visits
    del counter["visits"]
    if not state["is_valid_answer"] and visits <= 2:
        return "map_answer"
    return END
def evaluator(state):
    """Decide whether the gathered context suffices to answer the question.

    Asks the Anthropic model for a yes/no verdict, stores the boolean in
    ``has_enough_information``, and appends the verdict to the history.
    """
    prompt = f"""## Instruction
Answer just "yes" (without the quotes), if the context information is enough to answer the question.
## Question
{state["question"]}
## Relevant information
{state["external_information"]}
"""
    result = anthropic_model.invoke(prompt)
    # Fix: exact `startswith("yes")` missed replies such as "Yes" or " yes." —
    # normalize whitespace and case (same fix as in `validator`).
    state["has_enough_information"] = result.content.strip().lower().startswith("yes")
    state["messages"].append(SystemMessage(content=f"Evaluator: {result.content}"))
    return state
def route_iteration(state):
    """Route after evaluation: answer now, or loop back for more research.

    Proceeds to ``answer_question`` once the context is sufficient, or
    unconditionally after the third search pass to bound the loop.
    """
    counter = state["step_counter"]
    passes = counter.get("iteration", 0) + 1
    counter["iteration"] = passes
    enough = state["has_enough_information"]
    return "answer_question" if enough or passes > 2 else "agent"
def build_workflow():
    """Assemble and compile the evaluation graph.

    Flow: agent -> action -> evaluator, looping back to the agent until
    the context suffices (or 3 passes), then answer_question ->
    map_answer -> validator, retrying the mapping until the answer is
    valid (or the retry budget is spent).
    """
    graph = StateGraph(EvaluationState)

    nodes = {
        "agent": call_node,
        "action": tool_node,
        "evaluator": evaluator,
        "answer_question": answer_question,
        "map_answer": map_answer,
        "validator": validator,
    }
    for name, runnable in nodes.items():
        graph.add_node(name, runnable)

    graph.add_edge(START, "agent")
    graph.add_edge("agent", "action")
    graph.add_edge("action", "evaluator")
    graph.add_conditional_edges(
        "evaluator",
        route_iteration,
        {"answer_question": "answer_question", "agent": "agent"},
    )
    graph.add_edge("answer_question", "map_answer")
    graph.add_edge("map_answer", "validator")
    graph.add_conditional_edges(
        "validator",
        route_validator,
        {"map_answer": "map_answer", END: END},
    )
    return graph.compile()
""" if __name__ == "__main__":
graph = build_workflow()
mermaid_text = graph.get_graph().draw_mermaid()
print(mermaid_text) """ |