Spaces:
Sleeping
Sleeping
File size: 3,984 Bytes
0f9a313 52be2ce 0f9a313 52be2ce 0f9a313 52be2ce 0f9a313 52be2ce 0f9a313 52be2ce 0f9a313 52be2ce 8c89aac 52be2ce |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 |
from langgraph.graph.state import CompiledStateGraph
from langgraph.graph import StateGraph,START, END
from state.state import StateVector
from graph.state_vector_nodes import question_model,research_model
from langgraph.prebuilt import tools_condition
from langgraph.prebuilt import ToolNode
class BuildGraphOptions:
    """Assemble and compile LangGraph workflows over a ``StateVector`` state.

    Two graphs can be produced:

    * :meth:`build_question_graph` — validate inputs, build a question prompt,
      and generate questions.
    * :meth:`build_research_graph` — the question stage above, followed by a
      research stage that prompts an LLM, optionally routes through tools, and
      summarizes the answer.
    """

    def __init__(self, question_model):
        """Store the question model and create an initial graph builder.

        Args:
            question_model: Object providing the question-stage node callables
                (``check_inputs``, ``create_question_prompt_template``,
                ``generate_questions``) and the ``should_continue`` router.
        """
        self.graph_builder = StateGraph(StateVector)  # For invocation
        # NOTE: attribute name kept as-is (no underscore) for backward
        # compatibility with any external readers of this attribute.
        self.questionmodel = question_model

    def load_research_model(self, research_model):
        """Register a research model for later use by build_research_graph()."""
        self.research_model = research_model

    def _add_question_stage(self):
        """Wire the shared question-generation stage into the current builder.

        Adds the check/prompt/generate nodes, sets the entry point, and routes
        ``check_inputs`` via ``should_continue`` (assumed to return either
        ``"create_question_prompt_template"`` or ``"terminate"`` — TODO confirm
        against the question model).
        """
        self.graph_builder.add_node("check_inputs", self.questionmodel.check_inputs)
        self.graph_builder.add_node(
            "create_question_prompt_template",
            self.questionmodel.create_question_prompt_template,
        )
        self.graph_builder.add_node("generate_questions", self.questionmodel.generate_questions)
        # Set entry point
        self.graph_builder.set_entry_point("check_inputs")
        # Add conditional edges
        self.graph_builder.add_conditional_edges(
            "check_inputs",
            self.questionmodel.should_continue,
            {
                "create_question_prompt_template": "create_question_prompt_template",
                "terminate": END,
            },
        )
        self.graph_builder.add_edge("create_question_prompt_template", "generate_questions")

    def build_question_graph(self) -> CompiledStateGraph:
        """Compile and return the question-only workflow.

        Returns:
            The compiled graph: check_inputs -> (conditional) ->
            create_question_prompt_template -> generate_questions -> END.
        """
        # Fresh builder so repeated builds don't fail on duplicate node names.
        self.graph_builder = StateGraph(StateVector)
        self._add_question_stage()
        self.graph_builder.add_edge("generate_questions", END)
        return self.graph_builder.compile()

    def build_research_graph(self, research_model=None) -> CompiledStateGraph:
        """Compile and return the full question + research workflow.

        Args:
            research_model: Object providing ``create_prompt_template``,
                ``tool_calling_llm``, ``summary_answer``, and a ``tools``
                sequence. Optional; falls back to the model registered via
                :meth:`load_research_model`.

        Returns:
            The compiled graph. After question generation, the flow builds a
            research prompt, invokes the tool-calling LLM, optionally executes
            tools, and always ends with a summarized answer.

        Raises:
            AttributeError: If no research model was supplied or registered.
        """
        if research_model is None:
            research_model = self.research_model
        # Fresh builder so repeated builds don't fail on duplicate node names.
        self.graph_builder = StateGraph(StateVector)
        self._add_question_stage()
        self.graph_builder.add_node("create_prompt_template", research_model.create_prompt_template)
        # BUG FIX: StateGraph.add_node takes (name, callable); the original
        # passed unsupported name=/description= keyword arguments, which
        # raises TypeError at build time.
        self.graph_builder.add_node("tool_calling_llm", research_model.tool_calling_llm)
        self.graph_builder.add_node("tools", ToolNode(research_model.tools))
        self.graph_builder.add_node("summary_answer", research_model.summary_answer)
        self.graph_builder.add_edge("generate_questions", "create_prompt_template")
        self.graph_builder.add_edge("create_prompt_template", "tool_calling_llm")
        self.graph_builder.add_conditional_edges(
            "tool_calling_llm",
            # If the latest assistant message is a tool call, tools_condition
            # returns "tools"; otherwise it returns END ("__end__").
            tools_condition,
            {
                "tools": "tools",
                # BUG FIX: tools_condition never returns "summary_answer";
                # map its END result to the summary node so the no-tool path
                # still produces a summarized answer instead of erroring on
                # an unmapped "__end__" value.
                END: "summary_answer",
            },
        )
        self.graph_builder.add_edge("tools", "summary_answer")
        self.graph_builder.add_edge("summary_answer", END)
        return self.graph_builder.compile()
|