import os

from dotenv import load_dotenv
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_openai import AzureChatOpenAI
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import ToolNode, tools_condition

from tools import multiply, add, subtract, divide, modulus, wiki_search, web_search, arvix_search

load_dotenv()

# Load the system prompt from disk once at import time.
with open("system_prompt.txt", "r", encoding="utf-8") as f:
    system_prompt = f.read()

# System message to be prepended to every model call so the agent
# actually follows the loaded prompt.
sys_msg = SystemMessage(content=system_prompt)


def build_graph():
    """Build and compile the tool-calling agent graph.

    Returns:
        A compiled LangGraph state graph consisting of an ``assistant``
        node (Azure OpenAI chat model with the math/search tools bound)
        and a ``tools`` execution node. Control loops assistant -> tools
        -> assistant until the model emits a message with no tool calls.
    """
    # Model configuration comes from the environment (.env via load_dotenv).
    llm = AzureChatOpenAI(
        api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
        azure_deployment=os.getenv("AZURE_OPENAI_MODEL"),
        temperature=0,
        max_tokens=None,
        timeout=None,
        max_retries=2,
    )

    tools = [
        multiply,
        add,
        subtract,
        divide,
        modulus,
        wiki_search,
        web_search,
        arvix_search,  # name matches the project's tools module (sic)
    ]
    llm_with_tools = llm.bind_tools(tools)

    # Node
    def assistant(state: MessagesState):
        """Assistant node: invoke the model with the system prompt prepended."""
        # BUG FIX: the original called invoke(state["messages"]) only, so
        # sys_msg (loaded from system_prompt.txt) was never sent to the LLM.
        return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}

    ## The graph
    builder = StateGraph(MessagesState)

    # Define nodes: these do the work
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))

    # Define edges: these determine how the control flow moves.
    # tools_condition routes to "tools" when the latest message contains
    # tool calls, otherwise ends the run.
    builder.add_edge(START, "assistant")
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")

    return builder.compile()


# Test
if __name__ == "__main__":
    # question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
    # question = "Examine the video at https://www.youtube.com/watch?v=1htKBjuUWec.\n\nWhat does Teal'c say in response to the question \"Isn't that hot?\""
    question = "Who's the current President of France?"

    # Build the graph
    graph = build_graph()

    # Run the graph
    messages = [HumanMessage(content=question)]
    messages = graph.invoke({"messages": messages})
    for m in messages["messages"]:
        m.pretty_print()