| | import os |
| | from dotenv import load_dotenv |
| | from langchain_core.messages import SystemMessage, HumanMessage |
| | from langgraph.prebuilt import ToolNode |
| | from langgraph.graph import START, StateGraph, MessagesState |
| | from langgraph.prebuilt import tools_condition |
| | from langchain_openai import AzureChatOpenAI |
| | from tools import multiply, add, subtract, divide, modulus, wiki_search, web_search, arvix_search |
| |
|
# Load environment variables (Azure OpenAI credentials etc.) from a local .env.
load_dotenv()

# The agent's system prompt lives in a plain-text file next to this script;
# read it once at import time and wrap it as a SystemMessage.
with open("system_prompt.txt", "r", encoding="utf-8") as prompt_file:
    system_prompt = prompt_file.read()

sys_msg = SystemMessage(content=system_prompt)
| |
|
| | |
def build_graph():
    """Build and compile the tool-calling agent graph.

    The graph is the standard ReAct-style loop:

        START -> assistant -> (tools_condition) -> tools -> assistant -> ...

    where ``tools_condition`` routes to the "tools" node when the latest AI
    message contains tool calls and ends the run otherwise.

    Returns:
        A compiled LangGraph runnable accepting ``{"messages": [...]}`` and
        returning the final MessagesState.
    """
    llm = AzureChatOpenAI(
        api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
        azure_deployment=os.getenv("AZURE_OPENAI_MODEL"),
        temperature=0,      # deterministic output for a tool-using agent
        max_tokens=None,
        timeout=None,
        max_retries=2,
    )

    # NOTE(review): `arvix_search` is spelled this way in tools.py (an
    # "arxiv" typo); keep the name until the tools module itself is renamed.
    tools = [
        multiply,
        add,
        subtract,
        divide,
        modulus,
        wiki_search,
        web_search,
        arvix_search,
    ]

    llm_with_tools = llm.bind_tools(tools)

    def assistant(state: MessagesState):
        """Assistant node: invoke the LLM with the system prompt prepended.

        Bug fix: the module-level ``sys_msg`` (loaded from system_prompt.txt)
        was previously built but never used, so the model ran without its
        system prompt. Prepend it to the conversation on every call.
        """
        return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}

    builder = StateGraph(MessagesState)

    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))

    builder.add_edge(START, "assistant")
    # Route to "tools" when the last AI message requested a tool call,
    # otherwise terminate the graph.
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")

    return builder.compile()
| |
|
| | |
if __name__ == "__main__":
    # Smoke test: run the agent once on a single factual question and
    # pretty-print the full message trace.
    question = "Who's the current President of France?"

    graph = build_graph()

    result = graph.invoke({"messages": [HumanMessage(content=question)]})
    for message in result["messages"]:
        message.pretty_print()