from typing import Annotated, Optional

from langgraph.graph import StateGraph, MessagesState, START, END
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langgraph.prebuilt import ToolNode, tools_condition
from langchain_ollama import ChatOllama

from agent.tools import TOOLS


class State(MessagesState):
    # Path to an optional attachment for the current question; may be absent
    # or "" when the question has no associated file.
    file_path: str


# Local Ollama-hosted chat model; swap the model name here to experiment.
model = ChatOllama(model="qwen3:32b")
# model = ChatOllama(model="llama3.2:3b")
model_with_tools = model.bind_tools(TOOLS)


# Base system prompt. Built once at module level; call_model appends the
# file-path hint per invocation when the state carries one.
_SYSTEM_PROMPT = """
You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
Instructions for the tools:
- If you need information from the web, you must use both the web_search and wikipedia_search tools, unless the question mentions wikipedia. Then, you must use only the wikipedia_search tool.
Do not forget to use the FINAL ANSWER: [YOUR FINAL ANSWER] template!!!
"""


def call_model(state: State):
    """Invoke the tool-bound model on the conversation and return its reply.

    BUG FIX: the original began with an unconditional
    ``return {"messages": [AIMessage(content="FINAL ANSWER: right")]}``,
    leftover debug/stub code that made everything below unreachable — the
    agent never actually called the model. Removed.

    Also fixed:
    - ``state["file_path"]`` raised ``KeyError`` when the caller did not
      supply ``file_path`` (the ``__main__`` demo below doesn't); use
      ``.get`` so the key is optional.
    - Prompt typos: "FINAl ANSWER" -> "FINAL ANSWER", "acces" -> "access",
      so the closing reminder matches the template the model must emit.

    Returns:
        dict with a single-element ``messages`` list containing the model's
        response, per the LangGraph add-messages convention.
    """
    system_prompt = _SYSTEM_PROMPT
    # Truthiness covers both a missing key and an empty string.
    if state.get("file_path"):
        system_prompt += (
            f"\n\nYou have access to a file at {state['file_path']}. "
            "You can use it to answer the question. "
            "Use this file path as input to relevant tools."
        )
    result = model_with_tools.invoke(
        [SystemMessage(content=system_prompt)] + state["messages"]
    )
    return {"messages": [result]}


def build_agent():
    """Compile and return the ReAct-style graph: model <-> tools loop.

    ``tools_condition`` routes to the "tools" node when the model emitted
    tool calls, otherwise to END; tool results feed back into "call_model".
    """
    graph_builder = StateGraph(State)
    graph_builder.add_node("call_model", call_model)
    graph_builder.add_node("tools", ToolNode(TOOLS))
    graph_builder.add_edge(START, "call_model")
    graph_builder.add_conditional_edges("call_model", tools_condition)
    graph_builder.add_edge("tools", "call_model")
    return graph_builder.compile()


if __name__ == "__main__":
    # Example usage
    agent = build_agent()
    output = agent.invoke({"messages": [HumanMessage(content="Hello, how are you?")]})
    for msg in output["messages"]:
        msg.pretty_print()