"""Minimal ReAct-style agent wired together with LangGraph.

Builds a two-node graph (analyze_question -> search_tool) and, when run
as a script, invokes it once with a hard-coded weather question.
"""

from dotenv import load_dotenv
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, StateGraph

from state import AgentState
from tools import search_tool

# Load OPENAI_API_KEY (and any other settings) from a local .env file.
load_dotenv()

# Classic ReAct prompt. Every placeholder is single-braced so that
# PromptTemplate treats it as a template variable. The original used
# {{agent_scratchpad}}, which renders as the literal text
# "{agent_scratchpad}" and silently drops the scratchpad value supplied
# at invoke time.
template = """Your name is Atom, you're an advance AI Agent powered by a powerful LLM. Your task is to answer the following questions as best you can. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin!

Question: {input}
Thought:{agent_scratchpad}"""

prompt_template = PromptTemplate.from_template(template)
# temperature=0 keeps the model's tool-routing output deterministic.
llm = ChatOpenAI(temperature=0)


def analyze_question(state: AgentState) -> dict:
    """Run the ReAct prompt over the incoming question.

    Returns a partial-state update ``{"thought": response}`` rather than
    mutating ``state`` in place: LangGraph merges a node's *returned*
    mapping into the shared state, whereas an in-place mutation with no
    return value is discarded by the framework.
    """
    question = state["question"]
    tools = state["tools"]  # temp: tool metadata threaded through state
    tool_names = state["tool_names"]  # temp
    prompt = prompt_template.invoke(
        {
            "input": question,
            "tools": tools,
            "tool_names": tool_names,
            # Now that the template takes {agent_scratchpad} as a real
            # variable, feed it from state (empty on the first turn).
            "agent_scratchpad": state.get("agent_scratchpad", ""),
        }
    )
    response = llm.invoke(prompt)
    update = {"thought": response}
    # Debug trace of the state as it will look after the merge.
    print("\n STATE", {**state, **update})
    return update


def create_final_answer(state: AgentState) -> dict:
    """Create the final answer.

    NOTE(review): still a stub — not wired into the graph yet. Returns an
    empty update so it is a valid LangGraph node if added later.
    """
    return {}


def _build_agent():
    """Compile the graph: START -> analyze_question -> search_tool -> END."""
    builder = StateGraph(AgentState)
    builder.add_node("analyze_question", analyze_question)
    builder.add_node("search_tool", search_tool)
    builder.add_edge(START, "analyze_question")
    builder.add_edge("analyze_question", "search_tool")
    builder.add_edge("search_tool", END)
    return builder.compile()


agent = _build_agent()

# Guard the demo invocation so importing this module does not fire an
# LLM/search call as a side effect.
if __name__ == "__main__":
    agent.invoke(
        {
            "input": "whats the current weather in Orlando?",
            "question": "whats the current weather in Orlando?",
            "tools": "search_tool",
            "agent_scratchpad": "",
            "tool_names": "search_tool",
        }
    )