# agent.py
"""Minimal single-step tool-routing agent.

Flow: build a prompt listing the registered tools, ask the local LLM which
tool to use, parse its "Tool: ... / Tool Input: {...}" reply, run that tool,
and print the result.
"""

import ast
import re
from typing import Optional

from langchain_core.messages import HumanMessage, ToolMessage
from langchain_ollama.chat_models import ChatOllama

from configs.config import Config
from configs.registry import TOOL_REGISTRY
from tools.wiki import wikipedia_search_tool

# Module-level configuration.
env = Config()
llm = env.LOCAL_LLM  # NOTE(review): presumably a chat model instance — see Config
tools_registry = TOOL_REGISTRY
tools_registery = tools_registry  # backward-compat alias for the original misspelling


def generate_prompt(query: str) -> str:
    """Return a tool-selection prompt for *query* listing every registered tool.

    Each registry entry is assumed to be a mapping with a 'description' key
    (and a 'tool' key, used later in main) — TODO confirm against registry.
    """
    tool_list = "\n".join(
        f"- {name}: {meta['description']}" for name, meta in tools_registry.items()
    )
    return f"""
You are a smart assistant that decides which tool to use based on user queries.

User Query: "{query}"

Available tools:
{tool_list}

Respond in this format:
Tool: [tool_name]
Tool Input: [Python dict of parameters]
"""


def parse_tool_selection(response: str) -> tuple[str, dict]:
    """Extract (tool_name, tool_input) from the LLM's formatted reply.

    Raises:
        ValueError: if either field is missing, or the parsed input is not a dict.
        SyntaxError: if the "Tool Input" text is not a valid Python literal.
    """
    tool_match = re.search(r"Tool:\s*(\w+)", response)
    # re.DOTALL lets the dict span multiple lines — LLMs often pretty-print it.
    input_match = re.search(r"Tool Input:\s*(\{.*\})", response, re.DOTALL)
    if not tool_match or not input_match:
        raise ValueError("Failed to parse tool selection.")
    tool_name = tool_match.group(1)
    # literal_eval (never eval) keeps parsing of model output safe.
    tool_input = ast.literal_eval(input_match.group(1))
    if not isinstance(tool_input, dict):
        raise ValueError("Failed to parse tool selection.")
    return tool_name, tool_input


def main(query: Optional[str] = None):
    """Route *query* to the tool the LLM selects and print the tool's output.

    Prints diagnostics and returns early on any failure (missing query,
    unparseable LLM reply, unknown tool, or tool error).
    """
    # Bug fix: the original called query.strip() unconditionally, so the
    # documented default of None crashed with AttributeError.
    if query is None or not query.strip():
        print("No query provided.")
        return
    user_query = query.strip()

    # 1. Generate selection prompt and ask the LLM to pick a tool.
    prompt = generate_prompt(user_query)
    response = llm.invoke([HumanMessage(content=prompt)])

    # 2. Parse tool selection.
    try:
        tool_name, tool_input = parse_tool_selection(response.content)
    except Exception as e:
        print("Error parsing tool selection:", e)
        print("LLM response was:", response.content)
        return

    # 3. Run selected tool.
    tool_entry = tools_registry.get(tool_name)
    if not tool_entry:
        print(f"Tool '{tool_name}' not found.")
        return

    tool = tool_entry["tool"]
    try:
        result = tool.invoke(tool_input)
    except Exception as e:
        print(f"Error running tool '{tool_name}': {e}")
        return

    # BaseTool.invoke usually returns the raw tool output (e.g. str), which
    # has no .content — fall back to the value itself in that case.
    print("Final Answer:", getattr(result, "content", result))


if __name__ == "__main__":
    query = "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of English Wikipedia."
    main(query)