"""Agent entry point.

Wires up a smolagents ``CodeAgent`` backed by a Hugging Face hosted
inference model and exposes :func:`run_agent`, which ``app.py`` calls
to answer a single question.
"""

import os

import yaml
from smolagents import CodeAgent, InferenceClientModel

from tools import search_tool, visit_webpage, handle_file, transcription_tool

# Hosted inference model. HF_TOKEN must be set in the environment;
# os.getenv returns None otherwise and the client will fail to authenticate.
model = InferenceClientModel(
    model_id="Qwen/Qwen2.5-72B-Instruct",
    token=os.getenv("HF_TOKEN"),
)

# Load the prompt templates from the YAML file.
# Explicit UTF-8 so parsing does not depend on the platform default encoding.
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)

# Initialize the agent with the research/transcription tool set.
agent = CodeAgent(
    model=model,
    tools=[search_tool, visit_webpage, handle_file, transcription_tool],
    max_steps=20,
    verbosity_level=1,
    additional_authorized_imports=[
        "pandas",
        "numpy",
        "re",
        "math",
        "collections",
        "datetime",
        "json",
    ],
    prompt_templates=prompt_templates,
)


def run_agent(question: str) -> str:
    """Run the agent on *question* and return its answer as stripped text.

    This function is called from app.py.

    Args:
        question: The natural-language question to answer.

    Returns:
        The agent's final answer, coerced to ``str`` with surrounding
        whitespace removed.
    """
    result = agent.run(question)
    return str(result).strip()