import yaml
import os
from smolagents import CodeAgent, InferenceClientModel
from tools import search_tool, visit_webpage, handle_file, transcription_tool
# Language model backing the agent, served through the HF Inference API.
model = InferenceClientModel(
    model_id="Qwen/Qwen2.5-72B-Instruct",
    token=os.getenv("HF_TOKEN")  # may be None if the env var is unset; auth then fails at request time
)

# Load the prompt templates from the YAML file.
# Explicit encoding so the file decodes the same on every platform
# (the default codec is locale-dependent).
with open("prompts.yaml", 'r', encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)

# Initialize the code-executing agent with the project tools.
agent = CodeAgent(
    model=model,
    tools=[search_tool, visit_webpage, handle_file, transcription_tool],
    max_steps=20,        # cap on reasoning/tool-call iterations per run
    verbosity_level=1,
    additional_authorized_imports=["pandas", "numpy", "re", "math", "collections", "datetime", "json"],
    prompt_templates=prompt_templates
)
# run function
def run_agent(question: str) -> str:
    """Run the module-level agent on *question* and return its answer.

    The agent's result is coerced to ``str`` and stripped of surrounding
    whitespace. This is the entry point invoked from app.py.
    """
    return str(agent.run(question)).strip()