| from smolagents.models import OpenAIServerModel | |
| from smolagents import CodeAgent, DuckDuckGoSearchTool, WikipediaSearchTool,\ | |
| ToolCallingAgent | |
| from src.tools.tools import download_video, extract_frames,\ | |
| analyze_frame_with_vision_model | |
def create_agent(model_id: str = "gpt-4o-mini") -> tuple:
    """Build a CodeAgent wired with search and video-analysis tools.

    Args:
        model_id: OpenAI model identifier passed to OpenAIServerModel.

    Returns:
        (agent, sys_prompt): the configured CodeAgent and the system
        prompt text read from ``final_prompt.txt``.

    Raises:
        FileNotFoundError: if ``final_prompt.txt`` is not present in the
        current working directory.
    """
    model = OpenAIServerModel(model_id=model_id)
    agent = CodeAgent(
        tools=[
            WikipediaSearchTool(),
            DuckDuckGoSearchTool(),
            download_video,
            extract_frames,
            analyze_frame_with_vision_model,
        ],
        model=model,
    )
    # Explicit encoding: the locale default is platform-dependent and can
    # mis-decode the prompt file on Windows.
    with open("final_prompt.txt", "r", encoding="utf-8") as f:
        sys_prompt = f.read()
    return agent, sys_prompt
def create_toolcall_agent(model_id: str = "gpt-4o-mini") -> tuple:
    """Build a ToolCallingAgent wired with search and video-analysis tools.

    Mirrors ``create_agent`` but uses the tool-calling (JSON) agent loop
    instead of the code-executing one.

    Args:
        model_id: OpenAI model identifier passed to OpenAIServerModel.

    Returns:
        (agent, sys_prompt): the configured ToolCallingAgent and the
        system prompt text read from ``final_prompt.txt``.

    Raises:
        FileNotFoundError: if ``final_prompt.txt`` is not present in the
        current working directory.
    """
    model = OpenAIServerModel(model_id=model_id)
    agent = ToolCallingAgent(
        tools=[
            WikipediaSearchTool(),
            DuckDuckGoSearchTool(),
            download_video,
            extract_frames,
            analyze_frame_with_vision_model,
        ],
        model=model,
    )
    # Explicit encoding: the locale default is platform-dependent and can
    # mis-decode the prompt file on Windows.
    with open("final_prompt.txt", "r", encoding="utf-8") as f:
        sys_prompt = f.read()
    return agent, sys_prompt
def format_agent_prompt(agent_prompt: str,
                        question: str,
                        supporting_file: str) -> str:
    """Interpolate the question into the prompt template and append extras.

    Args:
        agent_prompt: Template containing a ``{question}`` placeholder.
        question: The user question substituted into the template.
        supporting_file: Optional path/name of an attachment; appended as a
            "SUPPORTING FILE" line only when non-empty.

    Returns:
        The fully formatted prompt, always terminated with an "ANSWER:" cue.
    """
    prompt = agent_prompt.format(question=question)
    # Truthiness covers the empty-string case idiomatically.
    if supporting_file:
        prompt += f"\nSUPPORTING FILE: {supporting_file}"
    prompt += "\n\nANSWER:"
    return prompt