|
|
import os |
|
|
|
|
|
from smolagents import ( |
|
|
CodeAgent, |
|
|
HfApiModel, |
|
|
OpenAIServerModel, |
|
|
DuckDuckGoSearchTool, |
|
|
ToolCallingAgent, |
|
|
WikipediaSearchTool, |
|
|
) |
|
|
|
|
|
import logging |
|
|
import sys |
|
|
|
|
|
# Route all log output to stdout at DEBUG level.
# NOTE: basicConfig() already attaches a StreamHandler for the given stream
# to the root logger, so a separate addHandler(StreamHandler(sys.stdout))
# would duplicate every log record on stdout — it has been removed.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
|
|
|
|
|
from llama_index.core.agent.workflow import AgentWorkflow |
|
|
from llama_index.llms.gemini import Gemini |
|
|
from llama_index.tools.wikipedia import WikipediaToolSpec |
|
|
import asyncio |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# LLM that powers the workflow.
model = Gemini(model="gemini-2.0-flash")

# Wikipedia search/load tools exposed to the agent.
wiki_tools = WikipediaToolSpec().to_tool_list()

# Single-agent workflow wired to the Wikipedia toolset.
agent = AgentWorkflow.from_tools_or_functions(
    wiki_tools,
    llm=model,
)
|
|
|
|
|
|
|
|
def run_agent(question: str) -> str:
    """Answer *question* with the module-level agent workflow.

    Wraps the question in a fixed prompt template that instructs the agent
    to end with a ``FINAL ANSWER:`` line, runs the workflow to completion,
    and returns the stringified response.

    Args:
        question: The question the agent should answer.

    Returns:
        The agent's full response as a string.
    """
    prompt = f"""
You are a helpful assistant that answers requested questions using tools.
I will give you a question at the end. Report your thoughts, and give the final answer with the following template:
FINAL ANSWER: [YOUR FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.

Remember that your answer should start with "FINAL ANSWER: " and be followed by the answer.

The question is:

{question}
"""

    async def _ask() -> str:
        # AgentWorkflow.run() returns an awaitable handler; the workflow only
        # executes (and the final response only exists) once it is awaited.
        response = await agent.run(prompt)
        return str(response)

    # Bug fix: the original called agent.run(prompt) without awaiting it and
    # returned str() of the unawaited handler instead of the agent's answer.
    # asyncio.run drives the coroutine to completion from this sync entry point.
    return asyncio.run(_ask())
|
|
|