from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# Below is an example of a simple custom tool. Amaze us with your creativity!
@tool
def multiply(arg1: float, arg2: float) -> float:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that multiplies two numbers.

    Args:
        arg1: The first factor.
        arg2: The second factor.
    """
    return arg1 * arg2
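# Illustrative call (values are hypothetical): multiply(3.0, 2.0) would return 6.0.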
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
web_search = DuckDuckGoSearchTool()
final_answer = FinalAnswerTool()
# If the agent does not answer, the model is overloaded; use another model or the
# following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
# model = HfApiModel(
#     max_tokens=2096,
#     temperature=0.5,
#     model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',  # this endpoint may also be overloaded
#     custom_role_conversions=None,
# )
class DummyModel:
    """Offline stand-in for a real model: always returns the same fixed final answer."""

    def __init__(self):
        self.last_input_token_count = 0
        self.last_output_token_count = 0

    def __call__(self, prompt, **kwargs):
        def safe_str(msg):
            # Message content may be a plain string or a list of content blocks
            if isinstance(msg["content"], list):
                return "\n".join(map(str, msg["content"]))
            return str(msg["content"])

        # Reassemble the text from the list of messages
        text_prompt = "\n".join([safe_str(msg) for msg in prompt if "content" in msg])
        self.last_input_token_count = len(text_prompt.split())  # rough word-count proxy for tokens
        self.last_output_token_count = 10
        return {
            "text": """Thought: I now know the final answer.
Final Answer: 6.2"""
        }
model = DummyModel()
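# Note: DummyModel is an offline stub so the Space can run without a hosted model;
# for real inference, uncomment the HfApiModel block above and pass that instance instead.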
# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)
agent = CodeAgent(
    model=model,
    tools=[final_answer, multiply, get_current_time_in_timezone, image_generation_tool, web_search],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
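# Quick headless check (a sketch that bypasses the UI; the question is just an example):
# print(agent.run("What is 3.1 multiplied by 2?"))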
GradioUI(agent).launch()