# app.py -- smolagents demo Space (Hugging Face Agents Course template).
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the tool description / args description but feel free to modify the tool
    # NOTE(review): placeholder template tool -- arg1/arg2 are currently unused.
    # The docstring below is parsed by smolagents to build the tool schema, so
    # keep the "Args:" section listing every parameter.
    """A tool that does nothing yet
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build ?"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Resolve the tz database name and read the wall-clock time there.
        now = datetime.datetime.now(pytz.timezone(timezone))
        local_time = now.strftime("%Y-%m-%d %H:%M:%S")
    except Exception as e:
        # Bad timezone names (and anything else) are reported back as text so
        # the agent can recover instead of crashing.
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
    return f"The current local time in {timezone} is: {local_time}"
# Tool the agent must call to deliver its final response to the user.
final_answer = FinalAnswerTool()

# Inference client for the hosted chat model.
# NOTE(review): max_tokens=2096 is an unusual limit -- 2048 or 4096 was likely
# intended; confirm against the endpoint's context window.
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)

# Import tool from Hub
# NOTE(review): trust_remote_code=True executes code downloaded from the Hub
# repo -- acceptable for this course demo, but verify the source is trusted.
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
# Load system prompt from prompt.yaml file.
# Fall back to an empty mapping so the default-filling below still works when
# the file is empty (yaml.safe_load returns None in that case).
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream) or {}

# Ensure the 'final_answer' section exists and carries every required key.
# The original code only back-filled 'pre_messages'/'post_messages' when the
# section was already present, leaving a missing 'template' key unfixed; a
# uniform setdefault pass covers all three keys in both cases.
_final_answer_defaults = {
    "template": "This is the final answer: {answer}",
    "pre_messages": "You are about to receive the final answer.",
    "post_messages": "The task is complete. If you have any further questions, feel free to ask."
}
_final_answer_section = prompt_templates.setdefault("final_answer", {})
for _key, _default in _final_answer_defaults.items():
    _final_answer_section.setdefault(_key, _default)
# Back-fill any missing top-level prompt templates with built-in defaults;
# templates already present in prompts.yaml are left untouched.
_default_templates = {
    "planning": """
You are an expert assistant capable of breaking down complex tasks into smaller steps.
For each task, plan your approach step-by-step. Each step should include a 'Thought:'
explaining the reasoning, 'Code:' to write the Python code, and 'Observation:' to explain the result.
""",
    "managed_agent": """
You are a managed agent who will execute a task with steps defined for you. After each step,
you should provide 'Thought:', 'Code:', and 'Observation:' sequences to guide the process.
""",
}
for _template_name, _template_text in _default_templates.items():
    prompt_templates.setdefault(_template_name, _template_text)
# Assemble the agent. The final_answer tool is required so the agent can
# terminate with an answer; the remaining tools are the capabilities it may
# invoke while solving a task.
# (Fix: removed a stray trailing '|' token after launch() that broke parsing.)
agent = CodeAgent(
    model=model,
    tools=[
        final_answer,
        my_custom_tool,
        get_current_time_in_timezone,
        image_generation_tool
    ],
    max_steps=6,            # hard cap on think/act iterations per task
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates  # Pass system prompt to CodeAgent
)

# Launch the Gradio web UI for interacting with the agent.
GradioUI(agent).launch()