Spaces:
Runtime error
Runtime error
File size: 3,274 Bytes
4c9a1bb 9b5b26a c19d193 6aae614 bf85f0c 9b5b26a 4c9a1bb 60b2104 4c9a1bb 60b2104 4c9a1bb dda81cd 4c9a1bb 636cbfe 4c9a1bb 60b2104 4c9a1bb 8fe992b 4c9a1bb 0e8b50f 4c9a1bb 60b2104 4c9a1bb 0e8b50f 4c9a1bb 60b2104 4c9a1bb 0e8b50f 4c9a1bb 0e8b50f 4c9a1bb 0e8b50f 4c9a1bb 0e8b50f 4c9a1bb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 |
from smolagents import CodeAgent, HfApiModel, load_tool, tool
import random
import requests
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
@tool
def get_random_fact() -> str:
    """Fetches a random fact from an API.
    Takes inputs: None
    Returns an output of type: str
    """
    try:
        url = "https://uselessfacts.jsph.pl/random.json?language=en"
        # Timeout prevents the agent from hanging indefinitely if the API stalls.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        data = response.json()
        return data['text']
    except Exception as e:
        # Broad catch is deliberate: a tool should report failure as text the
        # agent can read, not crash the whole agent run.
        return f"Error fetching random fact: {str(e)}"
@tool
def generate_lucky_number(max_num: int) -> str:
    """Generates a 'lucky' number with a fun message.
    Args:
        max_num: The upper limit for the random number (must be a positive integer).
    Takes inputs: max_num (int)
    Returns an output of type: str
    """
    # Guard clause: reject non-positive limits with a friendly message
    # instead of letting randint raise.
    if max_num < 1:
        return "Please provide a number greater than 0!"
    lucky = random.randint(1, max_num)
    # Wrap the drawn number in one of several playful templates, chosen at random.
    return random.choice([
        f"Your lucky number is {lucky}! It’s destined to bring you cookies!",
        f"Behold, {lucky}! The universe approves!",
        f"{lucky} is your magic number—use it wisely!",
    ])
# Tool that lets the agent emit its final answer back to the user.
final_answer_tool = FinalAnswerTool()
# Remote text-to-image tool pulled from the Hugging Face Hub.
# NOTE(review): trust_remote_code executes code from that Hub repo — acceptable
# only because "agents-course/text-to-image" is a trusted source.
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
# Load prompt templates from prompts.yaml
# Explicit encoding so the YAML parses identically regardless of platform locale.
with open("prompts.yaml", 'r', encoding='utf-8') as stream:
    prompt_templates = yaml.safe_load(stream)
# Define the system prompt with tools injected.
# Static description of each tool, interpolated into the system prompt below so
# the model knows what it may call; must stay in sync with the tools list given
# to CodeAgent further down.
tools_dict = {
    "final_answer": {
        "name": "final_answer",
        "description": "Returns the final answer to the task",
        "inputs": "answer (any type)",
        "output_type": "str"
    },
    "get_random_fact": {
        "name": "get_random_fact",
        "description": "Fetches a random fact from an API",
        "inputs": "None",
        "output_type": "str"
    },
    "generate_lucky_number": {
        "name": "generate_lucky_number",
        "description": "Generates a 'lucky' number with a fun message",
        "inputs": "max_num (int)",
        "output_type": "str"
    },
    "image_generator": {
        "name": "image_generator",
        "description": "Generates an image from a text description",
        "inputs": "description (str)",
        "output_type": "str"
    }
}
# Inject tools into the system prompt template loaded from the YAML file.
system_prompt = prompt_templates["system_prompt"].format(
    tools=tools_dict,
    managed_agents={},  # No managed agents in this case
    authorized_imports="random, requests"
)
# Configure the model served via the Hugging Face Inference API.
model = HfApiModel(
    max_tokens=2096,  # NOTE(review): 2096 looks like a typo for 2048 — confirm intended limit
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)
# Replace the raw template with the fully formatted system prompt.
prompt_templates["system_prompt"] = system_prompt
# Create the agent that plans and executes tool calls as code.
agent = CodeAgent(
    model=model,
    tools=[final_answer_tool, get_random_fact, generate_lucky_number, image_generation_tool],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name="FunAgent",
    description="An agent for random fun and facts",
    prompt_templates=prompt_templates
)
# Launch the Gradio web UI only when run as a script, not on import.
if __name__ == "__main__":
    GradioUI(agent).launch()