from smolagents import CodeAgent, HfApiModel, tool
import yaml
import random
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# Tool: tell a random joke (no arguments)
@tool
def tell_joke() -> str:
    """A tool that tells a random joke.
    Returns:
        A funny one-liner from a predefined list.
    """
    # Fixed pool of one-liners; tuple since the set never changes at runtime.
    one_liners = (
        "Why don't scientists trust atoms? Because they make up everything!",
        "Why was the math book sad? Because it had too many problems.",
        "Why did the scarecrow win an award? Because he was outstanding in his field!",
        "Why did the tomato turn red? Because it saw the salad dressing!",
        "Why did the computer get cold? Because it forgot to close its windows.",
    )
    # Uniform random pick on every call.
    return random.choice(one_liners)
# Final-answer tool: the agent must call this to terminate a run.
final_answer = FinalAnswerTool()

# Model backing the agent. NOTE(review): no explicit endpoint/token is passed,
# so HfApiModel falls back to its library defaults — confirm that is intended.
model = HfApiModel(
    max_tokens=1024,
    temperature=0.7,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
)

# Load the system-prompt templates. Explicit UTF-8 so the YAML parses the same
# on every platform (without it, open() uses the locale encoding — PEP 597).
with open("prompts.yaml", 'r', encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)

# Assemble the agent with both tools; max_steps bounds the reasoning loop.
agent = CodeAgent(
    model=model,
    tools=[final_answer, tell_joke],
    max_steps=4,
    verbosity_level=1,
    prompt_templates=prompt_templates,
)

# Launch the Gradio chat UI (blocks until the server stops).
GradioUI(agent).launch()