from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool, TransformersModel
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# Below is an example of a tool that does nothing. Amaze us with your creativity !
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that does nothing yet
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build ?"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
"""A tool that fetches the current local time in a specified timezone.
Args:
timezone: A string representing a valid timezone (e.g., 'America/New_York').
"""
try:
# Create timezone object
tz = pytz.timezone(timezone)
# Get current time in that timezone
local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
return f"The current local time in {timezone} is: {local_time}"
except Exception as e:
return f"Error fetching time for timezone '{timezone}': {str(e)}"
final_answer = FinalAnswerTool()
# Alternative: LiteLLM-backed model (https://docs.litellm.ai/)
# Requires `import os` and `from smolagents import LiteLLMModel`.
#model = LiteLLMModel(
#    model_id="gemini/gemini-2.0-flash-exp",
#    max_tokens=2096,
#    temperature=0.6,
#    api_key=os.getenv("LITELLM_API_KEY")
#)
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)
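# Note: HfApiModel calls the hosted Hugging Face Inference API; if that endpoint is
# overloaded or unavailable, one of the commented-out alternatives below can be used instead.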
#model = HfApiModel(
#    max_tokens=2096,
#    temperature=0.5,
#    model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
#    custom_role_conversions=None,
#)
# Alternative: a local Ollama server via its OpenAI-compatible API
# Requires `from smolagents import OpenAIServerModel`; Ollama's OpenAI-compatible endpoint is typically http://localhost:11434/v1.
#model = OpenAIServerModel(
#    model_id="gemma2:2b",  # "deepseek-coder-v2:16b"
#    max_tokens=2096,
#    temperature=0.6,
#    api_base="http://localhost:11434",
#    #num_ctx=8192,
#    api_key="",
#)
# Alternative: run the model locally with transformers
#model = TransformersModel(
#    model_id="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
#    device_map="auto",
#    torch_dtype="auto",
#    max_new_tokens=2096,
#    temperature=0.6,
#)
# print("Model Image")
# Import tool from Hub
# image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
agent = CodeAgent(
    model=model,
    tools=[final_answer],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)
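# Optional sanity check before launching the UI (illustrative, not part of the original flow):
# print(agent.run("What time is it in Tokyo right now?"))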
print("Start GradioUI")
GradioUI(agent).launch()