from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import os
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI

TELEGRAM_BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")
TELEGRAM_CHAT_ID = os.getenv("TELEGRAM_CHAT_ID")

if not TELEGRAM_BOT_TOKEN:
    raise ValueError("TELEGRAM_BOT_TOKEN is missing! Did you set it in Hugging Face Secrets?")


# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that does nothing yet.

    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"


@tool
def post_to_telegram(message: str) -> str:
    """Posts a message to a Telegram channel using a bot.

    Args:
        message: The text to post in the Telegram channel.

    Returns:
        A success or failure message.
    """
    url = f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendMessage"
    payload = {"chat_id": TELEGRAM_CHAT_ID, "text": message}
    try:
        response = requests.post(url, json=payload)
        response_data = response.json()
        if response_data.get("ok"):
            return "Message successfully posted to Telegram!"
        else:
            return f"Failed to post message: {response_data.get('description')}"
    except Exception as e:
        return f"Error posting to Telegram: {str(e)}"


@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"


@tool
def generate_post(subject: str, max_length: int = 200) -> str:
    """Generates a concise and well-structured post about a given subject.

    Args:
        subject: The topic to generate a post about.
        max_length: The maximum length of the post in characters (default: 200).

    Returns:
        A well-structured, engaging post about the subject.
    """
    prompt = f"Write a concise and engaging social media post about {subject}. Limit the response to {max_length} characters."
    try:
        global model  # Ensure the model defined below is accessible
        # NOTE: smolagents models are called with a list of chat messages and return a
        # message whose text is in `.content`; adjust if your smolagents version differs.
        response = model([{"role": "user", "content": prompt}]).content
        return response[:max_length]  # Trim response if needed
    except Exception as e:
        return f"Error generating post about '{subject}': {str(e)}"


@tool
def generate_image(description: str) -> str:
    """Generates an image based on a textual description.

    Args:
        description: A detailed description of the image to generate.

    Returns:
        A URL or path to the generated image, or an error message if the generation fails.
    """
    try:
        # Use the loaded image generation tool or an external service
        global image_generation_tool
        response = image_generation_tool(description)
        # Assuming the tool returns a URL or a path to the generated image
        return response
    except Exception as e:
        return f"Error generating image: {str(e)}"


@tool
def generate_joke(subject: str) -> str:
    """Generates a joke about a specific subject.

    Args:
        subject: The topic or subject for the joke.

    Returns:
        A humorous and creative joke about the given subject.
""" prompt = f"Write a funny and creative joke about {subject}. Keep it short and engaging." try: global model response = model.run(prompt) return response.strip() except Exception as e: return f"Error generating joke about '{subject}': {str(e)}" final_answer = FinalAnswerTool() # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder: # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud' model = HfApiModel( max_tokens=2096, temperature=0.5, model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded custom_role_conversions=None, ) # Import tool from Hub image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True) with open("prompts.yaml", 'r') as stream: prompt_templates = yaml.safe_load(stream) agent = CodeAgent( model=model, tools=[final_answer,generate_joke,post_to_telegram], ## add your tools here (don't remove final answer) max_steps=6, verbosity_level=1, grammar=None, planning_interval=None, name=None, description=None, prompt_templates=prompt_templates ) GradioUI(agent).launch()