# Source: mtmx's Hugging Face Space — "update local model" (commit da8bacb, verified)
from smolagents import CodeAgent, DuckDuckGoSearchTool, load_tool, tool, HfApiModel,OpenAIServerModel
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# AI Framework Roasting Meme Generator Tool
@tool
def roast_ai_frameworks_meme(target_framework: str = "random", roast_style: str = "playful") -> str:
    """Generates memes that playfully mock and roast AI agent frameworks and their quirks.
    Args:
        target_framework: Which framework to roast (options: "langchain", "autogen", "crewai", "smolagents", "haystack", "random")
        roast_style: How savage the roast should be (options: "playful", "savage", "brutal")
    """
    # Stdlib import kept function-local to match the original style; previously
    # this was imported twice inside the function body.
    import random

    # Framework-specific roasts and stereotypes
    framework_roasts = {
        "langchain": {
            "stereotypes": ["overcomplicated everything", "documentation nightmare", "callback hell", "abstraction overload"],
            "common_issues": ["version breaking changes", "too many ways to do one thing", "memory management chaos", "dependency hell"],
            "meme_scenarios": [
                "trying to understand LangChain's architecture",
                "reading LangChain docs for the 100th time",
                "LangChain update breaking your entire project",
                "explaining LangChain to a junior developer"
            ]
        },
        "autogen": {
            "stereotypes": ["agents talking in circles", "conversation loops", "Microsoft complexity", "over-engineered"],
            "common_issues": ["agents arguing forever", "multi-agent chaos", "conversation management", "complex setup"],
            "meme_scenarios": [
                "AutoGen agents having a 3-hour argument about nothing",
                "trying to stop AutoGen agents from talking",
                "AutoGen agent conversation going completely off-topic",
                "setting up AutoGen for a simple task"
            ]
        },
        "crewai": {
            "stereotypes": ["trying too hard to be cool", "crew member management", "workflow complexity", "startup vibes"],
            "common_issues": ["crew coordination problems", "task delegation chaos", "role confusion", "process overhead"],
            "meme_scenarios": [
                "CrewAI agents not knowing their roles",
                "crew members doing each other's jobs",
                "CrewAI workflow taking longer than doing it manually",
                "explaining why you need a 'crew' for a simple task"
            ]
        },
        "smolagents": {
            "stereotypes": ["'smol' but complicated", "trying to be minimalist", "tool management issues", "documentation gaps"],
            "common_issues": ["tool integration problems", "limited ecosystem", "debugging nightmares", "unclear error messages"],
            "meme_scenarios": [
                "SmolagAgents being anything but 'smol'",
                "trying to debug a SmolagAgents tool",
                "SmolagAgents promising simplicity but delivering complexity",
                "finding SmolagAgents documentation"
            ]
        },
        "haystack": {
            "stereotypes": ["search obsessed", "RAG everything", "pipeline confusion", "enterprise bloat"],
            "common_issues": ["pipeline configuration hell", "version compatibility", "component confusion", "setup complexity"],
            "meme_scenarios": [
                "Haystack turning every problem into a search problem",
                "configuring Haystack pipelines",
                "Haystack's 47 different ways to do RAG",
                "Haystack documentation sending you in circles"
            ]
        }
    }

    # Select target framework. Unknown names fall back to "smolagents"; the
    # label is updated too so the generated prompt matches the data it uses
    # (previously the prompt kept the unrecognized name while using the
    # smolagents roast material).
    if target_framework == "random":
        target_framework = random.choice(list(framework_roasts.keys()))
    if target_framework not in framework_roasts:
        target_framework = "smolagents"
    framework_data = framework_roasts[target_framework]

    # Meme formats perfect for roasting
    roast_formats = [
        "Drake rejecting vs approving meme format",
        "Expanding brain meme with increasingly absurd solutions",
        "Distracted boyfriend looking at other frameworks",
        "This is fine dog in burning room (framework on fire)",
        "Galaxy brain ascending to framework enlightenment",
        "Two buttons difficult choice between frameworks",
        "Surprised Pikachu face when framework breaks",
        "Woman yelling at cat (developer vs framework)",
        "Stonks going down with framework complexity",
        "Change my mind: framework is overcomplicated"
    ]

    # Roast intensity levels
    roast_levels = {
        "playful": "gentle teasing, developer humor, lighthearted jokes about common issues",
        "savage": "pointed criticism, highlighting real problems, witty burns about framework flaws",
        "brutal": "ruthless roasting, exposing all the pain points, maximum developer frustration humor"
    }

    # Pick random elements
    selected_format = random.choice(roast_formats)
    selected_scenario = random.choice(framework_data["meme_scenarios"])
    selected_issue = random.choice(framework_data["common_issues"])
    selected_stereotype = random.choice(framework_data["stereotypes"])
    # .get(...) mirrors the framework fallback above; an unknown style no
    # longer raises KeyError but degrades gracefully to "playful".
    roast_intensity = roast_levels.get(roast_style, roast_levels["playful"])

    # Create the roast meme prompt
    meme_prompt = f"""Create a programming meme roasting {target_framework.title()} using {selected_format}.
Scenario: {selected_scenario}
Focus on: {selected_issue} and {selected_stereotype}
Roast style: {roast_intensity}
The meme should be relatable to developers who have struggled with AI frameworks.
Include bold, readable meme text with typical internet meme formatting.
Make it funny but accurate to real developer experiences.
Use programming/developer culture references and inside jokes.
The visual should clearly convey developer frustration or irony.
Perfect for sharing on developer Twitter, Reddit, or Discord.
High contrast text (white with black outline) for maximum readability."""

    instructions = f"""
🔥 AI FRAMEWORK ROAST MEME GENERATOR 🔥
Target: {target_framework.title()}
Roast Level: {roast_style.title()}
Scenario: {selected_scenario}
Format: {selected_format}
ROAST PROMPT:
{meme_prompt}
BURN TOPICS:
• Main Issue: {selected_issue}
• Stereotype: {selected_stereotype}
• Developer Pain Point: Framework complexity vs promised simplicity
INSTRUCTIONS: Use the image generation tool with the prompt above to create this spicy developer meme!
⚠️ WARNING: This meme may cause framework maintainers to cry and developers to nod in agreement! ⚠️
"""
    return instructions
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Resolve the IANA timezone name and format "now" in that zone.
        zone = pytz.timezone(timezone)
        stamp = datetime.datetime.now(zone).strftime("%Y-%m-%d %H:%M:%S")
    except Exception as e:
        # Invalid timezone names (pytz.UnknownTimeZoneError) land here; the
        # error is reported back to the agent as text rather than raised.
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
    return f"The current local time in {timezone} is: {stamp}"
final_answer = FinalAnswerTool()

# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='microsoft/Phi-3-mini-4k-instruct',
    custom_role_conversions=None,
)

# Use Ollama's local OpenAI-compatible API. NOTE: OpenAIServerModel is already
# imported at the top of the file; the redundant duplicate import that used to
# sit here has been removed.
model_local = OpenAIServerModel(
    model_id="llama3.2:3b",  # or phi3:mini
    api_base="http://localhost:11434/v1",
    api_key="ollama",  # dummy key — Ollama ignores it, but the client requires one
    max_tokens=2096,
    temperature=0.5,
)

# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# Explicit encoding so the YAML prompts parse identically regardless of the
# host's default locale.
with open("prompts.yaml", 'r', encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)

# The agent runs against the local Ollama model; swap `model_local` for
# `model` to use the Hugging Face Inference API instead.
agent = CodeAgent(
    model=model_local,
    tools=[final_answer,
           image_generation_tool,
           roast_ai_frameworks_meme,
           get_current_time_in_timezone],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)

GradioUI(agent).launch()