Spaces:
Sleeping
Sleeping
File size: 8,659 Bytes
da8bacb 9b5b26a c19d193 6aae614 8fe992b 9b5b26a 5c8dfc3 9b5b26a 5c8dfc3 9b5b26a 5c8dfc3 9b5b26a 5c8dfc3 9b5b26a 8c01ffb 6aae614 ae7a494 e121372 bf6d34c 867aba7 fe328e0 13d500a 8c01ffb 6fae833 da8bacb 6fae833 da8bacb 6fae833 8c01ffb 9b5b26a 8c01ffb 861422e 9b5b26a 8c01ffb 6fae833 867aba7 54f02f6 8c01ffb 861422e 8fe992b 9b5b26a 8c01ffb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 |
# Standard library
import datetime
import random

# Third-party
import pytz
import requests
import yaml
from smolagents import CodeAgent, DuckDuckGoSearchTool, load_tool, tool, HfApiModel, OpenAIServerModel

# Local
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# AI Framework Roasting Meme Generator Tool
@tool
def roast_ai_frameworks_meme(target_framework: str = "random", roast_style: str = "playful") -> str:
    """Generates memes that playfully mock and roast AI agent frameworks and their quirks.

    Args:
        target_framework: Which framework to roast (options: "langchain", "autogen", "crewai", "smolagents", "haystack", "random")
        roast_style: How savage the roast should be (options: "playful", "savage", "brutal")
    """
    # Framework-specific roasts and stereotypes.
    framework_roasts = {
        "langchain": {
            "stereotypes": ["overcomplicated everything", "documentation nightmare", "callback hell", "abstraction overload"],
            "common_issues": ["version breaking changes", "too many ways to do one thing", "memory management chaos", "dependency hell"],
            "meme_scenarios": [
                "trying to understand LangChain's architecture",
                "reading LangChain docs for the 100th time",
                "LangChain update breaking your entire project",
                "explaining LangChain to a junior developer"
            ]
        },
        "autogen": {
            "stereotypes": ["agents talking in circles", "conversation loops", "Microsoft complexity", "over-engineered"],
            "common_issues": ["agents arguing forever", "multi-agent chaos", "conversation management", "complex setup"],
            "meme_scenarios": [
                "AutoGen agents having a 3-hour argument about nothing",
                "trying to stop AutoGen agents from talking",
                "AutoGen agent conversation going completely off-topic",
                "setting up AutoGen for a simple task"
            ]
        },
        "crewai": {
            "stereotypes": ["trying too hard to be cool", "crew member management", "workflow complexity", "startup vibes"],
            "common_issues": ["crew coordination problems", "task delegation chaos", "role confusion", "process overhead"],
            "meme_scenarios": [
                "CrewAI agents not knowing their roles",
                "crew members doing each other's jobs",
                "CrewAI workflow taking longer than doing it manually",
                "explaining why you need a 'crew' for a simple task"
            ]
        },
        "smolagents": {
            "stereotypes": ["'smol' but complicated", "trying to be minimalist", "tool management issues", "documentation gaps"],
            "common_issues": ["tool integration problems", "limited ecosystem", "debugging nightmares", "unclear error messages"],
            "meme_scenarios": [
                "SmolagAgents being anything but 'smol'",
                "trying to debug a SmolagAgents tool",
                "SmolagAgents promising simplicity but delivering complexity",
                "finding SmolagAgents documentation"
            ]
        },
        "haystack": {
            "stereotypes": ["search obsessed", "RAG everything", "pipeline confusion", "enterprise bloat"],
            "common_issues": ["pipeline configuration hell", "version compatibility", "component confusion", "setup complexity"],
            "meme_scenarios": [
                "Haystack turning every problem into a search problem",
                "configuring Haystack pipelines",
                "Haystack's 47 different ways to do RAG",
                "Haystack documentation sending you in circles"
            ]
        }
    }

    # Select target framework; "random" picks any known framework, and an
    # unrecognized name degrades gracefully to the "smolagents" roast data.
    # (Previously `import random` was duplicated inside the function body;
    # it now lives once at module level.)
    if target_framework == "random":
        target_framework = random.choice(list(framework_roasts.keys()))
    framework_data = framework_roasts.get(target_framework, framework_roasts["smolagents"])

    # Meme formats perfect for roasting.
    roast_formats = [
        "Drake rejecting vs approving meme format",
        "Expanding brain meme with increasingly absurd solutions",
        "Distracted boyfriend looking at other frameworks",
        "This is fine dog in burning room (framework on fire)",
        "Galaxy brain ascending to framework enlightenment",
        "Two buttons difficult choice between frameworks",
        "Surprised Pikachu face when framework breaks",
        "Woman yelling at cat (developer vs framework)",
        "Stonks going down with framework complexity",
        "Change my mind: framework is overcomplicated"
    ]

    # Roast intensity levels.
    roast_levels = {
        "playful": "gentle teasing, developer humor, lighthearted jokes about common issues",
        "savage": "pointed criticism, highlighting real problems, witty burns about framework flaws",
        "brutal": "ruthless roasting, exposing all the pain points, maximum developer frustration humor"
    }

    # Pick random elements for this roast. An unknown roast_style falls back
    # to "playful" instead of raising KeyError (matches the graceful fallback
    # already used for target_framework above).
    selected_format = random.choice(roast_formats)
    selected_scenario = random.choice(framework_data["meme_scenarios"])
    selected_issue = random.choice(framework_data["common_issues"])
    selected_stereotype = random.choice(framework_data["stereotypes"])
    roast_intensity = roast_levels.get(roast_style, roast_levels["playful"])

    # Create the roast meme prompt.
    meme_prompt = f"""Create a programming meme roasting {target_framework.title()} using {selected_format}.
Scenario: {selected_scenario}
Focus on: {selected_issue} and {selected_stereotype}
Roast style: {roast_intensity}
The meme should be relatable to developers who have struggled with AI frameworks.
Include bold, readable meme text with typical internet meme formatting.
Make it funny but accurate to real developer experiences.
Use programming/developer culture references and inside jokes.
The visual should clearly convey developer frustration or irony.
Perfect for sharing on developer Twitter, Reddit, or Discord.
High contrast text (white with black outline) for maximum readability."""

    # Final instructions returned to the agent, combining the prompt with
    # metadata about the chosen roast.
    instructions = f"""
🔥 AI FRAMEWORK ROAST MEME GENERATOR 🔥
Target: {target_framework.title()}
Roast Level: {roast_style.title()}
Scenario: {selected_scenario}
Format: {selected_format}
ROAST PROMPT:
{meme_prompt}
BURN TOPICS:
• Main Issue: {selected_issue}
• Stereotype: {selected_stereotype}
• Developer Pain Point: Framework complexity vs promised simplicity
INSTRUCTIONS: Use the image generation tool with the prompt above to create this spicy developer meme!
⚠️ WARNING: This meme may cause framework maintainers to cry and developers to nod in agreement! ⚠️
"""
    return instructions
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Resolve the timezone name, then format "now" in that zone.
        zone = pytz.timezone(timezone)
        now = datetime.datetime.now(zone)
        formatted = now.strftime("%Y-%m-%d %H:%M:%S")
    except Exception as e:
        # Report the failure (e.g. an unknown timezone name) as a string
        # so the agent can surface it instead of crashing.
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
    return f"The current local time in {timezone} is: {formatted}"
# Tool the agent uses to emit its final answer.
final_answer = FinalAnswerTool()

# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='microsoft/Phi-3-mini-4k-instruct',
    custom_role_conversions=None,
)

# Use Ollama's local OpenAI-compatible API. (The redundant mid-file
# `from smolagents import OpenAIServerModel` was removed — the name is
# already imported at the top of the file.)
model_local = OpenAIServerModel(
    model_id="llama3.2:3b",  # or phi3:mini
    api_base="http://localhost:11434/v1",
    api_key="ollama",  # dummy key
    max_tokens=2096,
    temperature=0.5,
)

# Import tool from Hub.
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# Load the agent's prompt templates from disk.
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
# Build the agent with the local Ollama model (switch `model_local` to
# `model` to use the hosted HF API model instead).
# NOTE: a stray trailing "|" (web-viewer residue) was removed from the
# launch line below — it was a syntax error as written.
agent = CodeAgent(
    model=model_local,
    tools=[
        final_answer,
        image_generation_tool,
        roast_ai_frameworks_meme,
        get_current_time_in_timezone,
    ],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

# Start the Gradio web UI for interacting with the agent.
GradioUI(agent).launch()