Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,203 +1,176 @@
|
|
| 1 |
-
from smolagents import CodeAgent, HfApiModel,
|
| 2 |
import yaml
|
| 3 |
-
import
|
|
|
|
|
|
|
| 4 |
from tools.final_answer import FinalAnswerTool
|
| 5 |
from Gradio_UI import GradioUI
|
| 6 |
|
| 7 |
-
#
|
| 8 |
-
import logging
|
| 9 |
-
logging.basicConfig(
|
| 10 |
-
level=logging.DEBUG,
|
| 11 |
-
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
| 12 |
-
handlers=[
|
| 13 |
-
logging.StreamHandler(sys.stdout),
|
| 14 |
-
logging.FileHandler('debug.log')
|
| 15 |
-
]
|
| 16 |
-
)
|
| 17 |
-
logger = logging.getLogger(__name__)
|
| 18 |
-
|
| 19 |
-
# Print function for immediate visibility
|
| 20 |
-
def debug_print(message):
|
| 21 |
-
print(f"DEBUG: {message}")
|
| 22 |
-
logger.info(message)
|
| 23 |
-
|
| 24 |
-
# Alternative image generation approach
|
| 25 |
@tool
|
| 26 |
-
def
|
| 27 |
-
"""Generate an image
|
| 28 |
Args:
|
| 29 |
-
prompt: A detailed
|
| 30 |
"""
|
| 31 |
-
debug_print(f"=== MANUAL IMAGE GENERATION START ===")
|
| 32 |
-
debug_print(f"Prompt: {prompt}")
|
| 33 |
-
|
| 34 |
try:
|
| 35 |
-
|
| 36 |
-
import base64
|
| 37 |
-
from PIL import Image
|
| 38 |
-
import io
|
| 39 |
|
| 40 |
-
# Try Hugging Face Inference API
|
| 41 |
-
|
| 42 |
-
|
|
|
|
|
|
|
|
|
|
| 43 |
|
| 44 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 45 |
|
| 46 |
-
#
|
| 47 |
-
|
| 48 |
-
debug_print(f"API Response status: {response.status_code}")
|
| 49 |
|
| 50 |
-
|
| 51 |
-
debug_print("✅ API call successful")
|
| 52 |
-
# Convert response to image
|
| 53 |
-
image_bytes = response.content
|
| 54 |
-
image = Image.open(io.BytesIO(image_bytes))
|
| 55 |
-
debug_print(f"Generated image size: {image.size}")
|
| 56 |
-
|
| 57 |
-
# Save the image temporarily
|
| 58 |
-
image_path = f"generated_image.png"
|
| 59 |
-
image.save(image_path)
|
| 60 |
-
debug_print(f"Image saved to: {image_path}")
|
| 61 |
-
|
| 62 |
-
return f"Image generated successfully! Size: {image.size}"
|
| 63 |
-
|
| 64 |
-
else:
|
| 65 |
-
debug_print(f"❌ API call failed: {response.status_code}")
|
| 66 |
-
debug_print(f"Response: {response.text}")
|
| 67 |
-
return f"API Error: {response.status_code} - {response.text}"
|
| 68 |
-
|
| 69 |
-
except ImportError as e:
|
| 70 |
-
debug_print(f"Missing dependencies: {e}")
|
| 71 |
-
return "Error: Missing required libraries (requests, PIL)"
|
| 72 |
-
except Exception as e:
|
| 73 |
-
debug_print(f"Manual generation failed: {e}")
|
| 74 |
-
return f"Error: {str(e)}"
|
| 75 |
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 101 |
|
| 102 |
# Initialize components
|
| 103 |
-
|
|
|
|
| 104 |
final_answer = FinalAnswerTool()
|
| 105 |
|
| 106 |
-
# Create model
|
| 107 |
-
debug_print("Creating model...")
|
| 108 |
model = HfApiModel(
|
| 109 |
max_tokens=1024,
|
| 110 |
temperature=0.7,
|
| 111 |
model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
|
| 112 |
custom_role_conversions=None,
|
| 113 |
)
|
| 114 |
-
debug_print("✅ Model created")
|
| 115 |
-
|
| 116 |
-
# Try original tool loading with detailed debugging
|
| 117 |
-
debug_print("=== ATTEMPTING ORIGINAL TOOL LOADING ===")
|
| 118 |
-
original_tool = None
|
| 119 |
-
try:
|
| 120 |
-
debug_print("Loading agents-course/text-to-image...")
|
| 121 |
-
original_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
|
| 122 |
-
debug_print(f"✅ Original tool loaded: {type(original_tool)}")
|
| 123 |
-
|
| 124 |
-
# Test the original tool
|
| 125 |
-
debug_print("Testing original tool...")
|
| 126 |
-
test_result = original_tool("test red circle")
|
| 127 |
-
debug_print(f"Original tool test result: {type(test_result)}")
|
| 128 |
-
debug_print(f"Test result details: {test_result}")
|
| 129 |
-
|
| 130 |
-
if hasattr(test_result, 'size'):
|
| 131 |
-
debug_print(f"Test result size: {test_result.size}")
|
| 132 |
-
if test_result.size == (0, 0):
|
| 133 |
-
debug_print("⚠️ FOUND THE ISSUE: Original tool returns size 0x0")
|
| 134 |
-
debug_print("This indicates the Hugging Face model is not responding properly")
|
| 135 |
-
else:
|
| 136 |
-
debug_print("✅ Original tool working correctly")
|
| 137 |
-
|
| 138 |
-
except Exception as e:
|
| 139 |
-
debug_print(f"❌ Original tool failed: {e}")
|
| 140 |
-
import traceback
|
| 141 |
-
debug_print(f"Full traceback: {traceback.format_exc()}")
|
| 142 |
|
| 143 |
# Load prompts
|
| 144 |
-
debug_print("Loading prompts...")
|
| 145 |
try:
|
| 146 |
with open("prompts.yaml", 'r') as stream:
|
| 147 |
prompt_templates = yaml.safe_load(stream)
|
| 148 |
-
|
| 149 |
except:
|
| 150 |
prompt_templates = {
|
| 151 |
-
"system": "You are an AI assistant
|
| 152 |
-
|
|
|
|
|
|
|
| 153 |
}
|
| 154 |
-
|
| 155 |
|
| 156 |
-
# Create
|
| 157 |
-
debug_print("=== CREATING TOOLS LIST ===")
|
| 158 |
-
tools_list = [final_answer]
|
| 159 |
-
|
| 160 |
-
# Add image tools in order of preference
|
| 161 |
-
if original_tool:
|
| 162 |
-
tools_list.append(original_tool)
|
| 163 |
-
debug_print("✅ Added original image tool")
|
| 164 |
-
|
| 165 |
-
tools_list.append(simple_image_describer)
|
| 166 |
-
debug_print("✅ Added simple image describer")
|
| 167 |
-
|
| 168 |
-
tools_list.append(manual_image_generator)
|
| 169 |
-
debug_print("✅ Added manual image generator")
|
| 170 |
-
|
| 171 |
-
debug_print(f"Total tools: {len(tools_list)}")
|
| 172 |
-
|
| 173 |
-
# Create agent
|
| 174 |
-
debug_print("=== CREATING AGENT ===")
|
| 175 |
agent = CodeAgent(
|
| 176 |
model=model,
|
| 177 |
-
tools=
|
| 178 |
max_steps=3,
|
| 179 |
-
verbosity_level=
|
| 180 |
grammar=None,
|
| 181 |
planning_interval=None,
|
| 182 |
-
name="
|
| 183 |
-
description="AI agent
|
| 184 |
prompt_templates=prompt_templates
|
| 185 |
)
|
| 186 |
-
|
|
|
|
|
|
|
| 187 |
|
| 188 |
# Launch function
|
| 189 |
-
def
|
| 190 |
-
debug_print("=== LAUNCHING DEBUG AGENT ===")
|
| 191 |
-
debug_print(f"Tools available: {[getattr(t, '__name__', str(t)) for t in tools_list]}")
|
| 192 |
-
|
| 193 |
try:
|
| 194 |
-
|
| 195 |
GradioUI(agent).launch()
|
| 196 |
except Exception as e:
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 200 |
|
| 201 |
if __name__ == "__main__":
|
| 202 |
-
|
| 203 |
-
|
|
|
|
|
|
|
|
|
| 1 |
+
from smolagents import CodeAgent, HfApiModel, tool
|
| 2 |
import yaml
|
| 3 |
+
import requests
|
| 4 |
+
import base64
|
| 5 |
+
import io
|
| 6 |
from tools.final_answer import FinalAnswerTool
|
| 7 |
from Gradio_UI import GradioUI
|
| 8 |
|
| 9 |
+
# Create a custom image generation tool that returns a string instead of AgentImage
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@tool
def text_to_image_generator(prompt: str) -> str:
    """Generate an image from text and return the result as a string with image details.

    Tries several public Hugging Face Inference API endpoints in order; on
    success the raw PNG bytes are written to a local file and a summary string
    is returned. If every endpoint fails, a detailed textual concept
    description is returned instead, so the agent always gets a string back.

    Args:
        prompt: A detailed description of the image to generate
    """
    try:
        print(f"🎨 Starting image generation for: {prompt}")

        # Method 1: Try Hugging Face Inference API
        api_urls = [
            "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1",
            "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5",
            "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
        ]

        for api_url in api_urls:
            try:
                print(f"🔄 Trying API: {api_url}")

                # Make request without auth token first (some models allow public access)
                response = requests.post(
                    api_url,
                    json={"inputs": prompt},
                    timeout=30
                )

                print(f"📡 API Response: {response.status_code}")

                if response.status_code == 200:
                    print("✅ Image generated successfully!")

                    # Save the image. Use a stable digest instead of builtin
                    # hash(): str hashing is salted per process (PYTHONHASHSEED),
                    # so hash(prompt) would name the file differently each run.
                    import hashlib
                    digest = int(hashlib.md5(prompt.encode("utf-8")).hexdigest(), 16) % 10000
                    image_filename = f"generated_image_{digest}.png"
                    with open(image_filename, 'wb') as f:
                        f.write(response.content)

                    image_size = len(response.content)
                    print(f"💾 Image saved as: {image_filename} (Size: {image_size} bytes)")

                    return f"""✅ IMAGE GENERATED SUCCESSFULLY!

📝 Prompt: {prompt}
🖼️ File: {image_filename}
📦 Size: {image_size} bytes
🌐 API: {api_url.split('/')[-1]}
⏰ Status: Ready for viewing

The image has been generated and saved successfully. You can view it in your file system."""

                elif response.status_code == 503:
                    # 503 = model is cold-starting on the HF side; not fatal.
                    print("⏳ Model is loading, trying next option...")
                    continue
                elif response.status_code == 429:
                    print("⚠️ Rate limit exceeded, trying next option...")
                    continue
                else:
                    print(f"❌ API Error: {response.status_code} - {response.text}")
                    continue

            except requests.exceptions.RequestException as e:
                # Network-level failure (DNS, timeout, refused) — try next URL.
                print(f"🔗 Connection error: {e}")
                continue
            except Exception as e:
                print(f"🚫 Unexpected error: {e}")
                continue

        # Method 2: If all APIs fail, create a detailed description
        print("🎭 Falling back to detailed description mode...")

        description = f"""🎨 IMAGE CONCEPT GENERATED

📝 Original Prompt: "{prompt}"

🖼️ Detailed Visual Description:
This image would feature {prompt.lower()}, rendered in high quality with:

🎯 Main Elements:
- Primary subject matter as described in the prompt
- Professional composition and framing
- Balanced lighting and shadows
- Rich, vibrant colors

🎨 Style Characteristics:
- Digital art quality
- Realistic rendering
- High resolution output
- Professional photography aesthetic

📐 Technical Specs:
- Dimensions: 512x512 pixels (standard)
- Format: PNG with transparency support
- Quality: Professional grade
- Style: Photorealistic

💡 Note: This is a detailed concept description. In a fully functional system, this would be accompanied by the actual generated image file."""

        print("📋 Description generated successfully!")
        return description

    except Exception as e:
        # Last-resort guard: the tool contract is "always return a string".
        error_msg = f"❌ Image generation failed: {str(e)}"
        print(error_msg)
        return error_msg
|
| 114 |
|
| 115 |
# Initialize components
print("🚀 Initializing Image Generation Agent...")

# Tool the agent uses to deliver its final response.
final_answer = FinalAnswerTool()

# Create model with optimized settings
# NOTE(review): kwargs reordered for readability; values unchanged.
model = HfApiModel(
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    max_tokens=1024,
    temperature=0.7,
    custom_role_conversions=None,
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 127 |
|
| 128 |
# Load prompts
# Prefer the project-supplied prompts.yaml; fall back to built-in defaults.
try:
    with open("prompts.yaml", 'r') as stream:
        prompt_templates = yaml.safe_load(stream)
    print("✅ Loaded custom prompts")
except Exception:
    # Narrowed from a bare `except:` so Ctrl-C / SystemExit are not swallowed.
    # Covers missing file (OSError) and malformed YAML alike.
    prompt_templates = {
        "system": """You are an AI assistant specialized in image generation.
When users request images, use the text_to_image_generator tool with detailed, descriptive prompts.
Always provide clear feedback about the image generation process.""",
        "user": "Request: {input}"
    }
    print("✅ Using default prompts")
|
| 141 |
|
| 142 |
+
# Create agent with simplified tool set
# Only two tools are wired in: the mandatory final-answer tool plus the
# string-returning image generator defined above.
agent = CodeAgent(
    model=model,
    tools=[final_answer, text_to_image_generator],
    name="StringBasedImageAgent",
    description="AI agent that generates images and returns detailed text descriptions",
    max_steps=3,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    prompt_templates=prompt_templates
)

print("✅ Agent initialized successfully")
print("🎯 Available tools: FinalAnswer, TextToImageGenerator")
|
| 157 |
|
| 158 |
# Launch function
def launch_agent():
    """Start the Gradio UI for the agent, retrying once before giving up.

    NOTE(review): the retry issues the exact same call as the first attempt,
    so it can only help with transient failures (e.g. a port briefly busy).
    All failures are logged — with full tracebacks — rather than raised, so
    the process exits cleanly either way.
    """
    import traceback

    try:
        print("🌐 Starting Gradio interface...")
        GradioUI(agent).launch()
    except Exception as e:
        print(f"❌ Launch failed: {e}")
        traceback.print_exc()
        # Try basic launch
        try:
            print("🔄 Trying basic launch...")
            GradioUI(agent).launch()
        except Exception as e2:
            print(f"💥 Basic launch also failed: {e2}")
            traceback.print_exc()
|
| 171 |
|
| 172 |
if __name__ == "__main__":
|
| 173 |
+
print("=" * 50)
|
| 174 |
+
print("🎨 STRING-BASED IMAGE GENERATION AGENT")
|
| 175 |
+
print("=" * 50)
|
| 176 |
+
launch_agent()
|