# Hugging Face Space (status: Running) — Gradio chat app for an AI agent with image generation
import gradio as gr
from smolagents import CodeAgent, InferenceClientModel, Tool
# NOTE(review): os, Image, tempfile, base64 and BytesIO are not referenced
# anywhere in this file — confirm they are needed before removing.
import os
from PIL import Image
import tempfile
import base64
from io import BytesIO

# Initialize the image generation tool.
# Wraps the public FLUX.1-schnell Space as a callable smolagents Tool that the
# agent can invoke by the name "image_generator".
image_generation_tool = Tool.from_space(
    "black-forest-labs/FLUX.1-schnell",
    name="image_generator",
    description="Generate an image from a prompt"
)

# Initialize the model and agent.
# NOTE(review): the UI footer in create_interface() advertises
# Qwen2.5-Coder-32B-Instruct, but the model id used here is
# neta-art/Neta-Lumina — confirm which model is actually intended.
model = InferenceClientModel("neta-art/Neta-Lumina")
agent = CodeAgent(tools=[image_generation_tool], model=model)
def process_message(message, history):
    """
    Run the user's message through the smolagents CodeAgent and append
    the exchange to the chat history.

    Args:
        message: User input message (str).
        history: Chat history as a list of (user, assistant) tuples, or
            None on the first turn (Gradio can pass None for fresh state).

    Returns:
        A new history list with the (message, response) pair appended.
        On failure the agent error is rendered as the assistant reply
        instead of crashing the UI.
    """
    # Copy defensively: tolerates history=None and avoids mutating
    # Gradio's state object in place.
    history = list(history or [])
    try:
        # Run the agent with the user's message.
        response = agent.run(message)

        # agent.run usually returns a plain string, but guard for a
        # multi-modal response object exposing a .content list of
        # typed parts (text / image).
        if hasattr(response, 'content') and isinstance(response.content, list):
            text_parts = []
            images = []
            for item in response.content:
                if hasattr(item, 'type'):
                    if item.type == 'text':
                        text_parts.append(item.text)
                    elif item.type == 'image':
                        images.append(item)

            # Combine text parts; fall back to the raw response repr.
            text_response = ' '.join(text_parts) if text_parts else str(response)

            # The images themselves are not embedded in the chat yet —
            # only their count is reported.
            if images:
                text_response += f"\n\n🖼️ Generated {len(images)} image(s)"
        else:
            # Handle text-only response.
            text_response = str(response)

        history.append((message, text_response))
    except Exception as e:
        # Surface the failure in the chat rather than raising into Gradio.
        history.append((message, f"❌ Error: {str(e)}"))
    return history
def clear_chat():
    """Reset the conversation: return an empty history for the Chatbot."""
    return list()
def create_interface():
    """Build and return the Gradio Blocks UI for the agent chat app.

    Returns:
        gr.Blocks: the assembled (un-launched) demo application.
    """
    # Custom CSS for better styling: centered column plus chat-bubble colors.
    custom_css = """
    .gradio-container {
        max-width: 900px !important;
        margin: auto !important;
    }
    .chat-message {
        padding: 10px !important;
        margin: 5px 0 !important;
        border-radius: 10px !important;
    }
    .user-message {
        background-color: #e3f2fd !important;
        margin-left: 20% !important;
    }
    .bot-message {
        background-color: #f5f5f5 !important;
        margin-right: 20% !important;
    }
    """

    with gr.Blocks(
        title="🤖 AI Agent with Image Generation",
        theme=gr.themes.Soft(),
        css=custom_css
    ) as demo:
        # Header
        gr.Markdown("""
        # 🤖 AI Agent with Image Generation
        This intelligent agent can help you with various tasks and generate images using FLUX.1-schnell!
        **What you can do:**
        - Ask questions and get intelligent responses
        - Request image generation with detailed prompts
        - Combine text and image requests in natural language
        **Example prompts:**
        - "Generate an image of a sunset over mountains"
        - "Create a logo for a tech startup"
        - "Show me a futuristic city"
        - "Help me write code and create an illustration for it"
        """)

        # Chat interface
        with gr.Row():
            with gr.Column(scale=1):
                # Chat history display
                chatbot = gr.Chatbot(
                    label="💬 Chat with AI Agent",
                    height=500,
                    show_copy_button=True,
                    show_share_button=True,
                    # (user, bot) avatars — restored from mojibake; confirm
                    # the originally intended emoji / image paths.
                    avatar_images=("👤", "🤖"),
                    bubble_full_width=False
                )

                # Input area
                with gr.Row():
                    msg_input = gr.Textbox(
                        label="Your Message",
                        placeholder="Ask me anything or request an image generation...",
                        lines=2,
                        scale=4
                    )
                    send_btn = gr.Button("Send 🚀", variant="primary", scale=1)

                # Control buttons
                with gr.Row():
                    clear_btn = gr.Button("Clear Chat 🗑️", variant="secondary")
                    examples_btn = gr.Button("Show Examples 💡", variant="secondary")

                # Examples section (initially hidden, toggled by examples_btn)
                examples_section = gr.Markdown(
                    """
                    ### 💡 Example Prompts:
                    **Image Generation:**
                    - "Generate a realistic photo of a golden retriever playing in a park"
                    - "Create a minimalist logo design for a coffee shop"
                    - "Show me an abstract art piece with vibrant colors"
                    - "Generate a cyberpunk-style illustration of a neon city"
                    **Text + Image Combination:**
                    - "Explain quantum computing and create a visual representation"
                    - "Write a short story about space exploration and generate an accompanying image"
                    - "Help me design a website layout and show me a mockup"
                    **General AI Tasks:**
                    - "Help me write a Python function to sort a list"
                    - "Explain the concept of machine learning in simple terms"
                    - "Create a meal plan for the week"
                    """,
                    visible=False
                )

        # Event handlers
        def send_message(message, history):
            """Ignore blank input; otherwise run the agent and clear the box."""
            if not message.strip():
                return history, ""
            return process_message(message, history), ""

        def toggle_examples(visible):
            """Flip the examples Markdown visibility and persist the new flag."""
            return gr.update(visible=not visible), not visible

        # Wire up the events
        send_btn.click(
            send_message,
            inputs=[msg_input, chatbot],
            outputs=[chatbot, msg_input]
        )
        msg_input.submit(
            send_message,
            inputs=[msg_input, chatbot],
            outputs=[chatbot, msg_input]
        )
        clear_btn.click(
            clear_chat,
            outputs=chatbot
        )

        # Examples toggle: a gr.State holds the current visibility flag.
        # (Fixed: the previous version defined an unused one-output helper
        # and duplicated its logic in a lambda.)
        examples_visible = gr.State(False)
        examples_btn.click(
            toggle_examples,
            inputs=examples_visible,
            outputs=[examples_section, examples_visible]
        )

        # Footer
        # (Fixed: previously advertised Qwen2.5-Coder-32B-Instruct, which
        # contradicts the InferenceClientModel id used above.)
        gr.Markdown("""
        ---
        **Powered by:**
        - 🧠 **Model**: neta-art/Neta-Lumina
        - 🎨 **Image Generation**: FLUX.1-schnell
        - 🤖 **Framework**: smolagents
        - 🌐 **Interface**: Gradio
        *Built for Hugging Face Spaces*
        """)

    return demo
# Launch the interface when executed as a script (not on import).
if __name__ == "__main__":
    app = create_interface()
    app.launch(
        server_name="0.0.0.0",  # bind all interfaces, as HF Spaces requires
        server_port=7860,       # standard Spaces port
        share=False,            # Set to False for HF Spaces
        show_error=True,        # surface tracebacks in the UI
    )