import io
import os

import gradio as gr
import requests
from dotenv import load_dotenv
from PIL import Image

# Load environment variables from a local .env file (no-op if none exists).
load_dotenv()

# Get API key from environment variable.
# Set your API key in Hugging Face Space Settings > Variables and Secrets.
API_KEY = os.getenv("API_KEY")

# Router endpoint (replaces the retired api-inference.huggingface.co host).
API_URL = os.getenv(
    "API_URL",
    "https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-schnell",
)


def generate_image(prompt, negative_prompt="", width=1024, height=1024,
                   guidance_scale=7.5, num_inference_steps=50):
    """Generate an image using the Hugging Face Inference API (router endpoint).

    Args:
        prompt: Text description of the desired image. Empty or
            whitespace-only prompts are rejected locally.
        negative_prompt: Things to avoid; only sent when non-empty, since
            not every model accepts the parameter.
        width: Output width in pixels.
        height: Output height in pixels.
        guidance_scale: Prompt-adherence strength.
        num_inference_steps: Diffusion steps (FLUX.1-schnell works well
            with just 4-8).

    Returns:
        Tuple of (PIL.Image.Image, "Success!") on success, or
        (None, error_message) on any failure — errors are returned, not
        raised, so they surface in the UI status box.
    """
    if not API_KEY:
        return None, "Error: API key not configured. Please set the API_KEY in your Space secrets."

    # Reject empty AND whitespace-only prompts; the latter previously
    # slipped through and failed remotely with an opaque API error.
    if not prompt or not prompt.strip():
        return None, "Error: Please enter a prompt."

    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    }

    # FLUX.1-schnell uses different parameters - adjust payload accordingly.
    payload = {
        "inputs": prompt,
        "parameters": {
            "width": width,
            "height": height,
            "guidance_scale": guidance_scale,
            "num_inference_steps": num_inference_steps,
        },
    }

    # Only add negative_prompt if provided and model supports it.
    if negative_prompt:
        payload["parameters"]["negative_prompt"] = negative_prompt

    try:
        response = requests.post(API_URL, headers=headers, json=payload, timeout=300)

        if response.status_code != 200:
            return None, f"API Error: {response.status_code} - {response.text}"

        # The router can answer 200 with a non-image body (e.g. a JSON
        # error/queue payload). Detect that here so the user sees the
        # message instead of a cryptic PIL "cannot identify image" error.
        content_type = response.headers.get("content-type", "")
        if not content_type.startswith("image/"):
            return None, f"API Error: unexpected response - {response.text[:500]}"

        image = Image.open(io.BytesIO(response.content))
        return image, "Success!"

    except Exception as e:
        # Broad catch is deliberate: any failure (network, decode, ...)
        # must be reported in the status box rather than crash the Space.
        return None, f"Error: {str(e)}"


# ---------------------------------------------------------------------------
# Gradio interface
# ---------------------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🎨 AI Image Generator
    Generate stunning images from text prompts using AI. Enter your prompt below and click Generate.
    """)

    with gr.Row():
        with gr.Column(scale=1):
            prompt_input = gr.Textbox(
                label="Prompt",
                placeholder="A serene landscape with mountains and a lake at sunset...",
                lines=3,
            )
            negative_prompt_input = gr.Textbox(
                label="Negative Prompt (what to avoid)",
                placeholder="blurry, low quality, distorted...",
                lines=2,
            )
            with gr.Row():
                width_slider = gr.Slider(
                    minimum=512,
                    maximum=1024,
                    step=64,
                    value=1024,
                    label="Width",
                )
                height_slider = gr.Slider(
                    minimum=512,
                    maximum=1024,
                    step=64,
                    value=1024,
                    label="Height",
                )
            guidance_slider = gr.Slider(
                minimum=1,
                maximum=20,
                step=0.5,
                value=7.5,
                label="Guidance Scale",
            )
            steps_slider = gr.Slider(
                minimum=1,
                maximum=50,
                step=1,
                value=4,
                label="Inference Steps (FLUX works well with 4-20)",
            )
            generate_btn = gr.Button("✨ Generate Image", variant="primary")
            status_text = gr.Textbox(label="Status", interactive=False)

        with gr.Column(scale=1):
            output_image = gr.Image(label="Generated Image", type="pil")
            gr.Markdown("""
            ### Tips for better results:
            - Be specific and descriptive in your prompts
            - Include style keywords: "digital art", "photorealistic", "oil painting"
            - Mention lighting: "cinematic lighting", "golden hour", "studio lighting"
            - FLUX models work great with just 4-8 inference steps!
            """)

    # Event handlers
    generate_btn.click(
        fn=generate_image,
        inputs=[
            prompt_input,
            negative_prompt_input,
            width_slider,
            height_slider,
            guidance_slider,
            steps_slider,
        ],
        outputs=[output_image, status_text],
    )

    # Examples
    gr.Examples(
        examples=[
            ["A futuristic city at night with neon lights and flying cars, digital art", "", 1024, 1024, 7.5, 4],
            ["Portrait of a wise wizard with long beard, magical atmosphere, oil painting", "blurry, ugly", 1024, 1024, 8, 4],
            ["Cute robot reading a book in a cozy library, pixar style", "", 1024, 1024, 7, 4],
        ],
        inputs=[prompt_input, negative_prompt_input, width_slider, height_slider, guidance_slider, steps_slider],
        label="Example Prompts",
    )

if __name__ == "__main__":
    demo.launch()