import os

import gradio as gr
from huggingface_hub import InferenceClient
from PIL import Image

# --- Configuration ---
# Using the high-speed distilled FLUX model
MODEL_ID = "black-forest-labs/FLUX.1-schnell"

# Pre-defined style templates. Each value is a format string with a single
# {prompt} slot that apply_style_to_prompt() fills in.
STYLE_OPTIONS = {
    "None": "{prompt}",
    "Realistic": "A hyper-realistic, high-detail photograph of {prompt}, 8k resolution, cinematic lighting, sharp focus",
    "Spirited Away": "Studio Ghibli style, hand-drawn animation of {prompt}, lush colors, whimsical atmosphere, cinematic",
    "Cyberpunk": "Cyberpunk aesthetic, neon lighting, futuristic city vibes, {prompt}, high contrast, 8k",
    "Minimalist": "Clean smooth lines, minimalist style, {prompt}, simple white background, high-quality digital art",
    "Oil Painting": "Classic oil painting texture, visible brushstrokes, rich colors, {prompt}, masterpiece",
}


def authenticate_client():
    """
    Build an InferenceClient using the 'TextToImage' token stored as the
    HF_TOKEN secret in the Space environment.

    Raises:
        ValueError: if the HF_TOKEN environment variable is not set.
    """
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        raise ValueError("HF_TOKEN secret not found. Please add your 'TextToImage' token to Space Secrets.")
    return InferenceClient(token=hf_token)


def apply_style_to_prompt(prompt: str, style: str) -> str:
    """
    Combine the user prompt with the selected style's keyword template.

    Unknown style names fall back to the bare prompt.
    """
    template = STYLE_OPTIONS.get(style, "{prompt}")
    return template.format(prompt=prompt)


def generate_image(prompt: str, style: str):
    """
    Generate an image for *prompt* in the chosen *style* via the Inference API.

    Returns:
        A PIL image on success.

    Raises:
        gr.Error: on gated-model (403), timeout, or any other API failure,
            so Gradio surfaces the message as a UI error toast.
    """
    try:
        client = authenticate_client()
        enriched_prompt = apply_style_to_prompt(prompt, style)

        # Call the Inference API.
        # FLUX.1-schnell is optimized for quality in just 4 steps.
        image = client.text_to_image(
            enriched_prompt,
            model=MODEL_ID,
        )
        return image
    except Exception as e:
        error_msg = str(e)
        # BUG FIX: gr.Error must be *raised*, not returned — a returned
        # gr.Error object fed into the Image output is not displayed as an
        # error alert by Gradio. `from e` preserves the original traceback.
        if "403" in error_msg or "gated" in error_msg.lower():
            raise gr.Error("Access Denied: Please visit the FLUX.1-schnell model card and accept the terms.") from e
        elif "timeout" in error_msg.lower():
            raise gr.Error("API Timeout: The server is busy. Please try again in a moment.") from e
        else:
            raise gr.Error(f"Generation Error: {error_msg}") from e


# --- UI Layout ---
def build_app():
    """Assemble and return the Gradio Blocks UI for the generator."""
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 🎨 Professional AI Image Generator")
        gr.Markdown(f"Powered by `{MODEL_ID}` via Hugging Face Inference API.")

        with gr.Row():
            with gr.Column():
                user_prompt = gr.Textbox(
                    label="What do you want to create?",
                    placeholder="Describe your vision...",
                    lines=3,
                )
                style_choice = gr.Dropdown(
                    choices=list(STYLE_OPTIONS.keys()),
                    value="None",
                    label="Choose a Style",
                )
                generate_btn = gr.Button("Generate Masterpiece", variant="primary")

                gr.Examples(
                    examples=[
                        ["A futuristic city on Mars with glass domes", "Realistic"],
                        ["A peaceful forest with glowing mushrooms", "Spirited Away"],
                        ["A sleek metallic owl perched on a branch", "Minimalist"],
                    ],
                    inputs=[user_prompt, style_choice],
                )

            with gr.Column():
                output_image = gr.Image(label="Generated Result", type="pil")

        # Link the button to the generation function.
        generate_btn.click(
            fn=generate_image,
            inputs=[user_prompt, style_choice],
            outputs=output_image,
        )

        gr.Markdown("---")
        gr.Markdown("### Deployment Steps:\n1. Create a **Gradio Space**.\n2. Add your **TextToImage** token to **Settings > Secrets** as `HF_TOKEN`.\n3. Upload these files.")

    return demo


if __name__ == "__main__":
    app = build_app()
    app.launch()