| | import gradio as gr |
| | import os |
| | import tempfile |
| | from huggingface_hub import InferenceClient |
| | from pathlib import Path |
| | import time |
| |
|
| | |
# Shared Inference API client. HF_TOKEN may be unset at import time — the
# api_key is then None, and generate_image() re-checks the env var before use.
client = InferenceClient(api_key=os.environ.get("HF_TOKEN"))
| |
|
def cleanup_temp_files():
    """Delete stale temporary PNGs left behind by earlier image generations.

    Scans the system temp directory for files carrying the ``tempfile``
    default prefix (``tmp``) and a ``.png`` suffix — i.e. the files that
    ``generate_image`` writes via ``NamedTemporaryFile`` — and removes any
    older than five minutes. Best-effort: per-file failures are skipped and
    a top-level failure is only logged.
    """
    try:
        temp_dir = Path(tempfile.gettempdir())
        cutoff = time.time() - 300  # anything newer than 5 minutes is kept
        # Match only tempfile-prefixed names: the shared temp directory may
        # hold PNGs owned by other applications, which we must not delete.
        for file_path in temp_dir.glob(f"{tempfile.gettempprefix()}*.png"):
            try:
                if file_path.stat().st_mtime < cutoff:
                    file_path.unlink(missing_ok=True)
            except OSError:
                # File vanished or is locked — best-effort cleanup, move on.
                pass
    except Exception as e:
        print(f"Cleanup error: {e}")
| |
|
def generate_image(prompt: str) -> tuple:
    """Generate an image from *prompt* using the HF Inference API.

    Args:
        prompt: Free-form text description of the desired image.

    Returns:
        tuple: ``(path, status)`` — ``path`` is the filesystem path of the
        saved PNG (or ``None`` on failure) and ``status`` is a human-readable
        message shown in the UI. Errors are reported in-band, never raised.
    """
    cleanup_temp_files()  # drop stale PNGs from previous generations

    if not os.environ.get("HF_TOKEN"):
        return (None, "❌ Please set HF_TOKEN environment variable")

    try:
        # FLUX.1-schnell is a few-step distilled model; 4 inference steps is
        # its intended fast setting.
        image = client.text_to_image(
            prompt=prompt,
            model="black-forest-labs/FLUX.1-schnell",
            width=1024,
            height=1024,
            num_inference_steps=4
        )

        # Close the handle before saving: leaving it open leaks a file
        # descriptor per call and breaks PIL's re-open of the same path on
        # Windows (the OS refuses a second writer on an open file).
        temp_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
        temp_file.close()
        image.save(temp_file.name)
        return (temp_file.name, "✅ Image generated successfully!")

    except Exception as e:
        # Surface the failure to the UI status box instead of crashing.
        return (None, f"❌ Error generating image: {str(e)}")
| |
|
def create_ui():
    """Build and return the Gradio Blocks interface for the app.

    Lays out a prompt textbox + generate button on the left, the generated
    image and a status textbox on the right, plus clickable example prompts.
    The returned ``Blocks`` object is launched by the ``__main__`` guard.
    """
    # Custom CSS for the status message box (success/error color variants).
    css = """
    .status-box {
        margin-top: 1rem;
        padding: 0.8rem 1rem;
        border-radius: 8px;
        font-weight: 500;
        border-left: 4px solid;
    }
    .status-success {
        background-color: #d4edda;
        color: #155724;
        border-color: #c3e6cb;
    }
    .status-error {
        background-color: #f8d7da;
        color: #721c24;
        border-color: #f5c6cb;
    }
    """

    with gr.Blocks(title="HF Text-to-Image (Free)", theme=gr.themes.Soft(), css=css) as demo:
        # Static header: title, model blurb, and the HF_TOKEN requirement note.
        gr.HTML("""
        <div style="text-align: center; max-width: 800px; margin: 0 auto;">
            <h1 style="font-size: 2.5em; margin-bottom: 0.5em;">
                🖼️ Hugging Face Text-to-Image (Free & Fast)
            </h1>
            <p style="font-size: 1.1em; color: #666;">
                Generate high-quality images using FLUX.1-schnell (free on Hugging Face!)
            </p>
            <p style="color: green; margin: 1rem 0;">
                ✅ Works with free HF accounts • ⚡ Only 4 inference steps!
            </p>
            <p style="color: orange; margin: 0.5rem 0; font-size: 0.95em;">
                ⚠️ Requires Hugging Face API token (set as HF_TOKEN)
            </p>
        </div>
        """)

        # Two-column layout: input controls on the left, results on the right.
        with gr.Row():
            with gr.Column(scale=1):
                prompt_input = gr.Textbox(
                    label="Enter your prompt",
                    placeholder="A cyberpunk cat wearing sunglasses, neon city at night...",
                    lines=4
                )
                generate_btn = gr.Button("🖼️ Generate Image", variant="primary", size="lg")
            with gr.Column(scale=1):
                image_output = gr.Image(label="Generated Image", height=512)
                status_output = gr.Textbox(label="Status", interactive=False)

        # Wire the button to the generator; it returns (image_path, status).
        generate_btn.click(
            fn=generate_image,
            inputs=[prompt_input],
            outputs=[image_output, status_output]
        )

        # Example prompts; cache_examples=False so nothing is generated (or
        # stored on disk) until a user actually clicks one.
        gr.Examples(
            examples=[
                ["A majestic lion in a golden savanna, sunset, photorealistic"],
                ["Anime girl with blue hair, futuristic Tokyo, rain"],
                ["Minimalist logo of a phoenix, black and gold, vector style"]
            ],
            inputs=prompt_input,
            outputs=[image_output, status_output],
            fn=generate_image,
            cache_examples=False
        )

    return demo
| |
|
if __name__ == "__main__":
    # Start from a clean slate: purge stale temp PNGs and any example cache
    # directory left behind by a previous run. Failures here are non-fatal.
    try:
        import shutil
        cleanup_temp_files()
        cache_dir = "gradio_cached_examples"
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir, ignore_errors=True)
    except Exception as e:
        print(f"Initial cleanup error: {e}")

    demo_app = create_ui()
    demo_app.queue()
    demo_app.launch()