# Hugging Face Spaces app (status at capture time: Running)
import gradio as gr
from huggingface_hub import InferenceClient

# FLUX.1-schnell: a fast, open-weights text-to-image model available for free
# through the Hugging Face Serverless Inference API.
try:
    client = InferenceClient("black-forest-labs/FLUX.1-schnell")
except Exception as e:
    # Best-effort: keep the app importable even if client construction fails
    # (e.g. missing token / bad environment), but say why instead of swallowing
    # the error silently. generate_image() reports the outage to the user.
    print(f"Warning: failed to initialize InferenceClient: {e}")
    client = None
def generate_image(prompt, progress=gr.Progress()):
    """Generate a PIL image from *prompt* using the FLUX.1-schnell Serverless API.

    Args:
        prompt: Text description of the desired image.
        progress: Gradio progress tracker (injected by Gradio at call time).

    Returns:
        The generated PIL image.

    Raises:
        gr.Error: If the inference client is unavailable or the remote call fails.
            Gradio renders this as a user-visible error message in the UI.
    """
    if not client:
        # Previously returned None silently, leaving the user with a blank
        # output and no explanation; surface the misconfiguration instead.
        raise gr.Error("Inference client is not available. Check the Space configuration and HF token.")
    try:
        progress(0.5, desc="Generating Artwork (This takes a few seconds)...")
        # Generate image using HF Serverless API
        image = client.text_to_image(prompt)
    except Exception as e:
        # Chain the original exception so the traceback keeps the root cause.
        raise gr.Error(f"FLUX Inference Failed: {str(e)}") from e
    progress(1.0, desc="Done!")
    return image
# ---- UI definition: a sleek two-column Gradio Blocks layout ----
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🎨 FLUX.1 AI Image Studio")
    gr.Markdown("Generate stunning, hyperrealistic images in seconds using the open-source **FLUX.1-schnell** model via the Hugging Face Serverless API.")

    with gr.Row():
        # Left column: prompt entry, generate button, and clickable examples.
        with gr.Column(scale=1):
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="A cyberpunk cat hacker in neon lights, highly detailed, 4k...",
                lines=4,
            )
            btn = gr.Button("Generate Artwork 🪄", variant="primary")
            gr.Markdown("### Try these Examples:")
            gr.Examples(
                examples=[
                    ["A futuristic city built into a giant tree, cinematic lighting, conceptual art"],
                    ["An astronaut riding a glowing translucent horse on the surface of Mars, 8k resolution"],
                ],
                inputs=[prompt],
            )

        # Right column: the rendered artwork.
        with gr.Column(scale=1):
            output = gr.Image(label="Generated Artwork", type="pil")

    # Wire the button to the generation function.
    btn.click(generate_image, inputs=prompt, outputs=output)

if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable from inside a container.
    demo.launch(server_name="0.0.0.0")