# smart-blue-demo / app.py
# Author: AiCoderv2 — "Deploy Gradio app with multiple files" (commit 578f301, verified)
import gradio as gr
from diffusers import DiffusionPipeline
import os
# Load the model locally at import time so the first request doesn't pay the
# download/initialization cost.
# NOTE(review): no device or dtype is specified, so diffusers' defaults apply
# (CPU / float32 unless configured elsewhere) — confirm this is intended for
# the deployment hardware.
pipe = DiffusionPipeline.from_pretrained("lightx2v/Qwen-Image-Lightning")
def generate_image(prompt: str):
    """
    Generate an image from a text prompt using the Qwen-Image-Lightning model.

    Args:
        prompt (str): The text description for image generation.

    Returns:
        PIL.Image.Image: The generated image.

    Raises:
        gr.Error: If the prompt is empty or the pipeline fails.
    """
    # Reject empty/whitespace-only prompts up front with a clear UI message
    # instead of letting the pipeline fail with an opaque internal error.
    if not prompt or not prompt.strip():
        raise gr.Error("Please enter a non-empty prompt.")
    try:
        # The pipeline returns an object whose .images attribute is a list;
        # take the first (and only) generated image.
        image = pipe(prompt).images[0]
        return image
    except Exception as e:
        # Surface pipeline failures in the Gradio UI; chain the original
        # exception (`from e`) so the full cause appears in server logs.
        raise gr.Error(f"Error generating image: {str(e)}") from e
# ---------------------------------------------------------------------------
# Gradio UI: prompt textbox, generate button, output image, and sample prompts.
# ---------------------------------------------------------------------------
with gr.Blocks(title="Text-to-Image Generator", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🖼️ Text-to-Image Generator")
    gr.Markdown("Generate images from text prompts using the Qwen-Image-Lightning model. Powered by Hugging Face.")

    with gr.Row():
        prompt_box = gr.Textbox(
            label="Enter your prompt",
            placeholder="A beautiful sunset over mountains...",
            lines=3,
            show_copy_button=True,
        )

    run_button = gr.Button("Generate Image", variant="primary", size="lg")
    result_image = gr.Image(label="Generated Image", show_download_button=True)

    # Clickable sample prompts; not pre-computed (cache_examples=False), so
    # each example runs the model on demand.
    gr.Examples(
        examples=[
            "A futuristic city at night with neon lights",
            "A cute kitten playing with yarn",
            "An astronaut walking on the moon",
            "A serene lake surrounded by autumn trees",
            "A steampunk airship flying over Victorian London",
        ],
        inputs=prompt_box,
        outputs=result_image,
        fn=generate_image,
        cache_examples=False,
    )

    # Pressing Enter in the textbox and clicking the button both trigger
    # generation with the same handler.
    prompt_box.submit(generate_image, inputs=prompt_box, outputs=result_image)
    run_button.click(generate_image, inputs=prompt_box, outputs=result_image)

    gr.Markdown("---")
    gr.Markdown('<p style="text-align: center;">Built with <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank">anycoder</a></p>')

if __name__ == "__main__":
    demo.launch()