"""Fast text-to-image Gradio demo built on Stability AI's SD-Turbo.

Loads the distilled SD-Turbo pipeline once at import time, then serves a
single-prompt Gradio interface. Images are generated at reduced resolution
and re-compressed as JPEG to keep the UI responsive on modest hardware.
"""

import io

import torch
import gradio as gr
from diffusers import StableDiffusionPipeline
from PIL import Image

# Device / dtype selection: fp16 only makes sense on CUDA; CPU stays fp32.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Load the SD-Turbo pipeline once at module level (model is fetched from the
# Hugging Face hub on first run).
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/sd-turbo",
    torch_dtype=dtype,
)
pipe.to(device)
pipe.enable_attention_slicing()  # trades a little speed for lower VRAM use


def generate(prompt):
    """Generate one 384x384 image from *prompt* and return it as a PIL image.

    SD-Turbo is a one-step distilled model trained WITHOUT classifier-free
    guidance, so we must pass num_inference_steps=1 and guidance_scale=0.0
    (per the stabilityai/sd-turbo model card); the diffusers defaults
    (~50 steps, CFG 7.5) would be both slow and degrade output quality.
    """
    # inference_mode() disables autograd bookkeeping; autocast is applied
    # only on CUDA — on CPU it would force bfloat16, which is unnecessary
    # (the pipeline is already fp32 there) and can be slow or unsupported.
    with torch.inference_mode():
        if device == "cuda":
            with torch.autocast(device_type="cuda"):
                image = pipe(
                    prompt,
                    height=384,
                    width=384,
                    num_inference_steps=1,
                    guidance_scale=0.0,
                ).images[0]
        else:
            image = pipe(
                prompt,
                height=384,
                width=384,
                num_inference_steps=1,
                guidance_scale=0.0,
            ).images[0]

    # Re-encode as JPEG (quality 70) to shrink the payload Gradio ships to
    # the browser; .load() decodes eagerly while the buffer is still alive.
    buffer = io.BytesIO()
    image.save(buffer, format="JPEG", quality=70)
    buffer.seek(0)
    result = Image.open(buffer)
    result.load()
    return result


# Gradio UI — launched at import time, matching the original script's behavior.
gr.Interface(
    fn=generate,
    inputs=gr.Textbox(
        label="Enter Prompt",
        placeholder="A futuristic cyberpunk city at night",
    ),
    outputs=gr.Image(label="Generated Image"),
    title="🎨 Fast Text-to-Image Generator (SD Turbo)",
    description="Optimized for speed and light resource use. Generates lower-res compressed images from text.",
).launch()