Spaces:
Sleeping
Sleeping
File size: 2,843 Bytes
cdec560 24b81db 386194d 1e87a0e 24b81db 31b158a 16f5dea 386194d 16f5dea 31b158a 386194d 31b158a 386194d 1e87a0e 386194d 31b158a 386194d 1e87a0e 386194d 31b158a 1e87a0e 31b158a 16f5dea 1e87a0e 31b158a 1e87a0e 31b158a 16f5dea 1e87a0e 386194d 1e87a0e 16f5dea 1e87a0e 386194d 1e87a0e 31b158a 1e87a0e 386194d 1e87a0e 31b158a 386194d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 |
# -*- coding: utf-8 -*-
"""
Gradio Space: Text → Image (Diffusers Pipeline)
UI designed by Mehak Mazhar
"""
import os

import gradio as gr
import torch
from diffusers import DiffusionPipeline, StableDiffusionPipeline
# --- Available models ---
# Maps the human-readable dropdown label to its Hugging Face model repo id.
MODEL_CHOICES = {
    "Dreamlike Diffusion 1.0": "dreamlike-art/dreamlike-diffusion-1.0",
    "Stable Diffusion XL Base": "stabilityai/stable-diffusion-xl-base-1.0"
}
# --- Cache pipelines to avoid reloading ---
# model_id -> loaded pipeline; populated lazily by get_pipeline().
loaded_pipelines = {}
def get_pipeline(model_id):
    """Return a text-to-image pipeline for ``model_id``, loading it on first use.

    Loaded pipelines are cached in the module-level ``loaded_pipelines`` dict
    so switching models in the UI does not re-download/re-load weights.

    Fixes vs. the original:
    - Uses ``DiffusionPipeline.from_pretrained``, which reads the repo's
      ``model_index.json`` and instantiates the correct pipeline class.
      The SDXL base checkpoint offered in MODEL_CHOICES is *not* a
      ``StableDiffusionPipeline``, so hard-coding that class broke it.
    - Uses float16 only when CUDA is available; many CPU ops do not
      support half precision, so CPU falls back to float32.

    Parameters
    ----------
    model_id : str
        Hugging Face model repo id (a value from MODEL_CHOICES).

    Returns
    -------
    The loaded diffusers pipeline, already moved to the best device.
    """
    if model_id not in loaded_pipelines:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        dtype = torch.float16 if device == "cuda" else torch.float32
        pipe = DiffusionPipeline.from_pretrained(
            model_id,
            torch_dtype=dtype,
            use_safetensors=True,
        )
        loaded_pipelines[model_id] = pipe.to(device)
    return loaded_pipelines[model_id]
# --- Image generation function ---
def generate_image(prompt, model_choice, width, height, guidance_scale, steps):
    """Generate one image from ``prompt`` with the selected model.

    Returns a ``(image, status)`` pair: a PIL image plus a success message,
    or ``(None, error message)`` if anything fails, so the UI can always
    display something in the status box instead of crashing.
    """
    try:
        pipe = get_pipeline(MODEL_CHOICES[model_choice])
        result = pipe(
            prompt,
            width=int(width),
            height=int(height),
            guidance_scale=float(guidance_scale),
            num_inference_steps=int(steps),
        )
        return result.images[0], f"✅ Generated with {model_choice}"
    except Exception as err:
        # Surface the failure as status text; the image slot stays empty.
        return None, f"⚠️ Error: {str(err)}"
# --- Gradio UI ---
# Minimal theming: warm page background, brown bold headings.
css = """
body { background-color: #fff7e6; }
h1 { color: #a0522d; font-weight: bold; }
"""

# Layout: prompt and generation controls in the left column,
# result image and status message in the right column.
with gr.Blocks(css=css, title="Stable Diffusion Text-to-Image") as demo:
    gr.HTML("<h1>Stable Diffusion — designed by Mehak Mazhar</h1>")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", placeholder="A futuristic city at night", lines=3)
            # Dropdown labels are the keys of MODEL_CHOICES; generate_image
            # maps the selected label back to a model repo id.
            model_choice = gr.Dropdown(
                list(MODEL_CHOICES.keys()),
                value="Dreamlike Diffusion 1.0",
                label="Choose Model"
            )
            width = gr.Dropdown([256, 384, 512, 768, 1024], value=512, label="Width")
            height = gr.Dropdown([256, 384, 512, 768, 1024], value=512, label="Height")
            guidance = gr.Slider(1.0, 15.0, value=7.5, step=0.1, label="Guidance Scale")
            steps = gr.Slider(10, 100, value=30, step=1, label="Steps")
            generate_btn = gr.Button("Generate Image", variant="primary")
        with gr.Column():
            output_image = gr.Image(label="Generated Image", type="pil")
            status = gr.Textbox(label="Status", interactive=False)
    # Wire the button to the generation function; outputs map 1:1 to
    # generate_image's (image, status) return pair.
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, model_choice, width, height, guidance, steps],
        outputs=[output_image, status]
    )

if __name__ == "__main__":
    # Bind to 0.0.0.0 so the app is reachable from outside the container
    # (required for hosted deployments such as Hugging Face Spaces).
    demo.launch(server_name="0.0.0.0")
|