# app.py — ZeroGPU real-time image-to-image Gradio Space
import torch
import numpy as np
import gradio as gr
import spaces
from PIL import Image
from diffusers import AutoPipelineForImage2Image, LCMScheduler
# --- Model setup ----------------------------------------------------------
# DreamShaper 8 distilled for Latent Consistency (LCM): usable output in
# 1-4 denoising steps, which is what makes near-real-time drawing feasible.
MODEL_ID = "Lykon/dreamshaper-8-lcm"

pipe = AutoPipelineForImage2Image.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16,
    use_safetensors=True,
)
# Swap in the LCM scheduler so few-step inference stays coherent.
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
# Decode the VAE in tiles to keep peak VRAM low on small/shared GPUs.
pipe.enable_vae_tiling()
def get_blank():
    """Return a solid-white 512x512 RGB canvas used as the editor's start image."""
    return Image.new("RGB", (512, 512), (255, 255, 255))
@spaces.GPU(duration=20)
def predict(ui_data, prompt, strength, steps, guidance):
    """Run one LCM img2img pass over the current canvas.

    Parameters
    ----------
    ui_data : dict | PIL.Image.Image | None
        Output of ``gr.ImageEditor``. In Gradio 5 this is usually a dict with
        a "composite" key; simple configurations may pass a plain image.
    prompt : str
        User prompt; ", high quality" is appended before inference.
    strength : float
        img2img denoise strength (higher follows the prompt more, the sketch less).
    steps : int-like
        Number of inference steps (LCM works well with 1-4).
    guidance : float
        Classifier-free guidance scale.

    Returns
    -------
    PIL.Image.Image | None
        The generated 512x512 image, or None when there is nothing to render.
    """
    if ui_data is None:
        return None
    # Gradio 5 ImageEditor hands back {"background", "layers", "composite"};
    # handle the plain-image case too.
    img = ui_data["composite"] if isinstance(ui_data, dict) else ui_data
    # Fix: before the user has drawn anything the dict's "composite" entry can
    # itself be None — bail out instead of crashing on None.convert().
    if img is None:
        return None
    pipe.to("cuda")  # ZeroGPU: move weights onto the just-allocated GPU.
    result = pipe(
        prompt=f"{prompt}, high quality",
        image=img.convert("RGB").resize((512, 512)),
        strength=strength,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
    ).images[0]
    return result
# --- UI -------------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# ⚡ ZeroGPU Real-Time v5")
    with gr.Row():
        with gr.Column():
            # Simplified Editor for v5
            canvas = gr.ImageEditor(
                value=get_blank(),   # start from a blank white canvas
                type="pil",          # hand PIL images straight to predict()
                layers=False,        # single flattened drawing surface
                label="Draw Here"
            )
            prompt = gr.Textbox(label="Prompt", value="cyberpunk city")
            # img2img denoise strength passed through to the pipeline.
            strength = gr.Slider(0.3, 0.9, value=0.5)
            # LCM needs only a handful of inference steps.
            steps = gr.Slider(1, 4, value=4, step=1)
            guidance = gr.Slider(0.0, 2.0, value=1.0)
        # NOTE(review): source indentation was mangled; output is assumed to
        # sit beside the input column in the Row — confirm intended layout.
        output = gr.Image(label="AI Feed")
    # Trigger on change: re-render the AI feed after every edit to the canvas.
    inputs = [canvas, prompt, strength, steps, guidance]
    canvas.change(fn=predict, inputs=inputs, outputs=output, show_progress="hidden")

demo.launch()