# SDXL (CPU Version) — Gradio Space app.py
# Source: Hugging Face Space K00B404/SDXL, revision d3cb4fb (verified).
# NOTE: the Space was showing "Runtime error" at this revision.
import os
import random
import gradio as gr
import numpy as np
import PIL.Image
import torch
from diffusers import AutoencoderKL, DiffusionPipeline
# Markdown shown at the top of the Gradio UI.
DESCRIPTION = "# SDXL (CPU Version)"
DESCRIPTION += "\n<p>Running on CPU. This may be very slow.</p>"
# Largest seed value the UI can offer (fits in a signed 32-bit int).
MAX_SEED = np.iinfo(np.int32).max
# Upper bound on generated width/height in pixels; overridable via env var.
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "512")) # Reduced for CPU
# Refiner stage is off for the CPU build to keep generation time bounded.
ENABLE_REFINER = False # Disabled for CPU version
# All inference runs on CPU; float32 weights are used accordingly below.
device = torch.device("cpu")
# Load models
# VAE: community "fp16-fix" checkpoint, loaded here in float32 for CPU.
# NOTE(review): these weights are meant as a drop-in SDXL VAE replacement;
# loading them in fp32 is harmless — confirm against the model card.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float32)
# SDXL base pipeline in float32 (CPU has no fp16 acceleration).
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
vae=vae,
torch_dtype=torch.float32,
use_safetensors=True,
)
# Move all pipeline modules onto the CPU device defined above.
pipe.to(device)
def generate(
    prompt: str,
    negative_prompt: str = "",
    prompt_2: str = "",
    negative_prompt_2: str = "",
    use_negative_prompt: bool = False,
    use_prompt_2: bool = False,
    use_negative_prompt_2: bool = False,
    seed: int = 0,
    width: int = 512,
    height: int = 512,
    guidance_scale_base: float = 5.0,
    num_inference_steps_base: int = 25,
    randomize_seed: bool = False,
) -> PIL.Image.Image:
    """Run the SDXL base pipeline once and return the generated image.

    Args:
        prompt: Main text prompt.
        negative_prompt: Negative prompt; only passed when ``use_negative_prompt``.
        prompt_2: Secondary prompt; only passed when ``use_prompt_2``.
        negative_prompt_2: Secondary negative prompt; only passed when
            ``use_negative_prompt_2``.
        use_negative_prompt: Toggle for ``negative_prompt``.
        use_prompt_2: Toggle for ``prompt_2``.
        use_negative_prompt_2: Toggle for ``negative_prompt_2``.
        seed: RNG seed for reproducibility (ignored when ``randomize_seed``).
        width: Requested output width in pixels; capped at ``MAX_IMAGE_SIZE``.
        height: Requested output height in pixels; capped at ``MAX_IMAGE_SIZE``.
        guidance_scale_base: Classifier-free guidance strength.
        num_inference_steps_base: Number of denoising steps.
        randomize_seed: When True, draw a fresh seed in ``[0, MAX_SEED]``.
            New trailing parameter with a behavior-preserving default; the
            original code imported ``random`` and defined ``MAX_SEED`` but
            never used either, indicating this feature was dropped.

    Returns:
        The first generated ``PIL.Image.Image``.
    """
    # Enforce the advertised size limit — oversized SDXL renders can
    # exhaust memory on a CPU-only host (original code never checked this).
    width = min(width, MAX_IMAGE_SIZE)
    height = min(height, MAX_IMAGE_SIZE)
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    # Disabled prompt slots are passed as None so the pipeline ignores them
    # instead of conditioning on an empty string.
    if not use_negative_prompt:
        negative_prompt = None
    if not use_prompt_2:
        prompt_2 = None
    if not use_negative_prompt_2:
        negative_prompt_2 = None
    return pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        prompt_2=prompt_2,
        negative_prompt_2=negative_prompt_2,
        width=width,
        height=height,
        guidance_scale=guidance_scale_base,
        num_inference_steps=num_inference_steps_base,
        generator=generator,
        output_type="pil",
    ).images[0]
if __name__ == "__main__":
    # Fix: the original launched `demo.queue(...).launch()` but `demo` was
    # never defined anywhere in the file, so the Space crashed at startup
    # with a NameError (the "Runtime error" shown on the Space page).
    # Build a minimal Blocks UI whose inputs map positionally onto
    # generate()'s parameters.
    with gr.Blocks() as demo:
        gr.Markdown(DESCRIPTION)
        with gr.Group():
            prompt = gr.Text(label="Prompt", placeholder="Enter your prompt")
            run_button = gr.Button("Run")
        result = gr.Image(label="Result")
        with gr.Accordion("Advanced options", open=False):
            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
            use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
            use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
            negative_prompt = gr.Text(label="Negative prompt", value="")
            prompt_2 = gr.Text(label="Prompt 2", value="")
            negative_prompt_2 = gr.Text(label="Negative prompt 2", value="")
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
            height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
            guidance_scale_base = gr.Slider(label="Guidance scale", minimum=1.0, maximum=20.0, step=0.1, value=5.0)
            num_inference_steps_base = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=25)
        # Inputs are positional: keep this list in generate()'s parameter order.
        run_button.click(
            fn=generate,
            inputs=[
                prompt,
                negative_prompt,
                prompt_2,
                negative_prompt_2,
                use_negative_prompt,
                use_prompt_2,
                use_negative_prompt_2,
                seed,
                width,
                height,
                guidance_scale_base,
                num_inference_steps_base,
            ],
            outputs=result,
        )
    demo.queue(max_size=5).launch()