| import torch |
| from PIL.Image import Image |
| from pipelines.models import TextToImageRequest |
| from diffusers import StableDiffusionXLPipeline |
| from diffusers import AutoencoderTiny |
| from torch import Generator |
|
|
|
|
def load_pipeline() -> StableDiffusionXLPipeline:
    """Load and prepare the SDXL text-to-image pipeline for CUDA inference.

    Downloads (or loads from cache) the newdream-sdxl-20 checkpoint in fp16,
    swaps in the tiny TAESD-XL autoencoder for faster decoding, moves the
    pipeline to the GPU, and compiles the UNet with torch.compile.

    Returns:
        A StableDiffusionXLPipeline ready for inference on "cuda".
    """
    pipeline = StableDiffusionXLPipeline.from_pretrained(
        "stablediffusionapi/newdream-sdxl-20",
        torch_dtype=torch.float16,
    )
    # BUGFIX: the original assigned to an undefined name `pipe`, so the tiny
    # VAE swap and the move to CUDA never happened (NameError at call time).
    pipeline.vae = AutoencoderTiny.from_pretrained(
        "madebyollin/taesdxl", torch_dtype=torch.float16
    )
    # Move to the GPU before compiling so torch.compile traces the model in
    # its final device/dtype placement.
    pipeline.to("cuda")
    pipeline.unet = torch.compile(pipeline.unet, mode='reduce-overhead', fullgraph=True)

    return pipeline
|
|
|
|
def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
    """Run one text-to-image generation and return the resulting image.

    Args:
        request: Prompt, negative prompt, output size, and optional seed.
        pipeline: A pipeline prepared by load_pipeline().

    Returns:
        The first generated PIL image.
    """
    # BUGFIX: use `is not None` rather than truthiness — a seed of 0 is a
    # valid, reproducible seed and must not be silently dropped.
    generator = (
        Generator(pipeline.device).manual_seed(request.seed)
        if request.seed is not None
        else None
    )

    return pipeline(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
    ).images[0]
|
|