"""Text-to-image inference for FLUX.1-schnell with an int8-quantized VAE."""

import gc

import torch
from diffusers import AutoencoderKL, DiffusionPipeline
from PIL import Image
from torch import Generator
from torchao.quantization import quantize_, int8_weight_only

from pipelines.models import TextToImageRequest

Pipeline = DiffusionPipeline  # concrete type behind the annotations below

MODEL_ID = "black-forest-labs/FLUX.1-schnell"


def clear():
    """Free cached CUDA memory and reset the allocator's peak-usage statistics."""
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()


def load_pipeline() -> Pipeline:
    clear()

    dtype, device = torch.bfloat16, "cuda"

    # Quantize the VAE to int8 weights to shrink its GPU memory footprint.
    vae = AutoencoderKL.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=dtype)
    quantize_(vae, int8_weight_only(), device=device)

    pipeline = DiffusionPipeline.from_pretrained(
        MODEL_ID,
        vae=vae,
        torch_dtype=dtype,
    )
    # Stream weights between CPU and GPU one submodule at a time to fit in VRAM.
    pipeline.enable_sequential_cpu_offload()

    # Warm-up generation: pays one-time CUDA kernel and cache costs up front.
    pipeline(
        prompt="unpervaded, unencumber, froggish, groundneedle, transnatural, fatherhood, outjump, cinerator",
        width=1024,
        height=1024,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
    )
    clear()

    return pipeline


@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image.Image:
    clear()

    # Reproducible sampling when a seed is supplied; otherwise let torch pick one.
    if request.seed is None:
        generator = None
    else:
        generator = Generator(device="cuda").manual_seed(request.seed)

    image = pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    ).images[0]

    return image
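

# A minimal usage sketch, not part of the serving path. It assumes
# TextToImageRequest is a keyword-constructible model exposing the
# prompt/width/height/seed fields read by infer() above; check the real
# pipelines.models definition before relying on these names.
if __name__ == "__main__":
    pipeline = load_pipeline()
    request = TextToImageRequest(  # hypothetical constructor arguments
        prompt="a watercolor fox in a snowy forest",
        width=1024,
        height=1024,
        seed=0,
    )
    infer(request, pipeline).save("output.png")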