import gc

import torch
from PIL.Image import Image
from torch import Generator

from diffusers import DiffusionPipeline
from pipelines.models import TextToImageRequest

# Placeholder type alias for the object returned by load_pipeline().
Pipeline = None

MODEL_ID = "black-forest-labs/FLUX.1-schnell"
DTYPE = torch.bfloat16


def clear():
    # Release Python garbage, reset CUDA memory bookkeeping, and return
    # cached blocks to the allocator between runs.
    gc.collect()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    torch.cuda.empty_cache()


def load_pipeline() -> Pipeline:
    clear()

    pipeline = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=DTYPE)

    # Enable cuDNN autotuning and TF32 matmuls for faster CUDA inference.
    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.cuda.set_per_process_memory_fraction(0.99)

    # channels_last memory format is generally friendlier to CUDA kernels.
    pipeline.text_encoder.to(memory_format=torch.channels_last)
    pipeline.text_encoder_2.to(memory_format=torch.channels_last)
    pipeline.transformer.to(memory_format=torch.channels_last)
    pipeline.vae.to(memory_format=torch.channels_last)

    pipeline.vae = torch.compile(pipeline.vae)

    # Keep the compiled VAE resident on the GPU; offload everything else
    # to the CPU and stream it in as needed.
    pipeline._exclude_from_cpu_offload = ["vae"]
    pipeline.enable_sequential_cpu_offload()
    clear()

    # Warm up just once so torch.compile tracing and cuDNN autotuning
    # happen here rather than inside the first timed request.
    for _ in range(1):
        pipeline(
            prompt="unpervaded, unencumber, froggish, groundneedle, transnatural, fatherhood, outjump, cinerator",
            width=1024,
            height=1024,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
        )

    return pipeline


sample = True


@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
    global sample

    # Reclaim warm-up memory once, before the first real request.
    if sample:
        clear()
        sample = False

    generator = Generator("cuda").manual_seed(request.seed)

    image = pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    ).images[0]

    return image
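
# A minimal local smoke test, a sketch rather than part of the serving
# contract. It assumes TextToImageRequest can be constructed with the
# prompt/seed/height/width fields that infer() reads above; the actual
# constructor signature in pipelines.models is not shown in this file.
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(  # hypothetical keyword construction
        prompt="a watercolor fox in a snowy forest",
        seed=42,
        height=1024,
        width=1024,
    )
    result = infer(request, pipe)
    result.save("out.png")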