from diffusers import AutoencoderTiny, FluxPipeline
from transformers import T5EncoderModel
import torch
import gc
from PIL.Image import Image
from pipelines.models import TextToImageRequest
from torch import Generator
from torchao.quantization import quantize_, int8_weight_only


# Type alias for the object passed between load_pipeline() and infer().
Pipeline = FluxPipeline
MODEL_ID = "black-forest-labs/FLUX.1-schnell"
DTYPE = torch.bfloat16
def clear():
    """Free cached CUDA memory and reset the allocator's peak statistics."""
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()


def load_pipeline() -> Pipeline:
    clear()

    # Swap in a tiny VAE and a bf16 T5-XXL encoder to cut memory and load time.
    vae = AutoencoderTiny.from_pretrained("manbeast3b/quantized1", torch_dtype=DTYPE)
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "city96/t5-v1_1-xxl-encoder-bf16", torch_dtype=DTYPE
    )
    pipeline = FluxPipeline.from_pretrained(
        MODEL_ID, vae=vae, text_encoder_2=text_encoder_2, torch_dtype=DTYPE
    )

    # Enable cuDNN autotuning and TF32 matmuls, and let this process claim
    # nearly all of the GPU's memory.
    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.cuda.set_per_process_memory_fraction(0.99)

    pipeline.text_encoder.to(memory_format=torch.channels_last)
    pipeline.text_encoder_2.to(memory_format=torch.channels_last)
    pipeline.transformer.to(memory_format=torch.channels_last)
    pipeline.vae.to(memory_format=torch.channels_last)
    pipeline.vae = torch.compile(pipeline.vae)

    # Keep the compiled VAE resident on the GPU while the other components
    # are sequentially offloaded to CPU between uses.
    pipeline._exclude_from_cpu_offload = ["vae"]
    pipeline.enable_sequential_cpu_offload()
    clear()

    # One warmup pass to trigger torch.compile and cuDNN autotuning before
    # the first real request.
    pipeline(
        prompt="unpervaded, unencumber, froggish, groundneedle, transnatural, fatherhood, outjump, cinerator",
        width=1024,
        height=1024,
        guidance_scale=0.1,
        num_inference_steps=4,
        max_sequence_length=256,
    )
    return pipeline
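

# Optional extra: the torchao imports above support int8 weight-only
# quantization of the FLUX transformer. A minimal sketch, not wired into
# load_pipeline(); whether the speed/quality trade-off pays off for schnell
# is not established by anything in this file.
def quantize_transformer(pipeline: Pipeline) -> Pipeline:
    # quantize_ mutates the module in place, replacing nn.Linear weights
    # with int8 weight-only variants.
    quantize_(pipeline.transformer, int8_weight_only())
    return pipeline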
|
|
# First-call flag: infer() flushes allocator state once after the warmup pass.
sample = True


@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
    global sample
    if sample:
        clear()
        sample = False
    generator = Generator("cuda").manual_seed(request.seed)
    image = pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    ).images[0]
    return image
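

# A minimal local smoke test, assuming TextToImageRequest accepts prompt,
# height, width, and seed as keyword arguments (only those fields are read
# above); the real entry point is whatever harness imports load_pipeline()
# and infer().
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(
        prompt="a photograph of a red fox in fresh snow",
        height=1024,
        width=1024,
        seed=0,
    )
    infer(request, pipe).save("sample.png")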