import os
import gc
import torch
from PIL import Image as img   # the PIL.Image module, used to open the fallback image
from PIL.Image import Image    # the Image class, used only in type annotations
from diffusers import (
    FluxTransformer2DModel,
    DiffusionPipeline,
    AutoencoderTiny,
)
from transformers import T5EncoderModel
from huggingface_hub.constants import HF_HUB_CACHE
from torchao.quantization import quantize_, int8_weight_only
from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
from pipelines.models import TextToImageRequest
from torch import Generator

# Reduce CUDA memory fragmentation; this only takes effect if set before the
# first CUDA allocation in this process.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

Pipeline = None  # placeholder alias; load_pipeline() returns the actual pipeline object
torch.backends.cuda.matmul.allow_tf32 = True  # allow TF32 matmuls on Ampere+ GPUs
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True  # autotune kernels for the fixed input shapes

ckpt_id = "black-forest-labs/FLUX.1-schnell"
ckpt_revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"

def empty_cache():
    # Drop collectable Python objects, release cached CUDA blocks, and reset
    # the allocator's peak-memory statistics before loading large models.
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()  # supersedes the deprecated reset_max_memory_allocated()

def load_pipeline() -> Pipeline:
    empty_cache()

    dtype, device = torch.bfloat16, "cuda"

    # Standalone bf16 T5-XXL text encoder.
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "city96/t5-v1_1-xxl-encoder-bf16",
        revision="1b9c856aadb864af93c1dcdc226c2774fa67bc86",
        torch_dtype=torch.bfloat16,
    ).to(memory_format=torch.channels_last)

    # Tiny autoencoder in place of the full FLUX VAE to cut decode latency.
    vae = AutoencoderTiny.from_pretrained(
        "RobertML/FLUX.1-schnell-vae_e3m2",
        revision="da0d2cd7815792fb40d084dbd8ed32b63f153d8d",
        torch_dtype=dtype,
    )

    # Pre-quantized (int8 weight-only) FLUX transformer, loaded directly from a
    # snapshot that is expected to already be present in the local HF cache.
    path = os.path.join(
        HF_HUB_CACHE,
        "models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a",
    )
    model = FluxTransformer2DModel.from_pretrained(
        path,
        torch_dtype=dtype,
        use_safetensors=False,
    ).to(memory_format=torch.channels_last)

    pipeline = DiffusionPipeline.from_pretrained(
        ckpt_id,
        vae=vae,
        revision=ckpt_revision,
        transformer=model,
        text_encoder_2=text_encoder_2,
        torch_dtype=dtype,
    ).to(device)

    # First-block caching: skip recomputing transformer blocks whose residual
    # changes less than the threshold between denoising steps.
    apply_cache_on_pipe(pipeline, residual_diff_threshold=0.8)
    quantize_(pipeline.vae, int8_weight_only())  # int8 weight-only quantization of the VAE

    # Warm-up passes so kernel selection/compilation and allocator growth happen
    # before the first timed request.
    for _ in range(3):
        pipeline(
            prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness",
            width=1024,
            height=1024,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
        )

    return pipeline

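# Note: load_pipeline() assumes the int8 transformer snapshot is already in
# HF_HUB_CACHE. A hedged sketch for fetching it when absent, assuming the repo
# id "RobertML/FLUX.1-schnell-int8wo" implied by the cache directory name above:
#
#     from huggingface_hub import snapshot_download
#     path = snapshot_download(
#         "RobertML/FLUX.1-schnell-int8wo",
#         revision="307e0777d92df966a3c0f99f31a6ee8957a9857a",
#     )
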
@torch.no_grad()
def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image:
    try:
        image = pipeline(
            request.prompt,
            generator=generator,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
            height=request.height,
            width=request.width,
            output_type="pil",
        ).images[0]
    except Exception:
        # Never let a generation failure crash the request: fall back to a
        # bundled placeholder image. (A bare `except:` would also have
        # swallowed KeyboardInterrupt/SystemExit.)
        image = img.open("./RobertML.png")
    return image
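
# --- Usage sketch ---
# A minimal local smoke test, assuming TextToImageRequest accepts `prompt`,
# `width`, and `height` as constructor fields (its real signature lives in
# pipelines.models and is not shown here). Not part of the serving entry points.
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(  # hypothetical field names, for illustration only
        prompt="a watercolor fox in a snowy forest",
        width=1024,
        height=1024,
    )
    result = infer(request, pipe, Generator("cuda").manual_seed(0))
    result.save("sample.png")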
|