"""Text-to-image pipeline for Flux.1-schnell: model loading, warmup, and inference."""

import gc
import os

import torch
from diffusers import DiffusionPipeline, FluxPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from PIL.Image import Image
from torch import Generator
from transformers import T5EncoderModel

from pipelines.models import TextToImageRequest

# Let the CUDA caching allocator grow segments instead of fragmenting memory.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Concrete class returned by load_pipeline(): a Flux checkpoint loaded via
# DiffusionPipeline.from_pretrained resolves to FluxPipeline.
Pipeline = FluxPipeline

ckpt_id = "malicious546/flux.1-schnell-dat-q8"
ckpt_revision = "f312bd733a4f351e328e94f12e1cd4d0d4baa5eb"


def empty_cache():
    # Drop unreachable Python objects, return cached CUDA blocks to the
    # driver, and reset the peak-memory counters for clean measurements.
    # (reset_max_memory_allocated() is a deprecated alias of
    # reset_peak_memory_stats(), so a single call suffices.)
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()


def load_pipeline() -> Pipeline:
    empty_cache()

    dtype, device = torch.bfloat16, "cuda"

    # Standalone bf16 T5-XXL text encoder, pinned to a fixed revision.
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "city96/t5-v1_1-xxl-encoder-bf16",
        revision="1b9c856aadb864af93c1dcdc226c2774fa67bc86",
        torch_dtype=torch.bfloat16,
    ).to(memory_format=torch.channels_last)

    # Load the pre-quantized transformer directly from a local HF cache
    # snapshot, then assemble the rest of the pipeline around it.
    path = os.path.join(
        HF_HUB_CACHE,
        "models--malicious546--flux.1-schnell-dat-int8/snapshots/017b51c93e8b1a0a7ab7a05cdcce2270f139de60",
    )
    model = FluxTransformer2DModel.from_pretrained(
        path, torch_dtype=dtype, use_safetensors=False
    ).to(memory_format=torch.channels_last)
    pipeline = DiffusionPipeline.from_pretrained(
        ckpt_id,
        revision=ckpt_revision,
        transformer=model,
        text_encoder_2=text_encoder_2,
        torch_dtype=dtype,
    ).to(device)
    # Compile only the VAE; the transformer is already quantized.
    pipeline.vae = torch.compile(pipeline.vae, mode="max-autotune")

    # Warm up: a few full generations so torch.compile finishes autotuning
    # before the first timed request arrives.
    for _ in range(3):
        pipeline(
            prompt="divination, aftermath, airy, flatworm, adjuster, fruity, dullard, presence",
            width=1024,
            height=1024,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
        )

    empty_cache()
    return pipeline


@torch.no_grad()
def infer(
    request: TextToImageRequest, pipeline: Pipeline, generator: Generator
) -> Image:
    # Flux.1-schnell is distilled for guidance-free, few-step sampling,
    # hence guidance_scale=0.0 and only 4 denoising steps.
    return pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    ).images[0]
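
# A minimal smoke test, assuming TextToImageRequest is a simple model that
# accepts prompt/height/width keyword arguments (its exact schema lives in
# pipelines.models and is not shown here).
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(
        prompt="a watercolor fox in a snowy forest",
        height=1024,
        width=1024,
    )
    image = infer(request, pipe, Generator(device="cuda").manual_seed(0))
    image.save("sample.png")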