import os
from typing import TypeAlias

import torch
from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderTiny
from huggingface_hub.constants import HF_HUB_CACHE
from PIL.Image import Image
from torchao.quantization import quantize_, int8_weight_only

from pipelines.models import TextToImageRequest
|
|
Pipeline: TypeAlias = FluxPipeline
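
# Performance tuning: cuDNN autotuning, inductor options that lower 1x1 convs to
# matmuls and enable coordinate-descent autotuning, and a dynamo fallback to eager
# execution on compilation errors.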
torch.backends.cudnn.benchmark = True
| torch._inductor.config.conv_1x1_as_mm = True |
| torch._inductor.config.coordinate_descent_tuning = True |
| torch._inductor.config.epilogue_fusion = False |
| torch._inductor.config.coordinate_descent_check_all_directions = True |
| torch._dynamo.config.suppress_errors = True |
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
|
|
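# Pinned revisions: a Flux.1-schnell checkpoint and the TAEF1 Tiny AutoEncoder VAE.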
| repo = "smash3211/Flux.1.schnell" |
| revision = "26534bc47459428a6763951757fd63892119ee08" |
|
|
| vae_repo = "madebyollin/taef1" |
| vae_revision = "2d552378e58c9c94201075708d7de4e1163b2689" |
|
|
| def load_pipeline() -> Pipeline: |
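    """Build the Flux.1-schnell pipeline with a quantized, compiled TAEF1 VAE and warm it up."""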
    path = os.path.join(
        HF_HUB_CACHE,
        f"models--{repo.replace('/', '--')}",
        "snapshots",
        revision,
        "transformer",
    )
| transformer = FluxTransformer2DModel.from_pretrained( |
| path, |
| use_safetensors=False, |
| local_files_only=True, |
| torch_dtype=torch.bfloat16) |
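    # TAEF1: a tiny distilled VAE that decodes much faster than the full Flux VAE.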
| vae = AutoencoderTiny.from_pretrained( |
| vae_repo, |
| revision=vae_revision, |
| local_files_only=True, |
| torch_dtype=torch.bfloat16) |
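    # Overlay locally fine-tuned encoder/decoder weights on the TAEF1 checkpoint
    # (strict=False tolerates missing or unexpected keys).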
| vae.encoder.load_state_dict(torch.load("encoder.pth"), strict=False) |
| vae.decoder.load_state_dict(torch.load("decoder.pth"), strict=False) |
    pipeline = FluxPipeline.from_pretrained(
        repo,
        revision=revision,
        transformer=transformer,
        vae=vae,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    ).to("cuda")
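    # channels_last improves conv throughput; int8 weight-only quantization and
    # torch.compile with max-autotune speed up the VAE decode path.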
| pipeline.to(memory_format=torch.channels_last) |
| quantize_(pipeline.vae, int8_weight_only()) |
| pipeline.vae = torch.compile(pipeline.vae, mode="max-autotune", fullgraph=True) |
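    # Warm-up runs to trigger compilation and autotuning before serving requests.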
| with torch.inference_mode(): |
| for _ in range(2): |
| pipeline("meow", num_inference_steps=4) |
| return pipeline |
|
|
| @torch.inference_mode() |
| def infer(request: TextToImageRequest, pipeline: Pipeline, generator: torch.Generator) -> Image: |
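    """Generate one image for the request; schnell is guidance-distilled, so guidance_scale is 0."""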
| return pipeline( |
| request.prompt, |
| generator=generator, |
| guidance_scale=0.0, |
| num_inference_steps=4, |
| max_sequence_length=256, |
| height=request.height, |
| width=request.width, |
| ).images[0] |
|
|
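# Minimal usage sketch (illustrative, not part of the serving path): assumes
# TextToImageRequest can be constructed with prompt/height/width keyword
# arguments, which this module only reads and does not define.
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(prompt="a watercolor fox in a misty forest", height=1024, width=1024)
    generator = torch.Generator("cuda").manual_seed(0)
    infer(request, pipe, generator).save("output.png")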