import os
from typing import TypeAlias

import torch
from PIL.Image import Image
from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderTiny, DiffusionPipeline
from huggingface_hub.constants import HF_HUB_CACHE
from pipelines.models import TextToImageRequest
from torch import Generator
from torchao.quantization import quantize_, int8_weight_only  # used by the quantization sketch at the end of this file

Pipeline: TypeAlias = FluxPipeline

# cuDNN and Inductor tuning knobs for faster GPU inference.
torch.backends.cudnn.benchmark = True
torch._inductor.config.conv_1x1_as_mm = True
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.epilogue_fusion = False
torch._inductor.config.coordinate_descent_check_all_directions = True
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

id = "black-forest-labs/FLUX.1-schnell"
revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"

vae_id = "madebyollin/taef1"
vae_revision = "2d552378e58c9c94201075708d7de4e1163b2689"

# Prompt used for warm-up runs and as a fallback when a request carries no prompt.
WARMUP_PROMPT = "satiety, unwitherable, Pygmy, ramlike, Curtis, fingerstone, rewhisper"


def load_pipeline() -> Pipeline:
    # Load the pre-quantized int8 transformer from a local Hugging Face cache snapshot.
    path = os.path.join(
        HF_HUB_CACHE,
        "models--freaky231--flux.1-schnell-int8/snapshots/c33fa7f79751fe42b0a7de7f72edb5d1b86f32a7/transformer",
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        path,
        use_safetensors=False,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    # TAEF1: a tiny autoencoder that decodes FLUX latents much faster than the full VAE.
    vae = AutoencoderTiny.from_pretrained(
        vae_id,
        revision=vae_revision,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    pipeline = DiffusionPipeline.from_pretrained(
        id,
        revision=revision,
        transformer=transformer,
        vae=vae,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    pipeline.to(memory_format=torch.channels_last)
    pipeline.to("cuda")
    # Two warm-up runs so kernels and caches are primed before serving real requests.
    for _ in range(2):
        pipeline(WARMUP_PROMPT, num_inference_steps=4)
    return pipeline


@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator | None = None) -> Image:
    # A fresh generator seeded from the request guarantees reproducible outputs;
    # any generator passed in by the caller is deliberately ignored.
    generator = Generator(pipeline.device).manual_seed(request.seed)
    # Fall back to the warm-up prompt if the request has no prompt attribute.
    prompt = getattr(request, "prompt", WARMUP_PROMPT)
    return pipeline(
        prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
    ).images[0]


if __name__ == "__main__":
    pipe_ = load_pipeline()
    for _ in range(2):
        request = TextToImageRequest(prompt="dog", height=None, width=None, seed=666)
        infer(request, pipe_)
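
# ---------------------------------------------------------------------------
# Optional: producing the int8 transformer checkpoint loaded above.
# A minimal sketch, not the confirmed recipe behind the freaky231 snapshot: it
# assumes the checkpoint was made by applying torchao's int8 weight-only
# quantization to the bf16 FLUX.1-schnell transformer and pickling the result
# (which is why load_pipeline passes use_safetensors=False). The function name
# and output_dir default are illustrative, not part of the original pipeline.
def quantize_transformer(output_dir: str = "flux.1-schnell-int8/transformer") -> None:
    transformer = FluxTransformer2DModel.from_pretrained(
        id,
        revision=revision,
        subfolder="transformer",
        torch_dtype=torch.bfloat16,
    )
    # In-place swap of linear-layer weights for int8 weight-only quantized tensors.
    quantize_(transformer, int8_weight_only())
    # torchao tensor subclasses cannot be serialized to safetensors, so save via
    # torch.save (safe_serialization=False) to match the pickle-based loading above.
    transformer.save_pretrained(output_dir, safe_serialization=False)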