import gc
import os

import torch
import torch._dynamo
from diffusers import AutoencoderKL, FluxPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from PIL import Image
from torch import Generator
from torchao.quantization import int8_weight_only, quantize_
from transformers import T5EncoderModel

from pipelines.models import TextToImageRequest

# Allocator and backend settings: expandable segments reduce CUDA memory
# fragmentation; TF32 matmuls and cuDNN autotuning trade a little precision
# for throughput.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["TOKENIZERS_PARALLELISM"] = "True"
torch._dynamo.config.suppress_errors = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True

# Type alias for the loaded pipeline (the original bound this to None, which
# made the annotations below invalid).
Pipeline = FluxPipeline


def empty_cache() -> None:
    """Release cached CUDA memory and reset the allocator's peak statistics."""
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()


def load_pipeline() -> Pipeline:
    ckpt_id = "black-forest-labs/FLUX.1-schnell"
    ckpt_revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"

    # Custom T5 text encoder in bfloat16, kept in channels-last memory format.
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "db900/encode-stream",
        revision="8a6b7bd09dc66733fa582900186f929353f63619",
        subfolder="text_encoder_2",
        torch_dtype=torch.bfloat16,
    ).to(memory_format=torch.channels_last)

    # The transformer weights are read straight out of the local HF cache snapshot.
    path = os.path.join(
        HF_HUB_CACHE,
        "models--db900--encode-stream/snapshots/8a6b7bd09dc66733fa582900186f929353f63619/transformer",
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        path, torch_dtype=torch.bfloat16, use_safetensors=False
    ).to(memory_format=torch.channels_last)

    # Quantize the VAE to int8 weights and wire it into the pipeline. The
    # original quantized a throwaway instance and never passed it in, so the
    # quantization had no effect.
    vae = AutoencoderKL.from_pretrained(
        ckpt_id,
        revision=ckpt_revision,
        subfolder="vae",
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    quantize_(vae, int8_weight_only())

    pipeline = FluxPipeline.from_pretrained(
        ckpt_id,
        revision=ckpt_revision,
        transformer=transformer,
        text_encoder_2=text_encoder_2,
        vae=vae,
        torch_dtype=torch.bfloat16,
    )
    pipeline.to("cuda")

    # Warm-up pass with a throwaway prompt so dynamo compilation and
    # cudnn.benchmark autotuning happen before the first real request.
    with torch.inference_mode():
        pipeline(
            prompt="insensible, timbale, pothery, electrovital, actinogram, taxis, intracerebellar, centrodesmus",
            width=1024,
            height=1024,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
        )
    return pipeline


@torch.no_grad()
def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image.Image:
    return pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    ).images[0]
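
# A minimal smoke-test sketch of the load/infer flow above. It assumes
# TextToImageRequest can be constructed with prompt/width/height keyword
# arguments; check pipelines.models for the actual signature before relying
# on this.
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(
        prompt="a watercolor painting of a lighthouse at dusk",  # hypothetical kwargs
        width=1024,
        height=1024,
    )
    # Seeded generator on the same device as the pipeline for reproducibility.
    generator = Generator("cuda").manual_seed(0)
    image = infer(request, pipe, generator)
    image.save("output.png")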