from diffusers import (
    AutoencoderKL,
    FluxPipeline,
    FluxTransformer2DModel,
)
from huggingface_hub.constants import HF_HUB_CACHE
from transformers import T5EncoderModel
import torch
import torch._dynamo
import gc
import os
from PIL import Image
from pipelines.models import TextToImageRequest
from torch import Generator
from torchao.quantization import quantize_, int8_weight_only

# preconfigs
# Allocator and backend settings must be applied before any CUDA work starts.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["TOKENIZERS_PARALLELISM"] = "True"
torch._dynamo.config.suppress_errors = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.enabled = True

# globals
# Alias used in the annotations below; the loaded pipeline is a FluxPipeline.
Pipeline = FluxPipeline
ckpt_id = "soft987/FLUX.1.schnell-quant2"
ckpt_revision = "6d93094cc0c92f72236c6de41bddf789b8b0b38e"


def empty_cache():
    """Release cached CUDA memory and reset the peak-memory counters."""
    gc.collect()
    torch.cuda.empty_cache()
    # reset_max_memory_allocated() is deprecated and now just forwards to
    # reset_peak_memory_stats(), so a single call suffices.
    torch.cuda.reset_peak_memory_stats()


def load_pipeline() -> Pipeline:
    """Assemble the FLUX.1-schnell pipeline from quantized parts and warm it up."""
    # VAE from the quantized checkpoint, then int8 weight-only quantization
    # via torchao to cut its memory footprint.
    vae = AutoencoderKL.from_pretrained(
        ckpt_id,
        revision=ckpt_revision,
        subfolder="vae",
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    quantize_(vae, int8_weight_only())

    # T5 text encoder from the full-precision checkpoint.
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "soft987/FLUX1.schnell-full",
        revision="a05d320df4f5795fb4eff2f85ec117e870c078cb",
        subfolder="text_encoder_2",
        torch_dtype=torch.bfloat16,
    )

    # Transformer loaded straight from the local HF cache snapshot
    # (non-safetensors weights, hence use_safetensors=False).
    path = os.path.join(
        HF_HUB_CACHE,
        "models--soft987--FLUX1.schnell-full/snapshots/a05d320df4f5795fb4eff2f85ec117e870c078cb/transformer",
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        path, torch_dtype=torch.bfloat16, use_safetensors=False
    )

    pipeline = FluxPipeline.from_pretrained(
        ckpt_id,
        revision=ckpt_revision,
        transformer=transformer,
        text_encoder_2=text_encoder_2,
        torch_dtype=torch.bfloat16,
    )
    pipeline.to("cuda")
    pipeline.to(memory_format=torch.channels_last)

    # Single warm-up pass with a throwaway prompt so kernel selection,
    # autotuning, and allocator growth happen before timed inference.
    pipeline(
        prompt="insensible, timbale, pothery, electrovital, actinogram, taxis, intracerebellar, centrodesmus",
        width=1024,
        height=1024,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
    )

    return pipeline


# Flag guarding the one-time cache flush in infer(); initialized truthy, so
# the flush only runs if external code resets it to a falsy value first.
sample = 1


@torch.no_grad()
def infer(
    request: TextToImageRequest, pipeline: Pipeline, generator: Generator
) -> Image.Image | None:
    """Run one 4-step schnell generation; return None if generation fails."""
    global sample
    if not sample:
        sample = 1
        empty_cache()

    try:
        return pipeline(
            request.prompt,
            generator=generator,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
            height=request.height,
            width=request.width,
            output_type="pil",
        ).images[0]
    except Exception:
        # Swallow generation errors; the caller must handle a None return.
        return None
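

# --- Usage sketch (illustrative only, not part of the competition harness) ---
# A minimal driver showing how load_pipeline() and infer() fit together.
# The TextToImageRequest constructor arguments below are an assumption: the
# real model lives in pipelines.models, but the prompt/height/width fields
# match the attributes infer() reads above.
if __name__ == "__main__":
    pipeline = load_pipeline()
    request = TextToImageRequest(  # hypothetical field names, see note above
        prompt="a watercolor fox in a snowy forest",
        height=1024,
        width=1024,
    )
    generator = Generator("cuda").manual_seed(42)
    image = infer(request, pipeline, generator)
    if image is not None:
        image.save("output.png")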