import os

import torch
import torch._dynamo
from diffusers import AutoencoderKL, DiffusionPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from PIL.Image import Image
from torch import Generator
from torchao.quantization import int8_weight_only, quantize_
from transformers import T5EncoderModel

from pipelines.models import TextToImageRequest

# Reduce CUDA memory fragmentation and silence tokenizer fork warnings.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["TOKENIZERS_PARALLELISM"] = "True"

# Fall back to eager execution instead of raising if dynamo hits an unsupported op.
torch._dynamo.config.suppress_errors = True

# Type alias used in the annotations below.
Pipeline = DiffusionPipeline

ids = "black-forest-labs/FLUX.1-schnell"
Revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"


def load_pipeline() -> Pipeline:
    # Load the VAE in bfloat16 and quantize its weights to int8 to save VRAM.
    vae = AutoencoderKL.from_pretrained(
        ids,
        revision=Revision,
        subfolder="vae",
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    quantize_(vae, int8_weight_only())

    encoder_bf16 = "edgetensor/edgetensor-t5-v1_1-xxl-encoder-bf16"
    revision_encoder_bf16 = "bfed5213335c2ead9c9b5aff657680db420a7c7d"
    flux_transformer_path = (
        "models--edgetensor--edgetensor-FLUX.1-schnell-int8wo/snapshots/"
        "6b4c594d4c510da13a40f8c3b483789eb82d36df"
    )

    # Pre-converted bfloat16 T5-XXL text encoder.
    text_encoder_2 = T5EncoderModel.from_pretrained(
        encoder_bf16,
        revision=revision_encoder_bf16,
        torch_dtype=torch.bfloat16,
    ).to(memory_format=torch.channels_last)

    # Pre-quantized (int8 weight-only) FLUX transformer, loaded from the local HF
    # cache; the quantized weights are stored as a pickled checkpoint rather than
    # safetensors, hence use_safetensors=False.
    path = os.path.join(HF_HUB_CACHE, flux_transformer_path)
    transformer = FluxTransformer2DModel.from_pretrained(
        path,
        torch_dtype=torch.bfloat16,
        use_safetensors=False,
    ).to(memory_format=torch.channels_last)

    # Assemble the pipeline, substituting the quantized VAE, quantized transformer,
    # and bf16 text encoder for the stock components.
    pipeline = DiffusionPipeline.from_pretrained(
        ids,
        revision=Revision,
        vae=vae,
        transformer=transformer,
        text_encoder_2=text_encoder_2,
        torch_dtype=torch.bfloat16,
    )
    pipeline.to("cuda")

    # Warm-up pass so CUDA kernels are compiled and cached before the first request.
    prompt_base = "unwitherable, Pygmy, ramlike, Curtis, fingerstone, rewhisper"
    pipeline(
        prompt=prompt_base,
        width=1024,
        height=1024,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
    )
    return pipeline


@torch.no_grad()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
    # Seed a per-request generator on the pipeline's device for reproducible outputs.
    generator = Generator(pipeline.device).manual_seed(request.seed)
    return pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
    ).images[0]
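

# --- Usage sketch (illustrative only) ---
# A minimal driver showing how load_pipeline() and infer() fit together when run
# outside the serving harness. The keyword constructor of TextToImageRequest is an
# assumption inferred from the fields infer() reads (prompt, seed, width, height);
# the actual model in pipelines.models may differ. The prompt and output filename
# are placeholders.
if __name__ == "__main__":
    pipeline = load_pipeline()
    request = TextToImageRequest(  # assumed constructor; check pipelines.models
        prompt="a watercolor fox in a misty forest",
        seed=42,
        width=1024,
        height=1024,
    )
    image = infer(request, pipeline)
    image.save("output.png")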