import os
from typing import TypeAlias

import torch
from PIL.Image import Image
from diffusers import FluxPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from pipelines.models import TextToImageRequest
from torch import Generator

Pipeline: TypeAlias = FluxPipeline

# Let the CUDA caching allocator grow segments instead of fragmenting,
# and allow the tokenizers to run in parallel.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["TOKENIZERS_PARALLELISM"] = "True"


def load_pipeline() -> Pipeline:
    # Load the transformer weights for the pinned revision directly from
    # the local Hugging Face cache.
    path = os.path.join(
        HF_HUB_CACHE,
        "models--farapart--flow.1-fast/snapshots/59ebc4a11e1a6d4fe2085988028c5252f3a07b74/transformer",
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        path,
        use_safetensors=False,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    pipeline = FluxPipeline.from_pretrained(
        "farapart/flow.1-fast",
        revision="59ebc4a11e1a6d4fe2085988028c5252f3a07b74",
        transformer=transformer,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    ).to("cuda")
    pipeline.to(memory_format=torch.channels_last)

    # Warm up the pipeline so CUDA kernels are compiled and cached before
    # the first real request; the prompt content itself is irrelevant.
    with torch.inference_mode():
        for _ in range(4):
            pipeline(
                prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness",
                width=1024,
                height=1024,
                guidance_scale=0.0,
                num_inference_steps=4,
                max_sequence_length=256,
            )
    torch.cuda.empty_cache()

    return pipeline


@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image:
    return pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
    ).images[0]
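
# A minimal usage sketch, not part of the original module: it assumes
# TextToImageRequest accepts prompt/height/width keyword arguments (matching
# the fields `infer` reads above), and the seed value here is arbitrary.
if __name__ == "__main__":
    pipeline = load_pipeline()
    generator = Generator("cuda").manual_seed(42)
    request = TextToImageRequest(prompt="a watercolor fox in a snowy forest", width=1024, height=1024)
    image = infer(request, pipeline, generator)
    image.save("output.png")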