import os

import torch
import torch._dynamo
from diffusers import AutoencoderKL, FluxPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
from PIL.Image import Image
from torch import Generator
from torchao.quantization import int8_weight_only, quantize_
from transformers import CLIPTextModel, T5EncoderModel

from pipelines.models import TextToImageRequest

# Let the CUDA caching allocator use expandable segments to reduce fragmentation,
# keep tokenizer parallelism enabled, and have torch._dynamo fall back to eager
# execution instead of raising when it hits an unsupported op.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["TOKENIZERS_PARALLELISM"] = "True"
torch._dynamo.config.suppress_errors = True
|
|
Pipeline = FluxPipeline  # alias used in the signatures below
CHECKPOINT = "black-forest-labs/FLUX.1-schnell"
REVISION = "741f7c3ce8b383c54771c7003378a50191e9efe9"
|
|
def load_pipeline() -> Pipeline:
    # Load the two text encoders and the VAE from the base checkpoint in bfloat16.
    text_encoder = CLIPTextModel.from_pretrained(CHECKPOINT, revision=REVISION, subfolder="text_encoder", local_files_only=True, torch_dtype=torch.bfloat16)
    text_encoder_2 = T5EncoderModel.from_pretrained(CHECKPOINT, revision=REVISION, subfolder="text_encoder_2", local_files_only=True, torch_dtype=torch.bfloat16)
    vae = AutoencoderKL.from_pretrained(CHECKPOINT, revision=REVISION, subfolder="vae", local_files_only=True, torch_dtype=torch.bfloat16)
    # The transformer comes from a locally cached Hub snapshot rather than the base checkpoint.
    path = os.path.join(HF_HUB_CACHE, "models--thorejaya--Flux.1.Schnella/snapshots/e411a2c0a1dc5c0848373eb78af8590faf38f315")
    transformer = FluxTransformer2DModel.from_pretrained(path, torch_dtype=torch.bfloat16, use_safetensors=False)
    pipeline = FluxPipeline.from_pretrained(CHECKPOINT, revision=REVISION, local_files_only=True, text_encoder=text_encoder, text_encoder_2=text_encoder_2, transformer=transformer, vae=vae, torch_dtype=torch.bfloat16).to("cuda")
    # Quantize the VAE weights to int8 and enable first-block caching, which skips
    # the remaining transformer blocks when the first block's residual barely changes between steps.
    quantize_(pipeline.vae, int8_weight_only())
    pipeline = apply_cache_on_pipe(pipeline, residual_diff_threshold=0.345)
    # Warm-up call so one-time compilation and caching costs are not paid on the first real request.
    pipeline("")
    return pipeline


@torch.no_grad()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
    # Seed a generator on the pipeline's device so each request is reproducible.
    generator = Generator(pipeline.device).manual_seed(request.seed)
    return pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
    ).images[0]
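

# A minimal usage sketch, assuming this module is run directly as a local smoke
# test. It assumes TextToImageRequest accepts prompt, seed, height, and width as
# keyword arguments (only attribute access on those fields is shown above), so
# the constructor call below is illustrative rather than part of the serving harness.
if __name__ == "__main__":
    pipe = load_pipeline()
    sample_request = TextToImageRequest(
        prompt="a watercolor lighthouse at dawn",
        seed=0,
        height=1024,
        width=1024,
    )
    image = infer(sample_request, pipe)
    image.save("sample.png")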