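"""Optimized Flux.1-schnell text-to-image pipeline.

Loads a pre-quantized transformer and a tiny autoencoder, overlays fine-tuned
VAE weights, compiles the VAE with torch.compile, and exposes
load_pipeline()/infer() entry points.
"""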
import os
from typing import TypeAlias

import torch
from diffusers import AutoencoderTiny, FluxPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from PIL.Image import Image
from pipelines.models import TextToImageRequest
from torchao.quantization import int8_weight_only, quantize_

Pipeline: TypeAlias = FluxPipeline

# Enable cuDNN autotuning and tune the inductor backend for this workload.
torch.backends.cudnn.benchmark = True
torch._inductor.config.conv_1x1_as_mm = True
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.epilogue_fusion = False
torch._inductor.config.coordinate_descent_check_all_directions = True
torch._dynamo.config.suppress_errors = True

# Reduce CUDA allocator fragmentation; must be set before the first
# CUDA allocation.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Pre-quantized Flux.1-schnell checkpoint, pinned to a specific revision.
CHECKPOINT = "manbeast3b/Flux.1.schnell-quant2"
REVISION = "44eb293715147878512da10bf3bc47cd14ec8c55"

# Tiny autoencoder for Flux (TAEF1), used as a faster drop-in VAE.
TINY_VAE = "madebyollin/taef1"
TINY_VAE_REV = "2d552378e58c9c94201075708d7de4e1163b2689"

def load_pipeline() -> Pipeline:
    # Load the quantized transformer directly from the local HF cache snapshot.
    path = os.path.join(
        HF_HUB_CACHE,
        "models--manbeast3b--Flux.1.schnell-quant2",
        "snapshots",
        REVISION,
        "transformer",
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        path,
        use_safetensors=False,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    vae = AutoencoderTiny.from_pretrained(
        TINY_VAE,
        revision=TINY_VAE_REV,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    # Overlay fine-tuned encoder/decoder weights shipped alongside this file;
    # strict=False tolerates keys missing from the saved state dicts.
    vae.encoder.load_state_dict(torch.load("encoder.pth"), strict=False)
    vae.decoder.load_state_dict(torch.load("decoder.pth"), strict=False)
    pipeline = FluxPipeline.from_pretrained(
        CHECKPOINT,
        revision=REVISION,
        transformer=transformer,
        vae=vae,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    ).to("cuda")
    pipeline.to(memory_format=torch.channels_last)
    # Quantize the VAE weights to int8, then compile it with full-graph
    # autotuning for maximum throughput.
    quantize_(pipeline.vae, int8_weight_only())
    pipeline.vae = torch.compile(pipeline.vae, mode="max-autotune", fullgraph=True)
    # Warm-up runs to trigger compilation/autotuning before serving requests.
    with torch.inference_mode():
        for _ in range(2):
            pipeline("meow", num_inference_steps=4)
    return pipeline

@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline, generator: torch.Generator) -> Image:
    # Flux.1-schnell is distilled for few-step sampling and runs without
    # classifier-free guidance, hence 4 steps and guidance_scale=0.0.
    return pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
    ).images[0]
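

# A minimal usage sketch (an illustrative assumption, not part of the original
# module): it presumes TextToImageRequest accepts prompt/height/width keyword
# arguments, as suggested by the fields read in infer() above.
if __name__ == "__main__":
    pipe = load_pipeline()
    req = TextToImageRequest(prompt="a tabby cat on a windowsill", height=1024, width=1024)
    image = infer(req, pipe, torch.Generator("cuda").manual_seed(0))
    image.save("sample.png")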