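"""Optimized Flux.1-schnell text-to-image pipeline.

Loads a pinned transformer snapshot, swaps in a Tiny AutoEncoder with patched
encoder/decoder weights, applies int8 weight-only quantization and
torch.compile to the VAE, and runs warm-up passes for fast 4-step inference.
"""
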
import os
from typing import TypeAlias

import torch
from diffusers import (
    FluxPipeline,
    FluxTransformer2DModel,
    AutoencoderTiny,
)
from huggingface_hub.constants import HF_HUB_CACHE
from PIL.Image import Image
from pipelines.models import TextToImageRequest
from torchao.quantization import int8_weight_only, quantize_

Pipeline: TypeAlias = FluxPipeline

# cuDNN autotuning plus inductor/dynamo settings tuned for repeated
# fixed-shape inference under torch.compile.
torch.backends.cudnn.benchmark = True
torch._inductor.config.conv_1x1_as_mm = True
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.epilogue_fusion = False
torch._inductor.config.coordinate_descent_check_all_directions = True
torch._dynamo.config.suppress_errors = True
# Let the CUDA caching allocator grow segments instead of fragmenting memory.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Base Flux.1-schnell model, pinned to an exact snapshot revision.
repo = "smash3211/Flux.1.schnell"
revision = "26534bc47459428a6763951757fd63892119ee08"

# Tiny AutoEncoder snapshot; its encoder/decoder weights are patched in below.
vae_repo = "smash3211/tae1-update"
vae_revision = "4aa8fbe28d8631db070810bc2b9ff9f9320effda"


def load_pipeline() -> Pipeline:
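    """Assemble the optimized pipeline: load pinned weights, swap in the tiny
    VAE, quantize and compile it, then run warm-up passes on the GPU."""
    # Resolve the transformer's local path inside the HF hub cache
    # (models--{org}--{name}/snapshots/{revision}/transformer).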
    path = os.path.join(
        HF_HUB_CACHE,
        f"models--{repo.split('/')[0]}--{repo.split('/')[1]}/snapshots/{revision}/transformer",
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        path, use_safetensors=False, local_files_only=True, torch_dtype=torch.bfloat16
    )
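    # AutoencoderTiny (TAESD) stands in for the full KL VAE, trading a little
    # fidelity for much faster encode/decode.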
    vae = AutoencoderTiny.from_pretrained(
        vae_repo,
        revision=vae_revision,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    vae_path = os.path.join(
        HF_HUB_CACHE,
        f"models--{vae_repo.split('/')[0]}--{vae_repo.split('/')[1]}/snapshots/{vae_revision}",
    )
    # Patch in the updated encoder/decoder weights shipped with the snapshot;
    # load to CPU first so the checkpoints don't depend on their saved device.
    vae.encoder.load_state_dict(
        torch.load(f"{vae_path}/encoder.pth", map_location="cpu"), strict=False
    )
    vae.decoder.load_state_dict(
        torch.load(f"{vae_path}/decoder.pth", map_location="cpu"), strict=False
    )
    pipeline = FluxPipeline.from_pretrained(
        repo,
        revision=revision,
        transformer=transformer,
        vae=vae,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    pipeline.to("cuda")
    # channels_last benefits the convolutional VAE; apply it at the module
    # level, since the pipeline-level .to() only handles device and dtype.
    pipeline.vae.to(memory_format=torch.channels_last)
    # Quantize the VAE to int8 weights and compile it for steady-state speed.
    quantize_(pipeline.vae, int8_weight_only())
    pipeline.vae = torch.compile(pipeline.vae, mode="max-autotune", fullgraph=True)
    # Warm-up passes trigger cuDNN autotuning and torch.compile's compilation
    # so real requests don't pay that cost.
    for _ in range(4):
        pipeline(
            prompt="satiety, unwitherable, Pygmy, ramlike, Curtis, fingerstone, rewhisper",
            num_inference_steps=4,
        )
    return pipeline


@torch.inference_mode()
def infer(
    request: TextToImageRequest, pipeline: Pipeline, generator: torch.Generator
) -> Image:
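    """Generate one image with 4-step schnell sampling (guidance disabled)."""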
    return pipeline(
        prompt=request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
    ).images[0]

# Example Usage
if __name__ == "__main__":
    print("load pipeline...")
    diffusion_pipeline = load_pipeline()

    sample_request = TextToImageRequest(
        prompt="A futuristic cityscape with neon lights",
        height=1024,
        width=1024,
    )

    generator = torch.Generator(device="cuda").manual_seed(42)

    print("Generating image...")
    generated_img = infer(sample_request, diffusion_pipeline, generator)
    generated_img.show()
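    # Also persist the result; "output.png" is just an illustrative filename.
    generated_img.save("output.png")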