# src/pipeline.py
import os
from typing import TypeAlias

import torch
from diffusers import (
    FluxPipeline,
    FluxTransformer2DModel,
    AutoencoderTiny,
)
from huggingface_hub.constants import HF_HUB_CACHE
from PIL.Image import Image
from pipelines.models import TextToImageRequest
from torchao.quantization import quantize_, int8_weight_only

Pipeline: TypeAlias = FluxPipeline
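# cuDNN autotuning plus TorchInductor settings aimed at this workload: lower
# 1x1 convolutions to matmuls and spend extra compile time on coordinate-
# descent tuning in exchange for faster steady-state kernels.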
torch.backends.cudnn.benchmark = True
torch._inductor.config.conv_1x1_as_mm = True
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.epilogue_fusion = False
torch._inductor.config.coordinate_descent_check_all_directions = True
torch._dynamo.config.suppress_errors = True
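# Expandable segments reduce CUDA allocator fragmentation. PyTorch parses this
# variable when the caching allocator initializes, so setting it after the
# torch import but before any CUDA allocation should still take effect.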
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
repo = "smash3211/Flux.1.schnell"
revision = "26534bc47459428a6763951757fd63892119ee08"
vae_repo = "smash3211/tae1-update"
vae_revision = "4aa8fbe28d8631db070810bc2b9ff9f9320effda"
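# Every from_pretrained call below uses local_files_only=True, so both repos
# must already sit in HF_HUB_CACHE at the pinned revisions. A minimal pre-fetch
# sketch (assumes network access at setup time; not part of the runtime path):
#
#   from huggingface_hub import snapshot_download
#   snapshot_download(repo, revision=revision)
#   snapshot_download(vae_repo, revision=vae_revision)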
def load_pipeline() -> Pipeline:
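    # Resolve the transformer weights directly from the local HF cache snapshot.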
path = os.path.join(
HF_HUB_CACHE,
f"models--{repo.split('/')[0]}--{repo.split('/')[1]}/snapshots/{revision}/transformer",
)
transformer = FluxTransformer2DModel.from_pretrained(
path, use_safetensors=False, local_files_only=True, torch_dtype=torch.bfloat16
)
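    # Tiny autoencoder (AutoencoderTiny) as a faster drop-in for the full VAE.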
vae = AutoencoderTiny.from_pretrained(
vae_repo,
revision=vae_revision,
local_files_only=True,
torch_dtype=torch.bfloat16,
)
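    # The VAE snapshot also ships raw encoder/decoder state dicts; overlay them
    # onto the tiny VAE (strict=False tolerates non-matching keys).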
vae_path = os.path.join(
HF_HUB_CACHE,
f"models--{vae_repo.split('/')[0]}--{vae_repo.split('/')[1]}/snapshots/{vae_revision}",
)
    vae.encoder.load_state_dict(
        torch.load(f"{vae_path}/encoder.pth", map_location="cpu"), strict=False
    )
    vae.decoder.load_state_dict(
        torch.load(f"{vae_path}/decoder.pth", map_location="cpu"), strict=False
    )
pipeline = FluxPipeline.from_pretrained(
repo,
revision=revision,
transformer=transformer,
vae=vae,
local_files_only=True,
torch_dtype=torch.bfloat16,
)
    pipeline.to("cuda")
pipeline.to(memory_format=torch.channels_last)
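    # int8 weight-only quantization (torchao) for the VAE, then compile it with
    # Inductor's max-autotune; the transformer stays in plain bf16.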
quantize_(pipeline.vae, int8_weight_only())
pipeline.vae = torch.compile(pipeline.vae, mode="max-autotune", fullgraph=True)
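    # Warm-up passes trigger Inductor autotuning and cuDNN benchmarking at load
    # time. Note they run at the pipeline's default resolution, so requests at
    # other sizes may still incur a one-off recompilation.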
for _ in range(4):
pipeline(prompt="satiety, unwitherable, Pygmy, ramlike, Curtis, fingerstone, rewhisper", num_inference_steps=4)
return pipeline
@torch.inference_mode()
def infer(
request: TextToImageRequest, pipeline: Pipeline, generator: torch.Generator
) -> Image:
return pipeline(
        prompt=request.prompt,
generator=generator,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
height=request.height,
width=request.width,
).images[0]
# Example Usage
if __name__ == "__main__":
print("load pipeline...")
diffusion_pipeline = load_pipeline()
sample_request = TextToImageRequest(
prompt="A futuristic cityscape with neon lights",
height=1024,
width=1024,
)
generator = torch.Generator(device="cuda").manual_seed(42)
print("Generating image...")
generated_img = infer(sample_request, diffusion_pipeline, generator)
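    # PIL's show() needs a display; in headless environments, prefer
    # generated_img.save("sample.png") instead.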
generated_img.show()