import gc
import os

import torch
import torch._dynamo
from diffusers import AutoencoderKL, FluxPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from PIL import Image
from torch import Generator
from torchao.quantization import int8_weight_only, quantize_
from transformers import T5EncoderModel

from pipelines.models import TextToImageRequest
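
# Allocator and kernel settings: expandable segments reduce CUDA memory
# fragmentation across differently sized requests, and TF32 matmuls trade
# a sliver of precision for throughput on Ampere-class GPUs.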
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["TOKENIZERS_PARALLELISM"] = "True"
torch._dynamo.config.suppress_errors = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.enabled = True
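

# `Pipeline` is only a placeholder for type annotations; the object actually
# returned by load_pipeline() is a diffusers FluxPipeline.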
Pipeline = None

ckpt_id = "freaky231/t5-encoder-bf16"
ckpt_revision = "994f6e4720f69e67bfc8822cbb4063c9149b801b"


def empty_cache():
    """Free cached CUDA memory and reset the allocator's peak statistics."""
    gc.collect()
    torch.cuda.empty_cache()
    # reset_max_memory_allocated is a deprecated alias of
    # reset_peak_memory_stats; both are kept for older torch versions.
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
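

# Build the FLUX pipeline from mixed checkpoints: the VAE (additionally
# int8 weight-only quantized via torchao) comes from ckpt_id, while the T5
# encoder and transformer are taken from a separate bfloat16 snapshot.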
def load_pipeline() -> Pipeline:
    vae = AutoencoderKL.from_pretrained(
        ckpt_id,
        revision=ckpt_revision,
        subfolder="vae",
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    quantize_(vae, int8_weight_only())

    text_encoder_2 = T5EncoderModel.from_pretrained(
        "freaky231/FluxPipeline",
        revision="c5cf4b2fc96d25c81eb0783d2c362689ea9ccf28",
        subfolder="text_encoder_2",
        torch_dtype=torch.bfloat16,
    )
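
    # Address the transformer weights directly inside the local HF hub cache;
    # use_safetensors=False because this snapshot stores the weights in a
    # non-safetensors format.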
    path = os.path.join(
        HF_HUB_CACHE,
        "models--freaky231--FluxPipeline/snapshots/c5cf4b2fc96d25c81eb0783d2c362689ea9ccf28/transformer",
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        path, torch_dtype=torch.bfloat16, use_safetensors=False
    )

    pipeline = FluxPipeline.from_pretrained(
        ckpt_id,
        revision=ckpt_revision,
        transformer=transformer,
        text_encoder_2=text_encoder_2,
        torch_dtype=torch.bfloat16,
    )
    pipeline.to("cuda")
    pipeline.to(memory_format=torch.channels_last)
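
    # One warmup generation primes CUDA kernels and library caches so the
    # first timed request is not penalized; the prompt text is arbitrary.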
    pipeline(
        prompt="unaware, kettledrum, clayey, bioenergetic, radiograph, locomotion, subcortical, microtubule",
        width=1024,
        height=1024,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
    )

    return pipeline
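

# Sentinel consulted by infer(). As initialized, the cache-clearing branch
# there never fires unless the harness resets `sample` to a falsy value.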
sample = 1


@torch.no_grad()
def infer(
    request: TextToImageRequest, pipeline: Pipeline, generator: Generator
) -> Image.Image:
    global sample
    if not sample:
        sample = 1
        empty_cache()

    # Few-step, guidance-free sampling, matching the warmup configuration.
    return pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    ).images[0]