import gc

import torch
from diffusers import FluxPipeline
from PIL import Image
from torch import Generator

from pipelines.models import TextToImageRequest
from deps import f

Pipeline = FluxPipeline

MODEL_ID = "black-forest-labs/FLUX.1-schnell"
DTYPE = torch.bfloat16
|
|
def clear():
    # Collect Python garbage, release cached GPU memory back to the driver,
    # and reset the CUDA allocator's peak-memory statistics.
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
|
|
def load_pipeline() -> Pipeline:
    clear()

    pipeline = FluxPipeline.from_pretrained(MODEL_ID, torch_dtype=DTYPE)

    # Enable cuDNN autotuning and TF32 matmuls for faster GPU kernels, and
    # let this process use nearly all of the device's memory.
    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.cuda.set_per_process_memory_fraction(0.99)

    # Move every submodule to channels-last memory layout.
    pipeline.text_encoder.to(memory_format=torch.channels_last)
    pipeline.text_encoder_2.to(memory_format=torch.channels_last)
    pipeline.transformer.to(memory_format=torch.channels_last)
    pipeline.vae.to(memory_format=torch.channels_last)

    # Compile the VAE and keep it resident on the GPU; the remaining modules
    # are sequentially offloaded to the CPU to cap peak VRAM usage.
    pipeline.vae = torch.compile(pipeline.vae)
    pipeline._exclude_from_cpu_offload = ["vae"]
    pipeline.enable_sequential_cpu_offload()

    # Warm-up pass: run the pipeline once so torch.compile and cuDNN
    # autotuning pay their one-time costs before the first real request.
    clear()
    with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_math=True, enable_mem_efficient=True):
        pipeline(
            prompt="unpervaded, unencumber, froggish, groundneedle, transnatural, fatherhood, outjump, cinerator",
            width=1024,
            height=1024,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
        )

    return pipeline
|
|
sample = True


@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image.Image:
    # The first real request starts from a clean CUDA allocator.
    global sample
    if sample:
        clear()
        sample = False

    generator = Generator("cuda").manual_seed(request.seed)

    # Disable flash attention and fall back to the math and memory-efficient
    # SDPA backends, matching the warm-up configuration.
    with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_math=True, enable_mem_efficient=True):
        image = pipeline(
            request.prompt,
            generator=generator,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
            height=request.height,
            width=request.width,
            output_type="pil",
        ).images[0]

    return image
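

# Minimal local smoke test: a sketch of how load_pipeline() and infer()
# compose, assuming TextToImageRequest is a simple data model accepting the
# prompt, height, width, and seed fields read above. The prompt value and
# output filename here are hypothetical.
if __name__ == "__main__":
    test_pipeline = load_pipeline()
    test_request = TextToImageRequest(prompt="a watercolor fox", width=1024, height=1024, seed=0)
    infer(test_request, test_pipeline).save("sample.png")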