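"""src/pipeline.py

Text-to-image pipeline for FLUX.1-schnell: load_pipeline() builds a FluxPipeline
with a tiny int8-quantized VAE, sequential CPU offload, and a torch.compile'd VAE,
and infer() renders one image per TextToImageRequest.
"""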
import gc

import torch
import torch._dynamo
from diffusers import AutoencoderKL, AutoencoderTiny, FluxPipeline
from diffusers.image_processor import VaeImageProcessor
from PIL.Image import Image
from torch import Generator
from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only

from pipelines.models import TextToImageRequest

Pipeline = FluxPipeline  # alias for the loaded pipeline type, used in the annotations below
MODEL_ID = "black-forest-labs/FLUX.1-schnell"
DTYPE = torch.bfloat16


def clear():
    """Free Python and CUDA caches and reset CUDA memory statistics."""
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()


def load_pipeline() -> Pipeline:
    clear()
    # Full-size VAE kept for reference:
    # vae = AutoencoderKL.from_pretrained(
    #     MODEL_ID, subfolder="vae", torch_dtype=torch.bfloat16
    # )
    # Tiny Flux autoencoder (taef1) cuts VAE memory use and decode time.
    vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=DTYPE)
    # quantize_(vae, fpx_weight_only(3, 2))
    # Quantize the VAE weights to int8 to reduce memory further.
    quantize_(vae, int8_weight_only())
    pipeline = FluxPipeline.from_pretrained(MODEL_ID, vae=vae, torch_dtype=DTYPE)
    # Enable cuDNN autotuning and TF32 matmuls for faster kernels.
    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.cuda.set_per_process_memory_fraction(0.99)
    # Switch modules to channels-last memory format.
    pipeline.text_encoder.to(memory_format=torch.channels_last)
    pipeline.text_encoder_2.to(memory_format=torch.channels_last)
    pipeline.transformer.to(memory_format=torch.channels_last)
    pipeline.vae.to(memory_format=torch.channels_last)
    pipeline.vae = torch.compile(pipeline.vae)
    # Keep the small, compiled VAE resident on the GPU while the rest of the
    # pipeline is offloaded to CPU layer by layer.
    pipeline._exclude_from_cpu_offload = ["vae"]
    pipeline.enable_sequential_cpu_offload()
    clear()
    # Warm-up pass: triggers torch.compile of the VAE and cuDNN autotuning before serving.
    for _ in range(1):
        pipeline(
            prompt="unpervaded, unencumber, froggish, groundneedle, transnatural, fatherhood, outjump, cinerator",
            width=1024, height=1024, guidance_scale=0.1,
            num_inference_steps=4, max_sequence_length=256,
        )
    return pipeline


# One-shot flag: run a full memory cleanup only before the first served request.
sample = True


@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
    global sample
    if sample:
        clear()
        sample = None
        # torch.cuda.reset_peak_memory_stats()
    generator = Generator("cuda").manual_seed(request.seed)
    # Force the memory-efficient SDPA backend (flash and math kernels disabled).
    with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_math=False, enable_mem_efficient=True):
        image = pipeline(
            request.prompt, generator=generator, guidance_scale=0.0,
            num_inference_steps=4, max_sequence_length=256,
            height=request.height, width=request.width, output_type="pil",
        ).images[0]
    return image
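

# Minimal local smoke test -- a sketch, not part of the serving contract. It
# assumes TextToImageRequest (from pipelines.models) accepts the prompt/seed/
# height/width fields that infer() reads above.
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(
        prompt="a photo of an astronaut riding a horse on mars",
        seed=0,
        height=1024,
        width=1024,
    )
    infer(request, pipe).save("sample.png")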