# bench_try10 / src/pipeline.py
# Author: manbeast3b — "Update src/pipeline.py" (commit cb75b34, verified)
# from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
# from diffusers.image_processor import VaeImageProcessor
# from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
# from huggingface_hub.constants import HF_HUB_CACHE
# from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
# import torch
# import torch._dynamo
# import gc
# from PIL import Image as img
# from PIL.Image import Image
# from pipelines.models import TextToImageRequest
# from torch import Generator
# import time
# from diffusers import FluxTransformer2DModel, DiffusionPipeline
# from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
# import os
# os.environ['PYTORCH_CUDA_ALLOC_CONF']="expandable_segments:True"
# torch._dynamo.config.suppress_errors = True
# Pipeline = None
# # ckpt_id = "manbeast3b/flux.1-schnell-full1"
# # ckpt_revision = "cb1b599b0d712b9aab2c4df3ad27b050a27ec146"
# ckpt_id = "black-forest-labs/FLUX.1-schnell"
# ckpt_revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
# def empty_cache():
# gc.collect()
# torch.cuda.empty_cache()
# torch.cuda.reset_max_memory_allocated()
# torch.cuda.reset_peak_memory_stats()
# def load_pipeline() -> Pipeline:
# empty_cache()
# dtype, device = torch.bfloat16, "cuda"
# text_encoder_2 = T5EncoderModel.from_pretrained(
# "city96/t5-v1_1-xxl-encoder-bf16", revision = "1b9c856aadb864af93c1dcdc226c2774fa67bc86", torch_dtype=torch.bfloat16
# ).to(memory_format=torch.channels_last)
# tinypath= os.path.join(HF_HUB_CACHE, "models--madebyollin--taef1/snapshots/5463ee684fd9131a724bea777a2f50d89b0b6b24")
# vae = AutoencoderTiny.from_pretrained(tinypath, torch_dtype=dtype)
# path = os.path.join(HF_HUB_CACHE, "models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a")
# model = FluxTransformer2DModel.from_pretrained(path, torch_dtype=dtype, use_safetensors=False).to(memory_format=torch.channels_last)
# pipeline = FluxPipeline.from_pretrained(
# ckpt_id,
# vae=vae,
# revision=ckpt_revision,
# transformer=model,
# text_encoder_2=text_encoder_2,
# torch_dtype=dtype,
# ).to(device)
# pipeline.transformer = torch.compile(pipeline.transformer, mode="max-autotune")
# quantize_(pipeline.text_encoder, fpx_weight_only(3,2))
# quantize_(pipeline.vae, int8_weight_only())
# for _ in range(3):
# pipeline(prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
# empty_cache()
# return pipeline
# @torch.no_grad()
# def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image:
# image=pipeline(request.prompt,generator=generator, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256, height=request.height, width=request.width, output_type="pil").images[0]
# return(image)
'''
from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
from huggingface_hub.constants import HF_HUB_CACHE
from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer
import torch
import torch._dynamo
import gc
from PIL import Image as img
from PIL.Image import Image
from pipelines.models import TextToImageRequest
from torch import Generator
import time
from diffusers import FluxTransformer2DModel, DiffusionPipeline
from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
import os
os.environ['PYTORCH_CUDA_ALLOC_CONF']="expandable_segments:True"
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
ckpt_id = "black-forest-labs/FLUX.1-schnell"
ckpt_revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
Pipeline = None
def empty_cache():
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
def load_pipeline() -> Pipeline:
empty_cache()
dtype, device = torch.bfloat16, "cuda"
text_encoder_2 = T5EncoderModel.from_pretrained(
"city96/t5-v1_1-xxl-encoder-bf16", revision = "1b9c856aadb864af93c1dcdc226c2774fa67bc86", torch_dtype=torch.bfloat16
).to(memory_format=torch.channels_last)
path = os.path.join(HF_HUB_CACHE, "models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a")
model = FluxTransformer2DModel.from_pretrained(path, torch_dtype=dtype, use_safetensors=False).to(memory_format=torch.channels_last)
pipeline = DiffusionPipeline.from_pretrained(
ckpt_id,
revision=ckpt_revision,
transformer=model,
text_encoder_2=text_encoder_2,
torch_dtype=dtype,
).to(device)
quantize_(pipeline.vae, int8_weight_only())
pipeline(prompt="imprisonable, forechamber, demagogic, monotropic, blandiloquious, blechnoid, uncarnivorous", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
empty_cache()
return pipeline
@torch.no_grad()
def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image:
image=pipeline(request.prompt,
generator=generator,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
height=request.height,
width=request.width,
output_type="pil").images[0]
return(image)
'''
from huggingface_hub.constants import HF_HUB_CACHE
from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
import torch
import torch._dynamo
import gc
import os
from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
from PIL.Image import Image
from pipelines.models import TextToImageRequest
from torch import Generator
from diffusers import FluxTransformer2DModel, DiffusionPipeline
from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
# Let the CUDA caching allocator grow segments instead of fragmenting;
# must be set before the first CUDA allocation is made.
os.environ['PYTORCH_CUDA_ALLOC_CONF']="expandable_segments:True"
# Allow the HF tokenizers library to use multiple threads.
os.environ["TOKENIZERS_PARALLELISM"] = "True"
# If torch.compile / dynamo hits an unsupported construct, fall back to
# eager execution rather than raising.
torch._dynamo.config.suppress_errors = True
# Placeholder used only as a type annotation below; the actual pipeline
# object is whatever load_pipeline() returns.
Pipeline = None
# Base FLUX.1-schnell checkpoint id and the exact pinned revision to load.
ids = "black-forest-labs/FLUX.1-schnell"
Revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
def load_pipeline() -> Pipeline:
    """Build and warm up the FLUX.1-schnell text-to-image pipeline.

    Loads a pre-quantized (int8 weight-only) transformer and a bf16 T5-XXL
    text encoder, int8-quantizes the VAE, assembles the diffusers pipeline
    on CUDA, and runs one warm-up generation so later calls don't pay lazy
    initialization / kernel-compilation costs.

    Returns:
        The ready-to-use diffusers pipeline.
    """
    # VAE weights quantized to int8 to reduce memory; loaded strictly from
    # the local HF cache (no network).
    vae = AutoencoderKL.from_pretrained(
        ids,
        revision=Revision,
        subfolder="vae",
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    quantize_(vae, int8_weight_only())
    # Standalone bf16 T5 encoder; channels_last memory format favors
    # tensor-core kernels.
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "city96/t5-v1_1-xxl-encoder-bf16",
        revision="1b9c856aadb864af93c1dcdc226c2774fa67bc86",
        torch_dtype=torch.bfloat16,
    ).to(memory_format=torch.channels_last)
    # Pre-quantized int8 weight-only transformer snapshot, read directly
    # from the HF hub cache directory.
    path = os.path.join(
        HF_HUB_CACHE,
        "models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a",
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        path, torch_dtype=torch.bfloat16, use_safetensors=False
    ).to(memory_format=torch.channels_last)
    # FIX: the quantized VAE was previously constructed but never passed
    # to the pipeline, so the default (unquantized) VAE was used and the
    # quantization work above was wasted. Pass it explicitly.
    pipeline = DiffusionPipeline.from_pretrained(
        ids,
        revision=Revision,
        vae=vae,
        transformer=transformer,
        text_encoder_2=text_encoder_2,
        torch_dtype=torch.bfloat16,
    )
    pipeline.to("cuda")
    # Warm-up pass: triggers CUDA graph / kernel setup ahead of real requests.
    pipeline(
        prompt="ramlike, Curtis, fingerstone, rewhisper",
        width=1024,
        height=1024,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
    )
    return pipeline
@torch.no_grad()
def infer(request: TextToImageRequest, pipeline: Pipeline, generator) -> Image:
    """Generate one image for *request* with the warmed-up pipeline.

    The incoming ``generator`` argument is deliberately ignored: a fresh
    generator is seeded from ``request.seed`` on the pipeline's device so
    results are reproducible per request.
    """
    seeded = Generator(pipeline.device).manual_seed(request.seed)
    result = pipeline(
        request.prompt,
        generator=seeded,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
    )
    return result.images[0]