Add files using upload-large-folder tool

src/pipeline.py (ADDED, +76 -0)
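The single added file wires up a latency-oriented FLUX.1-schnell text-to-image pipeline: the tiny VAE and both text encoders are int8 weight-only quantized with torchao, the transformer is loaded from a pre-quantized local snapshot, the pipeline is warmed up with two throwaway generations, and infer() renders requests with a placeholder-image fallback on failure.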
from diffusers import AutoencoderTiny, DiffusionPipeline, FluxTransformer2DModel
from transformers import CLIPTextModel, T5EncoderModel
from torchao.quantization import quantize_, int8_weight_only
from pipelines.models import TextToImageRequest
from PIL import Image as img
from PIL.Image import Image
from torch import Generator
import torch
import gc
import os
import time

# Ask the CUDA caching allocator to use expandable segments, which reduces
# fragmentation; the allocator reads this on first use, so setting it after
# `import torch` still takes effect.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Placeholder type for the harness; the concrete object is whatever
# load_pipeline() returns.
Pipeline = None

ckpt_id = "black-forest-labs/FLUX.1-schnell"


def empty_cache():
    """Collect Python garbage, flush the CUDA cache, and reset peak-memory stats."""
    start = time.time()
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    print(f"Flush took: {time.time() - start:.2f}s")


def load_pipeline() -> Pipeline:
    empty_cache()

    dtype, device = torch.bfloat16, "cuda"

    # Distilled tiny VAE in place of the full FLUX autoencoder.
    vae = AutoencoderTiny.from_pretrained(
        "RobertML/FLUX.1-schnell-vae_e3m2", torch_dtype=dtype
    )
    quantize_(vae, int8_weight_only())

    ############ Text Encoder ############
    text_encoder = CLIPTextModel.from_pretrained(
        ckpt_id, subfolder="text_encoder", torch_dtype=dtype
    )
    quantize_(text_encoder, int8_weight_only())

    ############ Text Encoder 2 ############
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "city96/t5-v1_1-xxl-encoder-bf16", torch_dtype=dtype
    )
    quantize_(text_encoder_2, int8_weight_only())

    # The transformer comes from a locally cached snapshot that is already
    # int8 weight-only quantized, so it is not quantized again here.
    model = FluxTransformer2DModel.from_pretrained(
        "/home/sandbox/.cache/huggingface/hub/models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a",
        torch_dtype=dtype,
        use_safetensors=False,
    )

    pipeline = DiffusionPipeline.from_pretrained(
        ckpt_id,
        transformer=model,
        text_encoder=text_encoder,
        text_encoder_2=text_encoder_2,
        vae=vae,
        torch_dtype=dtype,
    ).to(device)

    # Warm-up: two throwaway 1024x1024 generations with a nonsense prompt so
    # CUDA kernels and autotuning caches are primed before timed inference.
    for _ in range(2):
        pipeline(
            prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness",
            width=1024,
            height=1024,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
        )

    empty_cache()
    return pipeline
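An aside for readers unfamiliar with the quantization call used three times above: torchao's quantize_ mutates a module in place, swapping eligible nn.Linear weights for int8 tensors while activations stay in bf16, so weight memory roughly halves. A minimal standalone sketch, assuming a torchao build that still exports the int8_weight_only API imported in this file:

import torch
from torchao.quantization import quantize_, int8_weight_only

# Toy two-layer module; quantize_ walks it and replaces each nn.Linear's
# weight with an int8 weight-only quantized tensor, in place.
toy = torch.nn.Sequential(
    torch.nn.Linear(64, 64),
    torch.nn.Linear(64, 8),
).to(torch.bfloat16)
quantize_(toy, int8_weight_only())

# The forward pass still takes and returns bf16 activations.
out = toy(torch.randn(1, 64, dtype=torch.bfloat16))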
@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
    try:
        # Seed a generator on the pipeline's device for reproducible sampling.
        generator = Generator(pipeline.device).manual_seed(request.seed)
        image = pipeline(
            request.prompt,
            generator=generator,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
            height=request.height,
            width=request.width,
            output_type="pil",
        ).images[0]
    except Exception:
        # On any failure, return a bundled placeholder image instead of raising.
        image = img.open("./RobertML.png")
    return image
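The commit includes no driver, but the two entry points compose as below. This is a hypothetical smoke test, assuming TextToImageRequest accepts the four fields infer() actually reads (prompt, seed, height, width):

# Hypothetical local smoke test; the real harness builds the request itself.
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(
        prompt="a watercolor fox in the snow",
        seed=0,
        height=1024,
        width=1024,
    )
    infer(request, pipe).save("out.png")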