Update src/pipeline.py
src/pipeline.py  (+9 -7)
@@ -5,19 +5,20 @@ from pipelines.models import TextToImageRequest
 from torch import Generator
 #from time import perf_counter
 import os
-from diffusers import FluxPipeline, AutoencoderKL
+from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
 from diffusers.image_processor import VaeImageProcessor
 from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
 import diffusers
 #from optimum.quanto import freeze, qfloat8, quantize
 import gc
 from diffusers import FluxTransformer2DModel, DiffusionPipeline
-
+from torchao.quantization import quantize_, int8_weight_only
 
+os.environ["TOKENIZERS_PARALLELISM"] = "True"
 os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
 Pipeline = None
 
-ckpt_id = "
+ckpt_id = "blobers/tx"
 def empty_cache():
     gc.collect()
     torch.cuda.empty_cache()
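The new torchao import powers the quantization call added in the next hunk. As a minimal sketch of what int8_weight_only does, assuming only PyTorch and torchao are installed (the toy nn.Sequential below is illustrative, not from this repo):

import torch
from torch import nn
from torchao.quantization import quantize_, int8_weight_only

# quantize_ rewrites weights in place as int8 tensor subclasses (by default
# targeting nn.Linear modules); activations stay in the original dtype.
m = nn.Sequential(nn.Linear(64, 64), nn.GELU(), nn.Linear(64, 8)).to(torch.bfloat16)
quantize_(m, int8_weight_only())

y = m(torch.randn(1, 64, dtype=torch.bfloat16))
print(y.shape)  # torch.Size([1, 8])

Weight-only int8 roughly halves weight memory relative to bfloat16 at little quality cost, which is why the commit applies it to the VAE.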
@@ -27,9 +28,10 @@ def empty_cache():
 def load_pipeline() -> Pipeline:
     empty_cache()
     dtype, device = torch.bfloat16, "cuda"
-
-
-
+    vae = AutoencoderTiny.from_pretrained("aifeifei798/taef1", torch_dtype=dtype)
+    quantize_(vae, int8_weight_only())
+    model = FluxTransformer2DModel.from_pretrained("slobers/transgender", torch_dtype=dtype, use_safetensors=False)
+    pipeline = DiffusionPipeline.from_pretrained(ckpt_id, vae=vae, transformer=model, torch_dtype=dtype)
     torch.backends.cudnn.benchmark = True
     torch.backends.cuda.matmul.allow_tf32 = True
     torch.cuda.set_per_process_memory_fraction(0.90)
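This hunk swaps Flux's full AutoencoderKL for AutoencoderTiny (TAEF1), a distilled autoencoder that consumes the same 16-channel, 1/8-resolution latent space, quantizes it, and assembles the pipeline from separately loaded components. A quick decoder shape check, as a sketch (it reuses the checkpoint id from this commit and assumes a CUDA device; not part of the change itself):

import torch
from diffusers import AutoencoderTiny

vae = AutoencoderTiny.from_pretrained("aifeifei798/taef1", torch_dtype=torch.bfloat16).to("cuda")

# A 1024x1024 Flux image corresponds to a 16-channel 128x128 latent.
latents = torch.randn(1, 16, 128, 128, dtype=torch.bfloat16, device="cuda")
with torch.no_grad():
    image = vae.decode(latents).sample
print(image.shape)  # expected: torch.Size([1, 3, 1024, 1024])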
@@ -38,7 +40,7 @@ def load_pipeline() -> Pipeline:
     pipeline.vae.to(memory_format=torch.channels_last)
     pipeline.vae.enable_tiling()
     pipeline._exclude_from_cpu_offload = ["vae"]
-    pipeline.
+    pipeline.to("cuda")
     for _ in range(2):
         pipeline(prompt="insensible, timbale, pothery, electrovital, actinogram, taxis, intracerebellar, centrodesmus", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
         empty_cache()
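The two warm-up generations absorb one-time costs before any real request is served: cudnn.benchmark autotunes kernels for this exact resolution, the CUDA allocator grows to its steady-state size, and attention kernels get selected. A sketch of the same idea with timing added (warmup is a hypothetical helper, not code from this commit; the call signature mirrors the one above):

from time import perf_counter

def warmup(pipeline, passes=2):
    # The first pass is typically much slower; later passes run at steady state.
    for i in range(passes):
        t0 = perf_counter()
        pipeline(prompt="warmup", width=1024, height=1024,
                 guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
        print(f"warm-up pass {i}: {perf_counter() - t0:.1f}s")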