YOURNAME committed on
Commit
e3a3cdd
·
1 Parent(s): 55b23d4
Files changed (1) hide show
  1. src/pipeline.py +18 -7
src/pipeline.py CHANGED
@@ -1,14 +1,10 @@
1
- # FLux Optimization Pipeline
2
  import os
3
  import torch
4
  import torch._dynamo
5
  import gc
6
 
7
-
8
  from huggingface_hub.constants import HF_HUB_CACHE
9
- from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
10
-
11
- from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
12
  from torch import Generator
13
  from diffusers import FluxTransformer2DModel, DiffusionPipeline
14
 
@@ -18,7 +14,7 @@ from pipelines.models import TextToImageRequest
18
  from optimum.quanto import requantize
19
  import json
20
  import transformers
21
-
22
 
23
 
24
  torch._dynamo.config.suppress_errors = True
@@ -47,6 +43,20 @@ def load_quanto_text_encoder_2(text_repo_path):
47
  requantize(text_encoder_2, state_dict, quantization_map, device=torch.device("cuda"))
48
  return text_encoder_2
49
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  def load_pipeline() -> Pipeline:
51
 
52
  origin_vae = AutoencoderTiny.from_pretrained("RichardWilliam/XULF_Vae",
@@ -70,11 +80,12 @@ def load_pipeline() -> Pipeline:
70
  # pipeline.text_encoder_v2 = load_quanto_text_encoder_2(text_repo_path=None)
71
  pipeline.enable_cuda_graph(type="max-autotune")
72
  pipeline.text_encoder_v2 = load_quanto_text_encoder_2(text_repo_path=None)
 
73
  except:
74
  print("Something wrong here")
75
 
76
  for __ in range(3):
77
- pipeline(prompt="schoenobatist, halisteresis, chronometric, hallucinative",
78
  width=1024,
79
  height=1024,
80
  guidance_scale=0.0,
 
1
+ # Flux optimization pipeline
2
  import os
3
  import torch
4
  import torch._dynamo
5
  import gc
6
 
 
7
  from huggingface_hub.constants import HF_HUB_CACHE
 
 
 
8
  from torch import Generator
9
  from diffusers import FluxTransformer2DModel, DiffusionPipeline
10
 
 
14
  from optimum.quanto import requantize
15
  import json
16
  import transformers
17
+ # End of import section
18
 
19
 
20
  torch._dynamo.config.suppress_errors = True
 
43
  requantize(text_encoder_2, state_dict, quantization_map, device=torch.device("cuda"))
44
  return text_encoder_2
45
 
46
class TransformerDiffusionCompiler:
    """Wrapper that optionally applies torch.compile to a diffusion pipeline.

    Keeps the wrapped pipeline callable: the rest of this file replaces
    ``pipeline`` with an instance of this class and then invokes it directly
    (``pipeline(prompt=..., ...)``), so ``__call__`` must delegate to the
    underlying pipeline.

    Args:
        diffusion_pipeline: the pipeline object to wrap (called as-is).
        activate_optimization: when True, compile the denoiser module
            eagerly at construction time.
    """

    def __init__(self, diffusion_pipeline, activate_optimization=False):
        self.pipeline = diffusion_pipeline
        self.optimization = activate_optimization
        if self.optimization:
            self._enhance_model()

    def _enhance_model(self):
        """Compile the pipeline's denoiser with torch.compile.

        Flux pipelines (see FluxTransformer2DModel import above) expose the
        denoiser as ``.transformer``, not ``.unet`` — compile whichever one
        the wrapped pipeline actually has, preferring ``transformer``.
        """
        print("Applying advanced optimizations to the transformer pipeline...")
        denoiser = getattr(self.pipeline, "transformer", None)
        if denoiser is not None:
            self.pipeline.transformer = torch.compile(denoiser, mode="reduce-overhead")
        else:
            # Fallback for UNet-based pipelines (original behavior).
            self.pipeline.unet = torch.compile(self.pipeline.unet, mode="reduce-overhead")

    def execute(self, *params, **kwargs):
        """Run the wrapped pipeline, forwarding all arguments unchanged."""
        return self.pipeline(*params, **kwargs)

    # The caller below invokes the wrapper directly (pipeline(prompt=...)),
    # so make instances callable; delegates to execute().
    def __call__(self, *params, **kwargs):
        return self.execute(*params, **kwargs)
59
+
60
  def load_pipeline() -> Pipeline:
61
 
62
  origin_vae = AutoencoderTiny.from_pretrained("RichardWilliam/XULF_Vae",
 
80
  # pipeline.text_encoder_v2 = load_quanto_text_encoder_2(text_repo_path=None)
81
  pipeline.enable_cuda_graph(type="max-autotune")
82
  pipeline.text_encoder_v2 = load_quanto_text_encoder_2(text_repo_path=None)
83
+ pipeline = TransformerDiffusionCompiler(pipeline, activate_optimization=True)
84
  except:
85
  print("Something wrong here")
86
 
87
  for __ in range(3):
88
+ pipeline(prompt="whensoever, lodger, Locarnize, hippology, harakeke",
89
  width=1024,
90
  height=1024,
91
  guidance_scale=0.0,