Text-to-Image
Diffusers
Safetensors
LibreFluxIPAdapterPipeline
neuralvfx committed on
Commit
339b2a9
·
verified ·
1 Parent(s): a241769

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +54 -0
README.md CHANGED
@@ -106,3 +106,57 @@ images = pipe(
106
  )[0][0]
107
  ```
108
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
  )[0][0]
107
  ```
108
 
109
+ # Load Pipeline (Low VRAM)
110
+ ```
111
+ import torch
112
+ from diffusers import DiffusionPipeline
113
+ from optimum.quanto import freeze, quantize, qint8
114
+
115
+ model_id = "neuralvfx/LibreFlux-IP-Adapter"
116
+
117
+ device = "cuda" if torch.cuda.is_available() else "cpu"
118
+ dtype = torch.bfloat16 if device == "cuda" else torch.float32
119
+
120
+ pipe = DiffusionPipeline.from_pretrained(
121
+ model_id,
122
+ custom_pipeline=model_id,
123
+ trust_remote_code=True,
124
+ torch_dtype=dtype,
125
+ safety_checker=None
126
+ )
127
+ print("Loaded:", type(pipe).__name__) # should be LibreFluxIPAdapterPipeline
128
+
129
+
130
+ # Optional way to download the weights (requires: from huggingface_hub import hf_hub_download)
131
+ hf_hub_download(repo_id="neuralvfx/LibreFlux-IP-Adapter",
132
+ filename="ip_adapter.pt",
133
+ local_dir=".",
134
+ local_dir_use_symlinks=False)
135
+
136
+ # Load the IP Adapter First
137
+ pipe.load_ip_adapter('ip_adapter.pt')
138
+
139
+ # Quantize and Freeze
140
+ quantize(
141
+ pipe.transformer,
142
+ weights=qint8,
143
+ exclude=[
144
+ "*.norm", "*.norm1", "*.norm2", "*.norm2_context",
145
+ "proj_out", "x_embedder", "norm_out", "context_embedder",
146
+ ],
147
+ )
148
+
149
+ quantize(
150
+ pipe.ip_adapter,
151
+ weights=qint8,
152
+ exclude=[
153
+ "*.norm", "*.norm1", "*.norm2", "*.norm2_context",
154
+ "proj_out", "x_embedder", "norm_out", "context_embedder",
155
+ ],
156
+ )
157
+ freeze(pipe.transformer)
158
+ freeze(pipe.ip_adapter)
159
+
160
+ # Enable Model Offloading
161
+ pipe.enable_model_cpu_offload()
162
+ ```