primerz committed on
Commit
70fc44d
·
verified ·
1 Parent(s): 784618e

Update models.py

Browse files
Files changed (1) hide show
  1. models.py +31 -17
models.py CHANGED
@@ -1,7 +1,7 @@
1
  """
2
  Models.py - Following examplewithface.py EXACTLY
3
  NO MultiControlNetModel wrapper!
4
- Using fuse_lora with scale (examplewithface.py line 267)
5
  """
6
  import torch
7
  import time
@@ -160,8 +160,8 @@ def load_lora(pipe):
160
 
161
  def fuse_lora_with_scale(pipe, lora_scale):
162
  """
163
- Following examplewithface.py lines 266-267:
164
- Load LoRA weights and FUSE them into the model
165
  """
166
  global lora_path_cached
167
 
@@ -169,25 +169,39 @@ def fuse_lora_with_scale(pipe, lora_scale):
169
  return False
170
 
171
  try:
172
- # Unload and unfuse previous LoRA if exists
173
- try:
174
- pipe.unfuse_lora()
175
- pipe.unload_lora_weights()
176
- except:
177
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
 
179
- # Load LoRA weights (examplewithface.py line 266)
180
- print(f" [LORA] Loading weights...")
181
- pipe.load_lora_weights(lora_path_cached)
182
 
183
- # CRITICAL: Fuse LoRA into model (examplewithface.py line 267)
184
- print(f" [LORA] Fusing with scale {lora_scale}...")
185
- pipe.fuse_lora(lora_scale=lora_scale)
186
- print(f" [OK] LoRA fused into model")
187
 
188
  return True
189
  except Exception as e:
190
- print(f" [ERROR] LoRA fusion failed: {e}")
191
  import traceback
192
  traceback.print_exc()
193
  return False
 
1
  """
2
  Models.py - Following examplewithface.py EXACTLY
3
  NO MultiControlNetModel wrapper!
4
+ Using Kohya-style LoRA from lora.py (examplewithface.py lines 223-235)
5
  """
6
  import torch
7
  import time
 
160
 
161
  def fuse_lora_with_scale(pipe, lora_scale):
162
  """
163
+ Following examplewithface.py lines 223-235:
164
+ Use the Kohya-style LoRA loader from lora.py (NOT diffusers built-in)
165
  """
166
  global lora_path_cached
167
 
 
169
  return False
170
 
171
  try:
172
+ # Import the local lora module (Kohya-style)
173
+ import lora
174
+
175
+ print(f" [LORA] Creating network from weights...")
176
+
177
+ # examplewithface.py lines 223-229
178
+ # Note: SDXL has two text encoders, pass both as a list
179
+ text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
180
+
181
+ lora_model, weights_sd = lora.create_network_from_weights(
182
+ lora_scale, # multiplier
183
+ lora_path_cached, # file path
184
+ pipe.vae,
185
+ text_encoders, # Both SDXL text encoders
186
+ pipe.unet,
187
+ for_inference=True,
188
+ )
189
+
190
+ # examplewithface.py lines 231-233
191
+ print(f" [LORA] Merging to model with scale {lora_scale}...")
192
+ lora_model.merge_to(
193
+ text_encoders, pipe.unet, weights_sd, torch.float16, "cuda"
194
+ )
195
 
196
+ # Cleanup
197
+ del weights_sd
198
+ del lora_model
199
 
200
+ print(f" [OK] LoRA merged into model using Kohya loader")
 
 
 
201
 
202
  return True
203
  except Exception as e:
204
+ print(f" [ERROR] LoRA merge failed: {e}")
205
  import traceback
206
  traceback.print_exc()
207
  return False