mxpln committed on
Commit
d04ab1e
·
1 Parent(s): 9df8308

Refactor LoRA loading logic in app.py to use lazy loading

Browse files
Files changed (1) hide show
  1. app.py +21 -13
app.py CHANGED
@@ -129,18 +129,13 @@ LORA_CONFIGS = {
129
  "Meta4": ("loras/Meta4.safetensors", 0.85),
130
  "Oily_Skin_QWEN_V2_GMR": ("loras/Oily Skin QWEN V2-GMR.safetensors", 0.85),
131
  "PillowHump_2509": ("loras/PillowHump_2509.safetensors", 0.85),
132
- "Put_it_here_Qwen_edit_V2_0": ("loras/Put it here_Qwen edit_V2.0.safetensors", 0.85),
133
  "QWEN_jtn_barbell": ("loras/QWEN_jtn_barbell.safetensors", 0.85),
134
  "Qwen_Image_Helm_v0_1": ("loras/Qwen-Image-Helm_v0.1.safetensors", 0.85),
135
  "Qwen_MysticXXX_v1": ("loras/Qwen-MysticXXX-v1.safetensors", 0.85),
136
- "Qwen_NSFW_Beta2": ("loras/Qwen-NSFW-Beta2.safetensors", 0.85),
137
- "Qwen_NSFW_Beta4": ("loras/Qwen-NSFW-Beta4.safetensors", 0.85),
138
  "Qwen_NSFW_Beta5": ("loras/Qwen-NSFW-Beta5.safetensors", 0.85),
139
- "Qwen_NSFW": ("loras/Qwen-NSFW.safetensors", 0.85),
140
  "Qwen_Real_PS_v1_83K": ("loras/Qwen-Real PS_v1_83K.safetensors", 0.85),
141
  "Qwen4Play_v2": ("loras/Qwen4Play_v2.safetensors", 0.85),
142
  "QwenImageHentaiPIV_v3_1": ("loras/QwenImageHentaiPIV_v3.1.safetensors", 0.85),
143
- "QwenSnofs1_1": ("loras/QwenSnofs1_1.safetensors", 0.85),
144
  "Qwen_Nsfw_Body_V10_4K": ("loras/Qwen_Nsfw_Body_V10-4K.safetensors", 0.85),
145
  "Qwen_Nsfw_Body_V14_10K": ("loras/Qwen_Nsfw_Body_V14-10K.safetensors", 0.85),
146
  "Qwen_Real_Nud3s": ("loras/Qwen_Real_Nud3s.safetensors", 0.85),
@@ -173,7 +168,6 @@ LORA_CONFIGS = {
173
  "qwen_hand_grab_6000s": ("loras/qwen_hand_grab_6000s.safetensors", 0.85),
174
  "qwen_image_edit_remove_clothing_v1_0": ("loras/qwen_image_edit_remove-clothing_v1.0.safetensors", 0.85),
175
  "qwen_image_snapchat": ("loras/qwen_image_snapchat.safetensors", 0.85),
176
- "qwen_snofs": ("loras/qwen_snofs.safetensors", 0.85),
177
  "qwen_uncensor_000014928": ("loras/qwen_uncensor_000014928.safetensors", 0.85),
178
  "reclining_nude_v1_000003500": ("loras/reclining_nude_v1_000003500.safetensors", 0.85),
179
  "royal_treatment_V3": ("loras/royal+treatment+V3.safetensors", 0.85),
@@ -183,12 +177,8 @@ LORA_CONFIGS = {
183
  "qwen_real_consistency_A2R_2509_Base": ("loras-2/qwen真实+一致性-A2R_2509_Base.safetensors", 0.6),
184
  }
185
 
186
- # Load all LoRAs
187
- for adapter_name, (weight_path, _) in LORA_CONFIGS.items():
188
- pipe.load_lora_weights("wiikoo/Qwen-lora-nsfw",
189
- weight_name=weight_path,
190
- adapter_name=adapter_name)
191
-
192
 
193
  pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
194
  MAX_SEED = np.iinfo(np.int32).max
@@ -228,10 +218,28 @@ def infer(
228
  if input_image is None:
229
  raise gr.Error("Please upload an image to edit.")
230
 
231
- # Special case for consistence_edit_v2 which uses two adapters
232
  if lora_adapter == "consistence_edit_v2":
 
 
 
 
 
 
 
 
 
 
 
233
  pipe.set_adapters(["consistence_edit_v2", "qwen_real_consistency_A2R_2509_Base"], adapter_weights=[0.4, 0.6])
234
  elif lora_adapter in LORA_CONFIGS:
 
 
 
 
 
 
 
235
  _, weight = LORA_CONFIGS[lora_adapter]
236
  pipe.set_adapters([lora_adapter], adapter_weights=[weight])
237
  else:
 
129
  "Meta4": ("loras/Meta4.safetensors", 0.85),
130
  "Oily_Skin_QWEN_V2_GMR": ("loras/Oily Skin QWEN V2-GMR.safetensors", 0.85),
131
  "PillowHump_2509": ("loras/PillowHump_2509.safetensors", 0.85),
 
132
  "QWEN_jtn_barbell": ("loras/QWEN_jtn_barbell.safetensors", 0.85),
133
  "Qwen_Image_Helm_v0_1": ("loras/Qwen-Image-Helm_v0.1.safetensors", 0.85),
134
  "Qwen_MysticXXX_v1": ("loras/Qwen-MysticXXX-v1.safetensors", 0.85),
 
 
135
  "Qwen_NSFW_Beta5": ("loras/Qwen-NSFW-Beta5.safetensors", 0.85),
 
136
  "Qwen_Real_PS_v1_83K": ("loras/Qwen-Real PS_v1_83K.safetensors", 0.85),
137
  "Qwen4Play_v2": ("loras/Qwen4Play_v2.safetensors", 0.85),
138
  "QwenImageHentaiPIV_v3_1": ("loras/QwenImageHentaiPIV_v3.1.safetensors", 0.85),
 
139
  "Qwen_Nsfw_Body_V10_4K": ("loras/Qwen_Nsfw_Body_V10-4K.safetensors", 0.85),
140
  "Qwen_Nsfw_Body_V14_10K": ("loras/Qwen_Nsfw_Body_V14-10K.safetensors", 0.85),
141
  "Qwen_Real_Nud3s": ("loras/Qwen_Real_Nud3s.safetensors", 0.85),
 
168
  "qwen_hand_grab_6000s": ("loras/qwen_hand_grab_6000s.safetensors", 0.85),
169
  "qwen_image_edit_remove_clothing_v1_0": ("loras/qwen_image_edit_remove-clothing_v1.0.safetensors", 0.85),
170
  "qwen_image_snapchat": ("loras/qwen_image_snapchat.safetensors", 0.85),
 
171
  "qwen_uncensor_000014928": ("loras/qwen_uncensor_000014928.safetensors", 0.85),
172
  "reclining_nude_v1_000003500": ("loras/reclining_nude_v1_000003500.safetensors", 0.85),
173
  "royal_treatment_V3": ("loras/royal+treatment+V3.safetensors", 0.85),
 
177
  "qwen_real_consistency_A2R_2509_Base": ("loras-2/qwen真实+一致性-A2R_2509_Base.safetensors", 0.6),
178
  }
179
 
180
+ # Track which LoRAs have been loaded (lazy loading on first use)
181
+ loaded_adapters = set()
 
 
 
 
182
 
183
  pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
184
  MAX_SEED = np.iinfo(np.int32).max
 
218
  if input_image is None:
219
  raise gr.Error("Please upload an image to edit.")
220
 
221
+ # Lazy load LoRA on first use
222
  if lora_adapter == "consistence_edit_v2":
223
+ # Special case: needs two adapters
224
+ if "consistence_edit_v2" not in loaded_adapters:
225
+ pipe.load_lora_weights("wiikoo/Qwen-lora-nsfw",
226
+ weight_name="loras2/consistence_edit_v2.safetensors",
227
+ adapter_name="consistence_edit_v2")
228
+ loaded_adapters.add("consistence_edit_v2")
229
+ if "qwen_real_consistency_A2R_2509_Base" not in loaded_adapters:
230
+ pipe.load_lora_weights("wiikoo/Qwen-lora-nsfw",
231
+ weight_name="loras-2/qwen真实+一致性-A2R_2509_Base.safetensors",
232
+ adapter_name="qwen_real_consistency_A2R_2509_Base")
233
+ loaded_adapters.add("qwen_real_consistency_A2R_2509_Base")
234
  pipe.set_adapters(["consistence_edit_v2", "qwen_real_consistency_A2R_2509_Base"], adapter_weights=[0.4, 0.6])
235
  elif lora_adapter in LORA_CONFIGS:
236
+ # Load LoRA if not already loaded
237
+ if lora_adapter not in loaded_adapters:
238
+ weight_path, _ = LORA_CONFIGS[lora_adapter]
239
+ pipe.load_lora_weights("wiikoo/Qwen-lora-nsfw",
240
+ weight_name=weight_path,
241
+ adapter_name=lora_adapter)
242
+ loaded_adapters.add(lora_adapter)
243
  _, weight = LORA_CONFIGS[lora_adapter]
244
  pipe.set_adapters([lora_adapter], adapter_weights=[weight])
245
  else: