primerz committed on
Commit
3480bce
·
verified ·
1 Parent(s): 927c643

Update generator.py

Browse files
Files changed (1) hide show
  1. generator.py +31 -12
generator.py CHANGED
@@ -319,22 +319,32 @@ class RetroArtConverter:
319
  print(f" [WARNING] Face detection failed: {e}")
320
 
321
  # Unfuse and reload LORA with new scale (like exampleapp.py)
322
- if hasattr(self.pipe, 'unfuse_lora'):
323
- try:
324
- self.pipe.unfuse_lora()
325
- self.pipe.unload_lora_weights()
326
- print(" [OK] Unfused previous LORA")
327
- except Exception as e:
328
- print(f" [INFO] No previous LORA to unfuse: {e}")
329
 
330
  # Load and fuse LORA at the requested scale
331
- if self.models_loaded['lora'] and self.models_loaded.get('lora_path'):
 
 
 
 
 
 
 
 
 
 
332
  try:
333
- self.pipe.load_lora_weights(self.models_loaded['lora_path'])
334
- self.pipe.fuse_lora(lora_scale=lora_scale)
335
- print(f" [OK] LORA fused at scale: {lora_scale}")
336
  except Exception as e:
337
- print(f" [WARNING] Could not fuse LORA: {e}")
 
338
 
339
  # Setup generator with seed control
340
  if seed == -1:
@@ -349,6 +359,15 @@ class RetroArtConverter:
349
  # Use Compel for prompt encoding (like exampleapp.py - simpler)
350
  if self.use_compel and self.compel is not None:
351
  print("Encoding prompts with Compel...")
 
 
 
 
 
 
 
 
 
352
  conditioning, pooled = self.compel(prompt)
353
  negative_conditioning, negative_pooled = self.compel(negative_prompt)
354
  print(" [OK] Prompts encoded")
 
319
  print(f" [WARNING] Face detection failed: {e}")
320
 
321
  # Unfuse and reload LORA with new scale (like exampleapp.py)
322
+ #if hasattr(self.pipe, 'unfuse_lora'):
323
+ # try:
324
+ # self.pipe.unfuse_lora()
325
+ # self.pipe.unload_lora_weights()
326
+ # print(" [OK] Unfused previous LORA")
327
+ # except Exception as e:
328
+ # print(f" [INFO] No previous LORA to unfuse: {e}")
329
 
330
  # Load and fuse LORA at the requested scale
331
+ #if self.models_loaded['lora'] and self.models_loaded.get('lora_path'):
332
+ # try:
333
+ # self.pipe.load_lora_weights(self.models_loaded['lora_path'])
334
+ # self.pipe.fuse_lora(lora_scale=lora_scale)
335
+ # print(f" [OK] LORA fused at scale: {lora_scale}")
336
+ # except Exception as e:
337
+ # print(f" [WARNING] Could not fuse LORA: {e}")
338
+
339
+ # --- CORRECTED BLOCK ---
340
+ # Set LORA scale using set_adapters (matches models.py)
341
+ if hasattr(self.pipe, 'set_adapters') and self.models_loaded['lora']:
342
  try:
343
+ self.pipe.set_adapters(["retroart"], adapter_weights=[lora_scale])
344
+ print(f"LORA scale: {lora_scale}")
 
345
  except Exception as e:
346
+ print(f"Could not set LORA scale: {e}")
347
+ # --- END OF BLOCK ---
348
 
349
  # Setup generator with seed control
350
  if seed == -1:
 
359
  # Use Compel for prompt encoding (like exampleapp.py - simpler)
360
  if self.use_compel and self.compel is not None:
361
  print("Encoding prompts with Compel...")
362
+
363
+ # --- FIX: Add the LORA trigger word ---
364
+ # Ensure trigger word is present and avoid duplicates
365
+ if TRIGGER_WORD not in prompt:
366
+ # Prepend the trigger word for highest impact
367
+ prompt = f"{TRIGGER_WORD}, {prompt}"
368
+ print(f" Using final prompt: {prompt}")
369
+ # --- End Fix ---
370
+
371
  conditioning, pooled = self.compel(prompt)
372
  negative_conditioning, negative_pooled = self.compel(negative_prompt)
373
  print(" [OK] Prompts encoded")