primerz committed on
Commit
157355f
·
verified ·
1 Parent(s): 7ca8329

Update generator.py

Browse files
Files changed (1) hide show
  1. generator.py +5 -5
generator.py CHANGED
@@ -751,15 +751,15 @@ class RetroArtConverter:
751
 
752
  pipe_kwargs["generator"] = generator
753
 
 
754
  # Use Compel for prompt encoding if available
755
  if self.use_compel and self.compel is not None:
756
  try:
757
  print("Encoding prompts with Compel...")
758
 
759
- # --- FIX: Move text encoders to GPU for Compel ---
760
  self.pipe.text_encoder.to(self.device)
761
  self.pipe.text_encoder_2.to(self.device)
762
- # --- END FIX ---
763
 
764
  conditioning = self.compel(prompt)
765
  negative_conditioning = self.compel(negative_prompt)
@@ -775,16 +775,16 @@ class RetroArtConverter:
775
  pipe_kwargs["prompt"] = prompt
776
  pipe_kwargs["negative_prompt"] = negative_prompt
777
  finally:
778
- # --- FIX: Move text encoders back to CPU to save VRAM ---
779
  try:
780
  self.pipe.text_encoder.to("cpu")
781
  self.pipe.text_encoder_2.to("cpu")
782
  except Exception as e:
783
  print(f"Could not move text encoders back to CPU: {e}")
784
- # --- END FIX ---
785
  else:
786
  pipe_kwargs["prompt"] = prompt
787
  pipe_kwargs["negative_prompt"] = negative_prompt
 
788
 
789
  # Add CLIP skip
790
  if hasattr(self.pipe, 'text_encoder'):
@@ -878,7 +878,7 @@ class RetroArtConverter:
878
  pipe_kwargs["controlnet_conditioning_scale"] = conditioning_scales
879
  print(f"Active ControlNets: {len(control_images)} (all {target_width}x{target_height})")
880
  else:
881
- print("No active ControlNets, running standard Img2Img")
882
 
883
  # Generate
884
  print(f"Generating with LCM: Steps={num_inference_steps}, CFG={guidance_scale}, Strength={strength}")
 
751
 
752
  pipe_kwargs["generator"] = generator
753
 
754
+ # --- START FIX: Handle CPU Offload for Compel ---
755
  # Use Compel for prompt encoding if available
756
  if self.use_compel and self.compel is not None:
757
  try:
758
  print("Encoding prompts with Compel...")
759
 
760
+ # Move text encoders to GPU
761
  self.pipe.text_encoder.to(self.device)
762
  self.pipe.text_encoder_2.to(self.device)
 
763
 
764
  conditioning = self.compel(prompt)
765
  negative_conditioning = self.compel(negative_prompt)
 
775
  pipe_kwargs["prompt"] = prompt
776
  pipe_kwargs["negative_prompt"] = negative_prompt
777
  finally:
778
+ # Move text encoders back to CPU
779
  try:
780
  self.pipe.text_encoder.to("cpu")
781
  self.pipe.text_encoder_2.to("cpu")
782
  except Exception as e:
783
  print(f"Could not move text encoders back to CPU: {e}")
 
784
  else:
785
  pipe_kwargs["prompt"] = prompt
786
  pipe_kwargs["negative_prompt"] = negative_prompt
787
+ # --- END FIX ---
788
 
789
  # Add CLIP skip
790
  if hasattr(self.pipe, 'text_encoder'):
 
878
  pipe_kwargs["controlnet_conditioning_scale"] = conditioning_scales
879
  print(f"Active ControlNets: {len(control_images)} (all {target_width}x{target_height})")
880
  else:
881
+ print("No active ControlNets, running standard Img2Img")
882
 
883
  # Generate
884
  print(f"Generating with LCM: Steps={num_inference_steps}, CFG={guidance_scale}, Strength={strength}")