primerz committed on
Commit
3058dc8
·
verified ·
1 Parent(s): 8a37e2d

Update models.py

Browse files
Files changed (1) hide show
  1. models.py +12 -11
models.py CHANGED
@@ -17,7 +17,10 @@ from transformers import CLIPVisionModelWithProjection
17
  from insightface.app import FaceAnalysis
18
  from controlnet_aux import ZoeDetector, OpenposeDetector, LeresDetector, MidasDetector, MediapipeFaceDetector
19
  from huggingface_hub import hf_hub_download, snapshot_download
20
- from compel import Compel, ReturnedEmbeddingsType
 
 
 
21
 
22
  # Use reference implementation's attention processor
23
  from attention_processor import IPAttnProcessor2_0, AttnProcessor
@@ -382,21 +385,19 @@ def setup_ip_adapter(pipe, image_encoder):
382
  return None, False
383
 
384
 
 
385
  def setup_compel(pipe):
386
- """Setup Compel for better SDXL prompt handling."""
387
- print("Setting up Compel for enhanced prompt processing...")
388
  try:
389
- compel = Compel(
390
- tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
391
- text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
392
- returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
393
- requires_pooled=[False, True]
394
- )
395
- print(" [OK] Compel loaded successfully")
396
  return compel, True
397
  except Exception as e:
398
  print(f" [WARNING] Compel not available: {e}")
399
  return None, False
 
400
 
401
 
402
  def setup_scheduler(pipe):
@@ -414,7 +415,7 @@ def optimize_pipeline(pipe):
414
  # Try to enable xformers
415
  if device == "cuda":
416
  try:
417
- pipe.enable_xformfiers_memory_efficient_attention()
418
  print(" [OK] xformers enabled")
419
  except Exception as e:
420
  print(f" [INFO] xformers not available: {e}")
 
17
  from insightface.app import FaceAnalysis
18
  from controlnet_aux import ZoeDetector, OpenposeDetector, LeresDetector, MidasDetector, MediapipeFaceDetector
19
  from huggingface_hub import hf_hub_download, snapshot_download
20
+
21
+ # --- START FIX: Import CompelForSDXL ---
22
+ from compel import CompelForSDXL
23
+ # --- END FIX ---
24
 
25
  # Use reference implementation's attention processor
26
  from attention_processor import IPAttnProcessor2_0, AttnProcessor
 
385
  return None, False
386
 
387
 
388
+ # --- START FIX: Use CompelForSDXL helper class ---
389
  def setup_compel(pipe):
390
+ """Setup CompelForSDXL for better SDXL prompt handling."""
391
+ print("Setting up Compel for enhanced prompt processing (using CompelForSDXL)...")
392
  try:
393
+ # Pass the whole pipeline as per Compel docs
394
+ compel = CompelForSDXL(pipe)
395
+ print(" [OK] CompelForSDXL loaded successfully")
 
 
 
 
396
  return compel, True
397
  except Exception as e:
398
  print(f" [WARNING] Compel not available: {e}")
399
  return None, False
400
+ # --- END FIX ---
401
 
402
 
403
  def setup_scheduler(pipe):
 
415
  # Try to enable xformers
416
  if device == "cuda":
417
  try:
418
+ pipe.enable_xformers_memory_efficient_attention()
419
  print(" [OK] xformers enabled")
420
  except Exception as e:
421
  print(f" [INFO] xformers not available: {e}")