primerz committed on
Commit
c32a3cb
·
verified ·
1 Parent(s): 491c926

Update models.py

Browse files
Files changed (1) hide show
  1. models.py +39 -66
models.py CHANGED
@@ -16,7 +16,7 @@ from diffusers.models.attention_processor import AttnProcessor2_0
16
  from transformers import CLIPVisionModelWithProjection
17
  from insightface.app import FaceAnalysis
18
  from controlnet_aux import ZoeDetector, OpenposeDetector, LeresDetector, MidasDetector, MediapipeFaceDetector
19
- from huggingface_hub import hf_hub_download
20
  from compel import Compel, ReturnedEmbeddingsType
21
 
22
  # Use reference implementation's attention processor
@@ -64,81 +64,54 @@ def download_model_with_retry(repo_id, filename, max_retries=None, **kwargs):
64
  def load_face_analysis():
65
  """
66
  Load face analysis model with proper error handling.
67
- This version downloads files manually to a custom folder
68
- to bypass the insightface hard-coded zip download.
69
  """
70
  print("Loading face analysis model...")
71
 
72
- # Use a custom model name to prevent insightface from auto-downloading a zip
73
- model_name = FACE_DETECTION_CONFIG['model_name'] # "pixagram_face_models"
74
- local_model_root = '.' # We want files to be in ./pixagram_face_models
75
- local_model_path = os.path.join(local_model_root, model_name)
 
 
 
 
 
76
 
77
  try:
78
- # --- NEW: Manual download logic ---
79
  print(f" Ensuring insightface models are present in {local_model_path}...")
80
- os.makedirs(local_model_path, exist_ok=True)
81
 
82
- required_files = [
83
- "1k3d68.onnx",
84
- "2d106det.onnx",
85
- "genderage.onnx",
86
- "glintr100.onnx",
87
- "scrfd_10g_bnkps.onnx"
88
- ]
89
 
90
- for file_name in required_files:
91
- local_file_path = os.path.join(local_model_path, file_name)
92
 
93
- if not os.path.exists(local_file_path):
94
- print(f" Downloading {file_name}...")
95
-
96
- # Path to the file in the HF model repo
97
- repo_file_path = f"antelopev2/{file_name}"
98
-
99
- try:
100
- # Download the file directly into our target folder
101
- downloaded_path = download_model_with_retry(
102
- repo_id=MODEL_REPO,
103
- filename=repo_file_path,
104
- local_dir=local_model_path,
105
- local_dir_use_symlinks=False,
106
- resume_download=True,
107
- repo_type="model"
108
- )
109
-
110
- # hf_hub_download *might* preserve folder structure,
111
- # e.g., saving to ./pixagram_face_models/antelopev2/genderage.onnx
112
- # We must move it if that happens.
113
-
114
- expected_download_path = os.path.join(local_model_path, *repo_file_path.split('/'))
115
-
116
- if os.path.exists(expected_download_path) and expected_download_path != local_file_path:
117
- print(f" Moving {expected_download_path} to {local_file_path}")
118
- shutil.move(expected_download_path, local_file_path)
119
-
120
- # Clean up empty antelopev2 folder if it was created
121
- try:
122
- os.rmdir(os.path.dirname(expected_download_path))
123
- except OSError:
124
- pass # Not empty, which is fine
125
-
126
- elif not os.path.exists(local_file_path):
127
- # Fallback in case logic is wrong, just check the returned path
128
- if downloaded_path != local_file_path:
129
- print(f" Moving {downloaded_path} to {local_file_path}")
130
- shutil.move(downloaded_path, local_file_path)
131
-
132
- except Exception as e:
133
- print(f" [ERROR] Failed to download {file_name}: {e}")
134
- raise # Re-raise to stop startup
135
 
136
- print(" [OK] All insightface models are present locally.")
 
 
 
 
 
137
  # --- END NEW ---
138
 
 
 
139
  face_app = FaceAnalysis(
140
- name=model_name, # "pixagram_face_models" (custom name)
141
- root=local_model_root, # "." (looks in ./pixagram_face_models)
142
  providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
143
  )
144
  face_app.prepare(
@@ -297,7 +270,7 @@ def load_sdxl_pipeline(controlnets):
297
  "stabilityai/stable-diffusion-xl-base-1.0",
298
  controlnet=controlnets,
299
  torch_dtype=dtype,
300
- use_safetponsors=True
301
  ).to(device)
302
  return pipe, False
303
 
@@ -519,7 +492,7 @@ def load_caption_model():
519
  torch_dtype=dtype
520
  ).to(device)
521
  print(" [OK] BLIP base model loaded (standard captions)")
522
- return caption_primary, caption_model, True, 'blip'
523
  except Exception as e2:
524
  print(f" [WARNING] Caption models not available: {e2}")
525
  print(" Caption generation will be disabled")
@@ -529,7 +502,7 @@ def load_caption_model():
529
  def set_clip_skip(pipe):
530
  """Set CLIP skip value."""
531
  if hasattr(pipe, 'text_encoder'):
532
- print(f" [OK] CLIP skip set to {CLIP_SKIP}")
533
 
534
 
535
  print("[OK] Model loading functions ready")
 
16
  from transformers import CLIPVisionModelWithProjection
17
  from insightface.app import FaceAnalysis
18
  from controlnet_aux import ZoeDetector, OpenposeDetector, LeresDetector, MidasDetector, MediapipeFaceDetector
19
+ from huggingface_hub import hf_hub_download, snapshot_download
20
  from compel import Compel, ReturnedEmbeddingsType
21
 
22
  # Use reference implementation's attention processor
 
64
  def load_face_analysis():
65
  """
66
  Load face analysis model with proper error handling.
67
+ This version pre-downloads models to the exact path
68
+ insightface expects, to bypass its internal downloader.
69
  """
70
  print("Loading face analysis model...")
71
 
72
+ # The library expects models to be at: [root]/[name]
73
+ # We will set root = './models' and name = 'antelopev2'
74
+ # The final path it will check is './models/antelopev2'
75
+
76
+ local_model_root = './models'
77
+ model_name = FACE_DETECTION_CONFIG['model_name'] # 'antelopev2'
78
+
79
+ # This is the directory where the .onnx files must be:
80
+ local_model_path = os.path.join(local_model_root, model_name) # ./models/antelopev2
81
 
82
  try:
83
+ # --- NEW: Download logic based on user's snippet ---
84
  print(f" Ensuring insightface models are present in {local_model_path}...")
 
85
 
86
+ # Check if models are already downloaded
87
+ test_file_path = os.path.join(local_model_path, "scrfd_10g_bnkps.onnx")
 
 
 
 
 
88
 
89
+ if not os.path.exists(test_file_path):
90
+ print(f" Models not found locally, downloading from {MODEL_REPO}...")
91
 
92
+ snapshot_kwargs = {
93
+ "repo_id": MODEL_REPO,
94
+ "repo_type": "model",
95
+ "allow_patterns": f"{model_name}/*.onnx", # Download only files from antelopev2 folder
96
+ "local_dir": local_model_root, # Download *into* ./models
97
+ "local_dir_use_symlinks": False
98
+ }
99
+ if HUGGINGFACE_TOKEN:
100
+ snapshot_kwargs["token"] = HUGGINGFACE_TOKEN
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
 
102
+ # This will download files, preserving the repo folder structure,
103
+ # creating ./models/antelopev2/[...].onnx
104
+ snapshot_download(**snapshot_kwargs)
105
+ print(" [OK] Downloaded insightface models.")
106
+ else:
107
+ print(" [OK] Insightface models found locally.")
108
  # --- END NEW ---
109
 
110
+ # Now, initialize FaceAnalysis
111
+ # It will look for models in root + name = ./models/antelopev2
112
  face_app = FaceAnalysis(
113
+ name=model_name,
114
+ root=local_model_root,
115
  providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
116
  )
117
  face_app.prepare(
 
270
  "stabilityai/stable-diffusion-xl-base-1.0",
271
  controlnet=controlnets,
272
  torch_dtype=dtype,
273
+ use_safetensors=True
274
  ).to(device)
275
  return pipe, False
276
 
 
492
  torch_dtype=dtype
493
  ).to(device)
494
  print(" [OK] BLIP base model loaded (standard captions)")
495
+ return caption_processor, caption_model, True, 'blip'
496
  except Exception as e2:
497
  print(f" [WARNING] Caption models not available: {e2}")
498
  print(" Caption generation will be disabled")
 
502
  def set_clip_skip(pipe):
503
  """Set CLIP skip value."""
504
  if hasattr(pipe, 'text_encoder'):
505
+ print(" [OK] CLIP skip set to {CLIP_SKIP}")
506
 
507
 
508
  print("[OK] Model loading functions ready")