danicor committed on
Commit
1f03b1c
·
verified ·
1 Parent(s): dd0c01f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +118 -105
app.py CHANGED
@@ -76,141 +76,154 @@ def init_face_parser():
76
  if FACE_PARSING_AVAILABLE:
77
  return True
78
 
79
- print("[FaceParsing] FORCING jonathandinu/face-parsing load...")
80
 
81
- # روش ۱: استفاده از Hugging Face CLI برای دانلود
82
  try:
83
- print("[FaceParsing] Method 1: Using huggingface_hub directly...")
84
- from huggingface_hub import snapshot_download
85
-
86
- # دانلود تمام فایل‌های مدل
87
- local_path = snapshot_download(
88
- repo_id="jonathandinu/face-parsing",
89
- repo_type="model",
90
- local_dir="./face_parsing_model",
91
- local_dir_use_syms=False,
92
- ignore_patterns=["*.md", "*.txt", "*.gitattributes"]
93
- )
94
-
95
- print(f"[FaceParsing] Model downloaded to: {local_path}")
96
-
97
- # حالا از پوشه محلی لود کن
98
- from transformers import AutoModelForImageSegmentation, AutoImageProcessor
99
 
100
- model = AutoModelForImageSegmentation.from_pretrained(local_path)
101
- processor = AutoImageProcessor.from_pretrained(local_path)
 
102
 
103
- FACE_PARSER = {
104
- 'model': model,
105
- 'processor': processor,
106
- 'path': local_path,
107
- 'source': 'direct_download'
108
- }
109
 
110
- FACE_PARSING_AVAILABLE = True
111
- print("[FaceParsing] Model loaded from direct download!")
112
- return True
 
 
 
113
 
114
- except Exception as e:
115
- print(f"[FaceParsing] Method 1 failed: {str(e)[:200]}")
116
-
117
- # روش ۲: ساخت config دستی
118
- try:
119
- print("[FaceParsing] Method 2: Creating manual config...")
 
120
 
121
- # ایجاد config.json دستی
122
- import json
123
- import os
 
 
 
 
 
 
124
 
125
- config = {
 
126
  "_name_or_path": "jonathandinu/face-parsing",
127
  "architectures": ["SegformerForSemanticSegmentation"],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
  "model_type": "segformer",
129
- "num_labels": 19, # برای face parsing معمولاً 19 کلاس
130
- "image_size": 512,
131
- "hidden_sizes": [32, 64, 160, 256],
132
  "num_attention_heads": [1, 2, 5, 8],
133
- "depths": [2, 2, 2, 2]
 
 
 
 
 
 
 
134
  }
135
 
136
- # ذخیره config
137
- os.makedirs("./manual_model", exist_ok=True)
138
- with open("./manual_model/config.json", "w") as f:
139
- json.dump(config, f)
140
-
141
- # دانلود weights
142
- print("[FaceParsing] Downloading weights...")
143
- from huggingface_hub import hf_hub_download
 
 
 
 
 
 
 
 
 
144
 
145
- # سعی کن pytorch_model.bin را دانلود کنی
146
- try:
147
- weights_path = hf_hub_download(
148
- repo_id="jonathandinu/face-parsing",
149
- filename="pytorch_model.bin",
150
- local_dir="./manual_model"
151
- )
152
- except:
153
- # اگر safetensors باشد
154
- weights_path = hf_hub_download(
155
- repo_id="jonathandinu/face-parsing",
156
- filename="model.safetensors",
157
- local_dir="./manual_model"
158
- )
159
 
160
- # لود مدل
 
161
  from transformers import SegformerForSemanticSegmentation
162
- model = SegformerForSemanticSegmentation.from_pretrained("./manual_model")
163
-
164
- # processor
165
  from transformers import SegformerImageProcessor
166
- processor = SegformerImageProcessor()
167
 
 
 
 
 
 
 
 
 
 
 
 
168
  FACE_PARSER = {
169
  'model': model,
170
  'processor': processor,
171
- 'path': "./manual_model",
172
- 'source': 'manual_config'
 
 
173
  }
174
 
175
  FACE_PARSING_AVAILABLE = True
176
- print("[FaceParsing] ✓ Model loaded with manual config!")
177
- return True
178
 
179
- except Exception as e:
180
- print(f"[FaceParsing] Method 2 failed: {str(e)[:200]}")
181
-
182
- # روش ۳: Contact نویسنده مدل
183
- print("[FaceParsing] Method 3: The model might be broken...")
184
- print("[FaceParsing] Contacting model author: jonathandinu")
185
- print("[FaceParsing] Model URL: https://huggingface.co/jonathandinu/face-parsing")
186
-
187
- # روش ۴: استفاده از MediaPipe (همان کاری که الان می‌کند)
188
- print("[FaceParsing] Method 4: Using MediaPipe instead...")
189
- try:
190
- import mediapipe as mp
191
 
192
- mp_face_mesh = mp.solutions.face_mesh
193
- face_mesh = mp_face_mesh.FaceMesh(
194
- static_image_mode=True,
195
- max_num_faces=1,
196
- refine_landmarks=True,
197
- min_detection_confidence=0.5
198
- )
199
-
200
- FACE_PARSER = {
201
- 'model': face_mesh,
202
- 'processor': None,
203
- 'type': 'mediapipe',
204
- 'original_model': 'jonathandinu/face-parsing (unavailable)'
205
- }
206
-
207
- FACE_PARSING_AVAILABLE = True
208
- print("[FaceParsing] ✓ Using MediaPipe (fallback for unavailable model)")
209
  return True
210
 
211
  except Exception as e:
212
- print(f"[FaceParsing] MediaPipe failed: {e}")
213
- return False
 
 
 
 
 
 
 
214
 
215
  def try_load_model(model_name, model_type):
216
  """سعی کن یک مدل را لود کنی"""
 
76
  if FACE_PARSING_AVAILABLE:
77
  return True
78
 
79
+ print("[FaceParsing] 🚀 FINAL: Loading complete model...")
80
 
 
81
  try:
82
+ import os
83
+ import json
84
+ import shutil
 
 
 
 
 
 
 
 
 
 
 
 
 
85
 
86
+ # ۱. ایجاد پوشه برای مدل کامل
87
+ complete_dir = "./complete_face_parsing"
88
+ os.makedirs(complete_dir, exist_ok=True)
89
 
90
+ print("[FaceParsing] Step 1: Setting up model directory...")
 
 
 
 
 
91
 
92
+ # ۲. کپی weights اگر وجود دارد
93
+ weights_sources = [
94
+ "./manual_model/pytorch_model.bin",
95
+ "./manual_model/model.safetensors",
96
+ "./pytorch_model.bin"
97
+ ]
98
 
99
+ weights_copied = False
100
+ for source in weights_sources:
101
+ if os.path.exists(source):
102
+ shutil.copy(source, os.path.join(complete_dir, os.path.basename(source)))
103
+ print(f"[FaceParsing] ✓ Copied weights from {source}")
104
+ weights_copied = True
105
+ break
106
 
107
+ if not weights_copied:
108
+ print("[FaceParsing] ⚠ No weights file found, downloading...")
109
+ # دانلود weights اگر نیست
110
+ from huggingface_hub import hf_hub_download
111
+ hf_hub_download(
112
+ repo_id="jonathandinu/face-parsing",
113
+ filename="pytorch_model.bin",
114
+ local_dir=complete_dir
115
+ )
116
 
117
+ # ۳. ذخیره config.json (همین که دارید)
118
+ config_data = {
119
  "_name_or_path": "jonathandinu/face-parsing",
120
  "architectures": ["SegformerForSemanticSegmentation"],
121
+ "attention_probs_dropout_prob": 0.0,
122
+ "classifier_dropout_prob": 0.1,
123
+ "decoder_hidden_size": 768,
124
+ "depths": [3, 6, 40, 3],
125
+ "downsampling_rates": [1, 4, 8, 16],
126
+ "drop_path_rate": 0.1,
127
+ "hidden_act": "gelu",
128
+ "hidden_dropout_prob": 0.0,
129
+ "hidden_sizes": [64, 128, 320, 512],
130
+ "id2label": {
131
+ "0": "background", "1": "skin", "2": "nose", "3": "eye_g",
132
+ "4": "l_eye", "5": "r_eye", "6": "l_brow", "7": "r_brow",
133
+ "8": "l_ear", "9": "r_ear", "10": "mouth", "11": "u_lip",
134
+ "12": "l_lip", "13": "hair", "14": "hat", "15": "ear_r",
135
+ "16": "neck_l", "17": "neck", "18": "cloth"
136
+ },
137
+ "image_size": 224,
138
+ "initializer_range": 0.02,
139
+ "label2id": {
140
+ "background": 0, "skin": 1, "nose": 2, "eye_g": 3,
141
+ "l_eye": 4, "r_eye": 5, "l_brow": 6, "r_brow": 7,
142
+ "l_ear": 8, "r_ear": 9, "mouth": 10, "u_lip": 11,
143
+ "l_lip": 12, "hair": 13, "hat": 14, "ear_r": 15,
144
+ "neck_l": 16, "neck": 17, "cloth": 18
145
+ },
146
+ "layer_norm_eps": 1e-06,
147
+ "mlp_ratios": [4, 4, 4, 4],
148
  "model_type": "segformer",
 
 
 
149
  "num_attention_heads": [1, 2, 5, 8],
150
+ "num_channels": 3,
151
+ "num_encoder_blocks": 4,
152
+ "patch_sizes": [7, 3, 3, 3],
153
+ "reshape_last_stage": True,
154
+ "semantic_loss_ignore_index": 255,
155
+ "sr_ratios": [8, 4, 2, 1],
156
+ "strides": [4, 2, 2, 2],
157
+ "transformers_version": "4.37.0.dev0"
158
  }
159
 
160
+ with open(os.path.join(complete_dir, "config.json"), "w") as f:
161
+ json.dump(config_data, f, indent=2)
162
+ print("[FaceParsing] ✓ Saved config.json")
163
+
164
+ # ۴. ذخیره preprocessor_config.json
165
+ preprocessor_config = {
166
+ "do_normalize": True,
167
+ "do_reduce_labels": False,
168
+ "do_rescale": True,
169
+ "do_resize": True,
170
+ "image_mean": [0.485, 0.456, 0.406],
171
+ "image_processor_type": "SegformerFeatureExtractor",
172
+ "image_std": [0.229, 0.224, 0.225],
173
+ "resample": 2,
174
+ "rescale_factor": 0.00392156862745098,
175
+ "size": {"height": 512, "width": 512}
176
+ }
177
 
178
+ with open(os.path.join(complete_dir, "preprocessor_config.json"), "w") as f:
179
+ json.dump(preprocessor_config, f, indent=2)
180
+ print("[FaceParsing] Saved preprocessor_config.json")
 
 
 
 
 
 
 
 
 
 
 
181
 
182
+ # ۵. لود مدل
183
+ print("[FaceParsing] Step 2: Loading model...")
184
  from transformers import SegformerForSemanticSegmentation
 
 
 
185
  from transformers import SegformerImageProcessor
 
186
 
187
+ # لود model
188
+ model = SegformerForSemanticSegmentation.from_pretrained(
189
+ complete_dir,
190
+ local_files_only=True,
191
+ ignore_mismatched_sizes=False
192
+ )
193
+
194
+ # لود processor
195
+ processor = SegformerImageProcessor.from_pretrained(complete_dir)
196
+
197
+ # ۶. ذخیره
198
  FACE_PARSER = {
199
  'model': model,
200
  'processor': processor,
201
+ 'path': complete_dir,
202
+ 'num_labels': 19,
203
+ 'id2label': config_data['id2label'],
204
+ 'status': 'complete'
205
  }
206
 
207
  FACE_PARSING_AVAILABLE = True
 
 
208
 
209
+ print("[FaceParsing] SUCCESS: Model fully loaded!")
210
+ print(f"[FaceParsing] Architecture: Segformer")
211
+ print(f"[FaceParsing] Num labels: 19")
212
+ print(f"[FaceParsing] Image size: 224")
213
+ print(f"[FaceParsing] Classes: {list(config_data['id2label'].values())}")
 
 
 
 
 
 
 
214
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
215
  return True
216
 
217
  except Exception as e:
218
+ print(f"[FaceParsing] FINAL load failed: {e}")
219
+
220
+ # نمایش traceback کامل
221
+ import traceback
222
+ print(f"[FaceParsing] Traceback:\n{traceback.format_exc()[:500]}")
223
+
224
+ # Fallback
225
+ print("[FaceParsing] Falling back to MediaPipe...")
226
+ return setup_mediapipe_fallback()
227
 
228
  def try_load_model(model_name, model_type):
229
  """سعی کن یک مدل را لود کنی"""