ArmanRV committed on
Commit 978ca4f · verified · 1 parent: 0d734ed

Update app.py

Files changed (1): app.py (+49 -212)
app.py CHANGED
@@ -9,12 +9,14 @@ from PIL import Image
 
 # =========================
 # FIX: gradio 4.24 / gradio_client crashes on boolean JSON Schemas in /api_info
+# - works across gradio_client versions (get_desc may not exist)
 # =========================
 def _patch_gradio_client_bool_schema():
     try:
         import gradio_client.utils as gcu
         patched_any = False
 
+        # 1) Patch get_type if it exists
         if hasattr(gcu, "get_type"):
             _orig_get_type = gcu.get_type
 
@@ -26,6 +28,7 @@ def _patch_gradio_client_bool_schema():
             gcu.get_type = _get_type_patched
             patched_any = True
 
+        # 2) Patch get_desc if it exists (some versions don't have it)
         if hasattr(gcu, "get_desc"):
             _orig_get_desc = gcu.get_desc
 
@@ -37,10 +40,12 @@ def _patch_gradio_client_bool_schema():
             gcu.get_desc = _get_desc_patched
             patched_any = True
 
+        # 3) Patch the internal JSON-schema conversion (this is the key crash site)
         if hasattr(gcu, "_json_schema_to_python_type"):
             _orig_json2py = gcu._json_schema_to_python_type
 
             def _json_schema_to_python_type_patched(schema, defs=None):
+                # JSON Schema allows boolean schemas (True/False). Treat as "any".
                 if isinstance(schema, bool):
                     return "any"
                 return _orig_json2py(schema, defs)
@@ -58,6 +63,7 @@ def _patch_gradio_client_bool_schema():
 
 _patch_gradio_client_bool_schema()
 
+
 import torch
 import numpy as np
 from torchvision import transforms
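For context, the crash this patch works around is easy to reproduce in isolation: JSON Schema permits `true`/`false` as sub-schemas, and traversal code that assumes every schema is a dict breaks on them. A minimal, self-contained sketch (not part of the commit; all names here are illustrative):

    schema = {
        "type": "object",
        "properties": {"payload": {"type": "string"}},
        "additionalProperties": True,  # boolean sub-schema: dict-only code fails here
    }

    def to_python_type(s):
        # Same guard the patch installs: boolean schemas mean "accept anything"
        # or "accept nothing", so map them to "any" instead of calling dict
        # methods on a bool.
        if isinstance(s, bool):
            return "any"
        return {"object": "dict", "string": "str"}.get(s.get("type", ""), "any")

    print(to_python_type(schema))                           # -> dict
    print(to_python_type(schema["additionalProperties"]))   # -> any (would raise AttributeError unguarded)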
@@ -97,11 +103,14 @@ APP_AUTH = (DEMO_USER, DEMO_PASS) if (DEMO_USER and DEMO_PASS) else None
 # =========================
 GARMENT_DIR = "garments"
 ALLOWED_EXTS = (".png", ".jpg", ".jpeg", ".webp")
-GARMENTS_DATASET = os.getenv("GARMENTS_DATASET", "").strip()
+GARMENTS_DATASET = os.getenv("GARMENTS_DATASET", "").strip()  # e.g. "ArmanRV/armanrv-garments"
 HF_TOKEN = os.getenv("HF_TOKEN", "").strip()
 
 
 def ensure_garments_downloaded() -> None:
+    """
+    Downloads garments from HF Dataset into ./garments to avoid Space repo 1GB limit.
+    """
     os.makedirs(GARMENT_DIR, exist_ok=True)
 
     if HF_TOKEN:
@@ -129,6 +138,9 @@ def ensure_garments_downloaded() -> None:
 
 
 def list_garments() -> List[str]:
+    """
+    Recursively list images inside ./garments (handles dataset subfolders).
+    """
     files: List[str] = []
     if not os.path.isdir(GARMENT_DIR):
         return files
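The body of ensure_garments_downloaded is elided in these hunks; only the docstring and the HF_TOKEN check are visible. A minimal sketch of what such a helper typically does, assuming huggingface_hub is used (the helper name below is hypothetical):

    from huggingface_hub import snapshot_download

    def _download_garments_sketch(dataset_id: str, token: str = "") -> None:
        # Pull only image files from the dataset repo straight into ./garments.
        snapshot_download(
            repo_id=dataset_id,
            repo_type="dataset",
            local_dir="garments",
            token=token or None,
            allow_patterns=["*.png", "*.jpg", "*.jpeg", "*.webp"],
        )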
@@ -162,7 +174,7 @@ def build_gallery_items(files: List[str]):
 
 
 # =========================
-# Helpers
+# Small helpers
 # =========================
 def clamp_int(x, lo, hi):
     try:
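allow_call's body falls mostly outside this diff; only its success path (`return True, ""`) is visible in the next hunk. A sketch of the module-level min-interval limiter that such a signature implies (assumed, not taken from the file):

    import time

    _last_call_ts = 0.0

    def allow_call_sketch(min_interval_sec: float = 2.5):
        # Reject calls that arrive sooner than min_interval_sec after the last one.
        global _last_call_ts
        now = time.time()
        if now - _last_call_ts < min_interval_sec:
            return False, "⏳ Too many requests, please wait a moment"
        _last_call_ts = now
        return True, ""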
@@ -183,116 +195,8 @@ def allow_call(min_interval_sec: float = 2.5) -> Tuple[bool, str]:
     return True, ""
 
 
-def _quality_metrics(img: Image.Image) -> Tuple[int, int, float, float]:
-    """(w, h, brightness, sharpness)"""
-    img = img.convert("RGB")
-    w, h = img.size
-    gray = np.array(img.convert("L"))
-    brightness = float(gray.mean())
-    gy, gx = np.gradient(gray.astype(np.float32))
-    sharpness = float((gx * gx + gy * gy).mean())
-    return w, h, brightness, sharpness
-
-
-# =========================
-# Person photo evaluation (UX gate)
-# - key point: if it does NOT look like a photo of a person -> warn and block try-on
-# - quality warnings are shown only for clear-cut problems
-# =========================
-def _count_openpose_keypoints(keypoints) -> int:
-    """
-    Try to count detected keypoints (score > 0.2) generically,
-    across the formats that different OpenPose implementations may return.
-    """
-    try:
-        if isinstance(keypoints, dict):
-            cand = keypoints.get("candidate", None)
-            if cand is None:
-                # sometimes stored under a different key
-                cand = keypoints.get("candidates", None)
-            if cand is not None:
-                cand = np.array(cand)
-                if cand.ndim >= 2 and cand.shape[-1] >= 3:
-                    return int((cand[:, 2] > 0.2).sum())
-            # subset/candidate sometimes come in another shape; if unrecognized, return 0
-            return 0
-
-        arr = np.array(keypoints)
-        if arr.ndim >= 2 and arr.shape[-1] >= 3:
-            return int((arr[..., 2] > 0.2).sum())
-    except Exception:
-        return 0
-    return 0
-
-
-def _detect_person_openpose_or_parsing(img: Image.Image) -> bool:
-    """
-    True if the image looks like a person:
-    - OpenPose found enough keypoints, OR
-    - human parsing produced a noticeable non-background region
-    """
-    try:
-        # EXIF rotation (often breaks detection on phone photos)
-        try:
-            img = _apply_exif_orientation(img)
-        except Exception:
-            pass
-
-        small = img.convert("RGB").resize((384, 512))
-
-        # 1) OpenPose
-        keypoints = openpose_model(small)
-        kpt_count = _count_openpose_keypoints(keypoints)
-        if kpt_count >= 6:
-            return True
-
-        # 2) Parsing
-        model_parse, _ = parsing_model(small)
-        mp = np.array(model_parse) if not isinstance(model_parse, np.ndarray) else model_parse
-
-        # fraction of non-background pixels
-        non_bg = float((mp > 0).mean())
-        if non_bg >= 0.03:
-            return True
-
-        return False
-    except Exception:
-        return False
-
-
-def evaluate_person_photo(img: Optional[Image.Image]) -> Tuple[bool, str]:
-    """
-    UX logic:
-    1) If it does NOT look like a photo of a person (no keypoints, no parsing region) -> ⚠️ and ask for another photo
-    2) If it does -> ✅ the photo is suitable,
-       OR ⚠️ (only for a clear-cut quality problem)
-    """
-    if img is None:
-        return False, ""
-
-    is_person = _detect_person_openpose_or_parsing(img)
-    if not is_person:
-        return False, "⚠️ This does not look like a photo of a person. Upload a photo of a person (waist-up or full-length)."
-
-    w, h, brightness, sharpness = _quality_metrics(img)
-
-    issues = []
-    # clear-cut problems only
-    if min(w, h) < 520:
-        issues.append("low resolution")
-    if brightness < 50:
-        issues.append("too dark")
-    if sharpness < 8:
-        issues.append("heavily blurred")
-
-    if issues:
-        return True, "⚠️ This photo may work poorly for try-on (" + ", ".join(issues) + "). Better to upload another one."
-
-    return True, "✅ The photo is suitable for try-on."
-
-
 # =========================
-# Model init
+# Model init (local IDM-VTON)
 # =========================
 base_path = "yisol/IDM-VTON"
 
@@ -300,8 +204,11 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32
 print("DEVICE:", DEVICE, "DTYPE:", DTYPE, flush=True)
 
-tensor_transfrom = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
+tensor_transfrom = transforms.Compose(
+    [transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
+)
 
+# Components
 unet = UNet2DConditionModel.from_pretrained(base_path, subfolder="unet", torch_dtype=DTYPE)
 unet.requires_grad_(False)
 
@@ -319,6 +226,7 @@ vae = AutoencoderKL.from_pretrained(base_path, subfolder="vae", torch_dtype=DTYPE)
 UNet_Encoder = UNet2DConditionModel_ref.from_pretrained(base_path, subfolder="unet_encoder", torch_dtype=DTYPE)
 UNet_Encoder.requires_grad_(False)
 
+# Preprocessors
 parsing_model = Parsing(0)
 openpose_model = OpenPose(0)
 
@@ -341,6 +249,9 @@ pipe = TryonPipeline.from_pretrained(
 pipe.unet_encoder = UNet_Encoder
 
 
+# =========================
+# Inference (returns ONLY the final image)
+# =========================
 @spaces.GPU
 def start_tryon(
     human_pil: Image.Image,
@@ -349,11 +260,12 @@ def start_tryon(
     crop_center: bool = True,
     denoise_steps: int = 25,
     seed: int = 42,
-    guidance_scale: float = 2.0,
 ) -> Image.Image:
+
     device = "cuda" if torch.cuda.is_available() else "cpu"
     dtype = torch.float16 if device == "cuda" else torch.float32
 
+    # Move models
     if device == "cuda":
         openpose_model.preprocessor.body_estimation.model.to(device)
         pipe.to(device)
@@ -362,6 +274,7 @@ def start_tryon(
     garm_img = garm_img.convert("RGB").resize((768, 1024))
     human_img_orig = human_pil.convert("RGB")
 
+    # Crop
     if crop_center:
         width, height = human_img_orig.size
         target_width = int(min(width, height * (3 / 4)))
@@ -375,9 +288,8 @@ def start_tryon(
         human_img = cropped_img.resize((768, 1024))
     else:
         human_img = human_img_orig.resize((768, 1024))
-        crop_size = None
-        left = top = None
 
+    # Mask
     if auto_mask:
         keypoints = openpose_model(human_img.resize((384, 512)))
         model_parse, _ = parsing_model(human_img.resize((384, 512)))
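The middle of the crop_center branch (computing left, top, and crop_size) is elided in this diff, but the visible lines pin down the intent: a centered 3:4 crop whose geometry is reused when the result is pasted back at the end of start_tryon. A sketch of the standard computation (assumed; only the target_width line is visible above):

    from PIL import Image

    def center_crop_3x4(img: Image.Image):
        width, height = img.size
        target_width = int(min(width, height * (3 / 4)))    # visible in the hunk above
        target_height = int(min(height, width * (4 / 3)))
        left = (width - target_width) // 2
        top = (height - target_height) // 2
        crop_size = (target_width, target_height)
        cropped = img.crop((left, top, left + target_width, top + target_height))
        return cropped.resize((768, 1024)), crop_size, left, top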
@@ -386,6 +298,7 @@ def start_tryon(
     else:
         mask = Image.new("L", (768, 1024), 0)
 
+    # DensePose
     human_img_arg = _apply_exif_orientation(human_img.resize((384, 512)))
     human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
 
@@ -403,6 +316,7 @@ def start_tryon(
     pose_img = pose_img[:, :, ::-1]
     pose_img = Image.fromarray(pose_img).resize((768, 1024))
 
+    # Fixed prompts
     garment_des = "a garment"
     prompt_main = "model is wearing " + garment_des
     prompt_cloth = "a photo of " + garment_des
@@ -410,7 +324,6 @@ def start_tryon(
 
     denoise_steps = clamp_int(denoise_steps, 20, 40)
     seed = clamp_int(seed, 0, 999999)
-    guidance_scale = float(max(0.1, min(10.0, guidance_scale)))
 
     with torch.no_grad():
         if device == "cuda":
@@ -467,11 +380,11 @@ def start_tryon(
             height=1024,
             width=768,
             ip_adapter_image=garm_img.resize((768, 1024)),
-            guidance_scale=guidance_scale,
+            guidance_scale=2.0,
         )[0]
 
     out_img = images[0]
-    if crop_center and crop_size is not None:
+    if crop_center:
         out_img_rs = out_img.resize(crop_size)
         human_img_orig.paste(out_img_rs, (int(left), int(top)))
         return human_img_orig
@@ -479,7 +392,7 @@ def start_tryon(
 
 
 # =========================
-# UI
+# UI (API-like)
 # =========================
 CUSTOM_CSS = """
 footer {display:none !important;}
@@ -502,86 +415,26 @@ def on_gallery_select(files_list: List[str], evt: gr.SelectData):
     idx = max(0, min(idx, len(files_list) - 1))
     return files_list[idx], f"👕 Selected: {files_list[idx]}"
 
-def on_person_change(person_pil):
-    # Show one of:
-    # - ⚠️ (does not look like a person, or clearly bad quality)
-    # - ✅ (the photo is suitable)
-    # - "" (no photo)
-    _, msg = evaluate_person_photo(person_pil)
-    return msg or ""
-
-def tryon_ui_imageslider(person_pil, selected_filename):
-    yield (None, None), "⏳ Checking input..."
-
-    ok, msg = allow_call(2.5)
-    if not ok:
-        yield (None, None), msg
-        return
-
-    if person_pil is None:
-        yield (None, None), "❌ Upload a photo of a person"
-        return
-
-    is_person, verdict = evaluate_person_photo(person_pil)
-    if not is_person:
-        yield (None, None), verdict
-        return
-
-    if not selected_filename:
-        yield (None, None), "❌ Choose a garment (click a preview)"
-        return
-
-    garm = load_garment_pil(selected_filename)
-    if garm is None:
-        yield (None, None), "❌ Could not load the selected garment"
-        return
-
-    yield (None, None), "🧠 Analyzing the silhouette..."
-    time.sleep(0.05)
-    yield (None, None), "✨ Trying it on..."
-    try:
-        out = start_tryon(
-            human_pil=person_pil,
-            garm_img=garm,
-            auto_mask=True,
-            crop_center=True,
-            denoise_steps=25,
-            seed=42,
-            guidance_scale=2.0,
-        )
-        yield (person_pil, out), "✅ Done"
-    except Exception as e:
-        yield (None, None), f"❌ Error: {type(e).__name__}: {str(e)[:220]}"
-
-def tryon_ui_pair(person_pil, selected_filename):
-    yield None, None, "⏳ Checking input..."
+def tryon_ui(person_pil, selected_filename):
+    yield None, "⏳ Processing... (the first run may take longer)"
 
     ok, msg = allow_call(2.5)
     if not ok:
-        yield None, None, msg
+        yield None, msg
         return
 
     if person_pil is None:
-        yield None, None, "❌ Upload a photo of a person"
-        return
-
-    is_person, verdict = evaluate_person_photo(person_pil)
-    if not is_person:
-        yield None, None, verdict
+        yield None, "❌ Upload a photo of a person"
         return
-
     if not selected_filename:
-        yield None, None, "❌ Choose a garment (click a preview)"
+        yield None, "❌ Choose a garment (click a preview)"
         return
 
     garm = load_garment_pil(selected_filename)
     if garm is None:
-        yield None, None, "❌ Could not load the selected garment"
+        yield None, "❌ Could not load the selected garment"
         return
 
-    yield None, None, "🧠 Analyzing the silhouette..."
-    time.sleep(0.05)
-    yield None, None, "✨ Trying it on..."
     try:
         out = start_tryon(
             human_pil=person_pil,
@@ -590,11 +443,10 @@ def tryon_ui_pair(person_pil, selected_filename):
             crop_center=True,
             denoise_steps=25,
             seed=42,
-            guidance_scale=2.0,
         )
-        yield person_pil, out, "✅ Done"
+        yield out, "✅ Done"
     except Exception as e:
-        yield None, None, f"❌ Error: {type(e).__name__}: {str(e)[:220]}"
+        yield None, f"❌ Error: {type(e).__name__}: {str(e)[:220]}"
 
 
 # Preload garments
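tryon_ui being a generator is what makes the status textbox update before the result arrives: in Gradio 4.x, each `yield a, b` from a click handler pushes interim values to the outputs wired in run.click (here, the result image and the status box). A stripped-down illustration of the pattern (component names are placeholders, not from the app):

    import gradio as gr

    def handler(x):
        yield None, "⏳ working..."  # image stays empty, status updates immediately
        yield x, "✅ done"           # final image and final status

    with gr.Blocks() as sketch:
        inp = gr.Image(type="pil")
        img = gr.Image(type="pil")
        status = gr.Textbox()
        gr.Button("Run").click(handler, inputs=[inp], outputs=[img, status])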
@@ -612,10 +464,6 @@ with gr.Blocks(title="Virtual Try-On Rendez-vous", css=CUSTOM_CSS) as demo:
     with gr.Column():
         person = gr.Image(label="Person photo", type="pil", height=420)
 
-        # Photo assessment / warning (✅ / ⚠️ / empty)
-        warning = gr.Markdown("")
-        person.change(fn=on_person_change, inputs=[person], outputs=[warning])
-
     with gr.Row():
         refresh_btn = gr.Button("🔄 Refresh the garment catalog", variant="secondary")
         selected_label = gr.Markdown("👕 Choose a garment below")
@@ -632,25 +480,7 @@ with gr.Blocks(title="Virtual Try-On Rendez-vous", css=CUSTOM_CSS) as demo:
         status = gr.Textbox(value="Waiting...", interactive=False)
 
     with gr.Column():
-        gr.Markdown("### Result (Before / After)")
-        if hasattr(gr, "ImageSlider"):
-            compare = gr.ImageSlider(label="Before / After")
-            run.click(
-                fn=tryon_ui_imageslider,
-                inputs=[person, selected_garment_state],
-                outputs=[compare, status],
-                concurrency_limit=1,
-            )
-        else:
-            with gr.Row():
-                before_img = gr.Image(label="Before", type="pil", height=360)
-                after_img = gr.Image(label="After", type="pil", height=360)
-            run.click(
-                fn=tryon_ui_pair,
-                inputs=[person, selected_garment_state],
-                outputs=[before_img, after_img, status],
-                concurrency_limit=1,
-            )
+        out = gr.Image(label="Result", type="pil", height=760)
 
     garment_gallery.select(
         fn=on_gallery_select,
@@ -664,6 +494,13 @@ with gr.Blocks(title="Virtual Try-On Rendez-vous", css=CUSTOM_CSS) as demo:
         outputs=[garment_gallery, garment_files_state, selected_garment_state, status],
     )
 
+    run.click(
+        fn=tryon_ui,
+        inputs=[person, selected_garment_state],
+        outputs=[out, status],
+        concurrency_limit=1,
+    )
+
 demo.queue(max_size=20)
 
 if __name__ == "__main__":
@@ -674,5 +511,5 @@ if __name__ == "__main__":
         auth=APP_AUTH,
         max_threads=4,
         show_error=True,
-        show_api=False,
+        show_api=False,  # important: the API is hidden, but /api_info can still be hit; the patch above fixes that
     )
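A quick way to confirm the boolean-schema patch holds end to end: constructing a gradio_client.Client fetches /api_info, which is exactly where unpatched client versions crash, even with show_api=False. Illustrative only; the URL is a placeholder and auth= would be needed when APP_AUTH is set:

    from gradio_client import Client

    client = Client("http://127.0.0.1:7860")  # hypothetical local URL
    print(client.view_api(return_format="str"))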
 