iljung1106 commited on
Commit
0ff521b
·
1 Parent(s): 89b3ad1

webui_gradio: handle the data formats returned by Gradio galleries.

Browse files
Files changed (1) hide show
  1. webui_gradio.py +38 -4
webui_gradio.py CHANGED
@@ -316,6 +316,33 @@ def classify(
316
  return "✅ OK", rows, (face_pil if "face_pil" in locals() else None), (eyes_pil if "eyes_pil" in locals() else None)
317
 
318
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
319
  def add_prototype(
320
  label_name: str,
321
  images: List,
@@ -334,9 +361,14 @@ def add_prototype(
334
  return "❌ Upload at least 1 image."
335
 
336
  zs: List[torch.Tensor] = []
337
- for x in images:
 
338
  try:
339
- im = x if isinstance(x, Image.Image) else Image.fromarray(x)
 
 
 
 
340
  face_pil = None
341
  eyes_pil = None
342
  if ex is not None:
@@ -352,11 +384,13 @@ def add_prototype(
352
  et = _pil_to_tensor(eyes_pil, lm.T_e) if eyes_pil is not None else None
353
  z = embed_triview(lm, whole=wt, face=ft, eyes=et)
354
  zs.append(z)
355
- except Exception:
 
356
  continue
357
 
358
  if not zs:
359
- return "❌ Could not embed any uploaded images."
 
360
 
361
  center = torch.stack(zs, dim=0).mean(dim=0)
362
  lid = db.add_center(label_name, center)
 
316
  return "✅ OK", rows, (face_pil if "face_pil" in locals() else None), (eyes_pil if "eyes_pil" in locals() else None)
317
 
318
 
319
def _gallery_item_to_pil(item) -> Optional[Image.Image]:
    """Convert a single Gradio gallery item to a PIL image.

    Gradio gallery components have returned several payload shapes across
    versions: bare PIL images, ``(image, caption)`` tuples/lists, dicts keyed
    by ``"image"`` / ``"name"`` / ``"path"``, plain file-path strings, and
    numpy arrays.  Gradio 4 additionally yields FileData-like objects that
    expose the temp file via a ``.path`` attribute; those are handled last.

    Parameters:
        item: one entry of a gallery's value, in any of the formats above.

    Returns:
        A PIL image (file/array sources are converted to RGB; a PIL input is
        passed through with its original mode), or ``None`` when the item is
        missing or in an unrecognised format.

    Raises:
        Whatever ``PIL.Image.open`` raises for an unreadable/missing file —
        callers are expected to wrap this in try/except.
    """
    if item is None:
        return None
    # Already a PIL image: return unchanged (mode preserved).
    if isinstance(item, Image.Image):
        return item
    # (image, caption) tuple/list: recurse on the image part.
    if isinstance(item, (tuple, list)) and len(item) >= 1:
        return _gallery_item_to_pil(item[0])
    # Dict payloads: nested {"image": ...} or file-path style {"name"/"path": ...}.
    if isinstance(item, dict):
        if "image" in item:
            return _gallery_item_to_pil(item["image"])
        for key in ("name", "path"):
            # Guard against None/empty values so Image.open(None) can't blow up.
            if item.get(key):
                return Image.open(item[key]).convert("RGB")
        return None
    # Plain file path.
    if isinstance(item, str):
        return Image.open(item).convert("RGB")
    # Raw numpy array straight from the component.
    if isinstance(item, np.ndarray):
        return Image.fromarray(item).convert("RGB")
    # Gradio 4 FileData-like objects expose the temp file via `.path`.
    path = getattr(item, "path", None)
    if path:
        return Image.open(path).convert("RGB")
    return None
344
+
345
+
346
  def add_prototype(
347
  label_name: str,
348
  images: List,
 
361
  return "❌ Upload at least 1 image."
362
 
363
  zs: List[torch.Tensor] = []
364
+ errors: List[str] = []
365
+ for i, x in enumerate(images):
366
  try:
367
+ im = _gallery_item_to_pil(x)
368
+ if im is None:
369
+ errors.append(f"Image {i}: could not parse format {type(x)}")
370
+ continue
371
+
372
  face_pil = None
373
  eyes_pil = None
374
  if ex is not None:
 
384
  et = _pil_to_tensor(eyes_pil, lm.T_e) if eyes_pil is not None else None
385
  z = embed_triview(lm, whole=wt, face=ft, eyes=et)
386
  zs.append(z)
387
+ except Exception as e:
388
+ errors.append(f"Image {i}: {e}")
389
  continue
390
 
391
  if not zs:
392
+ err_detail = "; ".join(errors[:3]) if errors else "unknown error"
393
+ return f"❌ Could not embed any uploaded images. Details: {err_detail}"
394
 
395
  center = torch.stack(zs, dim=0).mean(dim=0)
396
  lid = db.add_center(label_name, center)