vlordier committed on
Commit
43f4332
·
verified ·
1 Parent(s): c144739

Upload hf_collect_teacher_metadata.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. hf_collect_teacher_metadata.py +269 -38
hf_collect_teacher_metadata.py CHANGED
@@ -34,10 +34,7 @@ from PIL import Image
34
  import cv2
35
  from typing import List, Dict, Optional
36
  import time
37
- import json
38
- from collections import defaultdict
39
- from transformers import AutoImageProcessor, AutoModelForImageClassification
40
- import subprocess
41
 
42
  # SAM 3D Body imports
43
  import sys
@@ -412,7 +409,240 @@ def compute_bbox_from_keypoints(keypoints_2d, indices):
412
  return [float(x1), float(y1), float(x2), float(y2)]
413
 
414
 
415
- def collect_for_image(estimator, nsfw_classifier, gaze_estimator, face_embedder, image_pil, image_id: str, out_dir: Path, faces: np.ndarray) -> Optional[Dict]:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
416
  """
417
  Process PIL image and save .npz with ALL outputs + metadata + NSFW scores per human.
418
 
@@ -694,48 +924,49 @@ def main():
694
  logger.info(f"✓ Dataset ready in {time.time() - start_ds:.1f}s")
695
  sys.stdout.flush()
696
 
697
- # Process
698
- out_dir = Path('teacher_labels')
699
- out_dir.mkdir(exist_ok=True)
700
-
701
- # Get mesh faces (same for all images)
702
- faces = teacher.faces
703
- logger.info(f"Mesh topology: {faces.shape[0]} faces")
704
- logger.info("="*60)
705
- logger.info("Starting image processing...")
706
- logger.info("="*60)
707
  sys.stdout.flush()
708
-
709
- processed = 0
710
- failed = 0
711
- no_detection = 0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
712
  metadata_records = []
 
713
  start_process = time.time()
714
 
715
- for i, sample in enumerate(ds, 1):
716
- image_pil = sample['image']
717
- image_id = sample.get('image_path', f'img_{i:06d}')
718
- image_id = Path(image_id).stem if image_id else f'img_{i:06d}'
719
-
720
- metadata = collect_for_image(teacher, nsfw_classifier, gaze_estimator, face_embedder, image_pil, image_id, out_dir, faces)
721
-
722
- if metadata:
723
- metadata_records.append(metadata)
724
-
725
- if metadata['status'] == 'success':
726
- processed += 1
727
- elif metadata['status'] == 'no_detection':
728
- no_detection += 1
729
- else:
730
- failed += 1
731
 
732
- if i % 10 == 0:
733
  elapsed = time.time() - start_process
 
734
  speed = processed / elapsed if elapsed > 0 else 0
735
- logger.info(f"[{i}] success={processed}, no_detect={no_detection}, failed={failed}, speed={speed:.2f} img/s")
736
  sys.stdout.flush()
737
-
738
  total_time = time.time() - start_process
 
 
 
 
739
  logger.info("="*60)
740
  logger.info(f"✓ Processing complete!")
741
  logger.info(f" Processed: {processed} images in {total_time:.1f}s ({processed/total_time:.2f} img/s)")
 
34
  import cv2
35
  from typing import List, Dict, Optional
36
  import time
37
+ import functools
 
 
 
38
 
39
  # SAM 3D Body imports
40
  import sys
 
409
  return [float(x1), float(y1), float(x2), float(y2)]
410
 
411
 
412
def process_batch(batch, teacher, nsfw_classifier, gaze_estimator, face_embedder, faces, out_dir):
    """
    Process a batch of samples (designed for ``dataset.map(batched=True)``).

    For each image: run the SAM 3D Body teacher, derive per-human face /
    hand / foot annotations, run NSFW classification over all human crops
    in a single batched call, then save one ``.npz`` per image and build a
    metadata record.

    Args:
        batch: dict with an ``'image'`` list (PIL images) and an optional
            ``'image_path'`` list of source paths.
        teacher: SAM 3D Body estimator exposing ``process_one_image(bgr)``.
        nsfw_classifier: wrapper exposing ``.enabled`` and a YOLO-style
            ``.model``, or ``None`` to skip NSFW scoring.
        gaze_estimator: object with ``estimate_gaze(pil_image, bbox)``, or ``None``.
        face_embedder: object with ``extract_embedding(pil_image, bbox, kpts2d)``,
            or ``None``.
        faces: (F, 3) mesh topology array shared by every saved mesh.
        out_dir: ``Path`` directory receiving the per-image ``.npz`` files.

    Returns:
        dict with a ``'metadata'`` list — one entry per input image, ``None``
        for images whose ``.npz`` already existed and were skipped.
    """
    # The commit removed the module-level `import json`, but this function
    # still serializes per-human metadata with json.dumps() — import locally
    # so the function is self-contained.
    import json

    # NSFW detector class-id -> label mapping (hoisted out of the per-box loop).
    nsfw_class_names = ['anus', 'make_love', 'nipple', 'penis', 'vagina']

    images = batch['image']
    # NOTE(review): these fallback ids restart at img_000000 for EVERY batch,
    # so batches lacking 'image_path' will collide on output filenames —
    # confirm the dataset always provides 'image_path'.
    image_paths = batch.get('image_path', [f'img_{i:06d}' for i in range(len(images))])

    # ---- First pass: run the teacher per image, collect per-human data ----
    # (nsfw_scores is left None here and filled by the batched second pass).
    humans_data_list = []
    outputs_list = []

    for image_pil in images:
        img_width, img_height = image_pil.size
        image_rgb = np.array(image_pil.convert('RGB'))
        image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)

        outputs = teacher.process_one_image(image_bgr)
        outputs_list.append(outputs)

        if not outputs:
            humans_data_list.append([])
            continue

        humans_data = []
        for human_idx, pred in enumerate(outputs):
            vertices = pred.get('pred_vertices')
            kpts2d = pred.get('pred_keypoints_2d')
            kpts3d = pred.get('pred_keypoints_3d')
            bbox = pred.get('bbox', None)

            # A face counts as present when nose/eye keypoints are non-zero
            # in 3D and their 2D projections fall inside the image bounds.
            has_face = False
            if kpts2d is not None and kpts3d is not None and len(kpts2d) >= 3 and len(kpts3d) >= 3:
                nose_2d, left_eye_2d, right_eye_2d = kpts2d[0], kpts2d[1], kpts2d[2]
                nose_3d, left_eye_3d, right_eye_3d = kpts3d[0], kpts3d[1], kpts3d[2]

                keypoints_valid_3d = (np.linalg.norm(nose_3d) > 1e-6 and
                                      np.linalg.norm(left_eye_3d) > 1e-6 and
                                      np.linalg.norm(right_eye_3d) > 1e-6)

                keypoints_in_image = True
                if keypoints_valid_3d:
                    for kp in (nose_2d, left_eye_2d, right_eye_2d):
                        if not (0 <= kp[0] < img_width and 0 <= kp[1] < img_height):
                            keypoints_in_image = False
                            break

                has_face = keypoints_valid_3d and keypoints_in_image

            face_orientation = None
            if has_face:
                face_orientation = compute_face_orientation(vertices, kpts3d)

            # Gaze and embedding are best-effort: failures degrade to None.
            gaze_direction = None
            if has_face and bbox is not None and gaze_estimator is not None:
                try:
                    gaze_direction = gaze_estimator.estimate_gaze(image_pil, bbox)
                except Exception:
                    gaze_direction = None

            face_embedding = None
            if has_face and bbox is not None and face_embedder is not None:
                try:
                    face_embedding = face_embedder.extract_embedding(image_pil, bbox, kpts2d)
                except Exception:
                    face_embedding = None

            # Part bboxes from fixed keypoint index ranges.
            # NOTE(review): the ranges assume hands at indices 21-61 and feet
            # at 15-20 — confirm against the teacher's keypoint layout.
            left_hand_bbox = right_hand_bbox = None
            left_foot_bbox = right_foot_bbox = None
            if kpts2d is not None:
                left_hand_bbox = compute_bbox_from_keypoints(kpts2d, list(range(42, 62)))
                right_hand_bbox = compute_bbox_from_keypoints(kpts2d, list(range(21, 41)))
                left_foot_bbox = compute_bbox_from_keypoints(kpts2d, [15, 16, 17])
                right_foot_bbox = compute_bbox_from_keypoints(kpts2d, [18, 19, 20])

            humans_data.append({
                'human_idx': human_idx,
                'bbox': bbox.tolist() if bbox is not None else None,
                'left_hand_bbox': left_hand_bbox,
                'right_hand_bbox': right_hand_bbox,
                'left_foot_bbox': left_foot_bbox,
                'right_foot_bbox': right_foot_bbox,
                'has_face': has_face,
                'face_orientation': face_orientation.tolist() if face_orientation is not None else None,
                'gaze_direction': gaze_direction,
                'face_embedding': face_embedding,
                'has_mesh': vertices is not None,
                'nsfw_scores': None  # Filled by the batched NSFW pass below
            })

        humans_data_list.append(humans_data)

    # ---- Second pass: one batched NSFW inference over every human crop ----
    crops = []
    crop_info = []  # parallel list of (img_idx, human_idx) per crop

    for img_idx, humans_data in enumerate(humans_data_list):
        image_pil = images[img_idx]
        for human_idx, human in enumerate(humans_data):
            if human['bbox'] is not None:
                x1, y1, x2, y2 = human['bbox']
                crops.append(image_pil.crop((x1, y1, x2, y2)))
                crop_info.append((img_idx, human_idx))

    if crops and nsfw_classifier is not None and nsfw_classifier.enabled:
        try:
            results = nsfw_classifier.model(crops, conf=0.2, iou=0.3, verbose=False)

            for (img_idx, human_idx), result in zip(crop_info, results):
                bbox = humans_data_list[img_idx][human_idx]['bbox']
                x1, y1, x2, y2 = bbox

                detections = []
                if result.boxes:
                    for box in result.boxes:
                        class_id = int(box.cls.item())
                        class_name = (nsfw_class_names[class_id]
                                      if class_id < len(nsfw_class_names)
                                      else f'class_{class_id}')

                        # Detection coords are crop-relative; shift back to
                        # absolute image coordinates.
                        dx1, dy1, dx2, dy2 = box.xyxy[0].tolist()
                        detections.append({
                            'class': class_name,
                            'confidence': box.conf.item(),
                            'bbox': [x1 + dx1, y1 + dy1, x1 + dx2, y1 + dy2]
                        })

                if detections:
                    humans_data_list[img_idx][human_idx]['nsfw_scores'] = detections
                else:
                    humans_data_list[img_idx][human_idx]['nsfw_scores'] = [
                        {'class': 'safe', 'confidence': 1.0, 'bbox': [x1, y1, x2, y2]}]
        except Exception as e:
            print(f"! Batched NSFW failed: {e}")
            # Fallback: mark every human that has a bbox as safe.
            for humans_data in humans_data_list:
                for human in humans_data:
                    if human['bbox'] is not None:
                        x1, y1, x2, y2 = human['bbox']
                        human['nsfw_scores'] = [
                            {'class': 'safe', 'confidence': 1.0, 'bbox': [x1, y1, x2, y2]}]

    # ---- Third pass: save per-image .npz files and build metadata ----
    metadatas = []
    for img_idx, (humans_data, image_path) in enumerate(zip(humans_data_list, image_paths)):
        image_pil = images[img_idx]
        image_id = Path(image_path).stem if image_path else f'img_{img_idx:06d}'
        img_width, img_height = image_pil.size

        out_path = out_dir / f"{image_id}.npz"
        if out_path.exists():
            # Already processed in a previous run; skipped entries record None.
            metadatas.append(None)
            continue

        if not humans_data:
            metadata = {
                'image_id': image_id,
                'num_humans': 0,
                'image_width': img_width,
                'image_height': img_height,
                'processing_time_ms': 0,  # Not tracked in batch mode
                'status': 'no_detection',
                'humans': []
            }
        else:
            num_humans = len(humans_data)

            # NOTE(review): only the FIRST detected human's mesh/keypoints are
            # saved; other humans exist only in humans_metadata.
            pred = outputs_list[img_idx][0]
            vertices = pred.get('pred_vertices')
            cam_t = pred.get('pred_cam_t')
            focal_length = pred.get('focal_length')
            kpts2d = pred.get('pred_keypoints_2d')
            kpts3d = pred.get('pred_keypoints_3d')
            bbox_0 = pred.get('bbox', None)

            np.savez_compressed(
                out_path,
                vertices=vertices.astype(np.float32) if vertices is not None else None,
                faces=faces.astype(np.int32),
                cam_t=cam_t.astype(np.float32) if cam_t is not None else None,
                focal_length=np.array([focal_length], dtype=np.float32) if focal_length is not None else None,
                keypoints_2d=kpts2d.astype(np.float32) if kpts2d is not None else None,
                keypoints_3d=kpts3d.astype(np.float32) if kpts3d is not None else None,
                bbox=np.array(bbox_0, dtype=np.float32) if bbox_0 is not None else None,
                image_id=image_id,
                num_humans=num_humans,
                image_width=img_width,
                image_height=img_height,
                humans_metadata=json.dumps(humans_data)
            )

            metadata = {
                'image_id': image_id,
                'num_humans': num_humans,
                'image_width': img_width,
                'image_height': img_height,
                'processing_time_ms': 0,  # Not tracked in batch mode
                'status': 'success',
                'npz_size_bytes': out_path.stat().st_size,
                'humans': humans_data
            }

        metadatas.append(metadata)

    return {'metadata': metadatas}
646
  """
647
  Process PIL image and save .npz with ALL outputs + metadata + NSFW scores per human.
648
 
 
924
  logger.info(f"✓ Dataset ready in {time.time() - start_ds:.1f}s")
925
  sys.stdout.flush()
926
 
927
+ # Process using dataset.map() for efficient batching
928
+ batch_size = 4 # Adjust based on GPU memory (higher = more efficient)
929
+ logger.info(f"Processing with batch_size={batch_size} using dataset.map()")
 
 
 
 
 
 
 
930
  sys.stdout.flush()
931
+
932
+ process_batch_partial = functools.partial(
933
+ process_batch,
934
+ teacher=teacher,
935
+ nsfw_classifier=nsfw_classifier,
936
+ gaze_estimator=gaze_estimator,
937
+ face_embedder=face_embedder,
938
+ faces=faces,
939
+ out_dir=out_dir
940
+ )
941
+
942
+ processed_ds = ds.map(
943
+ process_batch_partial,
944
+ batched=True,
945
+ batch_size=batch_size,
946
+ remove_columns=ds.column_names # Remove original columns, keep only metadata
947
+ )
948
+
949
+ # Collect metadata from processed dataset
950
  metadata_records = []
951
+ batch_count = 0
952
  start_process = time.time()
953
 
954
+ for batch_result in processed_ds:
955
+ metadata_records.extend(batch_result['metadata'])
956
+ batch_count += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
957
 
958
+ if batch_count % 10 == 0:
959
  elapsed = time.time() - start_process
960
+ processed = sum(1 for m in metadata_records if m and m['status'] == 'success')
961
  speed = processed / elapsed if elapsed > 0 else 0
962
+ logger.info(f"[{batch_count} batches] success={processed}, speed={speed:.2f} img/s")
963
  sys.stdout.flush()
964
+
965
  total_time = time.time() - start_process
966
+ processed = sum(1 for m in metadata_records if m and m['status'] == 'success')
967
+ no_detection = sum(1 for m in metadata_records if m and m['status'] == 'no_detection')
968
+ failed = sum(1 for m in metadata_records if m and m['status'] == 'error')
969
+
970
  logger.info("="*60)
971
  logger.info(f"✓ Processing complete!")
972
  logger.info(f" Processed: {processed} images in {total_time:.1f}s ({processed/total_time:.2f} img/s)")