vlordier committed on
Commit
793169b
·
verified ·
1 Parent(s): 43f4332

Upload hf_collect_teacher_metadata.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. hf_collect_teacher_metadata.py +14 -207
hf_collect_teacher_metadata.py CHANGED
@@ -426,10 +426,12 @@ def process_batch(batch, teacher, nsfw_classifier, gaze_estimator, face_embedder
426
  # First pass: process images and collect humans data (without NSFW)
427
  humans_data_list = []
428
  outputs_list = []
 
429
 
430
  for img_idx, image_pil in enumerate(images):
431
  img_width, img_height = image_pil.size
432
  image_rgb = np.array(image_pil.convert('RGB'))
 
433
  image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
434
 
435
  outputs = teacher.process_one_image(image_bgr)
@@ -536,8 +538,11 @@ def process_batch(batch, teacher, nsfw_classifier, gaze_estimator, face_embedder
536
  bbox = human['bbox']
537
  if bbox is not None:
538
  x1, y1, x2, y2 = bbox
539
- crop = image_pil.crop((x1, y1, x2, y2))
540
- crops.append(crop)
 
 
 
541
  crop_info.append((img_idx, human_idx))
542
 
543
  if crops and nsfw_classifier is not None and nsfw_classifier.enabled:
@@ -643,211 +648,6 @@ def process_batch(batch, teacher, nsfw_classifier, gaze_estimator, face_embedder
643
  metadatas.append(metadata)
644
 
645
  return {'metadata': metadatas}
646
- """
647
- Process PIL image and save .npz with ALL outputs + metadata + NSFW scores per human.
648
-
649
- Returns metadata dict or None if failed.
650
- """
651
- out_path = out_dir / f"{image_id}.npz"
652
- if out_path.exists():
653
- return None
654
-
655
- # Get image dimensions
656
- img_width, img_height = image_pil.size
657
-
658
- # Convert PIL to numpy array
659
- image_rgb = np.array(image_pil.convert('RGB'))
660
- image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
661
-
662
- start_time = time.time()
663
-
664
- try:
665
- # Use process_one_image
666
- outputs = estimator.process_one_image(image_bgr)
667
-
668
- processing_time = time.time() - start_time
669
-
670
- if not outputs:
671
- # No humans detected
672
- return {
673
- 'image_id': image_id,
674
- 'num_humans': 0,
675
- 'image_width': img_width,
676
- 'image_height': img_height,
677
- 'processing_time_ms': int(processing_time * 1000),
678
- 'status': 'no_detection',
679
- 'humans': []
680
- }
681
-
682
- num_humans = len(outputs)
683
-
684
- # Process each detected human
685
- humans_data = []
686
- for human_idx, pred in enumerate(outputs):
687
- # Get 3D body outputs
688
- vertices = pred.get('pred_vertices')
689
- cam_t = pred.get('pred_cam_t')
690
- focal_length = pred.get('focal_length')
691
- kpts2d = pred.get('pred_keypoints_2d')
692
- kpts3d = pred.get('pred_keypoints_3d')
693
-
694
- # Get bounding box from detection
695
- bbox = pred.get('bbox', None) # [x1, y1, x2, y2]
696
-
697
- # Check if we have valid face keypoints (nose, eyes) in the image
698
- has_face = False
699
- if kpts2d is not None and kpts3d is not None and len(kpts2d) >= 3 and len(kpts3d) >= 3:
700
- # Get 2D projected keypoints (nose, left eye, right eye)
701
- nose_2d = kpts2d[0] # [x, y]
702
- left_eye_2d = kpts2d[1]
703
- right_eye_2d = kpts2d[2]
704
-
705
- # Get 3D keypoints to check they exist
706
- nose_3d = kpts3d[0]
707
- left_eye_3d = kpts3d[1]
708
- right_eye_3d = kpts3d[2]
709
-
710
- # Check if face keypoints are valid:
711
- # 1. 3D keypoints are not at origin
712
- # 2. 2D keypoints are inside image bounds
713
- keypoints_valid_3d = (np.linalg.norm(nose_3d) > 1e-6 and
714
- np.linalg.norm(left_eye_3d) > 1e-6 and
715
- np.linalg.norm(right_eye_3d) > 1e-6)
716
-
717
- keypoints_in_image = True
718
- if keypoints_valid_3d:
719
- # Check if face keypoints are within image bounds
720
- for kp in [nose_2d, left_eye_2d, right_eye_2d]:
721
- if (kp[0] < 0 or kp[0] >= img_width or
722
- kp[1] < 0 or kp[1] >= img_height):
723
- keypoints_in_image = False
724
- break
725
-
726
- has_face = keypoints_valid_3d and keypoints_in_image
727
-
728
- # Compute face orientation from mesh (only if face visible in image)
729
- face_orientation = None
730
- if has_face:
731
- face_orientation = compute_face_orientation(vertices, kpts3d)
732
-
733
- # Estimate gaze direction (only if face visible and bbox available)
734
- gaze_direction = None
735
- if has_face and bbox is not None and gaze_estimator is not None:
736
- try:
737
- gaze_direction = gaze_estimator.estimate_gaze(image_pil, bbox)
738
- except Exception as e:
739
- print(f"! Gaze estimation failed for {image_id} human {human_idx}: {e}")
740
- gaze_direction = None
741
-
742
- # Extract face embedding (only if face visible)
743
- face_embedding = None
744
- if has_face and bbox is not None and face_embedder is not None:
745
- try:
746
- face_embedding = face_embedder.extract_embedding(image_pil, bbox, kpts2d)
747
- except Exception as e:
748
- print(f"! Face embedding extraction failed for {image_id} human {human_idx}: {e}")
749
- face_embedding = None
750
-
751
- # NSFW classification for this human
752
- nsfw_scores = None
753
- if bbox is not None and nsfw_classifier is not None:
754
- try:
755
- nsfw_scores = nsfw_classifier.classify_crop(image_pil, bbox)
756
- except Exception as e:
757
- print(f"! NSFW classification failed for {image_id} human {human_idx}: {e}")
758
- nsfw_scores = None
759
-
760
- # Compute hand and foot bboxes from keypoints
761
- left_hand_bbox = None
762
- right_hand_bbox = None
763
- left_foot_bbox = None
764
- right_foot_bbox = None
765
-
766
- if kpts2d is not None:
767
- # Left hand keypoints: indices 42-61 (left_thumb4 to left_pinky_finger_third_joint)
768
- left_hand_indices = list(range(42, 62))
769
- left_hand_bbox = compute_bbox_from_keypoints(kpts2d, left_hand_indices)
770
-
771
- # Right hand keypoints: indices 21-40 (right_thumb4 to right_pinky_finger_third_joint)
772
- right_hand_indices = list(range(21, 41))
773
- right_hand_bbox = compute_bbox_from_keypoints(kpts2d, right_hand_indices)
774
-
775
- # Left foot keypoints: indices 15-17 (left_big_toe, left_small_toe, left_heel)
776
- left_foot_indices = [15, 16, 17]
777
- left_foot_bbox = compute_bbox_from_keypoints(kpts2d, left_foot_indices)
778
-
779
- # Right foot keypoints: indices 18-20 (right_big_toe, right_small_toe, right_heel)
780
- right_foot_indices = [18, 19, 20]
781
- right_foot_bbox = compute_bbox_from_keypoints(kpts2d, right_foot_indices)
782
-
783
- humans_data.append({
784
- 'human_idx': human_idx,
785
- 'bbox': bbox.tolist() if bbox is not None else None,
786
- 'left_hand_bbox': left_hand_bbox,
787
- 'right_hand_bbox': right_hand_bbox,
788
- 'left_foot_bbox': left_foot_bbox,
789
- 'right_foot_bbox': right_foot_bbox,
790
- 'has_face': has_face,
791
- 'face_orientation': face_orientation.tolist() if face_orientation is not None else None,
792
- 'gaze_direction': gaze_direction,
793
- 'face_embedding': face_embedding,
794
- 'nsfw_scores': nsfw_scores,
795
- 'has_mesh': vertices is not None
796
- })
797
-
798
- # Save first detected person's mesh (or could save all in future)
799
- pred = outputs[0]
800
- vertices = pred.get('pred_vertices')
801
- cam_t = pred.get('pred_cam_t')
802
- focal_length = pred.get('focal_length')
803
- kpts2d = pred.get('pred_keypoints_2d')
804
- kpts3d = pred.get('pred_keypoints_3d')
805
- bbox_0 = pred.get('bbox', None)
806
-
807
- # Save to npz with all humans metadata
808
- np.savez_compressed(
809
- out_path,
810
- # First human mesh data
811
- vertices=vertices.astype(np.float32) if vertices is not None else None,
812
- faces=faces.astype(np.int32),
813
- cam_t=cam_t.astype(np.float32) if cam_t is not None else None,
814
- focal_length=np.array([focal_length], dtype=np.float32) if focal_length is not None else None,
815
- keypoints_2d=kpts2d.astype(np.float32) if kpts2d is not None else None,
816
- keypoints_3d=kpts3d.astype(np.float32) if kpts3d is not None else None,
817
- bbox=np.array(bbox_0, dtype=np.float32) if bbox_0 is not None else None,
818
- # Image metadata
819
- image_id=image_id,
820
- num_humans=num_humans,
821
- image_width=img_width,
822
- image_height=img_height,
823
- # All humans data (as JSON string in npz)
824
- humans_metadata=json.dumps(humans_data)
825
- )
826
-
827
- return {
828
- 'image_id': image_id,
829
- 'num_humans': num_humans,
830
- 'image_width': img_width,
831
- 'image_height': img_height,
832
- 'processing_time_ms': int(processing_time * 1000),
833
- 'status': 'success',
834
- 'npz_size_bytes': out_path.stat().st_size,
835
- 'humans': humans_data
836
- }
837
-
838
- except Exception as e:
839
- processing_time = time.time() - start_time
840
- print(f"! Error on {image_id}: {e}")
841
- return {
842
- 'image_id': image_id,
843
- 'num_humans': 0,
844
- 'image_width': img_width,
845
- 'image_height': img_height,
846
- 'processing_time_ms': int(processing_time * 1000),
847
- 'status': 'error',
848
- 'error_message': str(e),
849
- 'humans': []
850
- }
851
 
852
 
853
  def main():
@@ -924,6 +724,13 @@ def main():
924
  logger.info(f"✓ Dataset ready in {time.time() - start_ds:.1f}s")
925
  sys.stdout.flush()
926
 
 
 
 
 
 
 
 
927
  # Process using dataset.map() for efficient batching
928
  batch_size = 4 # Adjust based on GPU memory (higher = more efficient)
929
  logger.info(f"Processing with batch_size={batch_size} using dataset.map()")
 
426
  # First pass: process images and collect humans data (without NSFW)
427
  humans_data_list = []
428
  outputs_list = []
429
+ image_rgbs = [] # cache RGB numpy arrays for later crops
430
 
431
  for img_idx, image_pil in enumerate(images):
432
  img_width, img_height = image_pil.size
433
  image_rgb = np.array(image_pil.convert('RGB'))
434
+ image_rgbs.append(image_rgb)
435
  image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
436
 
437
  outputs = teacher.process_one_image(image_bgr)
 
538
  bbox = human['bbox']
539
  if bbox is not None:
540
  x1, y1, x2, y2 = bbox
541
+ ix1, iy1, ix2, iy2 = map(lambda v: max(0, int(round(v))), [x1, y1, x2, y2])
542
+ ix1, iy1 = min(ix1, image_rgbs[img_idx].shape[1]-1), min(iy1, image_rgbs[img_idx].shape[0]-1)
543
+ ix2, iy2 = max(ix1+1, min(ix2, image_rgbs[img_idx].shape[1])), max(iy1+1, min(iy2, image_rgbs[img_idx].shape[0]))
544
+ crop_np = np.ascontiguousarray(image_rgbs[img_idx][iy1:iy2, ix1:ix2])
545
+ crops.append(crop_np)
546
  crop_info.append((img_idx, human_idx))
547
 
548
  if crops and nsfw_classifier is not None and nsfw_classifier.enabled:
 
648
  metadatas.append(metadata)
649
 
650
  return {'metadata': metadatas}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
651
 
652
 
653
  def main():
 
724
  logger.info(f"✓ Dataset ready in {time.time() - start_ds:.1f}s")
725
  sys.stdout.flush()
726
 
727
+ # Prepare output directory and shared mesh topology
728
+ out_dir = Path('teacher_labels')
729
+ out_dir.mkdir(exist_ok=True)
730
+ faces = teacher.faces
731
+ logger.info(f"Mesh topology: {faces.shape[0]} faces")
732
+ sys.stdout.flush()
733
+
734
  # Process using dataset.map() for efficient batching
735
  batch_size = 4 # Adjust based on GPU memory (higher = more efficient)
736
  logger.info(f"Processing with batch_size={batch_size} using dataset.map()")