# Source-page residue (Hugging Face file-viewer header), commented out so the
# file parses as Python:
#   EgoPoseVR / inspect_dataset.py
#   AplusX's picture
#   Add files using upload-large-folder tool
#   ba80d27 verified
#!/usr/bin/env python3
"""
EgomotionData Dataset Inspector
================================
This script reads and displays the structure, statistics, and field meanings of the EgomotionData dataset.
Usage:
python inspect_dataset.py # View dataset overview + random sample details
python inspect_dataset.py --sample PATH.npz # View details of a specific npz file
python inspect_dataset.py --overview-only # View only dataset overview statistics
python inspect_dataset.py --check-consistency N # Randomly check consistency of N files
"""
import argparse
import os
import sys
import random
import numpy as np
from pathlib import Path
from collections import defaultdict
# ============================================================================
# Data Field Descriptions
# ============================================================================
# Maps each field stored in a clip's .npz file to a human-readable description
# used by the schema printer (print_data_schema) and the per-file inspector
# (inspect_single_npz):
#   "en"     - one-line English summary
#   "detail" - multi-line shape/semantics explanation
# In the shape notes, T = frames per clip and J = number of joints (22).
FIELD_DESCRIPTIONS = {
    # Network input
    "input_rgbd": {
        "en": "Input RGBD image sequence",
        "detail": (
            "Shape (T, 4, H, W): T=frames, 4=RGB+Depth channels, H=height, W=width.\n"
            " - Channels 0-2: RGB color image, normalized to [0, 1]\n"
            " - Channel 3: Depth map"
        ),
    },
    # Ground-truth supervision targets (gt_* fields)
    "gt_joints_relativeCam_2Dpos": {
        "en": "GT joint 2D positions relative to camera (projected)",
        "detail": (
            "Shape (T, J, 2): T=frames, J=joints(22), 2=pixel coords (x, y).\n"
            " 3D body joints projected onto the camera image plane."
        ),
    },
    "gt_joints_relativePelvis_3Dpos": {
        "en": "GT joint 3D positions relative to pelvis",
        "detail": (
            "Shape (T, J, 3): T=frames, J=joints(22), 3=(x, y, z).\n"
            " 3D joint positions in pelvis-centered local coordinate system.\n"
            " Pelvis joint is always at origin (0, 0, 0)."
        ),
    },
    "gt_pelvis_camera_3Dpos": {
        "en": "GT pelvis 3D position in camera coordinate system",
        "detail": (
            "Shape (T, 3): T=frames, 3=(x, y, z).\n"
            " Absolute 3D position of pelvis in camera space."
        ),
    },
    "gt_pelvis_camera_4Drot": {
        "en": "GT pelvis rotation in camera space (quaternion)",
        "detail": (
            "Shape (T, 4): T=frames, 4=quaternion.\n"
            " Rotation of pelvis joint in camera coordinate system."
        ),
    },
    # Head-mounted-display tracking signals
    "hmd_position_global_full_gt_list": {
        "en": "HMD global position/orientation data",
        "detail": (
            "Shape (T, 54): T=frames, 54-dim vector.\n"
            " Global pose information from HMD tracking (head + hands)."
        ),
    },
    "head_global_trans_list": {
        "en": "Head global transformation matrices",
        "detail": (
            "Shape (T, 4, 4): T=frames, 4x4 homogeneous transformation matrix.\n"
            " Head pose (rotation + translation) in global coordinates per frame."
        ),
    },
    # Stored as a pickled object array holding a dict (see inspect_single_npz)
    "body_parms_list": {
        "en": "SMPL body model parameters",
        "detail": (
            "Dictionary with sub-fields:\n"
            " - root_orient (T, 3): Root (pelvis) rotation in axis-angle\n"
            " - pose_body (T, 63): Body joint poses (21 joints × 3 axis-angle)\n"
            " - trans (T, 3): Global translation (x, y, z)"
        ),
    },
    # Outputs of a pretrained pose estimator (pred_* fields)
    "pred_2d": {
        "en": "Predicted 2D joint positions",
        "detail": (
            "Shape (T, J, 2): T=frames, J=joints(22), 2=pixel coords (x, y).\n"
            " 2D joint detections from a pretrained pose estimator."
        ),
    },
    "pred_3d": {
        "en": "Predicted 3D joint positions",
        "detail": (
            "Shape (T, J, 3): T=frames, J=joints(22), 3=(x, y, z).\n"
            " 3D joint predictions from a pretrained pose estimator."
        ),
    },
}
def print_separator(char="=", length=80):
    """Print a horizontal rule made of `length` copies of `char`."""
    rule = char * length
    print(rule)


def print_header(title):
    """Print `title` framed by two full-width '=' separator lines."""
    for piece in ("=" * 80, f" {title}", "=" * 80):
        print(piece)
def get_dataset_root():
    """Return the dataset root directory (the directory holding this script)."""
    this_file = Path(__file__)
    return this_file.parent
def load_path_file(filepath):
    """Return the non-empty, whitespace-stripped lines of `filepath`.

    A missing file yields an empty list rather than raising.
    """
    if not filepath.exists():
        return []
    entries = []
    for raw_line in filepath.read_text().splitlines():
        entry = raw_line.strip()
        if entry:
            entries.append(entry)
    return entries
def dataset_overview(root):
    """Print dataset overview statistics.

    Walks the Scene*/ directories under `root`, counting sequence
    directories and npz clips per scene, tallying motion-capture sources
    inferred from sequence directory names, and reporting the sizes of the
    train/val/test/all splits read from the `*_npz_paths.txt` lists.

    Parameters
    ----------
    root : Path
        Dataset root directory (contains Scene*/ and *_npz_paths.txt).
    """
    print_header("Dataset Overview")
    # Scene statistics. Use pathlib throughout instead of the original mix
    # of os.listdir/os.path.isdir with Path operands.
    scenes = sorted(entry.name for entry in root.iterdir()
                    if entry.name.startswith("Scene") and entry.is_dir())
    print(f"\nNumber of scenes: {len(scenes)}")
    total_seqs = 0
    total_npz = 0
    source_counter = defaultdict(int)
    for scene in scenes:
        scene_path = root / scene
        seqs = [entry.name for entry in scene_path.iterdir() if entry.is_dir()]
        npz_count = 0
        for seq in seqs:
            seq_path = scene_path / seq
            npz_count += sum(1 for f in seq_path.iterdir()
                             if f.name.endswith(".npz"))
            # Infer the mocap data source from the sequence directory name.
            if "CMU" in seq:
                source_counter["CMU"] += 1
            elif "BioMotionLab_NTroje" in seq:
                source_counter["BioMotionLab_NTroje"] += 1
            else:
                source_counter["Other"] += 1
        total_seqs += len(seqs)
        total_npz += npz_count
        print(f" {scene}: {len(seqs):>4} sequences, {npz_count:>5} npz files")
    print("\nTotal:")
    print(f" Sequences: {total_seqs}")
    print(f" NPZ files: {total_npz}")
    # Data source statistics, most frequent first.
    print("\nData Sources:")
    for src, cnt in sorted(source_counter.items(), key=lambda x: -x[1]):
        print(f" {src}: {cnt} sequences")
    # Train/Val/Test split sizes from the path-list files.
    print("\nTrain/Val/Test Split:")
    for split in ["train", "val", "test", "all"]:
        path_file = root / f"{split}_npz_paths.txt"
        paths = load_path_file(path_file)
        print(f" {split:>5}: {len(paths):>6} samples")
    print()
def inspect_single_npz(npz_path, verbose=True):
    """Inspect the detailed structure of a single npz file.

    Parameters
    ----------
    npz_path : str or Path
        Path to the .npz clip to inspect.
    verbose : bool
        When True, also pretty-print each field, annotated with its entry
        from FIELD_DESCRIPTIONS when one exists.

    Returns
    -------
    dict
        Maps field name -> metadata dict ("key", "dtype", "shape", "type",
        plus either "sub_fields" statistics for a pickled dict field, or
        "min"/"max"/"mean"/"std" for non-empty numeric arrays).
    """
    info = {}
    # Context manager guarantees the file handle is closed even if a field
    # fails to deserialize (previously data.close() was skipped on error).
    with np.load(npz_path, allow_pickle=True) as data:
        keys = list(data.keys())
        if verbose:
            print_header("NPZ File Details")
            print(f"Path: {npz_path}")
            print(f"Size: {os.path.getsize(npz_path) / 1024 / 1024:.2f} MB")
            print(f"Number of fields: {len(keys)}")
            print()
        for key in keys:
            arr = data[key]
            field_info = {"key": key, "dtype": str(arr.dtype), "shape": arr.shape}
            # A size-1 object array is a pickled Python object; body_parms_list
            # stores its dict of SMPL parameter arrays this way.
            obj = arr.item() if (arr.dtype == object and arr.size == 1) else None
            if isinstance(obj, dict):
                field_info["type"] = "dict"
                field_info["sub_fields"] = {}
                for k, v in obj.items():
                    field_info["sub_fields"][k] = {
                        "shape": v.shape,
                        "dtype": str(v.dtype),
                        "min": float(v.min()),
                        "max": float(v.max()),
                        "mean": float(v.mean()),
                    }
            else:
                # Bug fix: object arrays holding a non-dict value previously
                # left "type" unset, so the verbose printer below crashed with
                # a KeyError. Everything non-dict is classified as "array";
                # statistics are only computed for non-empty numeric dtypes.
                field_info["type"] = "array"
                if arr.size > 0 and arr.dtype.kind in ("f", "i", "u"):
                    field_info["min"] = float(arr.min())
                    field_info["max"] = float(arr.max())
                    field_info["mean"] = float(arr.mean())
                    field_info["std"] = float(arr.std())
            info[key] = field_info
            if verbose:
                desc = FIELD_DESCRIPTIONS.get(key, {})
                print_separator("-", 70)
                print(f"Field: {key}")
                if desc:
                    print(f" Desc: {desc['en']}")
                print(f" Dtype: {arr.dtype}")
                print(f" Shape: {arr.shape}")
                if field_info["type"] == "dict":
                    print(" Sub-fields:")
                    for k, v_info in field_info["sub_fields"].items():
                        print(f" - {k}: shape={v_info['shape']}, dtype={v_info['dtype']}, "
                              f"range=[{v_info['min']:.4f}, {v_info['max']:.4f}], mean={v_info['mean']:.4f}")
                elif "min" in field_info:
                    print(f" Range: [{field_info['min']:.4f}, {field_info['max']:.4f}]")
                    print(f" Mean: {field_info['mean']:.4f}, Std: {field_info['std']:.4f}")
                if desc and "detail" in desc:
                    print(" Details:")
                    for line in desc["detail"].split("\n"):
                        print(f" {line}")
                print()
    return info
def print_data_schema():
    """Print the complete data schema documentation to stdout."""
    print_header("Data Schema Documentation")
    print("""
Each .npz file represents a motion clip with T consecutive frames (typically T=100).
The data is designed for egomotion estimation: recovering 3D human pose from egocentric RGBD views.
Directory Structure:
EgomotionData/
├── Scene{0-6}/ # 7 different virtual scenes
│ └── AllDataPath_{Source}_{split}_{id}/ # motion sequence directory
│ └── {clip_id}.npz # motion clip file
├── train_npz_paths.txt # train set path list
├── val_npz_paths.txt # validation set path list
├── test_npz_paths.txt # test set path list
└── all_npz_paths.txt # all paths list
Data Sources:
- CMU: CMU Graphics Lab Motion Capture Database
- BioMotionLab_NTroje: BioMotionLab (NTroje) Dataset
""")
    print("Field Details:")
    print_separator("-", 70)
    # One entry per documented npz field, summary first, then detail lines.
    for field_name, meta in FIELD_DESCRIPTIONS.items():
        print(f"\n[{field_name}]")
        print(f" English: {meta['en']}")
        print(" Details:")
        for detail_line in meta["detail"].split("\n"):
            print(f" {detail_line}")
    print()
    # One print per row keeps the joint table exactly as authored.
    joint_table = (
        "Joint Definition (22 joints):",
        " SMPL model uses 22 joints, typical order:",
        " 0:Pelvis 1:L_Hip 2:R_Hip 3:Spine1 4:L_Knee 5:R_Knee",
        " 6:Spine2 7:L_Ankle 8:R_Ankle 9:Spine3 10:L_Foot 11:R_Foot",
        " 12:Neck 13:L_Collar 14:R_Collar 15:Head 16:L_Shoulder 17:R_Shoulder",
        " 18:L_Elbow 19:R_Elbow 20:L_Wrist 21:R_Wrist",
    )
    for row in joint_table:
        print(row)
    print()
def check_consistency(root, n_samples=10):
    """Randomly sample npz files and check their structural consistency.

    For each sampled path from all_npz_paths.txt, verifies that (a) the
    file exists, (b) its field set matches FIELD_DESCRIPTIONS, and (c) the
    per-frame fields all share input_rgbd's frame count T. Findings are
    printed as they are found and summarized at the end.

    Parameters
    ----------
    root : Path
        Dataset root directory.
    n_samples : int
        Number of files to sample (capped at the number of listed paths).
    """
    print_header(f"Consistency Check (N={n_samples})")
    all_paths_file = root / "all_npz_paths.txt"
    all_paths = load_path_file(all_paths_file)
    if not all_paths:
        print("Error: all_npz_paths.txt is empty or does not exist")
        return
    samples = random.sample(all_paths, min(n_samples, len(all_paths)))
    issues = []
    for i, rel_path in enumerate(samples, 1):
        # Path format: EgomotionData/Scene0/.../1.npz -- resolve against the
        # parent dir first, then against root with the first component dropped.
        npz_path = root.parent / rel_path
        if not npz_path.exists():
            npz_path = root / "/".join(rel_path.split("/")[1:])
        if not npz_path.exists():
            issues.append(f"File does not exist: {rel_path}")
            continue
        try:
            # Bug fix: use a context manager so the npz file handle is closed
            # even when a check raises (previously data.close() was skipped
            # whenever the except branch was taken, leaking the handle).
            with np.load(npz_path, allow_pickle=True) as data:
                keys = set(data.keys())
                expected_keys = set(FIELD_DESCRIPTIONS.keys())
                missing = expected_keys - keys
                extra = keys - expected_keys
                # Check frame-count consistency against the input stream.
                T = data["input_rgbd"].shape[0]
                frame_checks = {
                    name: data[name].shape[0]
                    for name in (
                        "gt_joints_relativeCam_2Dpos",
                        "gt_joints_relativePelvis_3Dpos",
                        "gt_pelvis_camera_3Dpos",
                        "pred_2d",
                        "pred_3d",
                    )
                }
                frame_mismatch = {k: v for k, v in frame_checks.items() if v != T}
                status = "OK" if (not missing and not extra and not frame_mismatch) else "WARN"
                print(f" [{i}/{len(samples)}] {status} - {rel_path} (T={T})")
                if missing:
                    msg = f" Missing fields: {missing}"
                    print(f" {msg}")
                    issues.append(msg)
                if extra:
                    print(f" Extra fields: {extra}")
                if frame_mismatch:
                    msg = f" Frame count mismatch: {frame_mismatch}"
                    print(f" {msg}")
                    issues.append(msg)
        except Exception as e:
            msg = f"Failed to read: {rel_path} - {e}"
            print(f" [{i}/{len(samples)}] ERROR - {msg}")
            issues.append(msg)
    print()
    if issues:
        print(f"Found {len(issues)} issues:")
        for issue in issues:
            print(f" - {issue}")
    else:
        print("All samples passed consistency check.")
    print()
def main():
    """CLI entry point: parse arguments and dispatch to the inspectors.

    Modes, checked in this order:
      --schema          print schema documentation and exit
      --sample PATH     inspect one npz file and exit
      default           overview, then schema + random sample + consistency
      --overview-only   stop after the overview
    """
    parser = argparse.ArgumentParser(
        description="EgomotionData Dataset Inspector",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
python inspect_dataset.py # Overview + sample check
python inspect_dataset.py --overview-only # Show only statistics overview
python inspect_dataset.py --schema # Show data schema documentation
python inspect_dataset.py --sample Scene0/xxx/1.npz # Check specific file
python inspect_dataset.py --check-consistency 20 # Check 20 random files
""",
    )
    parser.add_argument("--sample", type=str, default=None,
                        help="Specify npz file path to inspect")
    parser.add_argument("--overview-only", action="store_true",
                        help="Show only dataset overview statistics")
    parser.add_argument("--schema", action="store_true",
                        help="Show complete data schema documentation")
    parser.add_argument("--check-consistency", type=int, default=0, metavar="N",
                        help="Randomly check consistency of N files")
    parser.add_argument("--seed", type=int, default=42,
                        help="Random seed (default: 42)")
    args = parser.parse_args()
    # Seed so the random sample and consistency checks are reproducible.
    random.seed(args.seed)
    root = get_dataset_root()
    if args.schema:
        print_data_schema()
        return
    if args.sample:
        sample_path = Path(args.sample)
        # Relative paths are resolved against the dataset root.
        if not sample_path.is_absolute():
            sample_path = root / args.sample
        if not sample_path.exists():
            print(f"Error: file does not exist - {sample_path}")
            sys.exit(1)
        inspect_single_npz(sample_path)
        return
    # Default: show overview
    dataset_overview(root)
    if args.overview_only:
        return
    # Show data schema
    print_data_schema()
    # Show details of a random sample. Listed paths start with the dataset
    # directory name, so try resolving against the parent dir first, then
    # against root with the leading component stripped.
    all_paths = load_path_file(root / "all_npz_paths.txt")
    if all_paths:
        sample_rel = random.choice(all_paths)
        sample_path = root.parent / sample_rel
        if not sample_path.exists():
            sample_path = root / "/".join(sample_rel.split("/")[1:])
        if sample_path.exists():
            print_header("Random Sample Details")
            inspect_single_npz(sample_path)
    # Consistency check (default to 5 files when no count was requested).
    n_check = args.check_consistency if args.check_consistency > 0 else 5
    check_consistency(root, n_check)
if __name__ == "__main__":
    # Run the CLI only when executed as a script, not when imported.
    main()