# TopoSlots-MotionData — scripts/full_skeleton_audit.py
# (uploaded via huggingface_hub; revision 571a68d, verified)
"""
Full skeleton audit: naming + structural features for all 79 skeletons.
Checks:
1. Canonical name quality (empty, duplicate, semantic sense)
2. Side tag correctness (L/R balance, missed sides)
3. Symmetry pair validity (matched pairs have similar bone lengths/depths)
4. Topology structure (tree depth, branching factor, connected)
5. Geodesic distance matrix sanity
6. Rest pose geometry (bone lengths, height, spread)
7. Cross-dataset consistency for shared canonical names
"""
import sys
import os
from pathlib import Path
import numpy as np
from collections import Counter, defaultdict
# Make the repository root importable so `src.*` resolves when this script
# is run directly (e.g. `python scripts/full_skeleton_audit.py`).
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))
from src.data.skeleton_graph import SkeletonGraph
# Directory where the markdown audit report is written.
RESULT_DIR = project_root / 'logs' / 'data_fix_20260318'
# Severity buckets: findings are accumulated here and dumped into the
# report / console at the end of the run.
ERRORS = []    # Must fix
WARNINGS = []  # Should review
INFO = []      # FYI


def _record(bucket, tag, dataset, msg):
    # Single formatter shared by all three severities.
    bucket.append(f'[{tag}] {dataset}: {msg}')


def log_error(dataset, msg):
    """Record a must-fix finding for *dataset*."""
    _record(ERRORS, 'ERROR', dataset, msg)


def log_warn(dataset, msg):
    """Record a should-review finding for *dataset*."""
    _record(WARNINGS, 'WARN', dataset, msg)


def log_info(dataset, msg):
    """Record an informational note for *dataset*."""
    _record(INFO, 'INFO', dataset, msg)
def audit_skeleton(dataset_id: str, skel_data: dict, motion_sample: dict = None):
    """Full audit of one skeleton.

    Runs the seven checks listed in the module docstring and records every
    finding via the module-level log_error/log_warn/log_info buckets.

    Args:
        dataset_id: Identifier used as the prefix of every log message.
        skel_data: Loaded contents of a skeleton.npz ('joint_names' required;
            a missing 'canonical_names' field is itself reported as an error).
        motion_sample: Optional loaded contents of one motion .npz for the
            same skeleton; when given, section 7 spot-checks its arrays.

    Returns:
        dict of summary statistics for the report table ('dataset',
        'num_joints', L/R/C counts, symmetry/topology stats, canonical-name
        counts) plus an 'issues' list of coarse flags.
    """
    sg = SkeletonGraph.from_dict(skel_data)
    J = sg.num_joints
    canon = [str(n) for n in skel_data.get('canonical_names', [])]
    raw = [str(n) for n in skel_data['joint_names']]
    results = {'dataset': dataset_id, 'num_joints': J, 'issues': []}

    # ===== 1. Canonical Name Quality =====
    dupes = {}  # name -> indices; stays empty when canon is missing
    if not canon:
        log_error(dataset_id, 'No canonical_names field in skeleton.npz')
        results['issues'].append('no_canonical')
    else:
        # Empty names
        empty = [i for i, c in enumerate(canon) if not c.strip()]
        if empty:
            log_error(dataset_id, f'{len(empty)} empty canonical names at indices {empty[:5]}')
        # Duplicate canonical names (within same skeleton). Group indices in
        # one pass; dict insertion order follows first occurrence, so the
        # "[:3]" sample below is deterministic (iterating set(canon), as
        # before, was subject to string-hash randomization) and the grouping
        # is O(n) instead of O(n^2).
        by_name = defaultdict(list)
        for i, c in enumerate(canon):
            by_name[c].append(i)
        dupes = {c: idxs for c, idxs in by_name.items() if len(idxs) > 1}
        if dupes:
            for c, idxs in list(dupes.items())[:3]:
                raw_names = [raw[i] for i in idxs]
                log_warn(dataset_id, f'Duplicate canonical "{c}" for raw: {raw_names}')
        # Names that are just numbers or single chars
        bad_names = [c for c in canon if len(c) <= 2 and not c.isalpha()]
        if bad_names:
            log_warn(dataset_id, f'Very short canonical names: {bad_names[:5]}')
        # Prefix residue (bip01, bn_, jt_, mixamorig still present)
        prefix_residue = [c for c in canon if any(p in c.lower() for p in ['bip01', 'bn_', 'jt_', 'mixamorig', 'npc_'])]
        if prefix_residue:
            log_warn(dataset_id, f'Prefix residue in canonical: {prefix_residue[:5]}')

    # ===== 2. Side Tag Correctness =====
    # Count all tags in a single pass (Counter returns 0 for absent keys).
    side_counts = Counter(str(t) for t in sg.side_tags)
    n_left = side_counts['left']
    n_right = side_counts['right']
    n_center = side_counts['center']
    if n_left != n_right and J > 5:  # small skeletons may be asymmetric
        log_warn(dataset_id, f'L/R imbalance: L={n_left} R={n_right} C={n_center}')
    # Check if canonical names agree with side tags
    if canon:
        for i, (c, t) in enumerate(zip(canon, sg.side_tags)):
            if 'left' in c and t != 'left':
                log_warn(dataset_id, f'Joint {i} "{raw[i]}" canonical="{c}" but side_tag="{t}"')
            elif 'right' in c and t != 'right':
                log_warn(dataset_id, f'Joint {i} "{raw[i]}" canonical="{c}" but side_tag="{t}"')

    # ===== 3. Symmetry Pair Validation =====
    for i, j in sg.symmetry_pairs:
        # Check bone lengths match (should be ~equal for symmetric joints);
        # near-zero bones are skipped to avoid meaningless ratios.
        bl_i = sg.bone_lengths[i]
        bl_j = sg.bone_lengths[j]
        if bl_i > 0.01 and bl_j > 0.01:
            ratio = max(bl_i, bl_j) / min(bl_i, bl_j)
            if ratio > 1.5:
                log_warn(dataset_id, f'Sym pair ({raw[i]}, {raw[j]}) bone length mismatch: {bl_i:.3f} vs {bl_j:.3f}')
        # Check depths match
        if sg.depths[i] != sg.depths[j]:
            log_warn(dataset_id, f'Sym pair ({raw[i]}, {raw[j]}) depth mismatch: {sg.depths[i]} vs {sg.depths[j]}')

    # ===== 4. Topology Structure =====
    max_depth = sg.depths.max()
    max_degree = sg.degrees.max()
    leaf_count = sum(1 for d in sg.degrees if d == 0)
    # Build the child adjacency once so the reachability walk below is O(J);
    # the previous version rescanned parent_indices for every dequeued joint
    # (O(J^2)) and used list.pop(0) (O(J) per dequeue).
    children = defaultdict(list)
    roots = []
    for j, p in enumerate(sg.parent_indices):
        if p == -1:
            roots.append(j)
        else:
            children[int(p)].append(j)
    root_count = len(roots)
    if root_count != 1:
        log_error(dataset_id, f'Expected 1 root, found {root_count}')
    if max_depth > 20:
        log_warn(dataset_id, f'Very deep tree: max_depth={max_depth}')
    # Check connectivity (all joints reachable from root)
    reachable = set()
    stack = list(roots)
    while stack:
        curr = stack.pop()
        if curr in reachable:
            continue
        reachable.add(curr)
        stack.extend(children[curr])
    if len(reachable) != J:
        log_error(dataset_id, f'Disconnected: only {len(reachable)}/{J} joints reachable from root')

    # ===== 5. Geodesic Distance Sanity =====
    geo = sg.geodesic_dist
    if geo.shape != (J, J):
        log_error(dataset_id, f'Geodesic distance shape mismatch: {geo.shape} vs ({J},{J})')
    else:
        # Should be symmetric
        asym = np.abs(geo - geo.T).max()
        if asym > 0.01:
            log_error(dataset_id, f'Geodesic distance not symmetric: max asymmetry={asym}')
        # Diagonal should be 0
        diag_max = np.diag(geo).max()
        if diag_max > 0.01:
            log_error(dataset_id, f'Geodesic distance diagonal non-zero: max={diag_max}')
        # Max distance should be reasonable (a tree path visits <= J joints)
        max_geo = geo.max()
        if max_geo > J:
            log_warn(dataset_id, f'Geodesic max={max_geo} exceeds num_joints={J}')
        # No unreachable pairs (distance should be < J+1)
        unreachable = (geo >= J + 1).sum()
        if unreachable > 0:
            log_error(dataset_id, f'{unreachable} unreachable joint pairs in geodesic matrix')

    # ===== 6. Rest Pose Geometry =====
    offsets = sg.rest_offsets
    bone_lengths = sg.bone_lengths
    # Zero-length bones (excluding root)
    zero_bones = sum(1 for i, bl in enumerate(bone_lengths) if bl < 1e-6 and sg.parent_indices[i] >= 0)
    if zero_bones > 0:
        log_info(dataset_id, f'{zero_bones} zero-length bones')
    # Very long bones (> 10x median)
    nonzero_bl = bone_lengths[bone_lengths > 1e-6]
    if len(nonzero_bl) > 0:
        median_bl = np.median(nonzero_bl)
        long_bones = [(raw[i], bl) for i, bl in enumerate(bone_lengths)
                      if bl > 10 * median_bl and sg.parent_indices[i] >= 0]
        if long_bones:
            log_warn(dataset_id, f'Unusually long bones (>10x median={median_bl:.4f}): {long_bones[:3]}')

    # ===== 7. Motion Data Spot Check =====
    if motion_sample is not None:
        T = int(motion_sample['num_frames'])
        jp = motion_sample['joint_positions'][:T]
        lp = motion_sample['local_positions'][:T]
        vel = motion_sample['velocities'][:T]
        # Check joint count matches
        if jp.shape[1] != J:
            log_error(dataset_id, f'Motion joints={jp.shape[1]} != skeleton joints={J}')
        # Check for NaN/Inf
        for key in ['joint_positions', 'local_positions', 'velocities']:
            arr = motion_sample[key][:T]
            if np.any(np.isnan(arr)):
                log_error(dataset_id, f'NaN in motion {key}')
            if np.any(np.isinf(arr)):
                log_error(dataset_id, f'Inf in motion {key}')
        # Height sanity: vertical (Y) extent of the first frame.
        # NOTE(review): assumes Y-up and meter units — confirm for all sources.
        height = jp[0, :, 1].max() - jp[0, :, 1].min()
        if height < 0.05:
            log_warn(dataset_id, f'Very small body height: {height:.4f}m')
        elif height > 10:
            log_warn(dataset_id, f'Very large body height: {height:.2f}m (scale issue?)')
        # Velocity sanity
        vel_max = np.linalg.norm(vel, axis=-1).max()
        if vel_max > 50:
            log_warn(dataset_id, f'Very high velocity: max={vel_max:.1f} m/s')

    # Collect summary for the report table.
    results['n_left'] = n_left
    results['n_right'] = n_right
    results['n_center'] = n_center
    results['n_sym'] = len(sg.symmetry_pairs)
    results['max_depth'] = int(max_depth)
    results['max_degree'] = int(max_degree)
    results['leaf_count'] = leaf_count
    results['n_canonical'] = len(canon)
    # dupes is always a dict here (possibly empty), so no guard is needed.
    results['n_dupe_canonical'] = sum(len(v) - 1 for v in dupes.values())
    return results
def main():
    """Audit all skeletons (6 human datasets + Truebones Zoo species) and
    write a markdown report under RESULT_DIR.

    NOTE(review): assumes the processed-data layout
    data/processed/<dataset>/{skeleton.npz, motions/} and
    data/processed/truebones_zoo/{skeletons/, motions/} relative to the
    project root — confirm before running in a different checkout.
    """
    all_results = []
    # Audit human datasets
    human_datasets = ['humanml3d', 'lafan1', '100style', 'bandai_namco', 'cmu_mocap', 'mixamo']
    for ds in human_datasets:
        ds_path = project_root / 'data' / 'processed' / ds
        skel_data = dict(np.load(ds_path / 'skeleton.npz', allow_pickle=True))
        # Load a motion sample (first file in sorted order, if any exist)
        motions_dir = ds_path / 'motions'
        files = sorted(os.listdir(motions_dir))
        motion_sample = dict(np.load(motions_dir / files[0], allow_pickle=True)) if files else None
        r = audit_skeleton(ds, skel_data, motion_sample)
        all_results.append(r)
    # Audit Zoo species
    zoo_path = project_root / 'data' / 'processed' / 'truebones_zoo'
    skel_dir = zoo_path / 'skeletons'
    motions_dir = zoo_path / 'motions'
    # Build species → motion mapping.
    # Only the first 200 motion files (sorted) are scanned, and the first
    # file seen per species wins — species beyond that window get no sample.
    species_motions = {}
    for f in sorted(os.listdir(motions_dir))[:200]:
        d = dict(np.load(motions_dir / f, allow_pickle=True))
        sp = str(d.get('species', ''))
        if sp and sp not in species_motions:
            species_motions[sp] = d
    for skel_file in sorted(skel_dir.glob('*.npz')):
        species = skel_file.stem
        skel_data = dict(np.load(skel_file, allow_pickle=True))
        motion_sample = species_motions.get(species)
        r = audit_skeleton(f'zoo/{species}', skel_data, motion_sample)
        all_results.append(r)
    # ===== Cross-dataset canonical consistency =====
    # For human datasets, check that same canonical name → similar tree depth
    canonical_depths = defaultdict(list)
    for ds in human_datasets:
        skel_data = dict(np.load(project_root / 'data' / 'processed' / ds / 'skeleton.npz', allow_pickle=True))
        sg = SkeletonGraph.from_dict(skel_data)
        canon = [str(n) for n in skel_data.get('canonical_names', [])]
        for c, d in zip(canon, sg.depths):
            canonical_depths[c].append((ds, int(d)))
    for c, entries in canonical_depths.items():
        depths = [d for _, d in entries]
        # Only flag spreads greater than 2 depth levels across datasets.
        if len(set(depths)) > 1 and max(depths) - min(depths) > 2:
            sources = [(ds, d) for ds, d in entries]
            log_warn('cross-dataset', f'Canonical "{c}" has depth variance: {sources}')
    # ===== Write Report =====
    report_path = RESULT_DIR / 'skeleton_audit_report.md'
    with open(report_path, 'w') as f:
        f.write('# Skeleton Audit Report\n\n')
        f.write(f'**Date**: 2026-03-18\n')
        f.write(f'**Datasets**: {len(human_datasets)} human + {len(list(skel_dir.glob("*.npz")))} zoo species\n\n')
        # Summary table
        f.write('## Summary\n\n')
        f.write(f'| Dataset | J | L | R | C | Sym | Depth | Leaves | Canon | Dupes |\n')
        f.write(f'|---------|:-:|:-:|:-:|:-:|:---:|:-----:|:------:|:-----:|:-----:|\n')
        for r in all_results:
            f.write(f'| {r["dataset"]:20s} | {r["num_joints"]:3d} | {r["n_left"]:2d} | {r["n_right"]:2d} | '
                    f'{r["n_center"]:2d} | {r["n_sym"]:2d} | {r["max_depth"]:2d} | {r["leaf_count"]:2d} | '
                    f'{r["n_canonical"]:3d} | {r["n_dupe_canonical"]:2d} |\n')
        # Issues, grouped by severity
        f.write(f'\n## Errors ({len(ERRORS)})\n\n')
        if ERRORS:
            for e in ERRORS:
                f.write(f'- {e}\n')
        else:
            f.write('None.\n')
        f.write(f'\n## Warnings ({len(WARNINGS)})\n\n')
        if WARNINGS:
            for w in WARNINGS:
                f.write(f'- {w}\n')
        else:
            f.write('None.\n')
        f.write(f'\n## Info ({len(INFO)})\n\n')
        if INFO:
            for i in INFO:
                f.write(f'- {i}\n')
        else:
            f.write('None.\n')
    print(f'Audit complete: {len(ERRORS)} errors, {len(WARNINGS)} warnings, {len(INFO)} info')
    print(f'Report: {report_path}')
    # Print to console too (warnings are truncated to the first 30)
    if ERRORS:
        print(f'\n=== ERRORS ({len(ERRORS)}) ===')
        for e in ERRORS:
            print(f'  {e}')
    if WARNINGS:
        print(f'\n=== WARNINGS ({len(WARNINGS)}) ===')
        for w in WARNINGS[:30]:
            print(f'  {w}')
        if len(WARNINGS) > 30:
            print(f'  ... and {len(WARNINGS) - 30} more')
# Script entry point.
if __name__ == '__main__':
    main()