| """ |
| Full skeleton audit: naming + structural features for all 79 skeletons. |
| |
| Checks: |
| 1. Canonical name quality (empty, duplicate, semantic sense) |
| 2. Side tag correctness (L/R balance, missed sides) |
| 3. Symmetry pair validity (matched pairs have similar bone lengths/depths) |
| 4. Topology structure (tree depth, branching factor, connected) |
| 5. Geodesic distance matrix sanity |
| 6. Rest pose geometry (bone lengths, height, spread) |
| 7. Cross-dataset consistency for shared canonical names |
| """ |
|
|
| import sys |
| import os |
| from pathlib import Path |
| import numpy as np |
| from collections import Counter, defaultdict |
|
|
| project_root = Path(__file__).parent.parent |
| sys.path.insert(0, str(project_root)) |
|
|
| from src.data.skeleton_graph import SkeletonGraph |
|
|
# All audit artefacts (the markdown report) are written under this log directory.
RESULT_DIR = project_root / 'logs' / 'data_fix_20260318'


# Module-level accumulators for audit findings, in decreasing severity.
# They are appended to via log_error/log_warn/log_info and written out by main().
ERRORS = []
WARNINGS = []
INFO = []
|
|
|
|
def log_error(dataset, msg):
    """Record a blocking-severity finding for *dataset* in the global ERRORS list."""
    entry = f'[ERROR] {dataset}: {msg}'
    ERRORS.append(entry)
|
|
def log_warn(dataset, msg):
    """Record a warning-severity finding for *dataset* in the global WARNINGS list."""
    entry = f'[WARN] {dataset}: {msg}'
    WARNINGS.append(entry)
|
|
def log_info(dataset, msg):
    """Record an informational finding for *dataset* in the global INFO list."""
    entry = f'[INFO] {dataset}: {msg}'
    INFO.append(entry)
|
|
|
|
def _audit_canonical_names(dataset_id, canon, raw, results):
    """Audit canonical joint names for one skeleton.

    Checks for a missing canonical_names field, empty names, duplicates,
    suspiciously short names, and un-stripped rig prefixes.

    Returns:
        Dict mapping each duplicated canonical name to its joint indices
        (empty when there are no canonical names at all).
    """
    if not canon:
        log_error(dataset_id, 'No canonical_names field in skeleton.npz')
        results['issues'].append('no_canonical')
        return {}

    empty = [i for i, c in enumerate(canon) if not c.strip()]
    if empty:
        log_error(dataset_id, f'{len(empty)} empty canonical names at indices {empty[:5]}')

    # Group joint indices by canonical name in a single pass (the previous
    # set + inner-scan approach was O(n^2)); first-occurrence order makes the
    # "first 3" warnings below deterministic.
    name_to_idxs = defaultdict(list)
    for i, c in enumerate(canon):
        name_to_idxs[c].append(i)
    dupes = {c: idxs for c, idxs in name_to_idxs.items() if len(idxs) > 1}
    for c, idxs in list(dupes.items())[:3]:
        raw_names = [raw[i] for i in idxs]
        log_warn(dataset_id, f'Duplicate canonical "{c}" for raw: {raw_names}')

    bad_names = [c for c in canon if len(c) <= 2 and not c.isalpha()]
    if bad_names:
        log_warn(dataset_id, f'Very short canonical names: {bad_names[:5]}')

    # Known rig-exporter prefixes that the canonicalization should have stripped.
    known_prefixes = ('bip01', 'bn_', 'jt_', 'mixamorig', 'npc_')
    prefix_residue = [c for c in canon if any(p in c.lower() for p in known_prefixes)]
    if prefix_residue:
        log_warn(dataset_id, f'Prefix residue in canonical: {prefix_residue[:5]}')

    return dupes


def _audit_side_tags(dataset_id, sg, canon, raw):
    """Check L/R tag balance and canonical-name/side-tag agreement.

    Returns:
        (n_left, n_right, n_center) counts for the summary table.
    """
    tags = list(sg.side_tags)
    n_left = tags.count('left')
    n_right = tags.count('right')
    n_center = tags.count('center')

    # Tiny rigs (<= 5 joints) may legitimately have no symmetric pairs.
    if n_left != n_right and sg.num_joints > 5:
        log_warn(dataset_id, f'L/R imbalance: L={n_left} R={n_right} C={n_center}')

    # A canonical name containing left/right must carry the matching side tag.
    # (zip yields nothing when canon is empty, so no guard is needed.)
    for i, (c, t) in enumerate(zip(canon, sg.side_tags)):
        if 'left' in c and t != 'left':
            log_warn(dataset_id, f'Joint {i} "{raw[i]}" canonical="{c}" but side_tag="{t}"')
        elif 'right' in c and t != 'right':
            log_warn(dataset_id, f'Joint {i} "{raw[i]}" canonical="{c}" but side_tag="{t}"')
    return n_left, n_right, n_center


def _audit_symmetry(dataset_id, sg, raw):
    """Verify mirrored joint pairs have comparable bone lengths and equal depth."""
    for i, j in sg.symmetry_pairs:
        bl_i = sg.bone_lengths[i]
        bl_j = sg.bone_lengths[j]
        # Skip near-zero bones (presumably virtual/helper joints — TODO confirm).
        if bl_i > 0.01 and bl_j > 0.01:
            ratio = max(bl_i, bl_j) / min(bl_i, bl_j)
            if ratio > 1.5:
                log_warn(dataset_id, f'Sym pair ({raw[i]}, {raw[j]}) bone length mismatch: {bl_i:.3f} vs {bl_j:.3f}')

        if sg.depths[i] != sg.depths[j]:
            log_warn(dataset_id, f'Sym pair ({raw[i]}, {raw[j]}) depth mismatch: {sg.depths[i]} vs {sg.depths[j]}')


def _audit_topology(dataset_id, sg):
    """Check rootedness, tree depth, and connectivity of the joint hierarchy.

    Returns:
        (max_depth, max_degree, leaf_count) for the summary table.
    """
    J = sg.num_joints
    max_depth = sg.depths.max()
    max_degree = sg.degrees.max()
    # Degree 0 is treated as a leaf — presumably degrees counts children; TODO confirm.
    leaf_count = sum(1 for d in sg.degrees if d == 0)
    root_count = sum(1 for p in sg.parent_indices if p == -1)

    if root_count != 1:
        log_error(dataset_id, f'Expected 1 root, found {root_count}')

    if max_depth > 20:
        log_warn(dataset_id, f'Very deep tree: max_depth={max_depth}')

    # Connectivity BFS over a precomputed child list: O(J) instead of the
    # previous O(J^2) scan-every-joint-per-node approach with list.pop(0).
    children = defaultdict(list)
    for j, p in enumerate(sg.parent_indices):
        if p >= 0:
            children[int(p)].append(j)
    queue = [i for i, p in enumerate(sg.parent_indices) if p == -1]
    reachable = set(queue)
    head = 0
    while head < len(queue):
        for j in children[int(queue[head])]:
            if j not in reachable:
                reachable.add(j)
                queue.append(j)
        head += 1
    if len(reachable) != J:
        log_error(dataset_id, f'Disconnected: only {len(reachable)}/{J} joints reachable from root')

    return max_depth, max_degree, leaf_count


def _audit_geodesic(dataset_id, sg):
    """Sanity-check the geodesic (hop-count) distance matrix."""
    J = sg.num_joints
    geo = sg.geodesic_dist
    if geo.shape != (J, J):
        log_error(dataset_id, f'Geodesic distance shape mismatch: {geo.shape} vs ({J},{J})')
        return

    asym = np.abs(geo - geo.T).max()
    if asym > 0.01:
        log_error(dataset_id, f'Geodesic distance not symmetric: max asymmetry={asym}')

    diag_max = np.diag(geo).max()
    if diag_max > 0.01:
        log_error(dataset_id, f'Geodesic distance diagonal non-zero: max={diag_max}')

    # A hop-count distance in a connected J-joint tree can never exceed J-1.
    max_geo = geo.max()
    if max_geo > J:
        log_warn(dataset_id, f'Geodesic max={max_geo} exceeds num_joints={J}')

    # Values >= J+1 are presumed to be the "unreachable" sentinel — TODO confirm
    # against SkeletonGraph's geodesic computation.
    unreachable = (geo >= J + 1).sum()
    if unreachable > 0:
        log_error(dataset_id, f'{unreachable} unreachable joint pairs in geodesic matrix')


def _audit_rest_pose(dataset_id, sg, raw):
    """Check rest-pose bone lengths for zero-length and outlier bones."""
    bone_lengths = sg.bone_lengths

    # Zero-length bones on non-root joints are common (helper joints) — info only.
    zero_bones = sum(1 for i, bl in enumerate(bone_lengths) if bl < 1e-6 and sg.parent_indices[i] >= 0)
    if zero_bones > 0:
        log_info(dataset_id, f'{zero_bones} zero-length bones')

    nonzero_bl = bone_lengths[bone_lengths > 1e-6]
    if len(nonzero_bl) > 0:
        median_bl = np.median(nonzero_bl)
        long_bones = [(raw[i], bl) for i, bl in enumerate(bone_lengths)
                      if bl > 10 * median_bl and sg.parent_indices[i] >= 0]
        if long_bones:
            log_warn(dataset_id, f'Unusually long bones (>10x median={median_bl:.4f}): {long_bones[:3]}')


def _audit_motion(dataset_id, motion_sample, J):
    """Check one sample motion clip: joint count, NaN/Inf, body scale, velocity."""
    T = int(motion_sample['num_frames'])
    jp = motion_sample['joint_positions'][:T]

    if jp.shape[1] != J:
        log_error(dataset_id, f'Motion joints={jp.shape[1]} != skeleton joints={J}')

    for key in ['joint_positions', 'local_positions', 'velocities']:
        arr = motion_sample[key][:T]
        if np.any(np.isnan(arr)):
            log_error(dataset_id, f'NaN in motion {key}')
        if np.any(np.isinf(arr)):
            log_error(dataset_id, f'Inf in motion {key}')

    # Height is measured along axis 1 of the first frame — assumes Y-up; TODO confirm.
    height = jp[0, :, 1].max() - jp[0, :, 1].min()
    if height < 0.05:
        log_warn(dataset_id, f'Very small body height: {height:.4f}m')
    elif height > 10:
        log_warn(dataset_id, f'Very large body height: {height:.2f}m (scale issue?)')

    vel = motion_sample['velocities'][:T]
    vel_max = np.linalg.norm(vel, axis=-1).max()
    if vel_max > 50:
        log_warn(dataset_id, f'Very high velocity: max={vel_max:.1f} m/s')


def audit_skeleton(dataset_id: str, skel_data: dict, motion_sample: dict = None):
    """Run the full audit battery on one skeleton, plus an optional motion sample.

    Args:
        dataset_id: Identifier used in log messages (e.g. 'lafan1', 'zoo/Cat').
        skel_data: Deserialized skeleton.npz contents.
        motion_sample: One deserialized motion .npz, or None to skip motion checks.

    Returns:
        Summary dict with per-skeleton counts for the report table.

    Findings are recorded via log_error/log_warn/log_info as a side effect.
    """
    sg = SkeletonGraph.from_dict(skel_data)
    J = sg.num_joints
    canon = [str(n) for n in skel_data.get('canonical_names', [])]
    raw = [str(n) for n in skel_data['joint_names']]

    results = {'dataset': dataset_id, 'num_joints': J, 'issues': []}

    dupes = _audit_canonical_names(dataset_id, canon, raw, results)
    n_left, n_right, n_center = _audit_side_tags(dataset_id, sg, canon, raw)
    _audit_symmetry(dataset_id, sg, raw)
    max_depth, max_degree, leaf_count = _audit_topology(dataset_id, sg)
    _audit_geodesic(dataset_id, sg)
    _audit_rest_pose(dataset_id, sg, raw)
    if motion_sample is not None:
        _audit_motion(dataset_id, motion_sample, J)

    results['n_left'] = n_left
    results['n_right'] = n_right
    results['n_center'] = n_center
    results['n_sym'] = len(sg.symmetry_pairs)
    results['max_depth'] = int(max_depth)
    results['max_degree'] = int(max_degree)
    results['leaf_count'] = leaf_count
    results['n_canonical'] = len(canon)
    # dupes is always a dict (possibly empty), so no short-circuit guard is needed.
    results['n_dupe_canonical'] = sum(len(v) - 1 for v in dupes.values())

    return results
|
|
|
|
def _first_motion_sample(motions_dir):
    """Return the first motion .npz in *motions_dir* as a dict, or None if empty."""
    # Filter to .npz explicitly: np.load would fail on stray non-archive files.
    files = sorted(motions_dir.glob('*.npz'))
    if not files:
        return None
    return dict(np.load(files[0], allow_pickle=True))


def _sample_zoo_motions(motions_dir, limit=200):
    """Map species name -> first motion dict found, scanning at most *limit* files."""
    species_motions = {}
    for f in sorted(motions_dir.glob('*.npz'))[:limit]:
        d = dict(np.load(f, allow_pickle=True))
        sp = str(d.get('species', ''))
        if sp and sp not in species_motions:
            species_motions[sp] = d
    return species_motions


def _write_report(all_results, n_human, n_zoo):
    """Write the markdown audit report and return its path."""
    # Create the log directory up front so a fresh checkout doesn't crash here.
    RESULT_DIR.mkdir(parents=True, exist_ok=True)
    report_path = RESULT_DIR / 'skeleton_audit_report.md'
    with open(report_path, 'w') as f:
        f.write('# Skeleton Audit Report\n\n')
        f.write('**Date**: 2026-03-18\n')
        f.write(f'**Datasets**: {n_human} human + {n_zoo} zoo species\n\n')

        f.write('## Summary\n\n')
        f.write('| Dataset | J | L | R | C | Sym | Depth | Leaves | Canon | Dupes |\n')
        f.write('|---------|:-:|:-:|:-:|:-:|:---:|:-----:|:------:|:-----:|:-----:|\n')
        for r in all_results:
            f.write(f'| {r["dataset"]:20s} | {r["num_joints"]:3d} | {r["n_left"]:2d} | {r["n_right"]:2d} | '
                    f'{r["n_center"]:2d} | {r["n_sym"]:2d} | {r["max_depth"]:2d} | {r["leaf_count"]:2d} | '
                    f'{r["n_canonical"]:3d} | {r["n_dupe_canonical"]:2d} |\n')

        # One section per severity, in the same order the accumulators are filled.
        for title, items in (('Errors', ERRORS), ('Warnings', WARNINGS), ('Info', INFO)):
            f.write(f'\n## {title} ({len(items)})\n\n')
            if items:
                f.writelines(f'- {item}\n' for item in items)
            else:
                f.write('None.\n')
    return report_path


def main():
    """Audit every processed skeleton and write a markdown report + console summary."""
    all_results = []
    canonical_depths = defaultdict(list)  # canonical name -> [(dataset, depth), ...]

    # Human mocap datasets: one skeleton + one sample motion each.
    human_datasets = ['humanml3d', 'lafan1', '100style', 'bandai_namco', 'cmu_mocap', 'mixamo']
    for ds in human_datasets:
        ds_path = project_root / 'data' / 'processed' / ds
        skel_data = dict(np.load(ds_path / 'skeleton.npz', allow_pickle=True))
        motion_sample = _first_motion_sample(ds_path / 'motions')
        all_results.append(audit_skeleton(ds, skel_data, motion_sample))

        # Collect depth info now so the cross-dataset check below doesn't have
        # to reload every skeleton.npz a second time.
        sg = SkeletonGraph.from_dict(skel_data)
        canon = [str(n) for n in skel_data.get('canonical_names', [])]
        for c, d in zip(canon, sg.depths):
            canonical_depths[c].append((ds, int(d)))

    # Truebones zoo: one skeleton per species; motions matched by 'species' field.
    zoo_path = project_root / 'data' / 'processed' / 'truebones_zoo'
    skel_dir = zoo_path / 'skeletons'
    species_motions = _sample_zoo_motions(zoo_path / 'motions')

    zoo_skel_files = sorted(skel_dir.glob('*.npz'))
    for skel_file in zoo_skel_files:
        species = skel_file.stem
        skel_data = dict(np.load(skel_file, allow_pickle=True))
        r = audit_skeleton(f'zoo/{species}', skel_data, species_motions.get(species))
        all_results.append(r)

    # Cross-dataset consistency: a shared canonical name should sit at a
    # similar hierarchy depth everywhere (tolerance of 2 levels).
    for c, entries in canonical_depths.items():
        depths = [d for _, d in entries]
        if len(set(depths)) > 1 and max(depths) - min(depths) > 2:
            log_warn('cross-dataset', f'Canonical "{c}" has depth variance: {entries}')

    report_path = _write_report(all_results, len(human_datasets), len(zoo_skel_files))

    print(f'Audit complete: {len(ERRORS)} errors, {len(WARNINGS)} warnings, {len(INFO)} info')
    print(f'Report: {report_path}')

    # Console echo: all errors, first 30 warnings.
    if ERRORS:
        print(f'\n=== ERRORS ({len(ERRORS)}) ===')
        for e in ERRORS:
            print(f' {e}')
    if WARNINGS:
        print(f'\n=== WARNINGS ({len(WARNINGS)}) ===')
        for w in WARNINGS[:30]:
            print(f' {w}')
        if len(WARNINGS) > 30:
            print(f' ... and {len(WARNINGS) - 30} more')
|
|
|
|
# Allow the audit to be run directly as a script.
if __name__ == '__main__':
    main()
|
|