| """ |
| Preprocess Truebones Zoo: per-species BVH → unified Scheme C + text captions. |
| |
| Each species has its own skeleton topology. We process them all into one |
| unified dataset with per-motion skeleton_id = species name. |
| """ |
|
|
| import sys |
| import json |
| import argparse |
| from pathlib import Path |
| import numpy as np |
| from tqdm import tqdm |
|
|
# Make the repository root importable so the `scripts.*` and `src.*` imports
# below resolve when this file is run directly (not as an installed package).
# Must happen before those imports.
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))
|
|
| from scripts.preprocess_bvh import process_bvh_file |
| from src.data.skeleton_graph import SkeletonGraph |
|
|
|
|
def load_captions(captions_dir: Path) -> dict:
    """Load all caption JSON files into one lookup table.

    The captions directory is expected to hold one sub-directory per
    species, each containing ``*.json`` files shaped like::

        {"file_id": "<Species>/<clip>.bvh",
         "captions": {"short": {"original": ["caption text", ...]}}}

    Args:
        captions_dir: Root directory of the caption JSON tree.

    Returns:
        Dict mapping ``file_id`` (with a trailing ``.fbx``/``.bvh``
        extension stripped) to its non-empty list of short captions.
        Unreadable or malformed files are skipped.
    """
    captions: dict = {}
    for species_dir in sorted(captions_dir.iterdir()):
        if not species_dir.is_dir():
            continue
        for json_file in species_dir.glob('*.json'):
            try:
                with open(json_file) as f:
                    data = json.load(f)
                file_id = data.get('file_id', '')
                caps = data.get('captions', {})
                short_caps = caps.get('short', {}).get('original', [])
            # Narrowed from a bare `except Exception`: catch only I/O
            # failures, invalid JSON (JSONDecodeError is a ValueError),
            # and unexpected document shapes (non-dict nodes).
            except (OSError, ValueError, AttributeError, TypeError):
                continue
            if short_caps:
                # Strip only a *trailing* extension. The previous
                # str.replace-based version also deleted '.fbx'/'.bvh'
                # occurring mid-string, corrupting such keys.
                key = file_id
                for ext in ('.fbx', '.bvh'):
                    if key.endswith(ext):
                        key = key[:-len(ext)]
                captions[key] = short_caps
    return captions
|
|
|
|
def main():
    """Convert the Truebones Zoo BVH corpus into one unified dataset.

    Walks per-species directories under --zoo_dir, processes each BVH clip,
    optionally joins text captions from --captions_dir, and writes into
    --output_dir:

      * motions/<id>.npz        — per-motion arrays plus metadata fields
      * skeletons/<species>.npz — one skeleton definition per species
      * skeleton.npz            — copy of the first species' skeleton
      * stats.npz               — normalization statistics
      * splits/{train,val,test,all}.txt — 80/10/10 motion-id splits
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--zoo_dir', type=str,
                        default='data/raw/Truebones_Zoo/New-FBX-BVH_Z-OO/Truebone_Z-OO')
    parser.add_argument('--captions_dir', type=str,
                        default='data/raw/Truebones_Zoo_captions/captions')
    parser.add_argument('--output_dir', type=str,
                        default='data/processed/truebones_zoo')
    parser.add_argument('--target_fps', type=float, default=20.0)
    parser.add_argument('--max_frames', type=int, default=196)
    parser.add_argument('--min_frames', type=int, default=16)
    args = parser.parse_args()

    zoo_dir = Path(args.zoo_dir)
    output_dir = Path(args.output_dir)
    (output_dir / 'motions').mkdir(parents=True, exist_ok=True)
    (output_dir / 'splits').mkdir(parents=True, exist_ok=True)
    (output_dir / 'skeletons').mkdir(parents=True, exist_ok=True)

    # Captions are optional; a missing directory just yields empty text fields.
    captions_dir = Path(args.captions_dir)
    if captions_dir.exists():
        captions = load_captions(captions_dir)
        print(f"Loaded captions for {len(captions)} motions")
    else:
        captions = {}
        print("No captions directory found")

    # One sub-directory per species, each holding that species' BVH clips.
    species_dirs = sorted([d for d in zoo_dir.iterdir() if d.is_dir()])
    print(f"Found {len(species_dirs)} species")

    motion_ids = []       # ids of successfully written motions
    skeletons = {}        # species name -> joint count / joint names summary
    all_local_pos = []    # subsampled arrays used for dataset statistics
    all_velocities = []
    processed = 0         # global counter; also used to form motion ids
    failed = 0

    for species_dir in tqdm(species_dirs, desc="Species"):
        species_name = species_dir.name
        bvh_files = sorted(species_dir.glob('*.bvh'))
        if not bvh_files:
            continue

        # First successfully parsed skeleton becomes the species skeleton.
        species_skeleton = None

        for bvh_path in bvh_files:
            # process_bvh_file returns None on failure, otherwise a dict
            # with 'skeleton' and 'data' entries (exact contents defined in
            # scripts.preprocess_bvh — presumably resampled/trimmed arrays).
            result = process_bvh_file(
                bvh_path, args.target_fps, args.max_frames, args.min_frames,
                do_remove_end_sites=True,
            )
            if result is None:
                failed += 1
                continue

            skeleton = result['skeleton']
            data = result['data']

            # Save one skeleton file per species, taken from the first clip
            # that parses. NOTE(review): assumes every clip of a species
            # shares one topology — later clips are not checked against it.
            if species_skeleton is None:
                species_skeleton = skeleton
                skeletons[species_name] = {
                    'num_joints': skeleton.num_joints,
                    'joint_names': skeleton.joint_names,
                }
                np.savez(
                    output_dir / 'skeletons' / f'{species_name}.npz',
                    **skeleton.to_dict(),
                )

            # Caption lookup: caption file_ids vary in separator style, so
            # try a few exact key spellings first.
            bvh_stem = bvh_path.stem.lstrip('_')
            caption_keys = [
                f"{species_name}/{bvh_stem}",
                f"{species_name}/{species_name}-{bvh_stem}",
                f"{species_name}/{species_name}_{bvh_stem}",
            ]
            text_list = []
            for ck in caption_keys:
                if ck in captions:
                    text_list = captions[ck]
                    break

            # Fuzzy fallback: substring match ignoring case and '_' vs '-'.
            if not text_list:
                for ck, caps in captions.items():
                    if species_name.lower() in ck.lower() and bvh_stem.lower().replace('_', '-') in ck.lower().replace('_', '-'):
                        text_list = caps
                        break

            # Global running counter keeps motion ids unique across species.
            motion_id = f"{species_name}_{processed:04d}"
            data['skeleton_id'] = species_name
            # At most 5 captions, joined with a '|||' separator.
            data['texts'] = '|||'.join(text_list[:5]) if text_list else ''
            data['species'] = species_name
            data['source_file'] = bvh_path.name

            np.savez_compressed(
                output_dir / 'motions' / f'{motion_id}.npz',
                **data,
            )
            motion_ids.append(motion_id)

            # Subsample every 5th motion for normalization statistics so the
            # accumulated arrays stay small.
            if processed % 5 == 0:
                all_local_pos.append(data['local_positions'])
                all_velocities.append(data['velocities'])

            processed += 1

    print(f"\nProcessed: {processed}, Failed: {failed}")
    print(f"Species with skeletons: {len(skeletons)}")

    if not motion_ids:
        print("No motions processed!")
        return

    # Compatibility copy: also expose a single top-level skeleton.npz, using
    # the first species' skeleton as a representative placeholder.
    first_species = list(skeletons.keys())[0]
    first_skel = dict(np.load(output_dir / 'skeletons' / f'{first_species}.npz', allow_pickle=True))
    np.savez(output_dir / 'skeleton.npz', **first_skel)

    # Normalization stats: zero means (std-only scaling) with stds computed
    # from the subsampled arrays; +1e-8 guards against divide-by-zero.
    # Arrays are flattened to (-1, 3), i.e. per-axis statistics.
    all_pos_flat = np.concatenate([p.reshape(-1, 3) for p in all_local_pos], axis=0)
    all_vel_flat = np.concatenate([v.reshape(-1, 3) for v in all_velocities], axis=0)
    stats = {
        'local_pos_mean': np.zeros((1, 3), dtype=np.float32),
        'local_pos_std': all_pos_flat.std(axis=0, keepdims=True).astype(np.float32) + 1e-8,
        'velocity_mean': np.zeros((1, 3), dtype=np.float32),
        'velocity_std': all_vel_flat.std(axis=0, keepdims=True).astype(np.float32) + 1e-8,
        'root_vel_mean': np.zeros(3, dtype=np.float32),
        'root_vel_std': np.ones(3, dtype=np.float32),
    }
    np.savez(output_dir / 'stats.npz', **stats)

    # Deterministic 80/10/10 train/val/test split over shuffled motion ids
    # (fixed seed so reruns reproduce the same split).
    np.random.seed(42)
    indices = np.random.permutation(len(motion_ids))
    n_train = int(0.8 * len(indices))
    n_val = int(0.1 * len(indices))
    splits = {
        'train': [motion_ids[i] for i in indices[:n_train]],
        'val': [motion_ids[i] for i in indices[n_train:n_train + n_val]],
        'test': [motion_ids[i] for i in indices[n_train + n_val:]],
        'all': motion_ids,
    }
    for split_name, ids in splits.items():
        with open(output_dir / 'splits' / f'{split_name}.txt', 'w') as f:
            for mid in ids:
                f.write(f'{mid}\n')
        print(f" {split_name}: {len(ids)} motions")

    # Report how heterogeneous the skeletons are across species.
    print(f"\nSkeleton diversity:")
    for sp, info in sorted(skeletons.items()):
        print(f" {sp:15s}: {info['num_joints']:3d} joints")

    # Count written motions carrying a non-empty 'texts' field.
    # NOTE(review): relies on NpzFile's Mapping interface (.get) — verify
    # the installed numpy supports it.
    text_count = sum(1 for mid in motion_ids
                     if (output_dir / 'motions' / f'{mid}.npz').exists()
                     and str(np.load(output_dir / 'motions' / f'{mid}.npz', allow_pickle=True).get('texts', '')) != '')
    print(f"\nMotions with text: {text_count}/{len(motion_ids)}")
    print(f"\nDone! Output: {output_dir}")
|
|
|
|
# Script entry point: run the full preprocessing pipeline.
if __name__ == '__main__':
    main()
|
|