Tevior committed · verified
Commit 6e46f91 · 1 Parent(s): c3b2920

Upload scripts/preprocess_truebones_zoo.py with huggingface_hub

Files changed (1)
  1. scripts/preprocess_truebones_zoo.py +211 -0
scripts/preprocess_truebones_zoo.py ADDED
@@ -0,0 +1,211 @@
"""
Preprocess Truebones Zoo: per-species BVH → unified Scheme C + text captions.

Each species has its own skeleton topology. We process them all into one
unified dataset with per-motion skeleton_id = species name.
"""

import sys
import json
import argparse
from pathlib import Path
import numpy as np
from tqdm import tqdm

project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from scripts.preprocess_bvh import process_bvh_file
from src.data.skeleton_graph import SkeletonGraph


def load_captions(captions_dir: Path) -> dict:
    """Load all captions into a dict: file_id → list of caption strings."""
    captions = {}
    for species_dir in sorted(captions_dir.iterdir()):
        if not species_dir.is_dir():
            continue
        for json_file in species_dir.glob('*.json'):
            try:
                with open(json_file) as f:
                    data = json.load(f)
                file_id = data.get('file_id', '')
                # Extract short captions (most concise)
                caps = data.get('captions', {})
                short_caps = caps.get('short', {}).get('original', [])
                if short_caps:
                    # Use file_id without extension as key
                    key = file_id.replace('.fbx', '').replace('.bvh', '')
                    captions[key] = short_caps
            except Exception:
                continue
    return captions
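
# Note: load_captions() relies only on the 'file_id' key and on
# captions['short']['original']; a compatible caption JSON would look roughly
# like the hypothetical example below (values are illustrative, not taken from
# the actual caption files):
#   {"file_id": "Bear/Bear_Walk.fbx",
#    "captions": {"short": {"original": ["a bear walks forward slowly"]}}}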


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--zoo_dir', type=str,
                        default='data/raw/Truebones_Zoo/New-FBX-BVH_Z-OO/Truebone_Z-OO')
    parser.add_argument('--captions_dir', type=str,
                        default='data/raw/Truebones_Zoo_captions/captions')
    parser.add_argument('--output_dir', type=str,
                        default='data/processed/truebones_zoo')
    parser.add_argument('--target_fps', type=float, default=20.0)
    parser.add_argument('--max_frames', type=int, default=196)
    parser.add_argument('--min_frames', type=int, default=16)  # shorter for animals
    args = parser.parse_args()

    zoo_dir = Path(args.zoo_dir)
    output_dir = Path(args.output_dir)
    (output_dir / 'motions').mkdir(parents=True, exist_ok=True)
    (output_dir / 'splits').mkdir(parents=True, exist_ok=True)
    (output_dir / 'skeletons').mkdir(parents=True, exist_ok=True)

    # Load captions
    captions_dir = Path(args.captions_dir)
    if captions_dir.exists():
        captions = load_captions(captions_dir)
        print(f"Loaded captions for {len(captions)} motions")
    else:
        captions = {}
        print("No captions directory found")

    # Find all species
    species_dirs = sorted([d for d in zoo_dir.iterdir() if d.is_dir()])
    print(f"Found {len(species_dirs)} species")

    motion_ids = []
    skeletons = {}  # species_name → skeleton info
    all_local_pos = []
    all_velocities = []
    processed = 0
    failed = 0

    for species_dir in tqdm(species_dirs, desc="Species"):
        species_name = species_dir.name
        bvh_files = sorted(species_dir.glob('*.bvh'))
        if not bvh_files:
            continue

        species_skeleton = None

        for bvh_path in bvh_files:
            result = process_bvh_file(
                bvh_path, args.target_fps, args.max_frames, args.min_frames,
                do_remove_end_sites=True,
            )
            if result is None:
                failed += 1
                continue

            skeleton = result['skeleton']
            data = result['data']

            # Store skeleton per species (first one)
            if species_skeleton is None:
                species_skeleton = skeleton
                skeletons[species_name] = {
                    'num_joints': skeleton.num_joints,
                    'joint_names': skeleton.joint_names,
                }
                np.savez(
                    output_dir / 'skeletons' / f'{species_name}.npz',
                    **skeleton.to_dict(),
                )

            # Match captions
            # Try: "Species/filename" format
            bvh_stem = bvh_path.stem.lstrip('_')  # remove leading underscores
            caption_keys = [
                f"{species_name}/{bvh_stem}",
                f"{species_name}/{species_name}-{bvh_stem}",
                f"{species_name}/{species_name}_{bvh_stem}",
            ]
            text_list = []
            for ck in caption_keys:
                if ck in captions:
                    text_list = captions[ck]
                    break

            # Also try fuzzy match
            if not text_list:
                for ck, caps in captions.items():
                    if (species_name.lower() in ck.lower()
                            and bvh_stem.lower().replace('_', '-') in ck.lower().replace('_', '-')):
                        text_list = caps
                        break

            motion_id = f"{species_name}_{processed:04d}"
            data['skeleton_id'] = species_name
            data['texts'] = '|||'.join(text_list[:5]) if text_list else ''
            data['species'] = species_name
            data['source_file'] = bvh_path.name

            np.savez_compressed(
                output_dir / 'motions' / f'{motion_id}.npz',
                **data,
            )
            motion_ids.append(motion_id)

            if processed % 5 == 0:
                all_local_pos.append(data['local_positions'])
                all_velocities.append(data['velocities'])

            processed += 1

    print(f"\nProcessed: {processed}, Failed: {failed}")
    print(f"Species with skeletons: {len(skeletons)}")

    if not motion_ids:
        print("No motions processed!")
        return

    # Save a "representative" skeleton (the first one, for compatibility)
    first_species = list(skeletons.keys())[0]
    first_skel = dict(np.load(output_dir / 'skeletons' / f'{first_species}.npz', allow_pickle=True))
    np.savez(output_dir / 'skeleton.npz', **first_skel)

    # Stats: flatten across variable joint counts
    all_pos_flat = np.concatenate([p.reshape(-1, 3) for p in all_local_pos], axis=0)
    all_vel_flat = np.concatenate([v.reshape(-1, 3) for v in all_velocities], axis=0)
    stats = {
        'local_pos_mean': np.zeros((1, 3), dtype=np.float32),
        'local_pos_std': all_pos_flat.std(axis=0, keepdims=True).astype(np.float32) + 1e-8,
        'velocity_mean': np.zeros((1, 3), dtype=np.float32),
        'velocity_std': all_vel_flat.std(axis=0, keepdims=True).astype(np.float32) + 1e-8,
        'root_vel_mean': np.zeros(3, dtype=np.float32),
        'root_vel_std': np.ones(3, dtype=np.float32),
    }
    np.savez(output_dir / 'stats.npz', **stats)

    # Splits
    np.random.seed(42)
    indices = np.random.permutation(len(motion_ids))
    n_train = int(0.8 * len(indices))
    n_val = int(0.1 * len(indices))
    splits = {
        'train': [motion_ids[i] for i in indices[:n_train]],
        'val': [motion_ids[i] for i in indices[n_train:n_train + n_val]],
        'test': [motion_ids[i] for i in indices[n_train + n_val:]],
        'all': motion_ids,
    }
    for split_name, ids in splits.items():
        with open(output_dir / 'splits' / f'{split_name}.txt', 'w') as f:
            for mid in ids:
                f.write(f'{mid}\n')
        print(f"  {split_name}: {len(ids)} motions")

    # Summary
    print(f"\nSkeleton diversity:")
    for sp, info in sorted(skeletons.items()):
        print(f"  {sp:15s}: {info['num_joints']:3d} joints")

    # Count text matches
    text_count = sum(
        1 for mid in motion_ids
        if (output_dir / 'motions' / f'{mid}.npz').exists()
        and str(np.load(output_dir / 'motions' / f'{mid}.npz', allow_pickle=True).get('texts', '')) != ''
    )
    print(f"\nMotions with text: {text_count}/{len(motion_ids)}")
    print(f"\nDone! Output: {output_dir}")


if __name__ == '__main__':
    main()
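
# A minimal sketch for inspecting one output file (the motion id below is
# hypothetical; the key names match what this script writes):
#   import numpy as np
#   m = np.load('data/processed/truebones_zoo/motions/Bear_0000.npz', allow_pickle=True)
#   print(str(m['skeleton_id']), str(m['source_file']), str(m['texts']).split('|||'))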