Tevior committed on
Commit
4d2eebd
·
verified ·
1 Parent(s): 3a78f46

Upload src/data/unified_dataset.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. src/data/unified_dataset.py +276 -0
src/data/unified_dataset.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unified multi-skeleton motion dataset for TopoSlots (Scheme C).
3
+
4
+ Motion representation:
5
+ Slot token input : per-joint [local_pos(3) + velocity(3)] = 6D (cross-skeleton)
6
+ Decoder GT : per-joint local_rotations_6d (skeleton-specific, FK supervision)
7
+ Root track : root_position(3) + root_velocity(3) (separate)
8
+ Auxiliary : foot_contact(4), bone_lengths, accelerations (losses)
9
+ """
10
+
11
+ import numpy as np
12
+ import torch
13
+ from torch.utils.data import Dataset
14
+ from pathlib import Path
15
+ from typing import Optional
16
+
17
+
18
+ class UnifiedMotionDataset(Dataset):
19
+ """
20
+ Multi-skeleton motion dataset with unified format.
21
+
22
+ Each sample returns:
23
+ - motion_features: [T, J, D] padded to max_joints
24
+ - skeleton_features: [J, D_skel] padded to max_joints
25
+ - joint_mask: [J] boolean mask (True = valid joint)
26
+ - adjacency: [J, J] padded adjacency matrix
27
+ - geodesic_dist: [J, J] padded geodesic distances
28
+ - text: str (empty if unavailable)
29
+ - metadata: dict
30
+ """
31
+
32
+ def __init__(
33
+ self,
34
+ data_dirs: list[str | Path],
35
+ split: str = 'train',
36
+ max_joints: int = 128,
37
+ max_frames: int = 196,
38
+ target_fps: float = 20.0,
39
+ motion_dim: int = 6, # local_pos (3) + velocity (3)
40
+ ):
41
+ self.data_dirs = [Path(d) for d in data_dirs]
42
+ self.split = split
43
+ self.max_joints = max_joints
44
+ self.max_frames = max_frames
45
+ self.target_fps = target_fps
46
+ self.motion_dim = motion_dim
47
+
48
+ # Load all samples
49
+ self.samples = []
50
+ self.skeletons = {} # skeleton_id -> skeleton data
51
+ self.stats = {} # skeleton_id -> normalization stats
52
+
53
+ for data_dir in self.data_dirs:
54
+ self._load_data_source(data_dir)
55
+
56
+ print(f"UnifiedMotionDataset [{split}]: {len(self.samples)} motions, "
57
+ f"{len(self.skeletons)} skeleton types")
58
+
59
+ def _load_data_source(self, data_dir: Path):
60
+ """Load one data source (e.g., processed/humanml3d)."""
61
+ if not data_dir.exists():
62
+ print(f" Warning: {data_dir} not found, skipping")
63
+ return
64
+
65
+ # Load skeleton
66
+ skel_path = data_dir / 'skeleton.npz'
67
+ if skel_path.exists():
68
+ skel_data = dict(np.load(skel_path, allow_pickle=True))
69
+ skeleton_id = data_dir.name
70
+ self.skeletons[skeleton_id] = skel_data
71
+
72
+ # Load stats
73
+ stats_path = data_dir / 'stats.npz'
74
+ if stats_path.exists():
75
+ self.stats[data_dir.name] = dict(np.load(stats_path))
76
+
77
+ # Load split
78
+ split_path = data_dir / 'splits' / f'{self.split}.txt'
79
+ if not split_path.exists():
80
+ # Fall back to all.txt
81
+ split_path = data_dir / 'splits' / 'all.txt'
82
+ if not split_path.exists():
83
+ print(f" Warning: no split file for {data_dir.name}, skipping")
84
+ return
85
+
86
+ motion_ids = []
87
+ with open(split_path, 'r') as f:
88
+ for line in f:
89
+ line = line.strip()
90
+ if line:
91
+ motion_ids.append(line)
92
+
93
+ for mid in motion_ids:
94
+ motion_path = data_dir / 'motions' / f'{mid}.npz'
95
+ if motion_path.exists():
96
+ self.samples.append({
97
+ 'motion_path': str(motion_path),
98
+ 'motion_id': mid,
99
+ 'data_source': data_dir.name,
100
+ 'skeleton_id': data_dir.name,
101
+ })
102
+
103
+ def __len__(self) -> int:
104
+ return len(self.samples)
105
+
106
+ def __getitem__(self, idx: int) -> dict:
107
+ sample_info = self.samples[idx]
108
+
109
+ # Load motion data
110
+ data = dict(np.load(sample_info['motion_path'], allow_pickle=True))
111
+
112
+ # Get skeleton info
113
+ skeleton_id = sample_info['skeleton_id']
114
+ skel_data = self.skeletons.get(skeleton_id, {})
115
+
116
+ # Extract motion features
117
+ local_pos = data['local_positions'] # [T, J, 3]
118
+ velocities = data['velocities'] # [T, J, 3]
119
+ T, J, _ = local_pos.shape
120
+
121
+ # Normalize if stats available
122
+ if skeleton_id in self.stats:
123
+ stats = self.stats[skeleton_id]
124
+ local_pos = (local_pos - stats['local_pos_mean']) / stats['local_pos_std']
125
+ velocities = (velocities - stats['velocity_mean']) / stats['velocity_std']
126
+
127
+ # Concatenate motion features: [T, J, 6]
128
+ motion_features = np.concatenate([local_pos, velocities], axis=-1)
129
+
130
+ # Crop/pad temporal dimension
131
+ if T > self.max_frames:
132
+ # Random crop during training
133
+ if self.split == 'train':
134
+ start = np.random.randint(0, T - self.max_frames)
135
+ else:
136
+ start = 0
137
+ motion_features = motion_features[start:start + self.max_frames]
138
+ actual_frames = self.max_frames
139
+ else:
140
+ actual_frames = T
141
+ # Pad with zeros
142
+ pad = np.zeros(
143
+ (self.max_frames - T, J, self.motion_dim),
144
+ dtype=np.float32,
145
+ )
146
+ motion_features = np.concatenate([motion_features, pad], axis=0)
147
+
148
+ # Pad joint dimension
149
+ padded_motion = np.zeros(
150
+ (self.max_frames, self.max_joints, self.motion_dim),
151
+ dtype=np.float32,
152
+ )
153
+ padded_motion[:, :J, :] = motion_features
154
+
155
+ # Joint mask
156
+ joint_mask = np.zeros(self.max_joints, dtype=np.bool_)
157
+ joint_mask[:J] = True
158
+
159
+ # Frame mask
160
+ frame_mask = np.zeros(self.max_frames, dtype=np.bool_)
161
+ frame_mask[:actual_frames] = True
162
+
163
+ # Skeleton features
164
+ skeleton_features = np.zeros(
165
+ (self.max_joints, 9), dtype=np.float32
166
+ )
167
+ if 'joint_names' in skel_data:
168
+ from .skeleton_graph import SkeletonGraph
169
+ sg = SkeletonGraph.from_dict(skel_data)
170
+ skel_feats = sg.get_joint_features() # [J, 9]
171
+ skeleton_features[:J] = skel_feats
172
+
173
+ # Adjacency and geodesic distance matrices
174
+ adjacency = np.zeros(
175
+ (self.max_joints, self.max_joints), dtype=np.float32
176
+ )
177
+ geodesic_dist = np.zeros(
178
+ (self.max_joints, self.max_joints), dtype=np.float32
179
+ )
180
+ if 'adjacency' in skel_data:
181
+ adj = skel_data['adjacency']
182
+ adjacency[:J, :J] = adj
183
+ if 'geodesic_dist' in skel_data:
184
+ gdist = skel_data['geodesic_dist']
185
+ geodesic_dist[:J, :J] = gdist
186
+
187
+ # Text
188
+ text = ''
189
+ if 'texts' in data:
190
+ texts_str = str(data['texts'])
191
+ if texts_str:
192
+ text_list = texts_str.split('|||')
193
+ if text_list and text_list[0]:
194
+ # Random text during training
195
+ if self.split == 'train':
196
+ text = text_list[np.random.randint(len(text_list))]
197
+ else:
198
+ text = text_list[0]
199
+
200
+ # --- Root track (separate from slot tokens) ---
201
+ root_pos = data.get('root_position', np.zeros((T, 3), dtype=np.float32))
202
+ root_vel = data.get('root_velocity', np.zeros((T, 3), dtype=np.float32))
203
+ padded_root_pos = np.zeros((self.max_frames, 3), dtype=np.float32)
204
+ padded_root_vel = np.zeros((self.max_frames, 3), dtype=np.float32)
205
+ padded_root_pos[:actual_frames] = root_pos[:actual_frames]
206
+ padded_root_vel[:actual_frames] = root_vel[:actual_frames]
207
+
208
+ # --- Foot contact: [T, 4] (l_heel, l_toe, r_heel, r_toe) ---
209
+ fc_raw = data.get('foot_contact', np.zeros((T, 4), dtype=np.float32))
210
+ if fc_raw.shape[-1] == 2:
211
+ # Legacy 2-channel → duplicate into 4-channel
212
+ fc_4ch = np.zeros((fc_raw.shape[0], 4), dtype=np.float32)
213
+ fc_4ch[:, 0] = fc_4ch[:, 1] = fc_raw[:, 0]
214
+ fc_4ch[:, 2] = fc_4ch[:, 3] = fc_raw[:, 1]
215
+ fc_raw = fc_4ch
216
+ padded_contact = np.zeros((self.max_frames, 4), dtype=np.float32)
217
+ padded_contact[:actual_frames] = fc_raw[:actual_frames]
218
+
219
+ # --- Decoder GT: local rotations 6D (skeleton-specific, for FK supervision) ---
220
+ rot_6d = data.get('local_rotations_6d', None)
221
+ if rot_6d is not None:
222
+ # [T, J-1, 6] → pad to [T_max, J_max, 6]
223
+ Jr = rot_6d.shape[1] # J-1 (non-root)
224
+ padded_rot = np.zeros((self.max_frames, self.max_joints, 6), dtype=np.float32)
225
+ T_rot = min(rot_6d.shape[0], actual_frames)
226
+ padded_rot[:T_rot, :Jr, :] = rot_6d[:T_rot]
227
+ has_rotations = True
228
+ else:
229
+ padded_rot = np.zeros((self.max_frames, self.max_joints, 6), dtype=np.float32)
230
+ has_rotations = False
231
+
232
+ # --- Bone lengths [T, J] ---
233
+ bone_raw = data.get('bone_lengths', np.zeros((T, J), dtype=np.float32))
234
+ padded_bones = np.zeros((self.max_frames, self.max_joints), dtype=np.float32)
235
+ padded_bones[:actual_frames, :J] = bone_raw[:actual_frames]
236
+
237
+ return {
238
+ # Slot token input: per-joint [local_pos(3) + velocity(3)] = 6D
239
+ 'motion_features': torch.from_numpy(padded_motion), # [T, J_max, 6]
240
+ # Skeleton graph
241
+ 'skeleton_features': torch.from_numpy(skeleton_features), # [J_max, 9]
242
+ 'joint_mask': torch.from_numpy(joint_mask), # [J_max]
243
+ 'frame_mask': torch.from_numpy(frame_mask), # [T_max]
244
+ 'adjacency': torch.from_numpy(adjacency), # [J_max, J_max]
245
+ 'geodesic_dist': torch.from_numpy(geodesic_dist), # [J_max, J_max]
246
+ # Root track (separate)
247
+ 'root_position': torch.from_numpy(padded_root_pos), # [T_max, 3]
248
+ 'root_velocity': torch.from_numpy(padded_root_vel), # [T_max, 3]
249
+ # Decoder GT (skeleton-specific)
250
+ 'local_rotations_6d': torch.from_numpy(padded_rot), # [T_max, J_max, 6]
251
+ 'has_rotations': has_rotations,
252
+ # Auxiliary
253
+ 'foot_contact': torch.from_numpy(padded_contact), # [T_max, 4]
254
+ 'bone_lengths': torch.from_numpy(padded_bones), # [T_max, J_max]
255
+ # Metadata
256
+ 'text': text,
257
+ 'num_joints': J,
258
+ 'num_frames': actual_frames,
259
+ 'skeleton_id': skeleton_id,
260
+ 'motion_id': sample_info['motion_id'],
261
+ }
262
+
263
+
264
def collate_fn(batch: list[dict]) -> dict:
    """Collate samples into a batch, keeping non-tensor fields as lists.

    Tensors are stacked along a new leading batch dimension, numeric
    scalars are packed into a 1-D tensor, and 'text' (plus any other
    non-numeric field such as string ids) is returned as a plain list.
    """
    def _merge(key: str):
        values = [sample[key] for sample in batch]
        if key == 'text':
            # Variable-length captions stay as a list of strings.
            return values
        first = batch[0][key]
        if isinstance(first, torch.Tensor):
            return torch.stack(values)
        if isinstance(first, (int, float)):
            return torch.tensor(values)
        return values

    return {key: _merge(key) for key in batch[0]}