| """ |
| Convert HumanML3D data (SMPL-based .npy format) into our unified representation. |
| |
| HumanML3D stores motions as: |
| - new_joints/XXXXXX.npy: [T, 22, 3] joint positions (SMPL 22-joint skeleton) |
| - new_joint_vecs/XXXXXX.npy: [T, 263] rotation-invariant features |
| - texts/XXXXXX.txt: text descriptions (multiple per motion) |
| |
| We convert to: |
| - SkeletonGraph (fixed SMPL-22 topology) |
| - Motion dict with positions, velocities, and text annotations |
| """ |
|
|
| import numpy as np |
| from pathlib import Path |
| from typing import Optional |
|
|
| from .skeleton_graph import SkeletonGraph |
|
|
|
|
| |
# Canonical joint names for the 22-joint SMPL skeleton used by HumanML3D.
# Index order matches the second axis of the [T, 22, 3] arrays in
# new_joints/*.npy and the SMPL_22_PARENTS table.
SMPL_22_JOINT_NAMES = [
    'pelvis',          # 0  (root)
    'left_hip',        # 1
    'right_hip',       # 2
    'spine1',          # 3
    'left_knee',       # 4
    'right_knee',      # 5
    'spine2',          # 6
    'left_ankle',      # 7
    'right_ankle',     # 8
    'spine3',          # 9
    'left_foot',       # 10
    'right_foot',      # 11
    'neck',            # 12
    'left_collar',     # 13
    'right_collar',    # 14
    'head',            # 15
    'left_shoulder',   # 16
    'right_shoulder',  # 17
    'left_elbow',      # 18
    'right_elbow',     # 19
    'left_wrist',      # 20
    'right_wrist',     # 21
]
|
|
# Parent joint index for each entry of SMPL_22_JOINT_NAMES; -1 marks the root.
SMPL_22_PARENTS = [
    -1,  # pelvis (root)
    0,   # left_hip       -> pelvis
    0,   # right_hip      -> pelvis
    0,   # spine1         -> pelvis
    1,   # left_knee      -> left_hip
    2,   # right_knee     -> right_hip
    3,   # spine2         -> spine1
    4,   # left_ankle     -> left_knee
    5,   # right_ankle    -> right_knee
    6,   # spine3         -> spine2
    7,   # left_foot      -> left_ankle
    8,   # right_foot     -> right_ankle
    9,   # neck           -> spine3
    9,   # left_collar    -> spine3
    9,   # right_collar   -> spine3
    12,  # head           -> neck
    13,  # left_shoulder  -> left_collar
    14,  # right_shoulder -> right_collar
    16,  # left_elbow     -> left_shoulder
    17,  # right_elbow    -> right_shoulder
    18,  # left_wrist     -> left_elbow
    19,  # right_wrist    -> right_elbow
]
|
|
|
|
def get_smpl22_skeleton(rest_pose: Optional[np.ndarray] = None) -> "SkeletonGraph":
    """
    Build the SMPL 22-joint skeleton graph.

    Args:
        rest_pose: [22, 3] rest-pose joint positions. If None, uses default
            approximate T-pose positions (y-up, rough limb proportions).

    Returns:
        SkeletonGraph for SMPL-22 with parent-relative rest offsets.

    Raises:
        ValueError: if `rest_pose` is provided but does not have shape [22, 3].
    """
    if rest_pose is None:
        # Default T-pose global positions, one row per joint in
        # SMPL_22_JOINT_NAMES order.
        rest_pose = np.array([
            [0.0, 0.0, 0.0],
            [0.08, -0.05, 0.0],
            [-0.08, -0.05, 0.0],
            [0.0, 0.1, 0.0],
            [0.0, -0.4, 0.0],
            [0.0, -0.4, 0.0],
            [0.0, 0.15, 0.0],
            [0.0, -0.4, 0.0],
            [0.0, -0.4, 0.0],
            [0.0, 0.15, 0.0],
            [0.0, -0.05, 0.1],
            [0.0, -0.05, 0.1],
            [0.0, 0.12, 0.0],
            [0.05, 0.0, 0.0],
            [-0.05, 0.0, 0.0],
            [0.0, 0.12, 0.0],
            [0.15, 0.0, 0.0],
            [-0.15, 0.0, 0.0],
            [0.25, 0.0, 0.0],
            [-0.25, 0.0, 0.0],
            [0.25, 0.0, 0.0],
            [-0.25, 0.0, 0.0],
        ], dtype=np.float32)
    else:
        # Validate caller-supplied poses early; a wrong shape would otherwise
        # surface as a confusing broadcast error below.
        rest_pose = np.asarray(rest_pose, dtype=np.float32)
        expected = (len(SMPL_22_JOINT_NAMES), 3)
        if rest_pose.shape != expected:
            raise ValueError(
                f"rest_pose must have shape {expected}, got {rest_pose.shape}"
            )

    # Convert global rest positions to parent-relative offsets; the root
    # (parent == -1) keeps its global position as its offset.
    parents = np.asarray(SMPL_22_PARENTS)
    offsets = rest_pose.copy()
    has_parent = parents >= 0
    offsets[has_parent] = rest_pose[has_parent] - rest_pose[parents[has_parent]]

    return SkeletonGraph(
        joint_names=SMPL_22_JOINT_NAMES,
        parent_indices=SMPL_22_PARENTS,
        rest_offsets=offsets,
    )
|
|
|
|
| def load_humanml3d_motion( |
| motion_id: str, |
| data_dir: str | Path, |
| ) -> dict: |
| """ |
| Load a single HumanML3D motion sample. |
| |
| Args: |
| motion_id: e.g., '000000' |
| data_dir: path to HumanML3D directory |
| |
| Returns: |
| dict with keys: |
| - 'joint_positions': [T, 22, 3] global joint positions |
| - 'joint_vecs': [T, 263] rotation-invariant features (if available) |
| - 'texts': list of text descriptions |
| - 'motion_id': str |
| """ |
| data_dir = Path(data_dir) |
|
|
| |
| joints_path = data_dir / 'new_joints' / f'{motion_id}.npy' |
| joint_positions = np.load(joints_path) |
|
|
| |
| vecs_path = data_dir / 'new_joint_vecs' / f'{motion_id}.npy' |
| joint_vecs = None |
| if vecs_path.exists(): |
| joint_vecs = np.load(vecs_path) |
|
|
| |
| text_path = data_dir / 'texts' / f'{motion_id}.txt' |
| texts = [] |
| if text_path.exists(): |
| with open(text_path, 'r') as f: |
| for line in f: |
| line = line.strip() |
| if line: |
| |
| parts = line.split('#') |
| if parts: |
| texts.append(parts[0].strip()) |
|
|
| return { |
| 'joint_positions': joint_positions.astype(np.float32), |
| 'joint_vecs': joint_vecs, |
| 'texts': texts, |
| 'motion_id': motion_id, |
| } |
|
|
|
|
def compute_motion_features(
    joint_positions: np.ndarray,
    skeleton: "SkeletonGraph",
    fps: float = 20.0,
) -> dict:
    """
    Compute motion features from joint positions for TopoSlots (Scheme C).

    Scheme C:
    - Slot tokens: per-joint [local_pos(3) + velocity(3)] = 6D (cross-skeleton compatible)
    - Decoder GT: per-joint rotations via FK supervision (skeleton-specific)
    - Root trajectory: separate track
    - Foot contact: auxiliary loss

    Args:
        joint_positions: [T, J, 3] global joint positions (joint 0 is the root).
        skeleton: SkeletonGraph (provides parent_indices and joint_names).
        fps: frames per second (scales finite differences to per-second units).

    Returns:
        dict with:
            - 'root_position': [T, 3]
            - 'root_velocity': [T, 3]
            - 'local_positions': [T, J, 3] root-relative joint positions
            - 'velocities': [T, J, 3] joint velocities
            - 'accelerations': [T, J, 3] joint accelerations
            - 'bone_lengths': [T, J] per-frame bone lengths (0 for the root)
            - 'foot_contact': [T, 4] 4-channel (l_heel, l_toe, r_heel, r_toe)
    """
    T, J, _ = joint_positions.shape

    # Root joint is index 0 by convention (pelvis for SMPL-22).
    root_pos = joint_positions[:, 0, :]

    # Root-relative positions: translation removed, orientation untouched.
    local_pos = joint_positions - root_pos[:, None, :]

    # Backward finite-difference velocities; frame 0 copies frame 1 so the
    # output keeps length T. Guarded: for single-frame clips the difference
    # is undefined and velocities stay zero (unguarded vel[0] = vel[1] would
    # raise IndexError when T == 1).
    vel = np.zeros_like(joint_positions)
    if T > 1:
        vel[1:] = (joint_positions[1:] - joint_positions[:-1]) * fps
        vel[0] = vel[1]

    root_vel = vel[:, 0, :]

    # Accelerations from the (already padded) velocities, same padding rule.
    acc = np.zeros_like(vel)
    if T > 1:
        acc[1:] = (vel[1:] - vel[:-1]) * fps
        acc[0] = acc[1]

    # Per-frame bone lengths; the root has no parent bone and stays 0.
    bone_lengths = np.zeros((T, J), dtype=np.float32)
    for j in range(J):
        p = skeleton.parent_indices[j]
        if p >= 0:
            bone_lengths[:, j] = np.linalg.norm(
                joint_positions[:, j] - joint_positions[:, p], axis=-1
            )

    foot_contact = _detect_foot_contact(joint_positions, vel, skeleton)

    return {
        'root_position': root_pos,
        'root_velocity': root_vel,
        'local_positions': local_pos,
        'velocities': vel,
        'accelerations': acc,
        'bone_lengths': bone_lengths,
        'foot_contact': foot_contact,
    }


def _detect_foot_contact(
    positions: np.ndarray,
    velocities: np.ndarray,
    skeleton: "SkeletonGraph",
    vel_thresh: Optional[float] = None,
) -> np.ndarray:
    """
    Detect 4-channel foot contact: [l_heel, l_toe, r_heel, r_toe].

    A channel is "in contact" on frames where its joint is both slow and near
    that joint's own lowest heights. Thresholds scale with first-frame body
    height so the heuristic adapts to data scale (meters vs centimeters).

    Args:
        positions: [T, J, 3] global joint positions (y is up).
        velocities: [T, J, 3] joint velocities.
        skeleton: SkeletonGraph providing joint names for foot joint lookup.
        vel_thresh: absolute speed threshold; derived from body height when None.

    Returns:
        [T, 4] float32 array of 0/1 contact flags. Channels whose joints
        cannot be matched by name remain all-zero.
    """
    T = positions.shape[0]
    foot_contact = np.zeros((T, 4), dtype=np.float32)

    # Scale estimate from the first frame's vertical extent; bail out on
    # degenerate (near-flat) data where the thresholds would be meaningless.
    body_height = positions[0, :, 1].max() - positions[0, :, 1].min()
    if body_height < 0.01:
        return foot_contact

    if vel_thresh is None:
        vel_thresh = 0.3 * body_height
    height_margin = 0.03 * body_height

    names_lower = [n.lower() for n in skeleton.joint_names]

    # Map the 4 channels onto joints by fuzzy name matching (ankle ~ heel,
    # foot/toe ~ toe); first match wins, so ankles are preferred as heels.
    joint_map = {
        'l_heel': None, 'l_toe': None,
        'r_heel': None, 'r_toe': None,
    }
    for j, n in enumerate(names_lower):
        is_left = 'left' in n or n.startswith('l_') or n.startswith('l ') or 'leftfoot' in n.replace(' ', '')
        is_right = 'right' in n or n.startswith('r_') or n.startswith('r ') or 'rightfoot' in n.replace(' ', '')
        is_ankle = 'ankle' in n or 'heel' in n
        is_foot = 'foot' in n or 'toe' in n

        if is_left and is_ankle and joint_map['l_heel'] is None:
            joint_map['l_heel'] = j
        elif is_left and is_foot and joint_map['l_toe'] is None:
            joint_map['l_toe'] = j
        elif is_right and is_ankle and joint_map['r_heel'] is None:
            joint_map['r_heel'] = j
        elif is_right and is_foot and joint_map['r_toe'] is None:
            joint_map['r_toe'] = j

    channels = ['l_heel', 'l_toe', 'r_heel', 'r_toe']
    for ch_idx, ch_name in enumerate(channels):
        jidx = joint_map[ch_name]
        if jidx is None:
            continue
        jvel = np.linalg.norm(velocities[:, jidx, :], axis=-1)
        jheight = positions[:, jidx, 1]
        # Contact = slow AND close to this joint's own low point (10th
        # percentile), with a small margin to tolerate height noise.
        height_thresh = np.percentile(jheight, 10) + height_margin
        foot_contact[:, ch_idx] = (
            (jvel < vel_thresh) & (jheight < height_thresh)
        ).astype(np.float32)

    return foot_contact
|
|
|
|
def extract_rotations_from_263d(joint_vecs: np.ndarray) -> dict:
    """
    Extract structured features from a HumanML3D-style feature vector.

    The layout for a J-joint skeleton has total dimension 12*J - 1
    (263 for HumanML3D's 22 joints, 251 for KIT-ML's 21 joints), so J is
    inferred from the input's last axis:

        [0:1]                root angular velocity (y-axis)
        [1:3]                root linear velocity (xz)
        [3:4]                root height (y)
        [4 : 4+3(J-1)]       joint positions relative to root
        [... : +6(J-1)]      joint 6D continuous rotations
        [... : +3J]          joint velocities
        [-4:]                foot contact (4 channels)

    Args:
        joint_vecs: [T, 12*J - 1] feature array.

    Returns:
        dict with:
            - 'root_angular_vel': [T, 1]
            - 'root_linear_vel': [T, 2]
            - 'root_height': [T, 1]
            - 'ric_positions': [T, J-1, 3]
            - 'local_rotations_6d': [T, J-1, 6]
            - 'joint_velocities': [T, J, 3]
            - 'foot_contact_4ch': [T, 4]

    Raises:
        ValueError: if the feature dimension does not match the 12*J - 1 layout.
    """
    T, dim = joint_vecs.shape
    if (dim + 1) % 12 != 0:
        raise ValueError(
            f"Feature dimension {dim} does not match the 12*J - 1 layout"
        )
    num_joints = (dim + 1) // 12  # 263 -> 22, 251 -> 21

    # Section boundaries derived from the joint count.
    ric_end = 4 + 3 * (num_joints - 1)        # 67 for J=22
    rot_end = ric_end + 6 * (num_joints - 1)  # 193 for J=22
    vel_end = rot_end + 3 * num_joints        # 259 for J=22

    return {
        'root_angular_vel': joint_vecs[:, 0:1],
        'root_linear_vel': joint_vecs[:, 1:3],
        'root_height': joint_vecs[:, 3:4],
        'ric_positions': joint_vecs[:, 4:ric_end].reshape(T, num_joints - 1, 3),
        'local_rotations_6d': joint_vecs[:, ric_end:rot_end].reshape(T, num_joints - 1, 6),
        'joint_velocities': joint_vecs[:, rot_end:vel_end].reshape(T, num_joints, 3),
        'foot_contact_4ch': joint_vecs[:, vel_end:],
    }
|
|
|
|
| def load_humanml3d_split( |
| data_dir: str | Path, |
| split: str = 'train', |
| ) -> list[str]: |
| """Load motion IDs for a data split.""" |
| data_dir = Path(data_dir) |
| split_file = data_dir / f'{split}.txt' |
|
|
| if not split_file.exists(): |
| raise FileNotFoundError(f"Split file not found: {split_file}") |
|
|
| motion_ids = [] |
| with open(split_file, 'r') as f: |
| for line in f: |
| line = line.strip() |
| if line: |
| motion_ids.append(line) |
|
|
| return motion_ids |
|
|