# TopoSlots-MotionData / src/data/humanml3d_converter.py
# Uploaded with huggingface_hub (commit 3a78f46, verified)
"""
Convert HumanML3D data (SMPL-based .npy format) into our unified representation.
HumanML3D stores motions as:
- new_joints/XXXXXX.npy: [T, 22, 3] joint positions (SMPL 22-joint skeleton)
- new_joint_vecs/XXXXXX.npy: [T, 263] rotation-invariant features
- texts/XXXXXX.txt: text descriptions (multiple per motion)
We convert to:
- SkeletonGraph (fixed SMPL-22 topology)
- Motion dict with positions, velocities, and text annotations
"""
import numpy as np
from pathlib import Path
from typing import Optional
from .skeleton_graph import SkeletonGraph
# SMPL 22-joint skeleton definition
# Joint names in canonical SMPL order (index noted per line).
SMPL_22_JOINT_NAMES: list[str] = [
    'pelvis',          # 0
    'left_hip',        # 1
    'right_hip',       # 2
    'spine1',          # 3
    'left_knee',       # 4
    'right_knee',      # 5
    'spine2',          # 6
    'left_ankle',      # 7
    'right_ankle',     # 8
    'spine3',          # 9
    'left_foot',       # 10
    'right_foot',      # 11
    'neck',            # 12
    'left_collar',     # 13
    'right_collar',    # 14
    'head',            # 15
    'left_shoulder',   # 16
    'right_shoulder',  # 17
    'left_elbow',      # 18
    'right_elbow',     # 19
    'left_wrist',      # 20
    'right_wrist',     # 21
]
# Kinematic tree: parent joint index for each joint; -1 marks the root.
SMPL_22_PARENTS: list[int] = [
    -1,  # 0 pelvis (root)
    0,   # 1 left_hip -> pelvis
    0,   # 2 right_hip -> pelvis
    0,   # 3 spine1 -> pelvis
    1,   # 4 left_knee -> left_hip
    2,   # 5 right_knee -> right_hip
    3,   # 6 spine2 -> spine1
    4,   # 7 left_ankle -> left_knee
    5,   # 8 right_ankle -> right_knee
    6,   # 9 spine3 -> spine2
    7,   # 10 left_foot -> left_ankle
    8,   # 11 right_foot -> right_ankle
    9,   # 12 neck -> spine3
    9,   # 13 left_collar -> spine3
    9,   # 14 right_collar -> spine3
    12,  # 15 head -> neck
    13,  # 16 left_shoulder -> left_collar
    14,  # 17 right_shoulder -> right_collar
    16,  # 18 left_elbow -> left_shoulder
    17,  # 19 right_elbow -> right_shoulder
    18,  # 20 left_wrist -> left_elbow
    19,  # 21 right_wrist -> right_elbow
]
def get_smpl22_skeleton(rest_pose: Optional[np.ndarray] = None) -> SkeletonGraph:
    """
    Build the SMPL 22-joint skeleton graph.

    Args:
        rest_pose: [22, 3] rest-pose joint positions. When omitted, a default
            set of T-pose values (approximated from the HumanML3D average) is
            used.
            NOTE(review): the per-joint defaults below look like bone offsets
            rather than absolute positions (e.g. knees at x=0 while hips sit
            at x=±0.08) — confirm against SkeletonGraph's expectations.

    Returns:
        SkeletonGraph for SMPL-22, with each joint's offset expressed relative
        to its parent (the root keeps its value unchanged).
    """
    if rest_pose is None:
        # Default T-pose values (approximate, from HumanML3D average)
        rest_pose = np.array([
            [0.0, 0.0, 0.0],      # pelvis
            [0.08, -0.05, 0.0],   # left_hip
            [-0.08, -0.05, 0.0],  # right_hip
            [0.0, 0.1, 0.0],      # spine1
            [0.0, -0.4, 0.0],     # left_knee
            [0.0, -0.4, 0.0],     # right_knee
            [0.0, 0.15, 0.0],     # spine2
            [0.0, -0.4, 0.0],     # left_ankle
            [0.0, -0.4, 0.0],     # right_ankle
            [0.0, 0.15, 0.0],     # spine3
            [0.0, -0.05, 0.1],    # left_foot
            [0.0, -0.05, 0.1],    # right_foot
            [0.0, 0.12, 0.0],     # neck
            [0.05, 0.0, 0.0],     # left_collar
            [-0.05, 0.0, 0.0],    # right_collar
            [0.0, 0.12, 0.0],     # head
            [0.15, 0.0, 0.0],     # left_shoulder
            [-0.15, 0.0, 0.0],    # right_shoulder
            [0.25, 0.0, 0.0],     # left_elbow
            [-0.25, 0.0, 0.0],    # right_elbow
            [0.25, 0.0, 0.0],     # left_wrist
            [-0.25, 0.0, 0.0],    # right_wrist
        ], dtype=np.float32)

    # Vectorized parent-relative offsets: joints with a parent subtract the
    # parent's position; the root (parent == -1) keeps its own value.
    parents = np.asarray(SMPL_22_PARENTS)
    has_parent = parents >= 0
    offsets = rest_pose.copy()
    offsets[has_parent] = rest_pose[has_parent] - rest_pose[parents[has_parent]]

    return SkeletonGraph(
        joint_names=SMPL_22_JOINT_NAMES,
        parent_indices=SMPL_22_PARENTS,
        rest_offsets=offsets,
    )
def load_humanml3d_motion(
motion_id: str,
data_dir: str | Path,
) -> dict:
"""
Load a single HumanML3D motion sample.
Args:
motion_id: e.g., '000000'
data_dir: path to HumanML3D directory
Returns:
dict with keys:
- 'joint_positions': [T, 22, 3] global joint positions
- 'joint_vecs': [T, 263] rotation-invariant features (if available)
- 'texts': list of text descriptions
- 'motion_id': str
"""
data_dir = Path(data_dir)
# Load joint positions
joints_path = data_dir / 'new_joints' / f'{motion_id}.npy'
joint_positions = np.load(joints_path) # [T, 22, 3]
# Load joint vectors (rotation-invariant features) if available
vecs_path = data_dir / 'new_joint_vecs' / f'{motion_id}.npy'
joint_vecs = None
if vecs_path.exists():
joint_vecs = np.load(vecs_path) # [T, 263]
# Load text descriptions
text_path = data_dir / 'texts' / f'{motion_id}.txt'
texts = []
if text_path.exists():
with open(text_path, 'r') as f:
for line in f:
line = line.strip()
if line:
# Format: "text#token1 token2#start#end"
parts = line.split('#')
if parts:
texts.append(parts[0].strip())
return {
'joint_positions': joint_positions.astype(np.float32),
'joint_vecs': joint_vecs,
'texts': texts,
'motion_id': motion_id,
}
def compute_motion_features(
    joint_positions: np.ndarray,
    skeleton: SkeletonGraph,
    fps: float = 20.0,
) -> dict:
    """
    Compute motion features from joint positions for TopoSlots (Scheme C).

    Scheme C:
        - Slot tokens: per-joint [local_pos(3) + velocity(3)] = 6D
          (cross-skeleton compatible)
        - Decoder GT: per-joint rotations via FK supervision (skeleton-specific)
        - Root trajectory: separate track
        - Foot contact: auxiliary loss

    Args:
        joint_positions: [T, J, 3] global joint positions
        skeleton: SkeletonGraph (parent_indices used for bone lengths)
        fps: frames per second (scales finite differences)

    Returns:
        dict with:
            - 'root_position': [T, 3]
            - 'root_velocity': [T, 3]
            - 'local_positions': [T, J, 3] root-relative joint positions
            - 'velocities': [T, J, 3] joint velocities
            - 'accelerations': [T, J, 3] joint accelerations
            - 'bone_lengths': [T, J] per-frame bone lengths (0 for the root)
            - 'foot_contact': [T, 4] 4-channel (l_heel, l_toe, r_heel, r_toe)
    """
    T, J, _ = joint_positions.shape

    # Root trajectory is joint 0.
    root_pos = joint_positions[:, 0, :]  # [T, 3]

    # Local positions (relative to root).
    local_pos = joint_positions - root_pos[:, None, :]  # [T, J, 3]

    # Backward finite differences; frame 0 copies frame 1 so output keeps
    # length T. Guarded so a single-frame clip (T == 1) yields all zeros
    # instead of an IndexError on vel[1] / acc[1].
    vel = np.zeros_like(joint_positions)
    if T > 1:
        vel[1:] = (joint_positions[1:] - joint_positions[:-1]) * fps
        vel[0] = vel[1]
    root_vel = vel[:, 0, :]  # [T, 3]

    # Accelerations (finite difference of velocity), same guard.
    acc = np.zeros_like(vel)
    if T > 1:
        acc[1:] = (vel[1:] - vel[:-1]) * fps
        acc[0] = acc[1]

    # Bone lengths per frame: distance from each joint to its parent.
    bone_lengths = np.zeros((T, J), dtype=np.float32)
    for j in range(J):
        p = skeleton.parent_indices[j]
        if p >= 0:
            bone_lengths[:, j] = np.linalg.norm(
                joint_positions[:, j] - joint_positions[:, p], axis=-1
            )

    # Foot contact: 4-channel detection via velocity + height heuristics.
    foot_contact = _detect_foot_contact(joint_positions, vel, skeleton)

    return {
        'root_position': root_pos,
        'root_velocity': root_vel,
        'local_positions': local_pos,
        'velocities': vel,
        'accelerations': acc,
        'bone_lengths': bone_lengths,
        'foot_contact': foot_contact,
    }
def _detect_foot_contact(
    positions: np.ndarray,
    velocities: np.ndarray,
    skeleton: SkeletonGraph,
    vel_thresh: Optional[float] = None,
) -> np.ndarray:
    """
    Detect 4-channel foot contact: [l_heel, l_toe, r_heel, r_toe].

    A frame counts as contact for a channel when the corresponding joint is
    both slow (speed below ``vel_thresh``) and low (height below the 10th
    percentile of that joint's own height trajectory plus a margin).
    Thresholds auto-adapt to the data scale (meters vs centimeters) via the
    first-frame body height.

    Args:
        positions: [T, J, 3] global joint positions; y is treated as up.
        velocities: [T, J, 3] per-joint velocities in matching units.
        skeleton: skeleton whose joint names are scanned for ankle/foot joints.
        vel_thresh: absolute speed threshold; derived from body height if None.

    Returns:
        [T, 4] float32 binary contact flags. All zeros for a degenerate pose
        or for channels with no matching joint.
    """
    T = positions.shape[0]
    foot_contact = np.zeros((T, 4), dtype=np.float32)

    # First-frame vertical extent stands in for body height and scales all
    # thresholds, so meter- and centimeter-scale data both work.
    body_height = positions[0, :, 1].max() - positions[0, :, 1].min()
    if body_height < 0.01:
        return foot_contact  # degenerate pose: no reliable scale

    # Velocity threshold proportional to body height:
    # ~0.5 m/s for a 1.7 m human -> 0.3 * body_height
    if vel_thresh is None:
        vel_thresh = 0.3 * body_height
    height_margin = 0.03 * body_height  # ~5 cm for a 1.7 m human

    names_lower = [n.lower() for n in skeleton.joint_names]
    # Map each channel to a joint index, matching several naming conventions
    # (e.g. 'left_ankle', 'l_foot', 'LeftFoot'); ankles proxy for heels.
    joint_map = {
        'l_heel': None, 'l_toe': None,
        'r_heel': None, 'r_toe': None,
    }
    for j, n in enumerate(names_lower):
        is_left = 'left' in n or n.startswith('l_') or n.startswith('l ') or 'leftfoot' in n.replace(' ', '')
        is_right = 'right' in n or n.startswith('r_') or n.startswith('r ') or 'rightfoot' in n.replace(' ', '')
        is_ankle = 'ankle' in n or 'heel' in n
        is_foot = 'foot' in n or 'toe' in n
        # First match wins for each channel.
        if is_left and is_ankle and joint_map['l_heel'] is None:
            joint_map['l_heel'] = j
        elif is_left and is_foot and joint_map['l_toe'] is None:
            joint_map['l_toe'] = j
        elif is_right and is_ankle and joint_map['r_heel'] is None:
            joint_map['r_heel'] = j
        elif is_right and is_foot and joint_map['r_toe'] is None:
            joint_map['r_toe'] = j

    channels = ['l_heel', 'l_toe', 'r_heel', 'r_toe']
    for ch_idx, ch_name in enumerate(channels):
        jidx = joint_map[ch_name]
        if jidx is None:
            continue  # channel unavailable on this skeleton
        jvel = np.linalg.norm(velocities[:, jidx, :], axis=-1)
        jheight = positions[:, jidx, 1]
        # Adaptive per-joint floor: 10th percentile of its own height track.
        height_thresh = np.percentile(jheight, 10) + height_margin
        foot_contact[:, ch_idx] = (
            (jvel < vel_thresh) & (jheight < height_thresh)
        ).astype(np.float32)

    return foot_contact
def extract_rotations_from_263d(joint_vecs: np.ndarray) -> dict:
    """
    Split the HumanML3D 263-dim feature vector into named components.

    Layout (22-joint SMPL):
        [0:1]     root angular velocity (y-axis)
        [1:3]     root linear velocity (xz)
        [3:4]     root height (y)
        [4:67]    joint positions relative to root (21 x 3 = 63)
        [67:193]  joint 6D continuous rotations (21 x 6 = 126)
        [193:259] joint velocities (22 x 3 = 66)
        [259:263] foot contact (4 channels)

    Args:
        joint_vecs: [T, 263] feature array.

    Returns:
        dict with:
            - 'root_angular_vel': [T, 1]
            - 'root_linear_vel': [T, 2]
            - 'root_height': [T, 1]
            - 'ric_positions': [T, 21, 3]
            - 'local_rotations_6d': [T, 21, 6]
            - 'joint_velocities': [T, 22, 3]
            - 'foot_contact_4ch': [T, 4]
    """
    n_frames = joint_vecs.shape[0]

    # Scalar/planar root tracks come straight off the front of the vector.
    out = {}
    out['root_angular_vel'] = joint_vecs[:, 0:1]
    out['root_linear_vel'] = joint_vecs[:, 1:3]
    out['root_height'] = joint_vecs[:, 3:4]
    # Flattened per-joint blocks are reshaped back to (joints, channels).
    out['ric_positions'] = joint_vecs[:, 4:67].reshape(n_frames, 21, 3)
    out['local_rotations_6d'] = joint_vecs[:, 67:193].reshape(n_frames, 21, 6)
    out['joint_velocities'] = joint_vecs[:, 193:259].reshape(n_frames, 22, 3)
    # Trailing 4 values are the binary foot-contact channels.
    out['foot_contact_4ch'] = joint_vecs[:, 259:263]
    return out
def load_humanml3d_split(
data_dir: str | Path,
split: str = 'train',
) -> list[str]:
"""Load motion IDs for a data split."""
data_dir = Path(data_dir)
split_file = data_dir / f'{split}.txt'
if not split_file.exists():
raise FileNotFoundError(f"Split file not found: {split_file}")
motion_ids = []
with open(split_file, 'r') as f:
for line in f:
line = line.strip()
if line:
motion_ids.append(line)
return motion_ids