# TopoSlots-MotionData — scripts/preprocess_humanml3d.py
# Uploaded via huggingface_hub (commit c3b2920, verified).
"""
Preprocess HumanML3D data into TopoSlots unified format.
Input: HumanML3D raw data (new_joints/*.npy, texts/*.txt)
Output: Processed data in data/processed/humanml3d/
- skeleton.npz: SMPL-22 skeleton graph
- motions/{motion_id}.npz: per-motion features
- splits/{train,val,test}.txt: data splits
- stats.npz: dataset statistics (mean, std)
"""
import sys
import argparse
from pathlib import Path
import numpy as np
from tqdm import tqdm
# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))
from src.data.skeleton_graph import SkeletonGraph
from src.data.humanml3d_converter import (
get_smpl22_skeleton,
load_humanml3d_motion,
compute_motion_features,
extract_rotations_from_263d,
load_humanml3d_split,
SMPL_22_JOINT_NAMES,
)
def preprocess_humanml3d(
    raw_dir: str,
    output_dir: str,
    target_fps: float = 20.0,
    max_frames: int = 196,  # ~10s at 20fps
    min_frames: int = 24,  # ~1.2s at 20fps
):
    """Preprocess raw HumanML3D data into the TopoSlots unified format.

    Loads raw motions via the project loaders and writes, under ``output_dir``:

    - ``skeleton.npz``        SMPL-22 skeleton graph
    - ``motions/{id}.npz``    per-motion features (Scheme C layout)
    - ``splits/{split}.txt``  train/val/test ids restricted to processed motions
    - ``stats.npz``           feature mean/std for normalization

    Args:
        raw_dir: Path to the raw HumanML3D data directory.
        output_dir: Destination directory for processed output.
        target_fps: Frame rate passed to feature computation (Hz).
        max_frames: Motions longer than this are truncated.
        min_frames: Motions shorter than this are skipped entirely.
    """
    raw_dir = Path(raw_dir)
    output_dir = Path(output_dir)
    # Create output directories
    (output_dir / 'motions').mkdir(parents=True, exist_ok=True)
    (output_dir / 'splits').mkdir(parents=True, exist_ok=True)

    # 1. Save skeleton graph
    print("Building SMPL-22 skeleton graph...")
    # Compute average rest pose from first 100 motions.  Best-effort: motions
    # that fail to load are simply skipped; if none load, fall back to the
    # converter's default rest pose (avg_rest_pose=None).
    train_ids = load_humanml3d_split(raw_dir, 'train')
    rest_poses = []
    for mid in train_ids[:100]:
        try:
            motion = load_humanml3d_motion(mid, raw_dir)
            rest_poses.append(motion['joint_positions'][0])  # first frame
        except Exception:
            continue
    avg_rest_pose = np.mean(rest_poses, axis=0) if rest_poses else None
    skeleton = get_smpl22_skeleton(avg_rest_pose)
    np.savez(
        output_dir / 'skeleton.npz',
        **skeleton.to_dict(),
    )
    print(f" Skeleton: {skeleton.num_joints} joints")

    # 2. Process all motions (union of every split, deduplicated)
    all_splits = {}
    for split in ['train', 'val', 'test']:
        try:
            ids = load_humanml3d_split(raw_dir, split)
            all_splits[split] = ids
        except FileNotFoundError:
            print(f" Warning: {split}.txt not found, skipping")
            all_splits[split] = []
    all_ids = set()
    for ids in all_splits.values():
        all_ids.update(ids)
    print(f"\nProcessing {len(all_ids)} motions...")

    # Statistics accumulators (subsampled 1-in-5 to bound memory)
    all_local_pos = []
    all_velocities = []
    all_root_vel = []
    processed_count = 0
    skipped_count = 0
    for motion_id in tqdm(sorted(all_ids)):
        try:
            # Load raw motion
            motion = load_humanml3d_motion(motion_id, raw_dir)
            joint_positions = motion['joint_positions']
            T = joint_positions.shape[0]
            # Filter by length
            if T < min_frames:
                skipped_count += 1
                continue
            joint_vecs = motion['joint_vecs']
            if T > max_frames:
                joint_positions = joint_positions[:max_frames]
                # BUGFIX: truncate the 263D vectors in lockstep with the joint
                # positions.  Previously only joint_positions was truncated, so
                # the length-equality check below failed for every motion longer
                # than max_frames and the rotation GT was silently dropped.
                if joint_vecs is not None:
                    joint_vecs = joint_vecs[:max_frames]
            # Compute position-based features (Scheme C: slot token input)
            features = compute_motion_features(
                joint_positions, skeleton, fps=target_fps
            )
            # Extract rotation-based features from 263D (Scheme C: decoder GT).
            # The length check guards against raw joint_vecs/joint arrays that
            # disagree in frame count.
            rot_features = None
            if joint_vecs is not None and joint_vecs.shape[0] == joint_positions.shape[0]:
                rot_features = extract_rotations_from_263d(joint_vecs)
            # Build save dict
            # --- Scheme C layout ---
            # Slot token input: local_positions [T,J,3] + velocities [T,J,3] = 6D per joint
            # Decoder GT: local_rotations_6d [T,J-1,6] (for FK supervision)
            # Root track: root_position [T,3] + root_velocity [T,3]
            # Auxiliary: foot_contact [T,4], bone_lengths [T,J], accelerations [T,J,3]
            save_dict = {
                # Slot token features (cross-skeleton compatible)
                'local_positions': features['local_positions'].astype(np.float32),  # [T, 22, 3]
                'velocities': features['velocities'].astype(np.float32),  # [T, 22, 3]
                # Root trajectory (separate track)
                'root_position': features['root_position'].astype(np.float32),  # [T, 3]
                'root_velocity': features['root_velocity'].astype(np.float32),  # [T, 3]
                # Decoder GT (skeleton-specific, for FK supervision)
                'joint_positions': joint_positions.astype(np.float32),  # [T, 22, 3]
                'accelerations': features['accelerations'].astype(np.float32),  # [T, 22, 3]
                'bone_lengths': features['bone_lengths'].astype(np.float32),  # [T, 22]
                # Auxiliary
                'foot_contact': features['foot_contact'].astype(np.float32),  # [T, 4]
                # Metadata
                'num_frames': joint_positions.shape[0],
                'fps': target_fps,
                'skeleton_id': 'smpl_22',
            }
            # Add rotation data if available (from 263D vector)
            if rot_features is not None:
                save_dict['local_rotations_6d'] = rot_features['local_rotations_6d'].astype(np.float32)  # [T, 21, 6]
                save_dict['foot_contact'] = rot_features['foot_contact_4ch'].astype(np.float32)  # [T, 4] (override with GT)
            # Save texts joined with a '|||' delimiter (split again at load time)
            texts = motion['texts']
            save_dict['texts'] = '|||'.join(texts) if texts else ''
            np.savez_compressed(
                output_dir / 'motions' / f'{motion_id}.npz',
                **save_dict,
            )
            # Collect stats (subsample for memory)
            if processed_count % 5 == 0:
                all_local_pos.append(features['local_positions'])
                all_velocities.append(features['velocities'])
                all_root_vel.append(features['root_velocity'])
            processed_count += 1
        except Exception as e:
            # Best-effort pipeline: report and move on rather than abort the run
            print(f" Error processing {motion_id}: {e}")
            skipped_count += 1
    print(f"\nProcessed: {processed_count}, Skipped: {skipped_count}")

    # 3. Compute and save statistics
    print("Computing dataset statistics...")
    if all_local_pos:
        all_local_pos = np.concatenate(all_local_pos, axis=0)  # [N, J, 3]
        all_velocities = np.concatenate(all_velocities, axis=0)
        all_root_vel = np.concatenate(all_root_vel, axis=0)
        # 1e-8 epsilon keeps downstream normalization divide-safe
        stats = {
            'local_pos_mean': all_local_pos.mean(axis=0),
            'local_pos_std': all_local_pos.std(axis=0) + 1e-8,
            'velocity_mean': all_velocities.mean(axis=0),
            'velocity_std': all_velocities.std(axis=0) + 1e-8,
            'root_vel_mean': all_root_vel.mean(axis=0),
            'root_vel_std': all_root_vel.std(axis=0) + 1e-8,
        }
        np.savez(output_dir / 'stats.npz', **stats)
    else:
        # BUGFIX: np.concatenate on empty lists raises ValueError; warn instead
        # of crashing after a run where every motion failed/was skipped.
        print(" Warning: no motions processed, skipping stats.npz")

    # 4. Save splits, restricted to motions that were actually written
    for split, ids in all_splits.items():
        valid_ids = [
            mid for mid in ids
            if (output_dir / 'motions' / f'{mid}.npz').exists()
        ]
        with open(output_dir / 'splits' / f'{split}.txt', 'w') as f:
            for mid in valid_ids:
                f.write(f'{mid}\n')
        print(f" {split}: {len(valid_ids)} motions")
    print(f"\nDone! Output saved to {output_dir}")
if __name__ == '__main__':
    # CLI entry point: defaults mirror the repo's standard data layout.
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '--raw_dir',
        type=str,
        default='data/raw/HumanML3D',
        help='Path to raw HumanML3D data',
    )
    cli.add_argument(
        '--output_dir',
        type=str,
        default='data/processed/humanml3d',
        help='Output directory',
    )
    cli.add_argument('--target_fps', type=float, default=20.0)
    cli.add_argument('--max_frames', type=int, default=196)
    cli.add_argument('--min_frames', type=int, default=24)
    opts = cli.parse_args()
    preprocess_humanml3d(
        opts.raw_dir,
        opts.output_dir,
        target_fps=opts.target_fps,
        max_frames=opts.max_frames,
        min_frames=opts.min_frames,
    )