# MoCapDataset / AMASSDataset / convert_motion_data.py
# Source: gourav-wadhwa, commit b2e15d5 ("adding everything from the dataset")
#!/usr/bin/env python3
import numpy as np
import torch
from pathlib import Path
import isaaclab.utils.math as math_utils
def convert_motion_data(input_path: str, output_path: str) -> None:
    """Convert motion data from AMASS format to our desired format.

    Reads a source ``.npz`` containing ``qpos``/``qvel`` trajectories plus
    ``joint_names``/``body_names`` arrays, converts the root-orientation
    quaternion (stored as ``[x, y, z, w]``) into per-frame 3x3 rotation
    matrices, and saves the repacked arrays to ``output_path``.

    Args:
        input_path: Path to input npz file.
        output_path: Path to save the converted data.
    """
    # np.load on an .npz returns an NpzFile that keeps the archive open;
    # use a context manager so the file handle is closed deterministically.
    with np.load(input_path, allow_pickle=True) as data:
        qpos = data['qpos']  # shape: [T, n_dofs]; cols 0:7 are root pos + quat
        qvel = data['qvel']  # shape: [T, n_dofs]; cols 0:6 are root lin/ang vel
        joint_names = data['joint_names']
        body_names = data['body_names']

    # Move to GPU if available (quat_rotate below is batched over all frames).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Reorder quaternions from [x, y, z, w] (source layout) to [w, x, y, z]
    # (layout expected by isaaclab's math utils) with a single fancy index.
    quats = torch.from_numpy(qpos[:, 3:7]).float()[:, [3, 0, 1, 2]].to(device)

    # Build per-frame rotation matrices column by column: rotating the i-th
    # standard basis vector by q yields the i-th column of the matrix.
    num_frames = quats.shape[0]
    basis_vectors = torch.eye(3, device=device).unsqueeze(0).repeat(num_frames, 1, 1)  # [T, 3, 3]
    rotations = torch.zeros((num_frames, 3, 3), device=device)
    for i in range(3):
        rotations[..., i] = math_utils.quat_rotate(quats, basis_vectors[..., i])
    rotations = rotations.cpu().numpy()

    # Repack into the output layout; root state is split out of qpos/qvel.
    output_data = {
        'dof_names': joint_names,
        'body_names': body_names,
        'dof_positions': qpos[:, 7:],   # Remove root state (7 DoFs)
        'dof_velocities': qvel[:, 6:],  # Remove root velocities (6 DoFs)
        'body_positions': qpos[:, :3],  # Root position
        'body_rotations': rotations,    # Root rotation as 3x3 matrix
        'body_linear_velocities': qvel[:, :3],    # Root linear velocity
        'body_angular_velocities': qvel[:, 3:6],  # Root angular velocity
        'fps': 50,  # Fixed at 50Hz
    }

    # Print input data shapes for verification
    print("\nInput data shapes:")
    print(f"qpos shape: {qpos.shape}")
    print(f"qvel shape: {qvel.shape}")
    print(f"quaternions shape: {quats.shape}")
    print(f"rotations shape: {rotations.shape}")

    # Save the converted data
    np.savez(output_path, **output_data)
    print(f"\nConverted data saved to {output_path}")
    print("\nOutput data contains:")
    for key, value in output_data.items():
        if isinstance(value, np.ndarray):
            print(f"- {key}: shape {value.shape} (dtype: {value.dtype})")
        else:
            print(f"- {key}: {value}")
if __name__ == "__main__":
    import argparse

    # Thin CLI wrapper around convert_motion_data.
    parser = argparse.ArgumentParser(description="Convert AMASS motion data to our format")
    parser.add_argument("--input", type=str, required=True, help="Input npz file path")
    parser.add_argument("--output", type=str, help="Output npz file path. If not provided, will use input path with _converted suffix")
    args = parser.parse_args()

    input_path = args.input
    if args.output is not None:
        output_path = args.output
    else:
        # Default: "<stem>_converted.npz" alongside the input file.
        src = Path(input_path)
        output_path = str(src.with_name(f"{src.stem}_converted.npz"))

    convert_motion_data(input_path, output_path)