"""Filter and augment Unitree G1 dataset to contain only walking fragments. |
|
|
|
|
|
The script: |
|
|
1. Reads *episode_stats.json* produced by **analyze_dataset.py**. |
|
|
2. Determines whether base angular velocity appears to be in rad/s or deg/s. |
|
|
3. Walks through each trajectory file, extracts contiguous segments where |
|
|
linear speed <= 2 m/s, and saves them as new .pt files in an output folder. |
|
|
4. Optionally performs left↔right mirroring augmentation. |
|
|
|
|
|
Run: |
|
|
python augment_dataset.py \ |
|
|
--root /home/ubuntu/MoCapDataset/AMASSDataset/UnitreeG1 \ |
|
|
--out /home/ubuntu/MoCapDataset/AMASSDataset/UnitreeG1_WalkOnly \ |
|
|
--mirror |
|
|
""" |
|
|
from __future__ import annotations

import argparse
import os
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import torch

import isaaclab.utils.math as math_utils

_ALLOWED_EXT = {".pt", ".pth", ".pkl", ".npz"}
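
# Assumed trajectory layout (an inference from the indexing used below: the
# quaternion is read from qpos[:, 3:7], joints start at qpos[:, 7] and
# qvel[:, 6]); adjust if your export differs.
#   qpos: [x, y, z, qw, qx, qy, qz, joint positions ...]
#   qvel: [vx, vy, vz, wx, wy, wz, joint velocities ...]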
|
|
def load_file(path: Path) -> Dict[str, torch.Tensor]:
    """Load one trajectory file into a dict, converting numeric arrays to tensors."""
    if path.suffix in {".npz", ".pkl"}:
        # np.load reads .npz archives directly; with allow_pickle=True it also
        # unpickles .pkl files.
        data = dict(np.load(path, allow_pickle=True))
        for k, v in data.items():
            # Convert numeric arrays to tensors; keep object arrays
            # (e.g. joint-name lists) as-is.
            if isinstance(v, np.ndarray) and v.dtype.kind in {"f", "c", "i", "u", "b"}:
                data[k] = torch.from_numpy(v)
            else:
                data[k] = v
        return data
    return torch.load(path, map_location="cpu")
|
|
def save_pt(data: Dict[str, torch.Tensor], path: Path):
    path.parent.mkdir(parents=True, exist_ok=True)
    torch.save(data, path)
|
|
def mirror_left_right(data: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Simple left↔right mirror assuming the naming pattern 'left_' / 'right_'.

    Note: joint values are swapped but not sign-flipped, so roll/yaw joints
    whose axes flip under mirroring are not corrected here.
    """
    mirrored = {k: v.clone() if torch.is_tensor(v) else v for k, v in data.items()}
    names = data.get("joint_names", None)
    if names is None:
        return mirrored
    names = list(names)

    # Build the index swap map left_* <-> right_*.
    swap = {}
    for i, n in enumerate(names):
        if n.startswith("left_"):
            mirror_name = "right_" + n[len("left_"):]
        elif n.startswith("right_"):
            mirror_name = "left_" + n[len("right_"):]
        else:
            continue
        if mirror_name in names:
            swap[i] = names.index(mirror_name)
    if not swap:
        return mirrored

    # Swap joint positions/velocities (joints start at qpos[:, 7] and
    # qvel[:, 6], after the floating-base coordinates).
    qpos = mirrored["qpos"]
    qvel = mirrored["qvel"]
    qpos_new = qpos.clone()
    qvel_new = qvel.clone()
    for i, j in swap.items():
        qpos_new[:, 7 + i] = qpos[:, 7 + j]
        qpos_new[:, 7 + j] = qpos[:, 7 + i]
        qvel_new[:, 6 + i] = qvel[:, 6 + j]
        qvel_new[:, 6 + j] = qvel[:, 6 + i]

    # Reflect the floating base across the xz-plane: negate base y-position
    # and y-velocity, and the x/z components of the (w, x, y, z) quaternion.
    qpos_new[:, 1] = -qpos_new[:, 1]
    qvel_new[:, 1] = -qvel_new[:, 1]
    qpos_new[:, 4] = -qpos_new[:, 4]
    qpos_new[:, 6] = -qpos_new[:, 6]
    # Angular velocity is a pseudovector: its x and z components flip too.
    qvel_new[:, 3] = -qvel_new[:, 3]
    qvel_new[:, 5] = -qvel_new[:, 5]
    mirrored["qpos"] = qpos_new
    mirrored["qvel"] = qvel_new
    return mirrored
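
# Quick sanity check (hypothetical file name; run e.g. in a REPL):
#   data = load_file(Path("some_clip.pt"))
#   m = mirror_left_right(data)
#   assert torch.allclose(m["qpos"][:, 1], -data["qpos"][:, 1])  # base y flipped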
|
|
def extract_walking_segments(
    data: Dict[str, torch.Tensor], speed_thr: float = 1.5, min_len: int = 50
) -> List[Dict[str, torch.Tensor]]:
    """Return a list of new dicts containing walking-only contiguous clips.

    Filtering uses body-frame velocities:
        |vx_body| < speed_thr m/s and |vy_body| < 0.5 m/s
    Only runs of at least min_len consecutive frames are kept.
    """
    qpos = data["qpos"]
    qvel = data["qvel"]
    base_lin_vel = qvel[:, :3]
    base_quat = qpos[:, 3:7]  # (w, x, y, z)

    # Rotate the world-frame base velocity into the body frame.
    base_lin_vel_body = math_utils.quat_rotate_inverse(base_quat, base_lin_vel)

    mask = (base_lin_vel_body[:, 0].abs() < speed_thr) & (base_lin_vel_body[:, 1].abs() < 0.5)

    # Collect contiguous True runs of at least min_len frames.
    segments: List[Tuple[int, int]] = []
    start = None
    for i, m in enumerate(mask.tolist()):
        if m and start is None:
            start = i
        elif not m and start is not None:
            if i - start >= min_len:
                segments.append((start, i))
            start = None
    if start is not None and len(qpos) - start >= min_len:
        segments.append((start, len(qpos)))

    clips = []
    for s, e in segments:
        # Slice time-indexed tensors; copy everything else verbatim.
        clip = {
            k: v[s:e].clone() if torch.is_tensor(v) and v.ndim > 0 else (v.clone() if torch.is_tensor(v) else v)
            for k, v in data.items()
        }
        clips.append(clip)
    return clips
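
# Example of expected shapes (illustrative numbers, not from the dataset):
# a 1000-frame trajectory with J joints has qpos of shape (1000, 7 + J) and
# qvel of shape (1000, 6 + J); the function returns zero or more sub-clips,
# each at least min_len frames long, sliced along the time axis.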
|
|
def main():
    parser = argparse.ArgumentParser(description="Filter and augment walking trajectories")
    parser.add_argument("--root", required=True, type=str, help="Original dataset root")
    parser.add_argument("--out", required=True, type=str, help="Output folder for walking clips")
    parser.add_argument("--mirror", default=False, action="store_true", help="Generate left-right mirrored copies")
    parser.add_argument("--speed_thr", type=float, default=1.5, help="Max forward body-frame speed (m/s) for walking")
    parser.add_argument("--min_len", type=int, default=50, help="Minimum clip length (frames) to keep")
    args = parser.parse_args()

    root = Path(args.root)
    out_root = Path(args.out)
    out_root.mkdir(parents=True, exist_ok=True)

    # Collect candidate trajectory files, skipping the shared shape file.
    files = []
    for p, _, names in os.walk(root):
        for n in names:
            if n == "shape_optimized.pkl":
                continue
            if Path(n).suffix in _ALLOWED_EXT:
                files.append(Path(p) / n)
    files.sort()

    total_clips = 0
    for f_idx, path in enumerate(files):
        data = load_file(path)
        clips = extract_walking_segments(data, speed_thr=args.speed_thr, min_len=args.min_len)
        for idx, clip in enumerate(clips):
            # Mirror the source directory layout under the output root.
            rel_dir = path.relative_to(root).parent
            name = path.stem + f"_walk_{idx}.pt"
            save_pt(clip, out_root / rel_dir / name)
            total_clips += 1
            if args.mirror:
                mirror_clip = mirror_left_right(clip)
                name_m = path.stem + f"_walk_{idx}_mir.pt"
                save_pt(mirror_clip, out_root / rel_dir / name_m)
                total_clips += 1
        if (f_idx + 1) % 20 == 0:
            print(f"Processed {f_idx + 1}/{len(files)} files…")

    print(f"Done. Saved {total_clips} walking clips to {out_root}")


if __name__ == "__main__":
    main()