#!/usr/bin/env python3
"""Dataset analysis utility for the Unitree G1 AMASS trajectories.

Run:
    python analyze_dataset.py --root /home/ubuntu/MoCapDataset/AMASSDataset/UnitreeG1

The script outputs two files in the same folder:
    ├─ episode_stats.json   – per-trajectory statistics
    └─ aggregate_stats.json – averages / extrema over the whole dataset

Useful for diagnosing AMP training issues (range mismatch, outliers, etc.).
"""

from __future__ import annotations

import argparse
import json
import os
from pathlib import Path
from collections import defaultdict
from typing import Any, Dict, List

import torch
import numpy as np

import isaaclab.utils.math as math_utils

_ALLOWED_EXT = {".pt", ".pth", ".pkl", ".npz"}

# ----------------------------------------------------------------------------------
# helpers
# ----------------------------------------------------------------------------------

def _load_file(path: Path) -> Dict[str, torch.Tensor]:
    """Load a torch / numpy trajectory file into a dict of torch tensors."""
    if path.suffix in {".npz", ".pkl"}:
        # np.load reads .npz archives natively and falls back to pickle for
        # .pkl files when allow_pickle=True.
        data = dict(np.load(path, allow_pickle=True))
        for k, v in data.items():
            if isinstance(v, np.ndarray) and v.dtype.kind in {"f", "c", "i", "u", "b"}:
                data[k] = torch.from_numpy(v)
            else:
                data[k] = v  # keep python objects (e.g. list[str]) as-is
        return data  # type: ignore[return-value]
    # torch files (.pt / .pth)
    return torch.load(path, map_location="cpu")  # type: ignore[return-value]


def _tensor_range(t: torch.Tensor):
    return t.min().item(), t.max().item()


def _analyze_field(value: Any) -> Dict[str, Any]:
    """Analyze a field value and return metadata about it."""
    if isinstance(value, torch.Tensor):
        return {
            "type": "tensor",
            "dtype": str(value.dtype),
            "shape": list(value.shape),
            "ndim": value.ndim,
            "size": value.numel(),
        }
    elif isinstance(value, np.ndarray):
        return {
            "type": "numpy_array", 
            "dtype": str(value.dtype),
            "shape": list(value.shape),
            "ndim": value.ndim,
            "size": value.size,
        }
    elif isinstance(value, list):
        return {
            "type": "list",
            "length": len(value),
            "element_type": type(value[0]).__name__ if value else "unknown",
        }
    elif isinstance(value, (str, int, float, bool)):
        return {
            "type": type(value).__name__,
            "value": value if isinstance(value, (int, float, bool)) else f"<string of length {len(value)}>",
        }
    else:
        return {
            "type": type(value).__name__,
            "repr": str(value)[:100] + ("..." if len(str(value)) > 100 else ""),
        }
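
# Example output for a float tensor (illustrative):
#     _analyze_field(torch.zeros(100, 29)) ->
#     {"type": "tensor", "dtype": "torch.float32", "shape": [100, 29], "ndim": 2, "size": 2900}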

def _determine_angular_unit(max_vel: float) -> str:
    """Determine if angular velocity is in deg/s or rad/s based on magnitude."""
    return "deg/s" if max_vel > 20.0 else "rad/s"

def _analyze_angular_velocity(velocities: torch.Tensor, name: str = "") -> Dict[str, Any]:
    """Analyze angular velocity data to determine if it's in deg/s or rad/s.
    
    Uses multiple heuristics:
    1. Physical limits - robots rarely exceed 1000 deg/s or ~17 rad/s
    2. Distribution of values - deg/s values tend to be larger
    3. Common ranges for motion capture data
    
    Args:
        velocities: Tensor of angular velocities
        name: Name of the joint/axis for reporting
    
    Returns:
        Dict with analysis results
    """
    abs_max = float(torch.abs(velocities).max())
    abs_mean = float(torch.abs(velocities).mean())
    
    # Convert to both units for analysis
    if abs_max > 20.0:  # Assuming it might be deg/s
        rad_max = abs_max * np.pi / 180
        rad_mean = abs_mean * np.pi / 180
        deg_max = abs_max
        deg_mean = abs_mean
        original_unit = "deg/s"
    else:  # Assuming it might be rad/s
        rad_max = abs_max
        rad_mean = abs_mean
        deg_max = abs_max * 180 / np.pi
        deg_mean = abs_mean * 180 / np.pi
        original_unit = "rad/s"
    
    # Scoring system for unit determination
    deg_score = 0
    rad_score = 0
    
    # Physical limits check
    if deg_max > 1000:  # Unusually high for deg/s
        rad_score += 2
    if rad_max > 17:  # Unusually high for rad/s (~1000 deg/s)
        deg_score += 2
        
    # Common ranges check
    if 20 <= deg_max <= 720:  # Common range for deg/s in motion capture
        deg_score += 1
    if 0.3 <= rad_max <= 12:  # Common range for rad/s in motion capture
        rad_score += 1
        
    # Mean value check
    if 5 <= deg_mean <= 180:  # Common mean range for deg/s
        deg_score += 1
    if 0.1 <= rad_mean <= 3:  # Common mean range for rad/s
        rad_score += 1
    
    # Determine most likely unit
    likely_unit = "deg/s" if deg_score > rad_score else "rad/s"
    confidence = abs(deg_score - rad_score) / (deg_score + rad_score) if (deg_score + rad_score) > 0 else 0
    
    return {
        "likely_unit": likely_unit,
        "confidence": confidence,
        "max_value": abs_max,
        "mean_value": abs_mean,
        "deg_score": deg_score,
        "rad_score": rad_score,
        "analysis": {
            "deg/s": {"max": deg_max, "mean": deg_mean},
            "rad/s": {"max": rad_max, "mean": rad_mean}
        },
        "original_unit": original_unit
    }
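
# Minimal usage sketch (illustrative values, not part of the pipeline):
#     >>> w = torch.tensor([0.5, -2.0, 3.1])  # plausibly rad/s
#     >>> _analyze_angular_velocity(w)["likely_unit"]
#     'rad/s'
# Note: the scoring ranges overlap, so typical data often ties (confidence 0.0);
# ties resolve to "rad/s" because the comparison above is strict.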

# ----------------------------------------------------------------------------------
# main analysis
# ----------------------------------------------------------------------------------

def analyse_dataset(root: Path) -> None:
    # discover files
    files: List[Path] = []
    for p, _, names in os.walk(root):
        for n in names:
            if n == "shape_optimized.pkl":
                continue
            if Path(n).suffix in _ALLOWED_EXT:
                files.append(Path(p) / n)
    files.sort()

    if not files:
        raise RuntimeError(f"No trajectory files found under {root}")

    print(f"Found {len(files)} trajectory files.  Analysing…")

    # aggregate accumulators
    agg = {
        "num_episodes": len(files),
        "total_frames": 0,
        "lengths": [],
        "base_lin_vel_b": {"min": torch.full((3,), torch.inf), "max": torch.full((3,), -torch.inf)},
        "base_ang_vel_b": {"min": torch.full((3,), torch.inf), "max": torch.full((3,), -torch.inf)},
        "base_height": {"min": torch.tensor(torch.inf), "max": torch.tensor(-torch.inf)},
        "base_quat": {"min": torch.full((4,), torch.inf), "max": torch.full((4,), -torch.inf)},
        "joint_pos": {},  # type: ignore[dict[str, Any]]
        "joint_vel": {},  # type: ignore[dict[str, Any]]
        "base_pos": {"min": torch.full((3,), torch.inf), "max": torch.full((3,), -torch.inf)},
        "start_pos_list": [],
    }

    # field analysis accumulator
    field_analysis = defaultdict(lambda: {
        "count": 0,
        "files_with_field": [],
        "metadata": None,
        "consistent_shape": True,
        "shapes_seen": set(),
    })

    episode_stats: List[Dict[str, Any]] = []

    for f_idx, path in enumerate(files):
        data = _load_file(path)
        
        # Analyze all fields in this file
        for field_name, field_value in data.items():
            field_info = field_analysis[field_name]
            field_info["count"] += 1
            field_info["files_with_field"].append(str(path.relative_to(root)))
            
            # Analyze field metadata
            metadata = _analyze_field(field_value)
            if field_info["metadata"] is None:
                field_info["metadata"] = metadata
            
            # Track shape consistency for tensors/arrays
            if metadata["type"] in ["tensor", "numpy_array"]:
                shape_tuple = tuple(metadata["shape"])
                field_info["shapes_seen"].add(shape_tuple)
                if len(field_info["shapes_seen"]) > 1:
                    field_info["consistent_shape"] = False

        # Continue with existing analysis for specific fields
        if "qpos" not in data or "qvel" not in data:
            print(f"[WARN] {path.name}: Missing qpos or qvel, skipping detailed analysis")
            continue

        qpos, qvel = data["qpos"].float(), data["qvel"].float()
        n = qpos.shape[0]
        agg["total_frames"] += n
        agg["lengths"].append(n)

        # root quantities
        base_pos = qpos[:, :3]
        base_quat = math_utils.quat_unique(qpos[:, 3:7])  # shape [n,4]
        base_lin_vel = qvel[:, :3]
        base_ang_vel = qvel[:, 3:6]
        base_lin_vel_b = math_utils.quat_rotate_inverse(base_quat, base_lin_vel)
        base_ang_vel_b = math_utils.quat_rotate_inverse(base_quat, base_ang_vel)

        height = base_pos[:, 2]

        # update global min/max
        agg["base_height"]["min"] = torch.minimum(agg["base_height"]["min"], height.min())
        agg["base_height"]["max"] = torch.maximum(agg["base_height"]["max"], height.max())
        for k, tensor in zip(["base_lin_vel_b", "base_ang_vel_b"], [base_lin_vel_b, base_ang_vel_b]):
            agg[k]["min"] = torch.minimum(agg[k]["min"], tensor.min(dim=0).values)
            agg[k]["max"] = torch.maximum(agg[k]["max"], tensor.max(dim=0).values)
        agg["base_quat"]["min"] = torch.minimum(agg["base_quat"]["min"], base_quat.min(dim=0).values)
        agg["base_quat"]["max"] = torch.maximum(agg["base_quat"]["max"], base_quat.max(dim=0).values)

        # update base_pos ranges
        agg["base_pos"]["min"] = torch.minimum(agg["base_pos"]["min"], base_pos.min(dim=0).values)
        agg["base_pos"]["max"] = torch.maximum(agg["base_pos"]["max"], base_pos.max(dim=0).values)

        # save starting position
        agg["start_pos_list"].append(base_pos[0].tolist())

        # joint ranges per episode
        joint_pos = qpos[:, 7:]
        joint_vel = qvel[:, 6:]  # skip floating base velocities (first 6)
        num_joints = joint_pos.shape[1]
        joint_names = data.get("joint_names", None)
        if joint_names is not None and len(joint_names) == num_joints + 1:
            # Likely the first entry is the floating base/root joint which is not in joint_pos
            joint_names = joint_names[1:]

        if joint_names is None or len(joint_names) != num_joints:
            # Generate fallback names if missing or mismatched
            if joint_names is not None and len(joint_names) != num_joints:
                print(f"[WARN] {path.name}: joint_names length {len(joint_names)} != joint dim {num_joints}. Using generic names.")
            joint_names = [f"joint_{i}" for i in range(num_joints)]

        ep_joint_range = {}
        ep_joint_vel_range = {}
        for j in range(num_joints):
            name = joint_names[j]
            # Position ranges
            j_min, j_max = _tensor_range(joint_pos[:, j])
            ep_joint_range[name] = {"min": j_min, "max": j_max}

            # Velocity ranges
            v_min, v_max = _tensor_range(joint_vel[:, j])
            ep_joint_vel_range[name] = {"min": v_min, "max": v_max}

            # accumulate global position ranges
            if name not in agg["joint_pos"]:
                agg["joint_pos"][name] = {"min": j_min, "max": j_max}
            else:
                agg["joint_pos"][name]["min"] = min(agg["joint_pos"][name]["min"], j_min)
                agg["joint_pos"][name]["max"] = max(agg["joint_pos"][name]["max"], j_max)

            # accumulate global velocity ranges
            if name not in agg["joint_vel"]:
                agg["joint_vel"][name] = {"min": v_min, "max": v_max}
            else:
                agg["joint_vel"][name]["min"] = min(agg["joint_vel"][name]["min"], v_min)
                agg["joint_vel"][name]["max"] = max(agg["joint_vel"][name]["max"], v_max)

        # Analyze base angular velocities
        base_ang_vel_analysis = {}
        for i, axis in enumerate(['x', 'y', 'z']):
            base_ang_vel_axis = base_ang_vel_b[:, i]
            base_ang_vel_analysis[axis] = _analyze_angular_velocity(base_ang_vel_axis, f"base_ang_vel_{axis}")

        # Analyze joint velocities
        joint_vel_analysis = {}
        for j in range(num_joints):
            name = joint_names[j]
            joint_vel_analysis[name] = _analyze_angular_velocity(joint_vel[:, j], name)

        # store per-episode stats
        episode_stats.append({
            "file": str(path.relative_to(root)),
            "length": n,
            "base_height": _tensor_range(height),
            "base_pos_start": base_pos[0].tolist(),
            "base_pos_range": {
                "min": base_pos.min(dim=0).values.tolist(),
                "max": base_pos.max(dim=0).values.tolist(),
            },
            "base_lin_vel_b": {
                "min": base_lin_vel_b.min(dim=0).values.tolist(),
                "max": base_lin_vel_b.max(dim=0).values.tolist(),
            },
            "base_ang_vel_b": {
                "min": base_ang_vel_b.min(dim=0).values.tolist(),
                "max": base_ang_vel_b.max(dim=0).values.tolist(),
            },
            "base_quat": {k: v.tolist() for k, v in agg["base_quat"].items()},
            "joint_pos_range": ep_joint_range,
            "joint_vel_range": ep_joint_vel_range,
            "base_ang_vel_analysis": base_ang_vel_analysis,
            "joint_vel_analysis": joint_vel_analysis,
        })

        if (f_idx + 1) % 50 == 0:
            print(f"Processed {f_idx+1}/{len(files)} files…")

    # Process field analysis results
    # print("\n" + "="*60)
    # print("DATASET FIELD ANALYSIS")
    # print("="*60)
    
    # all_fields = sorted(field_analysis.keys())
    # for field_name in all_fields:
    #     info = field_analysis[field_name]
    #     print(f"\nField: '{field_name}'")
    #     print(f"  Present in: {info['count']}/{len(files)} files ({info['count']/len(files)*100:.1f}%)")
        
    #     if info['metadata']:
    #         meta = info['metadata']
    #         if meta['type'] in ['tensor', 'numpy_array']:
    #             if info['consistent_shape']:
    #                 print(f"  Type: {meta['type']} ({meta['dtype']})")
    #                 print(f"  Shape: {meta['shape']}")
    #             else:
    #                 print(f"  Type: {meta['type']} ({meta['dtype']}) - INCONSISTENT SHAPES")
    #                 print(f"  Shapes seen: {sorted(info['shapes_seen'])}")
    #         elif meta['type'] == 'list':
    #             print(f"  Type: {meta['type']} (length: {meta['length']}, elements: {meta['element_type']})")
    #         else:
    #             print(f"  Type: {meta['type']}")
    #             if 'value' in meta:
    #                 print(f"  Value: {meta['value']}")

    # Create field summary for JSON output
    field_summary = {}
    for field_name, info in field_analysis.items():
        field_summary[field_name] = {
            "present_in_files": info["count"],
            "present_in_percentage": round(info["count"] / len(files) * 100, 1),
            "metadata": info["metadata"],
            "consistent_shape": info["consistent_shape"],
        }
        if not info["consistent_shape"]:
            field_summary[field_name]["shapes_seen"] = sorted([list(s) for s in info["shapes_seen"]])

    if not episode_stats:
        raise RuntimeError("No file contained both 'qpos' and 'qvel'; nothing to aggregate.")

    # final aggregate statistics
    agg_stats = {
        "num_episodes": agg["num_episodes"],
        "average_length": float(np.mean(agg["lengths"])),
        "min_length": int(min(agg["lengths"])),
        "max_length": int(max(agg["lengths"])),
        "base_height": {k: v.item() if torch.is_tensor(v) else v.tolist() for k, v in agg["base_height"].items()},
        "base_lin_vel_b": {k: v.tolist() for k, v in agg["base_lin_vel_b"].items()},
        "base_ang_vel_b": {k: v.tolist() for k, v in agg["base_ang_vel_b"].items()},
        "base_quat": {k: v.tolist() for k, v in agg["base_quat"].items()},
        "joint_pos_global_range": agg["joint_pos"],
        "joint_vel_global_range": agg["joint_vel"],  # Added joint velocity global ranges
        "base_pos": {k: v.tolist() for k, v in agg["base_pos"].items()},
        "avg_start_pos": np.mean(agg["start_pos_list"], axis=0).tolist(),
        "field_analysis": field_summary,
        "angular_velocity_analysis": {
            "base": base_ang_vel_analysis,
            "joints": joint_vel_analysis,
        }
    }

    # Determine angular-velocity unit (deg/s vs rad/s) from the peak magnitude,
    # considering both the negative and positive extrema
    max_ang_vel = max(
        max(abs(x) for x in agg_stats["base_ang_vel_b"]["min"]),
        max(abs(x) for x in agg_stats["base_ang_vel_b"]["max"]),
    )
    base_ang_unit = _determine_angular_unit(max_ang_vel)
    agg_stats["base_ang_vel_unit"] = base_ang_unit

    # Analyze joint velocity units
    joint_vel_units = {}
    max_joint_vels = {}
    for joint_name, vel_range in agg["joint_vel"].items():
        max_vel = max(abs(vel_range["min"]), abs(vel_range["max"]))
        max_joint_vels[joint_name] = max_vel
        joint_vel_units[joint_name] = _determine_angular_unit(max_vel)
    
    # Check if all joints use the same unit
    unique_units = set(joint_vel_units.values())
    if len(unique_units) == 1:
        joint_vel_unit = next(iter(unique_units))
        print(f"\nAll joint velocities appear to be in {joint_vel_unit}")
    else:
        print("\nWARNING: Inconsistent joint velocity units detected:")
        for unit in unique_units:
            joints = [name for name, u in joint_vel_units.items() if u == unit]
            print(f"  {unit}: {', '.join(joints)}")

    # Add joint velocity analysis to aggregate stats
    agg_stats["joint_vel_units"] = joint_vel_units
    agg_stats["joint_vel_max_magnitude"] = max_joint_vels

    print(f"\nEstimated base angular velocity unit: {base_ang_unit} (max ω ≈ {max_ang_vel:.2f})")
    print("Joint velocity analysis added to aggregate_stats.json")

    # Print detailed analysis (from the last processed episode)
    print("\nAngular Velocity Analysis (last episode):")
    print("\nBase Angular Velocity:")
    for axis, analysis in base_ang_vel_analysis.items():
        print(f"  {axis}-axis: Likely {analysis['likely_unit']} (confidence: {analysis['confidence']:.2f})")
        print(f"    Max: {analysis['max_value']:.2f} {analysis['original_unit']}")
        print(f"    Mean: {analysis['mean_value']:.2f} {analysis['original_unit']}")

    print("\nJoint Velocities:")
    for joint, analysis in joint_vel_analysis.items():
        print(f"  {joint}: Likely {analysis['likely_unit']} (confidence: {analysis['confidence']:.2f})")
        print(f"    Max: {analysis['max_value']:.2f} {analysis['original_unit']}")
        print(f"    Mean: {analysis['mean_value']:.2f} {analysis['original_unit']}")

    # write files
    (root / "episode_stats.json").write_text(json.dumps(episode_stats, indent=2))
    (root / "aggregate_stats.json").write_text(json.dumps(agg_stats, indent=2))

    print(f"\nAnalysis complete. Results saved to episode_stats.json and aggregate_stats.json")
    # print(f"Field analysis included for {len(all_fields)} unique fields found across all files.")

# ----------------------------------------------------------------------------------

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Analyse Unitree G1 AMASS dataset")
    parser.add_argument("--root", type=str, required=True, help="Root folder of trajectories")
    args = parser.parse_args()

    analyse_dataset(Path(args.root).expanduser())