# H_RDT / scripts / check_pick_box_dataset.py
# (uploaded by zhenyuzhao via upload-large-folder tool, commit aceb411)
#!/usr/bin/env python3
import argparse
import json
import random
import sys
from pathlib import Path
import numpy as np
import pyarrow.parquet as pq
# Make the repository root importable so project modules resolve when this
# script is executed directly (e.g. `python scripts/check_pick_box_dataset.py`).
PROJECT_ROOT = Path(__file__).resolve().parents[1]
if str(PROJECT_ROOT) not in sys.path:
    sys.path.insert(0, str(PROJECT_ROOT))
# Observation columns read from each episode parquet file. The order here
# defines the feature order of the concatenated state vector built in
# sample_rows(): joints first, then the scalar proprioceptive signals.
STATE_COLS = [
    "observation.arm_joints",
    "observation.hand_joints",
    "observation.prev_height",
    "observation.prev_rpy",
    "observation.prev_vx",
    "observation.prev_vy",
    "observation.prev_vyaw",
    "observation.prev_dyaw",
]
def _column_f32(sub, name: str, *, as_column: bool = False) -> np.ndarray:
    """Materialize one pyarrow column as a float32 numpy array.

    When ``as_column`` is True the result is reshaped to (n, 1) so that
    scalar per-row signals can be concatenated with 2-D feature arrays.
    """
    arr = np.array(sub.column(name).to_pylist(), dtype=np.float32)
    return arr.reshape(-1, 1) if as_column else arr


def sample_rows(parquet_path: Path, max_rows: int, rng: random.Random):
    """Sample up to ``max_rows`` rows from one episode parquet file.

    Args:
        parquet_path: Episode parquet file containing "action" and all
            ``STATE_COLS`` columns.
        max_rows: Maximum number of rows to sample; when it meets or
            exceeds the row count, every row is taken (in file order).
        rng: Random source used so sampling is reproducible via --seed.

    Returns:
        ``(actions, state)`` float32 arrays, or ``None`` if the file is
        empty. ``state`` concatenates arm/hand joints with the scalar
        proprioceptive signals along the feature axis.
    """
    table = pq.read_table(parquet_path, columns=["action", *STATE_COLS])
    total = table.num_rows
    if total == 0:
        return None
    if max_rows >= total:
        indices = list(range(total))
    else:
        indices = rng.sample(range(total), max_rows)
    sub = table.take(indices)

    actions = _column_f32(sub, "action")
    # Scalar signals (height, vx, vy, vyaw, dyaw) are promoted to (n, 1)
    # so they concatenate cleanly with the (n, d) joint arrays.
    state = np.concatenate(
        [
            _column_f32(sub, "observation.arm_joints"),
            _column_f32(sub, "observation.hand_joints"),
            _column_f32(sub, "observation.prev_height", as_column=True),
            _column_f32(sub, "observation.prev_rpy"),
            _column_f32(sub, "observation.prev_vx", as_column=True),
            _column_f32(sub, "observation.prev_vy", as_column=True),
            _column_f32(sub, "observation.prev_vyaw", as_column=True),
            _column_f32(sub, "observation.prev_dyaw", as_column=True),
        ],
        axis=1,
    )
    return actions, state
def main() -> None:
    """Run quick sanity checks over a pick_box dataset directory.

    Prints meta statistics (episode counts, lengths, instructions), then
    samples rows from the first ``--max_episodes`` episodes to report
    action/state shapes, NaN/Inf presence, and per-dimension spread.

    Raises:
        FileNotFoundError: if meta/info.json or meta/episodes.jsonl is missing.
    """
    parser = argparse.ArgumentParser(description="Quick sanity checks for pick_box dataset.")
    parser.add_argument("--data_root", default="/hfm/data/pick_box")
    parser.add_argument("--max_episodes", type=int, default=10)
    parser.add_argument("--rows_per_episode", type=int, default=200)
    parser.add_argument("--seed", type=int, default=0)
    args = parser.parse_args()

    rng = random.Random(args.seed)
    data_root = Path(args.data_root)
    meta = data_root / "meta"
    info_path = meta / "info.json"
    episodes_path = meta / "episodes.jsonl"
    if not info_path.exists() or not episodes_path.exists():
        raise FileNotFoundError("Missing meta/info.json or meta/episodes.jsonl.")

    info = json.loads(info_path.read_text())
    episodes = [json.loads(line) for line in episodes_path.read_text().splitlines() if line.strip()]
    print("Dataset root:", data_root)
    print("Total episodes (meta):", info.get("total_episodes"))
    print("Episodes listed:", len(episodes))
    # Guard: min()/max() below raise ValueError on an empty sequence, so bail
    # out early (with a message) when episodes.jsonl lists nothing.
    if not episodes:
        print("No episodes listed in meta/episodes.jsonl; nothing to check.")
        return

    lengths = [int(e.get("length", 0)) for e in episodes]
    print("Episode length: min", min(lengths), "max", max(lengths), "avg", np.mean(lengths))
    instructions = [str(e.get("instruction", "") or "").strip() for e in episodes]
    empty_instr = sum(1 for i in instructions if not i)
    print("Unique instructions:", len(set(instructions)))
    print("Empty instructions:", empty_instr)

    # data_path is a template like "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet";
    # episodes are grouped into fixed-size chunks.
    data_path_tpl = info["data_path"]
    chunk_size = int(info["chunks_size"])
    actions_all = []
    states_all = []
    for ep in episodes[: args.max_episodes]:
        ep_index = int(ep["episode_index"])
        chunk = ep_index // chunk_size
        parquet_path = data_root / data_path_tpl.format(
            episode_chunk=chunk, episode_index=ep_index
        )
        if not parquet_path.exists():
            print("Missing parquet:", parquet_path)
            continue
        sample = sample_rows(parquet_path, args.rows_per_episode, rng)
        if sample is None:
            continue
        actions, states = sample
        actions_all.append(actions)
        states_all.append(states)

    if not actions_all:
        print("No action samples collected. Check dataset paths.")
        return

    actions = np.concatenate(actions_all, axis=0)
    states = np.concatenate(states_all, axis=0)
    print("Sampled actions shape:", actions.shape)
    print("Sampled states shape:", states.shape)
    print("Action dim:", actions.shape[1])
    print("State dim:", states.shape[1])
    print("Action NaN:", np.isnan(actions).any(), "Inf:", np.isinf(actions).any())
    print("State NaN:", np.isnan(states).any(), "Inf:", np.isinf(states).any())

    # A (near-)zero std on some dimension suggests a dead/constant channel.
    action_std = actions.std(axis=0)
    state_std = states.std(axis=0)
    print("Action std (min/median/max):", action_std.min(), np.median(action_std), action_std.max())
    print("State std (min/median/max):", state_std.min(), np.median(state_std), state_std.max())
    near_zero = np.mean(np.abs(actions) < 1e-4, axis=0)
    print("Action near-zero ratio (min/median/max):", near_zero.min(), np.median(near_zero), near_zero.max())

    # Cross-check against precomputed normalization stats when present.
    stats_path = meta / "action_stats.json"
    if stats_path.exists():
        stats = json.loads(stats_path.read_text())
        min_stat = np.array(stats["min"], dtype=np.float32)
        max_stat = np.array(stats["max"], dtype=np.float32)
        print("Stats file min/max (first 8 dims):")
        print("  min:", min_stat[:8])
        print("  max:", max_stat[:8])
# Allow use both as a standalone script and as an importable module.
if __name__ == "__main__":
    main()