"""Compute normalization statistics for a config.
This script is used to compute the normalization statistics for a given config. It
will compute the mean and standard deviation of the data in the dataset and save it
to the config assets directory.
"""
from __future__ import annotations
import pathlib
import numpy as np
import tqdm
import tyro
from datasets import load_dataset
import openpi.models.model as _model
import openpi.shared.normalize as normalize
import openpi.training.config as _config
import openpi.training.data_loader as _data_loader
import openpi.transforms as transforms
class RemoveStrings(transforms.DataTransformFn):
    """Transform that drops every string-valued entry from a sample dict."""

    def __call__(self, x: dict) -> dict:
        filtered = {}
        for key, value in x.items():
            # Skip anything whose numpy dtype is a string subtype.
            if np.issubdtype(np.asarray(value).dtype, np.str_):
                continue
            filtered[key] = value
        return filtered
def create_torch_dataloader(
    data_config: _config.DataConfig,
    action_horizon: int,
    batch_size: int,
    model_config: _model.BaseModelConfig,
    num_workers: int,
    max_frames: int | None = None,
) -> tuple[_data_loader.Dataset, int]:
    """Build a torch data loader over the transformed dataset.

    Args:
        data_config: Data config; must have a non-None ``repo_id``.
        action_horizon: Number of future actions per sample.
        batch_size: Batch size for the loader.
        model_config: Model config forwarded to dataset creation.
        num_workers: Number of torch dataloader workers.
        max_frames: Optional cap on the number of frames to use.

    Returns:
        The data loader and the number of batches it will yield.
    """
    if data_config.repo_id is None:
        raise ValueError("Data config must have a repo_id")

    base_dataset = _data_loader.create_torch_dataset(data_config, action_horizon, model_config)
    transformed = _data_loader.TransformedDataset(
        base_dataset,
        [
            *data_config.repack_transforms.inputs,
            *data_config.data_transforms.inputs,
            # Remove strings since they are not supported by JAX and are not needed to compute norm stats.
            RemoveStrings(),
        ],
    )

    # When sub-sampling below the dataset size, shuffle so the subset is representative.
    capped = max_frames is not None and max_frames < len(transformed)
    frame_budget = max_frames if capped else len(transformed)
    batch_count = frame_budget // batch_size

    loader = _data_loader.TorchDataLoader(
        transformed,
        local_batch_size=batch_size,
        num_workers=num_workers,
        shuffle=capped,
        num_batches=batch_count,
    )
    return loader, batch_count
def create_rlds_dataloader(
    data_config: _config.DataConfig,
    action_horizon: int,
    batch_size: int,
    max_frames: int | None = None,
) -> tuple[_data_loader.Dataset, int]:
    """Build an RLDS data loader over the transformed dataset.

    Args:
        data_config: Data config describing the RLDS source.
        action_horizon: Number of future actions per sample.
        batch_size: Batch size for the loader.
        max_frames: Optional cap on the number of frames to use.

    Returns:
        The data loader and the number of batches it will yield.
    """
    raw_dataset = _data_loader.create_rlds_dataset(data_config, action_horizon, batch_size, shuffle=False)
    transformed = _data_loader.IterableTransformedDataset(
        raw_dataset,
        [
            *data_config.repack_transforms.inputs,
            *data_config.data_transforms.inputs,
            # Remove strings since they are not supported by JAX and are not needed to compute norm stats.
            RemoveStrings(),
        ],
        is_batched=True,
    )

    if max_frames is None or max_frames >= len(transformed):
        # NOTE: this length is currently hard-coded for DROID.
        frame_budget = len(transformed)
    else:
        frame_budget = max_frames
    batch_count = frame_budget // batch_size

    loader = _data_loader.RLDSDataLoader(
        transformed,
        num_batches=batch_count,
    )
    return loader, batch_count
def _iter_parquet_batches(
    dataset_root: pathlib.Path,
    *,
    batch_size: int,
    max_frames: int | None,
):
    """Iterate parquet data in batches without decoding videos.

    Expects a LeRobot-format dataset directory at `dataset_root` containing `data/`.
    Only reads `observation.state` and `action` columns.

    Yields:
        Dicts with ``"state"`` and ``"actions"`` numpy arrays, one per batch;
        the final batch may be smaller than ``batch_size``.
    """
    data_dir = dataset_root / "data"
    if not data_dir.exists():
        raise FileNotFoundError(f"Expected parquet directory at {data_dir}")

    dataset = load_dataset("parquet", data_dir=str(data_dir), split="train")
    limit = len(dataset) if max_frames is None else min(len(dataset), max_frames)

    # Access only required columns to reduce work.
    start = 0
    while start < limit:
        stop = min(limit, start + batch_size)
        rows = dataset[start:stop]
        # HF datasets returns lists; convert to numpy arrays.
        yield {
            "state": np.asarray(rows["observation.state"]),
            "actions": np.asarray(rows["action"]),
        }
        start = stop
def main(config_name: str, max_frames: int | None = None, parquet_only: bool = False):
    """Compute and save normalization statistics for the given config.

    Args:
        config_name: Name of a registered training config.
        max_frames: Optional cap on the number of frames used for the statistics.
        parquet_only: If True, read `observation.state`/`action` directly from the
            LeRobot parquet files without decoding videos.

    Raises:
        ValueError: If the data config is missing required fields for the chosen path.
        FileNotFoundError: If the parquet data directory does not exist (parquet path).
    """
    config = _config.get_config(config_name)
    data_config = config.data.create(config.assets_dirs, config.model)

    # The output path is keyed by repo_id, so it must be set for every data backend.
    # (Previously only the torch/parquet paths validated this; the RLDS path would
    # fail later with a confusing TypeError at `assets_dirs / None`.)
    if data_config.repo_id is None:
        raise ValueError("Data config must have a repo_id")

    if parquet_only:
        if data_config.rlds_data_dir is not None:
            raise ValueError("--parquet-only is only supported for LeRobot (parquet) datasets, not RLDS.")
        if data_config.lerobot_root is None:
            raise ValueError(
                "Data config must set `lerobot_root` for --parquet-only (path to the LeRobot dataset directory)."
            )
        # NOTE: In LeRobot, `root` is the dataset directory containing `meta/`, `data/`, and `videos/`.
        dataset_root = pathlib.Path(data_config.lerobot_root)
        data_loader = _iter_parquet_batches(
            dataset_root,
            batch_size=config.batch_size,
            max_frames=max_frames,
        )
        # Match the generator exactly: it yields ceil(limit / batch_size) batches,
        # including a final partial batch, where limit is capped by the dataset size.
        # The previous floor division under- or over-counted the tqdm total.
        num_examples = len(load_dataset("parquet", data_dir=str(dataset_root / "data"), split="train"))
        limit = num_examples if max_frames is None else min(num_examples, max_frames)
        num_batches = (limit + config.batch_size - 1) // config.batch_size
    elif data_config.rlds_data_dir is not None:
        data_loader, num_batches = create_rlds_dataloader(
            data_config, config.model.action_horizon, config.batch_size, max_frames
        )
    else:
        data_loader, num_batches = create_torch_dataloader(
            data_config, config.model.action_horizon, config.batch_size, config.model, config.num_workers, max_frames
        )

    keys = ["state", "actions"]
    stats = {key: normalize.RunningStats() for key in keys}
    for batch in tqdm.tqdm(data_loader, total=num_batches, desc="Computing stats"):
        for key in keys:
            stats[key].update(np.asarray(batch[key]))
    # Use a distinct loop variable: the original shadowed `stats` inside the comprehension.
    norm_stats = {key: running.get_statistics() for key, running in stats.items()}

    output_path = config.assets_dirs / data_config.repo_id
    print(f"Writing stats to: {output_path}")
    normalize.save(output_path, norm_stats)
if __name__ == "__main__":
    # Expose `main` as a command-line interface; tyro derives flags
    # (--max-frames, --parquet-only, ...) from the function signature.
    tyro.cli(main)