Add standalone conversion script (no lerobot dependency)
convert_to_lerobot_format.py  ADDED  (+134 -0)
@@ -0,0 +1,134 @@
#!/usr/bin/env python
"""Convert dataset from flat parquet to LeRobot chunked format.

Standalone - no lerobot dependency. Add this to your dataset repo and run:

    python convert_to_lerobot_format.py --data-dir ./data --push-to-hub

Requires: pip install datasets huggingface_hub
"""

import argparse
from pathlib import Path

import numpy as np
from datasets import Dataset
from huggingface_hub import delete_file, upload_folder

# Target layout: data/chunk-000/file-000.parquet, chunk-000/file-001.parquet, ...
CHUNK_FILE_PATTERN = "chunk-{chunk_index:03d}/file-{file_index:03d}"
DEFAULT_DATA_FILE_SIZE_MB = 100
DEFAULT_CHUNK_SIZE = 1000


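# Approximate in-memory Arrow size of a dataset; drives the shard-size decisions below.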
def get_dataset_size_mb(ds: Dataset) -> float:
    return ds.data.nbytes / (1024**2)


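# Advance the (chunk, file) counters, rolling over into a new chunk
# directory once the current one holds chunk_size files.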
def update_chunk_file_indices(chunk_idx: int, file_idx: int, chunk_size: int) -> tuple[int, int]:
    if file_idx == chunk_size - 1:
        return chunk_idx + 1, 0
    return chunk_idx, file_idx + 1


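# Re-shard the flat parquet files under data_dir into chunk-XXX/file-XXX
# parquet files of roughly DEFAULT_DATA_FILE_SIZE_MB each, keeping whole
# episodes together. Returns True if a conversion happened, False if the
# data was already chunked.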
def convert_flat_to_chunked(data_dir: Path, chunk_size: int = DEFAULT_CHUNK_SIZE) -> bool:
    flat_files = sorted(data_dir.glob("*.parquet"))
    chunked_files = list(data_dir.glob("*/*.parquet"))

    if chunked_files:
        print(f"Already in chunked format ({len(chunked_files)} files).")
        return False

    if not flat_files:
        raise FileNotFoundError(f"No parquet files in {data_dir}")

    print(f"Converting {len(flat_files)} flat parquet file(s) to LeRobot chunked format...")

    hf_dataset = Dataset.from_parquet([str(p) for p in flat_files])
    dataset_size_mb = get_dataset_size_mb(hf_dataset)

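    # Small dataset: a single chunk-000/file-000.parquet holds everything.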
    if dataset_size_mb <= DEFAULT_DATA_FILE_SIZE_MB:
        path = data_dir / f"{CHUNK_FILE_PATTERN.format(chunk_index=0, file_index=0)}.parquet"
        path.parent.mkdir(parents=True, exist_ok=True)
        hf_dataset.to_parquet(path)
        print(f" Wrote {path}")
    else:
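        # Locate episode boundaries: shards may only start and end where
        # episode_index changes, so an episode is never split across files.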
        episode_indices = np.array(hf_dataset["episode_index"])
        episode_boundaries = np.where(np.diff(episode_indices) != 0)[0] + 1
        episode_starts = np.concatenate(([0], episode_boundaries))
        episode_ends = np.concatenate((episode_boundaries, [len(hf_dataset)]))

        num_episodes = len(episode_starts)
        current_episode_idx = 0
        chunk_idx, file_idx = 0, 0

        while current_episode_idx < num_episodes:
            shard_start_row = episode_starts[current_episode_idx]
            shard_end_row = episode_ends[current_episode_idx]
            next_episode_to_try_idx = current_episode_idx + 1

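            # Greedily absorb whole episodes until one more would push the
            # shard past the file-size budget.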
            while next_episode_to_try_idx < num_episodes:
                potential_shard_end_row = episode_ends[next_episode_to_try_idx]
                shard_candidate = hf_dataset.select(
                    range(shard_start_row, potential_shard_end_row)
                )
                if get_dataset_size_mb(shard_candidate) > DEFAULT_DATA_FILE_SIZE_MB:
                    break
                shard_end_row = potential_shard_end_row
                next_episode_to_try_idx += 1

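            # Write the shard, then move on to the next chunk/file slot.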
            dataset_shard = hf_dataset.select(range(shard_start_row, shard_end_row))
            path = data_dir / f"{CHUNK_FILE_PATTERN.format(chunk_index=chunk_idx, file_index=file_idx)}.parquet"
            path.parent.mkdir(parents=True, exist_ok=True)
            dataset_shard.to_parquet(path)
            print(f" Wrote {path}")

            chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, chunk_size)
            current_episode_idx = next_episode_to_try_idx

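    # The flat originals are now superseded by the chunked copies.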
    for f in flat_files:
        f.unlink()
        print(f" Removed {f}")

    print("Conversion complete.")
    return True


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--repo-id", default="ases200q2/libero_object", help="HF dataset repo ID")
    parser.add_argument("--data-dir", type=Path, required=True, help="Path to data/ directory")
    parser.add_argument("--push-to-hub", action="store_true")
    parser.add_argument("--delete-flat-from-hub", action="store_true", help="Remove old flat file from hub")
    args = parser.parse_args()

    data_dir = Path(args.data_dir)
    if not data_dir.exists():
        raise FileNotFoundError(f"Data dir not found: {data_dir}")

    convert_flat_to_chunked(data_dir)

    if args.push_to_hub:
        root = data_dir.parent
        print(f"\nPushing to https://huggingface.co/datasets/{args.repo_id} ...")
        upload_folder(
            repo_id=args.repo_id,
            folder_path=str(root),
            repo_type="dataset",
            commit_message="Convert data to LeRobot chunked format",
        )
        print("Done.")

    if args.delete_flat_from_hub:
        print(f"\nDeleting flat parquet from {args.repo_id}...")
        delete_file(
            path_in_repo="data/train-00000-of-00001.parquet",
            repo_id=args.repo_id,
            repo_type="dataset",
            commit_message="Remove flat parquet",
        )
        print("Done.")


if __name__ == "__main__":
    main()
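After conversion, the shards are plain parquet, so the result can be sanity-checked without lerobot. A minimal sketch of such a check, assuming the script has already been run against ./data and that the flat source was sorted by episode_index (the conversion above assumes this too); the glob pattern and the monotonicity assertion are illustrative, not part of the script:

import numpy as np
from datasets import load_dataset

# Read every chunked shard back as one dataset; shards sort correctly
# because the chunk and file indices are zero-padded.
ds = load_dataset("parquet", data_files="data/chunk-*/file-*.parquet", split="train")
print(ds)

# Episodes were never split across files, so episode_index should still
# be non-decreasing over the reassembled rows.
episode_index = np.array(ds["episode_index"])
assert (np.diff(episode_index) >= 0).all()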