"""Combine multiple LeRobot v2 datasets into a single dataset.

Combines three g1_procedural_room_navigation datasets by:
- Re-indexing episodes across all source datasets
- Copying parquet files with updated episode_id columns
- Symlinking video files to originals
- Merging episodes.jsonl with new indices
- Creating origin.yaml to track provenance
Properly handles chunk boundaries: when the new episode index crosses
chunks_size, files are placed into chunk-001/, chunk-002/, etc.
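
Example mapping: if the three sources contained 100, 120, and 80 episodes,
they would be re-indexed to 0-99, 100-219, and 220-299 respectively
(counts illustrative).

Run from the directory containing demo_data/ (all paths are relative to it).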
"""
import json
import os
import shutil
from pathlib import Path

import pyarrow as pa
import pyarrow.parquet as pq
import yaml

DEMO_DATA_DIR = Path("demo_data")
SOURCE_DATASETS = [
"g1_procedural_room_navigation_20260206_062009",
"g1_procedural_room_navigation_20260206_080307",
"g1_procedural_room_navigation_20260206_095145",
]
OUTPUT_DATASET = "g1_procedural_room_navigation_combined"


def count_episodes(dataset_dir: Path) -> int:
    """Count episodes as the number of non-empty lines in meta/episodes.jsonl."""
    with open(dataset_dir / "meta" / "episodes.jsonl") as f:
        return sum(1 for line in f if line.strip())


def get_video_keys(info: dict) -> list[str]:
    """Extract video feature keys from info.json features."""
    return [
        key for key, feat in info["features"].items()
        if feat.get("dtype") == "video"
    ]
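# Example feature entry that the filter above matches (illustrative):
#   "observation.images.ego_view": {"dtype": "video", "shape": [256, 256, 3], ...}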


def main():
    output_dir = DEMO_DATA_DIR / OUTPUT_DATASET
    if output_dir.exists():
        print(f"Output directory {output_dir} already exists. Aborting.")
        return

    source_dirs = [DEMO_DATA_DIR / name for name in SOURCE_DATASETS]
    for src in source_dirs:
        if not src.exists():
            print(f"Source dataset {src} not found. Aborting.")
            return

    # Load info.json from the first source to get chunks_size, the path
    # templates, and the video keys.
    with open(source_dirs[0] / "meta" / "info.json") as f:
        info = json.load(f)
    chunks_size = info["chunks_size"]
    data_path_template = info["data_path"]
    video_path_template = info["video_path"]
    video_keys = get_video_keys(info)
    print(f"chunks_size: {chunks_size}")
    print(f"Video keys: {video_keys}")

    # Create output directory structure
    (output_dir / "meta").mkdir(parents=True)

    # 1. Copy info.json, modality.json, tasks.jsonl from first source (all identical)
    first_src = source_dirs[0]
    for filename in ["info.json", "modality.json", "tasks.jsonl"]:
        shutil.copy2(first_src / "meta" / filename, output_dir / "meta" / filename)
    print("Copied info.json, modality.json, tasks.jsonl")

    # 2. Merge episodes.jsonl with re-indexed episode_index
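    # Each line is one JSON episode record, roughly (illustrative):
    #   {"episode_index": 0, "tasks": ["<task description>"], "length": 250}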
    all_episodes = []
    episode_offset = 0
    for src in source_dirs:
        with open(src / "meta" / "episodes.jsonl") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                ep = json.loads(line)
                ep["episode_index"] = ep["episode_index"] + episode_offset
                all_episodes.append(ep)
        episode_offset += count_episodes(src)
    with open(output_dir / "meta" / "episodes.jsonl", "w") as f:
        for ep in all_episodes:
            f.write(json.dumps(ep) + "\n")
    print(f"Merged episodes.jsonl: {len(all_episodes)} total episodes")

    # 3. Copy parquet files with updated episode_id, respecting chunk boundaries
    episode_offset = 0
    for src in source_dirs:
        num_episodes = count_episodes(src)
        src_chunks_size = chunks_size  # assume same chunks_size across sources
        for local_idx in range(num_episodes):
            new_idx = local_idx + episode_offset
            src_chunk = local_idx // src_chunks_size
            dst_chunk = new_idx // chunks_size
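            # Worked example (illustrative): with chunks_size=1000 and
            # episode_offset=1200, local episode 5 reads from the source's
            # chunk-000/ but writes to chunk-001/ as episode 1205.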
            src_parquet = src / data_path_template.format(
                episode_chunk=src_chunk, episode_index=local_idx
            )
            dst_parquet = output_dir / data_path_template.format(
                episode_chunk=dst_chunk, episode_index=new_idx
            )
            dst_parquet.parent.mkdir(parents=True, exist_ok=True)
            # Rewrite the episode_id column so every frame carries the new
            # global episode index; all other columns are copied unchanged.
            table = pq.read_table(src_parquet)
            new_episode_id = pa.array([new_idx] * len(table), type=pa.int64())
            col_idx = table.schema.get_field_index("episode_id")
            table = table.set_column(col_idx, "episode_id", new_episode_id)
            pq.write_table(table, dst_parquet)
        print(f"Copied {num_episodes} parquet files from {src.name} "
              f"(episodes {episode_offset}-{episode_offset + num_episodes - 1})")
        episode_offset += num_episodes

    # 4. Symlink video files, respecting chunk boundaries
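    # Symlinks keep the combined dataset small but tie it to the source
    # directories; use shutil.copy2 instead of os.symlink below if the
    # output must be self-contained (e.g. for upload or archiving).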
    episode_offset = 0
    for src in source_dirs:
        num_episodes = count_episodes(src)
        src_chunks_size = chunks_size
        for local_idx in range(num_episodes):
            new_idx = local_idx + episode_offset
            src_chunk = local_idx // src_chunks_size
            dst_chunk = new_idx // chunks_size
            for video_key in video_keys:
                src_video = src / video_path_template.format(
                    episode_chunk=src_chunk, video_key=video_key, episode_index=local_idx
                )
                # Resolve to an absolute path so the symlink stays valid
                # regardless of where the combined dataset is accessed from.
                src_video = src_video.resolve()
                dst_video = output_dir / video_path_template.format(
                    episode_chunk=dst_chunk, video_key=video_key, episode_index=new_idx
                )
                dst_video.parent.mkdir(parents=True, exist_ok=True)
                os.symlink(src_video, dst_video)
        print(f"Symlinked videos for {num_episodes} episodes from {src.name}")
        episode_offset += num_episodes

    # 5. Create origin.yaml
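    # Resulting file shape (counts and paths illustrative):
    #   description: Combined dataset from multiple collection sessions
    #   sources:
    #   - name: g1_procedural_room_navigation_20260206_062009
    #     path: /abs/path/to/demo_data/g1_procedural_room_navigation_20260206_062009
    #     original_episodes: 100
    #     mapped_range:
    #     - 0
    #     - 99
    #   total_episodes: 300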
    origin = {
        "description": "Combined dataset from multiple collection sessions",
        "sources": [],
    }
    episode_offset = 0
    for src in source_dirs:
        num_episodes = count_episodes(src)
        origin["sources"].append({
            "name": src.name,
            "path": str(src.resolve()),
            "original_episodes": num_episodes,
            "mapped_range": [episode_offset, episode_offset + num_episodes - 1],
        })
        episode_offset += num_episodes
    origin["total_episodes"] = episode_offset
    with open(output_dir / "meta" / "origin.yaml", "w") as f:
        yaml.dump(origin, f, default_flow_style=False, sort_keys=False)
    print("Created origin.yaml")

    print(f"\nDone! Combined dataset at: {output_dir}")
    print(f"Total episodes: {episode_offset}")


if __name__ == "__main__":
    main()