# PAI_UE-IsaacSim-Navigation/scripts/combine_datasets.py
"""Combine multiple LeRobot v2 datasets into a single dataset.
Combines three g1_procedural_room_navigation datasets by:
- Re-indexing episodes across all source datasets
- Copying parquet files with updated episode_id columns
- Symlinking video files to originals
- Merging episodes.jsonl with new indices
- Creating origin.yaml to track provenance
Properly handles chunk boundaries: when the new episode index crosses
chunks_size, files are placed into chunk-001/, chunk-002/, etc.
"""
import json
import os
import shutil
from pathlib import Path
import pyarrow.parquet as pq
import pyarrow as pa
import yaml
DEMO_DATA_DIR = Path("demo_data")
SOURCE_DATASETS = [
"g1_procedural_room_navigation_20260206_062009",
"g1_procedural_room_navigation_20260206_080307",
"g1_procedural_room_navigation_20260206_095145",
]
OUTPUT_DATASET = "g1_procedural_room_navigation_combined"
def count_episodes(dataset_dir: Path) -> int:
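    """Count the non-empty lines (one per episode) in meta/episodes.jsonl."""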
with open(dataset_dir / "meta" / "episodes.jsonl") as f:
return sum(1 for line in f if line.strip())
def get_video_keys(info: dict) -> list[str]:
"""Extract video feature keys from info.json features."""
return [
key for key, feat in info["features"].items()
if feat.get("dtype") == "video"
]
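# In a LeRobot v2 info.json, a video feature entry typically looks like
# (illustrative key name):
#   "observation.images.ego_view": {"dtype": "video", "shape": [...], ...}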
def main():
output_dir = DEMO_DATA_DIR / OUTPUT_DATASET
if output_dir.exists():
print(f"Output directory {output_dir} already exists. Aborting.")
return
source_dirs = [DEMO_DATA_DIR / name for name in SOURCE_DATASETS]
for src in source_dirs:
if not src.exists():
print(f"Source dataset {src} not found. Aborting.")
return
# Load info.json to get chunks_size and video keys
with open(source_dirs[0] / "meta" / "info.json") as f:
info = json.load(f)
chunks_size = info["chunks_size"]
data_path_template = info["data_path"]
video_path_template = info["video_path"]
video_keys = get_video_keys(info)
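    # data_path and video_path are format strings; in LeRobot v2 they typically
    # resemble "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet"
    # and "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4".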
print(f"chunks_size: {chunks_size}")
print(f"Video keys: {video_keys}")
# Create output directory structure
(output_dir / "meta").mkdir(parents=True)
    # 1. Copy info.json, modality.json, tasks.jsonl from the first source (assumed identical across sources)
first_src = source_dirs[0]
for filename in ["info.json", "modality.json", "tasks.jsonl"]:
shutil.copy2(first_src / "meta" / filename, output_dir / "meta" / filename)
print("Copied info.json, modality.json, tasks.jsonl")
# 2. Merge episodes.jsonl with re-indexed episode_index
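    # Each episodes.jsonl line is typically a record such as
    # {"episode_index": 0, "tasks": [...], "length": 350} (illustrative values);
    # only episode_index is rewritten, all other fields pass through unchanged.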
all_episodes = []
episode_offset = 0
for src in source_dirs:
with open(src / "meta" / "episodes.jsonl") as f:
for line in f:
line = line.strip()
if not line:
continue
ep = json.loads(line)
ep["episode_index"] = ep["episode_index"] + episode_offset
all_episodes.append(ep)
episode_offset += count_episodes(src)
with open(output_dir / "meta" / "episodes.jsonl", "w") as f:
for ep in all_episodes:
f.write(json.dumps(ep) + "\n")
print(f"Merged episodes.jsonl: {len(all_episodes)} total episodes")
# 3. Copy parquet files with updated episode_id, respecting chunk boundaries
episode_offset = 0
for src in source_dirs:
num_episodes = count_episodes(src)
src_chunks_size = chunks_size # assume same chunks_size across sources
for local_idx in range(num_episodes):
new_idx = local_idx + episode_offset
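            # The source chunk follows the episode's local index, while the
            # destination chunk follows its new global index, so an episode can
            # land in a different chunk directory than it came from.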
src_chunk = local_idx // src_chunks_size
dst_chunk = new_idx // chunks_size
src_parquet = src / data_path_template.format(
episode_chunk=src_chunk, episode_index=local_idx
)
dst_parquet = output_dir / data_path_template.format(
episode_chunk=dst_chunk, episode_index=new_idx
)
dst_parquet.parent.mkdir(parents=True, exist_ok=True)
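            # Rewrite the parquet table rather than copying the file byte-for-byte,
            # because the episode_id column must reflect the new global index.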
table = pq.read_table(src_parquet)
new_episode_id = pa.array([new_idx] * len(table), type=pa.int64())
col_idx = table.schema.get_field_index("episode_id")
table = table.set_column(col_idx, "episode_id", new_episode_id)
pq.write_table(table, dst_parquet)
print(f"Copied {num_episodes} parquet files from {src.name} (episodes {episode_offset}-{episode_offset + num_episodes - 1})")
episode_offset += num_episodes
# 4. Symlink video files, respecting chunk boundaries
episode_offset = 0
for src in source_dirs:
num_episodes = count_episodes(src)
src_chunks_size = chunks_size
for local_idx in range(num_episodes):
new_idx = local_idx + episode_offset
src_chunk = local_idx // src_chunks_size
dst_chunk = new_idx // chunks_size
for video_key in video_keys:
src_video = src / video_path_template.format(
episode_chunk=src_chunk, video_key=video_key, episode_index=local_idx
)
src_video = src_video.resolve()
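                # resolve() yields an absolute target, so the symlink works no
                # matter where the combined dataset is read from, but it breaks
                # if the source dataset is moved or deleted.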
dst_video = output_dir / video_path_template.format(
episode_chunk=dst_chunk, video_key=video_key, episode_index=new_idx
)
dst_video.parent.mkdir(parents=True, exist_ok=True)
os.symlink(src_video, dst_video)
print(f"Symlinked {num_episodes} videos from {src.name}")
episode_offset += num_episodes
# 5. Create origin.yaml
origin = {
"description": "Combined dataset from multiple collection sessions",
"sources": [],
}
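    # The resulting meta/origin.yaml looks roughly like this (hypothetical counts and paths):
    #   description: Combined dataset from multiple collection sessions
    #   sources:
    #     - name: g1_procedural_room_navigation_20260206_062009
    #       path: /abs/path/to/demo_data/g1_procedural_room_navigation_20260206_062009
    #       original_episodes: 700
    #       mapped_range: [0, 699]
    #   total_episodes: ...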
episode_offset = 0
for src in source_dirs:
num_episodes = count_episodes(src)
origin["sources"].append({
"name": src.name,
"path": str(src.resolve()),
"original_episodes": num_episodes,
"mapped_range": [episode_offset, episode_offset + num_episodes - 1],
})
episode_offset += num_episodes
origin["total_episodes"] = episode_offset
with open(output_dir / "meta" / "origin.yaml", "w") as f:
yaml.dump(origin, f, default_flow_style=False, sort_keys=False)
print("Created origin.yaml")
print(f"\nDone! Combined dataset at: {output_dir}")
print(f"Total episodes: {episode_offset}")
if __name__ == "__main__":
main()
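# Usage sketch (assuming demo_data/ with the three source datasets sits in the
# current working directory):
#   python scripts/combine_datasets.py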