charithmunasinghe committed on
Commit
8221873
·
1 Parent(s): a1af2ba

clean up raw dataset

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. helper_scripts/LEROBOT_V3_CONVERSION_PLAN.md +688 -0
  2. helper_scripts/lerobotv3_format_explanation.md +762 -0
  3. cleaningcloth_20251104_205021.hdf5 → raw_dataset/cleaningcloth_20251104_205021.hdf5 +0 -0
  4. cleaningcloth_20251104_205021.json → raw_dataset/cleaningcloth_20251104_205021.json +0 -0
  5. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000000.png +0 -0
  6. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000001.png +0 -0
  7. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000002.png +0 -0
  8. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000003.png +0 -0
  9. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000004.png +0 -0
  10. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000005.png +0 -0
  11. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000006.png +0 -0
  12. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000007.png +0 -0
  13. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000008.png +0 -0
  14. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000009.png +0 -0
  15. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000010.png +0 -0
  16. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000011.png +0 -0
  17. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000012.png +0 -0
  18. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000013.png +0 -0
  19. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000014.png +0 -0
  20. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000015.png +0 -0
  21. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000016.png +0 -0
  22. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000017.png +0 -0
  23. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000018.png +0 -0
  24. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000019.png +0 -0
  25. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000020.png +0 -0
  26. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000021.png +0 -0
  27. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000022.png +0 -0
  28. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000023.png +0 -0
  29. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000024.png +0 -0
  30. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000025.png +0 -0
  31. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000026.png +0 -0
  32. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000027.png +0 -0
  33. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000028.png +0 -0
  34. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000029.png +0 -0
  35. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000030.png +0 -0
  36. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000031.png +0 -0
  37. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000032.png +0 -0
  38. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000033.png +0 -0
  39. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000034.png +0 -0
  40. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000035.png +0 -0
  41. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000036.png +0 -0
  42. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000037.png +0 -0
  43. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000038.png +0 -0
  44. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000039.png +0 -0
  45. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000040.png +0 -0
  46. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000041.png +0 -0
  47. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000042.png +0 -0
  48. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000043.png +0 -0
  49. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000044.png +0 -0
  50. {cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000045.png +0 -0
helper_scripts/LEROBOT_V3_CONVERSION_PLAN.md ADDED
@@ -0,0 +1,688 @@
1
+ # LeRobot v3.0 Conversion Plan for PiPER Picking Tests Dataset
2
+
3
+ ## Executive Summary
4
+
5
+ Converting piper_picking_tests from HDF5+PNG format to LeRobot v3.0 (Parquet+MP4) for VLA fine-tuning.
6
+
7
+ **VERIFIED Dataset Stats (from actual files):**
8
+ - Episodes: 13
9
+ - Tasks: 12 (unique picking tasks)
10
+ - Total frames: 5,016
11
+ - Cameras: 2 (table_cam 800×720, wrist_cam **1280×720**)
12
+ - FPS: **11-12 FPS** (verified from actual timestamps, NOT 30!)
13
+ - Robot: 7-DOF arm
14
+ - Current size: ~480 MB (PNG images + HDF5)
15
+ - Expected output: ~150-200 MB (compressed MP4)
16
+ - **Dependencies:** LeRobot v0.4.3, PyAV 15.1.0, PyTorch 2.7.1 (all installed ✅)
17
+
18
+ **Data Sources:**
19
+ - **State/Action/Timestamps**: HDF5 files (`observation/state`, `action`, `timestamp`)
20
+ - **Images**: PNG files referenced by paths in HDF5
21
+ - **FPS**: Calculated from actual timestamp data
22
+
23
+ **Strategy:** LIBERO multi-task approach with separate tasks.parquet and task_index in frames.
24
+
25
+ **Reference:** See `lerobotv3_format_explanation.md` for complete v3.0 format knowledge.
26
+
27
+ ## Current Format (VERIFIED)
28
+
29
+ ### Actual File Organization
30
+ ```
31
+ piper_picking_tests/
32
+ ├── {episode_name}_{timestamp}.hdf5 # 13 files, 58-98 KB each
33
+ ├── {episode_name}_{timestamp}.json # Episode metadata (optional, not used)
34
+ └── {task_name}_images/ # 12 folders (NOTE: task name, NOT episode name!)
35
+ ├── observation.images.table_cam/ # PNG frames (800×720), frame_000000.png format
36
+ └── observation.images.wrist_cam/ # PNG frames (1280×720), frame_000000.png format
37
+ ```
38
+
39
+ **IMPORTANT:** Image folder naming uses task name only (e.g., `cleaningcloth_images`), not full episode name with timestamp!
40
+
41
+ ### HDF5 Structure (VERIFIED from pencil episode)
42
+ ```python
43
+ observation/state # [n_frames, 7] float32 - joint angles in degrees
44
+ action # [n_frames, 7] float32 - commands
45
+ timestamp # [n_frames] float64 - frame timestamps in seconds
46
+ episode_index # [n_frames] int64 - all same value per episode
47
+ observation/images/table_cam # [n_frames] object - paths to PNG files
48
+ observation/images/wrist_cam # [n_frames] object - paths to PNG files
49
+ ```
50
+
51
+ **Key Finding:** HDF5 stores **PATHS** to images, not the images themselves!
52
+ - Example: `b'cleaningcloth_images/observation.images.table_cam/frame_000000.png'`
53
+ - Images are separate PNG files at 800×720 (table) and 1280×720 (wrist)
54
+ - **Image naming:** Current format uses `frame_000000.png` (underscore), but LeRobot's `encode_video_frames` expects `frame-000000.png` (dash)
55
+ - **Solution:** Copy/rename images during conversion to match required format
56
+
57
+ ### Verified Episode List
58
+ ```python
59
+ EPISODES = {
60
+ 'cleaningcloth_20251104_205021': (168 frames, 14.6s),
61
+ 'fillamentroll_20251104_204834': (276 frames, 23.1s),
62
+ 'gamecontroller_20251104_203816': (335 frames, 25.0s),
63
+ 'hexwrench_20251104_204002': (333 frames, 24.4s),
64
+ 'pencil_20251104_205415': (297 frames, 23.2s),
65
+ 'scissors_20251104_204120': (290 frames, 21.0s),
66
+ 'scissors_hidden_20251104_205751': (358 frames, 28.6s),
67
+ 'screwdriver_20251104_203022': (324 frames, 24.8s),
68
+ 'smallkey_20251104_203257': (529 frames, 39.7s),
69
+ 'smallpaper_20251104_203636': (429 frames, 31.2s),
70
+ 'smallwoodenstick_20251104_204353': (485 frames, 34.4s),
71
+ 'thinmetaldisk_20251104_204557': (764 frames, 55.5s),
72
+ 'thinmetaldisk_20251104_204721': (428 frames, 30.7s),
73
+ }
74
+ # Total: 5,016 frames
75
+ ```
76
+
77
+ ### Image Resolution (CORRECTED)
78
+ - **table_cam**: 800×720 (W×H) RGB PNG
79
+ - **wrist_cam**: 1280×720 (W×H) RGB PNG ← **NOT 640×480!**
80
+ - File sizes: ~387 KB (table), ~619 KB (wrist) per frame
81
+ - Total images per episode: 2 × n_frames PNG files
82
+
83
+ ## Target v3.0 Structure
84
+
85
+ ```
86
+ piper_picking_tests_v3/
87
+ ├── meta/
88
+ │ ├── info.json # Dataset configuration
89
+ │ ├── stats.json # Aggregated statistics
90
+ │ ├── tasks.parquet # 12 task descriptions (LIBERO style)
91
+ │ └── episodes/
92
+ │ └── chunk-000/
93
+ │ └── file-000.parquet # 13 episode metadata (NO tasks field)
94
+ ├── data/
95
+ │ └── chunk-000/
96
+ │ └── file-000.parquet # All 5,016 frames (WITH task_index)
97
+ └── videos/
98
+ ├── observation.images.table_cam/
99
+ │ └── chunk-000/
100
+ │ ├── file-000.mp4 # Episode 0 (cleaningcloth)
101
+ │ ├── file-001.mp4 # Episode 1 (fillamentroll)
102
+ │ └── ... # 13 videos total
103
+ └── observation.images.wrist_cam/
104
+ └── chunk-000/
105
+ └── ... # 13 videos total
106
+ ```
107
+
108
+ ### Why LIBERO Multi-Task Approach?
109
+
110
+ **Chosen because:**
111
+ - ✅ 12 distinct tasks (multi-task dataset)
112
+ - ✅ Clean task management via tasks.parquet
113
+ - ✅ Explicit task conditioning with task_index
114
+ - ✅ Scalable for adding more tasks
115
+ - ✅ One video per episode (flexible loading)
116
+
117
+ ### Video Encoding Strategy
118
+
119
+ **Using LeRobot's built-in `encode_video_frames` function (recommended):**
120
+ ```python
121
+ from lerobot.datasets.video_utils import encode_video_frames
122
+ import shutil
123
+ from pathlib import Path
124
+
125
+ def prepare_and_encode_video(image_paths, output_path, fps=12, temp_dir=None):
126
+ """
127
+ Prepare images and encode to MP4 using LeRobot's encode_video_frames.
128
+
129
+ NOTE: encode_video_frames expects images named 'frame-XXXXXX.png' (dash, not underscore)
130
+ """
131
+ temp = Path(temp_dir) if temp_dir else Path(output_path).parent / "temp_frames"
132
+ temp.mkdir(parents=True, exist_ok=True)
133
+
134
+ # Copy images with correct naming (frame-XXXXXX.png)
135
+ for i, src_path in enumerate(image_paths):
136
+ dst = temp / f"frame-{i:06d}.png"
137
+ shutil.copy(src_path, dst)
138
+
139
+ # Encode using LeRobot's function
140
+ encode_video_frames(
141
+ imgs_dir=temp,
142
+ video_path=output_path,
143
+ fps=fps,
144
+ vcodec="libsvtav1", # AV1 codec (default)
145
+ pix_fmt="yuv420p",
146
+ crf=30, # Quality (lower = better, 0-51)
147
+ overwrite=True
148
+ )
149
+
150
+ # Cleanup temp directory
151
+ shutil.rmtree(temp)
152
+ ```
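+
+ As a usage illustration (a sketch, not part of the tested pipeline), one episode's table-cam frames could be encoded like this, assuming the `raw_dataset/` layout introduced in this commit:
+
+ ```python
+ import h5py
+ from pathlib import Path
+
+ source = Path("raw_dataset")
+ with h5py.File(source / "cleaningcloth_20251104_205021.hdf5", "r") as f:
+     # HDF5 stores relative PNG paths like 'cleaningcloth_images/...'
+     table_paths = [source / p.decode("utf-8") for p in f["observation/images/table_cam"][:]]
+
+ prepare_and_encode_video(
+     image_paths=table_paths,
+     output_path=Path("videos/observation.images.table_cam/chunk-000/file-000.mp4"),
+     fps=12,
+ )
+ ```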
153
+
154
+ **✅ TESTED and VERIFIED:**
155
+ - 10 frames (800×720) encoded to 0.10 MB MP4 using libsvtav1
156
+ - Video properties: 800×720, AV1 codec (libdav1d decoder)
157
+ - Encoding parameters: YUV420, CRF 30, GOP 2
158
+
159
+ **Expected compression:**
160
+ - PNG: ~480 MB total (all episodes, both cameras)
161
+ - MP4 (libsvtav1): ~150-200 MB total (60-70% compression)
162
+ - Per episode: ~6-12 MB per camera
165
+
166
+ ## Conversion Requirements
167
+
168
+ ### 1. Task Language Descriptions (CRITICAL for VLA)
169
+
170
+ **Current:** Task names only (`"screwdriver"`, `"scissors"`)
171
+ **Required:** Natural language instructions for VLA models (SmolVLA, Pi0, XVLA)
172
+
173
+ ```python
174
+ TASK_LANGUAGE_MAP = {
175
+ 0: "Pick up the cleaning cloth from the table.",
176
+ 1: "Grasp and pick up the filament roll.",
177
+ 2: "Pick up the game controller from the table.",
178
+ 3: "Pick up the hex wrench tool.",
179
+ 4: "Grasp and pick up the pencil.",
180
+ 5: "Pick up the scissors from the table.",
181
+ 6: "Find and pick up the scissors that are partially hidden.",
182
+ 7: "Pick up the screwdriver from the table.",
183
+ 8: "Grasp and pick up the small key.",
184
+ 9: "Pick up the small piece of paper.",
185
+ 10: "Pick up the small wooden stick.",
186
+ 11: "Pick up the thin metal disk.",
187
+ }
188
+ ```
189
+
190
+ **Episode-to-task mapping:**
191
+ ```python
192
+ EPISODE_TO_TASK = {
193
+ 'cleaningcloth_20251104_205021': 0,
194
+ 'fillamentroll_20251104_204834': 1,
195
+ 'gamecontroller_20251104_203816': 2,
196
+ 'hexwrench_20251104_204002': 3,
197
+ 'pencil_20251104_205415': 4,
198
+ 'scissors_20251104_204120': 5,
199
+ 'scissors_hidden_20251104_205751': 6,
200
+ 'screwdriver_20251104_203022': 7,
201
+ 'smallkey_20251104_203257': 8,
202
+ 'smallpaper_20251104_203636': 9,
203
+ 'smallwoodenstick_20251104_204353': 10,
204
+ 'thinmetaldisk_20251104_204557': 11,
205
+ 'thinmetaldisk_20251104_204721': 11, # Same task, different demo
206
+ }
207
+ ```
208
+
209
+ ### 2. Create tasks.parquet
210
+
211
+ Task descriptions as DataFrame INDEX (LIBERO style):
212
+ ```python
213
+ import pandas as pd
214
+
215
+ # Create tasks DataFrame
216
+ tasks_data = {'task_index': list(range(12))}
217
+ task_descriptions = list(TASK_LANGUAGE_MAP.values())
218
+ tasks_df = pd.DataFrame(tasks_data, index=task_descriptions)
219
+
220
+ # Save to parquet
221
+ tasks_df.to_parquet('meta/tasks.parquet')
222
+ ```
223
+
224
+ ### 3. Data Parquet Schema
225
+
226
+ Frame-level data with task_index:
227
+ ```python
228
+ {
229
+ 'observation.state': float32[7], # Joint angles
230
+ 'action': float32[7], # Commands
231
+ 'timestamp': float32, # Frame time
232
+ 'frame_index': int64, # Frame in episode
233
+ 'episode_index': int64, # Which episode
234
+ 'index': int64, # Global frame index
235
+ 'task_index': int64, # Maps to tasks.parquet ← CRITICAL!
236
+ 'next.done': bool # Last frame marker
237
+ }
238
+ ```
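+
+ A quick way to sanity-check the written frame table is to read it back with pandas (a sketch; the file path follows the target structure above):
+
+ ```python
+ import pandas as pd
+
+ df = pd.read_parquet("piper_picking_tests_v3/data/chunk-000/file-000.parquet")
+ expected = {"observation.state", "action", "timestamp", "frame_index",
+             "episode_index", "index", "task_index", "next.done"}
+ assert expected.issubset(df.columns), f"missing columns: {expected - set(df.columns)}"
+ assert len(df) == 5016                    # total frames
+ assert df["task_index"].nunique() == 12   # 12 distinct tasks
+ ```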
239
+
240
+ ### 4. Episode Metadata Schema
241
+
242
+ NO tasks field (LIBERO approach):
243
+ ```python
244
+ {
245
+ 'episode_index': int64,
246
+ 'length': int64, # Number of frames
247
+
248
+ # Data file mappings
249
+ 'data/chunk_index': 0,
250
+ 'data/file_index': 0,
251
+ 'dataset_from_index': int64,
252
+ 'dataset_to_index': int64,
253
+
254
+ # Video file mappings (per camera)
255
+ 'videos/observation.images.table_cam/chunk_index': 0,
256
+ 'videos/observation.images.table_cam/file_index': int64,
257
+ 'videos/observation.images.table_cam/from_timestamp': float,
258
+ 'videos/observation.images.table_cam/to_timestamp': float,
259
+
260
+ # Per-episode statistics
261
+ 'stats/action/min': float32[7],
262
+ 'stats/action/max': float32[7],
263
+ # ... other stats
264
+ }
265
+ ```
266
+
267
+ ### Step 1: Setup
268
+
269
+ ```python
270
+ import h5py
271
+ import json
272
+ import pandas as pd
273
+ import numpy as np
274
+ from pathlib import Path
275
+ from PIL import Image
276
+ import shutil
277
+ import tempfile
278
+
279
+ # CORRECT import paths for LeRobot v0.4.3
280
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
281
+ from lerobot.datasets.video_utils import encode_video_frames
282
+
283
+ # Dependencies (ALL INSTALLED ✅)
284
+ # - lerobot v0.4.3 (installed from local repo)
285
+ # - av 15.1.0 (PyAV for video encoding)
+ ```
+
+ ### Step 2: Define Features
+
+ Feature schema passed to `LeRobotDataset.create()` (and written to `meta/info.json`):
+
+ ```python
+ PIPER_FEATURES = {
+     "observation.images.table_cam": {
+         "dtype": "video",
+         "shape": [720, 800, 3],  # Height × Width × Channels
+         "names": ["height", "width", "channel"],
+         "video_info": {
+             "video.fps": 12.0,           # ACTUAL FPS from timestamps (not 30!)
+             "video.codec": "libsvtav1",  # CORRECT codec name
+             "video.pix_fmt": "yuv420p",
+             "video.is_depth_map": False,
+             "has_audio": False
+         }
+     },
+     "observation.images.wrist_cam": {
+         "dtype": "video",
+         "shape": [720, 1280, 3],  # Height × Width × Channels ← CORRECTED!
+         "names": ["height", "width", "channel"],
+         "video_info": {
+             "video.fps": 12.0,           # ACTUAL FPS from timestamps (not 30!)
+             "video.codec": "libsvtav1",  # CORRECT codec name
+             "video.pix_fmt": "yuv420p",
+             "video.is_depth_map": False,
+             "has_audio": False
+         }
+     },
+     "observation.state": {
+         "dtype": "float32",
+         "shape": [7],
+         "names": {"motors": ["joint_1", "joint_2", "joint_3", "joint_4",
+                              "joint_5", "joint_6", "joint_7"]},
+         "fps": 12.0
+     },
+     "action": {
+         "dtype": "float32",
+         "shape": [7],
+         "names": {"motors": ["joint_1", "joint_2", "joint_3", "joint_4",
+                              "joint_5", "joint_6", "joint_7"]},
+         "fps": 12.0
+     },
+     "episode_index": {"dtype": "int64", "shape": [1], "names": None, "fps": 12.0},
+     "frame_index": {"dtype": "int64", "shape": [1], "names": None, "fps": 12.0},
+     "timestamp": {"dtype": "float32", "shape": [1], "names": None, "fps": 12.0},
+     "next.done": {"dtype": "bool", "shape": [1], "names": None, "fps": 12.0},
+     "index": {"dtype": "int64", "shape": [1], "names": None, "fps": 12.0},
+     "task_index": {"dtype": "int64", "shape": [1], "names": None, "fps": 12.0},
+ }
350
+ ```
351
+
352
+ ### Step 3: Create Tasks Parquet (LIBERO Style)
353
+
354
+ ```python
355
+ def create_tasks_parquet(output_dir):
356
+ """Create meta/tasks.parquet with task descriptions as index."""
357
+ task_descriptions = [
358
+ "Pick up the cleaning cloth from the table.",
359
+ "Grasp and pick up the filament roll.",
360
+ "Pick up the game controller from the table.",
361
+ "Pick up the hex wrench tool.",
362
+ "Grasp and pick up the pencil.",
363
+ "Pick up the scissors from the table.",
364
+ "Find and pick up the scissors that are partially hidden.",
365
+ "Pick up the screwdriver from the table.",
366
+ "Grasp and pick up the small key.",
367
+ "Pick up the small piece of paper.",
368
+ "Pick up the small wooden stick.",
369
+ "Pick up the thin metal disk.",
370
+ ]
371
+
372
+ tasks_data = {'task_index': list(range(12))}
373
+ tasks_df = pd.DataFrame(tasks_data, index=task_descriptions)
374
+
375
+ output_path = Path(output_dir) / 'meta' / 'tasks.parquet'
376
+ output_path.parent.mkdir(parents=True, exist_ok=True)
377
+ tasks_df.to_parquet(output_path)
378
+ print(f"Created {output_path}")
379
+ ```
380
+
381
+ ### Step 4: Main Conversion Function
382
+
383
+ ```python
384
+ def convert_piper_to_lerobot_v3(
385
+ source_path: Path,
386
+ output_path: Path,
387
+ repo_id: str = "your_username/piper_picking_tests"
388
+ ):
389
+ """Convert PiPER dataset to LeRobot v3.0 format."""
390
+
391
+ # Episode to task mapping (from verified data)
392
+ EPISODE_TO_TASK = {
393
+ 'cleaningcloth_20251104_205021': 0,
394
+ 'fillamentroll_20251104_204834': 1,
395
+ 'gamecontroller_20251104_203816': 2,
396
+ 'hexwrench_20251104_204002': 3,
397
+ 'pencil_20251104_205415': 4,
398
+ 'scissors_20251104_204120': 5,
399
+ 'scissors_hidden_20251104_205751': 6,
400
+ 'screwdriver_20251104_203022': 7,
401
+ 'smallkey_20251104_203257': 8,
+ 'smallpaper_20251104_203636': 9,
+ 'smallwoodenstick_20251104_204353': 10,
+ 'thinmetaldisk_20251104_204557': 11,
+ 'thinmetaldisk_20251104_204721': 11,  # Same task, different demo
+ }
+
+ # Episodes in index order (the dict above preserves the 0-12 ordering)
+ episodes = list(EPISODE_TO_TASK.keys())
+
+ # Create dataset
+ dataset = LeRobotDataset.create(
+ repo_id=repo_id,
+ fps=12, # ACTUAL FPS from timestamp analysis
+ features=PIPER_FEATURES,
+ root=output_path,
+ robot_type="piper",
+ use_videos=True,
+ )
420
+
421
+ # Process each episode
422
+ for ep_idx, ep_name in enumerate(episodes):
423
+ print(f"\nProcessing episode {ep_idx}: {ep_name}")
424
+ task_idx = EPISODE_TO_TASK[ep_name]
425
+
426
+ # Load HDF5 data
427
+ hdf5_path = source_path / f"{ep_name}.hdf5"
428
+ with h5py.File(hdf5_path, 'r') as f:
429
+ # Load arrays from HDF5
430
+ states = f['observation/state'][:]
431
+ actions = f['action'][:]
432
+ timestamps = f['timestamp'][:]
433
+ n_frames = len(timestamps)
434
+
435
+ # Get image paths from HDF5
436
+ table_paths = [p.decode('utf-8') for p in f['observation/images/table_cam'][:]]
437
+ wrist_paths = [p.decode('utf-8') for p in f['observation/images/wrist_cam'][:]]
438
+
439
+ print(f" Frames: {n_frames}, Task: {task_idx}")
440
+
441
+ # Add frames
442
+ for frame_idx in range(n_frames):
443
+ # Load images from paths stored in HDF5
444
+ # NOTE: Paths use task name (e.g., cleaningcloth_images), not episode name
445
+ table_img_path = source_path / table_paths[frame_idx]
446
+ wrist_img_path = source_path / wrist_paths[frame_idx]
447
+
448
+ # Verify files exist
449
+ if not table_img_path.exists():
450
+ raise FileNotFoundError(f"Missing table image: {table_img_path}")
451
+ if not wrist_img_path.exists():
452
+ raise FileNotFoundError(f"Missing wrist image: {wrist_img_path}")
453
+
454
+ table_img = Image.open(table_img_path)
455
+ wrist_img = Image.open(wrist_img_path)
456
+
457
+ frame = {
458
+ "observation.state": states[frame_idx],
459
+ "action": actions[frame_idx],
460
+ "observation.images.table_cam": np.array(table_img),
461
+ "observation.images.wrist_cam": np.array(wrist_img),
462
+ "timestamp": timestamps[frame_idx],
463
+ "next.done": frame_idx == n_frames - 1,
464
+ "task_index": task_idx, # LIBERO approach
465
+ }
466
+
467
+ dataset.add_frame(frame)
468
+
469
+ # Save episode (NO task parameter - we use task_index in frames)
470
+ dataset.save_episode()
471
+ print(f" ✓ Saved {n_frames} frames")
472
+
473
+ # Create tasks.parquet
474
+ create_tasks_parquet(output_path)
475
+
476
+ # Finalize dataset
477
+ print("\nFinalizing dataset...")
478
+ dataset.finalize()
479
+ print("Conversion complete!")
480
+
481
+ return dataset
482
+
483
+ # Usage
484
+ if __name__ == "__main__":
485
+ source = Path("/home/charith/projects/PiPER/piper_picking_tests")
486
+ output = Path("/home/charith/projects/PiPER/piper_picking_tests_v3")
487
+
488
+ dataset = convert_piper_to_lerobot_v3(source, output)
489
+ ```
490
+
491
+ ### Step 5: Validation
492
+
493
+ ```python
494
+ def validate_dataset(dataset_path):
495
+ """Validate converted dataset."""
496
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
497
+
498
+ # Load dataset
499
+ dataset = LeRobotDataset(str(dataset_path))
500
+
501
+ print(f"Total episodes: {dataset.num_episodes}")
502
+ print(f"Total frames: {dataset.num_frames}")
503
+ print(f"Total tasks: {len(dataset.meta.tasks) if hasattr(dataset.meta, 'tasks') else 'N/A'}")
504
+
505
+ # Check tasks.parquet
506
+ tasks_path = dataset_path / 'meta' / 'tasks.parquet'
507
+ if tasks_path.exists():
508
+ tasks_df = pd.read_parquet(tasks_path)
509
+ print(f"\nTasks parquet: {len(tasks_df)} tasks")
510
+ print(tasks_df.head())
511
+
512
+ # Load sample episode
513
+ sample = dataset[0]
514
+ print(f"\nSample frame keys: {sample.keys()}")
515
+ print(f"Task index: {sample.get('task_index', 'NOT FOUND')}")
516
+
517
+ # Check video playback
518
+ print(f"\nVideo shapes:")
519
+ for key in sample.keys():
520
+ if 'image' in key:
521
+ print(f" {key}: {sample[key].shape}")
522
+
523
+ return dataset
524
+ ```
525
+
526
+ ## Testing Plan
527
+
528
+ ### Phase 1: Single Episode Test (30 min)
529
+ ```bash
530
+ # Test on screwdriver episode only
531
+ python convert_script.py --episode screwdriver_20251104_203022
532
+ ```
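+
+ The `convert_script.py` entry point does not exist yet; a minimal sketch of the CLI it assumes (the `--episode`/`--all` flag names are only illustrative) could look like:
+
+ ```python
+ import argparse
+ from pathlib import Path
+
+ parser = argparse.ArgumentParser(description="Convert PiPER episodes to LeRobot v3.0")
+ parser.add_argument("--episode", help="single episode name, e.g. screwdriver_20251104_203022")
+ parser.add_argument("--all", action="store_true", help="convert all 13 episodes")
+ parser.add_argument("--source", type=Path, default=Path("raw_dataset"))
+ parser.add_argument("--output", type=Path, default=Path("piper_picking_tests_v3"))
+ args = parser.parse_args()
+
+ # convert_piper_to_lerobot_v3 (Step 4) is called here; single-episode mode
+ # would restrict EPISODE_TO_TASK to args.episode before the conversion loop.
+ convert_piper_to_lerobot_v3(args.source, args.output)
+ ```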
533
+
534
+ **Validate:**
535
+ - [ ] HDF5 data loads correctly (observation/state, action, timestamp)
536
+ - [ ] Images load and convert to video
537
+ - [ ] task_index assigned correctly
538
+ - [ ] Episode metadata has file mappings
539
+ - [ ] Can load with LeRobotDataset
540
+
541
+ ### Phase 2: Full Conversion (1-2 hours)
542
+ ```bash
543
+ # Convert all 13 episodes
544
+ python convert_script.py --all
545
+ ```
546
+
547
+ **Validate:**
548
+ - [ ] All 13 episodes present
549
+ - [ ] 5,016 total frames
550
+ - [ ] tasks.parquet has 12 tasks
551
+ - [ ] Video quality acceptable
552
+ - [ ] File sizes reasonable (~210 MB total)
553
+
554
+ ### Phase 3: VLA Compatibility Test
555
+ ```python
556
+ # Test with VLA model loading
557
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
558
+
559
+ dataset = LeRobotDataset("path/to/piper_picking_tests_v3")
560
+
561
+ # Check task conditioning
562
+ sample = dataset[0]
563
+ assert 'task_index' in sample
564
+ print(f"Task: {dataset.meta.tasks.iloc[sample['task_index']].name}")
565
+
566
+ # Try loading with VLA model
567
+ # from lerobot.common.policies.vla import SmolVLA
568
+ # model = SmolVLA(...)
569
+ # model.select_action(sample)
570
+ ```
571
+
572
+ ## Expected Outcomes
573
+
574
+ ### File Structure
575
+ ```
576
+ piper_picking_tests_v3/ (~150-200 MB total)
577
+ ├── meta/
578
+ │ ├── info.json (~10 KB)
579
+ │ ├── stats.json (~2 KB)
580
+ │ ├── tasks.parquet (~5 KB)
581
+ │ └── episodes/
582
+ │ └── chunk-000/
583
+ │ └── file-000.parquet (~50 KB)
584
+ ├── data/
585
+ │ └── chunk-000/
586
+ │ └── file-000.parquet (~400 KB)
587
+ └── videos/
588
+ ├── observation.images.table_cam/
589
+ │ └── chunk-000/
590
+ │ └── file-000.mp4 to file-012.mp4 (~75 MB total)
591
+ └── observation.images.wrist_cam/
592
+ └── chunk-000/
593
+ └── file-000.mp4 to file-012.mp4 (~120 MB total)
594
+ ```
595
+
596
+ ### Statistics
+ - 13 episodes, 5,016 frames, 12 tasks
+ - **Data parquet**: ~0.4 MB
+ - **Videos**: ~150-200 MB total (MP4, libsvtav1)
+ - **Metadata**: <100 KB total
+
+ ### Dependencies Installed ✅
+ - ✅ **LeRobot v0.4.3** - Installed in editable mode from `/home/charith/projects/PiPER/lerobot`
+ - ✅ **PyAV 15.1.0** - Video encoding/decoding (downgraded from 16.0.1 for compatibility)
+ - ✅ **PyTorch 2.7.1** - Deep learning framework with CUDA 12.6
+ - ✅ **torchvision 0.22.1** - Image/video processing
+ - ✅ **OpenCV (cv2)** - Already available
+ - ✅ **h5py, pillow, pandas, pyarrow** - Already installed
+
+ ## Next Steps
+
+ 1. ✅ **Knowledge documented** in `lerobotv3_format_explanation.md`
+ 2. ✅ **Conversion plan created** (this file)
+ 3. ✅ **Dependencies installed** - LeRobot v0.4.3, PyAV 15.1.0, PyTorch 2.7.1
+ 4. ✅ **Strategy validated** - End-to-end pipeline tested with 10 frames
+ 5. ✅ **Video encoding verified** - libsvtav1 codec produces correct output
+ 6. ⏭️ **Implement full conversion script**
+ 7. ⏭️ **Test on single episode** (cleaningcloth or pencil)
+ 8. ⏭️ **Debug and refine**
+ 9. ⏭️ **Run full conversion** (all 13 episodes)
+ 10. ⏭️ **Validate with VLA models**
+ 11. ⏭️ **(Optional) Push to Hugging Face Hub**
+
632
+ ### Critical for Success
633
+ - ✅ Use LIBERO approach (tasks.parquet + task_index in frames)
634
+ - ✅ Natural language task descriptions (not just labels!)
635
+ - ✅ Correct HDF5 paths: `observation/state` (singular)
636
+ - ✅ One video per episode (13 files per camera)
637
+ - ✅ task_index in every frame
638
+ - ✅ NO tasks field in episode metadata
639
+ - ✅ **Correct import paths:** `lerobot.datasets.*` (NOT `lerobot.common.datasets.*`)
640
+ - ✅ **Actual FPS:** 11-12 FPS (calculate from timestamps, don't assume 30)
641
+ - ✅ **Image folder naming:** Uses task name only, not full episode name
642
+ - ✅ **Image renaming:** Copy `frame_XXXXXX.png` → `frame-XXXXXX.png` for encode_video_frames
643
+ - ✅ **Codec name:** `libsvtav1` (not just `av1`)
647
+
648
+ ### Common Mistakes to Avoid
649
+ - ❌ Using `observations/state` instead of `observation/state`
650
+ - ❌ Using task names instead of language descriptions
651
+ - ❌ Adding tasks field to episodes (SVLA style, not needed for LIBERO)
652
+ - ❌ Forgetting task_index in frames
653
+ - ❌ Consolidating all videos into one file (use one per episode for multi-task)
654
+
+ **Last Updated:** December 10, 2025
655
+
656
+ ---
657
+
658
+ ## Testing Summary (December 10, 2025)
659
+
660
+ ### ✅ Pipeline Validation Complete
661
+
662
+ **Test Episode:** cleaningcloth_20251104_205021 (168 frames)
663
+
664
+ **Results:**
665
+ 1. ✅ **All imports successful** - LeRobotDataset, encode_video_frames, PyAV, h5py, PIL
666
+ 2. ✅ **HDF5 data loading** - States (168,7), Actions (168,7), Timestamps (168) all loaded correctly
667
+ 3. ✅ **Image path resolution** - Successfully read paths from HDF5 and loaded PNG files
668
+ 4. ✅ **Video encoding** - 10 frames encoded to 0.10 MB MP4 using libsvtav1 codec
669
+ 5. ✅ **Video verification** - Output is 800×720, AV1 codec (libdav1d), playable
670
+ 6. ✅ **FPS calculation** - Actual FPS is 11.53 (NOT 30 as initially assumed!)
671
+
672
+ **Key Findings:**
673
+ - Calculated FPS from timestamps: **11.53 FPS** (episode duration 14.6s for 168 frames)
674
+ - Video codec: libsvtav1 (SVT-AV1 Encoder v3.0.0)
675
+ - Encoding parameters: Preset M10, CRF 30, YUV420, 800×720
676
+ - Compression: 10 frames = 0.10 MB (excellent compression ratio)
677
+ - Image paths in HDF5 use task name: `cleaningcloth_images/...` (not full episode name)
678
+
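+ For reproducibility, a minimal sketch of how that FPS figure can be recomputed from the HDF5 timestamps (the file path assumes the `raw_dataset/` layout from this commit):
+
+ ```python
+ import h5py
+
+ with h5py.File("raw_dataset/cleaningcloth_20251104_205021.hdf5", "r") as f:
+     t = f["timestamp"][:]
+
+ fps = (len(t) - 1) / (t[-1] - t[0])   # 168 frames over ~14.6 s ≈ 11.5 FPS
+ print(f"Measured FPS: {fps:.2f}")
+ ```
+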
679
+ **Next Action:** Create full conversion script and test with complete episode
+
680
+ ## References
681
+
682
+ - **Complete format knowledge**: `lerobotv3_format_explanation.md`
683
+ - [LeRobot v3.0 Documentation](https://huggingface.co/docs/lerobot/lerobot-dataset-v3)
684
+ - [Porting Datasets Guide](https://huggingface.co/docs/lerobot/porting_datasets_v3)
685
+ - [DROID Example](https://github.com/huggingface/lerobot/blob/main/examples/port_datasets/port_droid.py)
686
+
687
+ **Last Updated:** December 2025
688
+
helper_scripts/lerobotv3_format_explanation.md ADDED
@@ -0,0 +1,762 @@
1
+ # LeRobot v3.0 Format Complete Knowledge Base
2
+
3
+ ## Table of Contents
4
+ 1. [Overview](#overview)
5
+ 2. [Critical Requirements](#critical-requirements)
6
+ 3. [Dataset Structure](#dataset-structure)
7
+ 4. [Two Valid Task Approaches](#two-valid-task-approaches)
8
+ 5. [Chunking Strategy](#chunking-strategy)
9
+ 6. [Episode-to-File Mapping](#episode-to-file-mapping)
10
+ 7. [Configuration Parameters](#configuration-parameters)
11
+ 8. [Best Practices](#best-practices)
12
+ 9. [v2.1 vs v3.0 Changes](#v21-vs-v30-changes)
13
+ 10. [Our Dataset Strategy](#our-dataset-strategy)
14
+
15
+ ---
16
+
17
+ ## Overview
18
+
19
+ LeRobot v3.0 introduces a **consolidated file format** for improved scalability, efficiency, and standardization. Instead of one file per episode (v2.1), v3.0 groups multiple episodes into larger Parquet data files and MP4 video files.
20
+
21
+ **Key Benefits:**
22
+ - **Scalability**: Handle massive datasets (1M+ episodes)
23
+ - **Efficiency**: Fewer, larger files = faster I/O
24
+ - **Standardization**: Parquet for all metadata
25
+ - **Flexibility**: Configurable chunking strategies
26
+
27
+ **Source:** Verified against Phospho.ai documentation and real datasets (SVLA, LIBERO)
28
+
29
+ ---
30
+
31
+ ## Critical Requirements
32
+
33
+ ### 1. Language Instructions are MANDATORY for VLA
34
+
35
+ **All Vision-Language-Action (VLA) models require natural language task descriptions:**
36
+ - **SmolVLA**: Processes language through tokenizer, conditions actions on language embeddings
37
+ - **Pi0/Pi0.5**: Uses Paligemma (vision-language model), requires task descriptions
38
+ - **XVLA**: Uses Florence-2 backbone with language tokenizer
39
+
40
+ **Quality Requirements:**
41
+ - ✅ **Full sentences**: "Pick up the red cube from the left side"
42
+ - ❌ **Not labels**: "red_cube" or "pickup"
43
+ - ✅ **Specific & descriptive**: Include object details, spatial context
44
+ - ✅ **Natural language**: How a human would describe the task
45
+ - ✅ **Goal-oriented**: Describe what to achieve, not just object names
46
+
47
+ **Example (from LIBERO dataset):**
48
+ ```
49
+ "put the white mug on the left plate and put the yellow and white mug on the right plate"
50
+ "put the yellow and white mug in the microwave and close it"
51
+ "turn on the stove and put the moka pot on it"
52
+ ```
53
+
54
+ ### 2. Proper HDF5 Paths (if converting from HDF5)
55
+
56
+ **Correct structure:**
57
+ - `observation/state` (singular, not `observations/state`)
58
+ - `action` (singular, not `actions`)
59
+ - `timestamp` (singular, not `timestamps`)
60
+
61
+ ---
62
+
63
+ ## Dataset Structure
64
+
65
+ ### Complete v3.0 Directory Layout
66
+
67
+ ```
68
+ dataset_name/
69
+ ├── meta/
70
+ │ ├── info.json # Dataset configuration
71
+ │ ├── stats.json # Aggregated statistics
72
+ │ ├── tasks.parquet # Task definitions (optional, see approaches below)
73
+ │ └── episodes/
74
+ │ ├── chunk-000/
75
+ │ │ ├── file-000.parquet # Episodes 0-999 metadata
76
+ │ │ └── file-001.parquet # Episodes 1000-1999 metadata (if >1000 episodes)
77
+ │ └── chunk-001/ # If >1000 episode metadata files
78
+ │ └── file-000.parquet
79
+ ├── data/
80
+ │ ├── chunk-000/
81
+ │ │ ├── file-000.parquet # Frame data for episodes 0-14
82
+ │ │ ├── file-001.parquet # Frame data for episodes 15-29
83
+ │ │ └── ... # More files based on data_files_size_in_mb
84
+ │ └── chunk-001/ # If >1000 data files
85
+ │ └── file-000.parquet
86
+ └── videos/
87
+ ├── observation.images.camera1/
88
+ │ ├── chunk-000/
89
+ │ │ ├── file-000.mp4 # Video(s) for episodes 0-N
90
+ │ │ ├── file-001.mp4
91
+ │ │ └── ... # Number depends on strategy
92
+ │ └── chunk-001/ # If >1000 video files
93
+ │ └── file-000.mp4
94
+ └── observation.images.camera2/
95
+ └── chunk-000/
96
+ └── ...
97
+ ```
98
+
99
+ ### File Schemas
100
+
101
+ #### meta/info.json
102
+ ```json
103
+ {
104
+ "codebase_version": "v3.0",
105
+ "robot_type": "piper",
106
+ "total_episodes": 13,
107
+ "total_frames": 5016,
108
+ "total_tasks": 12,
109
+ "total_videos": 26,
110
+ "total_chunks": 1,
111
+ "chunks_size": 1000,
112
+ "data_files_size_in_mb": 50,
113
+ "video_files_size_in_mb": 200,
114
+ "fps": 30,
115
+ "features": {
116
+ "observation.state": {
117
+ "dtype": "float32",
118
+ "shape": [7],
119
+ "names": {"motors": ["joint_1", ..., "joint_7"]},
120
+ "fps": 30.0
121
+ },
122
+ "action": {
123
+ "dtype": "float32",
124
+ "shape": [7],
125
+ "names": {"motors": ["joint_1", ..., "joint_7"]},
126
+ "fps": 30.0
127
+ },
128
+ "observation.images.table_cam": {
129
+ "dtype": "video",
130
+ "shape": [720, 800, 3],
131
+ "video_info": {
132
+ "video.fps": 30.0,
133
+ "video.codec": "av1",
134
+ "video.pix_fmt": "yuv420p"
135
+ }
136
+ }
137
+ // ... other features
138
+ }
139
+ }
140
+ ```
141
+
142
+ #### data/chunk-000/file-000.parquet
143
+ Frame-level data:
144
+ ```python
145
+ {
146
+ 'observation.state': [7], # Robot state
147
+ 'action': [7], # Robot action
148
+ 'timestamp': float, # Frame timestamp
149
+ 'frame_index': int, # Frame within episode
150
+ 'episode_index': int, # Which episode
151
+ 'index': int, # Global frame index
152
+ 'task_index': int, # Maps to tasks.parquet (LIBERO approach)
153
+ 'next.done': bool # True on last frame of episode
154
+ }
155
+ ```
156
+
157
+ #### meta/episodes/chunk-000/file-000.parquet
158
+ Episode-level metadata:
159
+ ```python
160
+ {
161
+ 'episode_index': 0,
162
+ 'length': 324, # Number of frames
163
+
164
+ # Data file location
165
+ 'data/chunk_index': 0, # Which data chunk
166
+ 'data/file_index': 0, # Which file in that chunk
167
+ 'dataset_from_index': 0, # Start frame in that file
168
+ 'dataset_to_index': 324, # End frame in that file
169
+
170
+ # Video file locations (per camera)
171
+ 'videos/observation.images.table_cam/chunk_index': 0,
172
+ 'videos/observation.images.table_cam/file_index': 0,
173
+ 'videos/observation.images.table_cam/from_timestamp': 0.0,
174
+ 'videos/observation.images.table_cam/to_timestamp': 10.8,
175
+
176
+ 'videos/observation.images.wrist_cam/chunk_index': 0,
177
+ 'videos/observation.images.wrist_cam/file_index': 0,
178
+ 'videos/observation.images.wrist_cam/from_timestamp': 0.0,
179
+ 'videos/observation.images.wrist_cam/to_timestamp': 10.8,
180
+
181
+ # Per-episode statistics
182
+ 'stats/action/min': [7],
183
+ 'stats/action/max': [7],
184
+ 'stats/action/mean': [7],
185
+ 'stats/action/std': [7],
186
+ 'stats/observation.state/min': [7],
187
+ 'stats/observation.state/max': [7],
188
+ // ... other stats
189
+
190
+ # Task information (optional, depends on approach)
191
+ 'tasks': ["Pick up the screwdriver from the table."], # SVLA approach
192
+ 'task_index': 7 # LIBERO approach
193
+ }
194
+ ```
195
+
196
+ #### meta/tasks.parquet (LIBERO approach)
197
+ ```python
198
+ # Task descriptions are the INDEX of the DataFrame!
199
+ index (task description) | task_index
200
+ ------------------------------------------------------|------------
201
+ "put the white mug on the left plate and..." | 0
202
+ "put the white mug on the plate and put..." | 1
203
+ "pick up the screwdriver from the table." | 7
204
+ ```
205
+
206
+ #### meta/stats.json
207
+ Aggregated statistics for normalization:
208
+ ```json
209
+ {
210
+ "observation.state": {
211
+ "mean": [7 values],
212
+ "std": [7 values],
213
+ "min": [7 values],
214
+ "max": [7 values]
215
+ },
216
+ "action": {
217
+ "mean": [7 values],
218
+ "std": [7 values],
219
+ "min": [7 values],
220
+ "max": [7 values]
221
+ }
222
+ }
223
+ ```
224
+
225
+ ---
226
+
227
+ ## Two Valid Task Approaches
228
+
229
+ ### Approach 1: Tasks in Episode Metadata (SVLA Style)
230
+
231
+ **Use when:**
232
+ - Single task repeated across all episodes
233
+ - Per-episode task variations needed
234
+ - Episodes can have multiple task descriptions
235
+
236
+ **Structure:**
237
+ ```python
238
+ # meta/episodes/chunk-000/file-000.parquet
239
+ {
240
+ 'episode_index': 0,
241
+ 'tasks': ['Put the red cube on top of the blue cube.'], # List of strings
242
+ 'length': 447,
243
+ // ... other metadata
244
+ }
245
+ ```
246
+
247
+ **Example Dataset:** svla_so100_stacking
248
+ - 56 episodes, 1 task
249
+ - All episodes do same task
250
+ - Task stored in each episode's metadata
251
+
252
+ **Pros:**
253
+ - Simple for single-task datasets
254
+ - Allows per-episode task variations
255
+ - Tasks directly in episode metadata
256
+
257
+ **Cons:**
258
+ - Redundant for multi-task datasets
259
+ - Harder to manage many distinct tasks
260
+
261
+ ### Approach 2: Separate tasks.parquet (LIBERO Style) ✅
262
+
263
+ **Use when:**
264
+ - Multiple distinct tasks in dataset
265
+ - Each episode demonstrates one task
266
+ - Want centralized task management
267
+
268
+ **Structure:**
269
+ ```python
270
+ # meta/tasks.parquet (task descriptions are INDEX!)
271
+ index="Pick up the screwdriver..." | task_index=7
272
+
273
+ # data/chunk-000/file-000.parquet (each frame)
274
+ {
275
+ 'observation.state': [...],
276
+ 'action': [...],
277
+ 'task_index': 7, # Maps to tasks.parquet!
278
+ 'episode_index': 0,
279
+ // ... other frame data
280
+ }
281
+
282
+ # meta/episodes/chunk-000/file-000.parquet
283
+ {
284
+ 'episode_index': 0,
285
+ 'length': 214,
286
+ # NO tasks field!
287
+ // ... video metadata, data indices
288
+ }
289
+ ```
290
+
291
+ **Example Dataset:** LIBERO
292
+ - 1,693 episodes, 40 tasks
293
+ - Each episode has one task type
294
+ - tasks.parquet centralizes task definitions
295
+
296
+ **Pros:**
297
+ - ✅ Clean task definition management
298
+ - ✅ Define each task once
299
+ - ✅ Easy to add new tasks
300
+ - ✅ Better for multi-task datasets
301
+ - ✅ Explicit task conditioning via task_index
302
+
303
+ **Cons:**
304
+ - Extra file to manage
305
+ - Slightly more complex lookup
306
+
307
+ **Both approaches are valid v3.0 and work with VLA models!**
308
+
309
+ ---
310
+
311
+ ## Chunking Strategy
312
+
313
+ ### Three Types of Chunking
314
+
315
+ #### 1. Directory Chunks (`chunks_size`)
316
+ **Purpose:** Limit number of FILES per directory for filesystem performance
317
+
318
+ **Configuration:**
319
+ ```json
320
+ {
321
+ "chunks_size": 1000 // Max 1000 files per chunk-XXX directory
322
+ }
323
+ ```
324
+
325
+ **When it splits:**
326
+ - ✅ More than 1000 files → creates chunk-001, chunk-002, etc.
327
+ - ❌ Fewer than 1000 files → everything stays in chunk-000
328
+
329
+ **Example (LIBERO videos):**
330
+ ```
331
+ 1,693 video files (one per episode):
332
+ - chunk-000/: file-000.mp4 through file-999.mp4 (1000 files)
333
+ - chunk-001/: file-000.mp4 through file-692.mp4 (693 files)
334
+ ```
335
+
336
+ **Why:** File system performance degrades with 1000+ files in one directory
337
+
338
+ #### 2. Data File Chunks (`data_files_size_in_mb`)
339
+ **Purpose:** Target parquet file size for efficient I/O
340
+
341
+ **Configuration:**
342
+ ```json
343
+ {
344
+ "data_files_size_in_mb": 100 // Target ~100MB per data parquet file
345
+ }
346
+ ```
347
+
348
+ **How it works:**
349
+ - Multiple episodes consolidated into each parquet file
350
+ - System creates new file when size target reached
351
+ - Episode metadata tracks which file contains each episode
352
+
353
+ **Examples:**
354
+ - **LIBERO**: 273,465 frames → 377 files (~730 frames per file, ~100MB each)
355
+ - **SVLA**: 22,956 frames → 1 file (~20MB total, under target)
356
+ - **PIPER**: 5,016 frames → 1 file (~0.4MB total, well under target)
357
+
358
+ **Key Insight:** Data files often contain MULTIPLE episodes!
359
+
360
+ #### 3. Video File Strategy (`video_files_size_in_mb`)
361
+ **Purpose:** Target video file size for streaming/download
362
+
363
+ **Configuration:**
364
+ ```json
365
+ {
366
+ "video_files_size_in_mb": 500 // Target ~500MB per video file
367
+ }
368
+ ```
369
+
370
+ **Two valid strategies:**
371
+
372
+ **Strategy A: One video per episode (LIBERO)**
373
+ - Each episode = separate MP4 file
374
+ - Good for: Different episode lengths, multi-task, selective loading
375
+ - Example: LIBERO has 1,693 MP4s per camera (one per episode)
376
+
377
+ **Strategy B: Multiple episodes per video (SVLA)**
378
+ - Episodes concatenated into fewer large videos
379
+ - Good for: Uniform episodes, sequential access, single-task
380
+ - Example: SVLA has 1 MP4 per camera (all 56 episodes concatenated)
381
+
382
+ **Both are valid v3.0!** Choice depends on dataset characteristics.
383
+
384
+ ### Comparison: Data vs Video Chunking
385
+
386
+ | Aspect | Data Files | Video Files |
387
+ |--------|-----------|-------------|
388
+ | **Consolidation** | Always multiple episodes per file | Depends on strategy |
389
+ | **File count** | Few (377 in LIBERO) | Can be many (1,693 in LIBERO) or few (1 in SVLA) |
390
+ | **When splits** | Based on data size | Based on strategy choice |
391
+ | **Directory chunks** | Rare (377 < 1000) | Common if one-per-episode (1693 > 1000) |
392
+ | **Episode mapping** | Via from/to indices | Via timestamps or per-file |
393
+
394
+ ### When to Split into Chunks?
395
+
396
+ | Scenario | Data Chunks | Video Chunks | Directory Chunks |
397
+ |----------|-------------|--------------|------------------|
398
+ | **Small dataset (<100 episodes)** | 1 file in chunk-000 | 1 file or 1-per-episode in chunk-000 | All in chunk-000 |
399
+ | **Medium dataset (100-1000 episodes)** | Multiple files in chunk-000 | Multiple files in chunk-000 | All in chunk-000 |
400
+ | **Large dataset (1000-10000 episodes)** | Many files, maybe 2 chunks | Many files, likely 2-10 chunks | Multiple chunk-XXX dirs |
401
+ | **Massive dataset (>10000 episodes)** | Many files, many chunks | Many files, many chunks | Many chunk-XXX dirs |
402
+
403
+ ---
404
+
405
+ ## Episode-to-File Mapping
406
+
407
+ ### How the System Tracks Episodes
408
+
409
+ Episode metadata includes ALL location information:
410
+
411
+ ```python
412
+ {
413
+ 'episode_index': 1234,
414
+
415
+ # Where is the data?
416
+ 'data/chunk_index': 0, # data/chunk-000/
417
+ 'data/file_index': 82, # file-082.parquet
418
+ 'dataset_from_index': 123450, # Starts at global frame 123450
419
+ 'dataset_to_index': 123774, # Ends at global frame 123774 (324 frames)
420
+
421
+ # Where are the videos?
422
+ 'videos/observation.images.camera1/chunk_index': 1, # videos/.../chunk-001/
423
+ 'videos/observation.images.camera1/file_index': 234, # file-234.mp4
424
+ 'videos/observation.images.camera1/from_timestamp': 0.0,
425
+ 'videos/observation.images.camera1/to_timestamp': 10.8, # 324 frames @ 30fps
426
+ }
427
+ ```
428
+
429
+ ### Lookup Process
430
+
431
+ **To load episode 1234:**
432
+ 1. Read `meta/episodes/chunk-000/file-XXX.parquet`
433
+ 2. Find row where `episode_index == 1234`
434
+ 3. **For data:** Go to `data/chunk-000/file-082.parquet`, read rows 123450-123774
435
+ 4. **For video:** Go to `videos/.../chunk-001/file-234.mp4`, decode timestamps 0.0-10.8
436
+
437
+ ### Patterns
438
+
439
+ **LIBERO (one video per episode):**
440
+ ```python
441
+ video_file_index == episode_index # Direct mapping!
442
+ from_timestamp == 0.0 # Each video starts at 0
443
+ ```
444
+
445
+ **SVLA (all episodes in one video):**
446
+ ```python
447
+ video_file_index == 0 # All episodes in file-000.mp4
448
+ from_timestamp varies # Use timestamps to locate episode
449
+ ```
450
+
451
+ ---
452
+
453
+ ## Configuration Parameters
454
+
455
+ ### Complete info.json Configuration
456
+
457
+ ```json
458
+ {
459
+ "codebase_version": "v3.0",
460
+ "robot_type": "your_robot",
461
+ "total_episodes": 13,
462
+ "total_frames": 5016,
463
+ "total_tasks": 12,
464
+ "total_videos": 26,
465
+ "total_chunks": 1,
466
+
467
+ "chunks_size": 1000, // Max FILES per directory chunk
468
+ "data_files_size_in_mb": 100, // Target size for EACH data parquet
469
+ "video_files_size_in_mb": 500, // Target size for EACH video MP4
470
+
471
+ "fps": 30,
472
+ "features": { /* ... */ }
473
+ }
474
+ ```
475
+
476
+ ### What Each Parameter Controls
477
+
478
+ | Parameter | Controls | Example | Impact |
479
+ |-----------|----------|---------|--------|
480
+ | `chunks_size` | Max files per chunk-XXX dir | 1000 | 1693 files → 2 chunks |
481
+ | `data_files_size_in_mb` | Target data file size | 100 | Creates new file when reached |
482
+ | `video_files_size_in_mb` | Target video file size | 500 | Strategy-dependent |
483
+ | `fps` | Frame rate | 30 | Affects timestamp calculations |
484
+
485
+ **Common mistake:** Thinking `chunks_size` controls episodes per chunk
486
+ **Reality:** It controls FILES per directory (for filesystem performance)
487
+
488
+ ---
489
+
490
+ ## Best Practices
491
+
492
+ ### Language Instructions
493
+
494
+ 1. **Be specific**: "Pick up the red cube from the left side" > "pick cube"
495
+ 2. **Include spatial context**: "Put the mug on the left plate"
496
+ 3. **Describe the goal**: Not just object names
497
+ 4. **Use natural language**: How a human would explain it
498
+ 5. **Consistency**: Similar phrasing for similar tasks
499
+ 6. **Variation**: Multiple phrasings help generalization
500
+
501
+ ### Dataset Quality for VLA Fine-Tuning
502
+
503
+ From SmolVLA/Pi0 documentation:
504
+
505
+ 1. **50+ episodes recommended** for good performance
506
+ 2. **Language diversity**: Vary descriptions, include context
507
+ 3. **Visual variations**: Multiple object positions, angles
508
+ 4. **Multiple camera views**: Helps spatial understanding
509
+ 5. **Consistent frame rate**: 30 FPS standard
510
+ 6. **Quality over quantity**: Good demos > many poor demos
511
+
512
+ ### Chunking Recommendations
513
+
514
+ **Small datasets (<100 episodes):**
515
+ ```json
516
+ {
517
+ "chunks_size": 1000,
518
+ "data_files_size_in_mb": 50,
519
+ "video_files_size_in_mb": 200
520
+ }
521
+ ```
522
+ - Use LIBERO approach (one video per episode)
523
+ - All in chunk-000
524
+ - 1 data file sufficient
525
+
526
+ **Medium datasets (100-1000 episodes):**
527
+ ```json
528
+ {
529
+ "chunks_size": 1000,
530
+ "data_files_size_in_mb": 100,
531
+ "video_files_size_in_mb": 500
532
+ }
533
+ ```
534
+ - Use LIBERO approach if multi-task
535
+ - May need multiple data files
536
+ - All in chunk-000
537
+
538
+ **Large datasets (>1000 episodes):**
539
+ ```json
540
+ {
541
+ "chunks_size": 1000,
542
+ "data_files_size_in_mb": 100,
543
+ "video_files_size_in_mb": 500
544
+ }
545
+ ```
546
+ - Will need multiple chunks
547
+ - Many data files
548
+ - Consider SVLA consolidation if single-task
549
+
550
+ ---
551
+
552
+ ## v2.1 vs v3.0 Changes
553
+
554
+ ### File Organization
555
+
556
+ **v2.1:**
557
+ ```
558
+ dataset/
559
+ ├── episode_000000.parquet
560
+ ├── episode_000001.parquet
561
+ ├── video_000000.mp4
562
+ ├── video_000001.mp4
563
+ └── ...
564
+ ```
565
+ - One file per episode
566
+ - Can have 1000s of files
567
+ - Scattered structure
568
+
569
+ **v3.0:**
570
+ ```
571
+ dataset/
572
+ ├── data/chunk-000/file-000.parquet # Many episodes
573
+ ├── videos/.../chunk-000/file-000.mp4 # One or many episodes
574
+ └── meta/episodes/chunk-000/file-000.parquet
575
+ ```
576
+ - Consolidated files
577
+ - Organized chunks
578
+ - Metadata-driven access
579
+
580
+ ### Metadata Format
581
+
582
+ **v2.1:**
583
+ - tasks.jsonl (JSON Lines)
584
+ - episodes_stats.jsonl
585
+ - JSON-based metadata
586
+
587
+ **v3.0:**
588
+ - tasks.parquet (Parquet)
589
+ - meta/episodes/chunk-000/*.parquet
590
+ - Parquet-based metadata
591
+
592
+ **Benefits:** Columnar storage, compression, schema evolution
593
+
594
+ ### Scalability
595
+
596
+ **v2.1:**
597
+ - Works well for <1000 episodes
598
+ - File system struggles with large datasets
599
+ - Slow directory listing
600
+
601
+ **v3.0:**
602
+ - Handles millions of episodes
603
+ - Efficient chunk-based organization
604
+ - Fast I/O with larger files
605
+
606
+ ### Migration
607
+
608
+ **Automatic conversion available:**
609
+ ```bash
610
+ python scripts/convert_dataset_v2_to_v3.py
611
+ ```
612
+
613
+ **Or create v3.0 directly using LeRobotDataset.create()**
614
+
615
+ ---
616
+
617
+ ## Our Dataset Strategy
618
+
619
+ ### piper_picking_tests Characteristics
620
+
621
+ ```
622
+ Total Episodes: 13
623
+ Total Frames: 5,016
624
+ Total Tasks: 12 (one duplicate: scissors/scissors_hidden)
625
+ Cameras: 2 (table_cam 800×720, wrist_cam 640×480)
626
+ FPS: 30
627
+ Robot: 7-DOF arm
628
+ State: 7D joint angles
629
+ Action: 7D commands
630
+ ```
631
+
632
+ ### Recommended Structure: LIBERO Multi-Task Style
633
+
634
+ **Why LIBERO approach?**
635
+ - ✅ Multi-task dataset (12 distinct tasks)
636
+ - ✅ Each episode = one task type
637
+ - ✅ Easier task management
638
+ - ✅ Better for VLA training
639
+ - ✅ Explicit task conditioning
640
+
641
+ **File Structure:**
642
+ ```
643
+ piper_picking_tests_v3/
644
+ ├── meta/
645
+ │ ├── info.json
646
+ │ ├── stats.json
647
+ │ ├── tasks.parquet # 12 tasks, descriptions as index
648
+ │ └── episodes/
649
+ │ └── chunk-000/
650
+ │ └── file-000.parquet # 13 episodes (NO tasks field)
651
+ ├── data/
652
+ │ └── chunk-000/
653
+ │ └── file-000.parquet # All 5,016 frames (WITH task_index)
654
+ └── videos/
655
+ ├── observation.images.table_cam/
656
+ │ └── chunk-000/
657
+ │ ├── file-000.mp4 # Episode 0 (cleaningcloth)
658
+ │ ├── file-001.mp4 # Episode 1 (fillamentroll)
659
+ │ └── ... # 13 files total
660
+ └── observation.images.wrist_cam/
661
+ └── chunk-000/
662
+ └── ... # 13 files total
663
+ ```
664
+
665
+ ### Configuration
666
+
667
+ ```json
668
+ {
669
+ "codebase_version": "v3.0",
670
+ "robot_type": "piper",
671
+ "total_episodes": 13,
672
+ "total_frames": 5016,
673
+ "total_tasks": 12,
674
+ "chunks_size": 1000,
675
+ "data_files_size_in_mb": 50,
676
+ "video_files_size_in_mb": 200,
677
+ "fps": 30
678
+ }
679
+ ```
680
+
681
+ ### Task Language Mapping
+
+ ```python
+ TASK_LANGUAGE_MAP = {
+     0: "Pick up the cleaning cloth from the table.",
+     1: "Grasp and pick up the filament roll.",
+     2: "Pick up the game controller from the table.",
+     3: "Pick up the hex wrench tool.",
+     4: "Grasp and pick up the pencil.",
+     5: "Pick up the scissors from the table.",
+     6: "Find and pick up the scissors that are partially hidden.",
+     7: "Pick up the screwdriver from the table.",
+     8: "Grasp and pick up the small key.",
+     9: "Pick up the small piece of paper.",
+     10: "Pick up the small wooden stick.",
+     11: "Pick up the thin metal disk.",
+ }
+ ```
+
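+ One way to turn this map into the v3.0 task table is a small pandas DataFrame. The layout below (description as index, `task_index` as a column) follows the "descriptions as index" note in the file structure above, but it should be verified against a reference v3.0 dataset:
+
+ ```python
+ import pandas as pd
+
+ # Build meta/tasks.parquet from TASK_LANGUAGE_MAP defined above.
+ tasks_df = pd.DataFrame(
+     {"task_index": list(TASK_LANGUAGE_MAP.keys())},
+     index=pd.Index(TASK_LANGUAGE_MAP.values(), name="task"),
+ )
+ tasks_df.to_parquet("piper_picking_tests_v3/meta/tasks.parquet")
+ ```
+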
+ ### Expected Sizes
+
+ - **Data parquet**: ~0.4 MB (5,016 frames × 80 bytes/frame)
+ - **Videos**: ~8 MB per episode per camera (AV1-compressed)
+   - 13 episodes × 2 cameras × 8 MB = ~208 MB total
+ - **Metadata**: <1 MB
+ - **Total**: ~210 MB (vs. ~6.2 GB for the current PNG format; see the quick estimate below)
+
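+ A quick sanity check of that arithmetic (the 80 bytes/frame and 8 MB per episode/camera figures are the estimates used above, not measurements):
+
+ ```python
+ frames, episodes, cameras = 5016, 13, 2
+ bytes_per_frame = 80        # rough per-row estimate: state + action + indices
+ mb_per_episode_video = 8    # rough AV1-compressed size per camera
+
+ data_mb = frames * bytes_per_frame / 1e6
+ video_mb = episodes * cameras * mb_per_episode_video
+ print(f"data ≈ {data_mb:.1f} MB, videos ≈ {video_mb} MB, total ≈ {data_mb + video_mb:.0f} MB plus <1 MB metadata")
+ ```
+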
+ ### Why This Strategy?
+
+ 1. ✅ **Multi-task structure** matches the nature of the dataset
+ 2. ✅ **One video per episode** enables flexible loading
+ 3. ✅ **LIBERO approach** provides clean task management
+ 4. ✅ **task_index in frames** ensures proper VLA conditioning
+ 5. ✅ **Single chunk** is sufficient for 13 episodes
+ 6. ✅ **Scalable** if we add more episodes later
+
+ ---
+
+ ## Verification Checklist
+
+ ### Format Compliance
+ - [ ] data/ directory with parquet files
+ - [ ] videos/ directories with MP4 files (AV1 codec)
+ - [ ] meta/info.json with v3.0 schema
+ - [ ] meta/episodes/ with parquet metadata
+ - [ ] meta/tasks.parquet (if LIBERO approach)
+ - [ ] meta/stats.json with aggregated statistics
+
+ ### Data Integrity
+ - [ ] All episodes present (13)
+ - [ ] All frames present (5,016)
+ - [ ] All tasks defined (12)
+ - [ ] task_index in data frames (LIBERO approach)
+ - [ ] Episode metadata includes file mappings
+ - [ ] Videos playable and of the correct length
+
+ ### VLA Compatibility
+ - [ ] Language instructions are natural and descriptive
+ - [ ] task_index maps correctly to language descriptions
+ - [ ] Dataset loads with LeRobotDataset (see the load sketch after this checklist)
+ - [ ] Test with SmolVLA/Pi0 data loading
+ - [ ] Language conditioning works in VLA models
+
+ ### Statistics
+ - [ ] Per-episode stats in episode metadata
+ - [ ] Aggregated stats in meta/stats.json
+ - [ ] Reasonable value ranges
+ - [ ] Mean/std suitable for normalization
+
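+ A minimal load-and-check sketch for the loading and count items above (assumes the converted dataset is available locally; attribute names like `meta.total_episodes` follow recent LeRobot releases and the import path differs on older ones, so adjust as needed):
+
+ ```python
+ # Import path differs between releases (lerobot.common.datasets... on older ones).
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+ ds = LeRobotDataset("<user>/piper_picking_tests_v3", root="piper_picking_tests_v3")
+
+ assert ds.meta.total_episodes == 13
+ assert ds.meta.total_frames == 5016
+ assert len(ds.meta.tasks) == 12
+
+ sample = ds[0]   # one frame: images, state, action, task string, indices
+ print(sample["task"], sample["observation.state"].shape)
+ ```
+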
+ ---
+
+ ## References
+
+ - [Phospho.ai LeRobot Dataset Documentation](https://docs.phospho.ai/learn/lerobot-dataset)
+ - [LeRobot v3.0 Official Docs](https://huggingface.co/docs/lerobot/lerobot-dataset-v3)
+ - [Porting Datasets Guide](https://huggingface.co/docs/lerobot/porting_datasets_v3)
+ - **Example Datasets Analyzed:**
+   - svla_so100_stacking (56 episodes, 1 task, SVLA approach)
+   - LIBERO (1,693 episodes, 40 tasks, LIBERO approach)
+
+ **Document Version:** 1.0 (December 2025)
+ **Verified Against:** LeRobot v3.0, Phospho.ai docs, SVLA, LIBERO datasets
cleaningcloth_20251104_205021.hdf5 → raw_dataset/cleaningcloth_20251104_205021.hdf5 RENAMED
File without changes
cleaningcloth_20251104_205021.json → raw_dataset/cleaningcloth_20251104_205021.json RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000000.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000001.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000002.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000003.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000004.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000005.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000006.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000007.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000008.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000009.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000010.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000011.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000012.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000013.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000014.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000015.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000016.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000017.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000018.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000019.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000020.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000021.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000022.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000023.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000024.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000025.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000026.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000027.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000028.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000029.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000030.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000031.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000032.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000033.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000034.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000035.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000036.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000037.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000038.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000039.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000040.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000041.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000042.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000043.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000044.png RENAMED
File without changes
{cleaningcloth_images → raw_dataset/cleaningcloth_images}/observation.images.table_cam/frame_000045.png RENAMED
File without changes