rollingoat committed on
Commit
2d3950a
·
verified ·
1 Parent(s): a9b00c0

Upload convert_synced_h5_to_lerobot.py

Browse files
Files changed (1) hide show
  1. convert_synced_h5_to_lerobot.py +334 -0
convert_synced_h5_to_lerobot.py ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Convert one or more synced HDF5 files (output of sync_image_low_dim.py) to a single
2
+ LeRobot v2.1 dataset for OpenPI.
3
+ Input HDF5 layout (produced by sync_image_low_dim.py):
4
+ data/<demo>/obs/<timestamp_key> (T,)
5
+ data/<demo>/obs/<image_key_i> (T, H, W, 3) uint8
6
+ data/<demo>/obs/<lowdim_key_j> (T, D_j)
7
+ data/<demo>/actions (T, A) optional
8
+ Output: LeRobot v2.1 dataset written directly into <output-dir>.
9
+ All input files are concatenated as episodes in the order given; feature shapes,
10
+ state/action dims, and source fps are inferred from the first file and validated
11
+ against the rest.
12
+ Example:
13
+ python convert_synced_h5_to_lerobot.py \\
14
+ --synced-h5 run1.h5 run2.h5 run3.h5 \\
15
+ --output-dir /DATA/lerobot/my_dataset \\
16
+ --fps 10 \\
17
+ --task "pick up the block" \\
18
+ --image-map agentview_image:base_rgb wrist_image:wrist_rgb \\
19
+ --state-keys joint_positions gripper_pos \\
20
+ --action-source next_state
21
+ """
22
+
23
+ from __future__ import annotations
24
+
25
+ import argparse
26
+ import gc
27
+ import sys
28
+ from pathlib import Path
29
+ from typing import Dict, List, Tuple
30
+
31
+ import h5py
32
+ import numpy as np
33
+
34
+ try:
35
+ import cv2
36
+ _HAS_CV2 = True
37
+ except ImportError:
38
+ _HAS_CV2 = False
39
+
40
+ from datasets import Dataset
41
+
42
+
43
def _free_hf_dataset(dataset) -> None:
    """Mirror convert_to_lerobot.py: empty hf_dataset between episodes to avoid OOM."""
    hf = getattr(dataset, "hf_dataset", None)
    if hf is not None:
        # Swap in an empty table with the identical schema so accumulated
        # episode rows can be reclaimed, then force a collection pass.
        dataset.hf_dataset = Dataset.from_dict(
            {column: [] for column in hf.column_names}, features=hf.features
        )
        gc.collect()
52
+
53
+
54
def parse_kv_list(items: List[str]) -> Dict[str, str]:
    """Turn a list of 'src:dst' strings into a mapping (split on the first colon)."""
    mapping: Dict[str, str] = {}
    for item in items:
        src, sep, dst = item.partition(":")
        if not sep:
            raise ValueError(f"Expected 'src:dst', got {item!r}")
        mapping[src] = dst
    return mapping
62
+
63
+
64
def resize_image(img: np.ndarray, size: Tuple[int, int]) -> np.ndarray:
    """Resize HxWx3 uint8 to size=(H,W). No-op if shapes already match."""
    target_h, target_w = size
    if img.shape[:2] == (target_h, target_w):
        # Already the requested resolution; return the original untouched.
        return img
    if not _HAS_CV2:
        raise RuntimeError(
            f"Image shape {img.shape[:2]} != target {size}; install opencv-python to resize."
        )
    # cv2 expects (width, height) order for dsize.
    return cv2.resize(img, (target_w, target_h), interpolation=cv2.INTER_AREA)
73
+
74
+
75
def ensure_uint8_rgb(img: np.ndarray) -> np.ndarray:
    """Coerce an image to uint8 RGB: clip/cast dtype, gray->3ch, drop alpha.

    NOTE(review): floats are clipped to [0, 255] and cast — assumes inputs are
    already in 0-255 range, not normalized [0, 1]; confirm against the producer.
    """
    if img.dtype != np.uint8:
        img = np.clip(img, 0, 255).astype(np.uint8)
    if img.ndim == 2:
        # Grayscale -> RGB by replicating the single channel.
        img = np.repeat(img[..., None], 3, axis=-1)
    if img.shape[-1] == 4:
        # RGBA -> RGB: discard the alpha channel.
        img = img[..., :3]
    return img
83
+
84
+
85
def build_state(obs: h5py.Group, state_keys: List[str]) -> np.ndarray:
    """Concatenate the listed lowdim datasets column-wise into one (T, D) float32 array."""
    columns = []
    for name in state_keys:
        data = np.asarray(obs[name][:], dtype=np.float32)
        # Promote (T,) vectors to (T, 1) so axis-1 concatenation works.
        columns.append(data[:, None] if data.ndim == 1 else data)
    return np.concatenate(columns, axis=1)
93
+
94
+
95
def build_actions(
    state: np.ndarray,
    demo_group: h5py.Group,
    source: str,
    stride: int = 1,
) -> np.ndarray:
    """Build actions aligned with the (already downsampled) state.

    For ``next_state`` this guarantees ``action[t] == state[t+1]`` in the output.
    """
    num_steps = state.shape[0]
    if source == "next_state":
        # Shift the state one step forward; the last action repeats the final state.
        shifted = np.empty_like(state)
        shifted[:num_steps - 1] = state[1:]
        shifted[num_steps - 1] = state[-1]
        return shifted
    if source == "hdf5_actions":
        if "actions" not in demo_group:
            raise KeyError(f"'actions' missing in {demo_group.name}; cannot use --action-source hdf5_actions")
        actions = downsample(
            np.asarray(demo_group["actions"][:], dtype=np.float32), stride
        )
        if actions.shape[0] != num_steps:
            raise ValueError(f"actions len {actions.shape[0]} != state len {num_steps} in {demo_group.name}")
        return actions
    raise ValueError(f"Unknown --action-source {source!r}")
120
+
121
+
122
def downsample(arr: np.ndarray, stride: int) -> np.ndarray:
    """Return every ``stride``-th row of ``arr``; identity (same object) for stride <= 1."""
    return arr if stride <= 1 else arr[::stride]
126
+
127
+
128
def main() -> None:
    """CLI entry point: convert synced HDF5 file(s) into one LeRobot v2.1 dataset.

    Pipeline: parse args -> peek first file to fix the feature schema, dims and
    frame stride -> create the LeRobot dataset -> append every demo of every
    file as an episode -> optionally push to the Hub.
    """
    p = argparse.ArgumentParser()
    p.add_argument("--synced-h5", required=True, nargs="+",
                   help="One or more synced HDF5 files; all demos are concatenated as episodes.")
    p.add_argument("--output-dir", required=True,
                   help="Local folder to write the LeRobot dataset into (created if missing).")
    p.add_argument("--repo-id", default=None,
                   help="HuggingFace repo id 'user/name'. Required only with --push-to-hub; "
                        "otherwise auto-derived from --output-dir basename.")
    p.add_argument("--fps", type=int, required=True)
    p.add_argument("--source-fps", type=int, default=None,
                   help="Source FPS of the synced HDF5 (estimated from timestamps if omitted).")
    p.add_argument("--task", required=True, help="Language instruction for all episodes.")
    p.add_argument("--image-map", nargs="+", required=True,
                   help="Pairs 'hdf5_key:feature_name', e.g. agentview_image:base_rgb")
    p.add_argument("--state-keys", nargs="+", required=True,
                   help="Ordered lowdim datasets to concatenate into 'state'.")
    p.add_argument("--action-source", choices=["next_state", "hdf5_actions"], default="next_state")
    p.add_argument("--image-size", type=int, nargs=2, default=None, metavar=("H", "W"),
                   help="Resize images to (H, W). Omit to keep the native resolution.")
    p.add_argument("--timestamp-key", default="timestamp")
    p.add_argument("--robot-type", default="franka research 3")
    p.add_argument("--image-writer-threads", type=int, default=4)
    p.add_argument("--push-to-hub", action="store_true")
    args = p.parse_args()

    # hdf5 dataset name -> output feature name.
    image_map = parse_kv_list(args.image_map)
    img_size = tuple(args.image_size) if args.image_size else None

    # lerobot moved this module between releases; support both layouts.
    try:
        from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
    except ImportError:
        from lerobot.datasets.lerobot_dataset import LeRobotDataset

    # Fail fast on missing inputs before any output is created.
    synced_files = [Path(p).expanduser().resolve() for p in args.synced_h5]
    for p_ in synced_files:
        if not p_.is_file():
            raise FileNotFoundError(f"--synced-h5 path not found: {p_}")

    out_root = Path(args.output_dir).expanduser().resolve()
    out_root.parent.mkdir(parents=True, exist_ok=True)
    # LeRobotDataset.create wants a fresh root; refuse to clobber.
    if out_root.exists():
        raise FileExistsError(f"--output-dir {out_root} already exists; remove it or pick a new path")
    repo_id = args.repo_id or f"local/{out_root.name}"
    if args.push_to_hub and args.repo_id is None:
        raise ValueError("--push-to-hub requires --repo-id 'user/name'")

    def _estimate_src_fps(obs_group: h5py.Group, label: str) -> int:
        # Median frame interval is robust to occasional dropped frames.
        # NOTE(review): assumes timestamps are in seconds — confirm against
        # sync_image_low_dim.py's output.
        ts = np.asarray(obs_group[args.timestamp_key][:], dtype=np.float64)
        if ts.size < 2:
            raise ValueError(f"Cannot estimate source fps from {label}: <2 timestamps")
        dt = np.median(np.diff(ts))
        return int(round(1.0 / dt))

    # Peek the first file's first demo to fix feature schema + stride.
    with h5py.File(synced_files[0], "r") as f0:
        if "data" not in f0:
            raise KeyError(f"{synced_files[0]}: no top-level 'data' group")
        demos0 = sorted(f0["data"].keys())
        if not demos0:
            raise ValueError(f"{synced_files[0]}: no demos found")
        first_obs = f0["data"][demos0[0]]["obs"]
        for src in image_map:
            if src not in first_obs:
                raise KeyError(f"Image key {src!r} not in {first_obs.name}")
        state0 = build_state(first_obs, args.state_keys)
        state_dim = state0.shape[1]

        if args.action_source == "hdf5_actions":
            a0 = np.asarray(f0["data"][demos0[0]]["actions"])
            action_dim = a0.shape[1] if a0.ndim > 1 else 1
        else:
            # next_state actions are just shifted states, so dims match.
            action_dim = state_dim

        if args.source_fps is None:
            src_fps = _estimate_src_fps(first_obs, f"{synced_files[0].name}::{demos0[0]}")
            print(f"Estimated source FPS from {synced_files[0].name}: {src_fps}")
        else:
            src_fps = args.source_fps

        # Temporal downsampling only supports integer strides.
        if src_fps % args.fps != 0:
            raise ValueError(f"source fps {src_fps} not divisible by target fps {args.fps}")
        stride = src_fps // args.fps

        if img_size is None:
            # No resize requested: lock the feature shape to the native resolution
            # of the first image stream's first frame.
            sample_img = np.asarray(first_obs[next(iter(image_map))][0])
            feat_hw = tuple(sample_img.shape[:2])
        else:
            feat_hw = img_size

    # LeRobot feature schema: one image feature per mapped camera, plus
    # flat float32 'state' and 'action' vectors.
    features = {}
    for dst in image_map.values():
        features[dst] = {
            "dtype": "image",
            "shape": (feat_hw[0], feat_hw[1], 3),
            "names": ["height", "width", "channel"],
        }
    features["state"] = {
        "dtype": "float32",
        "shape": (state_dim,),
        "names": {"motors": [f"s{i}" for i in range(state_dim)]},
    }
    features["action"] = {
        "dtype": "float32",
        "shape": (action_dim,),
        "names": {"motors": [f"a{i}" for i in range(action_dim)]},
    }

    dataset = LeRobotDataset.create(
        repo_id=repo_id,
        fps=args.fps,
        robot_type=args.robot_type,
        features=features,
        use_videos=False,  # store PNG frames rather than encoded video
        image_writer_threads=args.image_writer_threads,
        root=str(out_root),
    )

    total_frames = 0
    converted = 0
    total_demos = 0
    for file_idx, h5_path in enumerate(synced_files):
        with h5py.File(h5_path, "r") as f:
            if "data" not in f:
                raise KeyError(f"{h5_path}: no top-level 'data' group")
            demos = sorted(f["data"].keys())
            if not demos:
                raise ValueError(f"{h5_path}: no demos found")
            total_demos += len(demos)
            print(f"\n[{file_idx + 1}/{len(synced_files)}] {h5_path.name}: {len(demos)} demo(s)")

            # Validate schema on files after the first.
            if file_idx > 0:
                peek_obs = f["data"][demos[0]]["obs"]
                for src in image_map:
                    if src not in peek_obs:
                        raise KeyError(f"{h5_path}: image key {src!r} not in {peek_obs.name}")
                peek_state_dim = build_state(peek_obs, args.state_keys).shape[1]
                if peek_state_dim != state_dim:
                    raise ValueError(
                        f"{h5_path}: state dim {peek_state_dim} != {state_dim} from first file")
                if args.action_source == "hdf5_actions":
                    a_peek = np.asarray(f["data"][demos[0]]["actions"])
                    peek_action_dim = a_peek.shape[1] if a_peek.ndim > 1 else 1
                    if peek_action_dim != action_dim:
                        raise ValueError(
                            f"{h5_path}: action dim {peek_action_dim} != {action_dim} from first file")
                if args.source_fps is None:
                    peek_src_fps = _estimate_src_fps(peek_obs, f"{h5_path.name}::{demos[0]}")
                    if peek_src_fps != src_fps:
                        raise ValueError(
                            f"{h5_path}: estimated source fps {peek_src_fps} != {src_fps} "
                            f"from first file; pass --source-fps explicitly if this is intended")

            for demo in demos:
                label = f"{h5_path.name}::{demo}"
                g = f["data"][demo]
                obs = g["obs"]
                try:
                    state = build_state(obs, args.state_keys)
                    # Downsample state first so next_state actions align with the
                    # *dataset* timestep (action[t] == state[t+1] in the output).
                    state = downsample(state, stride)
                    action = build_actions(state, g, args.action_source, stride)

                    # Load each camera stream fully into memory, then stride it.
                    imgs = {}
                    for src, dst in image_map.items():
                        raw = np.asarray(obs[src][:])
                        imgs[dst] = downsample(raw, stride)

                    T = state.shape[0]
                    if T < 2:
                        # An episode needs at least one (state, next_state) pair.
                        print(f" Skip {label}: only {T} frame(s) after downsampling")
                        continue

                    for t in range(T):
                        frame = {"state": state[t].astype(np.float32),
                                 "action": action[t].astype(np.float32),
                                 "task": args.task}
                        for dst, arr in imgs.items():
                            img = ensure_uint8_rgb(arr[t])
                            if img_size is not None:
                                img = resize_image(img, img_size)
                            frame[dst] = img
                        dataset.add_frame(frame)

                    dataset.save_episode()
                    # Drop in-memory episode rows to keep RSS flat across episodes.
                    _free_hf_dataset(dataset)
                    total_frames += T
                    converted += 1
                    print(f" {label}: {T} frames")
                except Exception as e:
                    # Re-raise after logging so a bad demo aborts the whole run
                    # (a partially-written dataset should not look complete).
                    print(f" ERROR on {label}: {e}", file=sys.stderr)
                    raise

    # Newer lerobot versions require an explicit finalize; older ones don't have it.
    if hasattr(dataset, "finalize"):
        dataset.finalize()

    print(f"\nConverted {converted}/{total_demos} demos across {len(synced_files)} file(s), "
          f"{total_frames} frames -> {out_root}")

    if args.push_to_hub:
        dataset.push_to_hub()
331
+
332
+
333
# Script entry point: run the conversion only when executed directly.
if __name__ == "__main__":
    main()