zero7101 committed
Commit d27f90e · verified · 1 Parent(s): 7876fcd

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. lerobot/src/lerobot/cameras/opencv/__init__.py +18 -0
  2. lerobot/src/lerobot/cameras/opencv/camera_opencv.py +541 -0
  3. lerobot/src/lerobot/cameras/opencv/configuration_opencv.py +85 -0
  4. lerobot/src/lerobot/cameras/reachy2_camera/__init__.py +16 -0
  5. lerobot/src/lerobot/cameras/reachy2_camera/configuration_reachy2_camera.py +80 -0
  6. lerobot/src/lerobot/cameras/reachy2_camera/reachy2_camera.py +220 -0
  7. lerobot/src/lerobot/cameras/realsense/__init__.py +16 -0
  8. lerobot/src/lerobot/cameras/realsense/camera_realsense.py +568 -0
  9. lerobot/src/lerobot/cameras/realsense/configuration_realsense.py +82 -0
  10. lerobot/src/lerobot/cameras/zmq/__init__.py +20 -0
  11. lerobot/src/lerobot/cameras/zmq/camera_zmq.py +235 -0
  12. lerobot/src/lerobot/cameras/zmq/configuration_zmq.py +46 -0
  13. lerobot/src/lerobot/cameras/zmq/image_server.py +114 -0
  14. lerobot/src/lerobot/data_processing/sarm_annotations/__init__.py +13 -0
  15. lerobot/src/lerobot/data_processing/sarm_annotations/subtask_annotation.py +1202 -0
  16. lerobot/src/lerobot/datasets/push_dataset_to_hub/utils.py +73 -0
  17. lerobot/src/lerobot/datasets/v30/augment_dataset_quantile_stats.py +260 -0
  18. lerobot/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py +571 -0
  19. lerobot/src/lerobot/motors/dynamixel/__init__.py +18 -0
  20. lerobot/src/lerobot/motors/dynamixel/dynamixel.py +264 -0
  21. lerobot/src/lerobot/motors/dynamixel/tables.py +199 -0
  22. lerobot/src/lerobot/motors/feetech/__init__.py +18 -0
  23. lerobot/src/lerobot/motors/feetech/feetech.py +455 -0
  24. lerobot/src/lerobot/motors/feetech/tables.py +256 -0
  25. lerobot/src/lerobot/policies/act/README.md +1 -0
  26. lerobot/src/lerobot/policies/act/configuration_act.py +186 -0
  27. lerobot/src/lerobot/policies/act/modeling_act.py +746 -0
  28. lerobot/src/lerobot/policies/act/processor_act.py +85 -0
  29. lerobot/src/lerobot/policies/diffusion/configuration_diffusion.py +238 -0
  30. lerobot/src/lerobot/policies/diffusion/modeling_diffusion.py +764 -0
  31. lerobot/src/lerobot/robots/lekiwi/__init__.py +19 -0
  32. lerobot/src/lerobot/robots/lekiwi/lekiwi.py +417 -0
  33. lerobot/src/lerobot/robots/lekiwi/lekiwi_client.py +335 -0
  34. lerobot/src/lerobot/robots/lekiwi/lekiwi_host.py +136 -0
  35. lerobot/src/lerobot/robots/omx_follower/__init__.py +21 -0
  36. lerobot/src/lerobot/robots/omx_follower/config_omx_follower.py +39 -0
  37. lerobot/src/lerobot/robots/omx_follower/omx_follower.py +219 -0
  38. lerobot/src/lerobot/robots/reachy2/__init__.py +25 -0
  39. lerobot/src/lerobot/robots/reachy2/configuration_reachy2.py +117 -0
  40. lerobot/src/lerobot/robots/reachy2/robot_reachy2.py +235 -0
  41. lerobot/src/lerobot/robots/so_follower/__init__.py +23 -0
  42. lerobot/src/lerobot/robots/so_follower/config_so_follower.py +54 -0
  43. lerobot/src/lerobot/robots/so_follower/robot_kinematic_processor.py +611 -0
  44. lerobot/src/lerobot/robots/so_follower/so100.md +1 -0
  45. lerobot/src/lerobot/robots/so_follower/so101.md +1 -0
  46. lerobot/src/lerobot/robots/so_follower/so_follower.py +234 -0
  47. lerobot/src/lerobot/robots/unitree_g1/__init__.py +18 -0
  48. lerobot/src/lerobot/robots/unitree_g1/config_unitree_g1.py +67 -0
  49. lerobot/src/lerobot/robots/unitree_g1/g1_utils.py +81 -0
  50. lerobot/src/lerobot/robots/unitree_g1/run_g1_server.py +212 -0
lerobot/src/lerobot/cameras/opencv/__init__.py ADDED
@@ -0,0 +1,18 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .camera_opencv import OpenCVCamera
16
+ from .configuration_opencv import OpenCVCameraConfig
17
+
18
+ __all__ = ["OpenCVCamera", "OpenCVCameraConfig"]
lerobot/src/lerobot/cameras/opencv/camera_opencv.py ADDED
@@ -0,0 +1,541 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ Provides the OpenCVCamera class for capturing frames from cameras using OpenCV.
17
+ """
18
+
19
+ import logging
20
+ import math
21
+ import os
22
+ import platform
23
+ import time
24
+ from pathlib import Path
25
+ from threading import Event, Lock, Thread
26
+ from typing import Any
27
+
28
+ from numpy.typing import NDArray # type: ignore # TODO: add type stubs for numpy.typing
29
+
30
+ # Fix MSMF hardware transform compatibility for Windows before importing cv2
31
+ if platform.system() == "Windows" and "OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS" not in os.environ:
32
+ os.environ["OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS"] = "0"
33
+ import cv2 # type: ignore # TODO: add type stubs for OpenCV
34
+
35
+ from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
36
+
37
+ from ..camera import Camera
38
+ from ..utils import get_cv2_backend, get_cv2_rotation
39
+ from .configuration_opencv import ColorMode, OpenCVCameraConfig
40
+
41
+ # NOTE(Steven): The maximum opencv device index depends on your operating system. For instance,
42
+ # if you have 3 cameras, they are typically associated with indices 0, 1, and 2. This is the case
43
+ # on macOS. On Ubuntu, however, the indices can be arbitrary, e.g. 6, 16, and 23.
44
+ # When you change the USB port or reboot the computer, the operating system might
45
+ # treat the same cameras as new devices. Thus we use a higher upper bound when searching for indices.
46
+ MAX_OPENCV_INDEX = 60
47
+
48
+ logger = logging.getLogger(__name__)
49
+
50
+
51
+ class OpenCVCamera(Camera):
52
+ """
53
+ Manages camera interactions using OpenCV for efficient frame recording.
54
+
55
+ This class provides a high-level interface to connect to, configure, and read
56
+ frames from cameras compatible with OpenCV's VideoCapture. It supports both
57
+ synchronous and asynchronous frame reading.
58
+
59
+ An OpenCVCamera instance requires a camera index (e.g., 0) or a device path
60
+ (e.g., '/dev/video0' on Linux). Camera indices can be unstable across reboots
61
+ or port changes, especially on Linux. Use the provided utility script to find
62
+ available camera indices or paths:
63
+ ```bash
64
+ lerobot-find-cameras opencv
65
+ ```
66
+
67
+ The camera's default settings (FPS, resolution, color mode) are used unless
68
+ overridden in the configuration.
69
+
70
+ Example:
71
+ ```python
72
+ from lerobot.cameras.opencv import OpenCVCamera
73
+ from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig, ColorMode, Cv2Rotation
74
+
75
+ # Basic usage with camera index 0
76
+ config = OpenCVCameraConfig(index_or_path=0)
77
+ camera = OpenCVCamera(config)
78
+ camera.connect()
79
+
80
+ # Read 1 frame synchronously
81
+ color_image = camera.read()
82
+ print(color_image.shape)
83
+
84
+ # Read 1 frame asynchronously
85
+ async_image = camera.async_read()
86
+
87
+ # When done, properly disconnect the camera using
88
+ camera.disconnect()
89
+
90
+ # Example with custom settings
91
+ custom_config = OpenCVCameraConfig(
92
+ index_or_path='/dev/video0', # Or use an index
93
+ fps=30,
94
+ width=1280,
95
+ height=720,
96
+ color_mode=ColorMode.RGB,
97
+ rotation=Cv2Rotation.ROTATE_90
98
+ )
99
+ custom_camera = OpenCVCamera(custom_config)
100
+ # ... connect, read, disconnect ...
101
+ ```
102
+ """
103
+
104
+ def __init__(self, config: OpenCVCameraConfig):
105
+ """
106
+ Initializes the OpenCVCamera instance.
107
+
108
+ Args:
109
+ config: The configuration settings for the camera.
110
+ """
111
+ super().__init__(config)
112
+
113
+ self.config = config
114
+ self.index_or_path = config.index_or_path
115
+
116
+ self.fps = config.fps
117
+ self.color_mode = config.color_mode
118
+ self.warmup_s = config.warmup_s
119
+
120
+ self.videocapture: cv2.VideoCapture | None = None
121
+
122
+ self.thread: Thread | None = None
123
+ self.stop_event: Event | None = None
124
+ self.frame_lock: Lock = Lock()
125
+ self.latest_frame: NDArray[Any] | None = None
126
+ self.new_frame_event: Event = Event()
127
+
128
+ self.rotation: int | None = get_cv2_rotation(config.rotation)
129
+ self.backend: int = get_cv2_backend()
130
+
131
+ if self.height and self.width:
132
+ self.capture_width, self.capture_height = self.width, self.height
133
+ if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]:
134
+ self.capture_width, self.capture_height = self.height, self.width
135
+
136
+ def __str__(self) -> str:
137
+ return f"{self.__class__.__name__}({self.index_or_path})"
138
+
139
+ @property
140
+ def is_connected(self) -> bool:
141
+ """Checks if the camera is currently connected and opened."""
142
+ return isinstance(self.videocapture, cv2.VideoCapture) and self.videocapture.isOpened()
143
+
144
+ def connect(self, warmup: bool = True) -> None:
145
+ """
146
+ Connects to the OpenCV camera specified in the configuration.
147
+
148
+ Initializes the OpenCV VideoCapture object, sets desired camera properties
149
+ (FPS, width, height), and performs initial checks.
150
+
151
+ Raises:
152
+ DeviceAlreadyConnectedError: If the camera is already connected.
153
+ ConnectionError: If the specified camera index/path is not found or the camera is found but fails to open.
154
+ RuntimeError: If the camera opens but fails to apply requested FPS/resolution settings.
155
+ """
156
+ if self.is_connected:
157
+ raise DeviceAlreadyConnectedError(f"{self} is already connected.")
158
+
159
+ # Use 1 thread for OpenCV operations to avoid potential conflicts or
160
+ # blocking in multi-threaded applications, especially during data collection.
161
+ cv2.setNumThreads(1)
162
+
163
+ self.videocapture = cv2.VideoCapture(self.index_or_path, self.backend)
164
+
165
+ if not self.videocapture.isOpened():
166
+ self.videocapture.release()
167
+ self.videocapture = None
168
+ raise ConnectionError(
169
+ f"Failed to open {self}.Run `lerobot-find-cameras opencv` to find available cameras."
170
+ )
171
+
172
+ self._configure_capture_settings()
173
+
174
+ if warmup:
175
+ start_time = time.time()
176
+ while time.time() - start_time < self.warmup_s:
177
+ self.read()
178
+ time.sleep(0.1)
179
+
180
+ logger.info(f"{self} connected.")
181
+
182
+ def _configure_capture_settings(self) -> None:
183
+ """
184
+ Applies the specified FOURCC, FPS, width, and height settings to the connected camera.
185
+
186
+ This method attempts to set the camera properties via OpenCV. It checks if
187
+ the camera successfully applied the settings and raises an error if not.
188
+ FOURCC is set first (if specified) as it can affect the available FPS and resolution options.
189
+
190
+ Configured values (taken from the instance configuration rather than passed as arguments):
191
+ fourcc: The desired FOURCC code (e.g., "MJPG", "YUYV"). If None, auto-detect.
192
+ fps: The desired frames per second. If None, the setting is skipped.
193
+ width: The desired capture width. If None, the setting is skipped.
194
+ height: The desired capture height. If None, the setting is skipped.
195
+
196
+ Raises:
197
+ RuntimeError: If the camera fails to set any of the specified properties
198
+ to the requested value.
199
+ DeviceNotConnectedError: If the camera is not connected when attempting
200
+ to configure settings.
201
+ """
202
+ if not self.is_connected:
203
+ raise DeviceNotConnectedError(f"Cannot configure settings for {self} as it is not connected.")
204
+
205
+ # Set FOURCC first (if specified) as it can affect available FPS/resolution options
206
+ if self.config.fourcc is not None:
207
+ self._validate_fourcc()
208
+ if self.videocapture is None:
209
+ raise DeviceNotConnectedError(f"{self} videocapture is not initialized")
210
+
211
+ default_width = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_WIDTH)))
212
+ default_height = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
213
+
214
+ if self.width is None or self.height is None:
215
+ self.width, self.height = default_width, default_height
216
+ self.capture_width, self.capture_height = default_width, default_height
217
+ if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]:
218
+ self.width, self.height = default_height, default_width
219
+ self.capture_width, self.capture_height = default_width, default_height
220
+ else:
221
+ self._validate_width_and_height()
222
+
223
+ if self.fps is None:
224
+ self.fps = self.videocapture.get(cv2.CAP_PROP_FPS)
225
+ else:
226
+ self._validate_fps()
227
+
228
+ def _validate_fps(self) -> None:
229
+ """Validates and sets the camera's frames per second (FPS)."""
230
+
231
+ if self.videocapture is None:
232
+ raise DeviceNotConnectedError(f"{self} videocapture is not initialized")
233
+
234
+ if self.fps is None:
235
+ raise ValueError(f"{self} FPS is not set")
236
+
237
+ success = self.videocapture.set(cv2.CAP_PROP_FPS, float(self.fps))
238
+ actual_fps = self.videocapture.get(cv2.CAP_PROP_FPS)
239
+ # Use math.isclose for robust float comparison
240
+ if not success or not math.isclose(self.fps, actual_fps, rel_tol=1e-3):
241
+ raise RuntimeError(f"{self} failed to set fps={self.fps} ({actual_fps=}).")
242
+
243
+ def _validate_fourcc(self) -> None:
244
+ """Validates and sets the camera's FOURCC code."""
245
+
246
+ fourcc_code = cv2.VideoWriter_fourcc(*self.config.fourcc)
247
+
248
+ if self.videocapture is None:
249
+ raise DeviceNotConnectedError(f"{self} videocapture is not initialized")
250
+
251
+ success = self.videocapture.set(cv2.CAP_PROP_FOURCC, fourcc_code)
252
+ actual_fourcc_code = self.videocapture.get(cv2.CAP_PROP_FOURCC)
253
+
254
+ # Convert actual FOURCC code back to string for comparison
255
+ actual_fourcc_code_int = int(actual_fourcc_code)
256
+ actual_fourcc = "".join([chr((actual_fourcc_code_int >> 8 * i) & 0xFF) for i in range(4)])
257
+
258
+ if not success or actual_fourcc != self.config.fourcc:
259
+ logger.warning(
260
+ f"{self} failed to set fourcc={self.config.fourcc} (actual={actual_fourcc}, success={success}). "
261
+ f"Continuing with default format."
262
+ )
263
+
264
+ def _validate_width_and_height(self) -> None:
265
+ """Validates and sets the camera's frame capture width and height."""
266
+
267
+ if self.videocapture is None:
268
+ raise DeviceNotConnectedError(f"{self} videocapture is not initialized")
269
+
270
+ if self.capture_width is None or self.capture_height is None:
271
+ raise ValueError(f"{self} capture_width or capture_height is not set")
272
+
273
+ width_success = self.videocapture.set(cv2.CAP_PROP_FRAME_WIDTH, float(self.capture_width))
274
+ height_success = self.videocapture.set(cv2.CAP_PROP_FRAME_HEIGHT, float(self.capture_height))
275
+
276
+ actual_width = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_WIDTH)))
277
+ if not width_success or self.capture_width != actual_width:
278
+ raise RuntimeError(
279
+ f"{self} failed to set capture_width={self.capture_width} ({actual_width=}, {width_success=})."
280
+ )
281
+
282
+ actual_height = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
283
+ if not height_success or self.capture_height != actual_height:
284
+ raise RuntimeError(
285
+ f"{self} failed to set capture_height={self.capture_height} ({actual_height=}, {height_success=})."
286
+ )
287
+
288
+ @staticmethod
289
+ def find_cameras() -> list[dict[str, Any]]:
290
+ """
291
+ Detects available OpenCV cameras connected to the system.
292
+
293
+ On Linux, it scans '/dev/video*' paths. On other systems (like macOS, Windows),
294
+ it checks indices from 0 up to `MAX_OPENCV_INDEX`.
295
+
296
+ Returns:
297
+ List[Dict[str, Any]]: A list of dictionaries,
298
+ where each dictionary contains 'type', 'id' (port index or path),
299
+ and the default profile properties (width, height, fps, format).
300
+ """
301
+ found_cameras_info = []
302
+
303
+ targets_to_scan: list[str | int]
304
+ if platform.system() == "Linux":
305
+ possible_paths = sorted(Path("/dev").glob("video*"), key=lambda p: p.name)
306
+ targets_to_scan = [str(p) for p in possible_paths]
307
+ else:
308
+ targets_to_scan = [int(i) for i in range(MAX_OPENCV_INDEX)]
309
+
310
+ for target in targets_to_scan:
311
+ camera = cv2.VideoCapture(target)
312
+ if camera.isOpened():
313
+ default_width = int(camera.get(cv2.CAP_PROP_FRAME_WIDTH))
314
+ default_height = int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT))
315
+ default_fps = camera.get(cv2.CAP_PROP_FPS)
316
+ default_format = camera.get(cv2.CAP_PROP_FORMAT)
317
+
318
+ # Get FOURCC code and convert to string
319
+ default_fourcc_code = camera.get(cv2.CAP_PROP_FOURCC)
320
+ default_fourcc_code_int = int(default_fourcc_code)
321
+ default_fourcc = "".join([chr((default_fourcc_code_int >> 8 * i) & 0xFF) for i in range(4)])
322
+
323
+ camera_info = {
324
+ "name": f"OpenCV Camera @ {target}",
325
+ "type": "OpenCV",
326
+ "id": target,
327
+ "backend_api": camera.getBackendName(),
328
+ "default_stream_profile": {
329
+ "format": default_format,
330
+ "fourcc": default_fourcc,
331
+ "width": default_width,
332
+ "height": default_height,
333
+ "fps": default_fps,
334
+ },
335
+ }
336
+
337
+ found_cameras_info.append(camera_info)
338
+ camera.release()
339
+
340
+ return found_cameras_info
341
+
342
+ def read(self, color_mode: ColorMode | None = None) -> NDArray[Any]:
343
+ """
344
+ Reads a single frame synchronously from the camera.
345
+
346
+ This is a blocking call. It waits for the next available frame from the
347
+ camera hardware via OpenCV.
348
+
349
+ Args:
350
+ color_mode (Optional[ColorMode]): If specified, overrides the default
351
+ color mode (`self.color_mode`) for this read operation (e.g.,
352
+ request RGB even if default is BGR).
353
+
354
+ Returns:
355
+ np.ndarray: The captured frame as a NumPy array in the format
356
+ (height, width, channels), using the specified or default
357
+ color mode and applying any configured rotation.
358
+
359
+ Raises:
360
+ DeviceNotConnectedError: If the camera is not connected.
361
+ RuntimeError: If reading the frame from the camera fails or if the
362
+ received frame dimensions don't match expectations before rotation.
363
+ ValueError: If an invalid `color_mode` is requested.
364
+ """
365
+ if not self.is_connected:
366
+ raise DeviceNotConnectedError(f"{self} is not connected.")
367
+
368
+ start_time = time.perf_counter()
369
+
370
+ if self.videocapture is None:
371
+ raise DeviceNotConnectedError(f"{self} videocapture is not initialized")
372
+
373
+ ret, frame = self.videocapture.read()
374
+
375
+ if not ret or frame is None:
376
+ raise RuntimeError(f"{self} read failed (status={ret}).")
377
+
378
+ processed_frame = self._postprocess_image(frame, color_mode)
379
+
380
+ read_duration_ms = (time.perf_counter() - start_time) * 1e3
381
+ logger.debug(f"{self} read took: {read_duration_ms:.1f}ms")
382
+
383
+ return processed_frame
384
+
385
+ def _postprocess_image(self, image: NDArray[Any], color_mode: ColorMode | None = None) -> NDArray[Any]:
386
+ """
387
+ Applies color conversion, dimension validation, and rotation to a raw frame.
388
+
389
+ Args:
390
+ image (np.ndarray): The raw image frame (expected BGR format from OpenCV).
391
+ color_mode (Optional[ColorMode]): The target color mode (RGB or BGR). If None,
392
+ uses the instance's default `self.color_mode`.
393
+
394
+ Returns:
395
+ np.ndarray: The processed image frame.
396
+
397
+ Raises:
398
+ ValueError: If the requested `color_mode` is invalid.
399
+ RuntimeError: If the raw frame dimensions do not match the configured
400
+ `width` and `height`.
401
+ """
402
+ requested_color_mode = self.color_mode if color_mode is None else color_mode
403
+
404
+ if requested_color_mode not in (ColorMode.RGB, ColorMode.BGR):
405
+ raise ValueError(
406
+ f"Invalid color mode '{requested_color_mode}'. Expected {ColorMode.RGB} or {ColorMode.BGR}."
407
+ )
408
+
409
+ h, w, c = image.shape
410
+
411
+ if h != self.capture_height or w != self.capture_width:
412
+ raise RuntimeError(
413
+ f"{self} frame width={w} or height={h} do not match configured width={self.capture_width} or height={self.capture_height}."
414
+ )
415
+
416
+ if c != 3:
417
+ raise RuntimeError(f"{self} frame channels={c} do not match expected 3 channels (RGB/BGR).")
418
+
419
+ processed_image = image
420
+ if requested_color_mode == ColorMode.RGB:
421
+ processed_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
422
+
423
+ if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_180]:
424
+ processed_image = cv2.rotate(processed_image, self.rotation)
425
+
426
+ return processed_image
427
+
428
+ def _read_loop(self) -> None:
429
+ """
430
+ Internal loop run by the background thread for asynchronous reading.
431
+
432
+ On each iteration:
433
+ 1. Reads a color frame
434
+ 2. Stores result in latest_frame (thread-safe)
435
+ 3. Sets new_frame_event to notify listeners
436
+
437
+ Stops on DeviceNotConnectedError, logs other errors and continues.
438
+ """
439
+ if self.stop_event is None:
440
+ raise RuntimeError(f"{self}: stop_event is not initialized before starting read loop.")
441
+
442
+ while not self.stop_event.is_set():
443
+ try:
444
+ color_image = self.read()
445
+
446
+ with self.frame_lock:
447
+ self.latest_frame = color_image
448
+ self.new_frame_event.set()
449
+
450
+ except DeviceNotConnectedError:
451
+ break
452
+ except Exception as e:
453
+ logger.warning(f"Error reading frame in background thread for {self}: {e}")
454
+
455
+ def _start_read_thread(self) -> None:
456
+ """Starts or restarts the background read thread if it's not running."""
457
+ if self.thread is not None and self.thread.is_alive():
458
+ self.thread.join(timeout=0.1)
459
+ if self.stop_event is not None:
460
+ self.stop_event.set()
461
+
462
+ self.stop_event = Event()
463
+ self.thread = Thread(target=self._read_loop, args=(), name=f"{self}_read_loop")
464
+ self.thread.daemon = True
465
+ self.thread.start()
466
+
467
+ def _stop_read_thread(self) -> None:
468
+ """Signals the background read thread to stop and waits for it to join."""
469
+ if self.stop_event is not None:
470
+ self.stop_event.set()
471
+
472
+ if self.thread is not None and self.thread.is_alive():
473
+ self.thread.join(timeout=2.0)
474
+
475
+ self.thread = None
476
+ self.stop_event = None
477
+
478
+ def async_read(self, timeout_ms: float = 200) -> NDArray[Any]:
479
+ """
480
+ Reads the latest available frame asynchronously.
481
+
482
+ This method retrieves the most recent frame captured by the background
483
+ read thread. It does not block waiting for the camera hardware directly,
484
+ but may wait up to timeout_ms for the background thread to provide a frame.
485
+
486
+ Args:
487
+ timeout_ms (float): Maximum time in milliseconds to wait for a frame
488
+ to become available. Defaults to 200ms (0.2 seconds).
489
+
490
+ Returns:
491
+ np.ndarray: The latest captured frame as a NumPy array in the format
492
+ (height, width, channels), processed according to configuration.
493
+
494
+ Raises:
495
+ DeviceNotConnectedError: If the camera is not connected.
496
+ TimeoutError: If no frame becomes available within the specified timeout.
497
+ RuntimeError: If an unexpected error occurs.
498
+ """
499
+ if not self.is_connected:
500
+ raise DeviceNotConnectedError(f"{self} is not connected.")
501
+
502
+ if self.thread is None or not self.thread.is_alive():
503
+ self._start_read_thread()
504
+
505
+ if not self.new_frame_event.wait(timeout=timeout_ms / 1000.0):
506
+ thread_alive = self.thread is not None and self.thread.is_alive()
507
+ raise TimeoutError(
508
+ f"Timed out waiting for frame from camera {self} after {timeout_ms} ms. "
509
+ f"Read thread alive: {thread_alive}."
510
+ )
511
+
512
+ with self.frame_lock:
513
+ frame = self.latest_frame
514
+ self.new_frame_event.clear()
515
+
516
+ if frame is None:
517
+ raise RuntimeError(f"Internal error: Event set but no frame available for {self}.")
518
+
519
+ return frame
520
+
521
+ def disconnect(self) -> None:
522
+ """
523
+ Disconnects from the camera and cleans up resources.
524
+
525
+ Stops the background read thread (if running) and releases the OpenCV
526
+ VideoCapture object.
527
+
528
+ Raises:
529
+ DeviceNotConnectedError: If the camera is already disconnected.
530
+ """
531
+ if not self.is_connected and self.thread is None:
532
+ raise DeviceNotConnectedError(f"{self} not connected.")
533
+
534
+ if self.thread is not None:
535
+ self._stop_read_thread()
536
+
537
+ if self.videocapture is not None:
538
+ self.videocapture.release()
539
+ self.videocapture = None
540
+
541
+ logger.info(f"{self} disconnected.")
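
For reference, a minimal usage sketch of the `OpenCVCamera` API added above, combining the synchronous connect/warmup path with the asynchronous read loop described in `async_read()`. The camera index, resolution, FPS, and loop length are placeholders, not values from the commit.

```python
import time

from lerobot.cameras.opencv import OpenCVCamera, OpenCVCameraConfig

# Placeholder configuration: camera index 0, 640x480 @ 30 FPS.
config = OpenCVCameraConfig(index_or_path=0, fps=30, width=640, height=480)
camera = OpenCVCamera(config)
camera.connect()  # runs warmup reads for `warmup_s` seconds by default

try:
    for _ in range(100):
        # async_read() starts the background read thread on first use and returns
        # the latest frame; it raises TimeoutError if no frame arrives in time.
        frame = camera.async_read(timeout_ms=200)
        print(frame.shape)  # (height, width, 3) in the configured color mode
        time.sleep(1 / 30)
except TimeoutError:
    print("No frame received within 200 ms")
finally:
    camera.disconnect()
```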
lerobot/src/lerobot/cameras/opencv/configuration_opencv.py ADDED
@@ -0,0 +1,85 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+ from pathlib import Path
17
+
18
+ from ..configs import CameraConfig, ColorMode, Cv2Rotation
19
+
20
+ __all__ = ["OpenCVCameraConfig", "ColorMode", "Cv2Rotation"]
21
+
22
+
23
+ @CameraConfig.register_subclass("opencv")
24
+ @dataclass
25
+ class OpenCVCameraConfig(CameraConfig):
26
+ """Configuration class for OpenCV-based camera devices or video files.
27
+
28
+ This class provides configuration options for cameras accessed through OpenCV,
29
+ supporting both physical camera devices and video files. It includes settings
30
+ for resolution, frame rate, color mode, and image rotation.
31
+
32
+ Example configurations:
33
+ ```python
34
+ # Basic configurations
35
+ OpenCVCameraConfig(0, 30, 1280, 720) # 1280x720 @ 30FPS
36
+ OpenCVCameraConfig("/dev/video4", 60, 640, 480) # 640x480 @ 60FPS
37
+
38
+ # Advanced configurations with FOURCC format
39
+ OpenCVCameraConfig(128422271347, 30, 640, 480, rotation=Cv2Rotation.ROTATE_90, fourcc="MJPG") # With 90° rotation and MJPG format
40
+ OpenCVCameraConfig(0, 30, 1280, 720, fourcc="YUYV") # With YUYV format
41
+ ```
42
+
43
+ Attributes:
44
+ index_or_path: Either an integer representing the camera device index,
45
+ or a Path object pointing to a video file.
46
+ fps: Requested frames per second for the color stream.
47
+ width: Requested frame width in pixels for the color stream.
48
+ height: Requested frame height in pixels for the color stream.
49
+ color_mode: Color mode for image output (RGB or BGR). Defaults to RGB.
50
+ rotation: Image rotation setting (0°, 90°, 180°, or 270°). Defaults to no rotation.
51
+ warmup_s: Time reading frames before returning from connect (in seconds)
52
+ fourcc: FOURCC code for video format (e.g., "MJPG", "YUYV", "I420"). Defaults to None (auto-detect).
53
+
54
+ Note:
55
+ - Only 3-channel color output (RGB/BGR) is currently supported.
56
+ - FOURCC codes must be 4-character strings (e.g., "MJPG", "YUYV"). Some common FOURCC codes: https://learn.microsoft.com/en-us/windows/win32/medfound/video-fourccs#fourcc-constants
57
+ - Setting FOURCC can help achieve higher frame rates on some cameras.
58
+ """
59
+
60
+ index_or_path: int | Path
61
+ color_mode: ColorMode = ColorMode.RGB
62
+ rotation: Cv2Rotation = Cv2Rotation.NO_ROTATION
63
+ warmup_s: int = 1
64
+ fourcc: str | None = None
65
+
66
+ def __post_init__(self) -> None:
67
+ if self.color_mode not in (ColorMode.RGB, ColorMode.BGR):
68
+ raise ValueError(
69
+ f"`color_mode` is expected to be {ColorMode.RGB.value} or {ColorMode.BGR.value}, but {self.color_mode} is provided."
70
+ )
71
+
72
+ if self.rotation not in (
73
+ Cv2Rotation.NO_ROTATION,
74
+ Cv2Rotation.ROTATE_90,
75
+ Cv2Rotation.ROTATE_180,
76
+ Cv2Rotation.ROTATE_270,
77
+ ):
78
+ raise ValueError(
79
+ f"`rotation` is expected to be in {(Cv2Rotation.NO_ROTATION, Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_180, Cv2Rotation.ROTATE_270)}, but {self.rotation} is provided."
80
+ )
81
+
82
+ if self.fourcc is not None and (not isinstance(self.fourcc, str) or len(self.fourcc) != 4):
83
+ raise ValueError(
84
+ f"`fourcc` must be a 4-character string (e.g., 'MJPG', 'YUYV'), but '{self.fourcc}' is provided."
85
+ )
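
As an illustrative sketch of the validation behavior in `__post_init__` above: invalid values fail at construction time rather than at connect time. Keyword arguments are used throughout, and the fps/width/height values are placeholders assumed to be accepted by the `CameraConfig` base class, consistent with the docstring examples.

```python
from lerobot.cameras.opencv import OpenCVCameraConfig

# Valid configuration with an explicit MJPG pixel format.
ok = OpenCVCameraConfig(index_or_path=0, fps=30, width=1280, height=720, fourcc="MJPG")

try:
    # "MJP" is not a 4-character FOURCC string, so __post_init__ raises ValueError.
    OpenCVCameraConfig(index_or_path=0, fps=30, width=1280, height=720, fourcc="MJP")
except ValueError as e:
    print(e)
```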
lerobot/src/lerobot/cameras/reachy2_camera/__init__.py ADDED
@@ -0,0 +1,16 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .configuration_reachy2_camera import Reachy2CameraConfig
16
+ from .reachy2_camera import Reachy2Camera
lerobot/src/lerobot/cameras/reachy2_camera/configuration_reachy2_camera.py ADDED
@@ -0,0 +1,80 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+
17
+ from ..configs import CameraConfig, ColorMode
18
+
19
+ __all__ = ["CameraConfig", "ColorMode", "Reachy2CameraConfig"]
20
+
21
+
22
+ @CameraConfig.register_subclass("reachy2_camera")
23
+ @dataclass
24
+ class Reachy2CameraConfig(CameraConfig):
25
+ """Configuration class for Reachy 2 camera devices.
26
+
27
+ This class provides configuration options for Reachy 2 cameras,
28
+ supporting both the teleop and depth cameras. It includes settings
29
+ for resolution, frame rate, color mode, and the selection of the cameras.
30
+
31
+ Example configurations:
32
+ ```python
33
+ # Basic configurations
34
+ Reachy2CameraConfig(
35
+ name="teleop",
36
+ image_type="left",
37
+ ip_address="192.168.0.200", # IP address of the robot
38
+ port=50065, # Port of the camera server
39
+ width=640,
40
+ height=480,
41
+ fps=30, # Not configurable for Reachy 2 cameras
42
+ color_mode=ColorMode.RGB,
43
+ ) # Left teleop camera, 640x480 @ 30FPS
44
+ ```
45
+
46
+ Attributes:
47
+ name: Name of the camera device. Can be "teleop" or "depth".
48
+ image_type: Type of image stream. For "teleop" camera, can be "left" or "right".
49
+ For "depth" camera, can be "rgb" or "depth". (depth is not supported yet)
50
+ fps: Requested frames per second for the color stream. Not configurable for Reachy 2 cameras.
51
+ width: Requested frame width in pixels for the color stream.
52
+ height: Requested frame height in pixels for the color stream.
53
+ color_mode: Color mode for image output (RGB or BGR). Defaults to RGB.
54
+ ip_address: IP address of the robot. Defaults to "localhost".
55
+ port: Port number for the camera server. Defaults to 50065.
56
+
57
+ Note:
58
+ - Only 3-channel color output (RGB/BGR) is currently supported.
59
+ """
60
+
61
+ name: str
62
+ image_type: str
63
+ color_mode: ColorMode = ColorMode.RGB
64
+ ip_address: str | None = "localhost"
65
+ port: int = 50065
66
+
67
+ def __post_init__(self) -> None:
68
+ if self.name not in ["teleop", "depth"]:
69
+ raise ValueError(f"`name` is expected to be 'teleop' or 'depth', but {self.name} is provided.")
70
+ if (self.name == "teleop" and self.image_type not in ["left", "right"]) or (
71
+ self.name == "depth" and self.image_type not in ["rgb", "depth"]
72
+ ):
73
+ raise ValueError(
74
+ f"`image_type` is expected to be 'left' or 'right' for teleop camera, and 'rgb' or 'depth' for depth camera, but {self.image_type} is provided."
75
+ )
76
+
77
+ if self.color_mode not in ["rgb", "bgr"]:
78
+ raise ValueError(
79
+ f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided."
80
+ )
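
An illustrative sketch of constructing the two Reachy 2 camera configurations described above, including the `__post_init__` check on `image_type`. The IP address and resolution values are placeholders taken from the docstring example.

```python
from lerobot.cameras.reachy2_camera import Reachy2CameraConfig

# Left image of the stereo teleop camera on a robot at a placeholder address.
left_teleop = Reachy2CameraConfig(
    name="teleop", image_type="left", ip_address="192.168.0.200", width=640, height=480, fps=30
)

# RGB stream of the depth camera; ip_address defaults to "localhost".
depth_rgb = Reachy2CameraConfig(name="depth", image_type="rgb", width=640, height=480, fps=30)

try:
    # The depth camera only accepts image_type "rgb" or "depth", so this raises ValueError.
    Reachy2CameraConfig(name="depth", image_type="left", width=640, height=480, fps=30)
except ValueError as e:
    print(e)
```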
lerobot/src/lerobot/cameras/reachy2_camera/reachy2_camera.py ADDED
@@ -0,0 +1,220 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ Provides the Reachy2Camera class for capturing frames from Reachy 2 cameras using Reachy 2's CameraManager.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ import logging
22
+ import os
23
+ import platform
24
+ import time
25
+ from typing import TYPE_CHECKING, Any
26
+
27
+ from numpy.typing import NDArray # type: ignore # TODO: add type stubs for numpy.typing
28
+
29
+ # Fix MSMF hardware transform compatibility for Windows before importing cv2
30
+ if platform.system() == "Windows" and "OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS" not in os.environ:
31
+ os.environ["OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS"] = "0"
32
+ import cv2 # type: ignore # TODO: add type stubs for OpenCV
33
+ import numpy as np # type: ignore # TODO: add type stubs for numpy
34
+
35
+ from lerobot.utils.import_utils import _reachy2_sdk_available
36
+
37
+ if TYPE_CHECKING or _reachy2_sdk_available:
38
+ from reachy2_sdk.media.camera import CameraView
39
+ from reachy2_sdk.media.camera_manager import CameraManager
40
+ else:
41
+ CameraManager = None
42
+
43
+ class CameraView:
44
+ LEFT = 0
45
+ RIGHT = 1
46
+
47
+
48
+ from lerobot.utils.errors import DeviceNotConnectedError
49
+
50
+ from ..camera import Camera
51
+ from .configuration_reachy2_camera import ColorMode, Reachy2CameraConfig
52
+
53
+ logger = logging.getLogger(__name__)
54
+
55
+
56
+ class Reachy2Camera(Camera):
57
+ """
58
+ Manages Reachy 2 camera using Reachy 2 CameraManager.
59
+
60
+ This class provides a high-level interface to connect to, configure, and read
61
+ frames from Reachy 2 cameras. It supports both synchronous and asynchronous
62
+ frame reading.
63
+
64
+ A Reachy2Camera instance requires a camera name (e.g., "teleop") and an image
65
+ type (e.g., "left") to be specified in the configuration.
66
+
67
+ The camera's default settings (FPS, resolution, color mode) are used unless
68
+ overridden in the configuration.
69
+ """
70
+
71
+ def __init__(self, config: Reachy2CameraConfig):
72
+ """
73
+ Initializes the Reachy2Camera instance.
74
+
75
+ Args:
76
+ config: The configuration settings for the camera.
77
+ """
78
+ super().__init__(config)
79
+
80
+ self.config = config
81
+
82
+ self.color_mode = config.color_mode
83
+
84
+ self.cam_manager: CameraManager | None = None
85
+
86
+ def __str__(self) -> str:
87
+ return f"{self.__class__.__name__}({self.config.name}, {self.config.image_type})"
88
+
89
+ @property
90
+ def is_connected(self) -> bool:
91
+ """Checks if the camera is currently connected and opened."""
92
+ if self.config.name == "teleop":
93
+ return bool(
94
+ self.cam_manager._grpc_connected and self.cam_manager.teleop if self.cam_manager else False
95
+ )
96
+ elif self.config.name == "depth":
97
+ return bool(
98
+ self.cam_manager._grpc_connected and self.cam_manager.depth if self.cam_manager else False
99
+ )
100
+ else:
101
+ raise ValueError(f"Invalid camera name '{self.config.name}'. Expected 'teleop' or 'depth'.")
102
+
103
+ def connect(self, warmup: bool = True) -> None:
104
+ """
105
+ Connects to the Reachy2 CameraManager as specified in the configuration.
106
+
107
+ Raises:
108
+ DeviceNotConnectedError: If the connection to the Reachy 2 CameraManager cannot be established.
109
+ """
110
+ self.cam_manager = CameraManager(host=self.config.ip_address, port=self.config.port)
111
+ if self.cam_manager is None:
112
+ raise DeviceNotConnectedError(f"Could not connect to {self}.")
113
+ self.cam_manager.initialize_cameras()
114
+
115
+ logger.info(f"{self} connected.")
116
+
117
+ @staticmethod
118
+ def find_cameras() -> list[dict[str, Any]]:
119
+ """
120
+ Detection not implemented for Reachy2 cameras.
121
+ """
122
+ raise NotImplementedError("Camera detection is not implemented for Reachy2 cameras.")
123
+
124
+ def read(self, color_mode: ColorMode | None = None) -> NDArray[Any]:
125
+ """
126
+ Reads a single frame synchronously from the camera.
127
+
128
+ This is a blocking call.
129
+
130
+ Args:
131
+ color_mode (Optional[ColorMode]): If specified, overrides the default
132
+ color mode (`self.color_mode`) for this read operation (e.g.,
133
+ request RGB even if default is BGR).
134
+
135
+ Returns:
136
+ np.ndarray: The captured frame as a NumPy array in the format
137
+ (height, width, channels), using the specified or default
138
+ color mode and applying any configured rotation.
139
+ """
140
+ start_time = time.perf_counter()
141
+
142
+ if not self.is_connected:
143
+ raise DeviceNotConnectedError(f"{self} is not connected.")
144
+
145
+ if self.cam_manager is None:
146
+ raise DeviceNotConnectedError(f"{self} is not connected.")
147
+
148
+ frame: NDArray[Any] = np.empty((0, 0, 3), dtype=np.uint8)
149
+
150
+ if self.config.name == "teleop" and hasattr(self.cam_manager, "teleop"):
151
+ if self.config.image_type == "left":
152
+ frame = self.cam_manager.teleop.get_frame(
153
+ CameraView.LEFT, size=(self.config.width, self.config.height)
154
+ )[0]
155
+ elif self.config.image_type == "right":
156
+ frame = self.cam_manager.teleop.get_frame(
157
+ CameraView.RIGHT, size=(self.config.width, self.config.height)
158
+ )[0]
159
+ elif self.config.name == "depth" and hasattr(self.cam_manager, "depth"):
160
+ if self.config.image_type == "depth":
161
+ frame = self.cam_manager.depth.get_depth_frame()[0]
162
+ elif self.config.image_type == "rgb":
163
+ frame = self.cam_manager.depth.get_frame(size=(self.config.width, self.config.height))[0]
164
+ else:
165
+ raise ValueError(f"Invalid camera name '{self.config.name}'. Expected 'teleop' or 'depth'.")
166
+
167
+ if frame is None:
168
+ return np.empty((0, 0, 3), dtype=np.uint8)
169
+
170
+ if self.config.color_mode == "rgb":
171
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
172
+
173
+ read_duration_ms = (time.perf_counter() - start_time) * 1e3
174
+ logger.debug(f"{self} read took: {read_duration_ms:.1f}ms")
175
+
176
+ return frame
177
+
178
+ def async_read(self, timeout_ms: float = 200) -> NDArray[Any]:
179
+ """
180
+ Reads the latest available frame.
181
+
182
+ This method retrieves the most recent frame available in Reachy 2's low-level software.
183
+
184
+ Args:
185
+ timeout_ms (float): Maximum time in milliseconds to wait for a frame
186
+ to become available. Defaults to 200ms (0.2 seconds).
187
+
188
+ Returns:
189
+ np.ndarray: The latest captured frame as a NumPy array in the format
190
+ (height, width, channels), processed according to configuration.
191
+
192
+ Raises:
193
+ DeviceNotConnectedError: If the camera is not connected.
194
+ TimeoutError: If no frame becomes available within the specified timeout.
195
+ RuntimeError: If an unexpected error occurs.
196
+ """
197
+ if not self.is_connected:
198
+ raise DeviceNotConnectedError(f"{self} is not connected.")
199
+
200
+ frame = self.read()
201
+
202
+ if frame is None:
203
+ raise RuntimeError(f"Internal error: No frame available for {self}.")
204
+
205
+ return frame
206
+
207
+ def disconnect(self) -> None:
208
+ """
209
+ Stops the background read thread (if running).
210
+
211
+ Raises:
212
+ DeviceNotConnectedError: If the camera is already disconnected.
213
+ """
214
+ if not self.is_connected:
215
+ raise DeviceNotConnectedError(f"{self} not connected.")
216
+
217
+ if self.cam_manager is not None:
218
+ self.cam_manager.disconnect()
219
+
220
+ logger.info(f"{self} disconnected.")
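
For reference, a minimal sketch of reading one frame from the left teleop camera through the `Reachy2Camera` class added above. The robot's IP address and the stream settings are placeholders; the `reachy2_sdk` package must be installed for the `CameraManager` connection to work.

```python
from lerobot.cameras.reachy2_camera import Reachy2Camera, Reachy2CameraConfig

config = Reachy2CameraConfig(
    name="teleop", image_type="left", ip_address="192.168.0.200", width=640, height=480, fps=30
)
camera = Reachy2Camera(config)
camera.connect()

frame = camera.read()  # blocking read, returned in the configured color mode (RGB by default)
print(frame.shape)     # (height, width, 3); an empty array is returned when no frame is available

camera.disconnect()
```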
lerobot/src/lerobot/cameras/realsense/__init__.py ADDED
@@ -0,0 +1,16 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .camera_realsense import RealSenseCamera
16
+ from .configuration_realsense import RealSenseCameraConfig
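
For reference, a minimal sketch of depth capture with the `RealSenseCamera` defined in `camera_realsense.py` below, based on its class and `read_depth()` docstrings. The serial number and stream settings are placeholders, and the millimeters-to-meters conversion is a convenience step, not part of the API.

```python
import numpy as np

from lerobot.cameras.realsense import RealSenseCamera, RealSenseCameraConfig

config = RealSenseCameraConfig(
    serial_number_or_name="0123456789",  # replace with an actual serial number
    fps=30,
    width=640,
    height=480,
    use_depth=True,
)
camera = RealSenseCamera(config)
camera.connect()

color = camera.read()        # (height, width, 3) color frame
depth = camera.read_depth()  # (height, width) uint16 depth map, raw values in millimeters
depth_m = depth.astype(np.float32) / 1000.0  # convert to meters for convenience

camera.disconnect()
```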
lerobot/src/lerobot/cameras/realsense/camera_realsense.py ADDED
@@ -0,0 +1,568 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ Provides the RealSenseCamera class for capturing frames from Intel RealSense cameras.
17
+ """
18
+
19
+ import logging
20
+ import time
21
+ from threading import Event, Lock, Thread
22
+ from typing import Any
23
+
24
+ import cv2 # type: ignore # TODO: add type stubs for OpenCV
25
+ import numpy as np # type: ignore # TODO: add type stubs for numpy
26
+ from numpy.typing import NDArray # type: ignore # TODO: add type stubs for numpy.typing
27
+
28
+ try:
29
+ import pyrealsense2 as rs # type: ignore # TODO: add type stubs for pyrealsense2
30
+ except Exception as e:
31
+ logging.info(f"Could not import realsense: {e}")
32
+
33
+ from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
34
+
35
+ from ..camera import Camera
36
+ from ..configs import ColorMode
37
+ from ..utils import get_cv2_rotation
38
+ from .configuration_realsense import RealSenseCameraConfig
39
+
40
+ logger = logging.getLogger(__name__)
41
+
42
+
43
+ class RealSenseCamera(Camera):
44
+ """
45
+ Manages interactions with Intel RealSense cameras for frame and depth recording.
46
+
47
+ This class provides an interface similar to `OpenCVCamera` but tailored for
48
+ RealSense devices, leveraging the `pyrealsense2` library. It uses the camera's
49
+ unique serial number for identification, offering more stability than device
50
+ indices, especially on Linux. It also supports capturing depth maps alongside
51
+ color frames.
52
+
53
+ Use the provided utility script to find available cameras (serial numbers) and their default profiles:
54
+ ```bash
55
+ lerobot-find-cameras realsense
56
+ ```
57
+
58
+ A `RealSenseCamera` instance requires a configuration object specifying the
59
+ camera's serial number or a unique device name. If using the name, ensure only
60
+ one camera with that name is connected.
61
+
62
+ The camera's default settings (FPS, resolution, color mode) from the stream
63
+ profile are used unless overridden in the configuration.
64
+
65
+ Example:
66
+ ```python
67
+ from lerobot.cameras.realsense import RealSenseCamera, RealSenseCameraConfig
68
+ from lerobot.cameras import ColorMode, Cv2Rotation
69
+
70
+ # Basic usage with serial number
71
+ config = RealSenseCameraConfig(serial_number_or_name="0123456789") # Replace with actual SN
72
+ camera = RealSenseCamera(config)
73
+ camera.connect()
74
+
75
+ # Read 1 frame synchronously
76
+ color_image = camera.read()
77
+ print(color_image.shape)
78
+
79
+ # Read 1 frame asynchronously
80
+ async_image = camera.async_read()
81
+
82
+ # When done, properly disconnect the camera using
83
+ camera.disconnect()
84
+
85
+ # Example with depth capture and custom settings
86
+ custom_config = RealSenseCameraConfig(
87
+ serial_number_or_name="0123456789", # Replace with actual SN
88
+ fps=30,
89
+ width=1280,
90
+ height=720,
91
+ color_mode=ColorMode.BGR, # Request BGR output
92
+ rotation=Cv2Rotation.NO_ROTATION,
93
+ use_depth=True
94
+ )
95
+ depth_camera = RealSenseCamera(custom_config)
96
+ depth_camera.connect()
97
+
98
+ # Read 1 depth frame
99
+ depth_map = depth_camera.read_depth()
100
+
101
+ # Example using a unique camera name
102
+ name_config = RealSenseCameraConfig(serial_number_or_name="Intel RealSense D435") # If unique
103
+ name_camera = RealSenseCamera(name_config)
104
+ # ... connect, read, disconnect ...
105
+ ```
106
+ """
107
+
108
+ def __init__(self, config: RealSenseCameraConfig):
109
+ """
110
+ Initializes the RealSenseCamera instance.
111
+
112
+ Args:
113
+ config: The configuration settings for the camera.
114
+ """
115
+
116
+ super().__init__(config)
117
+
118
+ self.config = config
119
+
120
+ if config.serial_number_or_name.isdigit():
121
+ self.serial_number = config.serial_number_or_name
122
+ else:
123
+ self.serial_number = self._find_serial_number_from_name(config.serial_number_or_name)
124
+
125
+ self.fps = config.fps
126
+ self.color_mode = config.color_mode
127
+ self.use_depth = config.use_depth
128
+ self.warmup_s = config.warmup_s
129
+
130
+ self.rs_pipeline: rs.pipeline | None = None
131
+ self.rs_profile: rs.pipeline_profile | None = None
132
+
133
+ self.thread: Thread | None = None
134
+ self.stop_event: Event | None = None
135
+ self.frame_lock: Lock = Lock()
136
+ self.latest_frame: NDArray[Any] | None = None
137
+ self.new_frame_event: Event = Event()
138
+
139
+ self.rotation: int | None = get_cv2_rotation(config.rotation)
140
+
141
+ if self.height and self.width:
142
+ self.capture_width, self.capture_height = self.width, self.height
143
+ if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]:
144
+ self.capture_width, self.capture_height = self.height, self.width
145
+
146
+ def __str__(self) -> str:
147
+ return f"{self.__class__.__name__}({self.serial_number})"
148
+
149
+ @property
150
+ def is_connected(self) -> bool:
151
+ """Checks if the camera pipeline is started and streams are active."""
152
+ return self.rs_pipeline is not None and self.rs_profile is not None
153
+
154
+ def connect(self, warmup: bool = True) -> None:
155
+ """
156
+ Connects to the RealSense camera specified in the configuration.
157
+
158
+ Initializes the RealSense pipeline, configures the required streams (color
159
+ and optionally depth), starts the pipeline, and validates the actual stream settings.
160
+
161
+ Raises:
162
+ DeviceAlreadyConnectedError: If the camera is already connected.
163
+ ValueError: If the configuration is invalid (e.g., missing serial/name, name not unique).
164
+ ConnectionError: If the camera is found but fails to start the pipeline or no RealSense devices are detected at all.
165
+ RuntimeError: If the pipeline starts but fails to apply requested settings.
166
+ """
167
+ if self.is_connected:
168
+ raise DeviceAlreadyConnectedError(f"{self} is already connected.")
169
+
170
+ self.rs_pipeline = rs.pipeline()
171
+ rs_config = rs.config()
172
+ self._configure_rs_pipeline_config(rs_config)
173
+
174
+ try:
175
+ self.rs_profile = self.rs_pipeline.start(rs_config)
176
+ except RuntimeError as e:
177
+ self.rs_profile = None
178
+ self.rs_pipeline = None
179
+ raise ConnectionError(
180
+ f"Failed to open {self}.Run `lerobot-find-cameras realsense` to find available cameras."
181
+ ) from e
182
+
183
+ self._configure_capture_settings()
184
+
185
+ if warmup:
186
+ time.sleep(
187
+ 1
188
+ ) # NOTE(Steven): RS cameras need a bit of time to warm up before the first read. If we don't wait, the first read from the warmup will raise.
189
+ start_time = time.time()
190
+ while time.time() - start_time < self.warmup_s:
191
+ self.read()
192
+ time.sleep(0.1)
193
+
194
+ logger.info(f"{self} connected.")
195
+
196
+ @staticmethod
197
+ def find_cameras() -> list[dict[str, Any]]:
198
+ """
199
+ Detects available Intel RealSense cameras connected to the system.
200
+
201
+ Returns:
202
+ List[Dict[str, Any]]: A list of dictionaries,
203
+ where each dictionary contains 'type', 'id' (serial number), 'name',
204
+ firmware version, USB type, and other available specs, and the default profile properties (width, height, fps, format).
205
+
206
+ Raises:
207
+ OSError: If pyrealsense2 is not installed.
208
+ ImportError: If pyrealsense2 is not installed.
209
+ """
210
+ found_cameras_info = []
211
+ context = rs.context()
212
+ devices = context.query_devices()
213
+
214
+ for device in devices:
215
+ camera_info = {
216
+ "name": device.get_info(rs.camera_info.name),
217
+ "type": "RealSense",
218
+ "id": device.get_info(rs.camera_info.serial_number),
219
+ "firmware_version": device.get_info(rs.camera_info.firmware_version),
220
+ "usb_type_descriptor": device.get_info(rs.camera_info.usb_type_descriptor),
221
+ "physical_port": device.get_info(rs.camera_info.physical_port),
222
+ "product_id": device.get_info(rs.camera_info.product_id),
223
+ "product_line": device.get_info(rs.camera_info.product_line),
224
+ }
225
+
226
+ # Get stream profiles for each sensor
227
+ sensors = device.query_sensors()
228
+ for sensor in sensors:
229
+ profiles = sensor.get_stream_profiles()
230
+
231
+ for profile in profiles:
232
+ if profile.is_video_stream_profile() and profile.is_default():
233
+ vprofile = profile.as_video_stream_profile()
234
+ stream_info = {
235
+ "stream_type": vprofile.stream_name(),
236
+ "format": vprofile.format().name,
237
+ "width": vprofile.width(),
238
+ "height": vprofile.height(),
239
+ "fps": vprofile.fps(),
240
+ }
241
+ camera_info["default_stream_profile"] = stream_info
242
+
243
+ found_cameras_info.append(camera_info)
244
+
245
+ return found_cameras_info
246
+
247
+ def _find_serial_number_from_name(self, name: str) -> str:
248
+ """Finds the serial number for a given unique camera name."""
249
+ camera_infos = self.find_cameras()
250
+ found_devices = [cam for cam in camera_infos if str(cam["name"]) == name]
251
+
252
+ if not found_devices:
253
+ available_names = [cam["name"] for cam in camera_infos]
254
+ raise ValueError(
255
+ f"No RealSense camera found with name '{name}'. Available camera names: {available_names}"
256
+ )
257
+
258
+ if len(found_devices) > 1:
259
+ serial_numbers = [dev["serial_number"] for dev in found_devices]
260
+ raise ValueError(
261
+ f"Multiple RealSense cameras found with name '{name}'. "
262
+ f"Please use a unique serial number instead. Found SNs: {serial_numbers}"
263
+ )
264
+
265
+ serial_number = str(found_devices[0]["id"])
266
+ return serial_number
267
+
268
+ def _configure_rs_pipeline_config(self, rs_config: Any) -> None:
269
+ """Creates and configures the RealSense pipeline configuration object."""
270
+ rs.config.enable_device(rs_config, self.serial_number)
271
+
272
+ if self.width and self.height and self.fps:
273
+ rs_config.enable_stream(
274
+ rs.stream.color, self.capture_width, self.capture_height, rs.format.rgb8, self.fps
275
+ )
276
+ if self.use_depth:
277
+ rs_config.enable_stream(
278
+ rs.stream.depth, self.capture_width, self.capture_height, rs.format.z16, self.fps
279
+ )
280
+ else:
281
+ rs_config.enable_stream(rs.stream.color)
282
+ if self.use_depth:
283
+ rs_config.enable_stream(rs.stream.depth)
284
+
285
+ def _configure_capture_settings(self) -> None:
286
+ """Sets fps, width, and height from device stream if not already configured.
287
+
288
+ Uses the color stream profile to update unset attributes. Handles rotation by
289
+ swapping width/height when needed. Original capture dimensions are always stored.
290
+
291
+ Raises:
292
+ DeviceNotConnectedError: If device is not connected.
293
+ """
294
+ if not self.is_connected:
295
+ raise DeviceNotConnectedError(f"Cannot validate settings for {self} as it is not connected.")
296
+
297
+ if self.rs_profile is None:
298
+ raise RuntimeError(f"{self}: rs_profile must be initialized before use.")
299
+
300
+ stream = self.rs_profile.get_stream(rs.stream.color).as_video_stream_profile()
301
+
302
+ if self.fps is None:
303
+ self.fps = stream.fps()
304
+
305
+ if self.width is None or self.height is None:
306
+ actual_width = int(round(stream.width()))
307
+ actual_height = int(round(stream.height()))
308
+ if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]:
309
+ self.width, self.height = actual_height, actual_width
310
+ self.capture_width, self.capture_height = actual_width, actual_height
311
+ else:
312
+ self.width, self.height = actual_width, actual_height
313
+ self.capture_width, self.capture_height = actual_width, actual_height
314
+
315
+ def read_depth(self, timeout_ms: int = 200) -> NDArray[Any]:
316
+ """
317
+ Reads a single frame (depth) synchronously from the camera.
318
+
319
+ This is a blocking call. It waits for a coherent set of frames (depth)
320
+ from the camera hardware via the RealSense pipeline.
321
+
322
+ Args:
323
+ timeout_ms (int): Maximum time in milliseconds to wait for a frame. Defaults to 200ms.
324
+
325
+ Returns:
326
+ np.ndarray: The depth map as a NumPy array (height, width)
327
+ of type `np.uint16` (raw depth values in millimeters), rotated according to the configured rotation.
328
+
329
+ Raises:
330
+ DeviceNotConnectedError: If the camera is not connected.
331
+ RuntimeError: If reading frames from the pipeline fails or frames are invalid.
332
+ """
333
+
334
+ if not self.is_connected:
335
+ raise DeviceNotConnectedError(f"{self} is not connected.")
336
+ if not self.use_depth:
337
+ raise RuntimeError(
338
+ f"Failed to capture depth frame '.read_depth()'. Depth stream is not enabled for {self}."
339
+ )
340
+
341
+ start_time = time.perf_counter()
342
+
343
+ if self.rs_pipeline is None:
344
+ raise RuntimeError(f"{self}: rs_pipeline must be initialized before use.")
345
+
346
+ ret, frame = self.rs_pipeline.try_wait_for_frames(timeout_ms=timeout_ms)
347
+
348
+ if not ret or frame is None:
349
+ raise RuntimeError(f"{self} read_depth failed (status={ret}).")
350
+
351
+ depth_frame = frame.get_depth_frame()
352
+ depth_map = np.asanyarray(depth_frame.get_data())
353
+
354
+ depth_map_processed = self._postprocess_image(depth_map, depth_frame=True)
355
+
356
+ read_duration_ms = (time.perf_counter() - start_time) * 1e3
357
+ logger.debug(f"{self} read took: {read_duration_ms:.1f}ms")
358
+
359
+ return depth_map_processed
360
+
361
+ def read(self, color_mode: ColorMode | None = None, timeout_ms: int = 200) -> NDArray[Any]:
362
+ """
363
+ Reads a single frame (color) synchronously from the camera.
364
+
365
+ This is a blocking call. It waits for a coherent set of frames (color)
366
+ from the camera hardware via the RealSense pipeline.
367
+
368
+ Args:
369
+ timeout_ms (int): Maximum time in milliseconds to wait for a frame. Defaults to 200ms.
370
+
371
+ Returns:
372
+ np.ndarray: The captured color frame as a NumPy array
373
+ (height, width, channels), processed according to `color_mode` and rotation.
374
+
375
+ Raises:
376
+ DeviceNotConnectedError: If the camera is not connected.
377
+ RuntimeError: If reading frames from the pipeline fails or frames are invalid.
378
+ ValueError: If an invalid `color_mode` is requested.
379
+ """
380
+
381
+ if not self.is_connected:
382
+ raise DeviceNotConnectedError(f"{self} is not connected.")
383
+
384
+ start_time = time.perf_counter()
385
+
386
+ if self.rs_pipeline is None:
387
+ raise RuntimeError(f"{self}: rs_pipeline must be initialized before use.")
388
+
389
+ ret, frame = self.rs_pipeline.try_wait_for_frames(timeout_ms=timeout_ms)
390
+
391
+ if not ret or frame is None:
392
+ raise RuntimeError(f"{self} read failed (status={ret}).")
393
+
394
+ color_frame = frame.get_color_frame()
395
+ color_image_raw = np.asanyarray(color_frame.get_data())
396
+
397
+ color_image_processed = self._postprocess_image(color_image_raw, color_mode)
398
+
399
+ read_duration_ms = (time.perf_counter() - start_time) * 1e3
400
+ logger.debug(f"{self} read took: {read_duration_ms:.1f}ms")
401
+
402
+ return color_image_processed
403
+
404
+ def _postprocess_image(
405
+ self, image: NDArray[Any], color_mode: ColorMode | None = None, depth_frame: bool = False
406
+ ) -> NDArray[Any]:
407
+ """
408
+ Applies color conversion, dimension validation, and rotation to a raw color frame.
409
+
410
+ Args:
411
+ image (np.ndarray): The raw image frame (expected RGB format from RealSense).
412
+ color_mode (Optional[ColorMode]): The target color mode (RGB or BGR). If None,
413
+ uses the instance's default `self.color_mode`.
414
+
415
+ Returns:
416
+ np.ndarray: The processed image frame according to `self.color_mode` and `self.rotation`.
417
+
418
+ Raises:
419
+ ValueError: If the requested `color_mode` is invalid.
420
+ RuntimeError: If the raw frame dimensions do not match the configured
421
+ `width` and `height`.
422
+ """
423
+
424
+ if color_mode and color_mode not in (ColorMode.RGB, ColorMode.BGR):
425
+ raise ValueError(
426
+ f"Invalid requested color mode '{color_mode}'. Expected {ColorMode.RGB} or {ColorMode.BGR}."
427
+ )
428
+
429
+ if depth_frame:
430
+ h, w = image.shape
431
+ else:
432
+ h, w, c = image.shape
433
+
434
+ if not depth_frame and c != 3:
435
+ raise RuntimeError(f"{self} frame channels={c} do not match expected 3 channels (RGB/BGR).")
436
+
437
+ if h != self.capture_height or w != self.capture_width:
438
+ raise RuntimeError(
439
+ f"{self} frame width={w} or height={h} do not match configured width={self.capture_width} or height={self.capture_height}."
440
+ )
441
+
442
+ processed_image = image
443
+ if self.color_mode == ColorMode.BGR:
444
+ processed_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
445
+
446
+ if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_180]:
447
+ processed_image = cv2.rotate(processed_image, self.rotation)
448
+
449
+ return processed_image
450
+
451
+ def _read_loop(self) -> None:
452
+ """
453
+ Internal loop run by the background thread for asynchronous reading.
454
+
455
+ On each iteration:
456
+ 1. Reads a color frame with 500ms timeout
457
+ 2. Stores result in latest_frame (thread-safe)
458
+ 3. Sets new_frame_event to notify listeners
459
+
460
+ Stops on DeviceNotConnectedError, logs other errors and continues.
461
+ """
462
+ if self.stop_event is None:
463
+ raise RuntimeError(f"{self}: stop_event is not initialized before starting read loop.")
464
+
465
+ while not self.stop_event.is_set():
466
+ try:
467
+ color_image = self.read(timeout_ms=500)
468
+
469
+ with self.frame_lock:
470
+ self.latest_frame = color_image
471
+ self.new_frame_event.set()
472
+
473
+ except DeviceNotConnectedError:
474
+ break
475
+ except Exception as e:
476
+ logger.warning(f"Error reading frame in background thread for {self}: {e}")
477
+
478
+ def _start_read_thread(self) -> None:
479
+ """Starts or restarts the background read thread if it's not running."""
480
+ if self.thread is not None and self.thread.is_alive():
481
+ self.thread.join(timeout=0.1)
482
+ if self.stop_event is not None:
483
+ self.stop_event.set()
484
+
485
+ self.stop_event = Event()
486
+ self.thread = Thread(target=self._read_loop, args=(), name=f"{self}_read_loop")
487
+ self.thread.daemon = True
488
+ self.thread.start()
489
+
490
+ def _stop_read_thread(self) -> None:
491
+ """Signals the background read thread to stop and waits for it to join."""
492
+ if self.stop_event is not None:
493
+ self.stop_event.set()
494
+
495
+ if self.thread is not None and self.thread.is_alive():
496
+ self.thread.join(timeout=2.0)
497
+
498
+ self.thread = None
499
+ self.stop_event = None
500
+
501
+ # NOTE(Steven): Missing implementation for depth for now
502
+ def async_read(self, timeout_ms: float = 200) -> NDArray[Any]:
503
+ """
504
+ Reads the latest available frame data (color) asynchronously.
505
+
506
+ This method retrieves the most recent color frame captured by the background
507
+ read thread. It does not block waiting for the camera hardware directly,
508
+ but may wait up to timeout_ms for the background thread to provide a frame.
509
+
510
+ Args:
511
+ timeout_ms (float): Maximum time in milliseconds to wait for a frame
512
+ to become available. Defaults to 200ms (0.2 seconds).
513
+
514
+ Returns:
515
+ np.ndarray:
516
+ The latest captured frame data (color image), processed according to configuration.
517
+
518
+ Raises:
519
+ DeviceNotConnectedError: If the camera is not connected.
520
+ TimeoutError: If no frame data becomes available within the specified timeout.
521
+ RuntimeError: If the background thread died unexpectedly or another error occurs.
522
+ """
523
+ if not self.is_connected:
524
+ raise DeviceNotConnectedError(f"{self} is not connected.")
525
+
526
+ if self.thread is None or not self.thread.is_alive():
527
+ self._start_read_thread()
528
+
529
+ if not self.new_frame_event.wait(timeout=timeout_ms / 1000.0):
530
+ thread_alive = self.thread is not None and self.thread.is_alive()
531
+ raise TimeoutError(
532
+ f"Timed out waiting for frame from camera {self} after {timeout_ms} ms. "
533
+ f"Read thread alive: {thread_alive}."
534
+ )
535
+
536
+ with self.frame_lock:
537
+ frame = self.latest_frame
538
+ self.new_frame_event.clear()
539
+
540
+ if frame is None:
541
+ raise RuntimeError(f"Internal error: Event set but no frame available for {self}.")
542
+
543
+ return frame
544
+
545
+ def disconnect(self) -> None:
546
+ """
547
+ Disconnects from the camera, stops the pipeline, and cleans up resources.
548
+
549
+ Stops the background read thread (if running) and stops the RealSense pipeline.
550
+
551
+ Raises:
552
+ DeviceNotConnectedError: If the camera is already disconnected (pipeline not running).
553
+ """
554
+
555
+ if not self.is_connected and self.thread is None:
556
+ raise DeviceNotConnectedError(
557
+ f"Attempted to disconnect {self}, but it appears already disconnected."
558
+ )
559
+
560
+ if self.thread is not None:
561
+ self._stop_read_thread()
562
+
563
+ if self.rs_pipeline is not None:
564
+ self.rs_pipeline.stop()
565
+ self.rs_pipeline = None
566
+ self.rs_profile = None
567
+
568
+ logger.info(f"{self} disconnected.")
lerobot/src/lerobot/cameras/realsense/configuration_realsense.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+
17
+ from ..configs import CameraConfig, ColorMode, Cv2Rotation
18
+
19
+
20
+ @CameraConfig.register_subclass("intelrealsense")
21
+ @dataclass
22
+ class RealSenseCameraConfig(CameraConfig):
23
+ """Configuration class for Intel RealSense cameras.
24
+
25
+ This class provides specialized configuration options for Intel RealSense cameras,
26
+ including support for depth sensing and device identification via serial number or name.
27
+
28
+ Example configurations for Intel RealSense D405:
29
+ ```python
30
+ # Basic configurations
31
+ RealSenseCameraConfig("0123456789", 30, 1280, 720) # 1280x720 @ 30FPS
32
+ RealSenseCameraConfig("0123456789", 60, 640, 480) # 640x480 @ 60FPS
33
+
34
+ # Advanced configurations
35
+ RealSenseCameraConfig("0123456789", 30, 640, 480, use_depth=True) # With depth sensing
36
+ RealSenseCameraConfig("0123456789", 30, 640, 480, rotation=Cv2Rotation.ROTATE_90) # With 90° rotation
37
+ ```
38
+
39
+ Attributes:
40
+ fps: Requested frames per second for the color stream.
41
+ width: Requested frame width in pixels for the color stream.
42
+ height: Requested frame height in pixels for the color stream.
43
+ serial_number_or_name: Unique serial number or human-readable name to identify the camera.
44
+ color_mode: Color mode for image output (RGB or BGR). Defaults to RGB.
45
+ use_depth: Whether to enable depth stream. Defaults to False.
46
+ rotation: Image rotation setting (0°, 90°, 180°, or 270°). Defaults to no rotation.
47
+ warmup_s: Time reading frames before returning from connect (in seconds)
48
+
49
+ Note:
50
+ - Either name or serial_number must be specified.
51
+ - Depth stream configuration (if enabled) will use the same FPS as the color stream.
52
+ - The actual resolution and FPS may be adjusted by the camera to the nearest supported mode.
53
+ - For `fps`, `width` and `height`, either all of them need to be set, or none of them.
54
+ """
55
+
56
+ serial_number_or_name: str
57
+ color_mode: ColorMode = ColorMode.RGB
58
+ use_depth: bool = False
59
+ rotation: Cv2Rotation = Cv2Rotation.NO_ROTATION
60
+ warmup_s: int = 1
61
+
62
+ def __post_init__(self) -> None:
63
+ if self.color_mode not in (ColorMode.RGB, ColorMode.BGR):
64
+ raise ValueError(
65
+ f"`color_mode` is expected to be {ColorMode.RGB.value} or {ColorMode.BGR.value}, but {self.color_mode} is provided."
66
+ )
67
+
68
+ if self.rotation not in (
69
+ Cv2Rotation.NO_ROTATION,
70
+ Cv2Rotation.ROTATE_90,
71
+ Cv2Rotation.ROTATE_180,
72
+ Cv2Rotation.ROTATE_270,
73
+ ):
74
+ raise ValueError(
75
+ f"`rotation` is expected to be in {(Cv2Rotation.NO_ROTATION, Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_180, Cv2Rotation.ROTATE_270)}, but {self.rotation} is provided."
76
+ )
77
+
78
+ values = (self.fps, self.width, self.height)
79
+ if any(v is not None for v in values) and any(v is None for v in values):
80
+ raise ValueError(
81
+ "For `fps`, `width` and `height`, either all of them need to be set, or none of them."
82
+ )
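The `__post_init__` above enforces the all-or-none rule for `fps`, `width`, and `height`. A small illustration, assuming the package `__init__.py` re-exports the config class (serial number is a placeholder):

```python
from lerobot.cameras.realsense import RealSenseCameraConfig

RealSenseCameraConfig(serial_number_or_name="0123456789")                                  # OK: device defaults
RealSenseCameraConfig(serial_number_or_name="0123456789", fps=30, width=640, height=480)   # OK: all three set

try:
    RealSenseCameraConfig(serial_number_or_name="0123456789", fps=30)  # only fps set
except ValueError as err:
    print(err)  # "For `fps`, `width` and `height`, either all of them need to be set, or none of them."
```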
lerobot/src/lerobot/cameras/zmq/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from .camera_zmq import ZMQCamera
18
+ from .configuration_zmq import ZMQCameraConfig
19
+
20
+ __all__ = ["ZMQCamera", "ZMQCameraConfig"]
lerobot/src/lerobot/cameras/zmq/camera_zmq.py ADDED
@@ -0,0 +1,235 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ ZMQCamera - Captures frames from remote cameras via ZeroMQ using a JSON protocol in the
19
+ following format:
20
+ {
21
+ "timestamps": {"camera_name": float},
22
+ "images": {"camera_name": "<base64-jpeg>"}
23
+ }
24
+ """
25
+
26
+ import base64
27
+ import json
28
+ import logging
29
+ import time
30
+ from threading import Event, Lock, Thread
31
+ from typing import Any
32
+
33
+ import cv2
34
+ import numpy as np
35
+ from numpy.typing import NDArray
36
+
37
+ from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
38
+
39
+ from ..camera import Camera
40
+ from ..configs import ColorMode
41
+ from .configuration_zmq import ZMQCameraConfig
42
+
43
+ logger = logging.getLogger(__name__)
44
+
45
+
46
+ class ZMQCamera(Camera):
47
+ """
48
+ Example usage:
49
+ ```python
50
+ from lerobot.cameras.zmq import ZMQCamera, ZMQCameraConfig
51
+
52
+ config = ZMQCameraConfig(server_address="192.168.123.164", port=5555, camera_name="head_camera")
53
+ camera = ZMQCamera(config)
54
+ camera.connect()
55
+ frame = camera.read()
56
+ camera.disconnect()
57
+ ```
58
+ """
59
+
60
+ def __init__(self, config: ZMQCameraConfig):
61
+ super().__init__(config)
62
+ import zmq
63
+
64
+ self.config = config
65
+ self.server_address = config.server_address
66
+ self.port = config.port
67
+ self.camera_name = config.camera_name
68
+ self.color_mode = config.color_mode
69
+ self.timeout_ms = config.timeout_ms
70
+
71
+ self.context: zmq.Context | None = None
72
+ self.socket: zmq.Socket | None = None
73
+ self._connected = False
74
+
75
+ self.thread: Thread | None = None
76
+ self.stop_event: Event | None = None
77
+ self.frame_lock: Lock = Lock()
78
+ self.latest_frame: NDArray[Any] | None = None
79
+ self.new_frame_event: Event = Event()
80
+
81
+ def __str__(self) -> str:
82
+ return f"ZMQCamera({self.camera_name}@{self.server_address}:{self.port})"
83
+
84
+ @property
85
+ def is_connected(self) -> bool:
86
+ return self._connected and self.context is not None and self.socket is not None
87
+
88
+ def connect(self, warmup: bool = True) -> None:
89
+ """Connect to ZMQ camera server."""
90
+ if self.is_connected:
91
+ raise DeviceAlreadyConnectedError(f"{self} is already connected.")
92
+
93
+ logger.info(f"Connecting to {self}...")
94
+
95
+ try:
96
+ import zmq
97
+
98
+ self.context = zmq.Context()
99
+ self.socket = self.context.socket(zmq.SUB)
100
+ self.socket.setsockopt_string(zmq.SUBSCRIBE, "")
101
+ self.socket.setsockopt(zmq.RCVTIMEO, self.timeout_ms)
102
+ self.socket.setsockopt(zmq.CONFLATE, True)
103
+ self.socket.connect(f"tcp://{self.server_address}:{self.port}")
104
+ self._connected = True
105
+
106
+ # Auto-detect resolution
107
+ if self.width is None or self.height is None:
108
+ h, w = self.read().shape[:2]
109
+ self.height = h
110
+ self.width = w
111
+ logger.info(f"{self} resolution: {w}x{h}")
112
+
113
+ logger.info(f"{self} connected.")
114
+
115
+ if warmup:
116
+ time.sleep(0.1)
117
+
118
+ except Exception as e:
119
+ self._cleanup()
120
+ raise RuntimeError(f"Failed to connect to {self}: {e}") from e
121
+
122
+ def _cleanup(self):
123
+ """Clean up ZMQ resources."""
124
+ self._connected = False
125
+ if self.socket:
126
+ self.socket.close()
127
+ self.socket = None
128
+ if self.context:
129
+ self.context.term()
130
+ self.context = None
131
+
132
+ @staticmethod
133
+ def find_cameras() -> list[dict[str, Any]]:
134
+ """ZMQ cameras require manual configuration (server address/port)."""
135
+ return []
136
+
137
+ def read(self, color_mode: ColorMode | None = None) -> NDArray[Any]:
138
+ """
139
+ Read a single frame from the ZMQ camera.
140
+
141
+ Returns:
142
+ np.ndarray: Decoded frame (height, width, 3)
143
+ """
144
+ if not self.is_connected or self.socket is None:
145
+ raise DeviceNotConnectedError(f"{self} is not connected.")
146
+
147
+ try:
148
+ message = self.socket.recv_string()
149
+ except Exception as e:
150
+ if type(e).__name__ == "Again":
151
+ raise TimeoutError(f"{self} timeout after {self.timeout_ms}ms") from e
152
+ raise
153
+
154
+ # Decode JSON message
155
+ data = json.loads(message)
156
+
157
+ if "images" not in data:
158
+ raise RuntimeError(f"{self} invalid message: missing 'images' key")
159
+
160
+ images = data["images"]
161
+
162
+ # Get image by camera name or first available
163
+ if self.camera_name in images:
164
+ img_b64 = images[self.camera_name]
165
+ elif images:
166
+ img_b64 = next(iter(images.values()))
167
+ else:
168
+ raise RuntimeError(f"{self} no images in message")
169
+
170
+ # Decode base64 JPEG
171
+ img_bytes = base64.b64decode(img_b64)
172
+ frame = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), cv2.IMREAD_COLOR)
173
+
174
+ if frame is None:
175
+ raise RuntimeError(f"{self} failed to decode image")
176
+
177
+ return frame
178
+
179
+ def _read_loop(self) -> None:
180
+ while self.stop_event and not self.stop_event.is_set():
181
+ try:
182
+ frame = self.read()
183
+ with self.frame_lock:
184
+ self.latest_frame = frame
185
+ self.new_frame_event.set()
186
+ except DeviceNotConnectedError:
187
+ break
188
+ except TimeoutError:
189
+ pass
190
+ except Exception as e:
191
+ logger.warning(f"Read error: {e}")
192
+
193
+ def _start_read_thread(self) -> None:
194
+ if self.thread and self.thread.is_alive():
195
+ return
196
+ self.stop_event = Event()
197
+ self.thread = Thread(target=self._read_loop, daemon=True)
198
+ self.thread.start()
199
+
200
+ def _stop_read_thread(self) -> None:
201
+ if self.stop_event:
202
+ self.stop_event.set()
203
+ if self.thread and self.thread.is_alive():
204
+ self.thread.join(timeout=2.0)
205
+ self.thread = None
206
+ self.stop_event = None
207
+
208
+ def async_read(self, timeout_ms: float = 10000) -> NDArray[Any]:
209
+ """Read latest frame asynchronously (non-blocking)."""
210
+ if not self.is_connected:
211
+ raise DeviceNotConnectedError(f"{self} is not connected.")
212
+
213
+ if not self.thread or not self.thread.is_alive():
214
+ self._start_read_thread()
215
+
216
+ if not self.new_frame_event.wait(timeout=timeout_ms / 1000.0):
217
+ raise TimeoutError(f"{self} async_read timeout after {timeout_ms}ms")
218
+
219
+ with self.frame_lock:
220
+ frame = self.latest_frame
221
+ self.new_frame_event.clear()
222
+
223
+ if frame is None:
224
+ raise RuntimeError(f"{self} no frame available")
225
+
226
+ return frame
227
+
228
+ def disconnect(self) -> None:
229
+ """Disconnect from ZMQ camera."""
230
+ if not self.is_connected and not self.thread:
231
+ raise DeviceNotConnectedError(f"{self} not connected.")
232
+
233
+ self._stop_read_thread()
234
+ self._cleanup()
235
+ logger.info(f"{self} disconnected.")
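For reference, a publisher message that satisfies `read()` above can be built with plain `json`/`base64`/`cv2`, following the protocol sketched in the module docstring (the camera name and frame are placeholders):

```python
import base64
import json
import time

import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder frame
ok, buf = cv2.imencode(".jpg", frame)
message = json.dumps(
    {
        "timestamps": {"head_camera": time.time()},
        "images": {"head_camera": base64.b64encode(buf).decode("utf-8")},
    }
)
# Sending `message` on a ZMQ PUB socket is all ZMQCamera.read() needs: it looks up the
# entry matching `camera_name` (or falls back to the first one) and decodes the JPEG.
```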
lerobot/src/lerobot/cameras/zmq/configuration_zmq.py ADDED
@@ -0,0 +1,46 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from dataclasses import dataclass
18
+
19
+ from ..configs import CameraConfig, ColorMode
20
+
21
+ __all__ = ["ZMQCameraConfig", "ColorMode"]
22
+
23
+
24
+ @CameraConfig.register_subclass("zmq")
25
+ @dataclass
26
+ class ZMQCameraConfig(CameraConfig):
27
+ server_address: str
28
+ port: int = 5555
29
+ camera_name: str = "zmq_camera"
30
+ color_mode: ColorMode = ColorMode.RGB
31
+ timeout_ms: int = 5000
32
+
33
+ def __post_init__(self) -> None:
34
+ if self.color_mode not in (ColorMode.RGB, ColorMode.BGR):
35
+ raise ValueError(
36
+ f"`color_mode` is expected to be {ColorMode.RGB.value} or {ColorMode.BGR.value}, but {self.color_mode} is provided."
37
+ )
38
+
39
+ if self.timeout_ms <= 0:
40
+ raise ValueError(f"`timeout_ms` must be positive, but {self.timeout_ms} is provided.")
41
+
42
+ if not self.server_address:
43
+ raise ValueError("`server_address` cannot be empty.")
44
+
45
+ if self.port <= 0 or self.port > 65535:
46
+ raise ValueError(f"`port` must be between 1 and 65535, but {self.port} is provided.")
lerobot/src/lerobot/cameras/zmq/image_server.py ADDED
@@ -0,0 +1,114 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ Streams camera images over ZMQ.
19
+ Uses lerobot's OpenCVCamera for capture, encodes images to base64 and sends them over ZMQ.
20
+ """
21
+
22
+ import base64
23
+ import contextlib
24
+ import json
25
+ import logging
26
+ import time
27
+ from collections import deque
28
+
29
+ import cv2
30
+ import numpy as np
31
+ import zmq
32
+
33
+ from lerobot.cameras.configs import ColorMode
34
+ from lerobot.cameras.opencv import OpenCVCamera, OpenCVCameraConfig
35
+
36
+ logger = logging.getLogger(__name__)
37
+
38
+
39
+ def encode_image(image: np.ndarray, quality: int = 80) -> str:
40
+ """Encode RGB image to base64 JPEG string."""
41
+ _, buffer = cv2.imencode(".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
42
+ return base64.b64encode(buffer).decode("utf-8")
43
+
44
+
45
+ class ImageServer:
46
+ def __init__(self, config: dict, port: int = 5555):
47
+ self.fps = config.get("fps", 30)
48
+ self.cameras: dict[str, OpenCVCamera] = {}
49
+
50
+ for name, cfg in config.get("cameras", {}).items():
51
+ shape = cfg.get("shape", [480, 640])
52
+ cam_config = OpenCVCameraConfig(
53
+ index_or_path=cfg.get("device_id", 0),
54
+ fps=self.fps,
55
+ width=shape[1],
56
+ height=shape[0],
57
+ color_mode=ColorMode.RGB,
58
+ )
59
+ camera = OpenCVCamera(cam_config)
60
+ camera.connect()
61
+ self.cameras[name] = camera
62
+ logger.info(f"Camera {name}: {shape[1]}x{shape[0]}")
63
+
64
+ # ZMQ PUB socket
65
+ self.context = zmq.Context()
66
+ self.socket = self.context.socket(zmq.PUB)
67
+ self.socket.setsockopt(zmq.SNDHWM, 20)
68
+ self.socket.setsockopt(zmq.LINGER, 0)
69
+ self.socket.bind(f"tcp://*:{port}")
70
+
71
+ logger.info(f"ImageServer running on port {port}")
72
+
73
+ def run(self):
74
+ frame_count = 0
75
+ frame_times = deque(maxlen=60)
76
+
77
+ try:
78
+ while True:
79
+ t0 = time.time()
80
+
81
+ # Build message
82
+ message = {"timestamps": {}, "images": {}}
83
+ for name, cam in self.cameras.items():
84
+ frame = cam.read() # Returns RGB
85
+ message["timestamps"][name] = time.time()
86
+ message["images"][name] = encode_image(frame)
87
+
88
+ # Send as JSON string (suppress if buffer full)
89
+ with contextlib.suppress(zmq.Again):
90
+ self.socket.send_string(json.dumps(message), zmq.NOBLOCK)
91
+
92
+ frame_count += 1
93
+ frame_times.append(time.time() - t0)
94
+
95
+ if frame_count % 60 == 0:
96
+ logger.debug(f"FPS: {len(frame_times) / sum(frame_times):.1f}")
97
+
98
+ sleep = (1.0 / self.fps) - (time.time() - t0)
99
+ if sleep > 0:
100
+ time.sleep(sleep)
101
+
102
+ except KeyboardInterrupt:
103
+ pass
104
+ finally:
105
+ for cam in self.cameras.values():
106
+ cam.disconnect()
107
+ self.socket.close()
108
+ self.context.term()
109
+
110
+
111
+ if __name__ == "__main__":
112
+ logging.basicConfig(level=logging.INFO)
113
+ config = {"fps": 30, "cameras": {"head_camera": {"device_id": 4, "shape": [480, 640]}}}
114
+ ImageServer(config, port=5555).run()
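The `config` dict in the `__main__` block above is the only wiring the server needs. A sketch of running `ImageServer` programmatically (device id, resolution, and port are placeholders), with `ZMQCamera` from this package as the matching client:

```python
from lerobot.cameras.zmq.image_server import ImageServer

# Keys mirror the __main__ block above; "shape" is [height, width].
server_config = {
    "fps": 30,
    "cameras": {
        "head_camera": {"device_id": 0, "shape": [480, 640]},
    },
}
ImageServer(server_config, port=5555).run()  # blocks until Ctrl+C, then releases the cameras

# On another machine, the ZMQCamera example in camera_zmq.py connects to this stream with
# ZMQCameraConfig(server_address="<server-ip>", port=5555, camera_name="head_camera").
```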
lerobot/src/lerobot/data_processing/sarm_annotations/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
lerobot/src/lerobot/data_processing/sarm_annotations/subtask_annotation.py ADDED
@@ -0,0 +1,1202 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ SARM Subtask Annotation using local GPU (Qwen3-VL).
19
+
20
+ This script implements the annotation approach from the SARM paper using local GPU inference:
21
+ "SARM: Stage-Aware Reward Modeling for Long Horizon Robot Manipulation"
22
+ Paper: https://arxiv.org/pdf/2509.25358
23
+
24
+ What it does:
25
+ 1. Takes videos from a LeRobot dataset
26
+ 2. Uses Qwen3-VL running locally on GPU to identify when subtasks occur
27
+ 3. Saves subtask timestamps to the dataset metadata
28
+ 4. Optionally pushes the annotated dataset to HuggingFace Hub
29
+
30
+ SARM trains reward models that predict:
31
+ - Stage: Which subtask is currently being executed (discrete classification)
32
+ - Progress: How far along the subtask we are (continuous 0-1)
33
+
34
+ Supports three annotation modes:
35
+ 1. No annotations (no args): Auto-creates single sparse "task" stage covering full episode.
36
+ Use with SARM config annotation_mode="single_stage" for simple tasks.
37
+
38
+ 2. Dense-only (--dense-only --dense-subtasks): Dense annotations from VLM, auto-generated
39
+ single sparse "task" stage. Use with annotation_mode="dense_only".
40
+
41
+ 3. Dual mode (--sparse-subtasks + --dense-subtasks): Both sparse and dense annotations
42
+ from VLM. Use with annotation_mode="dual".
43
+
44
+ Requirements:
45
+ - GPU with sufficient VRAM (16GB+ recommended for 30B model)
46
+ - `pip install transformers, torch, qwen-vl-utils`
47
+
48
+ Run with:
49
+ ```bash
50
+ python examples/dataset_annotation/subtask_annotation.py \
51
+ --repo-id your-username/your-dataset \
52
+ --sparse-subtasks "Do ..." \
53
+ --dense-subtasks "Do task 1, Do task 2, Do task 3" \
54
+ --video-key observation.images.base \
55
+ --push-to-hub
56
+ ```
57
+ """
58
+
59
+ import argparse
60
+ import json
61
+ import multiprocessing as mp
62
+ import random
63
+ import re
64
+ import subprocess
65
+ import tempfile
66
+ import textwrap
67
+ import time
68
+ from concurrent.futures import ProcessPoolExecutor, as_completed
69
+ from pathlib import Path
70
+ from typing import Any
71
+
72
+ import cv2
73
+ import numpy as np
74
+ import pandas as pd
75
+ import torch
76
+ from pydantic import BaseModel, Field
77
+ from transformers import AutoProcessor, Qwen3VLMoeForConditionalGeneration
78
+
79
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
80
+
81
+
82
+ # Pydantic Models for SARM Subtask Annotation
83
+ class Timestamp(BaseModel):
84
+ """Timestamp in MM:SS or SS format"""
85
+
86
+ start: str = Field(description="Start timestamp (MM:SS or just seconds)")
87
+ end: str = Field(description="End timestamp (MM:SS or just seconds)")
88
+
89
+
90
+ class Subtask(BaseModel):
91
+ """Individual subtask/stage - must use EXACT names from provided list"""
92
+
93
+ name: str = Field(description="Subtask name - MUST match one from the predefined list exactly")
94
+ timestamps: Timestamp
95
+
96
+
97
+ class SubtaskAnnotation(BaseModel):
98
+ """Complete annotation for a robot manipulation episode"""
99
+
100
+ subtasks: list[Subtask] = Field(description="List of all subtasks in temporal order")
101
+
102
+
103
+ def compute_temporal_proportions(
104
+ annotations: dict[int, Any], fps: int = 30, subtask_order: list[str] | None = None
105
+ ) -> dict[str, float]:
106
+ """
107
+ Compute dataset-level temporal proportions (priors) for each subtask.
108
+
109
+ Implements SARM Paper Formula (1): ᾱ_k = (1/M) × Σ_i (L_{i,k} / T_i)
110
+
111
+ Args:
112
+ annotations: Dict mapping episode index to SubtaskAnnotation object.
113
+ fps: Frames per second (unused, kept for API compatibility)
114
+ subtask_order: Optional list defining the output order of subtasks.
115
+
116
+ Returns:
117
+ Dict mapping subtask name to its temporal proportion (ᾱ_k), ordered by subtask_order if provided.
118
+ """
119
+ subtask_proportions: dict[str, list[float]] = {}
120
+
121
+ for annotation in annotations.values():
122
+ total_duration = 0
123
+ durations: dict[str, int] = {}
124
+
125
+ for subtask in annotation.subtasks:
126
+ start_parts = subtask.timestamps.start.split(":")
127
+ end_parts = subtask.timestamps.end.split(":")
128
+
129
+ start_seconds = (
130
+ int(start_parts[0]) * 60 + int(start_parts[1])
131
+ if len(start_parts) == 2
132
+ else int(start_parts[0])
133
+ )
134
+ end_seconds = (
135
+ int(end_parts[0]) * 60 + int(end_parts[1]) if len(end_parts) == 2 else int(end_parts[0])
136
+ )
137
+
138
+ duration = end_seconds - start_seconds
139
+ durations[subtask.name] = duration
140
+ total_duration += duration
141
+
142
+ if total_duration > 0:
143
+ for name, duration in durations.items():
144
+ if name not in subtask_proportions:
145
+ subtask_proportions[name] = []
146
+ subtask_proportions[name].append(duration / total_duration)
147
+
148
+ if not subtask_proportions:
149
+ return {}
150
+
151
+ avg_proportions = {name: sum(props) / len(props) for name, props in subtask_proportions.items()}
152
+
153
+ total = sum(avg_proportions.values())
154
+ if total > 0:
155
+ avg_proportions = {name: prop / total for name, prop in avg_proportions.items()}
156
+
157
+ # Reorder according to subtask_order if provided
158
+ if subtask_order:
159
+ avg_proportions = {
160
+ name: avg_proportions.get(name, 0.0) for name in subtask_order if name in avg_proportions
161
+ }
162
+
163
+ return avg_proportions
164
+
165
+
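As a sanity check of formula (1), here is a tiny worked example with two hypothetical episodes (illustrative only, not part of the committed file; subtask names and timestamps are made up):

```python
# Episode 0: "grasp" 4s, "place" 6s  -> per-episode proportions 0.4 / 0.6
# Episode 1: "grasp" 2s, "place" 8s  -> per-episode proportions 0.2 / 0.8
# Averaging over M=2 episodes: alpha_grasp = (0.4 + 0.2) / 2 = 0.3, alpha_place = (0.6 + 0.8) / 2 = 0.7
ann0 = SubtaskAnnotation(
    subtasks=[
        Subtask(name="grasp", timestamps=Timestamp(start="00:00", end="00:04")),
        Subtask(name="place", timestamps=Timestamp(start="00:04", end="00:10")),
    ]
)
ann1 = SubtaskAnnotation(
    subtasks=[
        Subtask(name="grasp", timestamps=Timestamp(start="00:00", end="00:02")),
        Subtask(name="place", timestamps=Timestamp(start="00:02", end="00:10")),
    ]
)
print(compute_temporal_proportions({0: ann0, 1: ann1}, subtask_order=["grasp", "place"]))
# {'grasp': 0.3, 'place': 0.7}
```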
166
+ def create_sarm_prompt(subtask_list: list[str]) -> str:
167
+ subtask_str = "\n".join([f" - {name}" for name in subtask_list])
168
+
169
+ return textwrap.dedent(f"""\
170
+ # Role
171
+ You are a Robotics Vision System specializing in temporal action localization for robot manipulation. Your job is to segment a single demonstration video into distinct, non-overlapping atomic actions from a fixed subtask list.
172
+
173
+ # Subtask Label Set (Closed Vocabulary)
174
+ You must strictly identify the video segments using ONLY the following labels. Do not create new labels or modify existing ones:
175
+
176
+ [
177
+ {subtask_str}
178
+ ]
179
+
180
+ The video shows one successful execution of all subtasks in a logical order.
181
+
182
+ # Ground-Truth Semantics (Very Important)
183
+ Use **visual state changes** to define when a subtask starts and ends. Do NOT assume equal durations for the subtasks.
184
+
185
+ - A subtask **starts** at the first frame where the robot's motion clearly initiates that subtask.
186
+ - A subtask **ends** at the first frame where that specific action is visually completed and the manipulated object reaches a temporary, stable configuration.
187
+
188
+ If there are short pauses or micro-motions that don't clearly correspond to a new subtask, they belong to the **current** subtask.
189
+
190
+ # Hard Constraints & Logic
191
+ 1. **Continuous Coverage (No Gaps):**
192
+ - The entire video duration from "00:00" to the final timestamp must be covered by subtasks.
193
+ - There can be no gaps between subtasks.
194
+ - If there is any idle or ambiguous time between clear actions, extend the *preceding* subtask to cover it.
195
+
196
+ 2. **Boundary Consistency:**
197
+ - The `"end"` timestamp of one subtask must be exactly equal to the `"start"` timestamp of the next subtask.
198
+ - Boundaries must coincide with a real visual state transition, not just a convenient time split.
199
+
200
+ 3. **Chronological Order, One Occurrence Each:**
201
+ - This is a single successful demonstration.
202
+ - Each subtask from the vocabulary appears **exactly once**, in the correct logical order.
203
+ - **Durations may be very different** between subtasks. Never assume they are similar lengths. Base all boundaries only on the video.
204
+
205
+ 4. **Reject Uniform Segmentation (Important):**
206
+ - Do NOT simply divide the video into equal or nearly equal time chunks.
207
+ - If your boundaries would result in subtasks with similar durations (e.g. all around 5 seconds), treat this as evidence that your segmentation is wrong and refine the boundaries.
208
+ - Only use nearly equal durations if the video truly shows each subtask taking the same amount of time (this is very rare).
209
+
210
+ 5. **Timestamps:**
211
+ - Timestamps must be in `"MM:SS"` format.
212
+ - The first subtask always starts at `"00:00"`.
213
+ - The last subtask ends at the final visible frame of the video.
214
+
215
+ # Step 1 — Textual Timeline (must do this first)
216
+ First, write an extensive and detailed textual timeline describing what happens in the video with approximate timestamps.
217
+ For each subtask, include:
218
+ - its name
219
+ - an approximate start and end time,
220
+ - a description of the visual event at the boundary (e.g. "shirt fully folded to the left", "robot rotates folded shirt 90 degrees").
221
+
222
+ Format this as a bullet list.
223
+
224
+ # Step 2 — JSON Output (final answer)
225
+ After the textual timeline, output **only** valid JSON with this structure.
226
+ The JSON **must** be consistent with the textual timeline above:
227
+
228
+ {{
229
+ "subtasks": [
230
+ {{
231
+ "name": "EXACT_NAME_FROM_LIST",
232
+ "timestamps": {{
233
+ "start": "MM:SS",
234
+ "end": "MM:SS"
235
+ }}
236
+ }},
237
+ {{
238
+ "name": "EXACT_NAME_FROM_LIST",
239
+ "timestamps": {{
240
+ "start": "MM:SS",
241
+ "end": "MM:SS"
242
+ }}
243
+ }}
244
+ ]
245
+ }}
246
+
247
+ Do not add any extra keys to the JSON.
248
+ """)
249
+
250
+
251
+ class VideoAnnotator:
252
+ """Annotates robot manipulation videos using local Qwen3-VL model on GPU"""
253
+
254
+ def __init__(
255
+ self,
256
+ subtask_list: list[str],
257
+ model_name: str = "Qwen/Qwen3-VL-30B-A3B-Instruct",
258
+ device: str = "cuda",
259
+ torch_dtype: torch.dtype = torch.bfloat16,
260
+ model: Qwen3VLMoeForConditionalGeneration | None = None, # noqa: F821
261
+ processor: AutoProcessor | None = None, # noqa: F821
262
+ ):
263
+ """
264
+ Initialize the video annotator with local model.
265
+
266
+ Args:
267
+ subtask_list: List of allowed subtask names (for consistency)
268
+ model_name: Hugging Face model name (default: Qwen/Qwen3-VL-30B-A3B-Instruct)
269
+ device: Device to use (cuda, cpu)
270
+ torch_dtype: Data type for model (bfloat16, float16, float32)
271
+ model: Pre-loaded model instance (optional, to share between annotators)
272
+ processor: Pre-loaded processor instance (optional, to share between annotators)
273
+ """
274
+ self.subtask_list = subtask_list
275
+ self.prompt = create_sarm_prompt(subtask_list)
276
+ self.device = device
277
+
278
+ # Use provided model/processor or load new ones
279
+ if model is not None and processor is not None:
280
+ self.model = model
281
+ self.processor = processor
282
+ print(f"Using shared model on {device}")
283
+ else:
284
+ from transformers import AutoProcessor, Qwen3VLMoeForConditionalGeneration
285
+
286
+ print(f"Loading model: {model_name}...")
287
+
288
+ self.model = Qwen3VLMoeForConditionalGeneration.from_pretrained(
289
+ model_name, torch_dtype=torch_dtype, device_map=device, trust_remote_code=True
290
+ )
291
+
292
+ self.processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
293
+
294
+ print(f"Model loaded successfully on {device}")
295
+
296
+ def extract_episode_segment(
297
+ self, file_path: Path, start_timestamp: float, end_timestamp: float, target_fps: int = 1
298
+ ) -> Path:
299
+ """
300
+ Extract a specific episode segment from concatenated video.
301
+ Uses minimal compression to preserve quality for local inference.
302
+
303
+ Args:
304
+ file_path: Path to the concatenated video file
305
+ start_timestamp: Starting timestamp in seconds (within this video file)
306
+ end_timestamp: Ending timestamp in seconds (within this video file)
307
+ target_fps: Target FPS (default: 1 for faster processing)
308
+
309
+ Returns:
310
+ Path to extracted video file
311
+ """
312
+ # Create temporary file for extracted video
313
+ with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp_file:
314
+ tmp_path = Path(tmp_file.name)
315
+
316
+ try:
317
+ # Check if ffmpeg is available
318
+ subprocess.run( # nosec B607
319
+ ["ffmpeg", "-version"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True
320
+ )
321
+ except (subprocess.CalledProcessError, FileNotFoundError) as err:
322
+ raise RuntimeError("ffmpeg not found, cannot extract episode segment") from err
323
+
324
+ try:
325
+ # Calculate duration
326
+ duration = end_timestamp - start_timestamp
327
+
328
+ print(f"Extracting episode: {start_timestamp:.1f}s-{end_timestamp:.1f}s ({duration:.1f}s)")
329
+
330
+ # Use ffmpeg to extract segment with minimal quality loss
331
+ cmd = [
332
+ "ffmpeg",
333
+ "-i",
334
+ str(file_path),
335
+ "-ss",
336
+ str(start_timestamp),
337
+ "-t",
338
+ str(duration),
339
+ "-r",
340
+ str(target_fps),
341
+ "-c:v",
342
+ "libx264",
343
+ "-preset",
344
+ "ultrafast",
345
+ "-crf",
346
+ "23",
347
+ "-an",
348
+ "-y",
349
+ str(tmp_path),
350
+ ]
351
+
352
+ subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
353
+
354
+ # Verify the output file was created and is not empty
355
+ if not tmp_path.exists() or tmp_path.stat().st_size == 0:
356
+ print("Video extraction failed (0 bytes) - skipping episode")
357
+ if tmp_path.exists():
358
+ tmp_path.unlink()
359
+ raise RuntimeError("FFmpeg produced empty video file")
360
+
361
+ # Show extraction results
362
+ file_size_mb = tmp_path.stat().st_size / (1024 * 1024)
363
+
364
+ # Fail if file is too small (< 100KB likely means extraction failed)
365
+ if file_size_mb < 0.1:
366
+ print(f"Extracted video too small ({file_size_mb:.2f}MB) - skipping episode")
367
+ tmp_path.unlink()
368
+ raise RuntimeError(f"Video extraction produced invalid file ({file_size_mb:.2f}MB)")
369
+
370
+ print(f"Extracted: {file_size_mb:.1f}MB ({target_fps} FPS)")
371
+
372
+ return tmp_path
373
+
374
+ except subprocess.CalledProcessError as e:
375
+ raise RuntimeError(f"ffmpeg failed ({e})") from e
376
+
377
+ def annotate(
378
+ self,
379
+ file_path: str | Path,
380
+ fps: int,
381
+ start_timestamp: float = 0.0,
382
+ end_timestamp: float | None = None,
383
+ max_retries: int = 3,
384
+ ) -> SubtaskAnnotation:
385
+ """Annotate a video segment using local GPU."""
386
+ from qwen_vl_utils import process_vision_info
387
+
388
+ file_path = Path(file_path)
389
+
390
+ if end_timestamp is None:
391
+ cap = cv2.VideoCapture(str(file_path))
392
+ end_timestamp = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) / (cap.get(cv2.CAP_PROP_FPS) or 1)
393
+ cap.release()
394
+
395
+ duration = end_timestamp - start_timestamp
396
+ duration_str = f"{int(duration // 60):02d}:{int(duration % 60):02d}"
397
+
398
+ extracted_path = self.extract_episode_segment(file_path, start_timestamp, end_timestamp, 1)
399
+ is_extracted = extracted_path != file_path
400
+
401
+ try:
402
+ messages = [
403
+ {"role": "system", "content": [{"type": "text", "text": self.prompt}]},
404
+ {
405
+ "role": "user",
406
+ "content": [
407
+ {"type": "video", "video": str(extracted_path), "fps": 1.0},
408
+ {
409
+ "type": "text",
410
+ "text": f"Video is {duration_str} (~{duration:.1f}s). Follow instructions.",
411
+ },
412
+ ],
413
+ },
414
+ ]
415
+
416
+ for attempt in range(max_retries):
417
+ try:
418
+ text = self.processor.apply_chat_template(
419
+ messages, tokenize=False, add_generation_prompt=True
420
+ )
421
+ image_inputs, video_inputs = process_vision_info(messages)
422
+ inputs = self.processor(
423
+ text=[text],
424
+ images=image_inputs,
425
+ videos=video_inputs,
426
+ padding=True,
427
+ return_tensors="pt",
428
+ ).to(self.device)
429
+
430
+ with torch.no_grad():
431
+ generated_ids = self.model.generate(
432
+ **inputs, max_new_tokens=1024, do_sample=True, temperature=0.7
433
+ )
434
+
435
+ response = self.processor.batch_decode(
436
+ [out[len(inp) :] for inp, out in zip(inputs.input_ids, generated_ids, strict=True)],
437
+ skip_special_tokens=True,
438
+ )[0].strip()
439
+
440
+ # Extract JSON
441
+ if "```json" in response:
442
+ response = response.split("```json")[1].split("```")[0]
443
+ elif "```" in response:
444
+ response = response.split("```")[1].split("```")[0]
445
+
446
+ try:
447
+ return SubtaskAnnotation.model_validate(json.loads(response))
448
+ except json.JSONDecodeError:
449
+ match = re.search(r"\{.*\}", response, re.DOTALL)
450
+ if match:
451
+ return SubtaskAnnotation.model_validate(json.loads(match.group()))
452
+ raise ValueError("No JSON found") from None
453
+ except Exception as e:
454
+ if attempt == max_retries - 1:
455
+ raise RuntimeError(f"Failed after {max_retries} attempts") from e
456
+ time.sleep(1)
457
+ finally:
458
+ if is_extracted and extracted_path.exists():
459
+ extracted_path.unlink()
460
+
461
+
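Before the helpers below, a hedged usage sketch of `VideoAnnotator.annotate()`; the video path, episode boundaries, and subtask names are placeholders, and constructing the annotator loads the full model, so a sufficiently large GPU is assumed:

```python
annotator = VideoAnnotator(subtask_list=["grasp object", "place object"], device="cuda")
annotation = annotator.annotate(
    file_path="videos/observation.images.base/file-000.mp4",  # placeholder concatenated video
    fps=30,
    start_timestamp=12.0,  # where this episode starts inside the video file
    end_timestamp=47.5,    # where it ends
)
for subtask in annotation.subtasks:
    print(subtask.name, subtask.timestamps.start, "->", subtask.timestamps.end)
```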
462
+ def display_annotation(annotation: SubtaskAnnotation, episode_idx: int, fps: int, prefix: str = ""):
463
+ """Display annotation summary."""
464
+ subtask_summary = ", ".join(
465
+ f"{s.name}({s.timestamps.start}-{s.timestamps.end})" for s in annotation.subtasks
466
+ )
467
+ print(f"Episode {episode_idx} {prefix}: {len(annotation.subtasks)} subtasks - {subtask_summary}")
468
+
469
+
470
+ def timestamp_to_seconds(timestamp: str) -> float:
471
+ """Convert MM:SS or SS timestamp to seconds"""
472
+ parts = timestamp.split(":")
473
+ if len(parts) == 2:
474
+ return int(parts[0]) * 60 + int(parts[1])
475
+ else:
476
+ return int(parts[0])
477
+
478
+
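A quick check of the conversion above (illustrative only):

```python
assert timestamp_to_seconds("01:23") == 83  # MM:SS -> 1 * 60 + 23
assert timestamp_to_seconds("45") == 45     # bare seconds
```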
479
+ def extract_frame(video_path: Path, timestamp: float) -> np.ndarray | None:
480
+ """Extract a single frame from video at given timestamp."""
481
+ cap = cv2.VideoCapture(str(video_path))
482
+ if not cap.isOpened():
483
+ return None
484
+ cap.set(cv2.CAP_PROP_POS_MSEC, timestamp * 1000)
485
+ ret, frame = cap.read()
486
+ cap.release()
487
+ return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) if ret else None
488
+
489
+
490
+ def draw_timeline(ax, subtasks, total_duration, colors):
491
+ """Draw a timeline with color-coded subtask segments."""
492
+ import matplotlib.patches as mpatches
493
+
494
+ bar_height, bar_y = 0.6, 0.5
495
+
496
+ for i, subtask in enumerate(subtasks):
497
+ start = timestamp_to_seconds(subtask.timestamps.start)
498
+ end = timestamp_to_seconds(subtask.timestamps.end)
499
+ color = colors[i % len(colors)]
500
+
501
+ rect = mpatches.FancyBboxPatch(
502
+ (start, bar_y - bar_height / 2),
503
+ end - start,
504
+ bar_height,
505
+ boxstyle="round,pad=0.02,rounding_size=0.1",
506
+ facecolor=color,
507
+ edgecolor="white",
508
+ linewidth=1.5,
509
+ alpha=0.85,
510
+ )
511
+ ax.add_patch(rect)
512
+
513
+ # Add label if segment is wide enough
514
+ duration = end - start
515
+ if duration > total_duration * 0.06:
516
+ ax.text(
517
+ (start + end) / 2,
518
+ bar_y,
519
+ subtask.name,
520
+ ha="center",
521
+ va="center",
522
+ fontsize=8,
523
+ fontweight="bold",
524
+ color="white",
525
+ rotation=0 if duration > total_duration * 0.12 else 45,
526
+ )
527
+
528
+ if i > 0:
529
+ ax.axvline(x=start, ymin=0.1, ymax=0.9, color="white", linestyle="--", linewidth=1.5, alpha=0.7)
530
+
531
+ ax.axvline(x=0, ymin=0.1, ymax=0.9, color="#00ff00", linestyle="-", linewidth=2, alpha=0.9)
532
+ if subtasks:
533
+ ax.axvline(
534
+ x=timestamp_to_seconds(subtasks[-1].timestamps.end),
535
+ ymin=0.1,
536
+ ymax=0.9,
537
+ color="white",
538
+ linestyle="--",
539
+ linewidth=1.5,
540
+ alpha=0.7,
541
+ )
542
+
543
+ ax.set_xlim(-total_duration * 0.02, total_duration * 1.02)
544
+ ax.set_ylim(-0.1, 1.1)
545
+ ax.set_xlabel("Time (seconds)", fontsize=10, color="white", labelpad=5)
546
+ for spine in ["top", "right", "left"]:
547
+ ax.spines[spine].set_visible(False)
548
+ ax.spines["bottom"].set_color("#444444")
549
+ ax.tick_params(axis="x", colors="#888888", labelsize=8)
550
+ ax.tick_params(axis="y", left=False, labelleft=False)
551
+
552
+
553
+ def visualize_episode(
554
+ ep_idx: int,
555
+ annotation: SubtaskAnnotation,
556
+ video_path: Path,
557
+ video_start: float,
558
+ video_end: float,
559
+ output_path: Path,
560
+ video_key: str,
561
+ ann_type: str,
562
+ ):
563
+ """Create visualization for a single episode with frames and timeline."""
564
+ import matplotlib.pyplot as plt
565
+
566
+ if annotation is None:
567
+ print(f"No {ann_type} annotation for episode {ep_idx}")
568
+ return
569
+
570
+ subtasks = annotation.subtasks
571
+ if not subtasks:
572
+ print(f"No subtasks for episode {ep_idx}")
573
+ return
574
+
575
+ colors = plt.cm.tab10(np.linspace(0, 1, max(len(subtasks), 10)))
576
+ total_duration = timestamp_to_seconds(subtasks[-1].timestamps.end)
577
+
578
+ # Extract middle frame from each subtask
579
+ sample_frames, frame_times = [], []
580
+ for subtask in subtasks:
581
+ start = timestamp_to_seconds(subtask.timestamps.start)
582
+ end = timestamp_to_seconds(subtask.timestamps.end)
583
+ mid = (start + end) / 2
584
+ frame_times.append(mid)
585
+ sample_frames.append(extract_frame(video_path, video_start + mid))
586
+
587
+ # Create figure
588
+ fig_width = max(16, len(subtasks) * 2.5)
589
+ fig = plt.figure(figsize=(fig_width, 10))
590
+ fig.patch.set_facecolor("#1a1a2e")
591
+
592
+ gs = fig.add_gridspec(
593
+ 2,
594
+ max(len(subtasks), 1),
595
+ height_ratios=[2, 1],
596
+ hspace=0.3,
597
+ wspace=0.1,
598
+ left=0.05,
599
+ right=0.95,
600
+ top=0.88,
601
+ bottom=0.1,
602
+ )
603
+
604
+ fig.suptitle(
605
+ f"Episode {ep_idx} - {ann_type.capitalize()} Annotations",
606
+ fontsize=18,
607
+ fontweight="bold",
608
+ color="white",
609
+ y=0.96,
610
+ )
611
+ fig.text(
612
+ 0.5,
613
+ 0.91,
614
+ f"Camera: {video_key} | Duration: {video_end - video_start:.1f}s | {len(subtasks)} subtasks",
615
+ ha="center",
616
+ fontsize=11,
617
+ color="#888888",
618
+ )
619
+
620
+ # Plot frames
621
+ for i, (frame, subtask) in enumerate(zip(sample_frames, subtasks, strict=True)):
622
+ ax = fig.add_subplot(gs[0, i])
623
+ ax.set_facecolor("#16213e")
624
+ if frame is not None:
625
+ ax.imshow(frame)
626
+ else:
627
+ ax.text(
628
+ 0.5, 0.5, "N/A", ha="center", va="center", fontsize=12, color="white", transform=ax.transAxes
629
+ )
630
+ ax.set_title(subtask.name, fontsize=10, fontweight="bold", color=colors[i % len(colors)], pad=8)
631
+ ax.axis("off")
632
+ ax.text(
633
+ 0.5,
634
+ -0.08,
635
+ f"t={frame_times[i]:.1f}s",
636
+ ha="center",
637
+ fontsize=9,
638
+ color="#888888",
639
+ transform=ax.transAxes,
640
+ )
641
+
642
+ # Plot timeline
643
+ ax_timeline = fig.add_subplot(gs[1, :])
644
+ ax_timeline.set_facecolor("#16213e")
645
+ draw_timeline(ax_timeline, subtasks, total_duration, colors)
646
+
647
+ output_path.parent.mkdir(parents=True, exist_ok=True)
648
+ plt.savefig(output_path, dpi=150, facecolor=fig.get_facecolor(), edgecolor="none", bbox_inches="tight")
649
+ plt.close()
650
+ print(f"Saved: {output_path}")
651
+
652
+
653
+ def visualize_annotations(
654
+ dataset: LeRobotDataset,
655
+ sparse_annotations: dict[int, SubtaskAnnotation],
656
+ dense_annotations: dict[int, SubtaskAnnotation] | None,
657
+ video_key: str,
658
+ output_dir: Path,
659
+ num_episodes: int = 5,
660
+ annotation_type: str = "sparse",
661
+ episode_indices: list[int] | None = None,
662
+ ):
663
+ """
664
+ Visualize subtask annotations for a set of episodes.
665
+
666
+ Args:
667
+ dataset: LeRobotDataset instance
668
+ sparse_annotations: Dict mapping episode index to sparse annotations
669
+ dense_annotations: Dict mapping episode index to dense annotations (or None)
670
+ video_key: Camera/video key to use
671
+ output_dir: Directory to save visualization images
672
+ num_episodes: Number of episodes to visualize (ignored if episode_indices provided)
673
+ annotation_type: "sparse", "dense", or "both"
674
+ episode_indices: Specific episode indices to visualize (optional)
675
+ """
676
+ # Determine available episodes based on annotation type
677
+ if annotation_type == "sparse":
678
+ available = set(sparse_annotations.keys())
679
+ elif annotation_type == "dense":
680
+ available = set(dense_annotations.keys()) if dense_annotations else set()
681
+ else: # both
682
+ sparse_set = set(sparse_annotations.keys())
683
+ dense_set = set(dense_annotations.keys()) if dense_annotations else set()
684
+ available = sparse_set | dense_set
685
+
686
+ if not available:
687
+ print("Error: No annotations found to visualize.")
688
+ return
689
+
690
+ # Select episodes to visualize
691
+ if episode_indices:
692
+ episodes = sorted([e for e in episode_indices if e in available])
693
+ missing = set(episode_indices) - available
694
+ if missing:
695
+ print(f"Episodes not found in annotations: {sorted(missing)}")
696
+ else:
697
+ episodes = sorted(random.sample(list(available), min(num_episodes, len(available))))
698
+ print(f"Visualizing {len(episodes)} episodes: {episodes}")
699
+ output_dir.mkdir(parents=True, exist_ok=True)
700
+
701
+ # Generate visualizations
702
+ for i, ep_idx in enumerate(episodes, 1):
703
+ print(f"Processing episode {ep_idx} ({i}/{len(episodes)})")
704
+ video_path = dataset.root / dataset.meta.get_video_file_path(ep_idx, video_key)
705
+ if not video_path.exists():
706
+ print(f"Video not found: {video_path}")
707
+ continue
708
+
709
+ video_start = float(dataset.meta.episodes[f"videos/{video_key}/from_timestamp"][ep_idx])
710
+ video_end = float(dataset.meta.episodes[f"videos/{video_key}/to_timestamp"][ep_idx])
711
+
712
+ if annotation_type == "both":
713
+ # Visualize both sparse and dense
714
+ for ann_type, annotations in [("sparse", sparse_annotations), ("dense", dense_annotations)]:
715
+ if annotations and ep_idx in annotations:
716
+ output_path = output_dir / f"episode_{ep_idx:04d}_{ann_type}.png"
717
+ visualize_episode(
718
+ ep_idx,
719
+ annotations.get(ep_idx),
720
+ video_path,
721
+ video_start,
722
+ video_end,
723
+ output_path,
724
+ video_key,
725
+ ann_type,
726
+ )
727
+ else:
728
+ annotations = sparse_annotations if annotation_type == "sparse" else dense_annotations
729
+ if annotations and ep_idx in annotations:
730
+ output_path = output_dir / f"episode_{ep_idx:04d}_{annotation_type}.png"
731
+ visualize_episode(
732
+ ep_idx,
733
+ annotations.get(ep_idx),
734
+ video_path,
735
+ video_start,
736
+ video_end,
737
+ output_path,
738
+ video_key,
739
+ annotation_type,
740
+ )
741
+
742
+ print(f"Visualizations saved to: {output_dir.absolute()}")
743
+
744
+
745
+ def save_annotations_to_dataset(
746
+ dataset_path: Path, annotations: dict[int, SubtaskAnnotation], fps: int, prefix: str = "sparse"
747
+ ):
748
+ """Save annotations to LeRobot dataset parquet format."""
749
+ from lerobot.datasets.utils import DEFAULT_EPISODES_PATH, load_episodes
750
+
751
+ episodes_dataset = load_episodes(dataset_path)
752
+ if not episodes_dataset or len(episodes_dataset) == 0:
753
+ return
754
+
755
+ episodes_df = episodes_dataset.to_pandas()
756
+ cols = [
757
+ f"{prefix}_{c}"
758
+ for c in [
759
+ "subtask_names",
760
+ "subtask_start_times",
761
+ "subtask_end_times",
762
+ "subtask_start_frames",
763
+ "subtask_end_frames",
764
+ ]
765
+ ]
766
+ for col in cols:
767
+ episodes_df[col] = None
768
+
769
+ for ep_idx, ann in annotations.items():
770
+ if ep_idx >= len(episodes_df):
771
+ continue
772
+ names, starts, ends, start_frames, end_frames = [], [], [], [], []
773
+ for s in ann.subtasks:
774
+ names.append(s.name)
775
+ st, et = timestamp_to_seconds(s.timestamps.start), timestamp_to_seconds(s.timestamps.end)
776
+ starts.append(st)
777
+ ends.append(et)
778
+ start_frames.append(int(st * fps))
779
+ end_frames.append(int(et * fps))
780
+ episodes_df.at[ep_idx, cols[0]] = names
781
+ episodes_df.at[ep_idx, cols[1]] = starts
782
+ episodes_df.at[ep_idx, cols[2]] = ends
783
+ episodes_df.at[ep_idx, cols[3]] = start_frames
784
+ episodes_df.at[ep_idx, cols[4]] = end_frames
785
+
786
+ # Group by file and write
787
+ for ep_idx in episodes_df.index:
788
+ key = (
789
+ episodes_df.loc[ep_idx, "meta/episodes/chunk_index"],
790
+ episodes_df.loc[ep_idx, "meta/episodes/file_index"],
791
+ )
792
+ path = dataset_path / DEFAULT_EPISODES_PATH.format(chunk_index=key[0], file_index=key[1])
793
+ if path.exists():
794
+ file_df = pd.read_parquet(path)
795
+ for col in cols + (
796
+ [
797
+ "subtask_names",
798
+ "subtask_start_times",
799
+ "subtask_end_times",
800
+ "subtask_start_frames",
801
+ "subtask_end_frames",
802
+ ]
803
+ if prefix == "sparse"
804
+ else []
805
+ ):
806
+ if col not in file_df.columns:
807
+ file_df[col] = None
808
+ if ep_idx in annotations:
809
+ for col in cols:
810
+ file_df.at[ep_idx, col] = episodes_df.loc[ep_idx, col]
811
+ if prefix == "sparse": # Legacy columns
812
+ for i, legacy in enumerate(
813
+ [
814
+ "subtask_names",
815
+ "subtask_start_times",
816
+ "subtask_end_times",
817
+ "subtask_start_frames",
818
+ "subtask_end_frames",
819
+ ]
820
+ ):
821
+ file_df.at[ep_idx, legacy] = episodes_df.loc[ep_idx, cols[i]]
822
+ file_df.to_parquet(path, engine="pyarrow", compression="snappy")
823
+
824
+
825
+ def generate_auto_sparse_annotations(
826
+ dataset: LeRobotDataset, episode_indices: list[int], video_key: str
827
+ ) -> dict[int, SubtaskAnnotation]:
828
+ """Auto-generate single 'task' stage annotations for all episodes."""
829
+ annotations = {}
830
+ for ep_idx in episode_indices:
831
+ start = float(dataset.meta.episodes[f"videos/{video_key}/from_timestamp"][ep_idx])
832
+ end = float(dataset.meta.episodes[f"videos/{video_key}/to_timestamp"][ep_idx])
833
+ duration = end - start
834
+ end_str = f"{int(duration // 60):02d}:{int(duration % 60):02d}"
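+ # e.g. a duration of 75.4 s gives end_str == "01:15" (minutes:seconds; illustrative value)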
835
+ annotations[ep_idx] = SubtaskAnnotation(
836
+ subtasks=[Subtask(name="task", timestamps=Timestamp(start="00:00", end=end_str))]
837
+ )
838
+ return annotations
839
+
840
+
841
+ def load_annotations_from_dataset(dataset_path: Path, prefix: str = "sparse") -> dict[int, SubtaskAnnotation]:
842
+ """Load annotations from LeRobot dataset parquet files."""
843
+ from lerobot.datasets.utils import load_episodes
844
+
845
+ episodes_dataset = load_episodes(dataset_path)
846
+ if not episodes_dataset or len(episodes_dataset) == 0:
847
+ return {}
848
+
849
+ col_names = f"{prefix}_subtask_names"
850
+ col_start = f"{prefix}_subtask_start_times"
851
+ col_end = f"{prefix}_subtask_end_times"
852
+
853
+ # Fall back to legacy columns for sparse
854
+ if col_names not in episodes_dataset.column_names:
855
+ if prefix == "sparse" and "subtask_names" in episodes_dataset.column_names:
856
+ col_names, col_start, col_end = "subtask_names", "subtask_start_times", "subtask_end_times"
857
+ else:
858
+ return {}
859
+
860
+ df = episodes_dataset.to_pandas()
861
+ annotations = {}
862
+ for ep_idx in df.index:
863
+ names = df.loc[ep_idx, col_names]
864
+ if names is None or (isinstance(names, float) and pd.isna(names)):
865
+ continue
866
+ starts, ends = df.loc[ep_idx, col_start], df.loc[ep_idx, col_end]
867
+ annotations[int(ep_idx)] = SubtaskAnnotation(
868
+ subtasks=[
869
+ Subtask(
870
+ name=n,
871
+ timestamps=Timestamp(
872
+ start=f"{int(s) // 60:02d}:{int(s) % 60:02d}",
873
+ end=f"{int(e) // 60:02d}:{int(e) % 60:02d}",
874
+ ),
875
+ )
876
+ for n, s, e in zip(names, starts, ends, strict=True)
877
+ ]
878
+ )
879
+ return annotations
880
+
881
+
882
+ def process_single_episode(
883
+ ep_idx: int,
884
+ dataset_root: Path,
885
+ dataset_meta,
886
+ video_key: str,
887
+ fps: int,
888
+ annotator: VideoAnnotator,
889
+ ) -> tuple[int, SubtaskAnnotation | None, str | None]:
890
+ """Process a single episode annotation."""
891
+ try:
892
+ video_path = dataset_root / dataset_meta.get_video_file_path(ep_idx, video_key)
893
+ if not video_path.exists():
894
+ return ep_idx, None, f"Video not found: {video_path}"
895
+
896
+ start = float(dataset_meta.episodes[f"videos/{video_key}/from_timestamp"][ep_idx])
897
+ end = float(dataset_meta.episodes[f"videos/{video_key}/to_timestamp"][ep_idx])
898
+ return ep_idx, annotator.annotate(video_path, fps, start, end), None
899
+ except Exception as e:
900
+ return ep_idx, None, str(e)
901
+
902
+
903
+ def worker_process_episodes(
904
+ worker_id: int,
905
+ gpu_id: int,
906
+ episode_indices: list[int],
907
+ repo_id: str,
908
+ video_key: str,
909
+ sparse_subtask_list: list[str],
910
+ dense_subtask_list: list[str] | None,
911
+ model_name: str,
912
+ torch_dtype: torch.dtype,
913
+ ) -> tuple[dict, dict | None]:
914
+ """Worker for parallel processing across GPUs."""
915
+ device = f"cuda:{gpu_id}"
916
+ dataset = LeRobotDataset(repo_id, download_videos=False)
917
+
918
+ sparse_annotator = VideoAnnotator(sparse_subtask_list, model_name, device, torch_dtype)
919
+ dense_annotator = (
920
+ VideoAnnotator(
921
+ dense_subtask_list,
922
+ model_name,
923
+ device,
924
+ torch_dtype,
925
+ sparse_annotator.model,
926
+ sparse_annotator.processor,
927
+ )
928
+ if dense_subtask_list
929
+ else None
930
+ )
931
+
932
+ sparse_annotations, dense_annotations = {}, {} if dense_subtask_list else None
933
+
934
+ for ep_idx in episode_indices:
935
+ _, sparse_ann, err = process_single_episode(
936
+ ep_idx, dataset.root, dataset.meta, video_key, dataset.fps, sparse_annotator
937
+ )
938
+ if sparse_ann:
939
+ sparse_annotations[ep_idx] = sparse_ann
940
+
941
+ if dense_annotator:
942
+ _, dense_ann, _ = process_single_episode(
943
+ ep_idx, dataset.root, dataset.meta, video_key, dataset.fps, dense_annotator
944
+ )
945
+ if dense_ann:
946
+ dense_annotations[ep_idx] = dense_ann
947
+
948
+ return sparse_annotations, dense_annotations
949
+
950
+
951
+ def main():
952
+ parser = argparse.ArgumentParser(description="SARM-style subtask annotation using local GPU (Qwen3-VL)")
953
+ parser.add_argument("--repo-id", type=str, required=True, help="HuggingFace dataset repository ID")
954
+ parser.add_argument(
955
+ "--sparse-subtasks", type=str, default=None, help="Comma-separated sparse subtask names"
956
+ )
957
+ parser.add_argument(
958
+ "--dense-subtasks", type=str, default=None, help="Comma-separated dense subtask names"
959
+ )
960
+ parser.add_argument(
961
+ "--dense-only", action="store_true", help="Dense-only mode with auto-generated sparse 'task' stage"
962
+ )
963
+ parser.add_argument("--episodes", type=int, nargs="+", default=None, help="Episode indices to annotate")
964
+ parser.add_argument("--model", type=str, default="Qwen/Qwen3-VL-30B-A3B-Instruct", help="VLM model")
965
+ parser.add_argument("--skip-existing", action="store_true", help="Skip already annotated episodes")
966
+ parser.add_argument("--video-key", type=str, default=None, help="Video key (default: first available)")
967
+ parser.add_argument("--push-to-hub", action="store_true", help="Push to HuggingFace Hub")
968
+ parser.add_argument("--output-repo-id", type=str, default=None, help="Output repo ID for push")
969
+ parser.add_argument("--device", type=str, default="cuda", help="Device (cuda/cpu)")
970
+ parser.add_argument("--dtype", type=str, default="bfloat16", choices=["bfloat16", "float16", "float32"])
971
+ parser.add_argument("--num-workers", type=int, default=1, help="Parallel workers for multi-GPU")
972
+ parser.add_argument("--gpu-ids", type=int, nargs="+", default=None, help="GPU IDs to use")
973
+ # Visualization options
974
+ parser.add_argument(
975
+ "--visualize-only",
976
+ action="store_true",
977
+ help="Only visualize existing annotations (no generation)",
978
+ )
979
+ parser.add_argument(
980
+ "--num-visualizations",
981
+ type=int,
982
+ default=5,
983
+ help="Number of episodes to visualize (default: 5)",
984
+ )
985
+ parser.add_argument(
986
+ "--visualize-type",
987
+ type=str,
988
+ default="sparse",
989
+ choices=["sparse", "dense", "both"],
990
+ help="Type of annotations to visualize (default: sparse)",
991
+ )
992
+ parser.add_argument(
993
+ "--output-dir",
994
+ type=str,
995
+ default="./subtask_viz",
996
+ help="Output directory for visualizations (default: ./subtask_viz)",
997
+ )
998
+
999
+ args = parser.parse_args()
1000
+
1001
+ # Load dataset first (needed for both annotation and visualization)
1002
+ print(f"Loading dataset: {args.repo_id}")
1003
+ dataset = LeRobotDataset(args.repo_id, download_videos=True)
1004
+ fps = dataset.fps
1005
+
1006
+ if not dataset.meta.video_keys:
1007
+ raise ValueError("No video keys found")
1008
+
1009
+ video_key = (
1010
+ args.video_key if args.video_key in (dataset.meta.video_keys or []) else dataset.meta.video_keys[0]
1011
+ )
1012
+ print(f"Using camera: {video_key}, FPS: {fps}")
1013
+
1014
+ # Handle visualization-only mode
1015
+ if args.visualize_only:
1016
+ print("Visualization-only mode")
1017
+ sparse_annotations = load_annotations_from_dataset(dataset.root, prefix="sparse")
1018
+ dense_annotations = load_annotations_from_dataset(dataset.root, prefix="dense")
1019
+
1020
+ if not sparse_annotations and not dense_annotations:
1021
+ return print("Error: No annotations found. Run annotation first.")
1022
+
1023
+ print(f"Found {len(sparse_annotations)} sparse, {len(dense_annotations)} dense annotations")
1024
+
1025
+ visualize_annotations(
1026
+ dataset=dataset,
1027
+ sparse_annotations=sparse_annotations,
1028
+ dense_annotations=dense_annotations if dense_annotations else None,
1029
+ video_key=video_key,
1030
+ output_dir=Path(args.output_dir),
1031
+ num_episodes=args.num_visualizations,
1032
+ annotation_type=args.visualize_type,
1033
+ episode_indices=args.episodes,
1034
+ )
1035
+ return
1036
+
1037
+ # Validate arguments for annotation mode
1038
+ if args.dense_only and not args.dense_subtasks:
1039
+ return print("Error: --dense-only requires --dense-subtasks")
1040
+ if args.dense_subtasks and not args.sparse_subtasks and not args.dense_only:
1041
+ return print("Error: --dense-subtasks requires --sparse-subtasks or --dense-only")
1042
+
1043
+ sparse_subtask_list = (
1044
+ [s.strip() for s in args.sparse_subtasks.split(",")] if args.sparse_subtasks else None
1045
+ )
1046
+ dense_subtask_list = [s.strip() for s in args.dense_subtasks.split(",")] if args.dense_subtasks else None
1047
+ auto_sparse = sparse_subtask_list is None
1048
+ dense_mode = dense_subtask_list is not None
1049
+ torch_dtype = {"bfloat16": torch.bfloat16, "float16": torch.float16, "float32": torch.float32}[args.dtype]
1050
+
1051
+ # Determine episodes
1052
+ episode_indices = args.episodes or list(range(dataset.meta.total_episodes))
1053
+
1054
+ existing_annotations = load_annotations_from_dataset(dataset.root, prefix="sparse")
1055
+ if args.skip_existing:
1056
+ episode_indices = [ep for ep in episode_indices if ep not in existing_annotations]
1057
+
1058
+ if not episode_indices:
1059
+ return print("All episodes already annotated!")
1060
+ print(f"Annotating {len(episode_indices)} episodes")
1061
+
1062
+ # GPU setup
1063
+ gpu_ids = args.gpu_ids or list(
1064
+ range(min(args.num_workers, torch.cuda.device_count() if torch.cuda.is_available() else 1))
1065
+ )
1066
+ args.num_workers = len(gpu_ids)
1067
+
1068
+ sparse_annotations = existing_annotations.copy()
1069
+ dense_annotations = {} if dense_mode else None
1070
+
1071
+ # Auto-sparse mode
1072
+ if auto_sparse:
1073
+ sparse_annotations.update(generate_auto_sparse_annotations(dataset, episode_indices, video_key))
1074
+ save_annotations_to_dataset(dataset.root, sparse_annotations, fps, prefix="sparse")
1075
+ print(f"Auto-generated {len(episode_indices)} sparse 'task' annotations")
1076
+
1077
+ # VLM annotation (for sparse if not auto, and for dense)
1078
+ need_vlm = (not auto_sparse) or dense_mode
1079
+
1080
+ if need_vlm:
1081
+ if args.num_workers > 1 and not auto_sparse:
1082
+ # Parallel processing
1083
+ print(f"Parallel processing with {args.num_workers} workers")
1084
+ episodes_per_worker = [[] for _ in range(args.num_workers)]
1085
+ for i, ep_idx in enumerate(episode_indices):
1086
+ episodes_per_worker[i % args.num_workers].append(ep_idx)
1087
+
1088
+ with ProcessPoolExecutor(
1089
+ max_workers=args.num_workers, mp_context=mp.get_context("spawn")
1090
+ ) as executor:
1091
+ futures = [
1092
+ executor.submit(
1093
+ worker_process_episodes,
1094
+ w,
1095
+ gpu_ids[w],
1096
+ episodes_per_worker[w],
1097
+ args.repo_id,
1098
+ video_key,
1099
+ sparse_subtask_list,
1100
+ dense_subtask_list,
1101
+ args.model,
1102
+ torch_dtype,
1103
+ )
1104
+ for w in range(args.num_workers)
1105
+ if episodes_per_worker[w]
1106
+ ]
1107
+
1108
+ for future in as_completed(futures):
1109
+ try:
1110
+ worker_sparse, worker_dense = future.result()
1111
+ sparse_annotations.update(worker_sparse)
1112
+ if dense_mode and worker_dense:
1113
+ dense_annotations.update(worker_dense)
1114
+ save_annotations_to_dataset(dataset.root, sparse_annotations, fps, prefix="sparse")
1115
+ if dense_mode:
1116
+ save_annotations_to_dataset(dataset.root, dense_annotations, fps, prefix="dense")
1117
+ except Exception as e:
1118
+ raise RuntimeError(f"Worker failed: {e}") from e
1119
+ else:
1120
+ # Sequential processing
1121
+ sparse_annotator = (
1122
+ VideoAnnotator(sparse_subtask_list, args.model, args.device, torch_dtype)
1123
+ if not auto_sparse and sparse_subtask_list
1124
+ else None
1125
+ )
1126
+ dense_annotator = (
1127
+ VideoAnnotator(
1128
+ dense_subtask_list,
1129
+ args.model,
1130
+ args.device,
1131
+ torch_dtype,
1132
+ sparse_annotator.model if sparse_annotator else None,
1133
+ sparse_annotator.processor if sparse_annotator else None,
1134
+ )
1135
+ if dense_mode
1136
+ else None
1137
+ )
1138
+
1139
+ for i, ep_idx in enumerate(episode_indices):
1140
+ print(f"Episode {ep_idx} ({i + 1}/{len(episode_indices)})")
1141
+
1142
+ if sparse_annotator:
1143
+ _, sparse_ann, err = process_single_episode(
1144
+ ep_idx, dataset.root, dataset.meta, video_key, fps, sparse_annotator
1145
+ )
1146
+ if sparse_ann:
1147
+ sparse_annotations[ep_idx] = sparse_ann
1148
+ save_annotations_to_dataset(dataset.root, sparse_annotations, fps, prefix="sparse")
1149
+ elif err:
1150
+ print(f"Sparse failed: {err}")
1151
+
1152
+ if dense_annotator:
1153
+ _, dense_ann, err = process_single_episode(
1154
+ ep_idx, dataset.root, dataset.meta, video_key, fps, dense_annotator
1155
+ )
1156
+ if dense_ann:
1157
+ dense_annotations[ep_idx] = dense_ann
1158
+ save_annotations_to_dataset(dataset.root, dense_annotations, fps, prefix="dense")
1159
+ elif err:
1160
+ print(f"Dense failed: {err}")
1161
+
1162
+ # Save temporal proportions
1163
+ def save_proportions(annotations, prefix, subtask_list=None, is_auto=False):
1164
+ props: dict[str, float] = (
1165
+ {"task": 1.0} if is_auto else compute_temporal_proportions(annotations, fps, subtask_list)
1166
+ )
1167
+ path = dataset.root / "meta" / f"temporal_proportions_{prefix}.json"
1168
+ path.parent.mkdir(parents=True, exist_ok=True)
1169
+ with open(path, "w") as f:
1170
+ json.dump(props, f, indent=2)
1171
+ print(f"Saved {prefix} temporal proportions")
1172
+
1173
+ save_proportions(sparse_annotations, "sparse", sparse_subtask_list, auto_sparse)
1174
+ if dense_mode and dense_annotations:
1175
+ save_proportions(dense_annotations, "dense", dense_subtask_list)
1176
+
1177
+ print(f"\nComplete! {len(sparse_annotations)} sparse, {len(dense_annotations or {})} dense annotations")
1178
+
1179
+ # Visualize annotations after generation
1180
+ if args.num_visualizations > 0:
1181
+ print(f"\nGenerating {args.num_visualizations} visualizations...")
1182
+ visualize_type = "both" if dense_mode else "sparse"
1183
+ visualize_annotations(
1184
+ dataset=dataset,
1185
+ sparse_annotations=sparse_annotations,
1186
+ dense_annotations=dense_annotations,
1187
+ video_key=video_key,
1188
+ output_dir=Path(args.output_dir),
1189
+ num_episodes=args.num_visualizations,
1190
+ annotation_type=visualize_type,
1191
+ )
1192
+
1193
+ if args.push_to_hub:
1194
+ try:
1195
+ dataset.push_to_hub(push_videos=True)
1196
+ print(f"Pushed to {args.output_repo_id or args.repo_id}")
1197
+ except Exception as e:
1198
+ print(f"Push failed: {e}")
1199
+
1200
+
1201
+ if __name__ == "__main__":
1202
+ main()
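+
+
+ # Example invocation (illustrative sketch only; the script path and values are placeholders,
+ # flags are the ones defined in main() above):
+ #   python path/to/subtask_annotation.py \
+ #     --repo-id your_user/your_dataset \
+ #     --sparse-subtasks "reach,grasp,place" \
+ #     --episodes 0 1 2 \
+ #     --num-visualizations 3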
lerobot/src/lerobot/datasets/push_dataset_to_hub/utils.py ADDED
@@ -0,0 +1,73 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import datasets
18
+ import torch
19
+
20
+
21
+ # TODO(aliberts): remove
22
+ def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> dict[str, torch.Tensor]:
23
+ """
24
+ Calculate episode data index for the provided HuggingFace Dataset. Relies on episode_index column of hf_dataset.
25
+
26
+ Parameters:
27
+ - hf_dataset (datasets.Dataset): A HuggingFace dataset containing the episode index.
28
+
29
+ Returns:
30
+ - episode_data_index: A dictionary containing the data index for each episode. The dictionary has two keys:
31
+ - "from": A tensor containing the starting index of each episode.
32
+ - "to": A tensor containing the ending index of each episode.
33
+ """
34
+ episode_data_index = {"from": [], "to": []}
35
+
36
+ current_episode = None
37
+ """
38
+ The episode_index is a list of integers, each representing the episode index of the corresponding example.
39
+ For instance, the following is a valid episode_index:
40
+ [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]
41
+
42
+ Below, we iterate through the episode_index and populate the episode_data_index dictionary with the starting and
43
+ ending index of each episode. For the episode_index above, the episode_data_index dictionary will look like this:
44
+ {
45
+ "from": [0, 3, 7],
46
+ "to": [3, 7, 12]
47
+ }
48
+ """
49
+ if len(hf_dataset) == 0:
50
+ episode_data_index = {
51
+ "from": torch.tensor([]),
52
+ "to": torch.tensor([]),
53
+ }
54
+ return episode_data_index
55
+ for idx, episode_idx in enumerate(hf_dataset["episode_index"]):
56
+ if episode_idx != current_episode:
57
+ # We encountered a new episode, so we append its starting location to the "from" list
58
+ episode_data_index["from"].append(idx)
59
+ # If this is not the first episode, we append the ending location of the previous episode to the "to" list
60
+ if current_episode is not None:
61
+ episode_data_index["to"].append(idx)
62
+ # Let's keep track of the current episode index
63
+ current_episode = episode_idx
64
+ else:
65
+ # We are still in the same episode, so there is nothing for us to do here
66
+ pass
67
+ # We have reached the end of the dataset, so we append the ending location of the last episode to the "to" list
68
+ episode_data_index["to"].append(idx + 1)
69
+
70
+ for k in ["from", "to"]:
71
+ episode_data_index[k] = torch.tensor(episode_data_index[k])
72
+
73
+ return episode_data_index
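+
+
+ # Illustrative usage (hypothetical `hf_dataset` variable, not part of this module):
+ #   episode_data_index = calculate_episode_data_index(hf_dataset)
+ #   # rows of episode i then span episode_data_index["from"][i] .. episode_data_index["to"][i] - 1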
lerobot/src/lerobot/datasets/v30/augment_dataset_quantile_stats.py ADDED
@@ -0,0 +1,260 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ This script augments existing LeRobot datasets with quantile statistics.
19
+
20
+ Most datasets created before the quantile feature was added do not contain
21
+ quantile statistics (q01, q10, q50, q90, q99) in their metadata. This script:
22
+
23
+ 1. Loads an existing LeRobot dataset in v3.0 format
24
+ 2. Checks if it already contains quantile statistics
25
+ 3. If missing, computes quantile statistics for all features
26
+ 4. Updates the dataset metadata with the new quantile statistics
27
+
28
+ Usage:
29
+
30
+ ```bash
31
+ python src/lerobot/datasets/v30/augment_dataset_quantile_stats.py \
32
+ --repo-id=lerobot/pusht
33
+ ```
34
+ """
35
+
36
+ import argparse
37
+ import concurrent.futures
38
+ import logging
39
+ from pathlib import Path
40
+
41
+ import numpy as np
42
+ import torch
43
+ from huggingface_hub import HfApi
44
+ from requests import HTTPError
45
+ from tqdm import tqdm
46
+
47
+ from lerobot.datasets.compute_stats import DEFAULT_QUANTILES, aggregate_stats, get_feature_stats
48
+ from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
49
+ from lerobot.datasets.utils import write_stats
50
+ from lerobot.utils.utils import init_logging
51
+
52
+
53
+ def has_quantile_stats(stats: dict[str, dict] | None, quantile_list_keys: list[str] | None = None) -> bool:
54
+ """Check if dataset statistics already contain quantile information.
55
+
56
+ Args:
57
+ stats: Dataset statistics dictionary
+ quantile_list_keys: Optional list of quantile stat keys to look for; defaults to keys derived from DEFAULT_QUANTILES
58
+
59
+ Returns:
60
+ True if quantile statistics are present, False otherwise
61
+ """
62
+ if quantile_list_keys is None:
63
+ quantile_list_keys = [f"q{int(q * 100):02d}" for q in DEFAULT_QUANTILES]
64
+
65
+ if stats is None:
66
+ return False
67
+
68
+ for feature_stats in stats.values():
69
+ if any(q_key in feature_stats for q_key in quantile_list_keys):
70
+ return True
71
+
72
+ return False
73
+
74
+
75
+ def process_single_episode(dataset: LeRobotDataset, episode_idx: int) -> dict:
76
+ """Process a single episode and return its statistics.
77
+
78
+ Args:
79
+ dataset: The LeRobot dataset
80
+ episode_idx: Index of the episode to process
81
+
82
+ Returns:
83
+ Dictionary containing episode statistics
84
+ """
85
+ logging.info(f"Computing stats for episode {episode_idx}")
86
+
87
+ start_idx = dataset.meta.episodes[episode_idx]["dataset_from_index"]
88
+ end_idx = dataset.meta.episodes[episode_idx]["dataset_to_index"]
89
+
90
+ collected_data: dict[str, list] = {}
91
+ for idx in range(start_idx, end_idx):
92
+ item = dataset[idx]
93
+ for key, value in item.items():
94
+ if key not in dataset.features:
95
+ continue
96
+
97
+ if key not in collected_data:
98
+ collected_data[key] = []
99
+ collected_data[key].append(value)
100
+
101
+ ep_stats = {}
102
+ for key, data_list in collected_data.items():
103
+ if dataset.features[key]["dtype"] == "string":
104
+ continue
105
+
106
+ data = torch.stack(data_list).cpu().numpy()
107
+ if dataset.features[key]["dtype"] in ["image", "video"]:
108
+ if data.dtype == np.uint8:
109
+ data = data.astype(np.float32) / 255.0
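+ # uint8 images are rescaled to [0, 1] above so their quantiles are on the same scale as float image features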
110
+
111
+ axes_to_reduce = (0, 2, 3)
112
+ keepdims = True
113
+ else:
114
+ axes_to_reduce = 0
115
+ keepdims = data.ndim == 1
116
+
117
+ ep_stats[key] = get_feature_stats(
118
+ data, axis=axes_to_reduce, keepdims=keepdims, quantile_list=DEFAULT_QUANTILES
119
+ )
120
+
121
+ if dataset.features[key]["dtype"] in ["image", "video"]:
122
+ ep_stats[key] = {
123
+ k: v if k == "count" else np.squeeze(v, axis=0) for k, v in ep_stats[key].items()
124
+ }
125
+
126
+ return ep_stats
127
+
128
+
129
+ def compute_quantile_stats_for_dataset(dataset: LeRobotDataset) -> dict[str, dict]:
130
+ """Compute quantile statistics for all episodes in the dataset.
131
+
132
+ Args:
133
+ dataset: The LeRobot dataset to compute statistics for
134
+
135
+ Returns:
136
+ Dictionary containing aggregated statistics with quantiles
137
+
138
+ Note:
139
+ Video decoding operations are not thread-safe, so we process episodes sequentially
140
+ when video keys are present. For datasets without videos, we use parallel processing
141
+ with ThreadPoolExecutor for better performance.
142
+ """
143
+ logging.info(f"Computing quantile statistics for dataset with {dataset.num_episodes} episodes")
144
+
145
+ episode_stats_list = []
146
+ has_videos = len(dataset.meta.video_keys) > 0
147
+
148
+ if has_videos:
149
+ logging.info("Dataset contains video keys - using sequential processing for thread safety")
150
+ for episode_idx in tqdm(range(dataset.num_episodes), desc="Processing episodes"):
151
+ ep_stats = process_single_episode(dataset, episode_idx)
152
+ episode_stats_list.append(ep_stats)
153
+ else:
154
+ logging.info("Dataset has no video keys - using parallel processing for better performance")
155
+ max_workers = min(dataset.num_episodes, 16)
156
+
157
+ with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
158
+ future_to_episode = {
159
+ executor.submit(process_single_episode, dataset, episode_idx): episode_idx
160
+ for episode_idx in range(dataset.num_episodes)
161
+ }
162
+
163
+ episode_results = {}
164
+ with tqdm(total=dataset.num_episodes, desc="Processing episodes") as pbar:
165
+ for future in concurrent.futures.as_completed(future_to_episode):
166
+ episode_idx = future_to_episode[future]
167
+ ep_stats = future.result()
168
+ episode_results[episode_idx] = ep_stats
169
+ pbar.update(1)
170
+
171
+ for episode_idx in range(dataset.num_episodes):
172
+ if episode_idx in episode_results:
173
+ episode_stats_list.append(episode_results[episode_idx])
174
+
175
+ if not episode_stats_list:
176
+ raise ValueError("No episode data found for computing statistics")
177
+
178
+ logging.info(f"Aggregating statistics from {len(episode_stats_list)} episodes")
179
+ return aggregate_stats(episode_stats_list)
180
+
181
+
182
+ def augment_dataset_with_quantile_stats(
183
+ repo_id: str,
184
+ root: str | Path | None = None,
185
+ overwrite: bool = False,
186
+ ) -> None:
187
+ """Augment a dataset with quantile statistics if they are missing.
188
+
189
+ Args:
190
+ repo_id: Repository ID of the dataset
191
+ root: Local root directory for the dataset
192
+ overwrite: Overwrite existing quantile statistics if they already exist
193
+ """
194
+ logging.info(f"Loading dataset: {repo_id}")
195
+ dataset = LeRobotDataset(
196
+ repo_id=repo_id,
197
+ root=root,
198
+ )
199
+
200
+ if not overwrite and has_quantile_stats(dataset.meta.stats):
201
+ logging.info("Dataset already contains quantile statistics. No action needed.")
202
+ return
203
+
204
+ logging.info("Dataset does not contain quantile statistics. Computing them now...")
205
+
206
+ new_stats = compute_quantile_stats_for_dataset(dataset)
207
+
208
+ logging.info("Updating dataset metadata with new quantile statistics")
209
+ dataset.meta.stats = new_stats
210
+
211
+ write_stats(new_stats, dataset.meta.root)
212
+
213
+ logging.info("Successfully updated dataset with quantile statistics")
214
+ dataset.push_to_hub()
215
+
216
+ hub_api = HfApi()
217
+ try:
218
+ hub_api.delete_tag(repo_id, tag=CODEBASE_VERSION, repo_type="dataset")
219
+ except HTTPError as e:
220
+ logging.info(f"tag={CODEBASE_VERSION} probably doesn't exist. Skipping exception ({e})")
221
+ pass
222
+ hub_api.create_tag(repo_id, tag=CODEBASE_VERSION, revision=None, repo_type="dataset")
223
+
224
+
225
+ def main():
226
+ """Main function to run the augmentation script."""
227
+ parser = argparse.ArgumentParser(description="Augment LeRobot dataset with quantile statistics")
228
+
229
+ parser.add_argument(
230
+ "--repo-id",
231
+ type=str,
232
+ required=True,
233
+ help="Repository ID of the dataset (e.g., 'lerobot/pusht')",
234
+ )
235
+
236
+ parser.add_argument(
237
+ "--root",
238
+ type=str,
239
+ help="Local root directory for the dataset",
240
+ )
241
+ parser.add_argument(
242
+ "--overwrite",
243
+ action="store_true",
244
+ help="Overwrite existing quantile statistics if they already exist",
245
+ )
246
+
247
+ args = parser.parse_args()
248
+ root = Path(args.root) if args.root else None
249
+
250
+ init_logging()
251
+
252
+ augment_dataset_with_quantile_stats(
253
+ repo_id=args.repo_id,
254
+ root=root,
255
+ overwrite=args.overwrite,
256
+ )
257
+
258
+
259
+ if __name__ == "__main__":
260
+ main()
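+
+
+ # Example invocation (illustrative; flags are the ones defined in main() above):
+ #   python src/lerobot/datasets/v30/augment_dataset_quantile_stats.py --repo-id lerobot/pusht --overwrite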
lerobot/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py ADDED
@@ -0,0 +1,571 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 2.1 to
19
+ 3.0. It will:
20
+
21
+ - Consolidate the per-episode data and video files into larger chunked parquet and mp4 files.
+ - Convert the legacy `episodes.jsonl`, `tasks.jsonl` and `episodes_stats.jsonl` metadata into parquet files under `meta/`.
+ - Aggregate the per-episode stats into dataset-level stats.
+ - Update `codebase_version` and the file size settings in `info.json`.
+ - Push this new version to the hub on the 'main' branch and tag it with "v3.0".
26
+
27
+ Usage:
28
+
29
+ Convert a dataset from the hub:
30
+ ```bash
31
+ python src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py \
32
+ --repo-id=lerobot/pusht
33
+ ```
34
+
35
+ Convert a local dataset (works in place):
36
+ ```bash
37
+ python src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py \
38
+ --repo-id=lerobot/pusht \
39
+ --root=/path/to/local/dataset/directory \
40
+ --push-to-hub=false
41
+ ```
42
+
43
+ """
44
+
45
+ import argparse
46
+ import logging
47
+ import shutil
48
+ from pathlib import Path
49
+ from typing import Any
50
+
51
+ import jsonlines
52
+ import pandas as pd
53
+ import pyarrow as pa
54
+ import tqdm
55
+ from datasets import Dataset, Features, Image
56
+ from huggingface_hub import HfApi, snapshot_download
57
+ from requests import HTTPError
58
+
59
+ from lerobot.datasets.compute_stats import aggregate_stats
60
+ from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
61
+ from lerobot.datasets.utils import (
62
+ DEFAULT_CHUNK_SIZE,
63
+ DEFAULT_DATA_FILE_SIZE_IN_MB,
64
+ DEFAULT_DATA_PATH,
65
+ DEFAULT_VIDEO_FILE_SIZE_IN_MB,
66
+ DEFAULT_VIDEO_PATH,
67
+ LEGACY_EPISODES_PATH,
68
+ LEGACY_EPISODES_STATS_PATH,
69
+ LEGACY_TASKS_PATH,
70
+ cast_stats_to_numpy,
71
+ flatten_dict,
72
+ get_file_size_in_mb,
73
+ get_parquet_file_size_in_mb,
74
+ get_parquet_num_frames,
75
+ load_info,
76
+ update_chunk_file_indices,
77
+ write_episodes,
78
+ write_info,
79
+ write_stats,
80
+ write_tasks,
81
+ )
82
+ from lerobot.datasets.video_utils import concatenate_video_files, get_video_duration_in_s
83
+ from lerobot.utils.constants import HF_LEROBOT_HOME
84
+ from lerobot.utils.utils import init_logging
85
+
86
+ V21 = "v2.1"
87
+ V30 = "v3.0"
88
+
89
+ """
90
+ -------------------------
91
+ OLD
92
+ data/chunk-000/episode_000000.parquet
93
+
94
+ NEW
95
+ data/chunk-000/file_000.parquet
96
+ -------------------------
97
+ OLD
98
+ videos/chunk-000/CAMERA/episode_000000.mp4
99
+
100
+ NEW
101
+ videos/CAMERA/chunk-000/file_000.mp4
102
+ -------------------------
103
+ OLD
104
+ episodes.jsonl
105
+ {"episode_index": 1, "tasks": ["Put the blue block in the green bowl"], "length": 266}
106
+
107
+ NEW
108
+ meta/episodes/chunk-000/episodes_000.parquet
109
+ episode_index | video_chunk_index | video_file_index | data_chunk_index | data_file_index | tasks | length
110
+ -------------------------
111
+ OLD
112
+ tasks.jsonl
113
+ {"task_index": 1, "task": "Put the blue block in the green bowl"}
114
+
115
+ NEW
116
+ meta/tasks/chunk-000/file_000.parquet
117
+ task_index | task
118
+ -------------------------
119
+ OLD
120
+ episodes_stats.jsonl
121
+
122
+ NEW
123
+ meta/episodes_stats/chunk-000/file_000.parquet
124
+ episode_index | mean | std | min | max
125
+ -------------------------
126
+ UPDATE
127
+ meta/info.json
128
+ -------------------------
129
+ """
130
+
131
+
132
+ def load_jsonlines(fpath: Path) -> list[Any]:
133
+ with jsonlines.open(fpath, "r") as reader:
134
+ return list(reader)
135
+
136
+
137
+ def legacy_load_episodes(local_dir: Path) -> dict:
138
+ episodes = load_jsonlines(local_dir / LEGACY_EPISODES_PATH)
139
+ return {item["episode_index"]: item for item in sorted(episodes, key=lambda x: x["episode_index"])}
140
+
141
+
142
+ def legacy_load_episodes_stats(local_dir: Path) -> dict:
143
+ episodes_stats = load_jsonlines(local_dir / LEGACY_EPISODES_STATS_PATH)
144
+ return {
145
+ item["episode_index"]: cast_stats_to_numpy(item["stats"])
146
+ for item in sorted(episodes_stats, key=lambda x: x["episode_index"])
147
+ }
148
+
149
+
150
+ def legacy_load_tasks(local_dir: Path) -> tuple[dict, dict]:
151
+ tasks = load_jsonlines(local_dir / LEGACY_TASKS_PATH)
152
+ tasks = {item["task_index"]: item["task"] for item in sorted(tasks, key=lambda x: x["task_index"])}
153
+ task_to_task_index = {task: task_index for task_index, task in tasks.items()}
154
+ return tasks, task_to_task_index
155
+
156
+
157
+ def validate_local_dataset_version(local_path: Path) -> None:
158
+ """Validate that the local dataset has the expected v2.1 version."""
159
+ info = load_info(local_path)
160
+ dataset_version = info.get("codebase_version", "unknown")
161
+ if dataset_version != V21:
162
+ raise ValueError(
163
+ f"Local dataset has codebase version '{dataset_version}', expected '{V21}'. "
164
+ f"This script is specifically for converting v2.1 datasets to v3.0."
165
+ )
166
+
167
+
168
+ def convert_tasks(root, new_root):
169
+ logging.info(f"Converting tasks from {root} to {new_root}")
170
+ tasks, _ = legacy_load_tasks(root)
171
+ task_indices = tasks.keys()
172
+ task_strings = tasks.values()
173
+ df_tasks = pd.DataFrame({"task_index": task_indices}, index=task_strings)
174
+ write_tasks(df_tasks, new_root)
175
+
176
+
177
+ def concat_data_files(paths_to_cat, new_root, chunk_idx, file_idx, image_keys):
178
+ # TODO(rcadene): to save RAM use Dataset.from_parquet(file) and concatenate_datasets
179
+ dataframes = [pd.read_parquet(file) for file in paths_to_cat]
180
+ # Concatenate all DataFrames along rows
181
+ concatenated_df = pd.concat(dataframes, ignore_index=True)
182
+
183
+ path = new_root / DEFAULT_DATA_PATH.format(chunk_index=chunk_idx, file_index=file_idx)
184
+ path.parent.mkdir(parents=True, exist_ok=True)
185
+
186
+ if len(image_keys) > 0:
187
+ schema = pa.Schema.from_pandas(concatenated_df)
188
+ features = Features.from_arrow_schema(schema)
189
+ for key in image_keys:
190
+ features[key] = Image()
191
+ schema = features.arrow_schema
192
+ else:
193
+ schema = None
194
+
195
+ concatenated_df.to_parquet(path, index=False, schema=schema)
196
+
197
+
198
+ def convert_data(root: Path, new_root: Path, data_file_size_in_mb: int):
199
+ data_dir = root / "data"
200
+ ep_paths = sorted(data_dir.glob("*/*.parquet"))
201
+
202
+ image_keys = get_image_keys(root)
203
+
204
+ ep_idx = 0
205
+ chunk_idx = 0
206
+ file_idx = 0
207
+ size_in_mb = 0
208
+ num_frames = 0
209
+ paths_to_cat = []
210
+ episodes_metadata = []
211
+
212
+ logging.info(f"Converting data files from {len(ep_paths)} episodes")
213
+
214
+ for ep_path in tqdm.tqdm(ep_paths, desc="convert data files"):
215
+ ep_size_in_mb = get_parquet_file_size_in_mb(ep_path)
216
+ ep_num_frames = get_parquet_num_frames(ep_path)
217
+ ep_metadata = {
218
+ "episode_index": ep_idx,
219
+ "data/chunk_index": chunk_idx,
220
+ "data/file_index": file_idx,
221
+ "dataset_from_index": num_frames,
222
+ "dataset_to_index": num_frames + ep_num_frames,
223
+ }
224
+ size_in_mb += ep_size_in_mb
225
+ num_frames += ep_num_frames
226
+ episodes_metadata.append(ep_metadata)
227
+ ep_idx += 1
228
+
229
+ if size_in_mb < data_file_size_in_mb:
230
+ paths_to_cat.append(ep_path)
231
+ continue
232
+
233
+ if paths_to_cat:
234
+ concat_data_files(paths_to_cat, new_root, chunk_idx, file_idx, image_keys)
235
+
236
+ # Reset for the next file
237
+ size_in_mb = ep_size_in_mb
238
+ paths_to_cat = [ep_path]
239
+
240
+ chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, DEFAULT_CHUNK_SIZE)
241
+
242
+ # Write remaining data if any
243
+ if paths_to_cat:
244
+ concat_data_files(paths_to_cat, new_root, chunk_idx, file_idx, image_keys)
245
+
246
+ return episodes_metadata
247
+
248
+
249
+ def get_video_keys(root):
250
+ info = load_info(root)
251
+ features = info["features"]
252
+ video_keys = [key for key, ft in features.items() if ft["dtype"] == "video"]
253
+ return video_keys
254
+
255
+
256
+ def get_image_keys(root):
257
+ info = load_info(root)
258
+ features = info["features"]
259
+ image_keys = [key for key, ft in features.items() if ft["dtype"] == "image"]
260
+ return image_keys
261
+
262
+
263
+ def convert_videos(root: Path, new_root: Path, video_file_size_in_mb: int):
264
+ logging.info(f"Converting videos from {root} to {new_root}")
265
+
266
+ video_keys = get_video_keys(root)
267
+ if len(video_keys) == 0:
268
+ return None
269
+
270
+ video_keys = sorted(video_keys)
271
+
272
+ eps_metadata_per_cam = []
273
+ for camera in video_keys:
274
+ eps_metadata = convert_videos_of_camera(root, new_root, camera, video_file_size_in_mb)
275
+ eps_metadata_per_cam.append(eps_metadata)
276
+
277
+ num_eps_per_cam = [len(eps_cam_map) for eps_cam_map in eps_metadata_per_cam]
278
+ if len(set(num_eps_per_cam)) != 1:
279
+ raise ValueError(f"All cams dont have same number of episodes ({num_eps_per_cam}).")
280
+
281
+ episodes_metadata = []
282
+ num_cameras = len(video_keys)
283
+ num_episodes = num_eps_per_cam[0]
284
+ for ep_idx in tqdm.tqdm(range(num_episodes), desc="convert videos"):
285
+ # Sanity check
286
+ ep_ids = [eps_metadata_per_cam[cam_idx][ep_idx]["episode_index"] for cam_idx in range(num_cameras)]
287
+ ep_ids += [ep_idx]
288
+ if len(set(ep_ids)) != 1:
289
+ raise ValueError(f"All episode indices need to match ({ep_ids}).")
290
+
291
+ ep_dict = {}
292
+ for cam_idx in range(num_cameras):
293
+ ep_dict.update(eps_metadata_per_cam[cam_idx][ep_idx])
294
+ episodes_metadata.append(ep_dict)
295
+
296
+ return episodes_metadata
297
+
298
+
299
+ def convert_videos_of_camera(root: Path, new_root: Path, video_key: str, video_file_size_in_mb: int):
300
+ # Access old paths to mp4
301
+ videos_dir = root / "videos"
302
+ ep_paths = sorted(videos_dir.glob(f"*/{video_key}/*.mp4"))
303
+
304
+ ep_idx = 0
305
+ chunk_idx = 0
306
+ file_idx = 0
307
+ size_in_mb = 0
308
+ duration_in_s = 0.0
309
+ paths_to_cat = []
310
+ episodes_metadata = []
311
+
312
+ for ep_path in tqdm.tqdm(ep_paths, desc=f"convert videos of {video_key}"):
313
+ ep_size_in_mb = get_file_size_in_mb(ep_path)
314
+ ep_duration_in_s = get_video_duration_in_s(ep_path)
315
+
316
+ # Check if adding this episode would exceed the limit
317
+ if size_in_mb + ep_size_in_mb >= video_file_size_in_mb and len(paths_to_cat) > 0:
318
+ # Size limit would be exceeded, save current accumulation WITHOUT this episode
319
+ concatenate_video_files(
320
+ paths_to_cat,
321
+ new_root
322
+ / DEFAULT_VIDEO_PATH.format(video_key=video_key, chunk_index=chunk_idx, file_index=file_idx),
323
+ )
324
+
325
+ # Update episodes metadata for the file we just saved
326
+ for i, _ in enumerate(paths_to_cat):
327
+ past_ep_idx = ep_idx - len(paths_to_cat) + i
328
+ episodes_metadata[past_ep_idx][f"videos/{video_key}/chunk_index"] = chunk_idx
329
+ episodes_metadata[past_ep_idx][f"videos/{video_key}/file_index"] = file_idx
330
+
331
+ # Move to next file and start fresh with current episode
332
+ chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, DEFAULT_CHUNK_SIZE)
333
+ size_in_mb = 0
334
+ duration_in_s = 0.0
335
+ paths_to_cat = []
336
+
337
+ # Add current episode metadata
338
+ ep_metadata = {
339
+ "episode_index": ep_idx,
340
+ f"videos/{video_key}/chunk_index": chunk_idx, # Will be updated when file is saved
341
+ f"videos/{video_key}/file_index": file_idx, # Will be updated when file is saved
342
+ f"videos/{video_key}/from_timestamp": duration_in_s,
343
+ f"videos/{video_key}/to_timestamp": duration_in_s + ep_duration_in_s,
344
+ }
345
+ episodes_metadata.append(ep_metadata)
346
+
347
+ # Add current episode to accumulation
348
+ paths_to_cat.append(ep_path)
349
+ size_in_mb += ep_size_in_mb
350
+ duration_in_s += ep_duration_in_s
351
+ ep_idx += 1
352
+
353
+ # Write remaining videos if any
354
+ if paths_to_cat:
355
+ concatenate_video_files(
356
+ paths_to_cat,
357
+ new_root
358
+ / DEFAULT_VIDEO_PATH.format(video_key=video_key, chunk_index=chunk_idx, file_index=file_idx),
359
+ )
360
+
361
+ # Update episodes metadata for the final file
362
+ for i, _ in enumerate(paths_to_cat):
363
+ past_ep_idx = ep_idx - len(paths_to_cat) + i
364
+ episodes_metadata[past_ep_idx][f"videos/{video_key}/chunk_index"] = chunk_idx
365
+ episodes_metadata[past_ep_idx][f"videos/{video_key}/file_index"] = file_idx
366
+
367
+ return episodes_metadata
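+
+ # Illustrative outcome (placeholder sizes): with video_file_size_in_mb=500 and three ~200 MB episodes,
+ # episodes 0 and 1 are concatenated into the first video file and episode 2 starts the next one; the
+ # from_timestamp / to_timestamp entries then locate each episode inside its concatenated file.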
368
+
369
+
370
+ def generate_episode_metadata_dict(
371
+ episodes_legacy_metadata, episodes_metadata, episodes_stats, episodes_videos=None
372
+ ):
373
+ num_episodes = len(episodes_metadata)
374
+ episodes_legacy_metadata_vals = list(episodes_legacy_metadata.values())
375
+ episodes_stats_vals = list(episodes_stats.values())
376
+ episodes_stats_keys = list(episodes_stats.keys())
377
+
378
+ for i in range(num_episodes):
379
+ ep_legacy_metadata = episodes_legacy_metadata_vals[i]
380
+ ep_metadata = episodes_metadata[i]
381
+ ep_stats = episodes_stats_vals[i]
382
+
383
+ ep_ids_set = {
384
+ ep_legacy_metadata["episode_index"],
385
+ ep_metadata["episode_index"],
386
+ episodes_stats_keys[i],
387
+ }
388
+
389
+ if episodes_videos is None:
390
+ ep_video = {}
391
+ else:
392
+ ep_video = episodes_videos[i]
393
+ ep_ids_set.add(ep_video["episode_index"])
394
+
395
+ if len(ep_ids_set) != 1:
396
+ raise ValueError(f"Number of episodes is not the same ({ep_ids_set}).")
397
+
398
+ ep_dict = {**ep_metadata, **ep_video, **ep_legacy_metadata, **flatten_dict({"stats": ep_stats})}
399
+ ep_dict["meta/episodes/chunk_index"] = 0
400
+ ep_dict["meta/episodes/file_index"] = 0
401
+ yield ep_dict
402
+
403
+
404
+ def convert_episodes_metadata(root, new_root, episodes_metadata, episodes_video_metadata=None):
405
+ logging.info(f"Converting episodes metadata from {root} to {new_root}")
406
+
407
+ episodes_legacy_metadata = legacy_load_episodes(root)
408
+ episodes_stats = legacy_load_episodes_stats(root)
409
+
410
+ num_eps_set = {len(episodes_legacy_metadata), len(episodes_metadata)}
411
+ if episodes_video_metadata is not None:
412
+ num_eps_set.add(len(episodes_video_metadata))
413
+
414
+ if len(num_eps_set) != 1:
415
+ raise ValueError(f"Number of episodes is not the same ({num_eps_set}).")
416
+
417
+ ds_episodes = Dataset.from_generator(
418
+ lambda: generate_episode_metadata_dict(
419
+ episodes_legacy_metadata, episodes_metadata, episodes_stats, episodes_video_metadata
420
+ )
421
+ )
422
+ write_episodes(ds_episodes, new_root)
423
+
424
+ stats = aggregate_stats(list(episodes_stats.values()))
425
+ write_stats(stats, new_root)
426
+
427
+
428
+ def convert_info(root, new_root, data_file_size_in_mb, video_file_size_in_mb):
429
+ info = load_info(root)
430
+ info["codebase_version"] = V30
431
+ del info["total_chunks"]
432
+ del info["total_videos"]
433
+ info["data_files_size_in_mb"] = data_file_size_in_mb
434
+ info["video_files_size_in_mb"] = video_file_size_in_mb
435
+ info["data_path"] = DEFAULT_DATA_PATH
436
+ info["video_path"] = DEFAULT_VIDEO_PATH if info["video_path"] is not None else None
437
+ info["fps"] = int(info["fps"])
438
+ logging.info(f"Converting info from {root} to {new_root}")
439
+ for key in info["features"]:
440
+ if info["features"][key]["dtype"] == "video":
441
+ # already has fps in video_info
442
+ continue
443
+ info["features"][key]["fps"] = info["fps"]
444
+ write_info(info, new_root)
445
+
446
+
447
+ def convert_dataset(
448
+ repo_id: str,
449
+ branch: str | None = None,
450
+ data_file_size_in_mb: int | None = None,
451
+ video_file_size_in_mb: int | None = None,
452
+ root: str | Path | None = None,
453
+ push_to_hub: bool = True,
454
+ force_conversion: bool = False,
455
+ ):
456
+ if data_file_size_in_mb is None:
457
+ data_file_size_in_mb = DEFAULT_DATA_FILE_SIZE_IN_MB
458
+ if video_file_size_in_mb is None:
459
+ video_file_size_in_mb = DEFAULT_VIDEO_FILE_SIZE_IN_MB
460
+
461
+ # First check if the dataset already has a v3.0 version
462
+ if root is None and not force_conversion:
463
+ try:
464
+ print("Trying to download v3.0 version of the dataset from the hub...")
465
+ snapshot_download(repo_id, repo_type="dataset", revision=V30, local_dir=HF_LEROBOT_HOME / repo_id)
466
+ return
467
+ except Exception:
468
+ print("Dataset does not have an uploaded v3.0 version. Continuing with conversion.")
469
+
470
+ # Set root based on whether local dataset path is provided
471
+ use_local_dataset = False
472
+ root = HF_LEROBOT_HOME / repo_id if root is None else Path(root) / repo_id
473
+ if root.exists():
474
+ validate_local_dataset_version(root)
475
+ use_local_dataset = True
476
+ print(f"Using local dataset at {root}")
477
+
478
+ old_root = root.parent / f"{root.name}_old"
479
+ new_root = root.parent / f"{root.name}_v30"
480
+
481
+ # Handle old_root cleanup if both old_root and root exist
482
+ if old_root.is_dir() and root.is_dir():
483
+ shutil.rmtree(str(root))
484
+ shutil.move(str(old_root), str(root))
485
+
486
+ if new_root.is_dir():
487
+ shutil.rmtree(new_root)
488
+
489
+ if not use_local_dataset:
490
+ snapshot_download(
491
+ repo_id,
492
+ repo_type="dataset",
493
+ revision=V21,
494
+ local_dir=root,
495
+ )
496
+
497
+ convert_info(root, new_root, data_file_size_in_mb, video_file_size_in_mb)
498
+ convert_tasks(root, new_root)
499
+ episodes_metadata = convert_data(root, new_root, data_file_size_in_mb)
500
+ episodes_videos_metadata = convert_videos(root, new_root, video_file_size_in_mb)
501
+ convert_episodes_metadata(root, new_root, episodes_metadata, episodes_videos_metadata)
502
+
503
+ shutil.move(str(root), str(old_root))
504
+ shutil.move(str(new_root), str(root))
505
+
506
+ if push_to_hub:
507
+ hub_api = HfApi()
508
+ try:
509
+ hub_api.delete_tag(repo_id, tag=CODEBASE_VERSION, repo_type="dataset")
510
+ except HTTPError as e:
511
+ print(f"tag={CODEBASE_VERSION} probably doesn't exist. Skipping exception ({e})")
512
+ pass
513
+ hub_api.delete_files(
514
+ delete_patterns=["data/chunk*/episode_*", "meta/*.jsonl", "videos/chunk*"],
515
+ repo_id=repo_id,
516
+ revision=branch,
517
+ repo_type="dataset",
518
+ )
519
+ hub_api.create_tag(repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset")
520
+
521
+ LeRobotDataset(repo_id).push_to_hub()
522
+
523
+
524
+ if __name__ == "__main__":
525
+ init_logging()
526
+ parser = argparse.ArgumentParser()
527
+ parser.add_argument(
528
+ "--repo-id",
529
+ type=str,
530
+ required=True,
531
+ help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset "
532
+ "(e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).",
533
+ )
534
+ parser.add_argument(
535
+ "--branch",
536
+ type=str,
537
+ default=None,
538
+ help="Repo branch to push your dataset. Defaults to the main branch.",
539
+ )
540
+ parser.add_argument(
541
+ "--data-file-size-in-mb",
542
+ type=int,
543
+ default=None,
544
+ help="File size in MB. Defaults to 100 for data and 500 for videos.",
545
+ )
546
+ parser.add_argument(
547
+ "--video-file-size-in-mb",
548
+ type=int,
549
+ default=None,
550
+ help="File size in MB. Defaults to 100 for data and 500 for videos.",
551
+ )
552
+ parser.add_argument(
553
+ "--root",
554
+ type=str,
555
+ default=None,
556
+ help="Local directory to use for downloading/writing the dataset.",
557
+ )
558
+ parser.add_argument(
559
+ "--push-to-hub",
560
+ type=lambda input: input.lower() == "true",
561
+ default=True,
562
+ help="Push the converted dataset to the hub.",
563
+ )
564
+ parser.add_argument(
565
+ "--force-conversion",
566
+ action="store_true",
567
+ help="Force conversion even if the dataset already has a v3.0 version.",
568
+ )
569
+
570
+ args = parser.parse_args()
571
+ convert_dataset(**vars(args))
lerobot/src/lerobot/motors/dynamixel/__init__.py ADDED
@@ -0,0 +1,18 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from .dynamixel import DriveMode, DynamixelMotorsBus, OperatingMode, TorqueMode
18
+ from .tables import *
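+
+
+ # Illustrative import of the re-exported names (assuming the package path of this file):
+ #   from lerobot.motors.dynamixel import DynamixelMotorsBus, OperatingMode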
lerobot/src/lerobot/motors/dynamixel/dynamixel.py ADDED
@@ -0,0 +1,264 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # TODO(aliberts): Should we implement FastSyncRead/Write?
16
+ # https://github.com/ROBOTIS-GIT/DynamixelSDK/pull/643
17
+ # https://github.com/ROBOTIS-GIT/DynamixelSDK/releases/tag/3.8.2
18
+ # https://emanual.robotis.com/docs/en/dxl/protocol2/#fast-sync-read-0x8a
19
+ # -> Need to check compatibility across models
20
+
21
+ import logging
22
+ from copy import deepcopy
23
+ from enum import Enum
24
+
25
+ from lerobot.motors.encoding_utils import decode_twos_complement, encode_twos_complement
26
+
27
+ from ..motors_bus import Motor, MotorCalibration, MotorsBus, NameOrID, Value, get_address
28
+ from .tables import (
29
+ AVAILABLE_BAUDRATES,
30
+ MODEL_BAUDRATE_TABLE,
31
+ MODEL_CONTROL_TABLE,
32
+ MODEL_ENCODING_TABLE,
33
+ MODEL_NUMBER_TABLE,
34
+ MODEL_RESOLUTION,
35
+ )
36
+
37
+ PROTOCOL_VERSION = 2.0
38
+ DEFAULT_BAUDRATE = 1_000_000
39
+ DEFAULT_TIMEOUT_MS = 1000
40
+
41
+ NORMALIZED_DATA = ["Goal_Position", "Present_Position"]
42
+
43
+ logger = logging.getLogger(__name__)
44
+
45
+
46
+ class OperatingMode(Enum):
47
+ # DYNAMIXEL only controls current(torque) regardless of speed and position. This mode is ideal for a
48
+ # gripper or a system that only uses current(torque) control or a system that has additional
49
+ # velocity/position controllers.
50
+ CURRENT = 0
51
+
52
+ # This mode controls velocity. This mode is identical to the Wheel Mode(endless) from existing DYNAMIXEL.
53
+ # This mode is ideal for wheel-type robots.
54
+ VELOCITY = 1
55
+
56
+ # This mode controls position. This mode is identical to the Joint Mode from existing DYNAMIXEL. Operating
57
+ # position range is limited by the Max Position Limit(48) and the Min Position Limit(52). This mode is
58
+ # ideal for articulated robots in which each joint rotates less than 360 degrees.
59
+ POSITION = 3
60
+
61
+ # This mode controls position. This mode is identical to the Multi-turn Position Control from existing
62
+ # DYNAMIXEL. 512 turns are supported(-256[rev] ~ 256[rev]). This mode is ideal for multi-turn wrists or
63
+ # conveyor systems or a system that requires an additional reduction gear. Note that Max Position
64
+ # Limit(48), Min Position Limit(52) are not used on Extended Position Control Mode.
65
+ EXTENDED_POSITION = 4
66
+
67
+ # This mode controls both position and current(torque). Up to 512 turns are supported (-256[rev] ~
68
+ # 256[rev]). This mode is ideal for a system that requires both position and current control such as
69
+ # articulated robots or grippers.
70
+ CURRENT_POSITION = 5
71
+
72
+ # This mode directly controls PWM output. (Voltage Control Mode)
73
+ PWM = 16
74
+
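+ # Illustrative use of these modes (a sketch; assumes a connected bus and uses the
+ # MotorsBus.write(data_name, motor, value) call pattern seen in configure_motors below).
+ # Torque usually has to be disabled before changing an EEPROM entry such as Operating_Mode:
+ #   bus.disable_torque("gripper")
+ #   bus.write("Operating_Mode", "gripper", OperatingMode.CURRENT_POSITION.value)
+ #   bus.enable_torque("gripper")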
75
+
76
+ class DriveMode(Enum):
77
+ NON_INVERTED = 0
78
+ INVERTED = 1
79
+
80
+
81
+ class TorqueMode(Enum):
82
+ ENABLED = 1
83
+ DISABLED = 0
84
+
85
+
86
+ def _split_into_byte_chunks(value: int, length: int) -> list[int]:
87
+ import dynamixel_sdk as dxl
88
+
89
+ if length == 1:
90
+ data = [value]
91
+ elif length == 2:
92
+ data = [dxl.DXL_LOBYTE(value), dxl.DXL_HIBYTE(value)]
93
+ elif length == 4:
94
+ data = [
95
+ dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
96
+ dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)),
97
+ dxl.DXL_LOBYTE(dxl.DXL_HIWORD(value)),
98
+ dxl.DXL_HIBYTE(dxl.DXL_HIWORD(value)),
99
+ ]
100
+ return data
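+
+ # Worked example of the little-endian split above (values chosen for illustration):
+ #   _split_into_byte_chunks(0x12345678, length=4) -> [0x78, 0x56, 0x34, 0x12]
+ #   _split_into_byte_chunks(0x1234, length=2)     -> [0x34, 0x12]
+ # Only lengths 1, 2 and 4 are expected, matching the entry sizes in tables.py.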
101
+
102
+
103
+ class DynamixelMotorsBus(MotorsBus):
104
+ """
105
+ The Dynamixel implementation for a MotorsBus. It relies on the Python Dynamixel SDK to communicate with
106
+ the motors. For more info, see the Dynamixel SDK Documentation:
107
+ https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20
108
+ """
109
+
110
+ apply_drive_mode = False
111
+ available_baudrates = deepcopy(AVAILABLE_BAUDRATES)
112
+ default_baudrate = DEFAULT_BAUDRATE
113
+ default_timeout = DEFAULT_TIMEOUT_MS
114
+ model_baudrate_table = deepcopy(MODEL_BAUDRATE_TABLE)
115
+ model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE)
116
+ model_encoding_table = deepcopy(MODEL_ENCODING_TABLE)
117
+ model_number_table = deepcopy(MODEL_NUMBER_TABLE)
118
+ model_resolution_table = deepcopy(MODEL_RESOLUTION)
119
+ normalized_data = deepcopy(NORMALIZED_DATA)
120
+
121
+ def __init__(
122
+ self,
123
+ port: str,
124
+ motors: dict[str, Motor],
125
+ calibration: dict[str, MotorCalibration] | None = None,
126
+ ):
127
+ super().__init__(port, motors, calibration)
128
+ import dynamixel_sdk as dxl
129
+
130
+ self.port_handler = dxl.PortHandler(self.port)
131
+ self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION)
132
+ self.sync_reader = dxl.GroupSyncRead(self.port_handler, self.packet_handler, 0, 0)
133
+ self.sync_writer = dxl.GroupSyncWrite(self.port_handler, self.packet_handler, 0, 0)
134
+ self._comm_success = dxl.COMM_SUCCESS
135
+ self._no_error = 0x00
136
+
137
+ def _assert_protocol_is_compatible(self, instruction_name: str) -> None:
138
+ pass
139
+
140
+ def _handshake(self) -> None:
141
+ self._assert_motors_exist()
142
+
143
+ def _find_single_motor(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
144
+ model = self.motors[motor].model
145
+ search_baudrates = (
146
+ [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model]
147
+ )
148
+
149
+ for baudrate in search_baudrates:
150
+ self.set_baudrate(baudrate)
151
+ id_model = self.broadcast_ping()
152
+ if id_model:
153
+ found_id, found_model = next(iter(id_model.items()))
154
+ expected_model_nb = self.model_number_table[model]
155
+ if found_model != expected_model_nb:
156
+ raise RuntimeError(
157
+ f"Found one motor on {baudrate=} with id={found_id} but it has a "
158
+ f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. "
159
+ f"Make sure you are connected only connected to the '{motor}' motor (model '{model}')."
160
+ )
161
+ return baudrate, found_id
162
+
163
+ raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. Make sure it is connected.")
164
+
165
+ def configure_motors(self, return_delay_time=0) -> None:
166
+ # By default, Dynamixel motors have a 500µs delay response time (corresponding to a value of 250 on
167
+ # the 'Return_Delay_Time' address). We ensure this is reduced to the minimum of 2µs (value of 0).
168
+ for motor in self.motors:
169
+ self.write("Return_Delay_Time", motor, return_delay_time)
170
+
171
+ @property
172
+ def is_calibrated(self) -> bool:
173
+ return self.calibration == self.read_calibration()
174
+
175
+ def read_calibration(self) -> dict[str, MotorCalibration]:
176
+ offsets = self.sync_read("Homing_Offset", normalize=False)
177
+ mins = self.sync_read("Min_Position_Limit", normalize=False)
178
+ maxes = self.sync_read("Max_Position_Limit", normalize=False)
179
+ drive_modes = self.sync_read("Drive_Mode", normalize=False)
180
+
181
+ calibration = {}
182
+ for motor, m in self.motors.items():
183
+ calibration[motor] = MotorCalibration(
184
+ id=m.id,
185
+ drive_mode=drive_modes[motor],
186
+ homing_offset=offsets[motor],
187
+ range_min=mins[motor],
188
+ range_max=maxes[motor],
189
+ )
190
+
191
+ return calibration
192
+
193
+ def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None:
194
+ for motor, calibration in calibration_dict.items():
195
+ self.write("Homing_Offset", motor, calibration.homing_offset)
196
+ self.write("Min_Position_Limit", motor, calibration.range_min)
197
+ self.write("Max_Position_Limit", motor, calibration.range_max)
198
+
199
+ if cache:
200
+ self.calibration = calibration_dict
201
+
202
+ def disable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None:
203
+ for motor in self._get_motors_list(motors):
204
+ self.write("Torque_Enable", motor, TorqueMode.DISABLED.value, num_retry=num_retry)
205
+
206
+ def _disable_torque(self, motor_id: int, model: str, num_retry: int = 0) -> None:
207
+ addr, length = get_address(self.model_ctrl_table, model, "Torque_Enable")
208
+ self._write(addr, length, motor_id, TorqueMode.DISABLED.value, num_retry=num_retry)
209
+
210
+ def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None:
211
+ for motor in self._get_motors_list(motors):
212
+ self.write("Torque_Enable", motor, TorqueMode.ENABLED.value, num_retry=num_retry)
213
+
214
+ def _encode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]:
215
+ for id_ in ids_values:
216
+ model = self._id_to_model(id_)
217
+ encoding_table = self.model_encoding_table.get(model)
218
+ if encoding_table and data_name in encoding_table:
219
+ n_bytes = encoding_table[data_name]
220
+ ids_values[id_] = encode_twos_complement(ids_values[id_], n_bytes)
221
+
222
+ return ids_values
223
+
224
+ def _decode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]:
225
+ for id_ in ids_values:
226
+ model = self._id_to_model(id_)
227
+ encoding_table = self.model_encoding_table.get(model)
228
+ if encoding_table and data_name in encoding_table:
229
+ n_bytes = encoding_table[data_name]
230
+ ids_values[id_] = decode_twos_complement(ids_values[id_], n_bytes)
231
+
232
+ return ids_values
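+
+ # Worked example of the two's-complement handling above: Homing_Offset is 4 bytes wide
+ # (see MODEL_ENCODING_TABLE), so encode_twos_complement(-1024, 4) == 2**32 - 1024 == 4294966272
+ # and decode_twos_complement(4294966272, 4) == -1024.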
233
+
234
+ def _get_half_turn_homings(self, positions: dict[NameOrID, Value]) -> dict[NameOrID, Value]:
235
+ """
236
+ On Dynamixel Motors:
237
+ Present_Position = Actual_Position + Homing_Offset
238
+ """
239
+ half_turn_homings = {}
240
+ for motor, pos in positions.items():
241
+ model = self._get_motor_model(motor)
242
+ max_res = self.model_resolution_table[model] - 1
243
+ half_turn_homings[motor] = int(max_res / 2) - pos
244
+
245
+ return half_turn_homings
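+
+ # Worked example (illustrative): a 4096-step motor reading a raw position of 1000 gets a homing
+ # offset of int(4095 / 2) - 1000 = 2047 - 1000 = 1047, so Present_Position = 1000 + 1047 = 2047,
+ # i.e. the middle of the range (half a turn).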
246
+
247
+ def _split_into_byte_chunks(self, value: int, length: int) -> list[int]:
248
+ return _split_into_byte_chunks(value, length)
249
+
250
+ def broadcast_ping(self, num_retry: int = 0, raise_on_error: bool = False) -> dict[int, int] | None:
251
+ for n_try in range(1 + num_retry):
252
+ data_list, comm = self.packet_handler.broadcastPing(self.port_handler)
253
+ if self._is_comm_success(comm):
254
+ break
255
+ logger.debug(f"Broadcast ping failed on port '{self.port}' ({n_try=})")
256
+ logger.debug(self.packet_handler.getTxRxResult(comm))
257
+
258
+ if not self._is_comm_success(comm):
259
+ if raise_on_error:
260
+ raise ConnectionError(self.packet_handler.getTxRxResult(comm))
261
+
262
+ return
263
+
264
+ return {id_: data[0] for id_, data in data_list.items()}
lerobot/src/lerobot/motors/dynamixel/tables.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # TODO(Steven): Consider doing the following:
16
+ # from enum import Enum
17
+ # class MyControlTableKey(Enum):
18
+ # ID = "ID"
19
+ # GOAL_SPEED = "Goal_Speed"
20
+ # ...
21
+ #
22
+ # MY_CONTROL_TABLE ={
23
+ # MyControlTableKey.ID.value: (5,1)
24
+ # MyControlTableKey.GOAL_SPEED.value: (46, 2)
25
+ # ...
26
+ # }
27
+ # This allows me to do:
28
+ # bus.write(MyControlTableKey.GOAL_SPEED, ...)
29
+ # Instead of:
30
+ # bus.write("Goal_Speed", ...)
31
+ # This is important for two reasons:
32
+ # 1. The linter will tell me if I'm trying to use an invalid key, instead of me realizing when I get the RuntimeError
33
+ # 2. We can change the value of the MyControlTableKey enums without impacting the client code
34
+
35
+
36
+ # {data_name: (address, size_byte)}
37
+ # https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#control-table
38
+ X_SERIES_CONTROL_TABLE = {
39
+ "Model_Number": (0, 2),
40
+ "Model_Information": (2, 4),
41
+ "Firmware_Version": (6, 1),
42
+ "ID": (7, 1),
43
+ "Baud_Rate": (8, 1),
44
+ "Return_Delay_Time": (9, 1),
45
+ "Drive_Mode": (10, 1),
46
+ "Operating_Mode": (11, 1),
47
+ "Secondary_ID": (12, 1),
48
+ "Protocol_Type": (13, 1),
49
+ "Homing_Offset": (20, 4),
50
+ "Moving_Threshold": (24, 4),
51
+ "Temperature_Limit": (31, 1),
52
+ "Max_Voltage_Limit": (32, 2),
53
+ "Min_Voltage_Limit": (34, 2),
54
+ "PWM_Limit": (36, 2),
55
+ "Current_Limit": (38, 2),
56
+ "Acceleration_Limit": (40, 4),
57
+ "Velocity_Limit": (44, 4),
58
+ "Max_Position_Limit": (48, 4),
59
+ "Min_Position_Limit": (52, 4),
60
+ "Shutdown": (63, 1),
61
+ "Torque_Enable": (64, 1),
62
+ "LED": (65, 1),
63
+ "Status_Return_Level": (68, 1),
64
+ "Registered_Instruction": (69, 1),
65
+ "Hardware_Error_Status": (70, 1),
66
+ "Velocity_I_Gain": (76, 2),
67
+ "Velocity_P_Gain": (78, 2),
68
+ "Position_D_Gain": (80, 2),
69
+ "Position_I_Gain": (82, 2),
70
+ "Position_P_Gain": (84, 2),
71
+ "Feedforward_2nd_Gain": (88, 2),
72
+ "Feedforward_1st_Gain": (90, 2),
73
+ "Bus_Watchdog": (98, 1),
74
+ "Goal_PWM": (100, 2),
75
+ "Goal_Current": (102, 2),
76
+ "Goal_Velocity": (104, 4),
77
+ "Profile_Acceleration": (108, 4),
78
+ "Profile_Velocity": (112, 4),
79
+ "Goal_Position": (116, 4),
80
+ "Realtime_Tick": (120, 2),
81
+ "Moving": (122, 1),
82
+ "Moving_Status": (123, 1),
83
+ "Present_PWM": (124, 2),
84
+ "Present_Current": (126, 2),
85
+ "Present_Velocity": (128, 4),
86
+ "Present_Position": (132, 4),
87
+ "Velocity_Trajectory": (136, 4),
88
+ "Position_Trajectory": (140, 4),
89
+ "Present_Input_Voltage": (144, 2),
90
+ "Present_Temperature": (146, 1),
91
+ }
92
+
93
+ # https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#baud-rate8
94
+ X_SERIES_BAUDRATE_TABLE = {
95
+ 9_600: 0,
96
+ 57_600: 1,
97
+ 115_200: 2,
98
+ 1_000_000: 3,
99
+ 2_000_000: 4,
100
+ 3_000_000: 5,
101
+ 4_000_000: 6,
102
+ }
103
+
104
+ # {data_name: size_byte}
105
+ X_SERIES_ENCODINGS_TABLE = {
106
+ "Homing_Offset": X_SERIES_CONTROL_TABLE["Homing_Offset"][1],
107
+ "Goal_PWM": X_SERIES_CONTROL_TABLE["Goal_PWM"][1],
108
+ "Goal_Current": X_SERIES_CONTROL_TABLE["Goal_Current"][1],
109
+ "Goal_Velocity": X_SERIES_CONTROL_TABLE["Goal_Velocity"][1],
110
+ "Goal_Position": X_SERIES_CONTROL_TABLE["Goal_Position"][1],
111
+ "Present_Position": X_SERIES_CONTROL_TABLE["Present_Position"][1],
112
+ "Present_PWM": X_SERIES_CONTROL_TABLE["Present_PWM"][1],
113
+ "Present_Current": X_SERIES_CONTROL_TABLE["Present_Current"][1],
114
+ "Present_Velocity": X_SERIES_CONTROL_TABLE["Present_Velocity"][1],
115
+ }
116
+
117
+ MODEL_ENCODING_TABLE = {
118
+ "x_series": X_SERIES_ENCODINGS_TABLE,
119
+ "xl330-m077": X_SERIES_ENCODINGS_TABLE,
120
+ "xl330-m288": X_SERIES_ENCODINGS_TABLE,
121
+ "xl430-w250": X_SERIES_ENCODINGS_TABLE,
122
+ "xm430-w350": X_SERIES_ENCODINGS_TABLE,
123
+ "xm540-w270": X_SERIES_ENCODINGS_TABLE,
124
+ "xc430-w150": X_SERIES_ENCODINGS_TABLE,
125
+ }
126
+
127
+ # {model: model_resolution}
128
+ # https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#specifications
129
+ MODEL_RESOLUTION = {
130
+ "x_series": 4096,
131
+ "xl330-m077": 4096,
132
+ "xl330-m288": 4096,
133
+ "xl430-w250": 4096,
134
+ "xm430-w350": 4096,
135
+ "xm540-w270": 4096,
136
+ "xc430-w150": 4096,
137
+ }
138
+
139
+ # {model: model_number}
140
+ # https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#control-table-of-eeprom-area
141
+ MODEL_NUMBER_TABLE = {
142
+ "xl330-m077": 1190,
143
+ "xl330-m288": 1200,
144
+ "xl430-w250": 1060,
145
+ "xm430-w350": 1020,
146
+ "xm540-w270": 1120,
147
+ "xc430-w150": 1070,
148
+ }
149
+
150
+ # {model: available_operating_modes}
151
+ # https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#operating-mode11
152
+ MODEL_OPERATING_MODES = {
153
+ "xl330-m077": [0, 1, 3, 4, 5, 16],
154
+ "xl330-m288": [0, 1, 3, 4, 5, 16],
155
+ "xl430-w250": [1, 3, 4, 16],
156
+ "xm430-w350": [0, 1, 3, 4, 5, 16],
157
+ "xm540-w270": [0, 1, 3, 4, 5, 16],
158
+ "xc430-w150": [1, 3, 4, 16],
159
+ }
160
+
161
+ MODEL_CONTROL_TABLE = {
162
+ "x_series": X_SERIES_CONTROL_TABLE,
163
+ "xl330-m077": X_SERIES_CONTROL_TABLE,
164
+ "xl330-m288": X_SERIES_CONTROL_TABLE,
165
+ "xl430-w250": X_SERIES_CONTROL_TABLE,
166
+ "xm430-w350": X_SERIES_CONTROL_TABLE,
167
+ "xm540-w270": X_SERIES_CONTROL_TABLE,
168
+ "xc430-w150": X_SERIES_CONTROL_TABLE,
169
+ }
170
+
171
+ MODEL_BAUDRATE_TABLE = {
172
+ "x_series": X_SERIES_BAUDRATE_TABLE,
173
+ "xl330-m077": X_SERIES_BAUDRATE_TABLE,
174
+ "xl330-m288": X_SERIES_BAUDRATE_TABLE,
175
+ "xl430-w250": X_SERIES_BAUDRATE_TABLE,
176
+ "xm430-w350": X_SERIES_BAUDRATE_TABLE,
177
+ "xm540-w270": X_SERIES_BAUDRATE_TABLE,
178
+ "xc430-w150": X_SERIES_BAUDRATE_TABLE,
179
+ }
180
+
181
+ AVAILABLE_BAUDRATES = [
182
+ 9_600,
183
+ 19_200,
184
+ 38_400,
185
+ 57_600,
186
+ 115_200,
187
+ 230_400,
188
+ 460_800,
189
+ 500_000,
190
+ 576_000,
191
+ 921_600,
192
+ 1_000_000,
193
+ 1_152_000,
194
+ 2_000_000,
195
+ 2_500_000,
196
+ 3_000_000,
197
+ 3_500_000,
198
+ 4_000_000,
199
+ ]
lerobot/src/lerobot/motors/feetech/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from .feetech import DriveMode, FeetechMotorsBus, OperatingMode, TorqueMode
18
+ from .tables import *
lerobot/src/lerobot/motors/feetech/feetech.py ADDED
@@ -0,0 +1,455 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ from copy import deepcopy
17
+ from enum import Enum
18
+ from pprint import pformat
19
+
20
+ from lerobot.motors.encoding_utils import decode_sign_magnitude, encode_sign_magnitude
21
+
22
+ from ..motors_bus import Motor, MotorCalibration, MotorsBus, NameOrID, Value, get_address
23
+ from .tables import (
24
+ FIRMWARE_MAJOR_VERSION,
25
+ FIRMWARE_MINOR_VERSION,
26
+ MODEL_BAUDRATE_TABLE,
27
+ MODEL_CONTROL_TABLE,
28
+ MODEL_ENCODING_TABLE,
29
+ MODEL_NUMBER,
30
+ MODEL_NUMBER_TABLE,
31
+ MODEL_PROTOCOL,
32
+ MODEL_RESOLUTION,
33
+ SCAN_BAUDRATES,
34
+ )
35
+
36
+ DEFAULT_PROTOCOL_VERSION = 0
37
+ DEFAULT_BAUDRATE = 1_000_000
38
+ DEFAULT_TIMEOUT_MS = 1000
39
+
40
+ NORMALIZED_DATA = ["Goal_Position", "Present_Position"]
41
+
42
+ logger = logging.getLogger(__name__)
43
+
44
+
45
+ class OperatingMode(Enum):
46
+ # position servo mode
47
+ POSITION = 0
48
+ # The motor is in constant speed mode, which is controlled by parameter 0x2e, and the highest bit 15 is
49
+ # the direction bit
50
+ VELOCITY = 1
51
+ # PWM open-loop speed regulation mode, with parameter 0x2c running time parameter control, bit11 as
52
+ # direction bit
53
+ PWM = 2
54
+ # In step servo mode, the number of step progress is represented by parameter 0x2a, and the highest bit 15
55
+ # is the direction bit
56
+ STEP = 3
57
+
58
+
59
+ class DriveMode(Enum):
60
+ NON_INVERTED = 0
61
+ INVERTED = 1
62
+
63
+
64
+ class TorqueMode(Enum):
65
+ ENABLED = 1
66
+ DISABLED = 0
67
+
68
+
69
+ def _split_into_byte_chunks(value: int, length: int) -> list[int]:
70
+ import scservo_sdk as scs
71
+
72
+ if length == 1:
73
+ data = [value]
74
+ elif length == 2:
75
+ data = [scs.SCS_LOBYTE(value), scs.SCS_HIBYTE(value)]
76
+ elif length == 4:
77
+ data = [
78
+ scs.SCS_LOBYTE(scs.SCS_LOWORD(value)),
79
+ scs.SCS_HIBYTE(scs.SCS_LOWORD(value)),
80
+ scs.SCS_LOBYTE(scs.SCS_HIWORD(value)),
81
+ scs.SCS_HIBYTE(scs.SCS_HIWORD(value)),
82
+ ]
83
+ return data
84
+
85
+
86
+ def patch_setPacketTimeout(self, packet_length): # noqa: N802
87
+ """
88
+ HACK: This patches the PortHandler behavior to set the correct packet timeouts.
89
+
90
+ It fixes https://gitee.com/ftservo/SCServoSDK/issues/IBY2S6
91
+ The bug is fixed on the official Feetech SDK repo (https://gitee.com/ftservo/FTServo_Python)
92
+ but because that version is not published on PyPI, we rely on the (unofficial) one that is, which needs
93
+ patching.
94
+ """
95
+ self.packet_start_time = self.getCurrentTime()
96
+ self.packet_timeout = (self.tx_time_per_byte * packet_length) + (self.tx_time_per_byte * 3.0) + 50
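+
+ # Rough numbers (illustrative, assuming the SDK's usual tx_time_per_byte of
+ # (1000 / baudrate) * 10 ms per byte, the same formula used in _broadcast_ping below):
+ # at 1 Mbps a 10-byte packet gets 0.01 * 10 + 0.01 * 3 + 50 ≈ 50.13 ms before timing out.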
97
+
98
+
99
+ class FeetechMotorsBus(MotorsBus):
100
+ """
101
+ The FeetechMotorsBus class allows efficient reading from and writing to the attached motors. It relies
102
+ on the Python Feetech SDK to communicate with the motors, which is itself based on the Dynamixel SDK.
103
+ """
104
+
105
+ apply_drive_mode = True
106
+ available_baudrates = deepcopy(SCAN_BAUDRATES)
107
+ default_baudrate = DEFAULT_BAUDRATE
108
+ default_timeout = DEFAULT_TIMEOUT_MS
109
+ model_baudrate_table = deepcopy(MODEL_BAUDRATE_TABLE)
110
+ model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE)
111
+ model_encoding_table = deepcopy(MODEL_ENCODING_TABLE)
112
+ model_number_table = deepcopy(MODEL_NUMBER_TABLE)
113
+ model_resolution_table = deepcopy(MODEL_RESOLUTION)
114
+ normalized_data = deepcopy(NORMALIZED_DATA)
115
+
116
+ def __init__(
117
+ self,
118
+ port: str,
119
+ motors: dict[str, Motor],
120
+ calibration: dict[str, MotorCalibration] | None = None,
121
+ protocol_version: int = DEFAULT_PROTOCOL_VERSION,
122
+ ):
123
+ super().__init__(port, motors, calibration)
124
+ self.protocol_version = protocol_version
125
+ self._assert_same_protocol()
126
+ import scservo_sdk as scs
127
+
128
+ self.port_handler = scs.PortHandler(self.port)
129
+ # HACK: monkeypatch
130
+ self.port_handler.setPacketTimeout = patch_setPacketTimeout.__get__(
131
+ self.port_handler, scs.PortHandler
132
+ )
133
+ self.packet_handler = scs.PacketHandler(protocol_version)
134
+ self.sync_reader = scs.GroupSyncRead(self.port_handler, self.packet_handler, 0, 0)
135
+ self.sync_writer = scs.GroupSyncWrite(self.port_handler, self.packet_handler, 0, 0)
136
+ self._comm_success = scs.COMM_SUCCESS
137
+ self._no_error = 0x00
138
+
139
+ if any(MODEL_PROTOCOL[model] != self.protocol_version for model in self.models):
140
+ raise ValueError(f"Some motors are incompatible with protocol_version={self.protocol_version}")
141
+
142
+ def _assert_same_protocol(self) -> None:
143
+ if any(MODEL_PROTOCOL[model] != self.protocol_version for model in self.models):
144
+ raise RuntimeError("Some motors use an incompatible protocol.")
145
+
146
+ def _assert_protocol_is_compatible(self, instruction_name: str) -> None:
147
+ if instruction_name == "sync_read" and self.protocol_version == 1:
148
+ raise NotImplementedError(
149
+ "'Sync Read' is not available with Feetech motors using Protocol 1. Use 'Read' sequentially instead."
150
+ )
151
+ if instruction_name == "broadcast_ping" and self.protocol_version == 1:
152
+ raise NotImplementedError(
153
+ "'Broadcast Ping' is not available with Feetech motors using Protocol 1. Use 'Ping' sequentially instead."
154
+ )
155
+
156
+ def _assert_same_firmware(self) -> None:
157
+ firmware_versions = self._read_firmware_version(self.ids, raise_on_error=True)
158
+ if len(set(firmware_versions.values())) != 1:
159
+ raise RuntimeError(
160
+ "Some Motors use different firmware versions:"
161
+ f"\n{pformat(firmware_versions)}\n"
162
+ "Update their firmware first using Feetech's software. "
163
+ "Visit https://www.feetechrc.com/software."
164
+ )
165
+
166
+ def _handshake(self) -> None:
167
+ self._assert_motors_exist()
168
+ self._assert_same_firmware()
169
+
170
+ def _find_single_motor(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
171
+ if self.protocol_version == 0:
172
+ return self._find_single_motor_p0(motor, initial_baudrate)
173
+ else:
174
+ return self._find_single_motor_p1(motor, initial_baudrate)
175
+
176
+ def _find_single_motor_p0(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
177
+ model = self.motors[motor].model
178
+ search_baudrates = (
179
+ [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model]
180
+ )
181
+ expected_model_nb = self.model_number_table[model]
182
+
183
+ for baudrate in search_baudrates:
184
+ self.set_baudrate(baudrate)
185
+ id_model = self.broadcast_ping()
186
+ if id_model:
187
+ found_id, found_model = next(iter(id_model.items()))
188
+ if found_model != expected_model_nb:
189
+ raise RuntimeError(
190
+ f"Found one motor on {baudrate=} with id={found_id} but it has a "
191
+ f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. "
192
+ f"Make sure you are connected only connected to the '{motor}' motor (model '{model}')."
193
+ )
194
+ return baudrate, found_id
195
+
196
+ raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. Make sure it is connected.")
197
+
198
+ def _find_single_motor_p1(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
199
+ import scservo_sdk as scs
200
+
201
+ model = self.motors[motor].model
202
+ search_baudrates = (
203
+ [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model]
204
+ )
205
+ expected_model_nb = self.model_number_table[model]
206
+
207
+ for baudrate in search_baudrates:
208
+ self.set_baudrate(baudrate)
209
+ for id_ in range(scs.MAX_ID + 1):
210
+ found_model = self.ping(id_)
211
+ if found_model is not None:
212
+ if found_model != expected_model_nb:
213
+ raise RuntimeError(
214
+ f"Found one motor on {baudrate=} with id={id_} but it has a "
215
+ f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. "
216
+ f"Make sure you are connected only connected to the '{motor}' motor (model '{model}')."
217
+ )
218
+ return baudrate, id_
219
+
220
+ raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. Make sure it is connected.")
221
+
222
+ def configure_motors(self, return_delay_time=0, maximum_acceleration=254, acceleration=254) -> None:
223
+ for motor in self.motors:
224
+ # By default, Feetech motors have a 500µs delay response time (corresponding to a value of 250 on
225
+ # the 'Return_Delay_Time' address). We ensure this is reduced to the minimum of 2µs (value of 0).
226
+ self.write("Return_Delay_Time", motor, return_delay_time)
227
+ # Set 'Maximum_Acceleration' to 254 to speedup acceleration and deceleration of the motors.
228
+ if self.protocol_version == 0:
229
+ self.write("Maximum_Acceleration", motor, maximum_acceleration)
230
+ self.write("Acceleration", motor, acceleration)
231
+
232
+ @property
233
+ def is_calibrated(self) -> bool:
234
+ motors_calibration = self.read_calibration()
235
+ if set(motors_calibration) != set(self.calibration):
236
+ return False
237
+
238
+ same_ranges = all(
239
+ self.calibration[motor].range_min == cal.range_min
240
+ and self.calibration[motor].range_max == cal.range_max
241
+ for motor, cal in motors_calibration.items()
242
+ )
243
+ if self.protocol_version == 1:
244
+ return same_ranges
245
+
246
+ same_offsets = all(
247
+ self.calibration[motor].homing_offset == cal.homing_offset
248
+ for motor, cal in motors_calibration.items()
249
+ )
250
+ return same_ranges and same_offsets
251
+
252
+ def read_calibration(self) -> dict[str, MotorCalibration]:
253
+ offsets, mins, maxes = {}, {}, {}
254
+ for motor in self.motors:
255
+ mins[motor] = self.read("Min_Position_Limit", motor, normalize=False)
256
+ maxes[motor] = self.read("Max_Position_Limit", motor, normalize=False)
257
+ offsets[motor] = (
258
+ self.read("Homing_Offset", motor, normalize=False) if self.protocol_version == 0 else 0
259
+ )
260
+
261
+ calibration = {}
262
+ for motor, m in self.motors.items():
263
+ calibration[motor] = MotorCalibration(
264
+ id=m.id,
265
+ drive_mode=0,
266
+ homing_offset=offsets[motor],
267
+ range_min=mins[motor],
268
+ range_max=maxes[motor],
269
+ )
270
+
271
+ return calibration
272
+
273
+ def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None:
274
+ for motor, calibration in calibration_dict.items():
275
+ if self.protocol_version == 0:
276
+ self.write("Homing_Offset", motor, calibration.homing_offset)
277
+ self.write("Min_Position_Limit", motor, calibration.range_min)
278
+ self.write("Max_Position_Limit", motor, calibration.range_max)
279
+
280
+ if cache:
281
+ self.calibration = calibration_dict
282
+
283
+ def _get_half_turn_homings(self, positions: dict[NameOrID, Value]) -> dict[NameOrID, Value]:
284
+ """
285
+ On Feetech Motors:
286
+ Present_Position = Actual_Position - Homing_Offset
287
+ """
288
+ half_turn_homings = {}
289
+ for motor, pos in positions.items():
290
+ model = self._get_motor_model(motor)
291
+ max_res = self.model_resolution_table[model] - 1
292
+ half_turn_homings[motor] = pos - int(max_res / 2)
293
+
294
+ return half_turn_homings
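+
+ # Worked example (illustrative): a 4096-step motor reading a raw position of 1000 gets a homing
+ # offset of 1000 - int(4095 / 2) = -1047, so Present_Position = 1000 - (-1047) = 2047 (mid-range).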
295
+
296
+ def disable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None:
297
+ for motor in self._get_motors_list(motors):
298
+ self.write("Torque_Enable", motor, TorqueMode.DISABLED.value, num_retry=num_retry)
299
+ self.write("Lock", motor, 0, num_retry=num_retry)
300
+
301
+ def _disable_torque(self, motor_id: int, model: str, num_retry: int = 0) -> None:
302
+ addr, length = get_address(self.model_ctrl_table, model, "Torque_Enable")
303
+ self._write(addr, length, motor_id, TorqueMode.DISABLED.value, num_retry=num_retry)
304
+ addr, length = get_address(self.model_ctrl_table, model, "Lock")
305
+ self._write(addr, length, motor_id, 0, num_retry=num_retry)
306
+
307
+ def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None:
308
+ for motor in self._get_motors_list(motors):
309
+ self.write("Torque_Enable", motor, TorqueMode.ENABLED.value, num_retry=num_retry)
310
+ self.write("Lock", motor, 1, num_retry=num_retry)
311
+
312
+ def _encode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]:
313
+ for id_ in ids_values:
314
+ model = self._id_to_model(id_)
315
+ encoding_table = self.model_encoding_table.get(model)
316
+ if encoding_table and data_name in encoding_table:
317
+ sign_bit = encoding_table[data_name]
318
+ ids_values[id_] = encode_sign_magnitude(ids_values[id_], sign_bit)
319
+
320
+ return ids_values
321
+
322
+ def _decode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]:
323
+ for id_ in ids_values:
324
+ model = self._id_to_model(id_)
325
+ encoding_table = self.model_encoding_table.get(model)
326
+ if encoding_table and data_name in encoding_table:
327
+ sign_bit = encoding_table[data_name]
328
+ ids_values[id_] = decode_sign_magnitude(ids_values[id_], sign_bit)
329
+
330
+ return ids_values
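+
+ # Worked example of the sign-magnitude handling above: Homing_Offset uses sign bit 11 (see
+ # STS_SMS_SERIES_ENCODINGS_TABLE), so encode_sign_magnitude(-100, 11) == 100 | (1 << 11) == 2148
+ # and decode_sign_magnitude(2148, 11) == -100.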
331
+
332
+ def _split_into_byte_chunks(self, value: int, length: int) -> list[int]:
333
+ return _split_into_byte_chunks(value, length)
334
+
335
+ def _broadcast_ping(self) -> tuple[dict[int, int], int]:
336
+ import scservo_sdk as scs
337
+
338
+ data_list = {}
339
+
340
+ status_length = 6
341
+
342
+ rx_length = 0
343
+ wait_length = status_length * scs.MAX_ID
344
+
345
+ txpacket = [0] * 6
346
+
347
+ tx_time_per_byte = (1000.0 / self.port_handler.getBaudRate()) * 10.0
348
+
349
+ txpacket[scs.PKT_ID] = scs.BROADCAST_ID
350
+ txpacket[scs.PKT_LENGTH] = 2
351
+ txpacket[scs.PKT_INSTRUCTION] = scs.INST_PING
352
+
353
+ result = self.packet_handler.txPacket(self.port_handler, txpacket)
354
+ if result != scs.COMM_SUCCESS:
355
+ self.port_handler.is_using = False
356
+ return data_list, result
357
+
358
+ # set rx timeout
359
+ self.port_handler.setPacketTimeoutMillis((wait_length * tx_time_per_byte) + (3.0 * scs.MAX_ID) + 16.0)
360
+
361
+ rxpacket = []
362
+ while not self.port_handler.isPacketTimeout() and rx_length < wait_length:
363
+ rxpacket += self.port_handler.readPort(wait_length - rx_length)
364
+ rx_length = len(rxpacket)
365
+
366
+ self.port_handler.is_using = False
367
+
368
+ if rx_length == 0:
369
+ return data_list, scs.COMM_RX_TIMEOUT
370
+
371
+ while True:
372
+ if rx_length < status_length:
373
+ return data_list, scs.COMM_RX_CORRUPT
374
+
375
+ # find packet header
376
+ for idx in range(0, (rx_length - 1)):
377
+ if (rxpacket[idx] == 0xFF) and (rxpacket[idx + 1] == 0xFF):
378
+ break
379
+
380
+ if idx == 0: # found at the beginning of the packet
381
+ # calculate checksum
382
+ checksum = 0
383
+ for idx in range(2, status_length - 1): # except header & checksum
384
+ checksum += rxpacket[idx]
385
+
386
+ checksum = ~checksum & 0xFF
387
+ if rxpacket[status_length - 1] == checksum:
388
+ result = scs.COMM_SUCCESS
389
+ data_list[rxpacket[scs.PKT_ID]] = rxpacket[scs.PKT_ERROR]
390
+
391
+ del rxpacket[0:status_length]
392
+ rx_length = rx_length - status_length
393
+
394
+ if rx_length == 0:
395
+ return data_list, result
396
+ else:
397
+ result = scs.COMM_RX_CORRUPT
398
+ # remove header (0xFF 0xFF)
399
+ del rxpacket[0:2]
400
+ rx_length = rx_length - 2
401
+ else:
402
+ # remove unnecessary packets
403
+ del rxpacket[0:idx]
404
+ rx_length = rx_length - idx
405
+
406
+ def broadcast_ping(self, num_retry: int = 0, raise_on_error: bool = False) -> dict[int, int] | None:
407
+ self._assert_protocol_is_compatible("broadcast_ping")
408
+ for n_try in range(1 + num_retry):
409
+ ids_status, comm = self._broadcast_ping()
410
+ if self._is_comm_success(comm):
411
+ break
412
+ logger.debug(f"Broadcast ping failed on port '{self.port}' ({n_try=})")
413
+ logger.debug(self.packet_handler.getTxRxResult(comm))
414
+
415
+ if not self._is_comm_success(comm):
416
+ if raise_on_error:
417
+ raise ConnectionError(self.packet_handler.getTxRxResult(comm))
418
+ return
419
+
420
+ ids_errors = {id_: status for id_, status in ids_status.items() if self._is_error(status)}
421
+ if ids_errors:
422
+ display_dict = {id_: self.packet_handler.getRxPacketError(err) for id_, err in ids_errors.items()}
423
+ logger.error(f"Some motors found returned an error status:\n{pformat(display_dict, indent=4)}")
424
+
425
+ return self._read_model_number(list(ids_status), raise_on_error)
426
+
427
+ def _read_firmware_version(self, motor_ids: list[int], raise_on_error: bool = False) -> dict[int, str]:
428
+ firmware_versions = {}
429
+ for id_ in motor_ids:
430
+ firm_ver_major, comm, error = self._read(
431
+ *FIRMWARE_MAJOR_VERSION, id_, raise_on_error=raise_on_error
432
+ )
433
+ if not self._is_comm_success(comm) or self._is_error(error):
434
+ continue
435
+
436
+ firm_ver_minor, comm, error = self._read(
437
+ *FIRMWARE_MINOR_VERSION, id_, raise_on_error=raise_on_error
438
+ )
439
+ if not self._is_comm_success(comm) or self._is_error(error):
440
+ continue
441
+
442
+ firmware_versions[id_] = f"{firm_ver_major}.{firm_ver_minor}"
443
+
444
+ return firmware_versions
445
+
446
+ def _read_model_number(self, motor_ids: list[int], raise_on_error: bool = False) -> dict[int, int]:
447
+ model_numbers = {}
448
+ for id_ in motor_ids:
449
+ model_nb, comm, error = self._read(*MODEL_NUMBER, id_, raise_on_error=raise_on_error)
450
+ if not self._is_comm_success(comm) or self._is_error(error):
451
+ continue
452
+
453
+ model_numbers[id_] = model_nb
454
+
455
+ return model_numbers
lerobot/src/lerobot/motors/feetech/tables.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ FIRMWARE_MAJOR_VERSION = (0, 1)
16
+ FIRMWARE_MINOR_VERSION = (1, 1)
17
+ MODEL_NUMBER = (3, 2)
18
+
19
+ # TODO(Steven): Consider doing the following:
20
+ # from enum import Enum
21
+ # class MyControlTableKey(Enum):
22
+ # ID = "ID"
23
+ # GOAL_SPEED = "Goal_Speed"
24
+ # ...
25
+ #
26
+ # MY_CONTROL_TABLE ={
27
+ # MyControlTableKey.ID.value: (5,1)
28
+ # MyControlTableKey.GOAL_SPEED.value: (46, 2)
29
+ # ...
30
+ # }
31
+ # This allows me to do:
32
+ # bus.write(MyControlTableKey.GOAL_SPEED, ...)
33
+ # Instead of:
34
+ # bus.write("Goal_Speed", ...)
35
+ # This is important for two reasons:
36
+ # 1. The linter will tell me if I'm trying to use an invalid key, instead of me realizing when I get the RuntimeError
37
+ # 2. We can change the value of the MyControlTableKey enums without impacting the client code
38
+
39
+ # data_name: (address, size_byte)
40
+ # http://doc.feetech.cn/#/prodinfodownload?srcType=FT-SMS-STS-emanual-229f4476422d4059abfb1cb0
41
+ STS_SMS_SERIES_CONTROL_TABLE = {
42
+ # EPROM
43
+ "Firmware_Major_Version": FIRMWARE_MAJOR_VERSION, # read-only
44
+ "Firmware_Minor_Version": FIRMWARE_MINOR_VERSION, # read-only
45
+ "Model_Number": MODEL_NUMBER, # read-only
46
+ "ID": (5, 1),
47
+ "Baud_Rate": (6, 1),
48
+ "Return_Delay_Time": (7, 1),
49
+ "Response_Status_Level": (8, 1),
50
+ "Min_Position_Limit": (9, 2),
51
+ "Max_Position_Limit": (11, 2),
52
+ "Max_Temperature_Limit": (13, 1),
53
+ "Max_Voltage_Limit": (14, 1),
54
+ "Min_Voltage_Limit": (15, 1),
55
+ "Max_Torque_Limit": (16, 2),
56
+ "Phase": (18, 1),
57
+ "Unloading_Condition": (19, 1),
58
+ "LED_Alarm_Condition": (20, 1),
59
+ "P_Coefficient": (21, 1),
60
+ "D_Coefficient": (22, 1),
61
+ "I_Coefficient": (23, 1),
62
+ "Minimum_Startup_Force": (24, 2),
63
+ "CW_Dead_Zone": (26, 1),
64
+ "CCW_Dead_Zone": (27, 1),
65
+ "Protection_Current": (28, 2),
66
+ "Angular_Resolution": (30, 1),
67
+ "Homing_Offset": (31, 2),
68
+ "Operating_Mode": (33, 1),
69
+ "Protective_Torque": (34, 1),
70
+ "Protection_Time": (35, 1),
71
+ "Overload_Torque": (36, 1),
72
+ "Velocity_closed_loop_P_proportional_coefficient": (37, 1),
73
+ "Over_Current_Protection_Time": (38, 1),
74
+ "Velocity_closed_loop_I_integral_coefficient": (39, 1),
75
+ # SRAM
76
+ "Torque_Enable": (40, 1),
77
+ "Acceleration": (41, 1),
78
+ "Goal_Position": (42, 2),
79
+ "Goal_Time": (44, 2),
80
+ "Goal_Velocity": (46, 2),
81
+ "Torque_Limit": (48, 2),
82
+ "Lock": (55, 1),
83
+ "Present_Position": (56, 2), # read-only
84
+ "Present_Velocity": (58, 2), # read-only
85
+ "Present_Load": (60, 2), # read-only
86
+ "Present_Voltage": (62, 1), # read-only
87
+ "Present_Temperature": (63, 1), # read-only
88
+ "Status": (65, 1), # read-only
89
+ "Moving": (66, 1), # read-only
90
+ "Present_Current": (69, 2), # read-only
91
+ "Goal_Position_2": (71, 2), # read-only
92
+ # Factory
93
+ "Moving_Velocity": (80, 1),
94
+ "Moving_Velocity_Threshold": (80, 1),
95
+ "DTs": (81, 1), # (ms)
96
+ "Velocity_Unit_factor": (82, 1),
97
+ "Hts": (83, 1), # (ns) valid for firmware >= 2.54, other versions keep 0
98
+ "Maximum_Velocity_Limit": (84, 1),
99
+ "Maximum_Acceleration": (85, 1),
100
+ "Acceleration_Multiplier ": (86, 1), # Acceleration multiplier in effect when acceleration is 0
101
+ }
102
+
103
+ # http://doc.feetech.cn/#/prodinfodownload?srcType=FT-SCSCL-emanual-cbcc8ab2e3384282a01d4bf3
104
+ SCS_SERIES_CONTROL_TABLE = {
105
+ # EPROM
106
+ "Firmware_Major_Version": FIRMWARE_MAJOR_VERSION, # read-only
107
+ "Firmware_Minor_Version": FIRMWARE_MINOR_VERSION, # read-only
108
+ "Model_Number": MODEL_NUMBER, # read-only
109
+ "ID": (5, 1),
110
+ "Baud_Rate": (6, 1),
111
+ "Return_Delay_Time": (7, 1),
112
+ "Response_Status_Level": (8, 1),
113
+ "Min_Position_Limit": (9, 2),
114
+ "Max_Position_Limit": (11, 2),
115
+ "Max_Temperature_Limit": (13, 1),
116
+ "Max_Voltage_Limit": (14, 1),
117
+ "Min_Voltage_Limit": (15, 1),
118
+ "Max_Torque_Limit": (16, 2),
119
+ "Phase": (18, 1),
120
+ "Unloading_Condition": (19, 1),
121
+ "LED_Alarm_Condition": (20, 1),
122
+ "P_Coefficient": (21, 1),
123
+ "D_Coefficient": (22, 1),
124
+ "I_Coefficient": (23, 1),
125
+ "Minimum_Startup_Force": (24, 2),
126
+ "CW_Dead_Zone": (26, 1),
127
+ "CCW_Dead_Zone": (27, 1),
128
+ "Protective_Torque": (37, 1),
129
+ "Protection_Time": (38, 1),
130
+ # SRAM
131
+ "Torque_Enable": (40, 1),
132
+ "Acceleration": (41, 1),
133
+ "Goal_Position": (42, 2),
134
+ "Running_Time": (44, 2),
135
+ "Goal_Velocity": (46, 2),
136
+ "Lock": (48, 1),
137
+ "Present_Position": (56, 2), # read-only
138
+ "Present_Velocity": (58, 2), # read-only
139
+ "Present_Load": (60, 2), # read-only
140
+ "Present_Voltage": (62, 1), # read-only
141
+ "Present_Temperature": (63, 1), # read-only
142
+ "Sync_Write_Flag": (64, 1), # read-only
143
+ "Status": (65, 1), # read-only
144
+ "Moving": (66, 1), # read-only
145
+ # Factory
146
+ "PWM_Maximum_Step": (78, 1),
147
+ "Moving_Velocity_Threshold*50": (79, 1),
148
+ "DTs": (80, 1), # (ms)
149
+ "Minimum_Velocity_Limit*50": (81, 1),
150
+ "Maximum_Velocity_Limit*50": (82, 1),
151
+ "Acceleration_2": (83, 1), # don't know what that is
152
+ }
153
+
154
+ STS_SMS_SERIES_BAUDRATE_TABLE = {
155
+ 1_000_000: 0,
156
+ 500_000: 1,
157
+ 250_000: 2,
158
+ 128_000: 3,
159
+ 115_200: 4,
160
+ 57_600: 5,
161
+ 38_400: 6,
162
+ 19_200: 7,
163
+ }
164
+
165
+ SCS_SERIES_BAUDRATE_TABLE = {
166
+ 1_000_000: 0,
167
+ 500_000: 1,
168
+ 250_000: 2,
169
+ 128_000: 3,
170
+ 115_200: 4,
171
+ 57_600: 5,
172
+ 38_400: 6,
173
+ 19_200: 7,
174
+ }
175
+
176
+ MODEL_CONTROL_TABLE = {
177
+ "sts_series": STS_SMS_SERIES_CONTROL_TABLE,
178
+ "scs_series": SCS_SERIES_CONTROL_TABLE,
179
+ "sms_series": STS_SMS_SERIES_CONTROL_TABLE,
180
+ "sts3215": STS_SMS_SERIES_CONTROL_TABLE,
181
+ "sts3250": STS_SMS_SERIES_CONTROL_TABLE,
182
+ "scs0009": SCS_SERIES_CONTROL_TABLE,
183
+ "sm8512bl": STS_SMS_SERIES_CONTROL_TABLE,
184
+ }
185
+
186
+ MODEL_RESOLUTION = {
187
+ "sts_series": 4096,
188
+ "sms_series": 4096,
189
+ "scs_series": 1024,
190
+ "sts3215": 4096,
191
+ "sts3250": 4096,
192
+ "sm8512bl": 4096,
193
+ "scs0009": 1024,
194
+ }
195
+
196
+ MODEL_BAUDRATE_TABLE = {
197
+ "sts_series": STS_SMS_SERIES_BAUDRATE_TABLE,
198
+ "sms_series": STS_SMS_SERIES_BAUDRATE_TABLE,
199
+ "scs_series": SCS_SERIES_BAUDRATE_TABLE,
200
+ "sm8512bl": STS_SMS_SERIES_BAUDRATE_TABLE,
201
+ "sts3215": STS_SMS_SERIES_BAUDRATE_TABLE,
202
+ "sts3250": STS_SMS_SERIES_BAUDRATE_TABLE,
203
+ "scs0009": SCS_SERIES_BAUDRATE_TABLE,
204
+ }
205
+
206
+ # Sign-Magnitude encoding bits
207
+ STS_SMS_SERIES_ENCODINGS_TABLE = {
208
+ "Homing_Offset": 11,
209
+ "Goal_Position": 15,
210
+ "Goal_Velocity": 15,
211
+ "Goal_Speed": 15,
212
+ "Present_Position": 15,
213
+ "Present_Velocity": 15,
214
+ "Present_Speed": 15,
215
+ }
216
+
217
+ MODEL_ENCODING_TABLE = {
218
+ "sts_series": STS_SMS_SERIES_ENCODINGS_TABLE,
219
+ "sms_series": STS_SMS_SERIES_ENCODINGS_TABLE,
220
+ "scs_series": {},
221
+ "sts3215": STS_SMS_SERIES_ENCODINGS_TABLE,
222
+ "sts3250": STS_SMS_SERIES_ENCODINGS_TABLE,
223
+ "sm8512bl": STS_SMS_SERIES_ENCODINGS_TABLE,
224
+ "scs0009": {},
225
+ }
226
+
227
+ SCAN_BAUDRATES = [
228
+ 4_800,
229
+ 9_600,
230
+ 14_400,
231
+ 19_200,
232
+ 38_400,
233
+ 57_600,
234
+ 115_200,
235
+ 128_000,
236
+ 250_000,
237
+ 500_000,
238
+ 1_000_000,
239
+ ]
240
+
241
+ MODEL_NUMBER_TABLE = {
242
+ "sts3215": 777,
243
+ "sts3250": 2825,
244
+ "sm8512bl": 11272,
245
+ "scs0009": 1284,
246
+ }
247
+
248
+ MODEL_PROTOCOL = {
249
+ "sts_series": 0,
250
+ "sms_series": 0,
251
+ "scs_series": 1,
252
+ "sts3215": 0,
253
+ "sts3250": 0,
254
+ "sm8512bl": 0,
255
+ "scs0009": 1,
256
+ }
lerobot/src/lerobot/policies/act/README.md ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../../../docs/source/policy_act_README.md
lerobot/src/lerobot/policies/act/configuration_act.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ from dataclasses import dataclass, field
17
+
18
+ from lerobot.configs.policies import PreTrainedConfig
19
+ from lerobot.configs.types import NormalizationMode
20
+ from lerobot.optim.optimizers import AdamWConfig
21
+
22
+
23
+ @PreTrainedConfig.register_subclass("act")
24
+ @dataclass
25
+ class ACTConfig(PreTrainedConfig):
26
+ """Configuration class for the Action Chunking Transformers policy.
27
+
28
+ Defaults are configured for training on bimanual Aloha tasks like "insertion" or "transfer".
29
+
30
+ The parameters you will most likely need to change are the ones which depend on the environment / sensors.
31
+ Those are: `input_shapes` and `output_shapes`.
32
+
33
+ Notes on the inputs and outputs:
34
+ - Either:
35
+ - At least one key starting with "observation.image" is required as an input.
36
+ AND/OR
37
+ - The key "observation.environment_state" is required as input.
38
+ - If there are multiple keys beginning with "observation.images." they are treated as multiple camera
39
+ views. Right now we only support all images having the same shape.
40
+ - May optionally work without an "observation.state" key for the proprioceptive robot state.
41
+ - "action" is required as an output key.
42
+
43
+ Args:
44
+ n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the
45
+ current step and additional steps going back).
46
+ chunk_size: The size of the action prediction "chunks" in units of environment steps.
47
+ n_action_steps: The number of action steps to run in the environment for one invocation of the policy.
48
+ This should be no greater than the chunk size. For example, if the chunk size is 100, you may
49
+ set this to 50. This would mean that the model predicts 100 steps worth of actions, runs 50 in the
50
+ environment, and throws the other 50 out.
51
+ input_shapes: A dictionary defining the shapes of the input data for the policy. The key represents
52
+ the input data name, and the value is a list indicating the dimensions of the corresponding data.
53
+ For example, "observation.image" refers to an input from a camera with dimensions [3, 96, 96],
54
+ indicating it has three color channels and 96x96 resolution. Importantly, `input_shapes` doesn't
55
+ include batch dimension or temporal dimension.
56
+ output_shapes: A dictionary defining the shapes of the output data for the policy. The key represents
57
+ the output data name, and the value is a list indicating the dimensions of the corresponding data.
58
+ For example, "action" refers to an output shape of [14], indicating 14-dimensional actions.
59
+ Importantly, `output_shapes` doesn't include batch dimension or temporal dimension.
60
+ input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"),
61
+ and the value specifies the normalization mode to apply. The two available modes are "mean_std"
62
+ which subtracts the mean and divides by the standard deviation and "min_max" which rescale in a
63
+ [-1, 1] range.
64
+ output_normalization_modes: Similar dictionary to `input_normalization_modes`, but used to unnormalize to the
65
+ original scale. Note that this is also used for normalizing the training targets.
66
+ vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
67
+ pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
68
+ `None` means no pretrained weights.
69
+ replace_final_stride_with_dilation: Whether to replace the ResNet's final 2x2 stride with a dilated
70
+ convolution.
71
+ pre_norm: Whether to use "pre-norm" in the transformer blocks.
72
+ dim_model: The transformer blocks' main hidden dimension.
73
+ n_heads: The number of heads to use in the transformer blocks' multi-head attention.
74
+ dim_feedforward: The dimension to expand the transformer's hidden dimension to in the feed-forward
75
+ layers.
76
+ feedforward_activation: The activation to use in the transformer block's feed-forward layers.
77
+ n_encoder_layers: The number of transformer layers to use for the transformer encoder.
78
+ n_decoder_layers: The number of transformer layers to use for the transformer decoder.
79
+ use_vae: Whether to use a variational objective during training. This introduces another transformer
80
+ which is used as the VAE's encoder (not to be confused with the transformer encoder - see
81
+ documentation in the policy class).
82
+ latent_dim: The VAE's latent dimension.
83
+ n_vae_encoder_layers: The number of transformer layers to use for the VAE's encoder.
84
+ temporal_ensemble_coeff: Coefficient for the exponential weighting scheme to apply for temporal
85
+ ensembling. Defaults to None which means temporal ensembling is not used. `n_action_steps` must be
86
+ 1 when using this feature, as inference needs to happen at every step to form an ensemble. For
87
+ more information on how ensembling works, please see `ACTTemporalEnsembler`.
88
+ dropout: Dropout to use in the transformer layers (see code for details).
89
+ kl_weight: The weight to use for the KL-divergence component of the loss if the variational objective
90
+ is enabled. Loss is then calculated as: `reconstruction_loss + kl_weight * kld_loss`.
91
+ """
92
+
93
+ # Input / output structure.
94
+ n_obs_steps: int = 1
95
+ chunk_size: int = 100
96
+ n_action_steps: int = 100
97
+
98
+ normalization_mapping: dict[str, NormalizationMode] = field(
99
+ default_factory=lambda: {
100
+ "VISUAL": NormalizationMode.MEAN_STD,
101
+ "STATE": NormalizationMode.MEAN_STD,
102
+ "ACTION": NormalizationMode.MEAN_STD,
103
+ }
104
+ )
105
+
106
+ # Architecture.
107
+ # Vision backbone.
108
+ vision_backbone: str = "resnet18"
109
+ pretrained_backbone_weights: str | None = "ResNet18_Weights.IMAGENET1K_V1"
110
+ replace_final_stride_with_dilation: bool = False
111
+ # Transformer layers.
112
+ pre_norm: bool = False
113
+ dim_model: int = 512
114
+ n_heads: int = 8
115
+ dim_feedforward: int = 3200
116
+ feedforward_activation: str = "relu"
117
+ n_encoder_layers: int = 4
118
+ # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
119
+ # that means only the first layer is used. Here we match the original implementation by setting this to 1.
120
+ # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
121
+ n_decoder_layers: int = 1
122
+ # VAE.
123
+ use_vae: bool = True
124
+ latent_dim: int = 32
125
+ n_vae_encoder_layers: int = 4
126
+
127
+ # Inference.
128
+ # Note: the value used in ACT when temporal ensembling is enabled is 0.01.
129
+ temporal_ensemble_coeff: float | None = None
130
+
131
+ # Training and loss computation.
132
+ dropout: float = 0.1
133
+ kl_weight: float = 10.0
134
+
135
+ # Training preset
136
+ optimizer_lr: float = 1e-5
137
+ optimizer_weight_decay: float = 1e-4
138
+ optimizer_lr_backbone: float = 1e-5
139
+
140
+ def __post_init__(self):
141
+ super().__post_init__()
142
+
143
+ """Input validation (not exhaustive)."""
144
+ if not self.vision_backbone.startswith("resnet"):
145
+ raise ValueError(
146
+ f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}."
147
+ )
148
+ if self.temporal_ensemble_coeff is not None and self.n_action_steps > 1:
149
+ raise NotImplementedError(
150
+ "`n_action_steps` must be 1 when using temporal ensembling. This is "
151
+ "because the policy needs to be queried every step to compute the ensembled action."
152
+ )
153
+ if self.n_action_steps > self.chunk_size:
154
+ raise ValueError(
155
+ f"The chunk size is the upper bound for the number of action steps per model invocation. Got "
156
+ f"{self.n_action_steps} for `n_action_steps` and {self.chunk_size} for `chunk_size`."
157
+ )
158
+ if self.n_obs_steps != 1:
159
+ raise ValueError(
160
+ f"Multiple observation steps not handled yet. Got `nobs_steps={self.n_obs_steps}`"
161
+ )
162
+
163
+ def get_optimizer_preset(self) -> AdamWConfig:
164
+ return AdamWConfig(
165
+ lr=self.optimizer_lr,
166
+ weight_decay=self.optimizer_weight_decay,
167
+ )
168
+
169
+ def get_scheduler_preset(self) -> None:
170
+ return None
171
+
172
+ def validate_features(self) -> None:
173
+ if not self.image_features and not self.env_state_feature:
174
+ raise ValueError("You must provide at least one image or the environment state among the inputs.")
175
+
176
+ @property
177
+ def observation_delta_indices(self) -> None:
178
+ return None
179
+
180
+ @property
181
+ def action_delta_indices(self) -> list:
182
+ return list(range(self.chunk_size))
183
+
184
+ @property
185
+ def reward_delta_indices(self) -> None:
186
+ return None
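+
+ # Minimal usage sketch (illustrative, not part of the original file), following the
+ # chunk_size / n_action_steps example from the class docstring: the model predicts 100 steps
+ # and executes the first 50. __post_init__ rejects n_action_steps > chunk_size.
+ #   config = ACTConfig(chunk_size=100, n_action_steps=50)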
lerobot/src/lerobot/policies/act/modeling_act.py ADDED
@@ -0,0 +1,746 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Action Chunking Transformer Policy
17
+
18
+ As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://huggingface.co/papers/2304.13705).
19
+ The majority of changes here involve removing unused code, unifying naming, and adding helpful comments.
20
+ """
21
+
22
+ import math
23
+ from collections import deque
24
+ from collections.abc import Callable
25
+ from itertools import chain
26
+
27
+ import einops
28
+ import numpy as np
29
+ import torch
30
+ import torch.nn.functional as F # noqa: N812
31
+ import torchvision
32
+ from torch import Tensor, nn
33
+ from torchvision.models._utils import IntermediateLayerGetter
34
+ from torchvision.ops.misc import FrozenBatchNorm2d
35
+
36
+ from lerobot.policies.act.configuration_act import ACTConfig
37
+ from lerobot.policies.pretrained import PreTrainedPolicy
38
+ from lerobot.utils.constants import ACTION, OBS_ENV_STATE, OBS_IMAGES, OBS_STATE
39
+
40
+
41
+ class ACTPolicy(PreTrainedPolicy):
42
+ """
43
+ Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost
44
+ Hardware (paper: https://huggingface.co/papers/2304.13705, code: https://github.com/tonyzhaozh/act)
45
+ """
46
+
47
+ config_class = ACTConfig
48
+ name = "act"
49
+
50
+ def __init__(
51
+ self,
52
+ config: ACTConfig,
53
+ **kwargs,
54
+ ):
55
+ """
56
+ Args:
57
+ config: Policy configuration class instance or None, in which case the default instantiation of
58
+ the configuration class is used.
59
+ """
60
+ super().__init__(config)
61
+ config.validate_features()
62
+ self.config = config
63
+
64
+ self.model = ACT(config)
65
+
66
+ if config.temporal_ensemble_coeff is not None:
67
+ self.temporal_ensembler = ACTTemporalEnsembler(config.temporal_ensemble_coeff, config.chunk_size)
68
+
69
+ self.reset()
70
+
71
+ def get_optim_params(self) -> dict:
72
+ # TODO(aliberts, rcadene): As of now, lr_backbone == lr
73
+ # Should we remove this and just `return self.parameters()`?
74
+ return [
75
+ {
76
+ "params": [
77
+ p
78
+ for n, p in self.named_parameters()
79
+ if not n.startswith("model.backbone") and p.requires_grad
80
+ ]
81
+ },
82
+ {
83
+ "params": [
84
+ p
85
+ for n, p in self.named_parameters()
86
+ if n.startswith("model.backbone") and p.requires_grad
87
+ ],
88
+ "lr": self.config.optimizer_lr_backbone,
89
+ },
90
+ ]
91
+
92
+ def reset(self):
93
+ """This should be called whenever the environment is reset."""
94
+ if self.config.temporal_ensemble_coeff is not None:
95
+ self.temporal_ensembler.reset()
96
+ else:
97
+ self._action_queue = deque([], maxlen=self.config.n_action_steps)
98
+
99
+ @torch.no_grad()
100
+ def select_action(self, batch: dict[str, Tensor]) -> Tensor:
101
+ """Select a single action given environment observations.
102
+
103
+ This method wraps `select_actions` in order to return one action at a time for execution in the
104
+ environment. It works by managing the actions in a queue and only calling `select_actions` when the
105
+ queue is empty.
106
+ """
107
+ self.eval() # keeping the policy in eval mode as it could be set to train mode while queue is consumed
108
+
109
+ if self.config.temporal_ensemble_coeff is not None:
110
+ actions = self.predict_action_chunk(batch)
111
+ action = self.temporal_ensembler.update(actions)
112
+ return action
113
+
114
+ # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by
115
+ # querying the policy.
116
+ if len(self._action_queue) == 0:
117
+ actions = self.predict_action_chunk(batch)[:, : self.config.n_action_steps]
118
+
119
+ # `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue
120
+ # effectively has shape (n_action_steps, batch_size, *), hence the transpose.
121
+ self._action_queue.extend(actions.transpose(0, 1))
122
+ return self._action_queue.popleft()
123
+
124
+ @torch.no_grad()
125
+ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
126
+ """Predict a chunk of actions given environment observations."""
127
+ self.eval()
128
+
129
+ if self.config.image_features:
130
+ batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
131
+ batch[OBS_IMAGES] = [batch[key] for key in self.config.image_features]
132
+
133
+ actions = self.model(batch)[0]
134
+ return actions
135
+
136
+ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]:
137
+ """Run the batch through the model and compute the loss for training or validation."""
138
+ if self.config.image_features:
139
+ batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
140
+ batch[OBS_IMAGES] = [batch[key] for key in self.config.image_features]
141
+
142
+ actions_hat, (mu_hat, log_sigma_x2_hat) = self.model(batch)
143
+
144
+ l1_loss = (
145
+ F.l1_loss(batch[ACTION], actions_hat, reduction="none") * ~batch["action_is_pad"].unsqueeze(-1)
146
+ ).mean()
147
+
148
+ loss_dict = {"l1_loss": l1_loss.item()}
149
+ if self.config.use_vae:
150
+ # Calculate Dₖₗ(latent_pdf || standard_normal). Note: After computing the KL-divergence for
151
+ # each dimension independently, we sum over the latent dimension to get the total
152
+ # KL-divergence per batch element, then take the mean over the batch.
153
+ # (See App. B of https://huggingface.co/papers/1312.6114 for more details).
154
+ mean_kld = (
155
+ (-0.5 * (1 + log_sigma_x2_hat - mu_hat.pow(2) - (log_sigma_x2_hat).exp())).sum(-1).mean()
156
+ )
157
+ loss_dict["kld_loss"] = mean_kld.item()
158
+ loss = l1_loss + mean_kld * self.config.kl_weight
159
+ else:
160
+ loss = l1_loss
161
+
162
+ return loss, loss_dict
163
+
164
+
165
+ class ACTTemporalEnsembler:
166
+ def __init__(self, temporal_ensemble_coeff: float, chunk_size: int) -> None:
167
+ """Temporal ensembling as described in Algorithm 2 of https://huggingface.co/papers/2304.13705.
168
+
169
+ The weights are calculated as wᵢ = exp(-temporal_ensemble_coeff * i) where w₀ is the oldest action.
170
+ They are then normalized to sum to 1 by dividing by Σwᵢ. Here's some intuition around how the
171
+ coefficient works:
172
+ - Setting it to 0 uniformly weighs all actions.
173
+ - Setting it positive gives more weight to older actions.
174
+ - Setting it negative gives more weight to newer actions.
175
+ NOTE: The default value for `temporal_ensemble_coeff` used by the original ACT work is 0.01. This
176
+ results in older actions being weighed more highly than newer actions (the experiments documented in
177
+ https://github.com/huggingface/lerobot/pull/319 hint at why highly weighing new actions might be
178
+ detrimental: doing so aggressively may diminish the benefits of action chunking).
179
+
180
+ Here we use an online method for computing the average rather than caching a history of actions in
181
+ order to compute the average offline. For a simple 1D sequence it looks something like:
182
+
183
+ ```
184
+ import torch
185
+
186
+ seq = torch.linspace(8, 8.5, 100)
187
+ print(seq)
188
+
189
+ m = 0.01
190
+ exp_weights = torch.exp(-m * torch.arange(len(seq)))
191
+ print(exp_weights)
192
+
193
+ # Calculate offline
194
+ avg = (exp_weights * seq).sum() / exp_weights.sum()
195
+ print("offline", avg)
196
+
197
+ # Calculate online
198
+ for i, item in enumerate(seq):
199
+ if i == 0:
200
+ avg = item
201
+ continue
202
+ avg *= exp_weights[:i].sum()
203
+ avg += item * exp_weights[i]
204
+ avg /= exp_weights[: i + 1].sum()
205
+ print("online", avg)
206
+ ```
207
+ """
208
+ self.chunk_size = chunk_size
209
+ self.ensemble_weights = torch.exp(-temporal_ensemble_coeff * torch.arange(chunk_size))
210
+ self.ensemble_weights_cumsum = torch.cumsum(self.ensemble_weights, dim=0)
211
+ self.reset()
212
+
213
+ def reset(self):
214
+ """Resets the online computation variables."""
215
+ self.ensembled_actions = None
216
+ # (chunk_size,) count of how many actions are in the ensemble for each time step in the sequence.
217
+ self.ensembled_actions_count = None
218
+
219
+ def update(self, actions: Tensor) -> Tensor:
220
+ """
221
+ Takes a (batch, chunk_size, action_dim) sequence of actions, updates the temporal ensemble for all
222
+ time steps, and pops/returns the next batch of actions in the sequence.
223
+ """
224
+ self.ensemble_weights = self.ensemble_weights.to(device=actions.device)
225
+ self.ensemble_weights_cumsum = self.ensemble_weights_cumsum.to(device=actions.device)
226
+ if self.ensembled_actions is None:
227
+ # Initializes `self.ensembled_actions` to the sequence of actions predicted during the first
228
+ # time step of the episode.
229
+ self.ensembled_actions = actions.clone()
230
+ # Note: The last dimension is unsqueezed to make sure we can broadcast properly for tensor
231
+ # operations later.
232
+ self.ensembled_actions_count = torch.ones(
233
+ (self.chunk_size, 1), dtype=torch.long, device=self.ensembled_actions.device
234
+ )
235
+ else:
236
+ # self.ensembled_actions will have shape (batch_size, chunk_size - 1, action_dim). Compute
237
+ # the online update for those entries.
238
+ self.ensembled_actions *= self.ensemble_weights_cumsum[self.ensembled_actions_count - 1]
239
+ self.ensembled_actions += actions[:, :-1] * self.ensemble_weights[self.ensembled_actions_count]
240
+ self.ensembled_actions /= self.ensemble_weights_cumsum[self.ensembled_actions_count]
241
+ self.ensembled_actions_count = torch.clamp(self.ensembled_actions_count + 1, max=self.chunk_size)
242
+ # The last action, which has no prior online average, needs to get concatenated onto the end.
243
+ self.ensembled_actions = torch.cat([self.ensembled_actions, actions[:, -1:]], dim=1)
244
+ self.ensembled_actions_count = torch.cat(
245
+ [self.ensembled_actions_count, torch.ones_like(self.ensembled_actions_count[-1:])]
246
+ )
247
+ # "Consume" the first action.
248
+ action, self.ensembled_actions, self.ensembled_actions_count = (
249
+ self.ensembled_actions[:, 0],
250
+ self.ensembled_actions[:, 1:],
251
+ self.ensembled_actions_count[1:],
252
+ )
253
+ return action
254
+
255
+
256
+ class ACT(nn.Module):
257
+ """Action Chunking Transformer: The underlying neural network for ACTPolicy.
258
+
259
+ Note: In this code we use the terms `vae_encoder`, `encoder`, `decoder`. The meanings are as follows.
260
+ - The `vae_encoder` is, as per the literature around variational auto-encoders (VAE), the part of the
261
+ model that encodes the target data (a sequence of actions), and the condition (the robot
262
+ joint-space).
263
+ - A transformer with an `encoder` (not the VAE encoder) and `decoder` (not the VAE decoder) with
264
+ cross-attention is used as the VAE decoder. For these terms, we drop the `vae_` prefix because we
265
+ have an option to train this model without the variational objective (in which case we drop the
266
+ `vae_encoder` altogether, and nothing about this model has anything to do with a VAE).
267
+
268
+ Transformer
269
+ Used alone for inference
270
+ (acts as VAE decoder
271
+ during training)
272
+ ┌───────────────────────┐
273
+ │ Outputs │
274
+ │ ▲ │
275
+ │ ┌─────►┌───────┐ │
276
+ ┌──────┐ │ │ │Transf.│ │
277
+ │ │ │ ├─────►│decoder│ │
278
+ ┌────┴────┐ │ │ │ │ │ │
279
+ │ │ │ │ ┌───┴───┬─►│ │ │
280
+ │ VAE │ │ │ │ │ └───────┘ │
281
+ │ encoder │ │ │ │Transf.│ │
282
+ │ │ │ │ │encoder│ │
283
+ └───▲─────┘ │ │ │ │ │
284
+ │ │ │ └▲──▲─▲─┘ │
285
+ │ │ │ │ │ │ │
286
+ inputs └─────┼──┘ │ image emb. │
287
+ │ state emb. │
288
+ └───────────────────────┘
289
+ """
290
+
291
+ def __init__(self, config: ACTConfig):
292
+ # BERT style VAE encoder with input tokens [cls, robot_state, *action_sequence].
293
+ # The cls token forms parameters of the latent's distribution (like this [*means, *log_variances]).
294
+ super().__init__()
295
+ self.config = config
296
+
297
+ if self.config.use_vae:
298
+ self.vae_encoder = ACTEncoder(config, is_vae_encoder=True)
299
+ self.vae_encoder_cls_embed = nn.Embedding(1, config.dim_model)
300
+ # Projection layer for joint-space configuration to hidden dimension.
301
+ if self.config.robot_state_feature:
302
+ self.vae_encoder_robot_state_input_proj = nn.Linear(
303
+ self.config.robot_state_feature.shape[0], config.dim_model
304
+ )
305
+ # Projection layer for action (joint-space target) to hidden dimension.
306
+ self.vae_encoder_action_input_proj = nn.Linear(
307
+ self.config.action_feature.shape[0],
308
+ config.dim_model,
309
+ )
310
+ # Projection layer from the VAE encoder's output to the latent distribution's parameter space.
311
+ self.vae_encoder_latent_output_proj = nn.Linear(config.dim_model, config.latent_dim * 2)
312
+ # Fixed sinusoidal positional embedding for the input to the VAE encoder. Unsqueeze for batch
313
+ # dimension.
314
+ num_input_token_encoder = 1 + config.chunk_size
315
+ if self.config.robot_state_feature:
316
+ num_input_token_encoder += 1
317
+ self.register_buffer(
318
+ "vae_encoder_pos_enc",
319
+ create_sinusoidal_pos_embedding(num_input_token_encoder, config.dim_model).unsqueeze(0),
320
+ )
321
+
322
+ # Backbone for image feature extraction.
323
+ if self.config.image_features:
324
+ backbone_model = getattr(torchvision.models, config.vision_backbone)(
325
+ replace_stride_with_dilation=[False, False, config.replace_final_stride_with_dilation],
326
+ weights=config.pretrained_backbone_weights,
327
+ norm_layer=FrozenBatchNorm2d,
328
+ )
329
+ # Note: The assumption here is that we are using a ResNet model (and hence layer4 is the final
330
+ # feature map).
331
+ # Note: The forward method of this returns a dict: {"feature_map": output}.
332
+ self.backbone = IntermediateLayerGetter(backbone_model, return_layers={"layer4": "feature_map"})
333
+
334
+ # Transformer (acts as VAE decoder when training with the variational objective).
335
+ self.encoder = ACTEncoder(config)
336
+ self.decoder = ACTDecoder(config)
337
+
338
+ # Transformer encoder input projections. The tokens will be structured like
339
+ # [latent, (robot_state), (env_state), (image_feature_map_pixels)].
340
+ if self.config.robot_state_feature:
341
+ self.encoder_robot_state_input_proj = nn.Linear(
342
+ self.config.robot_state_feature.shape[0], config.dim_model
343
+ )
344
+ if self.config.env_state_feature:
345
+ self.encoder_env_state_input_proj = nn.Linear(
346
+ self.config.env_state_feature.shape[0], config.dim_model
347
+ )
348
+ self.encoder_latent_input_proj = nn.Linear(config.latent_dim, config.dim_model)
349
+ if self.config.image_features:
350
+ self.encoder_img_feat_input_proj = nn.Conv2d(
351
+ backbone_model.fc.in_features, config.dim_model, kernel_size=1
352
+ )
353
+ # Transformer encoder positional embeddings.
354
+ n_1d_tokens = 1 # for the latent
355
+ if self.config.robot_state_feature:
356
+ n_1d_tokens += 1
357
+ if self.config.env_state_feature:
358
+ n_1d_tokens += 1
359
+ self.encoder_1d_feature_pos_embed = nn.Embedding(n_1d_tokens, config.dim_model)
360
+ if self.config.image_features:
361
+ self.encoder_cam_feat_pos_embed = ACTSinusoidalPositionEmbedding2d(config.dim_model // 2)
362
+
363
+ # Transformer decoder.
364
+ # Learnable positional embedding for the transformer's decoder (in the style of DETR object queries).
365
+ self.decoder_pos_embed = nn.Embedding(config.chunk_size, config.dim_model)
366
+
367
+ # Final action regression head on the output of the transformer's decoder.
368
+ self.action_head = nn.Linear(config.dim_model, self.config.action_feature.shape[0])
369
+
370
+ self._reset_parameters()
371
+
372
+ def _reset_parameters(self):
373
+ """Xavier-uniform initialization of the transformer parameters as in the original code."""
374
+ for p in chain(self.encoder.parameters(), self.decoder.parameters()):
375
+ if p.dim() > 1:
376
+ nn.init.xavier_uniform_(p)
377
+
378
+ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tensor] | tuple[None, None]]:
379
+ """A forward pass through the Action Chunking Transformer (with optional VAE encoder).
380
+
381
+ `batch` should have the following structure:
382
+ {
383
+ [robot_state_feature] (optional): (B, state_dim) batch of robot states.
384
+
385
+ [image_features]: (B, n_cameras, C, H, W) batch of images.
386
+ AND/OR
387
+ [env_state_feature]: (B, env_dim) batch of environment states.
388
+
389
+ [action_feature] (optional, only if training with VAE): (B, chunk_size, action dim) batch of actions.
390
+ }
391
+
392
+ Returns:
393
+ (B, chunk_size, action_dim) batch of action sequences
394
+ Tuple containing the latent PDF's parameters (mean, log(σ²)) both as (B, L) tensors where L is the
395
+ latent dimension.
396
+ """
397
+ if self.config.use_vae and self.training:
398
+ assert ACTION in batch, (
399
+ "actions must be provided when using the variational objective in training mode."
400
+ )
401
+
402
+ batch_size = batch[OBS_IMAGES][0].shape[0] if OBS_IMAGES in batch else batch[OBS_ENV_STATE].shape[0]
403
+
404
+ # Prepare the latent for input to the transformer encoder.
405
+ if self.config.use_vae and ACTION in batch and self.training:
406
+ # Prepare the input to the VAE encoder: [cls, *joint_space_configuration, *action_sequence].
407
+ cls_embed = einops.repeat(
408
+ self.vae_encoder_cls_embed.weight, "1 d -> b 1 d", b=batch_size
409
+ ) # (B, 1, D)
410
+ if self.config.robot_state_feature:
411
+ robot_state_embed = self.vae_encoder_robot_state_input_proj(batch[OBS_STATE])
412
+ robot_state_embed = robot_state_embed.unsqueeze(1) # (B, 1, D)
413
+ action_embed = self.vae_encoder_action_input_proj(batch[ACTION]) # (B, S, D)
414
+
415
+ if self.config.robot_state_feature:
416
+ vae_encoder_input = [cls_embed, robot_state_embed, action_embed] # (B, S+2, D)
417
+ else:
418
+ vae_encoder_input = [cls_embed, action_embed]
419
+ vae_encoder_input = torch.cat(vae_encoder_input, axis=1)
420
+
421
+ # Prepare fixed positional embedding.
422
+ # Note: detach() shouldn't be necessary but leaving it the same as the original code just in case.
423
+ pos_embed = self.vae_encoder_pos_enc.clone().detach() # (1, S+2, D)
424
+
425
+ # Prepare key padding mask for the transformer encoder. We have 1 or 2 extra tokens at the start of the
426
+ # sequence depending on whether we use the input states or not (cls and robot state).
427
+ # False means not a padding token.
428
+ cls_joint_is_pad = torch.full(
429
+ (batch_size, 2 if self.config.robot_state_feature else 1),
430
+ False,
431
+ device=batch[OBS_STATE].device,
432
+ )
433
+ key_padding_mask = torch.cat(
434
+ [cls_joint_is_pad, batch["action_is_pad"]], axis=1
435
+ ) # (bs, seq+1 or 2)
436
+
437
+ # Forward pass through VAE encoder to get the latent PDF parameters.
438
+ cls_token_out = self.vae_encoder(
439
+ vae_encoder_input.permute(1, 0, 2),
440
+ pos_embed=pos_embed.permute(1, 0, 2),
441
+ key_padding_mask=key_padding_mask,
442
+ )[0] # select the class token, with shape (B, D)
443
+ latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out)
444
+ mu = latent_pdf_params[:, : self.config.latent_dim]
445
+ # This is 2log(sigma). Done this way to match the original implementation.
446
+ log_sigma_x2 = latent_pdf_params[:, self.config.latent_dim :]
447
+
448
+ # Sample the latent with the reparameterization trick.
449
+ latent_sample = mu + log_sigma_x2.div(2).exp() * torch.randn_like(mu)
450
+ else:
451
+ # When not using the VAE encoder, we set the latent to be all zeros.
452
+ mu = log_sigma_x2 = None
453
+ # TODO(rcadene, alexander-soare): remove call to `.to` to speedup forward ; precompute and use buffer
454
+ latent_sample = torch.zeros([batch_size, self.config.latent_dim], dtype=torch.float32).to(
455
+ batch[OBS_STATE].device
456
+ )
457
+
458
+ # Prepare transformer encoder inputs.
459
+ encoder_in_tokens = [self.encoder_latent_input_proj(latent_sample)]
460
+ encoder_in_pos_embed = list(self.encoder_1d_feature_pos_embed.weight.unsqueeze(1))
461
+ # Robot state token.
462
+ if self.config.robot_state_feature:
463
+ encoder_in_tokens.append(self.encoder_robot_state_input_proj(batch[OBS_STATE]))
464
+ # Environment state token.
465
+ if self.config.env_state_feature:
466
+ encoder_in_tokens.append(self.encoder_env_state_input_proj(batch[OBS_ENV_STATE]))
467
+
468
+ if self.config.image_features:
469
+ # For a list of images, the H and W may vary but H*W is constant.
470
+ # NOTE: If modifying this section, verify on MPS devices that
471
+ # gradients remain stable (no explosions or NaNs).
472
+ for img in batch[OBS_IMAGES]:
473
+ cam_features = self.backbone(img)["feature_map"]
474
+ cam_pos_embed = self.encoder_cam_feat_pos_embed(cam_features).to(dtype=cam_features.dtype)
475
+ cam_features = self.encoder_img_feat_input_proj(cam_features)
476
+
477
+ # Rearrange features to (sequence, batch, dim).
478
+ cam_features = einops.rearrange(cam_features, "b c h w -> (h w) b c")
479
+ cam_pos_embed = einops.rearrange(cam_pos_embed, "b c h w -> (h w) b c")
480
+
481
+ # Extend immediately instead of accumulating and concatenating
482
+ # Convert to list to extend properly
483
+ encoder_in_tokens.extend(list(cam_features))
484
+ encoder_in_pos_embed.extend(list(cam_pos_embed))
485
+
486
+ # Stack all tokens along the sequence dimension.
487
+ encoder_in_tokens = torch.stack(encoder_in_tokens, axis=0)
488
+ encoder_in_pos_embed = torch.stack(encoder_in_pos_embed, axis=0)
489
+
490
+ # Forward pass through the transformer modules.
491
+ encoder_out = self.encoder(encoder_in_tokens, pos_embed=encoder_in_pos_embed)
492
+ # TODO(rcadene, alexander-soare): remove call to `device` ; precompute and use buffer
493
+ decoder_in = torch.zeros(
494
+ (self.config.chunk_size, batch_size, self.config.dim_model),
495
+ dtype=encoder_in_pos_embed.dtype,
496
+ device=encoder_in_pos_embed.device,
497
+ )
498
+ decoder_out = self.decoder(
499
+ decoder_in,
500
+ encoder_out,
501
+ encoder_pos_embed=encoder_in_pos_embed,
502
+ decoder_pos_embed=self.decoder_pos_embed.weight.unsqueeze(1),
503
+ )
504
+
505
+ # Move back to (B, S, C).
506
+ decoder_out = decoder_out.transpose(0, 1)
507
+
508
+ actions = self.action_head(decoder_out)
509
+
510
+ return actions, (mu, log_sigma_x2)
511
+
512
+
513
+ class ACTEncoder(nn.Module):
514
+ """Convenience module for running multiple encoder layers, maybe followed by normalization."""
515
+
516
+ def __init__(self, config: ACTConfig, is_vae_encoder: bool = False):
517
+ super().__init__()
518
+ self.is_vae_encoder = is_vae_encoder
519
+ num_layers = config.n_vae_encoder_layers if self.is_vae_encoder else config.n_encoder_layers
520
+ self.layers = nn.ModuleList([ACTEncoderLayer(config) for _ in range(num_layers)])
521
+ self.norm = nn.LayerNorm(config.dim_model) if config.pre_norm else nn.Identity()
522
+
523
+ def forward(
524
+ self, x: Tensor, pos_embed: Tensor | None = None, key_padding_mask: Tensor | None = None
525
+ ) -> Tensor:
526
+ for layer in self.layers:
527
+ x = layer(x, pos_embed=pos_embed, key_padding_mask=key_padding_mask)
528
+ x = self.norm(x)
529
+ return x
530
+
531
+
532
+ class ACTEncoderLayer(nn.Module):
533
+ def __init__(self, config: ACTConfig):
534
+ super().__init__()
535
+ self.self_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout)
536
+
537
+ # Feed forward layers.
538
+ self.linear1 = nn.Linear(config.dim_model, config.dim_feedforward)
539
+ self.dropout = nn.Dropout(config.dropout)
540
+ self.linear2 = nn.Linear(config.dim_feedforward, config.dim_model)
541
+
542
+ self.norm1 = nn.LayerNorm(config.dim_model)
543
+ self.norm2 = nn.LayerNorm(config.dim_model)
544
+ self.dropout1 = nn.Dropout(config.dropout)
545
+ self.dropout2 = nn.Dropout(config.dropout)
546
+
547
+ self.activation = get_activation_fn(config.feedforward_activation)
548
+ self.pre_norm = config.pre_norm
549
+
550
+ def forward(self, x, pos_embed: Tensor | None = None, key_padding_mask: Tensor | None = None) -> Tensor:
551
+ skip = x
552
+ if self.pre_norm:
553
+ x = self.norm1(x)
554
+ q = k = x if pos_embed is None else x + pos_embed
555
+ x = self.self_attn(q, k, value=x, key_padding_mask=key_padding_mask)
556
+ x = x[0] # note: [0] to select just the output, not the attention weights
557
+ x = skip + self.dropout1(x)
558
+ if self.pre_norm:
559
+ skip = x
560
+ x = self.norm2(x)
561
+ else:
562
+ x = self.norm1(x)
563
+ skip = x
564
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
565
+ x = skip + self.dropout2(x)
566
+ if not self.pre_norm:
567
+ x = self.norm2(x)
568
+ return x
569
+
570
+
571
+ class ACTDecoder(nn.Module):
572
+ def __init__(self, config: ACTConfig):
573
+ """Convenience module for running multiple decoder layers followed by normalization."""
574
+ super().__init__()
575
+ self.layers = nn.ModuleList([ACTDecoderLayer(config) for _ in range(config.n_decoder_layers)])
576
+ self.norm = nn.LayerNorm(config.dim_model)
577
+
578
+ def forward(
579
+ self,
580
+ x: Tensor,
581
+ encoder_out: Tensor,
582
+ decoder_pos_embed: Tensor | None = None,
583
+ encoder_pos_embed: Tensor | None = None,
584
+ ) -> Tensor:
585
+ for layer in self.layers:
586
+ x = layer(
587
+ x, encoder_out, decoder_pos_embed=decoder_pos_embed, encoder_pos_embed=encoder_pos_embed
588
+ )
589
+ if self.norm is not None:
590
+ x = self.norm(x)
591
+ return x
592
+
593
+
594
+ class ACTDecoderLayer(nn.Module):
595
+ def __init__(self, config: ACTConfig):
596
+ super().__init__()
597
+ self.self_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout)
598
+ self.multihead_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout)
599
+
600
+ # Feed forward layers.
601
+ self.linear1 = nn.Linear(config.dim_model, config.dim_feedforward)
602
+ self.dropout = nn.Dropout(config.dropout)
603
+ self.linear2 = nn.Linear(config.dim_feedforward, config.dim_model)
604
+
605
+ self.norm1 = nn.LayerNorm(config.dim_model)
606
+ self.norm2 = nn.LayerNorm(config.dim_model)
607
+ self.norm3 = nn.LayerNorm(config.dim_model)
608
+ self.dropout1 = nn.Dropout(config.dropout)
609
+ self.dropout2 = nn.Dropout(config.dropout)
610
+ self.dropout3 = nn.Dropout(config.dropout)
611
+
612
+ self.activation = get_activation_fn(config.feedforward_activation)
613
+ self.pre_norm = config.pre_norm
614
+
615
+ def maybe_add_pos_embed(self, tensor: Tensor, pos_embed: Tensor | None) -> Tensor:
616
+ return tensor if pos_embed is None else tensor + pos_embed
617
+
618
+ def forward(
619
+ self,
620
+ x: Tensor,
621
+ encoder_out: Tensor,
622
+ decoder_pos_embed: Tensor | None = None,
623
+ encoder_pos_embed: Tensor | None = None,
624
+ ) -> Tensor:
625
+ """
626
+ Args:
627
+ x: (Decoder Sequence, Batch, Channel) tensor of input tokens.
628
+ encoder_out: (Encoder Sequence, B, C) output features from the last layer of the encoder we are
629
+ cross-attending with.
630
+ encoder_pos_embed: (ES, 1, C) positional embedding for keys (from the encoder).
631
+ decoder_pos_embed: (DS, 1, C) positional embedding for the queries (from the decoder).
632
+ Returns:
633
+ (DS, B, C) tensor of decoder output features.
634
+ """
635
+ skip = x
636
+ if self.pre_norm:
637
+ x = self.norm1(x)
638
+ q = k = self.maybe_add_pos_embed(x, decoder_pos_embed)
639
+ x = self.self_attn(q, k, value=x)[0] # select just the output, not the attention weights
640
+ x = skip + self.dropout1(x)
641
+ if self.pre_norm:
642
+ skip = x
643
+ x = self.norm2(x)
644
+ else:
645
+ x = self.norm1(x)
646
+ skip = x
647
+ x = self.multihead_attn(
648
+ query=self.maybe_add_pos_embed(x, decoder_pos_embed),
649
+ key=self.maybe_add_pos_embed(encoder_out, encoder_pos_embed),
650
+ value=encoder_out,
651
+ )[0] # select just the output, not the attention weights
652
+ x = skip + self.dropout2(x)
653
+ if self.pre_norm:
654
+ skip = x
655
+ x = self.norm3(x)
656
+ else:
657
+ x = self.norm2(x)
658
+ skip = x
659
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
660
+ x = skip + self.dropout3(x)
661
+ if not self.pre_norm:
662
+ x = self.norm3(x)
663
+ return x
664
+
665
+
666
+ def create_sinusoidal_pos_embedding(num_positions: int, dimension: int) -> Tensor:
667
+ """1D sinusoidal positional embeddings as in Attention is All You Need.
668
+
669
+ Args:
670
+ num_positions: Number of token positions required.
671
+ Returns: (num_positions, dimension) position embeddings (the first dimension is the batch dimension).
672
+
673
+ """
674
+
675
+ def get_position_angle_vec(position):
676
+ return [position / np.power(10000, 2 * (hid_j // 2) / dimension) for hid_j in range(dimension)]
677
+
678
+ sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(num_positions)])
679
+ sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
680
+ sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
681
+ return torch.from_numpy(sinusoid_table).float()
682
+
683
+
684
+ class ACTSinusoidalPositionEmbedding2d(nn.Module):
685
+ """2D sinusoidal positional embeddings similar to what's presented in Attention Is All You Need.
686
+
687
+ The variation is that the position indices are normalized in [0, 2π] (not quite: the lower bound is 1/H
688
+ for the vertical direction, and 1/W for the horizontal direction).
689
+ """
690
+
691
+ def __init__(self, dimension: int):
692
+ """
693
+ Args:
694
+ dimension: The desired dimension of the embeddings.
695
+ """
696
+ super().__init__()
697
+ self.dimension = dimension
698
+ self._two_pi = 2 * math.pi
699
+ self._eps = 1e-6
700
+ # Inverse "common ratio" for the geometric progression in sinusoid frequencies.
701
+ self._temperature = 10000
702
+
703
+ def forward(self, x: Tensor) -> Tensor:
704
+ """
705
+ Args:
706
+ x: A (B, C, H, W) batch of 2D feature map to generate the embeddings for.
707
+ Returns:
708
+ A (1, C, H, W) batch of corresponding sinusoidal positional embeddings.
709
+ """
710
+ not_mask = torch.ones_like(x[0, :1]) # (1, H, W)
711
+ # Note: These are like range(1, H+1) and range(1, W+1) respectively, but in most implementations
712
+ # they would be range(0, H) and range(0, W). Keeping it as is to match the original code.
713
+ y_range = not_mask.cumsum(1, dtype=torch.float32)
714
+ x_range = not_mask.cumsum(2, dtype=torch.float32)
715
+
716
+ # "Normalize" the position index such that it ranges in [0, 2π].
717
+ # Note: Adding epsilon on the denominator should not be needed as all values of y_range and x_range
718
+ # are non-zero by construction. This is an artifact of the original code.
719
+ y_range = y_range / (y_range[:, -1:, :] + self._eps) * self._two_pi
720
+ x_range = x_range / (x_range[:, :, -1:] + self._eps) * self._two_pi
721
+
722
+ inverse_frequency = self._temperature ** (
723
+ 2 * (torch.arange(self.dimension, dtype=torch.float32, device=x.device) // 2) / self.dimension
724
+ )
725
+
726
+ x_range = x_range.unsqueeze(-1) / inverse_frequency # (1, H, W, 1)
727
+ y_range = y_range.unsqueeze(-1) / inverse_frequency # (1, H, W, 1)
728
+
729
+ # Note: this stack then flatten operation results in interleaved sine and cosine terms.
730
+ # pos_embed_x and pos_embed_y are (1, H, W, C // 2).
731
+ pos_embed_x = torch.stack((x_range[..., 0::2].sin(), x_range[..., 1::2].cos()), dim=-1).flatten(3)
732
+ pos_embed_y = torch.stack((y_range[..., 0::2].sin(), y_range[..., 1::2].cos()), dim=-1).flatten(3)
733
+ pos_embed = torch.cat((pos_embed_y, pos_embed_x), dim=3).permute(0, 3, 1, 2) # (1, C, H, W)
734
+
735
+ return pos_embed
736
+
737
+
738
+ def get_activation_fn(activation: str) -> Callable:
739
+ """Return an activation function given a string."""
740
+ if activation == "relu":
741
+ return F.relu
742
+ if activation == "gelu":
743
+ return F.gelu
744
+ if activation == "glu":
745
+ return F.glu
746
+ raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
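
During rollout, `ACTPolicy.select_action` only runs the transformer when its internal queue is empty. A minimal sketch of the resulting call pattern (hypothetical `policy` and `obs_batch` placeholders, not part of the file above):

```python
# `policy` is an ACTPolicy instance and `obs_batch` a dict of observation tensors
# containing the configured input features (images and/or states); both are
# hypothetical placeholders here.
policy.reset()  # clears the action queue; call this whenever the environment resets
for _ in range(policy.config.n_action_steps):
    # The first call runs predict_action_chunk and fills the queue with
    # n_action_steps actions; the remaining calls just pop from the queue.
    action = policy.select_action(obs_batch)
```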
lerobot/src/lerobot/policies/act/processor_act.py ADDED
@@ -0,0 +1,85 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ from typing import Any
17
+
18
+ import torch
19
+
20
+ from lerobot.policies.act.configuration_act import ACTConfig
21
+ from lerobot.processor import (
22
+ AddBatchDimensionProcessorStep,
23
+ DeviceProcessorStep,
24
+ NormalizerProcessorStep,
25
+ PolicyAction,
26
+ PolicyProcessorPipeline,
27
+ RenameObservationsProcessorStep,
28
+ UnnormalizerProcessorStep,
29
+ )
30
+ from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action
31
+ from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME
32
+
33
+
34
+ def make_act_pre_post_processors(
35
+ config: ACTConfig,
36
+ dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None,
37
+ ) -> tuple[
38
+ PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
39
+ PolicyProcessorPipeline[PolicyAction, PolicyAction],
40
+ ]:
41
+ """Creates the pre- and post-processing pipelines for the ACT policy.
42
+
43
+ The pre-processing pipeline handles normalization, batching, and device placement for the model inputs.
44
+ The post-processing pipeline handles unnormalization and moves the model outputs back to the CPU.
45
+
46
+ Args:
47
+ config (ACTConfig): The ACT policy configuration object.
48
+ dataset_stats (dict[str, dict[str, torch.Tensor]] | None): A dictionary containing dataset
49
+ statistics (e.g., mean and std) used for normalization. Defaults to None.
50
+
51
+ Returns:
52
+ tuple[PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], PolicyProcessorPipeline[PolicyAction, PolicyAction]]: A tuple containing the
53
+ pre-processor pipeline and the post-processor pipeline.
54
+ """
55
+
56
+ input_steps = [
57
+ RenameObservationsProcessorStep(rename_map={}),
58
+ AddBatchDimensionProcessorStep(),
59
+ DeviceProcessorStep(device=config.device),
60
+ NormalizerProcessorStep(
61
+ features={**config.input_features, **config.output_features},
62
+ norm_map=config.normalization_mapping,
63
+ stats=dataset_stats,
64
+ device=config.device,
65
+ ),
66
+ ]
67
+ output_steps = [
68
+ UnnormalizerProcessorStep(
69
+ features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
70
+ ),
71
+ DeviceProcessorStep(device="cpu"),
72
+ ]
73
+
74
+ return (
75
+ PolicyProcessorPipeline[dict[str, Any], dict[str, Any]](
76
+ steps=input_steps,
77
+ name=POLICY_PREPROCESSOR_DEFAULT_NAME,
78
+ ),
79
+ PolicyProcessorPipeline[PolicyAction, PolicyAction](
80
+ steps=output_steps,
81
+ name=POLICY_POSTPROCESSOR_DEFAULT_NAME,
82
+ to_transition=policy_action_to_transition,
83
+ to_output=transition_to_policy_action,
84
+ ),
85
+ )
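
The two pipelines returned above are meant to wrap the policy call. A rough sketch of that wiring (hypothetical `cfg`, `stats`, `policy`, and `raw_obs`, and assuming the pipelines are invoked as callables):

```python
# `cfg` is an ACTConfig, `stats` the dataset statistics dict, `policy` an ACTPolicy,
# and `raw_obs` a raw observation dict; all are hypothetical placeholders.
preprocessor, postprocessor = make_act_pre_post_processors(cfg, dataset_stats=stats)

batch = preprocessor(raw_obs)          # rename -> add batch dim -> move to device -> normalize
action = policy.select_action(batch)   # the policy sees normalized, device-placed tensors
action = postprocessor(action)         # unnormalize, then move back to CPU
```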
lerobot/src/lerobot/policies/diffusion/configuration_diffusion.py ADDED
@@ -0,0 +1,238 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 Columbia Artificial Intelligence, Robotics Lab,
4
+ # and The HuggingFace Inc. team. All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ from dataclasses import dataclass, field
18
+
19
+ from lerobot.configs.policies import PreTrainedConfig
20
+ from lerobot.configs.types import NormalizationMode
21
+ from lerobot.optim.optimizers import AdamConfig
22
+ from lerobot.optim.schedulers import DiffuserSchedulerConfig
23
+
24
+
25
+ @PreTrainedConfig.register_subclass("diffusion")
26
+ @dataclass
27
+ class DiffusionConfig(PreTrainedConfig):
28
+ """Configuration class for DiffusionPolicy.
29
+
30
+ Defaults are configured for training with PushT providing proprioceptive and single camera observations.
31
+
32
+ The parameters you will most likely need to change are the ones which depend on the environment / sensors.
33
+ Those are: `input_shapes` and `output_shapes`.
34
+
35
+ Notes on the inputs and outputs:
36
+ - "observation.state" is required as an input key.
37
+ - Either:
38
+ - At least one key starting with "observation.image" is required as an input.
39
+ AND/OR
40
+ - The key "observation.environment_state" is required as input.
41
+ - If there are multiple keys beginning with "observation.image" they are treated as multiple camera
42
+ views. Right now we only support all images having the same shape.
43
+ - "action" is required as an output key.
44
+
45
+ Args:
46
+ n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the
47
+ current step and additional steps going back).
48
+ horizon: Diffusion model action prediction size as detailed in `DiffusionPolicy.select_action`.
49
+ n_action_steps: The number of action steps to run in the environment for one invocation of the policy.
50
+ See `DiffusionPolicy.select_action` for more details.
51
+ input_shapes: A dictionary defining the shapes of the input data for the policy. The key represents
52
+ the input data name, and the value is a list indicating the dimensions of the corresponding data.
53
+ For example, "observation.image" refers to an input from a camera with dimensions [3, 96, 96],
54
+ indicating it has three color channels and 96x96 resolution. Importantly, `input_shapes` doesn't
55
+ include batch dimension or temporal dimension.
56
+ output_shapes: A dictionary defining the shapes of the output data for the policy. The key represents
57
+ the output data name, and the value is a list indicating the dimensions of the corresponding data.
58
+ For example, "action" refers to an output shape of [14], indicating 14-dimensional actions.
59
+ Importantly, `output_shapes` doesn't include batch dimension or temporal dimension.
60
+ input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"),
61
+ and the value specifies the normalization mode to apply. The two available modes are "mean_std"
62
+ which subtracts the mean and divides by the standard deviation and "min_max" which rescale in a
63
+ [-1, 1] range.
64
+ output_normalization_modes: Similar dictionary to `input_normalization_modes`, but used to unnormalize to the
65
+ original scale. Note that this is also used for normalizing the training targets.
66
+ vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
67
+ crop_shape: (H, W) shape to crop images to as a preprocessing step for the vision backbone. Must fit
68
+ within the image size. If None, no cropping is done.
69
+ crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval
70
+ mode).
71
+ pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
72
+ `None` means no pretrained weights.
73
+ use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
74
+ The group sizes are set to be about 16 (to be precise, feature_dim // 16).
75
+ spatial_softmax_num_keypoints: Number of keypoints for SpatialSoftmax.
76
+ use_separate_rgb_encoder_per_camera: Whether to use a separate RGB encoder for each camera view.
77
+ down_dims: Feature dimension for each stage of temporal downsampling in the diffusion modeling Unet.
78
+ You may provide a variable number of dimensions, therefore also controlling the degree of
79
+ downsampling.
80
+ kernel_size: The convolutional kernel size of the diffusion modeling Unet.
81
+ n_groups: Number of groups used in the group norm of the Unet's convolutional blocks.
82
+ diffusion_step_embed_dim: The Unet is conditioned on the diffusion timestep via a small non-linear
83
+ network. This is the output dimension of that network, i.e., the embedding dimension.
84
+ use_film_scale_modulation: FiLM (https://huggingface.co/papers/1709.07871) is used for the Unet conditioning.
85
+ Bias modulation is used by default, while this parameter indicates whether to also use scale
86
+ modulation.
87
+ noise_scheduler_type: Name of the noise scheduler to use. Supported options: ["DDPM", "DDIM"].
88
+ num_train_timesteps: Number of diffusion steps for the forward diffusion schedule.
89
+ beta_schedule: Name of the diffusion beta schedule as per DDPMScheduler from Hugging Face diffusers.
90
+ beta_start: Beta value for the first forward-diffusion step.
91
+ beta_end: Beta value for the last forward-diffusion step.
92
+ prediction_type: The type of prediction that the diffusion modeling Unet makes. Choose from "epsilon"
93
+ or "sample". These have equivalent outcomes from a latent variable modeling perspective, but
94
+ "epsilon" has been shown to work better in many deep neural network settings.
95
+ clip_sample: Whether to clip the sample to [-`clip_sample_range`, +`clip_sample_range`] for each
96
+ denoising step at inference time. WARNING: you will need to make sure your action-space is
97
+ normalized to fit within this range.
98
+ clip_sample_range: The magnitude of the clipping range as described above.
99
+ num_inference_steps: Number of reverse diffusion steps to use at inference time (steps are evenly
100
+ spaced). If not provided, this defaults to be the same as `num_train_timesteps`.
101
+ do_mask_loss_for_padding: Whether to mask the loss when there are copy-padded actions. See
102
+ `LeRobotDataset` and `load_previous_and_future_frames` for more information. Note, this defaults
103
+ to False as the original Diffusion Policy implementation does the same.
104
+ """
105
+
106
+ # Inputs / output structure.
107
+ n_obs_steps: int = 2
108
+ horizon: int = 16
109
+ n_action_steps: int = 8
110
+
111
+ normalization_mapping: dict[str, NormalizationMode] = field(
112
+ default_factory=lambda: {
113
+ "VISUAL": NormalizationMode.MEAN_STD,
114
+ "STATE": NormalizationMode.MIN_MAX,
115
+ "ACTION": NormalizationMode.MIN_MAX,
116
+ }
117
+ )
118
+
119
+ # The original implementation doesn't sample frames for the last 7 steps,
120
+ # which avoids excessive padding and leads to improved training results.
121
+ drop_n_last_frames: int = 7 # horizon - n_action_steps - n_obs_steps + 1
122
+
123
+ # Architecture / modeling.
124
+ # Vision backbone.
125
+ vision_backbone: str = "resnet18"
126
+ crop_shape: tuple[int, int] | None = (84, 84)
127
+ crop_is_random: bool = True
128
+ pretrained_backbone_weights: str | None = None
129
+ use_group_norm: bool = True
130
+ spatial_softmax_num_keypoints: int = 32
131
+ use_separate_rgb_encoder_per_camera: bool = False
132
+ # Unet.
133
+ down_dims: tuple[int, ...] = (512, 1024, 2048)
134
+ kernel_size: int = 5
135
+ n_groups: int = 8
136
+ diffusion_step_embed_dim: int = 128
137
+ use_film_scale_modulation: bool = True
138
+ # Noise scheduler.
139
+ noise_scheduler_type: str = "DDPM"
140
+ num_train_timesteps: int = 100
141
+ beta_schedule: str = "squaredcos_cap_v2"
142
+ beta_start: float = 0.0001
143
+ beta_end: float = 0.02
144
+ prediction_type: str = "epsilon"
145
+ clip_sample: bool = True
146
+ clip_sample_range: float = 1.0
147
+
148
+ # Inference
149
+ num_inference_steps: int | None = None
150
+
151
+ # Loss computation
152
+ do_mask_loss_for_padding: bool = False
153
+
154
+ # Training presets
155
+ optimizer_lr: float = 1e-4
156
+ optimizer_betas: tuple = (0.95, 0.999)
157
+ optimizer_eps: float = 1e-8
158
+ optimizer_weight_decay: float = 1e-6
159
+ scheduler_name: str = "cosine"
160
+ scheduler_warmup_steps: int = 500
161
+
162
+ def __post_init__(self):
163
+ super().__post_init__()
164
+
165
+ """Input validation (not exhaustive)."""
166
+ if not self.vision_backbone.startswith("resnet"):
167
+ raise ValueError(
168
+ f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}."
169
+ )
170
+
171
+ supported_prediction_types = ["epsilon", "sample"]
172
+ if self.prediction_type not in supported_prediction_types:
173
+ raise ValueError(
174
+ f"`prediction_type` must be one of {supported_prediction_types}. Got {self.prediction_type}."
175
+ )
176
+ supported_noise_schedulers = ["DDPM", "DDIM"]
177
+ if self.noise_scheduler_type not in supported_noise_schedulers:
178
+ raise ValueError(
179
+ f"`noise_scheduler_type` must be one of {supported_noise_schedulers}. "
180
+ f"Got {self.noise_scheduler_type}."
181
+ )
182
+
183
+ # Check that the horizon size and U-Net downsampling is compatible.
184
+ # U-Net downsamples by 2 with each stage.
185
+ downsampling_factor = 2 ** len(self.down_dims)
186
+ if self.horizon % downsampling_factor != 0:
187
+ raise ValueError(
188
+ "The horizon should be an integer multiple of the downsampling factor (which is determined "
189
+ f"by `len(down_dims)`). Got {self.horizon=} and {self.down_dims=}"
190
+ )
191
+
192
+ def get_optimizer_preset(self) -> AdamConfig:
193
+ return AdamConfig(
194
+ lr=self.optimizer_lr,
195
+ betas=self.optimizer_betas,
196
+ eps=self.optimizer_eps,
197
+ weight_decay=self.optimizer_weight_decay,
198
+ )
199
+
200
+ def get_scheduler_preset(self) -> DiffuserSchedulerConfig:
201
+ return DiffuserSchedulerConfig(
202
+ name=self.scheduler_name,
203
+ num_warmup_steps=self.scheduler_warmup_steps,
204
+ )
205
+
206
+ def validate_features(self) -> None:
207
+ if len(self.image_features) == 0 and self.env_state_feature is None:
208
+ raise ValueError("You must provide at least one image or the environment state among the inputs.")
209
+
210
+ if self.crop_shape is not None:
211
+ for key, image_ft in self.image_features.items():
212
+ if self.crop_shape[0] > image_ft.shape[1] or self.crop_shape[1] > image_ft.shape[2]:
213
+ raise ValueError(
214
+ f"`crop_shape` should fit within the images shapes. Got {self.crop_shape} "
215
+ f"for `crop_shape` and {image_ft.shape} for "
216
+ f"`{key}`."
217
+ )
218
+
219
+ # Check that all input images have the same shape.
220
+ if len(self.image_features) > 0:
221
+ first_image_key, first_image_ft = next(iter(self.image_features.items()))
222
+ for key, image_ft in self.image_features.items():
223
+ if image_ft.shape != first_image_ft.shape:
224
+ raise ValueError(
225
+ f"`{key}` does not match `{first_image_key}`, but we expect all image shapes to match."
226
+ )
227
+
228
+ @property
229
+ def observation_delta_indices(self) -> list:
230
+ return list(range(1 - self.n_obs_steps, 1))
231
+
232
+ @property
233
+ def action_delta_indices(self) -> list:
234
+ return list(range(1 - self.n_obs_steps, 1 - self.n_obs_steps + self.horizon))
235
+
236
+ @property
237
+ def reward_delta_indices(self) -> None:
238
+ return None
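
The horizon check in `__post_init__` ties `horizon` to the number of U-Net stages. A small sketch of the constraint with the defaults above (hypothetical usage, not taken from the file itself):

```python
from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig

# Three down_dims stages => downsampling factor 2 ** 3 = 8, so the horizon must
# be a multiple of 8. The defaults (horizon=16, n_action_steps=8, n_obs_steps=2) pass.
cfg = DiffusionConfig(horizon=16)

# DiffusionConfig(horizon=20) would raise ValueError, since 20 % 8 != 0.
```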
lerobot/src/lerobot/policies/diffusion/modeling_diffusion.py ADDED
@@ -0,0 +1,764 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 Columbia Artificial Intelligence, Robotics Lab,
4
+ # and The HuggingFace Inc. team. All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ """Diffusion Policy as per "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion"
18
+
19
+ TODO(alexander-soare):
20
+ - Remove reliance on diffusers for DDPMScheduler and LR scheduler.
21
+ """
22
+
23
+ import math
24
+ from collections import deque
25
+ from collections.abc import Callable
26
+
27
+ import einops
28
+ import numpy as np
29
+ import torch
30
+ import torch.nn.functional as F # noqa: N812
31
+ import torchvision
32
+ from diffusers.schedulers.scheduling_ddim import DDIMScheduler
33
+ from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
34
+ from torch import Tensor, nn
35
+
36
+ from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
37
+ from lerobot.policies.pretrained import PreTrainedPolicy
38
+ from lerobot.policies.utils import (
39
+ get_device_from_parameters,
40
+ get_dtype_from_parameters,
41
+ get_output_shape,
42
+ populate_queues,
43
+ )
44
+ from lerobot.utils.constants import ACTION, OBS_ENV_STATE, OBS_IMAGES, OBS_STATE
45
+
46
+
47
+ class DiffusionPolicy(PreTrainedPolicy):
48
+ """
49
+ Diffusion Policy as per "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion"
50
+ (paper: https://huggingface.co/papers/2303.04137, code: https://github.com/real-stanford/diffusion_policy).
51
+ """
52
+
53
+ config_class = DiffusionConfig
54
+ name = "diffusion"
55
+
56
+ def __init__(
57
+ self,
58
+ config: DiffusionConfig,
59
+ **kwargs,
60
+ ):
61
+ """
62
+ Args:
63
+ config: Policy configuration class instance or None, in which case the default instantiation of
64
+ the configuration class is used.
65
+ dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
66
+ that they will be passed with a call to `load_state_dict` before the policy is used.
67
+ """
68
+ super().__init__(config)
69
+ config.validate_features()
70
+ self.config = config
71
+
72
+ # queues are populated during rollout of the policy, they contain the n latest observations and actions
73
+ self._queues = None
74
+
75
+ self.diffusion = DiffusionModel(config)
76
+
77
+ self.reset()
78
+
79
+ def get_optim_params(self) -> dict:
80
+ return self.diffusion.parameters()
81
+
82
+ def reset(self):
83
+ """Clear observation and action queues. Should be called on `env.reset()`"""
84
+ self._queues = {
85
+ OBS_STATE: deque(maxlen=self.config.n_obs_steps),
86
+ ACTION: deque(maxlen=self.config.n_action_steps),
87
+ }
88
+ if self.config.image_features:
89
+ self._queues[OBS_IMAGES] = deque(maxlen=self.config.n_obs_steps)
90
+ if self.config.env_state_feature:
91
+ self._queues[OBS_ENV_STATE] = deque(maxlen=self.config.n_obs_steps)
92
+
93
+ @torch.no_grad()
94
+ def predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
95
+ """Predict a chunk of actions given environment observations."""
96
+ # stack n latest observations from the queue
97
+ batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues}
98
+ actions = self.diffusion.generate_actions(batch, noise=noise)
99
+
100
+ return actions
101
+
102
+ @torch.no_grad()
103
+ def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
104
+ """Select a single action given environment observations.
105
+
106
+ This method handles caching a history of observations and an action trajectory generated by the
107
+ underlying diffusion model. Here's how it works:
108
+ - `n_obs_steps` steps worth of observations are cached (for the first steps, the observation is
109
+ copied `n_obs_steps` times to fill the cache).
110
+ - The diffusion model generates `horizon` steps worth of actions.
111
+ - `n_action_steps` worth of actions are actually kept for execution, starting from the current step.
112
+ Schematically this looks like:
113
+ ----------------------------------------------------------------------------------------------
114
+ (legend: o = n_obs_steps, h = horizon, a = n_action_steps)
115
+ |timestep | n-o+1 | n-o+2 | ..... | n | ..... | n+a-1 | n+a | ..... | n-o+h |
116
+ |observation is used | YES | YES | YES | YES | NO | NO | NO | NO | NO |
117
+ |action is generated | YES | YES | YES | YES | YES | YES | YES | YES | YES |
118
+ |action is used | NO | NO | NO | YES | YES | YES | NO | NO | NO |
119
+ ----------------------------------------------------------------------------------------------
120
+ Note that this means we require: `n_action_steps <= horizon - n_obs_steps + 1`. Also, note that
121
+ "horizon" may not the best name to describe what the variable actually means, because this period is
122
+ actually measured from the first observation which (if `n_obs_steps` > 1) happened in the past.
123
+ """
124
+ # NOTE: for offline evaluation, we have action in the batch, so we need to pop it out
125
+ if ACTION in batch:
126
+ batch.pop(ACTION)
127
+
128
+ if self.config.image_features:
129
+ batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
130
+ batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4)
131
+ # NOTE: It's important that this happens after stacking the images into a single key.
132
+ self._queues = populate_queues(self._queues, batch)
133
+
134
+ if len(self._queues[ACTION]) == 0:
135
+ actions = self.predict_action_chunk(batch, noise=noise)
136
+ self._queues[ACTION].extend(actions.transpose(0, 1))
137
+
138
+ action = self._queues[ACTION].popleft()
139
+ return action
140
+
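
A self-contained sketch of the caching behavior described in the docstring above: the diffusion model is replaced by a dummy chunk generator, and the deque shows that a fresh chunk is only sampled once every `n_action_steps` calls (all names and shapes below are illustrative, not part of this file).

```python
from collections import deque

import torch

n_action_steps, action_dim = 8, 7
action_queue = deque(maxlen=n_action_steps)
chunks_generated = 0

def dummy_predict_chunk() -> torch.Tensor:
    # Stand-in for predict_action_chunk(): returns (1, n_action_steps, action_dim).
    global chunks_generated
    chunks_generated += 1
    return torch.zeros(1, n_action_steps, action_dim)

def select_action() -> torch.Tensor:
    # Mirrors the queue logic above: refill only when the cached chunk is exhausted.
    if len(action_queue) == 0:
        action_queue.extend(dummy_predict_chunk().transpose(0, 1))
    return action_queue.popleft()

for _ in range(20):
    select_action()
assert chunks_generated == 3  # chunks sampled at calls 1, 9 and 17
```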
141
+ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, None]:
142
+ """Run the batch through the model and compute the loss for training or validation."""
143
+ if self.config.image_features:
144
+ batch = dict(batch) # shallow copy so that adding a key doesn't modify the original
145
+ batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4)
146
+ loss = self.diffusion.compute_loss(batch)
147
+ # no output_dict so returning None
148
+ return loss, None
149
+
150
+
151
+ def _make_noise_scheduler(name: str, **kwargs: dict) -> DDPMScheduler | DDIMScheduler:
152
+ """
153
+ Factory for noise scheduler instances of the requested type. All kwargs are passed
154
+ to the scheduler.
155
+ """
156
+ if name == "DDPM":
157
+ return DDPMScheduler(**kwargs)
158
+ elif name == "DDIM":
159
+ return DDIMScheduler(**kwargs)
160
+ else:
161
+ raise ValueError(f"Unsupported noise scheduler type {name}")
162
+
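
For reference, the scheduler the factory returns for `name="DDIM"` is a standard `diffusers` scheduler; a minimal instantiation with the same keyword names that `DiffusionModel` forwards from its config might look like this (the values are illustrative defaults, not mandated by this file).

```python
from diffusers import DDIMScheduler

# Same kwargs DiffusionModel passes through; the values here are only examples.
scheduler = DDIMScheduler(
    num_train_timesteps=100,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="squaredcos_cap_v2",
    clip_sample=True,
    clip_sample_range=1.0,
    prediction_type="epsilon",
)
scheduler.set_timesteps(10)  # inference can use fewer denoising steps than training
```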
163
+
164
+ class DiffusionModel(nn.Module):
165
+ def __init__(self, config: DiffusionConfig):
166
+ super().__init__()
167
+ self.config = config
168
+
169
+ # Build observation encoders (depending on which observations are provided).
170
+ global_cond_dim = self.config.robot_state_feature.shape[0]
171
+ if self.config.image_features:
172
+ num_images = len(self.config.image_features)
173
+ if self.config.use_separate_rgb_encoder_per_camera:
174
+ encoders = [DiffusionRgbEncoder(config) for _ in range(num_images)]
175
+ self.rgb_encoder = nn.ModuleList(encoders)
176
+ global_cond_dim += encoders[0].feature_dim * num_images
177
+ else:
178
+ self.rgb_encoder = DiffusionRgbEncoder(config)
179
+ global_cond_dim += self.rgb_encoder.feature_dim * num_images
180
+ if self.config.env_state_feature:
181
+ global_cond_dim += self.config.env_state_feature.shape[0]
182
+
183
+ self.unet = DiffusionConditionalUnet1d(config, global_cond_dim=global_cond_dim * config.n_obs_steps)
184
+
185
+ self.noise_scheduler = _make_noise_scheduler(
186
+ config.noise_scheduler_type,
187
+ num_train_timesteps=config.num_train_timesteps,
188
+ beta_start=config.beta_start,
189
+ beta_end=config.beta_end,
190
+ beta_schedule=config.beta_schedule,
191
+ clip_sample=config.clip_sample,
192
+ clip_sample_range=config.clip_sample_range,
193
+ prediction_type=config.prediction_type,
194
+ )
195
+
196
+ if config.num_inference_steps is None:
197
+ self.num_inference_steps = self.noise_scheduler.config.num_train_timesteps
198
+ else:
199
+ self.num_inference_steps = config.num_inference_steps
200
+
201
+ # ========= inference ============
202
+ def conditional_sample(
203
+ self,
204
+ batch_size: int,
205
+ global_cond: Tensor | None = None,
206
+ generator: torch.Generator | None = None,
207
+ noise: Tensor | None = None,
208
+ ) -> Tensor:
209
+ device = get_device_from_parameters(self)
210
+ dtype = get_dtype_from_parameters(self)
211
+
212
+ # Sample prior.
213
+ sample = (
214
+ noise
215
+ if noise is not None
216
+ else torch.randn(
217
+ size=(batch_size, self.config.horizon, self.config.action_feature.shape[0]),
218
+ dtype=dtype,
219
+ device=device,
220
+ generator=generator,
221
+ )
222
+ )
223
+
224
+ self.noise_scheduler.set_timesteps(self.num_inference_steps)
225
+
226
+ for t in self.noise_scheduler.timesteps:
227
+ # Predict model output.
228
+ model_output = self.unet(
229
+ sample,
230
+ torch.full(sample.shape[:1], t, dtype=torch.long, device=sample.device),
231
+ global_cond=global_cond,
232
+ )
233
+ # Compute previous image: x_t -> x_t-1
234
+ sample = self.noise_scheduler.step(model_output, t, sample, generator=generator).prev_sample
235
+
236
+ return sample
237
+
238
+ def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor:
239
+ """Encode image features and concatenate them all together along with the state vector."""
240
+ batch_size, n_obs_steps = batch[OBS_STATE].shape[:2]
241
+ global_cond_feats = [batch[OBS_STATE]]
242
+ # Extract image features.
243
+ if self.config.image_features:
244
+ if self.config.use_separate_rgb_encoder_per_camera:
245
+ # Combine batch and sequence dims while rearranging to make the camera index dimension first.
246
+ images_per_camera = einops.rearrange(batch[OBS_IMAGES], "b s n ... -> n (b s) ...")
247
+ img_features_list = torch.cat(
248
+ [
249
+ encoder(images)
250
+ for encoder, images in zip(self.rgb_encoder, images_per_camera, strict=True)
251
+ ]
252
+ )
253
+ # Separate batch and sequence dims back out. The camera index dim gets absorbed into the
254
+ # feature dim (effectively concatenating the camera features).
255
+ img_features = einops.rearrange(
256
+ img_features_list, "(n b s) ... -> b s (n ...)", b=batch_size, s=n_obs_steps
257
+ )
258
+ else:
259
+ # Combine batch, sequence, and "which camera" dims before passing to shared encoder.
260
+ img_features = self.rgb_encoder(
261
+ einops.rearrange(batch[OBS_IMAGES], "b s n ... -> (b s n) ...")
262
+ )
263
+ # Separate batch dim and sequence dim back out. The camera index dim gets absorbed into the
264
+ # feature dim (effectively concatenating the camera features).
265
+ img_features = einops.rearrange(
266
+ img_features, "(b s n) ... -> b s (n ...)", b=batch_size, s=n_obs_steps
267
+ )
268
+ global_cond_feats.append(img_features)
269
+
270
+ if self.config.env_state_feature:
271
+ global_cond_feats.append(batch[OBS_ENV_STATE])
272
+
273
+ # Concatenate features then flatten to (B, global_cond_dim).
274
+ return torch.cat(global_cond_feats, dim=-1).flatten(start_dim=1)
275
+
276
+ def generate_actions(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
277
+ """
278
+ This function expects `batch` to have:
279
+ {
280
+ "observation.state": (B, n_obs_steps, state_dim)
281
+
282
+ "observation.images": (B, n_obs_steps, num_cameras, C, H, W)
283
+ AND/OR
284
+ "observation.environment_state": (B, n_obs_steps, environment_dim)
285
+ }
286
+ """
287
+ batch_size, n_obs_steps = batch[OBS_STATE].shape[:2]
288
+ assert n_obs_steps == self.config.n_obs_steps
289
+
290
+ # Encode image features and concatenate them all together along with the state vector.
291
+ global_cond = self._prepare_global_conditioning(batch) # (B, global_cond_dim)
292
+
293
+ # run sampling
294
+ actions = self.conditional_sample(batch_size, global_cond=global_cond, noise=noise)
295
+
296
+ # Extract `n_action_steps` steps worth of actions (from the current observation).
297
+ start = n_obs_steps - 1
298
+ end = start + self.config.n_action_steps
299
+ actions = actions[:, start:end]
300
+
301
+ return actions
302
+
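
A concrete illustration of the slice above, with example values `horizon=16`, `n_obs_steps=2`, `n_action_steps=8` (chosen only for illustration): the sampler produces the full horizon, and the executed window starts at the step aligned with the current observation.

```python
import torch

horizon, n_obs_steps, n_action_steps = 16, 2, 8  # example config values
actions = torch.zeros(4, horizon, 7)             # (B, horizon, action_dim) from the sampler
start = n_obs_steps - 1                          # 1: index aligned with the current step
end = start + n_action_steps                     # 9
kept = actions[:, start:end]                     # (4, 8, 7) actions actually executed
assert kept.shape == (4, 8, 7)
```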
303
+ def compute_loss(self, batch: dict[str, Tensor]) -> Tensor:
304
+ """
305
+ This function expects `batch` to have (at least):
306
+ {
307
+ "observation.state": (B, n_obs_steps, state_dim)
308
+
309
+ "observation.images": (B, n_obs_steps, num_cameras, C, H, W)
310
+ AND/OR
311
+ "observation.environment_state": (B, n_obs_steps, environment_dim)
312
+
313
+ "action": (B, horizon, action_dim)
314
+ "action_is_pad": (B, horizon)
315
+ }
316
+ """
317
+ # Input validation.
318
+ assert set(batch).issuperset({OBS_STATE, ACTION, "action_is_pad"})
319
+ assert OBS_IMAGES in batch or OBS_ENV_STATE in batch
320
+ n_obs_steps = batch[OBS_STATE].shape[1]
321
+ horizon = batch[ACTION].shape[1]
322
+ assert horizon == self.config.horizon
323
+ assert n_obs_steps == self.config.n_obs_steps
324
+
325
+ # Encode image features and concatenate them all together along with the state vector.
326
+ global_cond = self._prepare_global_conditioning(batch) # (B, global_cond_dim)
327
+
328
+ # Forward diffusion.
329
+ trajectory = batch[ACTION]
330
+ # Sample noise to add to the trajectory.
331
+ eps = torch.randn(trajectory.shape, device=trajectory.device)
332
+ # Sample a random noising timestep for each item in the batch.
333
+ timesteps = torch.randint(
334
+ low=0,
335
+ high=self.noise_scheduler.config.num_train_timesteps,
336
+ size=(trajectory.shape[0],),
337
+ device=trajectory.device,
338
+ ).long()
339
+ # Add noise to the clean trajectories according to the noise magnitude at each timestep.
340
+ noisy_trajectory = self.noise_scheduler.add_noise(trajectory, eps, timesteps)
341
+
342
+ # Run the denoising network (that might denoise the trajectory, or attempt to predict the noise).
343
+ pred = self.unet(noisy_trajectory, timesteps, global_cond=global_cond)
344
+
345
+ # Compute the loss.
346
+ # The target is either the original trajectory, or the noise.
347
+ if self.config.prediction_type == "epsilon":
348
+ target = eps
349
+ elif self.config.prediction_type == "sample":
350
+ target = batch[ACTION]
351
+ else:
352
+ raise ValueError(f"Unsupported prediction type {self.config.prediction_type}")
353
+
354
+ loss = F.mse_loss(pred, target, reduction="none")
355
+
356
+ # Mask loss wherever the action is padded with copies (edges of the dataset trajectory).
357
+ if self.config.do_mask_loss_for_padding:
358
+ if "action_is_pad" not in batch:
359
+ raise ValueError(
360
+ "You need to provide 'action_is_pad' in the batch when "
361
+ f"{self.config.do_mask_loss_for_padding=}."
362
+ )
363
+ in_episode_bound = ~batch["action_is_pad"]
364
+ loss = loss * in_episode_bound.unsqueeze(-1)
365
+
366
+ return loss.mean()
367
+
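
For reference, `add_noise` above implements the standard DDPM forward process (standard formulation, not code from this file): given clean actions $x_0$, sampled noise $\epsilon$ and timestep $t$,

```latex
x_t = \sqrt{\bar{\alpha}_t}\, x_0 + \sqrt{1 - \bar{\alpha}_t}\, \epsilon,
\qquad \bar{\alpha}_t = \prod_{s=1}^{t} \alpha_s, \quad \alpha_s = 1 - \beta_s
```

so with `prediction_type="epsilon"` the network is trained to recover $\epsilon$, and with `"sample"` it is trained to recover $x_0$ directly.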
368
+
369
+ class SpatialSoftmax(nn.Module):
370
+ """
371
+ Spatial Soft Argmax operation described in "Deep Spatial Autoencoders for Visuomotor Learning" by Finn et al.
372
+ (https://huggingface.co/papers/1509.06113). A minimal port of the robomimic implementation.
373
+
374
+ At a high level, this takes 2D feature maps (from a convnet/ViT) and returns the "center of mass"
375
+ of activations of each channel, i.e., keypoints in the image space for the policy to focus on.
376
+
377
+ Example: take feature maps of size (512x10x12). We generate a grid of normalized coordinates (10x12x2):
378
+ -----------------------------------------------------
379
+ | (-1., -1.) | (-0.82, -1.) | ... | (1., -1.) |
380
+ | (-1., -0.78) | (-0.82, -0.78) | ... | (1., -0.78) |
381
+ | ... | ... | ... | ... |
382
+ | (-1., 1.) | (-0.82, 1.) | ... | (1., 1.) |
383
+ -----------------------------------------------------
384
+ This is achieved by applying channel-wise softmax over the activations (512x120) and computing the dot
385
+ product with the coordinates (120x2) to get expected points of maximal activation (512x2).
386
+
387
+ The example above results in 512 keypoints (corresponding to the 512 input channels). We can optionally
388
+ provide num_kp != None to control the number of keypoints. This is achieved by first applying a learnable
389
+ linear mapping (in_channels, H, W) -> (num_kp, H, W).
390
+ """
391
+
392
+ def __init__(self, input_shape, num_kp=None):
393
+ """
394
+ Args:
395
+ input_shape (list): (C, H, W) input feature map shape.
396
+ num_kp (int): number of keypoints in output. If None, output will have the same number of channels as input.
397
+ """
398
+ super().__init__()
399
+
400
+ assert len(input_shape) == 3
401
+ self._in_c, self._in_h, self._in_w = input_shape
402
+
403
+ if num_kp is not None:
404
+ self.nets = torch.nn.Conv2d(self._in_c, num_kp, kernel_size=1)
405
+ self._out_c = num_kp
406
+ else:
407
+ self.nets = None
408
+ self._out_c = self._in_c
409
+
410
+ # we could use torch.linspace directly but that seems to behave slightly differently than numpy
411
+ # and causes a small degradation in pc_success of pre-trained models.
412
+ pos_x, pos_y = np.meshgrid(np.linspace(-1.0, 1.0, self._in_w), np.linspace(-1.0, 1.0, self._in_h))
413
+ pos_x = torch.from_numpy(pos_x.reshape(self._in_h * self._in_w, 1)).float()
414
+ pos_y = torch.from_numpy(pos_y.reshape(self._in_h * self._in_w, 1)).float()
415
+ # register as buffer so it's moved to the correct device.
416
+ self.register_buffer("pos_grid", torch.cat([pos_x, pos_y], dim=1))
417
+
418
+ def forward(self, features: Tensor) -> Tensor:
419
+ """
420
+ Args:
421
+ features: (B, C, H, W) input feature maps.
422
+ Returns:
423
+ (B, K, 2) image-space coordinates of keypoints.
424
+ """
425
+ if self.nets is not None:
426
+ features = self.nets(features)
427
+
428
+ # [B, K, H, W] -> [B * K, H * W] where K is number of keypoints
429
+ features = features.reshape(-1, self._in_h * self._in_w)
430
+ # 2d softmax normalization
431
+ attention = F.softmax(features, dim=-1)
432
+ # [B * K, H * W] x [H * W, 2] -> [B * K, 2] for spatial coordinate mean in x and y dimensions
433
+ expected_xy = attention @ self.pos_grid
434
+ # reshape to [B, K, 2]
435
+ feature_keypoints = expected_xy.view(-1, self._out_c, 2)
436
+
437
+ return feature_keypoints
438
+
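
A standalone sketch of the same computation on toy shapes (values are illustrative), showing how the channel-wise softmax over spatial locations, dotted with the coordinate grid, yields one expected (x, y) keypoint per channel.

```python
import torch
import torch.nn.functional as F

B, C, H, W = 2, 512, 10, 12
features = torch.randn(B, C, H, W)

# Normalized coordinate grid, one (x, y) pair per spatial location: (H*W, 2).
ys, xs = torch.meshgrid(
    torch.linspace(-1.0, 1.0, H), torch.linspace(-1.0, 1.0, W), indexing="ij"
)
pos_grid = torch.stack([xs.reshape(-1), ys.reshape(-1)], dim=1)

# Channel-wise softmax over spatial locations, then expected coordinates.
attention = F.softmax(features.reshape(B * C, H * W), dim=-1)  # (B*C, H*W)
keypoints = (attention @ pos_grid).view(B, C, 2)               # (B, C, 2)
assert keypoints.shape == (2, 512, 2)
```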
439
+
440
+ class DiffusionRgbEncoder(nn.Module):
441
+ """Encodes an RGB image into a 1D feature vector.
442
+
443
+ Includes the ability to normalize and crop the image first.
444
+ """
445
+
446
+ def __init__(self, config: DiffusionConfig):
447
+ super().__init__()
448
+ # Set up optional preprocessing.
449
+ if config.crop_shape is not None:
450
+ self.do_crop = True
451
+ # Always use center crop for eval
452
+ self.center_crop = torchvision.transforms.CenterCrop(config.crop_shape)
453
+ if config.crop_is_random:
454
+ self.maybe_random_crop = torchvision.transforms.RandomCrop(config.crop_shape)
455
+ else:
456
+ self.maybe_random_crop = self.center_crop
457
+ else:
458
+ self.do_crop = False
459
+
460
+ # Set up backbone.
461
+ backbone_model = getattr(torchvision.models, config.vision_backbone)(
462
+ weights=config.pretrained_backbone_weights
463
+ )
464
+ # Note: This assumes that the layer4 feature map is children()[-3]
465
+ # TODO(alexander-soare): Use a safer alternative.
466
+ self.backbone = nn.Sequential(*(list(backbone_model.children())[:-2]))
467
+ if config.use_group_norm:
468
+ if config.pretrained_backbone_weights:
469
+ raise ValueError(
470
+ "You can't replace BatchNorm in a pretrained model without ruining the weights!"
471
+ )
472
+ self.backbone = _replace_submodules(
473
+ root_module=self.backbone,
474
+ predicate=lambda x: isinstance(x, nn.BatchNorm2d),
475
+ func=lambda x: nn.GroupNorm(num_groups=x.num_features // 16, num_channels=x.num_features),
476
+ )
477
+
478
+ # Set up pooling and final layers.
479
+ # Use a dry run to get the feature map shape.
480
+ # The dummy input should take the number of image channels from `config.image_features` and it should
481
+ # use the height and width from `config.crop_shape` if it is provided, otherwise it should use the
482
+ # height and width from `config.image_features`.
483
+
484
+ # Note: we have a check in the config class to make sure all images have the same shape.
485
+ images_shape = next(iter(config.image_features.values())).shape
486
+ dummy_shape_h_w = config.crop_shape if config.crop_shape is not None else images_shape[1:]
487
+ dummy_shape = (1, images_shape[0], *dummy_shape_h_w)
488
+ feature_map_shape = get_output_shape(self.backbone, dummy_shape)[1:]
489
+
490
+ self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints)
491
+ self.feature_dim = config.spatial_softmax_num_keypoints * 2
492
+ self.out = nn.Linear(config.spatial_softmax_num_keypoints * 2, self.feature_dim)
493
+ self.relu = nn.ReLU()
494
+
495
+ def forward(self, x: Tensor) -> Tensor:
496
+ """
497
+ Args:
498
+ x: (B, C, H, W) image tensor with pixel values in [0, 1].
499
+ Returns:
500
+ (B, D) image feature.
501
+ """
502
+ # Preprocess: maybe crop (if it was set up in the __init__).
503
+ if self.do_crop:
504
+ if self.training: # noqa: SIM108
505
+ x = self.maybe_random_crop(x)
506
+ else:
507
+ # Always use center crop for eval.
508
+ x = self.center_crop(x)
509
+ # Extract backbone feature.
510
+ x = torch.flatten(self.pool(self.backbone(x)), start_dim=1)
511
+ # Final linear layer with non-linearity.
512
+ x = self.relu(self.out(x))
513
+ return x
514
+
515
+
516
+ def _replace_submodules(
517
+ root_module: nn.Module, predicate: Callable[[nn.Module], bool], func: Callable[[nn.Module], nn.Module]
518
+ ) -> nn.Module:
519
+ """
520
+ Args:
521
+ root_module: The module for which the submodules need to be replaced
522
+ predicate: Takes a module as an argument and must return True if that module is to be replaced.
523
+ func: Takes a module as an argument and returns a new module to replace it with.
524
+ Returns:
525
+ The root module with its submodules replaced.
526
+ """
527
+ if predicate(root_module):
528
+ return func(root_module)
529
+
530
+ replace_list = [k.split(".") for k, m in root_module.named_modules(remove_duplicate=True) if predicate(m)]
531
+ for *parents, k in replace_list:
532
+ parent_module = root_module
533
+ if len(parents) > 0:
534
+ parent_module = root_module.get_submodule(".".join(parents))
535
+ if isinstance(parent_module, nn.Sequential):
536
+ src_module = parent_module[int(k)]
537
+ else:
538
+ src_module = getattr(parent_module, k)
539
+ tgt_module = func(src_module)
540
+ if isinstance(parent_module, nn.Sequential):
541
+ parent_module[int(k)] = tgt_module
542
+ else:
543
+ setattr(parent_module, k, tgt_module)
544
+ # verify that all BN are replaced
545
+ assert not any(predicate(m) for _, m in root_module.named_modules(remove_duplicate=True))
546
+ return root_module
547
+
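
A minimal, self-contained sketch of the BatchNorm-to-GroupNorm swap this helper enables (a toy module rather than a ResNet backbone; the predicate/func pair mirrors the one used in `DiffusionRgbEncoder`).

```python
import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 32, 3), nn.BatchNorm2d(32), nn.ReLU())

# Every BatchNorm2d becomes a GroupNorm with num_features // 16 groups,
# which is only sensible on randomly initialized (non-pretrained) weights.
for idx, module in enumerate(net):
    if isinstance(module, nn.BatchNorm2d):
        net[idx] = nn.GroupNorm(num_groups=module.num_features // 16, num_channels=module.num_features)

assert not any(isinstance(m, nn.BatchNorm2d) for m in net.modules())
```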
548
+
549
+ class DiffusionSinusoidalPosEmb(nn.Module):
550
+ """1D sinusoidal positional embeddings as in Attention is All You Need."""
551
+
552
+ def __init__(self, dim: int):
553
+ super().__init__()
554
+ self.dim = dim
555
+
556
+ def forward(self, x: Tensor) -> Tensor:
557
+ device = x.device
558
+ half_dim = self.dim // 2
559
+ emb = math.log(10000) / (half_dim - 1)
560
+ emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
561
+ emb = x.unsqueeze(-1) * emb.unsqueeze(0)
562
+ emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
563
+ return emb
564
+
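
A functional restatement of the forward pass above, useful as a quick shape check (the `dim` value is illustrative).

```python
import math

import torch

def sinusoidal_emb(t: torch.Tensor, dim: int = 128) -> torch.Tensor:
    # Same math as DiffusionSinusoidalPosEmb.forward for a (B,) tensor of timesteps.
    half_dim = dim // 2
    freq = torch.exp(torch.arange(half_dim) * -(math.log(10000) / (half_dim - 1)))
    angles = t.unsqueeze(-1).float() * freq.unsqueeze(0)
    return torch.cat((angles.sin(), angles.cos()), dim=-1)

e = sinusoidal_emb(torch.tensor([0, 10, 999]))
assert e.shape == (3, 128)  # first 64 dims are sines, last 64 are cosines
```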
565
+
566
+ class DiffusionConv1dBlock(nn.Module):
567
+ """Conv1d --> GroupNorm --> Mish"""
568
+
569
+ def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8):
570
+ super().__init__()
571
+
572
+ self.block = nn.Sequential(
573
+ nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2),
574
+ nn.GroupNorm(n_groups, out_channels),
575
+ nn.Mish(),
576
+ )
577
+
578
+ def forward(self, x):
579
+ return self.block(x)
580
+
581
+
582
+ class DiffusionConditionalUnet1d(nn.Module):
583
+ """A 1D convolutional UNet with FiLM modulation for conditioning.
584
+
585
+ Note: this removes local conditioning as compared to the original diffusion policy code.
586
+ """
587
+
588
+ def __init__(self, config: DiffusionConfig, global_cond_dim: int):
589
+ super().__init__()
590
+
591
+ self.config = config
592
+
593
+ # Encoder for the diffusion timestep.
594
+ self.diffusion_step_encoder = nn.Sequential(
595
+ DiffusionSinusoidalPosEmb(config.diffusion_step_embed_dim),
596
+ nn.Linear(config.diffusion_step_embed_dim, config.diffusion_step_embed_dim * 4),
597
+ nn.Mish(),
598
+ nn.Linear(config.diffusion_step_embed_dim * 4, config.diffusion_step_embed_dim),
599
+ )
600
+
601
+ # The FiLM conditioning dimension.
602
+ cond_dim = config.diffusion_step_embed_dim + global_cond_dim
603
+
604
+ # In channels / out channels for each downsampling block in the Unet's encoder. For the decoder, we
605
+ # just reverse these.
606
+ in_out = [(config.action_feature.shape[0], config.down_dims[0])] + list(
607
+ zip(config.down_dims[:-1], config.down_dims[1:], strict=True)
608
+ )
609
+
610
+ # Unet encoder.
611
+ common_res_block_kwargs = {
612
+ "cond_dim": cond_dim,
613
+ "kernel_size": config.kernel_size,
614
+ "n_groups": config.n_groups,
615
+ "use_film_scale_modulation": config.use_film_scale_modulation,
616
+ }
617
+ self.down_modules = nn.ModuleList([])
618
+ for ind, (dim_in, dim_out) in enumerate(in_out):
619
+ is_last = ind >= (len(in_out) - 1)
620
+ self.down_modules.append(
621
+ nn.ModuleList(
622
+ [
623
+ DiffusionConditionalResidualBlock1d(dim_in, dim_out, **common_res_block_kwargs),
624
+ DiffusionConditionalResidualBlock1d(dim_out, dim_out, **common_res_block_kwargs),
625
+ # Downsample as long as it is not the last block.
626
+ nn.Conv1d(dim_out, dim_out, 3, 2, 1) if not is_last else nn.Identity(),
627
+ ]
628
+ )
629
+ )
630
+
631
+ # Processing in the middle of the auto-encoder.
632
+ self.mid_modules = nn.ModuleList(
633
+ [
634
+ DiffusionConditionalResidualBlock1d(
635
+ config.down_dims[-1], config.down_dims[-1], **common_res_block_kwargs
636
+ ),
637
+ DiffusionConditionalResidualBlock1d(
638
+ config.down_dims[-1], config.down_dims[-1], **common_res_block_kwargs
639
+ ),
640
+ ]
641
+ )
642
+
643
+ # Unet decoder.
644
+ self.up_modules = nn.ModuleList([])
645
+ for ind, (dim_out, dim_in) in enumerate(reversed(in_out[1:])):
646
+ is_last = ind >= (len(in_out) - 1)
647
+ self.up_modules.append(
648
+ nn.ModuleList(
649
+ [
650
+ # dim_in * 2, because it takes the encoder's skip connection as well
651
+ DiffusionConditionalResidualBlock1d(dim_in * 2, dim_out, **common_res_block_kwargs),
652
+ DiffusionConditionalResidualBlock1d(dim_out, dim_out, **common_res_block_kwargs),
653
+ # Upsample as long as it is not the last block.
654
+ nn.ConvTranspose1d(dim_out, dim_out, 4, 2, 1) if not is_last else nn.Identity(),
655
+ ]
656
+ )
657
+ )
658
+
659
+ self.final_conv = nn.Sequential(
660
+ DiffusionConv1dBlock(config.down_dims[0], config.down_dims[0], kernel_size=config.kernel_size),
661
+ nn.Conv1d(config.down_dims[0], config.action_feature.shape[0], 1),
662
+ )
663
+
664
+ def forward(self, x: Tensor, timestep: Tensor | int, global_cond=None) -> Tensor:
665
+ """
666
+ Args:
667
+ x: (B, T, input_dim) tensor for input to the Unet.
668
+ timestep: (B,) tensor of (timestep_we_are_denoising_from - 1).
669
+ global_cond: (B, global_cond_dim)
670
+ output: (B, T, input_dim)
671
+ Returns:
672
+ (B, T, input_dim) diffusion model prediction.
673
+ """
674
+ # For 1D convolutions we'll need feature dimension first.
675
+ x = einops.rearrange(x, "b t d -> b d t")
676
+
677
+ timesteps_embed = self.diffusion_step_encoder(timestep)
678
+
679
+ # If there is a global conditioning feature, concatenate it to the timestep embedding.
680
+ if global_cond is not None:
681
+ global_feature = torch.cat([timesteps_embed, global_cond], axis=-1)
682
+ else:
683
+ global_feature = timesteps_embed
684
+
685
+ # Run encoder, keeping track of skip features to pass to the decoder.
686
+ encoder_skip_features: list[Tensor] = []
687
+ for resnet, resnet2, downsample in self.down_modules:
688
+ x = resnet(x, global_feature)
689
+ x = resnet2(x, global_feature)
690
+ encoder_skip_features.append(x)
691
+ x = downsample(x)
692
+
693
+ for mid_module in self.mid_modules:
694
+ x = mid_module(x, global_feature)
695
+
696
+ # Run decoder, using the skip features from the encoder.
697
+ for resnet, resnet2, upsample in self.up_modules:
698
+ x = torch.cat((x, encoder_skip_features.pop()), dim=1)
699
+ x = resnet(x, global_feature)
700
+ x = resnet2(x, global_feature)
701
+ x = upsample(x)
702
+
703
+ x = self.final_conv(x)
704
+
705
+ x = einops.rearrange(x, "b d t -> b t d")
706
+ return x
707
+
708
+
709
+ class DiffusionConditionalResidualBlock1d(nn.Module):
710
+ """ResNet style 1D convolutional block with FiLM modulation for conditioning."""
711
+
712
+ def __init__(
713
+ self,
714
+ in_channels: int,
715
+ out_channels: int,
716
+ cond_dim: int,
717
+ kernel_size: int = 3,
718
+ n_groups: int = 8,
719
+ # Set to True to do scale modulation with FiLM as well as bias modulation (defaults to False meaning
720
+ # FiLM just modulates bias).
721
+ use_film_scale_modulation: bool = False,
722
+ ):
723
+ super().__init__()
724
+
725
+ self.use_film_scale_modulation = use_film_scale_modulation
726
+ self.out_channels = out_channels
727
+
728
+ self.conv1 = DiffusionConv1dBlock(in_channels, out_channels, kernel_size, n_groups=n_groups)
729
+
730
+ # FiLM modulation (https://huggingface.co/papers/1709.07871) outputs per-channel bias and (maybe) scale.
731
+ cond_channels = out_channels * 2 if use_film_scale_modulation else out_channels
732
+ self.cond_encoder = nn.Sequential(nn.Mish(), nn.Linear(cond_dim, cond_channels))
733
+
734
+ self.conv2 = DiffusionConv1dBlock(out_channels, out_channels, kernel_size, n_groups=n_groups)
735
+
736
+ # A final convolution for dimension matching the residual (if needed).
737
+ self.residual_conv = (
738
+ nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels else nn.Identity()
739
+ )
740
+
741
+ def forward(self, x: Tensor, cond: Tensor) -> Tensor:
742
+ """
743
+ Args:
744
+ x: (B, in_channels, T)
745
+ cond: (B, cond_dim)
746
+ Returns:
747
+ (B, out_channels, T)
748
+ """
749
+ out = self.conv1(x)
750
+
751
+ # Get condition embedding. Unsqueeze for broadcasting to `out`, resulting in (B, out_channels, 1).
752
+ cond_embed = self.cond_encoder(cond).unsqueeze(-1)
753
+ if self.use_film_scale_modulation:
754
+ # Treat the embedding as a list of scales and biases.
755
+ scale = cond_embed[:, : self.out_channels]
756
+ bias = cond_embed[:, self.out_channels :]
757
+ out = scale * out + bias
758
+ else:
759
+ # Treat the embedding as biases.
760
+ out = out + cond_embed
761
+
762
+ out = self.conv2(out)
763
+ out = out + self.residual_conv(x)
764
+ return out
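
A tiny standalone sketch of the FiLM step above on toy shapes, showing both the scale-and-bias and the bias-only variants (shapes are illustrative).

```python
import torch

B, C, T = 4, 64, 16
out = torch.randn(B, C, T)             # conv features
cond_embed = torch.randn(B, 2 * C, 1)  # cond_encoder output, unsqueezed for broadcasting

# use_film_scale_modulation=True: first C channels scale, last C channels bias.
scale, bias = cond_embed[:, :C], cond_embed[:, C:]
film_out = scale * out + bias          # (B, C, T)

# use_film_scale_modulation=False: the embedding acts as a per-channel bias only.
bias_only = torch.randn(B, C, 1)
film_out_bias = out + bias_only        # (B, C, T)
assert film_out.shape == film_out_bias.shape == (B, C, T)
```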
lerobot/src/lerobot/robots/lekiwi/__init__.py ADDED
@@ -0,0 +1,19 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from .config_lekiwi import LeKiwiClientConfig, LeKiwiConfig
18
+ from .lekiwi import LeKiwi
19
+ from .lekiwi_client import LeKiwiClient
lerobot/src/lerobot/robots/lekiwi/lekiwi.py ADDED
@@ -0,0 +1,417 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import logging
18
+ import time
19
+ from functools import cached_property
20
+ from itertools import chain
21
+ from typing import Any
22
+
23
+ import numpy as np
24
+
25
+ from lerobot.cameras.utils import make_cameras_from_configs
26
+ from lerobot.motors import Motor, MotorCalibration, MotorNormMode
27
+ from lerobot.motors.feetech import (
28
+ FeetechMotorsBus,
29
+ OperatingMode,
30
+ )
31
+ from lerobot.processor import RobotAction, RobotObservation
32
+ from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
33
+
34
+ from ..robot import Robot
35
+ from ..utils import ensure_safe_goal_position
36
+ from .config_lekiwi import LeKiwiConfig
37
+
38
+ logger = logging.getLogger(__name__)
39
+
40
+
41
+ class LeKiwi(Robot):
42
+ """
43
+ The robot includes a mobile base with three omniwheels and a remote follower arm.
44
+ The leader arm is connected locally (on the laptop) and its joint positions are recorded and then
45
+ forwarded to the remote follower arm (after applying a safety clamp).
46
+ In parallel, keyboard teleoperation is used to generate raw velocity commands for the wheels.
47
+ """
48
+
49
+ config_class = LeKiwiConfig
50
+ name = "lekiwi"
51
+
52
+ def __init__(self, config: LeKiwiConfig):
53
+ super().__init__(config)
54
+ self.config = config
55
+ norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100
56
+ self.bus = FeetechMotorsBus(
57
+ port=self.config.port,
58
+ motors={
59
+ # arm
60
+ "arm_shoulder_pan": Motor(1, "sts3215", norm_mode_body),
61
+ "arm_shoulder_lift": Motor(2, "sts3215", norm_mode_body),
62
+ "arm_elbow_flex": Motor(3, "sts3215", norm_mode_body),
63
+ "arm_wrist_flex": Motor(4, "sts3215", norm_mode_body),
64
+ "arm_wrist_roll": Motor(5, "sts3215", norm_mode_body),
65
+ "arm_gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100),
66
+ # base
67
+ "base_left_wheel": Motor(7, "sts3215", MotorNormMode.RANGE_M100_100),
68
+ "base_back_wheel": Motor(8, "sts3215", MotorNormMode.RANGE_M100_100),
69
+ "base_right_wheel": Motor(9, "sts3215", MotorNormMode.RANGE_M100_100),
70
+ },
71
+ calibration=self.calibration,
72
+ )
73
+ self.arm_motors = [motor for motor in self.bus.motors if motor.startswith("arm")]
74
+ self.base_motors = [motor for motor in self.bus.motors if motor.startswith("base")]
75
+ self.cameras = make_cameras_from_configs(config.cameras)
76
+
77
+ @property
78
+ def _state_ft(self) -> dict[str, type]:
79
+ return dict.fromkeys(
80
+ (
81
+ "arm_shoulder_pan.pos",
82
+ "arm_shoulder_lift.pos",
83
+ "arm_elbow_flex.pos",
84
+ "arm_wrist_flex.pos",
85
+ "arm_wrist_roll.pos",
86
+ "arm_gripper.pos",
87
+ "x.vel",
88
+ "y.vel",
89
+ "theta.vel",
90
+ ),
91
+ float,
92
+ )
93
+
94
+ @property
95
+ def _cameras_ft(self) -> dict[str, tuple]:
96
+ return {
97
+ cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
98
+ }
99
+
100
+ @cached_property
101
+ def observation_features(self) -> dict[str, type | tuple]:
102
+ return {**self._state_ft, **self._cameras_ft}
103
+
104
+ @cached_property
105
+ def action_features(self) -> dict[str, type]:
106
+ return self._state_ft
107
+
108
+ @property
109
+ def is_connected(self) -> bool:
110
+ return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
111
+
112
+ @check_if_already_connected
113
+ def connect(self, calibrate: bool = True) -> None:
114
+ self.bus.connect()
115
+ if not self.is_calibrated and calibrate:
116
+ logger.info(
117
+ "Mismatch between calibration values in the motor and the calibration file or no calibration file found"
118
+ )
119
+ self.calibrate()
120
+
121
+ for cam in self.cameras.values():
122
+ cam.connect()
123
+
124
+ self.configure()
125
+ logger.info(f"{self} connected.")
126
+
127
+ @property
128
+ def is_calibrated(self) -> bool:
129
+ return self.bus.is_calibrated
130
+
131
+ def calibrate(self) -> None:
132
+ if self.calibration:
133
+ # Calibration file exists, ask user whether to use it or run new calibration
134
+ user_input = input(
135
+ f"Press ENTER to use provided calibration file associated with the id {self.id}, or type 'c' and press ENTER to run calibration: "
136
+ )
137
+ if user_input.strip().lower() != "c":
138
+ logger.info(f"Writing calibration file associated with the id {self.id} to the motors")
139
+ self.bus.write_calibration(self.calibration)
140
+ return
141
+ logger.info(f"\nRunning calibration of {self}")
142
+
143
+ motors = self.arm_motors + self.base_motors
144
+
145
+ self.bus.disable_torque(self.arm_motors)
146
+ for name in self.arm_motors:
147
+ self.bus.write("Operating_Mode", name, OperatingMode.POSITION.value)
148
+
149
+ input("Move robot to the middle of its range of motion and press ENTER....")
150
+ homing_offsets = self.bus.set_half_turn_homings(self.arm_motors)
151
+
152
+ homing_offsets.update(dict.fromkeys(self.base_motors, 0))
153
+
154
+ full_turn_motor = [
155
+ motor for motor in motors if any(keyword in motor for keyword in ["wheel", "wrist_roll"])
156
+ ]
157
+ unknown_range_motors = [motor for motor in motors if motor not in full_turn_motor]
158
+
159
+ print(
160
+ f"Move all arm joints except '{full_turn_motor}' sequentially through their "
161
+ "entire ranges of motion.\nRecording positions. Press ENTER to stop..."
162
+ )
163
+ range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
164
+ for name in full_turn_motor:
165
+ range_mins[name] = 0
166
+ range_maxes[name] = 4095
167
+
168
+ self.calibration = {}
169
+ for name, motor in self.bus.motors.items():
170
+ self.calibration[name] = MotorCalibration(
171
+ id=motor.id,
172
+ drive_mode=0,
173
+ homing_offset=homing_offsets[name],
174
+ range_min=range_mins[name],
175
+ range_max=range_maxes[name],
176
+ )
177
+
178
+ self.bus.write_calibration(self.calibration)
179
+ self._save_calibration()
180
+ print("Calibration saved to", self.calibration_fpath)
181
+
182
+ def configure(self):
183
+ # Set-up arm actuators (position mode)
184
+ # We assume that at connection time, arm is in a rest position,
185
+ # and torque can be safely disabled to run calibration.
186
+ self.bus.disable_torque()
187
+ self.bus.configure_motors()
188
+ for name in self.arm_motors:
189
+ self.bus.write("Operating_Mode", name, OperatingMode.POSITION.value)
190
+ # Set P_Coefficient to lower value to avoid shakiness (Default is 32)
191
+ self.bus.write("P_Coefficient", name, 16)
192
+ # Set I_Coefficient and D_Coefficient to default value 0 and 32
193
+ self.bus.write("I_Coefficient", name, 0)
194
+ self.bus.write("D_Coefficient", name, 32)
195
+
196
+ for name in self.base_motors:
197
+ self.bus.write("Operating_Mode", name, OperatingMode.VELOCITY.value)
198
+
199
+ self.bus.enable_torque()
200
+
201
+ def setup_motors(self) -> None:
202
+ for motor in chain(reversed(self.arm_motors), reversed(self.base_motors)):
203
+ input(f"Connect the controller board to the '{motor}' motor only and press enter.")
204
+ self.bus.setup_motor(motor)
205
+ print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
206
+
207
+ @staticmethod
208
+ def _degps_to_raw(degps: float) -> int:
209
+ steps_per_deg = 4096.0 / 360.0
210
+ speed_in_steps = degps * steps_per_deg
211
+ speed_int = int(round(speed_in_steps))
212
+ # Cap the value to fit within signed 16-bit range (-32768 to 32767)
213
+ if speed_int > 0x7FFF:
214
+ speed_int = 0x7FFF # 32767 -> maximum positive value
215
+ elif speed_int < -0x8000:
216
+ speed_int = -0x8000 # -32768 -> minimum negative value
217
+ return speed_int
218
+
219
+ @staticmethod
220
+ def _raw_to_degps(raw_speed: int) -> float:
221
+ steps_per_deg = 4096.0 / 360.0
222
+ magnitude = raw_speed
223
+ degps = magnitude / steps_per_deg
224
+ return degps
225
+
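
A quick worked example of the two conversions above (same constants, standalone): with 4096 ticks per revolution, 90 deg/s maps to exactly 1024 raw ticks/s, and the signed 16-bit cap saturates commands above roughly 2880 deg/s.

```python
steps_per_deg = 4096.0 / 360.0            # motor ticks per degree

def degps_to_raw(degps: float) -> int:
    # Mirrors _degps_to_raw: deg/s -> raw ticks/s, capped to the signed 16-bit range.
    raw = int(round(degps * steps_per_deg))
    return max(-0x8000, min(0x7FFF, raw))

assert degps_to_raw(90.0) == 1024         # 90 deg/s -> 1024 ticks/s
assert degps_to_raw(3000.0) == 32767      # saturates at the signed 16-bit maximum
assert round(1024 / steps_per_deg) == 90  # inverse direction (_raw_to_degps)
```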
226
+ def _body_to_wheel_raw(
227
+ self,
228
+ x: float,
229
+ y: float,
230
+ theta: float,
231
+ wheel_radius: float = 0.05,
232
+ base_radius: float = 0.125,
233
+ max_raw: int = 3000,
234
+ ) -> dict:
235
+ """
236
+ Convert desired body-frame velocities into wheel raw commands.
237
+
238
+ Parameters:
239
+ x : Linear velocity in x (m/s).
240
+ y : Linear velocity in y (m/s).
241
+ theta : Rotational velocity (deg/s).
242
+ wheel_radius: Radius of each wheel (meters).
243
+ base_radius : Distance from the center of rotation to each wheel (meters).
244
+ max_raw : Maximum allowed raw command (ticks) per wheel.
245
+
246
+ Returns:
247
+ A dictionary with wheel raw commands:
248
+ {"base_left_wheel": value, "base_back_wheel": value, "base_right_wheel": value}.
249
+
250
+ Notes:
251
+ - Internally, the method converts theta to rad/s for the kinematics.
252
+ - The raw command is computed from each wheel's angular speed in deg/s
253
+ using _degps_to_raw(). If any command exceeds max_raw, all commands
254
+ are scaled down proportionally.
255
+ """
256
+ # Convert rotational velocity from deg/s to rad/s.
257
+ theta_rad = theta * (np.pi / 180.0)
258
+ # Create the body velocity vector [x, y, theta_rad].
259
+ velocity_vector = np.array([x, y, theta_rad])
260
+
261
+ # Define the wheel mounting angles with a -90° offset.
262
+ angles = np.radians(np.array([240, 0, 120]) - 90)
263
+ # Build the kinematic matrix: each row maps body velocities to a wheel’s linear speed.
264
+ # The third column (base_radius) accounts for the effect of rotation.
265
+ m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])
266
+
267
+ # Compute each wheel’s linear speed (m/s) and then its angular speed (rad/s).
268
+ wheel_linear_speeds = m.dot(velocity_vector)
269
+ wheel_angular_speeds = wheel_linear_speeds / wheel_radius
270
+
271
+ # Convert wheel angular speeds from rad/s to deg/s.
272
+ wheel_degps = wheel_angular_speeds * (180.0 / np.pi)
273
+
274
+ # Scaling
275
+ steps_per_deg = 4096.0 / 360.0
276
+ raw_floats = [abs(degps) * steps_per_deg for degps in wheel_degps]
277
+ max_raw_computed = max(raw_floats)
278
+ if max_raw_computed > max_raw:
279
+ scale = max_raw / max_raw_computed
280
+ wheel_degps = wheel_degps * scale
281
+
282
+ # Convert each wheel’s angular speed (deg/s) to a raw integer.
283
+ wheel_raw = [self._degps_to_raw(deg) for deg in wheel_degps]
284
+
285
+ return {
286
+ "base_left_wheel": wheel_raw[0],
287
+ "base_back_wheel": wheel_raw[1],
288
+ "base_right_wheel": wheel_raw[2],
289
+ }
290
+
291
+ def _wheel_raw_to_body(
292
+ self,
293
+ left_wheel_speed,
294
+ back_wheel_speed,
295
+ right_wheel_speed,
296
+ wheel_radius: float = 0.05,
297
+ base_radius: float = 0.125,
298
+ ) -> dict[str, Any]:
299
+ """
300
+ Convert wheel raw command feedback back into body-frame velocities.
301
+
302
+ Parameters:
303
+ left_wheel_speed, back_wheel_speed, right_wheel_speed : Raw speed feedback (ticks) from the "base_left_wheel", "base_back_wheel" and "base_right_wheel" motors.
304
+ wheel_radius: Radius of each wheel (meters).
305
+ base_radius : Distance from the robot center to each wheel (meters).
306
+
307
+ Returns:
308
+ A dict with body-frame velocities: "x.vel" and "y.vel" in m/s, "theta.vel" in deg/s.
309
+ """
310
+
311
+ # Convert each raw command back to an angular speed in deg/s.
312
+ wheel_degps = np.array(
313
+ [
314
+ self._raw_to_degps(left_wheel_speed),
315
+ self._raw_to_degps(back_wheel_speed),
316
+ self._raw_to_degps(right_wheel_speed),
317
+ ]
318
+ )
319
+
320
+ # Convert from deg/s to rad/s.
321
+ wheel_radps = wheel_degps * (np.pi / 180.0)
322
+ # Compute each wheel’s linear speed (m/s) from its angular speed.
323
+ wheel_linear_speeds = wheel_radps * wheel_radius
324
+
325
+ # Define the wheel mounting angles with a -90° offset.
326
+ angles = np.radians(np.array([240, 0, 120]) - 90)
327
+ m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])
328
+
329
+ # Solve the inverse kinematics: body_velocity = M⁻¹ · wheel_linear_speeds.
330
+ m_inv = np.linalg.inv(m)
331
+ velocity_vector = m_inv.dot(wheel_linear_speeds)
332
+ x, y, theta_rad = velocity_vector
333
+ theta = theta_rad * (180.0 / np.pi)
334
+ return {
335
+ "x.vel": x,
336
+ "y.vel": y,
337
+ "theta.vel": theta,
338
+ } # m/s and deg/s
339
+
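
A minimal numeric check of the kinematics shared by the two methods above (same wheel angles and default radii, standalone NumPy): a body-frame command is mapped to per-wheel speeds and then recovered exactly by the inverse matrix.

```python
import numpy as np

wheel_radius, base_radius = 0.05, 0.125
angles = np.radians(np.array([240, 0, 120]) - 90)  # wheel mounting angles with -90° offset
m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])

# Body command: 0.2 m/s forward, 0 m/s lateral, 30 deg/s rotation.
body = np.array([0.2, 0.0, np.deg2rad(30)])
wheel_radps = m.dot(body) / wheel_radius           # per-wheel angular speed (rad/s)

# Inverse kinematics recovers the original body velocities.
recovered = np.linalg.inv(m).dot(wheel_radps * wheel_radius)
assert np.allclose(recovered, body)
```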
340
+ @check_if_not_connected
341
+ def get_observation(self) -> RobotObservation:
342
+ # Read actuators position for arm and vel for base
343
+ start = time.perf_counter()
344
+ arm_pos = self.bus.sync_read("Present_Position", self.arm_motors)
345
+ base_wheel_vel = self.bus.sync_read("Present_Velocity", self.base_motors)
346
+
347
+ base_vel = self._wheel_raw_to_body(
348
+ base_wheel_vel["base_left_wheel"],
349
+ base_wheel_vel["base_back_wheel"],
350
+ base_wheel_vel["base_right_wheel"],
351
+ )
352
+
353
+ arm_state = {f"{k}.pos": v for k, v in arm_pos.items()}
354
+
355
+ obs_dict = {**arm_state, **base_vel}
356
+
357
+ dt_ms = (time.perf_counter() - start) * 1e3
358
+ logger.debug(f"{self} read state: {dt_ms:.1f}ms")
359
+
360
+ # Capture images from cameras
361
+ for cam_key, cam in self.cameras.items():
362
+ start = time.perf_counter()
363
+ obs_dict[cam_key] = cam.async_read()
364
+ dt_ms = (time.perf_counter() - start) * 1e3
365
+ logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
366
+
367
+ return obs_dict
368
+
369
+ @check_if_not_connected
370
+ def send_action(self, action: RobotAction) -> RobotAction:
371
+ """Command lekiwi to move to a target joint configuration.
372
+
373
+ The relative action magnitude may be clipped depending on the configuration parameter
374
+ `max_relative_target`. In this case, the action sent differs from original action.
375
+ Thus, this function always returns the action actually sent.
376
+
377
+ Raises:
378
+ RobotDeviceNotConnectedError: if robot is not connected.
379
+
380
+ Returns:
381
+ RobotAction: the action sent to the motors, potentially clipped.
382
+ """
383
+
384
+ arm_goal_pos = {k: v for k, v in action.items() if k.endswith(".pos")}
385
+ base_goal_vel = {k: v for k, v in action.items() if k.endswith(".vel")}
386
+
387
+ base_wheel_goal_vel = self._body_to_wheel_raw(
388
+ base_goal_vel["x.vel"], base_goal_vel["y.vel"], base_goal_vel["theta.vel"]
389
+ )
390
+
391
+ # Cap goal position when too far away from present position.
392
+ # /!\ Slower fps expected due to reading from the follower.
393
+ if self.config.max_relative_target is not None:
394
+ present_pos = self.bus.sync_read("Present_Position", self.arm_motors)
395
+ goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in arm_goal_pos.items()}
396
+ arm_safe_goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target)
397
+ arm_goal_pos = arm_safe_goal_pos
398
+
399
+ # Send goal position to the actuators
400
+ arm_goal_pos_raw = {k.replace(".pos", ""): v for k, v in arm_goal_pos.items()}
401
+ self.bus.sync_write("Goal_Position", arm_goal_pos_raw)
402
+ self.bus.sync_write("Goal_Velocity", base_wheel_goal_vel)
403
+
404
+ return {**arm_goal_pos, **base_goal_vel}
405
+
406
+ def stop_base(self):
407
+ self.bus.sync_write("Goal_Velocity", dict.fromkeys(self.base_motors, 0), num_retry=5)
408
+ logger.info("Base motors stopped")
409
+
410
+ @check_if_not_connected
411
+ def disconnect(self):
412
+ self.stop_base()
413
+ self.bus.disconnect(self.config.disable_torque_on_disconnect)
414
+ for cam in self.cameras.values():
415
+ cam.disconnect()
416
+
417
+ logger.info(f"{self} disconnected.")
lerobot/src/lerobot/robots/lekiwi/lekiwi_client.py ADDED
@@ -0,0 +1,335 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # TODO(aliberts, Steven, Pepijn): use gRPC calls instead of zmq?
16
+
17
+ import base64
18
+ import json
19
+ import logging
20
+ from functools import cached_property
21
+
22
+ import cv2
23
+ import numpy as np
24
+
25
+ from lerobot.processor import RobotAction, RobotObservation
26
+ from lerobot.utils.constants import ACTION, OBS_STATE
27
+ from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
28
+ from lerobot.utils.errors import DeviceNotConnectedError
29
+
30
+ from ..robot import Robot
31
+ from .config_lekiwi import LeKiwiClientConfig
32
+
33
+
34
+ class LeKiwiClient(Robot):
35
+ config_class = LeKiwiClientConfig
36
+ name = "lekiwi_client"
37
+
38
+ def __init__(self, config: LeKiwiClientConfig):
39
+ import zmq
40
+
41
+ self._zmq = zmq
42
+ super().__init__(config)
43
+ self.config = config
44
+ self.id = config.id
45
+ self.robot_type = config.type
46
+
47
+ self.remote_ip = config.remote_ip
48
+ self.port_zmq_cmd = config.port_zmq_cmd
49
+ self.port_zmq_observations = config.port_zmq_observations
50
+
51
+ self.teleop_keys = config.teleop_keys
52
+
53
+ self.polling_timeout_ms = config.polling_timeout_ms
54
+ self.connect_timeout_s = config.connect_timeout_s
55
+
56
+ self.zmq_context = None
57
+ self.zmq_cmd_socket = None
58
+ self.zmq_observation_socket = None
59
+
60
+ self.last_frames = {}
61
+
62
+ self.last_remote_state = {}
63
+
64
+ # Define three speed levels and a current index
65
+ self.speed_levels = [
66
+ {"xy": 0.1, "theta": 30}, # slow
67
+ {"xy": 0.2, "theta": 60}, # medium
68
+ {"xy": 0.3, "theta": 90}, # fast
69
+ ]
70
+ self.speed_index = 0 # Start at slow
71
+
72
+ self._is_connected = False
73
+ self.logs = {}
74
+
75
+ @cached_property
76
+ def _state_ft(self) -> dict[str, type]:
77
+ return dict.fromkeys(
78
+ (
79
+ "arm_shoulder_pan.pos",
80
+ "arm_shoulder_lift.pos",
81
+ "arm_elbow_flex.pos",
82
+ "arm_wrist_flex.pos",
83
+ "arm_wrist_roll.pos",
84
+ "arm_gripper.pos",
85
+ "x.vel",
86
+ "y.vel",
87
+ "theta.vel",
88
+ ),
89
+ float,
90
+ )
91
+
92
+ @cached_property
93
+ def _state_order(self) -> tuple[str, ...]:
94
+ return tuple(self._state_ft.keys())
95
+
96
+ @cached_property
97
+ def _cameras_ft(self) -> dict[str, tuple[int, int, int]]:
98
+ return {name: (cfg.height, cfg.width, 3) for name, cfg in self.config.cameras.items()}
99
+
100
+ @cached_property
101
+ def observation_features(self) -> dict[str, type | tuple]:
102
+ return {**self._state_ft, **self._cameras_ft}
103
+
104
+ @cached_property
105
+ def action_features(self) -> dict[str, type]:
106
+ return self._state_ft
107
+
108
+ @property
109
+ def is_connected(self) -> bool:
110
+ return self._is_connected
111
+
112
+ @property
113
+ def is_calibrated(self) -> bool:
114
+ pass
115
+
116
+ @check_if_already_connected
117
+ def connect(self) -> None:
118
+ """Establishes ZMQ sockets with the remote mobile robot"""
119
+
120
+ zmq = self._zmq
121
+ self.zmq_context = zmq.Context()
122
+ self.zmq_cmd_socket = self.zmq_context.socket(zmq.PUSH)
123
+ zmq_cmd_locator = f"tcp://{self.remote_ip}:{self.port_zmq_cmd}"
124
+ self.zmq_cmd_socket.connect(zmq_cmd_locator)
125
+ self.zmq_cmd_socket.setsockopt(zmq.CONFLATE, 1)
126
+
127
+ self.zmq_observation_socket = self.zmq_context.socket(zmq.PULL)
128
+ zmq_observations_locator = f"tcp://{self.remote_ip}:{self.port_zmq_observations}"
129
+ self.zmq_observation_socket.connect(zmq_observations_locator)
130
+ self.zmq_observation_socket.setsockopt(zmq.CONFLATE, 1)
131
+
132
+ poller = zmq.Poller()
133
+ poller.register(self.zmq_observation_socket, zmq.POLLIN)
134
+ socks = dict(poller.poll(self.connect_timeout_s * 1000))
135
+ if self.zmq_observation_socket not in socks or socks[self.zmq_observation_socket] != zmq.POLLIN:
136
+ raise DeviceNotConnectedError("Timeout waiting for LeKiwi Host to connect expired.")
137
+
138
+ self._is_connected = True
139
+
140
+ def calibrate(self) -> None:
141
+ pass
142
+
143
+ def _poll_and_get_latest_message(self) -> str | None:
144
+ """Polls the ZMQ socket for a limited time and returns the latest message string."""
145
+ zmq = self._zmq
146
+ poller = zmq.Poller()
147
+ poller.register(self.zmq_observation_socket, zmq.POLLIN)
148
+
149
+ try:
150
+ socks = dict(poller.poll(self.polling_timeout_ms))
151
+ except zmq.ZMQError as e:
152
+ logging.error(f"ZMQ polling error: {e}")
153
+ return None
154
+
155
+ if self.zmq_observation_socket not in socks:
156
+ logging.info("No new data available within timeout.")
157
+ return None
158
+
159
+ last_msg = None
160
+ while True:
161
+ try:
162
+ msg = self.zmq_observation_socket.recv_string(zmq.NOBLOCK)
163
+ last_msg = msg
164
+ except zmq.Again:
165
+ break
166
+
167
+ if last_msg is None:
168
+ logging.warning("Poller indicated data, but failed to retrieve message.")
169
+
170
+ return last_msg
171
+
172
+ def _parse_observation_json(self, obs_string: str) -> RobotObservation | None:
173
+ """Parses the JSON observation string."""
174
+ try:
175
+ return json.loads(obs_string)
176
+ except json.JSONDecodeError as e:
177
+ logging.error(f"Error decoding JSON observation: {e}")
178
+ return None
179
+
180
+ def _decode_image_from_b64(self, image_b64: str) -> np.ndarray | None:
181
+ """Decodes a base64 encoded image string to an OpenCV image."""
182
+ if not image_b64:
183
+ return None
184
+ try:
185
+ jpg_data = base64.b64decode(image_b64)
186
+ np_arr = np.frombuffer(jpg_data, dtype=np.uint8)
187
+ frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
188
+ if frame is None:
189
+ logging.warning("cv2.imdecode returned None for an image.")
190
+ return frame
191
+ except (TypeError, ValueError) as e:
192
+ logging.error(f"Error decoding base64 image data: {e}")
193
+ return None
194
+
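
For reference, a small round-trip sketch pairing this decoder with the JPEG + base64 encoding used on the host side (array contents are dummy; JPEG quality 90 matches the host script shown later).

```python
import base64

import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy BGR image

# Encode (host side): JPEG bytes -> base64 string, safe to embed in JSON.
ok, buffer = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
image_b64 = base64.b64encode(buffer).decode("utf-8") if ok else ""

# Decode (client side): mirrors _decode_image_from_b64.
np_arr = np.frombuffer(base64.b64decode(image_b64), dtype=np.uint8)
decoded = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
assert decoded.shape == frame.shape
```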
195
+ def _remote_state_from_obs(
196
+ self, observation: RobotObservation
197
+ ) -> tuple[dict[str, np.ndarray], RobotObservation]:
198
+ """Extracts frames, and state from the parsed observation."""
199
+
200
+ flat_state = {key: observation.get(key, 0.0) for key in self._state_order}
201
+
202
+ state_vec = np.array([flat_state[key] for key in self._state_order], dtype=np.float32)
203
+
204
+ obs_dict: RobotObservation = {**flat_state, OBS_STATE: state_vec}
205
+
206
+ # Decode images
207
+ current_frames: dict[str, np.ndarray] = {}
208
+ for cam_name, image_b64 in observation.items():
209
+ if cam_name not in self._cameras_ft:
210
+ continue
211
+ frame = self._decode_image_from_b64(image_b64)
212
+ if frame is not None:
213
+ current_frames[cam_name] = frame
214
+
215
+ return current_frames, obs_dict
216
+
217
+ def _get_data(self) -> tuple[dict[str, np.ndarray], RobotObservation]:
218
+ """
219
+ Polls the video socket for the latest observation data.
220
+
221
+ Attempts to retrieve and decode the latest message within a short timeout.
222
+ If successful, updates and returns the new frames, speed, and arm state.
223
+ If no new data arrives or decoding fails, returns the last known values.
224
+ """
225
+
226
+ # 1. Get the latest message string from the socket
227
+ latest_message_str = self._poll_and_get_latest_message()
228
+
229
+ # 2. If no message, return cached data
230
+ if latest_message_str is None:
231
+ return self.last_frames, self.last_remote_state
232
+
233
+ # 3. Parse the JSON message
234
+ observation = self._parse_observation_json(latest_message_str)
235
+
236
+ # 4. If JSON parsing failed, return cached data
237
+ if observation is None:
238
+ return self.last_frames, self.last_remote_state
239
+
240
+ # 5. Process the valid observation data
241
+ try:
242
+ new_frames, new_state = self._remote_state_from_obs(observation)
243
+ except Exception as e:
244
+ logging.error(f"Error processing observation data, serving last observation: {e}")
245
+ return self.last_frames, self.last_remote_state
246
+
247
+ self.last_frames = new_frames
248
+ self.last_remote_state = new_state
249
+
250
+ return new_frames, new_state
251
+
252
+ @check_if_not_connected
253
+ def get_observation(self) -> RobotObservation:
254
+ """
255
+ Capture observations from the remote robot: current follower arm positions,
256
+ present wheel speeds (converted to body-frame velocities: x, y, theta),
257
+ and camera frames, all received from the host over ZMQ.
258
+ """
259
+
260
+ frames, obs_dict = self._get_data()
261
+
262
+ # Loop over each configured camera
263
+ for cam_name, frame in frames.items():
264
+ if frame is None:
265
+ logging.warning("Frame is None")
266
+ frame = np.zeros((640, 480, 3), dtype=np.uint8)
267
+ obs_dict[cam_name] = frame
268
+
269
+ return obs_dict
270
+
271
+ def _from_keyboard_to_base_action(self, pressed_keys: np.ndarray):
272
+ # Speed control
273
+ if self.teleop_keys["speed_up"] in pressed_keys:
274
+ self.speed_index = min(self.speed_index + 1, 2)
275
+ if self.teleop_keys["speed_down"] in pressed_keys:
276
+ self.speed_index = max(self.speed_index - 1, 0)
277
+ speed_setting = self.speed_levels[self.speed_index]
278
+ xy_speed = speed_setting["xy"] # e.g. 0.1, 0.25, or 0.4
279
+ theta_speed = speed_setting["theta"] # e.g. 30, 60, or 90
280
+
281
+ x_cmd = 0.0 # m/s forward/backward
282
+ y_cmd = 0.0 # m/s lateral
283
+ theta_cmd = 0.0 # deg/s rotation
284
+
285
+ if self.teleop_keys["forward"] in pressed_keys:
286
+ x_cmd += xy_speed
287
+ if self.teleop_keys["backward"] in pressed_keys:
288
+ x_cmd -= xy_speed
289
+ if self.teleop_keys["left"] in pressed_keys:
290
+ y_cmd += xy_speed
291
+ if self.teleop_keys["right"] in pressed_keys:
292
+ y_cmd -= xy_speed
293
+ if self.teleop_keys["rotate_left"] in pressed_keys:
294
+ theta_cmd += theta_speed
295
+ if self.teleop_keys["rotate_right"] in pressed_keys:
296
+ theta_cmd -= theta_speed
297
+ return {
298
+ "x.vel": x_cmd,
299
+ "y.vel": y_cmd,
300
+ "theta.vel": theta_cmd,
301
+ }
302
+
303
+ def configure(self):
304
+ pass
305
+
306
+ @check_if_not_connected
307
+ def send_action(self, action: RobotAction) -> RobotAction:
308
+ """Command lekiwi to move to a target joint configuration. Translates to motor space + sends over ZMQ
309
+
310
+ Args:
311
+ action (RobotAction): array containing the goal positions for the motors.
312
+ Raises:
313
+ RobotDeviceNotConnectedError: if robot is not connected.
314
+
315
+ Returns:
316
+ np.ndarray: the action sent to the motors, potentially clipped.
317
+ """
318
+
319
+ self.zmq_cmd_socket.send_string(json.dumps(action)) # action is in motor space
320
+
321
+ # TODO(Steven): Remove the np conversion when it is possible to record a non-numpy array value
322
+ actions = np.array([action.get(k, 0.0) for k in self._state_order], dtype=np.float32)
323
+
324
+ action_sent = {key: actions[i] for i, key in enumerate(self._state_order)}
325
+ action_sent[ACTION] = actions
326
+ return action_sent
327
+
328
+ @check_if_not_connected
329
+ def disconnect(self):
330
+ """Cleans ZMQ comms"""
331
+
332
+ self.zmq_observation_socket.close()
333
+ self.zmq_cmd_socket.close()
334
+ self.zmq_context.term()
335
+ self._is_connected = False
lerobot/src/lerobot/robots/lekiwi/lekiwi_host.py ADDED
@@ -0,0 +1,136 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import base64
18
+ import json
19
+ import logging
20
+ import time
21
+ from dataclasses import dataclass, field
22
+
23
+ import cv2
24
+ import draccus
25
+ import zmq
26
+
27
+ from .config_lekiwi import LeKiwiConfig, LeKiwiHostConfig
28
+ from .lekiwi import LeKiwi
29
+
30
+
31
+ @dataclass
32
+ class LeKiwiServerConfig:
33
+ """Configuration for the LeKiwi host script."""
34
+
35
+ robot: LeKiwiConfig = field(default_factory=LeKiwiConfig)
36
+ host: LeKiwiHostConfig = field(default_factory=LeKiwiHostConfig)
37
+
38
+
39
+ class LeKiwiHost:
40
+ def __init__(self, config: LeKiwiHostConfig):
41
+ self.zmq_context = zmq.Context()
42
+ self.zmq_cmd_socket = self.zmq_context.socket(zmq.PULL)
43
+ self.zmq_cmd_socket.setsockopt(zmq.CONFLATE, 1)
44
+ self.zmq_cmd_socket.bind(f"tcp://*:{config.port_zmq_cmd}")
45
+
46
+ self.zmq_observation_socket = self.zmq_context.socket(zmq.PUSH)
47
+ self.zmq_observation_socket.setsockopt(zmq.CONFLATE, 1)
48
+ self.zmq_observation_socket.bind(f"tcp://*:{config.port_zmq_observations}")
49
+
50
+ self.connection_time_s = config.connection_time_s
51
+ self.watchdog_timeout_ms = config.watchdog_timeout_ms
52
+ self.max_loop_freq_hz = config.max_loop_freq_hz
53
+
54
+ def disconnect(self):
55
+ self.zmq_observation_socket.close()
56
+ self.zmq_cmd_socket.close()
57
+ self.zmq_context.term()
58
+
59
+
60
+ @draccus.wrap()
61
+ def main(cfg: LeKiwiServerConfig):
62
+ logging.info("Configuring LeKiwi")
63
+ robot = LeKiwi(cfg.robot)
64
+
65
+ logging.info("Connecting LeKiwi")
66
+ robot.connect()
67
+
68
+ logging.info("Starting HostAgent")
69
+ host = LeKiwiHost(cfg.host)
70
+
71
+ last_cmd_time = time.time()
72
+ watchdog_active = False
73
+ logging.info("Waiting for commands...")
74
+ try:
75
+ # Main loop: apply incoming commands and stream observations back to the client
76
+ start = time.perf_counter()
77
+ duration = 0
78
+ while duration < host.connection_time_s:
79
+ loop_start_time = time.time()
80
+ try:
81
+ msg = host.zmq_cmd_socket.recv_string(zmq.NOBLOCK)
82
+ data = dict(json.loads(msg))
83
+ _action_sent = robot.send_action(data)
84
+ last_cmd_time = time.time()
85
+ watchdog_active = False
86
+ except zmq.Again:
87
+ if not watchdog_active:
88
+ logging.warning("No command available")
89
+ except Exception as e:
90
+ logging.error("Message fetching failed: %s", e)
91
+
92
+ now = time.time()
93
+ if (now - last_cmd_time > host.watchdog_timeout_ms / 1000) and not watchdog_active:
94
+ logging.warning(
95
+ f"Command not received for more than {host.watchdog_timeout_ms} milliseconds. Stopping the base."
96
+ )
97
+ watchdog_active = True
98
+ robot.stop_base()
99
+
100
+ last_observation = robot.get_observation()
101
+
102
+ # Encode ndarrays to base64 strings
103
+ for cam_key, _ in robot.cameras.items():
104
+ ret, buffer = cv2.imencode(
105
+ ".jpg", last_observation[cam_key], [int(cv2.IMWRITE_JPEG_QUALITY), 90]
106
+ )
107
+ if ret:
108
+ last_observation[cam_key] = base64.b64encode(buffer).decode("utf-8")
109
+ else:
110
+ last_observation[cam_key] = ""
111
+
112
+ # Send the observation to the remote agent
113
+ try:
114
+ host.zmq_observation_socket.send_string(json.dumps(last_observation), flags=zmq.NOBLOCK)
115
+ except zmq.Again:
116
+ logging.info("Dropping observation, no client connected")
117
+
118
+ # Ensure a short sleep to avoid overloading the CPU.
119
+ elapsed = time.time() - loop_start_time
120
+
121
+ time.sleep(max(1 / host.max_loop_freq_hz - elapsed, 0))
122
+ duration = time.perf_counter() - start
123
+ print("Cycle time reached.")
124
+
125
+ except KeyboardInterrupt:
126
+ print("Keyboard interrupt received. Exiting...")
127
+ finally:
128
+ print("Shutting down Lekiwi Host.")
129
+ robot.disconnect()
130
+ host.disconnect()
131
+
132
+ logging.info("Finished LeKiwi cleanly")
133
+
134
+
135
+ if __name__ == "__main__":
136
+ main()
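The host JPEG-encodes each camera frame and base64-encodes the bytes before pushing the observation as JSON. A minimal sketch of the matching client-side decode (assuming `obs_json` is the string received from the observation socket and `cam_key` is one of the camera keys):

    import base64
    import json

    import cv2
    import numpy as np

    def decode_camera_frame(obs_json: str, cam_key: str) -> np.ndarray | None:
        """Recover the image that the host encoded with cv2.imencode + base64."""
        obs = json.loads(obs_json)
        payload = obs.get(cam_key, "")
        if not payload:
            return None  # the host sends "" when encoding failed
        jpg = np.frombuffer(base64.b64decode(payload), dtype=np.uint8)
        return cv2.imdecode(jpg, cv2.IMREAD_COLOR)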
lerobot/src/lerobot/robots/omx_follower/__init__.py ADDED
@@ -0,0 +1,21 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # OMX is a fully open-source robot from ROBOTIS.
18
+ # More information at: https://ai.robotis.com/omx/introduction_omx.html
19
+
20
+ from .config_omx_follower import OmxFollowerConfig
21
+ from .omx_follower import OmxFollower
lerobot/src/lerobot/robots/omx_follower/config_omx_follower.py ADDED
@@ -0,0 +1,39 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass, field
16
+
17
+ from lerobot.cameras import CameraConfig
18
+
19
+ from ..config import RobotConfig
20
+
21
+
22
+ @RobotConfig.register_subclass("omx_follower")
23
+ @dataclass
24
+ class OmxFollowerConfig(RobotConfig):
25
+ # Port to connect to the arm
26
+ port: str
27
+
28
+ disable_torque_on_disconnect: bool = True
29
+
30
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
31
+ # Set this to a positive scalar to have the same value for all motors, or a dictionary that maps motor
32
+ # names to the max_relative_target value for that motor.
33
+ max_relative_target: float | dict[str, float] | None = None
34
+
35
+ # cameras
36
+ cameras: dict[str, CameraConfig] = field(default_factory=dict)
37
+
38
+ # Set to `True` for backward compatibility with previous policies/dataset
39
+ use_degrees: bool = False
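For illustration, a configuration for this follower could be built as in the sketch below; the id, port and camera settings are placeholders, and `OpenCVCameraConfig` (with its `index_or_path` argument) is assumed to be the OpenCV camera config added elsewhere in this commit:

    from lerobot.cameras.opencv import OpenCVCameraConfig

    config = OmxFollowerConfig(
        id="my_omx",                      # placeholder identifier
        port="/dev/ttyUSB0",              # placeholder serial port
        max_relative_target=20.0,         # same safety cap for every motor
        cameras={
            "wrist": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30),
        },
    )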
lerobot/src/lerobot/robots/omx_follower/omx_follower.py ADDED
@@ -0,0 +1,219 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import logging
18
+ import time
19
+ from functools import cached_property
20
+
21
+ from lerobot.cameras.utils import make_cameras_from_configs
22
+ from lerobot.motors import Motor, MotorCalibration, MotorNormMode
23
+ from lerobot.motors.dynamixel import (
24
+ DriveMode,
25
+ DynamixelMotorsBus,
26
+ OperatingMode,
27
+ )
28
+ from lerobot.processor import RobotAction, RobotObservation
29
+ from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
30
+
31
+ from ..robot import Robot
32
+ from ..utils import ensure_safe_goal_position
33
+ from .config_omx_follower import OmxFollowerConfig
34
+
35
+ logger = logging.getLogger(__name__)
36
+
37
+
38
+ class OmxFollower(Robot):
39
+ """
40
+ - [OMX](https://github.com/ROBOTIS-GIT/open_manipulator),
41
+ developed by Woojin Wie and Junha Cha from [ROBOTIS](https://ai.robotis.com/)
42
+ """
43
+
44
+ config_class = OmxFollowerConfig
45
+ name = "omx_follower"
46
+
47
+ def __init__(self, config: OmxFollowerConfig):
48
+ super().__init__(config)
49
+ self.config = config
50
+ norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100
51
+ self.bus = DynamixelMotorsBus(
52
+ port=self.config.port,
53
+ motors={
54
+ "shoulder_pan": Motor(11, "xl430-w250", norm_mode_body),
55
+ "shoulder_lift": Motor(12, "xl430-w250", norm_mode_body),
56
+ "elbow_flex": Motor(13, "xl430-w250", norm_mode_body),
57
+ "wrist_flex": Motor(14, "xl330-m288", norm_mode_body),
58
+ "wrist_roll": Motor(15, "xl330-m288", norm_mode_body),
59
+ "gripper": Motor(16, "xl330-m288", MotorNormMode.RANGE_0_100),
60
+ },
61
+ calibration=self.calibration,
62
+ )
63
+ self.cameras = make_cameras_from_configs(config.cameras)
64
+
65
+ @property
66
+ def _motors_ft(self) -> dict[str, type]:
67
+ return {f"{motor}.pos": float for motor in self.bus.motors}
68
+
69
+ @property
70
+ def _cameras_ft(self) -> dict[str, tuple]:
71
+ return {
72
+ cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
73
+ }
74
+
75
+ @cached_property
76
+ def observation_features(self) -> dict[str, type | tuple]:
77
+ return {**self._motors_ft, **self._cameras_ft}
78
+
79
+ @cached_property
80
+ def action_features(self) -> dict[str, type]:
81
+ return self._motors_ft
82
+
83
+ @property
84
+ def is_connected(self) -> bool:
85
+ return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
86
+
87
+ @check_if_already_connected
88
+ def connect(self, calibrate: bool = True) -> None:
89
+ """
90
+ For OMX robots that come pre-calibrated:
91
+ - If default calibration from package doesn't match motors, read from motors and save
92
+ - This allows using pre-calibrated robots without manual calibration
93
+ - If no calibration file exists, use factory default values (homing_offset=0, range_min=0, range_max=4095)
94
+ """
95
+
96
+ self.bus.connect()
97
+ if not self.is_calibrated and calibrate:
98
+ logger.info(
99
+ "Mismatch between calibration values in the motor and the calibration file or no calibration file found"
100
+ )
101
+ self.calibrate()
102
+
103
+ for cam in self.cameras.values():
104
+ cam.connect()
105
+
106
+ self.configure()
107
+ logger.info(f"{self} connected.")
108
+
109
+ @property
110
+ def is_calibrated(self) -> bool:
111
+ return self.bus.is_calibrated
112
+
113
+ def calibrate(self) -> None:
114
+ self.bus.disable_torque()
115
+ logger.info(f"\nUsing factory default calibration values for {self}")
116
+ logger.info(f"\nWriting default configuration of {self} to the motors")
117
+ for motor in self.bus.motors:
118
+ self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
119
+
120
+ for motor in self.bus.motors:
121
+ self.bus.write("Drive_Mode", motor, DriveMode.NON_INVERTED.value)
122
+
123
+ self.calibration = {}
124
+ for motor, m in self.bus.motors.items():
125
+ self.calibration[motor] = MotorCalibration(
126
+ id=m.id,
127
+ drive_mode=0,
128
+ homing_offset=0,
129
+ range_min=0,
130
+ range_max=4095,
131
+ )
132
+
133
+ self.bus.write_calibration(self.calibration)
134
+ self._save_calibration()
135
+ logger.info(f"Calibration saved to {self.calibration_fpath}")
136
+
137
+ def configure(self) -> None:
138
+ with self.bus.torque_disabled():
139
+ self.bus.configure_motors()
140
+ # Use 'extended position mode' for all motors except gripper, because in joint mode the servos
141
+ # can't rotate more than 360 degrees (from 0 to 4095). If a mistake was made while assembling
142
+ # the arm, a servo could end up at position 0 or 4095 at a crucial point of its range.
143
+ for motor in self.bus.motors:
144
+ if motor != "gripper":
145
+ self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
146
+
147
+ # Use 'position control current based' for the gripper so that it is limited by the current limit. For
148
+ # the follower gripper, it means it can grasp an object without forcing too much even though its
149
+ # goal position is a complete grasp (both gripper fingers are ordered to join and reach a touch).
150
+ # For the leader gripper, it means we can use it as a physical trigger, since we can force with
151
+ # our finger to make it move, and it will move back to its original target position when we
152
+ # release the force.
153
+ self.bus.write("Operating_Mode", "gripper", OperatingMode.CURRENT_POSITION.value)
154
+
155
+ # Set better PID values to close the gap between recorded states and actions
156
+ # TODO(rcadene): Implement an automatic procedure to set optimal PID values for each motor
157
+ self.bus.write("Position_P_Gain", "elbow_flex", 1500)
158
+ self.bus.write("Position_I_Gain", "elbow_flex", 0)
159
+ self.bus.write("Position_D_Gain", "elbow_flex", 600)
160
+
161
+ def setup_motors(self) -> None:
162
+ for motor in reversed(self.bus.motors):
163
+ input(f"Connect the controller board to the '{motor}' motor only and press enter.")
164
+ self.bus.setup_motor(motor)
165
+ print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
166
+
167
+ @check_if_not_connected
168
+ def get_observation(self) -> RobotObservation:
169
+ # Read arm position
170
+ start = time.perf_counter()
171
+ obs_dict = self.bus.sync_read("Present_Position")
172
+ obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
173
+ dt_ms = (time.perf_counter() - start) * 1e3
174
+ logger.debug(f"{self} read state: {dt_ms:.1f}ms")
175
+
176
+ # Capture images from cameras
177
+ for cam_key, cam in self.cameras.items():
178
+ start = time.perf_counter()
179
+ obs_dict[cam_key] = cam.async_read()
180
+ dt_ms = (time.perf_counter() - start) * 1e3
181
+ logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
182
+
183
+ return obs_dict
184
+
185
+ @check_if_not_connected
186
+ def send_action(self, action: RobotAction) -> RobotAction:
187
+ """Command arm to move to a target joint configuration.
188
+
189
+ The relative action magnitude may be clipped depending on the configuration parameter
190
+ `max_relative_target`. In this case, the action sent differs from original action.
191
+ Thus, this function always returns the action actually sent.
192
+
193
+ Args:
194
+ action (RobotAction): The goal positions for the motors.
195
+
196
+ Returns:
197
+ RobotAction: The action sent to the motors, potentially clipped.
198
+ """
199
+
200
+ goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}
201
+
202
+ # Cap goal position when too far away from present position.
203
+ # /!\ Slower fps expected due to reading from the follower.
204
+ if self.config.max_relative_target is not None:
205
+ present_pos = self.bus.sync_read("Present_Position")
206
+ goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()}
207
+ goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target)
208
+
209
+ # Send goal position to the arm
210
+ self.bus.sync_write("Goal_Position", goal_pos)
211
+ return {f"{motor}.pos": val for motor, val in goal_pos.items()}
212
+
213
+ @check_if_not_connected
214
+ def disconnect(self):
215
+ self.bus.disconnect(self.config.disable_torque_on_disconnect)
216
+ for cam in self.cameras.values():
217
+ cam.disconnect()
218
+
219
+ logger.info(f"{self} disconnected.")
lerobot/src/lerobot/robots/reachy2/__init__.py ADDED
@@ -0,0 +1,25 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from .configuration_reachy2 import Reachy2RobotConfig
18
+ from .robot_reachy2 import (
19
+ REACHY2_ANTENNAS_JOINTS,
20
+ REACHY2_L_ARM_JOINTS,
21
+ REACHY2_NECK_JOINTS,
22
+ REACHY2_R_ARM_JOINTS,
23
+ REACHY2_VEL,
24
+ Reachy2Robot,
25
+ )
lerobot/src/lerobot/robots/reachy2/configuration_reachy2.py ADDED
@@ -0,0 +1,117 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass, field
16
+
17
+ from lerobot.cameras import CameraConfig
18
+ from lerobot.cameras.configs import ColorMode
19
+ from lerobot.cameras.reachy2_camera import Reachy2CameraConfig
20
+
21
+ from ..config import RobotConfig
22
+
23
+
24
+ @RobotConfig.register_subclass("reachy2")
25
+ @dataclass
26
+ class Reachy2RobotConfig(RobotConfig):
27
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
28
+ # Set this to a positive scalar to have the same value for all motors.
29
+ max_relative_target: float | None = None
30
+
31
+ # IP address of the Reachy 2 robot
32
+ ip_address: str | None = "localhost"
33
+ # Port of the Reachy 2 robot
34
+ port: int = 50065
35
+
36
+ # If True, turn_off_smoothly() will be sent to the robot before disconnecting.
37
+ disable_torque_on_disconnect: bool = False
38
+
39
+ # Tag for external commands control
40
+ # Set to True if you use an external commands system to control the robot,
41
+ # such as the official teleoperation application: https://github.com/pollen-robotics/Reachy2Teleoperation
42
+ # If True, robot.send_action() will not send commands to the robot.
43
+ use_external_commands: bool = False
44
+
45
+ # Robot parts
46
+ # Set to False to not add the corresponding joints part to the robot list of joints.
47
+ # By default, all parts are set to True.
48
+ with_mobile_base: bool = True
49
+ with_l_arm: bool = True
50
+ with_r_arm: bool = True
51
+ with_neck: bool = True
52
+ with_antennas: bool = True
53
+
54
+ # Robot cameras
55
+ # Set to True if you want to use the corresponding cameras in the observations.
56
+ # By default, no camera is used.
57
+ with_left_teleop_camera: bool = False
58
+ with_right_teleop_camera: bool = False
59
+ with_torso_camera: bool = False
60
+
61
+ # Camera parameters
62
+ camera_width: int = 640
63
+ camera_height: int = 480
64
+
65
+ # For cameras other than the 3 default Reachy 2 cameras.
66
+ cameras: dict[str, CameraConfig] = field(default_factory=dict)
67
+
68
+ def __post_init__(self) -> None:
69
+ # Add cameras with same ip_address as the robot
70
+ if self.with_left_teleop_camera:
71
+ self.cameras["teleop_left"] = Reachy2CameraConfig(
72
+ name="teleop",
73
+ image_type="left",
74
+ ip_address=self.ip_address,
75
+ port=self.port,
76
+ width=self.camera_width,
77
+ height=self.camera_height,
78
+ fps=30, # Not configurable for Reachy 2 cameras
79
+ color_mode=ColorMode.RGB,
80
+ )
81
+ if self.with_right_teleop_camera:
82
+ self.cameras["teleop_right"] = Reachy2CameraConfig(
83
+ name="teleop",
84
+ image_type="right",
85
+ ip_address=self.ip_address,
86
+ port=self.port,
87
+ width=self.camera_width,
88
+ height=self.camera_height,
89
+ fps=30, # Not configurable for Reachy 2 cameras
90
+ color_mode=ColorMode.RGB,
91
+ )
92
+ if self.with_torso_camera:
93
+ self.cameras["torso_rgb"] = Reachy2CameraConfig(
94
+ name="depth",
95
+ image_type="rgb",
96
+ ip_address=self.ip_address,
97
+ port=self.port,
98
+ width=self.camera_width,
99
+ height=self.camera_height,
100
+ fps=30, # Not configurable for Reachy 2 cameras
101
+ color_mode=ColorMode.RGB,
102
+ )
103
+
104
+ super().__post_init__()
105
+
106
+ if not (
107
+ self.with_mobile_base
108
+ or self.with_l_arm
109
+ or self.with_r_arm
110
+ or self.with_neck
111
+ or self.with_antennas
112
+ ):
113
+ raise ValueError(
114
+ "No Reachy2Robot part used.\n"
115
+ "At least one part of the robot must be set to True "
116
+ "(with_mobile_base, with_l_arm, with_r_arm, with_neck, with_antennas)"
117
+ )
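For example, enabling one teleop camera lets `__post_init__` populate `cameras` automatically; a sketch with placeholder network values:

    config = Reachy2RobotConfig(
        ip_address="192.168.1.42",        # placeholder
        with_r_arm=False,
        with_antennas=False,
        with_left_teleop_camera=True,     # adds a "teleop_left" Reachy2CameraConfig entry
    )
    assert "teleop_left" in config.cameras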
lerobot/src/lerobot/robots/reachy2/robot_reachy2.py ADDED
@@ -0,0 +1,235 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ from __future__ import annotations
17
+
18
+ import time
19
+ from typing import TYPE_CHECKING, Any
20
+
21
+ from lerobot.cameras.utils import make_cameras_from_configs
22
+ from lerobot.processor import RobotAction, RobotObservation
23
+ from lerobot.utils.import_utils import _reachy2_sdk_available
24
+
25
+ from ..robot import Robot
26
+ from ..utils import ensure_safe_goal_position
27
+ from .configuration_reachy2 import Reachy2RobotConfig
28
+
29
+ if TYPE_CHECKING or _reachy2_sdk_available:
30
+ from reachy2_sdk import ReachySDK
31
+ else:
32
+ ReachySDK = None
33
+
34
+ # {lerobot_keys: reachy2_sdk_keys}
35
+ REACHY2_NECK_JOINTS = {
36
+ "neck_yaw.pos": "head.neck.yaw",
37
+ "neck_pitch.pos": "head.neck.pitch",
38
+ "neck_roll.pos": "head.neck.roll",
39
+ }
40
+
41
+ REACHY2_ANTENNAS_JOINTS = {
42
+ "l_antenna.pos": "head.l_antenna",
43
+ "r_antenna.pos": "head.r_antenna",
44
+ }
45
+
46
+ REACHY2_R_ARM_JOINTS = {
47
+ "r_shoulder_pitch.pos": "r_arm.shoulder.pitch",
48
+ "r_shoulder_roll.pos": "r_arm.shoulder.roll",
49
+ "r_elbow_yaw.pos": "r_arm.elbow.yaw",
50
+ "r_elbow_pitch.pos": "r_arm.elbow.pitch",
51
+ "r_wrist_roll.pos": "r_arm.wrist.roll",
52
+ "r_wrist_pitch.pos": "r_arm.wrist.pitch",
53
+ "r_wrist_yaw.pos": "r_arm.wrist.yaw",
54
+ "r_gripper.pos": "r_arm.gripper",
55
+ }
56
+
57
+ REACHY2_L_ARM_JOINTS = {
58
+ "l_shoulder_pitch.pos": "l_arm.shoulder.pitch",
59
+ "l_shoulder_roll.pos": "l_arm.shoulder.roll",
60
+ "l_elbow_yaw.pos": "l_arm.elbow.yaw",
61
+ "l_elbow_pitch.pos": "l_arm.elbow.pitch",
62
+ "l_wrist_roll.pos": "l_arm.wrist.roll",
63
+ "l_wrist_pitch.pos": "l_arm.wrist.pitch",
64
+ "l_wrist_yaw.pos": "l_arm.wrist.yaw",
65
+ "l_gripper.pos": "l_arm.gripper",
66
+ }
67
+
68
+ REACHY2_VEL = {
69
+ "mobile_base.vx": "vx",
70
+ "mobile_base.vy": "vy",
71
+ "mobile_base.vtheta": "vtheta",
72
+ }
73
+
74
+
75
+ class Reachy2Robot(Robot):
76
+ """
77
+ [Reachy 2](https://www.pollen-robotics.com/reachy/), by Pollen Robotics.
78
+ """
79
+
80
+ config_class = Reachy2RobotConfig
81
+ name = "reachy2"
82
+
83
+ def __init__(self, config: Reachy2RobotConfig):
84
+ super().__init__(config)
85
+
86
+ self.config = config
87
+ self.robot_type = self.config.type
88
+ self.use_external_commands = self.config.use_external_commands
89
+
90
+ self.reachy: None | ReachySDK = None
91
+ self.cameras = make_cameras_from_configs(config.cameras)
92
+
93
+ self.logs: dict[str, float] = {}
94
+
95
+ self.joints_dict: dict[str, str] = self._generate_joints_dict()
96
+
97
+ @property
98
+ def observation_features(self) -> dict[str, Any]:
99
+ return {**self.motors_features, **self.camera_features}
100
+
101
+ @property
102
+ def action_features(self) -> dict[str, type]:
103
+ return self.motors_features
104
+
105
+ @property
106
+ def camera_features(self) -> dict[str, tuple[int | None, int | None, int]]:
107
+ return {cam: (self.cameras[cam].height, self.cameras[cam].width, 3) for cam in self.cameras}
108
+
109
+ @property
110
+ def motors_features(self) -> dict[str, type]:
111
+ if self.config.with_mobile_base:
112
+ return {
113
+ **dict.fromkeys(
114
+ self.joints_dict.keys(),
115
+ float,
116
+ ),
117
+ **dict.fromkeys(
118
+ REACHY2_VEL.keys(),
119
+ float,
120
+ ),
121
+ }
122
+ else:
123
+ return dict.fromkeys(self.joints_dict.keys(), float)
124
+
125
+ @property
126
+ def is_connected(self) -> bool:
127
+ return self.reachy.is_connected() if self.reachy is not None else False
128
+
129
+ def connect(self, calibrate: bool = False) -> None:
130
+ self.reachy = ReachySDK(self.config.ip_address)
131
+ if not self.is_connected:
132
+ raise ConnectionError(f"Failed to connect to Reachy 2 at {self.config.ip_address}")
133
+
134
+ for cam in self.cameras.values():
135
+ cam.connect()
136
+
137
+ self.configure()
138
+
139
+ def configure(self) -> None:
140
+ if self.reachy is not None:
141
+ self.reachy.turn_on()
142
+ self.reachy.reset_default_limits()
143
+
144
+ @property
145
+ def is_calibrated(self) -> bool:
146
+ return True
147
+
148
+ def calibrate(self) -> None:
149
+ pass
150
+
151
+ def _generate_joints_dict(self) -> dict[str, str]:
152
+ joints = {}
153
+ if self.config.with_neck:
154
+ joints.update(REACHY2_NECK_JOINTS)
155
+ if self.config.with_l_arm:
156
+ joints.update(REACHY2_L_ARM_JOINTS)
157
+ if self.config.with_r_arm:
158
+ joints.update(REACHY2_R_ARM_JOINTS)
159
+ if self.config.with_antennas:
160
+ joints.update(REACHY2_ANTENNAS_JOINTS)
161
+ return joints
162
+
163
+ def _get_state(self) -> dict[str, float]:
164
+ if self.reachy is not None:
165
+ pos_dict = {k: self.reachy.joints[v].present_position for k, v in self.joints_dict.items()}
166
+ if not self.config.with_mobile_base:
167
+ return pos_dict
168
+ vel_dict = {k: self.reachy.mobile_base.odometry[v] for k, v in REACHY2_VEL.items()}
169
+ return {**pos_dict, **vel_dict}
170
+ else:
171
+ return {}
172
+
173
+ def get_observation(self) -> RobotObservation:
174
+ obs_dict: RobotObservation = {}
175
+
176
+ # Read Reachy 2 state
177
+ before_read_t = time.perf_counter()
178
+ obs_dict.update(self._get_state())
179
+ self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t
180
+
181
+ # Capture images from cameras
182
+ for cam_key, cam in self.cameras.items():
183
+ obs_dict[cam_key] = cam.async_read()
184
+
185
+ return obs_dict
186
+
187
+ def send_action(self, action: RobotAction) -> RobotAction:
188
+ if self.reachy is not None:
189
+ if not self.is_connected:
190
+ raise ConnectionError(f"{self} is not connected.")
191
+
192
+ before_write_t = time.perf_counter()
193
+
194
+ vel = {}
195
+ goal_pos = {}
196
+ for key, val in action.items():
197
+ if key not in self.joints_dict:
198
+ if key not in REACHY2_VEL:
199
+ raise KeyError(f"Key '{key}' is not a valid motor key in Reachy 2.")
200
+ else:
201
+ vel[REACHY2_VEL[key]] = float(val)
202
+ else:
203
+ if not self.use_external_commands and self.config.max_relative_target is not None:
204
+ goal_pos[key] = float(val)
205
+ goal_present_pos = {
206
+ key: (
207
+ goal_pos[key],
208
+ self.reachy.joints[self.joints_dict[key]].present_position,
209
+ )
210
+ }
211
+ safe_goal_pos = ensure_safe_goal_position(
212
+ goal_present_pos, float(self.config.max_relative_target)
213
+ )
214
+ val = safe_goal_pos[key]
215
+ self.reachy.joints[self.joints_dict[key]].goal_position = float(val)
216
+
217
+ if self.config.with_mobile_base:
218
+ self.reachy.mobile_base.set_goal_speed(vel["vx"], vel["vy"], vel["vtheta"])
219
+
220
+ # We don't send the goal positions if we control Reachy 2 externally
221
+ if not self.use_external_commands:
222
+ self.reachy.send_goal_positions()
223
+ if self.config.with_mobile_base:
224
+ self.reachy.mobile_base.send_speed_command()
225
+
226
+ self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t
227
+ return action
228
+
229
+ def disconnect(self) -> None:
230
+ if self.reachy is not None:
231
+ for cam in self.cameras.values():
232
+ cam.disconnect()
233
+ if self.config.disable_torque_on_disconnect:
234
+ self.reachy.turn_off_smoothly()
235
+ self.reachy.disconnect()
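The action keys accepted by `send_action` are the lerobot-side names from the mappings at the top of this file; a sketch with illustrative values (all three mobile-base velocities should be provided together when the mobile base is enabled):

    action = {
        "r_shoulder_pitch.pos": -20.0,
        "r_gripper.pos": 50.0,
        "mobile_base.vx": 0.1,
        "mobile_base.vy": 0.0,
        "mobile_base.vtheta": 0.0,
    }
    robot.send_action(action)             # `robot` is a connected Reachy2Robot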
lerobot/src/lerobot/robots/so_follower/__init__.py ADDED
@@ -0,0 +1,23 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from .config_so_follower import (
18
+ SO100FollowerConfig,
19
+ SO101FollowerConfig,
20
+ SOFollowerConfig,
21
+ SOFollowerRobotConfig,
22
+ )
23
+ from .so_follower import SO100Follower, SO101Follower, SOFollower
lerobot/src/lerobot/robots/so_follower/config_so_follower.py ADDED
@@ -0,0 +1,54 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import TypeAlias
19
+
20
+ from lerobot.cameras import CameraConfig
21
+
22
+ from ..config import RobotConfig
23
+
24
+
25
+ @dataclass
26
+ class SOFollowerConfig:
27
+ """Base configuration class for SO Follower robots."""
28
+
29
+ # Port to connect to the arm
30
+ port: str
31
+
32
+ disable_torque_on_disconnect: bool = True
33
+
34
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
35
+ # Set this to a positive scalar to have the same value for all motors, or a dictionary that maps motor
36
+ # names to the max_relative_target value for that motor.
37
+ max_relative_target: float | dict[str, float] | None = None
38
+
39
+ # cameras
40
+ cameras: dict[str, CameraConfig] = field(default_factory=dict)
41
+
42
+ # Set to `True` for backward compatibility with previous policies/dataset
43
+ use_degrees: bool = False
44
+
45
+
46
+ @RobotConfig.register_subclass("so101_follower")
47
+ @RobotConfig.register_subclass("so100_follower")
48
+ @dataclass
49
+ class SOFollowerRobotConfig(RobotConfig, SOFollowerConfig):
50
+ pass
51
+
52
+
53
+ SO100FollowerConfig: TypeAlias = SOFollowerRobotConfig
54
+ SO101FollowerConfig: TypeAlias = SOFollowerRobotConfig
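Both registry names resolve to the same dataclass, so configs written for either robot keep working; a quick sketch (the port is a placeholder):

    assert SO100FollowerConfig is SO101FollowerConfig is SOFollowerRobotConfig
    cfg = SO100FollowerConfig(port="/dev/ttyACM0")
    # CLI/draccus configs selecting `so100_follower` or `so101_follower`
    # should both instantiate SOFollowerRobotConfig.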
lerobot/src/lerobot/robots/so_follower/robot_kinematic_processor.py ADDED
@@ -0,0 +1,611 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import Any
19
+
20
+ import numpy as np
21
+
22
+ from lerobot.configs.types import FeatureType, PipelineFeatureType, PolicyFeature
23
+ from lerobot.model.kinematics import RobotKinematics
24
+ from lerobot.processor import (
25
+ EnvTransition,
26
+ ObservationProcessorStep,
27
+ ProcessorStep,
28
+ ProcessorStepRegistry,
29
+ RobotAction,
30
+ RobotActionProcessorStep,
31
+ RobotObservation,
32
+ TransitionKey,
33
+ )
34
+ from lerobot.utils.rotation import Rotation
35
+
36
+
37
+ @ProcessorStepRegistry.register("ee_reference_and_delta")
38
+ @dataclass
39
+ class EEReferenceAndDelta(RobotActionProcessorStep):
40
+ """
41
+ Computes a target end-effector pose from a relative delta command.
42
+
43
+ This step takes a desired change in position and orientation (`target_*`) and applies it to a
44
+ reference end-effector pose to calculate an absolute target pose. The reference pose is derived
45
+ from the current robot joint positions using forward kinematics.
46
+
47
+ The processor can operate in two modes:
48
+ 1. `use_latched_reference=True`: The reference pose is "latched" or saved at the moment the action
49
+ is first enabled. Subsequent commands are relative to this fixed reference.
50
+ 2. `use_latched_reference=False`: The reference pose is updated to the robot's current pose at
51
+ every step.
52
+
53
+ Attributes:
54
+ kinematics: The robot's kinematic model for forward kinematics.
55
+ end_effector_step_sizes: A dictionary scaling the input delta commands.
56
+ motor_names: A list of motor names required for forward kinematics.
57
+ use_latched_reference: If True, latch the reference pose on enable; otherwise, always use the
58
+ current pose as the reference.
59
+ reference_ee_pose: Internal state storing the latched reference pose.
60
+ _prev_enabled: Internal state to detect the rising edge of the enable signal.
61
+ _command_when_disabled: Internal state to hold the last command while disabled.
62
+ """
63
+
64
+ kinematics: RobotKinematics
65
+ end_effector_step_sizes: dict
66
+ motor_names: list[str]
67
+ use_latched_reference: bool = (
68
+ True # If True, latch reference on enable; if False, always use current pose
69
+ )
70
+ use_ik_solution: bool = False
71
+
72
+ reference_ee_pose: np.ndarray | None = field(default=None, init=False, repr=False)
73
+ _prev_enabled: bool = field(default=False, init=False, repr=False)
74
+ _command_when_disabled: np.ndarray | None = field(default=None, init=False, repr=False)
75
+
76
+ def action(self, action: RobotAction) -> RobotAction:
77
+ observation = self.transition.get(TransitionKey.OBSERVATION).copy()
78
+
79
+ if observation is None:
80
+ raise ValueError("Joints observation is require for computing robot kinematics")
81
+
82
+ if self.use_ik_solution and "IK_solution" in self.transition.get(TransitionKey.COMPLEMENTARY_DATA):
83
+ q_raw = self.transition.get(TransitionKey.COMPLEMENTARY_DATA)["IK_solution"]
84
+ else:
85
+ q_raw = np.array(
86
+ [
87
+ float(v)
88
+ for k, v in observation.items()
89
+ if isinstance(k, str)
90
+ and k.endswith(".pos")
91
+ and k.removesuffix(".pos") in self.motor_names
92
+ ],
93
+ dtype=float,
94
+ )
95
+
96
+ if q_raw is None:
97
+ raise ValueError("Joints observation is require for computing robot kinematics")
98
+
99
+ # Current pose from FK on measured joints
100
+ t_curr = self.kinematics.forward_kinematics(q_raw)
101
+
102
+ enabled = bool(action.pop("enabled"))
103
+ tx = float(action.pop("target_x"))
104
+ ty = float(action.pop("target_y"))
105
+ tz = float(action.pop("target_z"))
106
+ wx = float(action.pop("target_wx"))
107
+ wy = float(action.pop("target_wy"))
108
+ wz = float(action.pop("target_wz"))
109
+ gripper_vel = float(action.pop("gripper_vel"))
110
+
111
+ desired = None
112
+
113
+ if enabled:
114
+ ref = t_curr
115
+ if self.use_latched_reference:
116
+ # Latched reference mode: latch reference at the rising edge
117
+ if not self._prev_enabled or self.reference_ee_pose is None:
118
+ self.reference_ee_pose = t_curr.copy()
119
+ ref = self.reference_ee_pose if self.reference_ee_pose is not None else t_curr
120
+
121
+ delta_p = np.array(
122
+ [
123
+ tx * self.end_effector_step_sizes["x"],
124
+ ty * self.end_effector_step_sizes["y"],
125
+ tz * self.end_effector_step_sizes["z"],
126
+ ],
127
+ dtype=float,
128
+ )
129
+ r_abs = Rotation.from_rotvec([wx, wy, wz]).as_matrix()
130
+ desired = np.eye(4, dtype=float)
131
+ desired[:3, :3] = ref[:3, :3] @ r_abs
132
+ desired[:3, 3] = ref[:3, 3] + delta_p
133
+
134
+ self._command_when_disabled = desired.copy()
135
+ else:
136
+ # While disabled, keep sending the same command to avoid drift.
137
+ if self._command_when_disabled is None:
138
+ # If we've never had an enabled command yet, freeze current FK pose once.
139
+ self._command_when_disabled = t_curr.copy()
140
+ desired = self._command_when_disabled.copy()
141
+
142
+ # Write action fields
143
+ pos = desired[:3, 3]
144
+ tw = Rotation.from_matrix(desired[:3, :3]).as_rotvec()
145
+ action["ee.x"] = float(pos[0])
146
+ action["ee.y"] = float(pos[1])
147
+ action["ee.z"] = float(pos[2])
148
+ action["ee.wx"] = float(tw[0])
149
+ action["ee.wy"] = float(tw[1])
150
+ action["ee.wz"] = float(tw[2])
151
+ action["ee.gripper_vel"] = gripper_vel
152
+
153
+ self._prev_enabled = enabled
154
+ return action
155
+
156
+ def reset(self):
157
+ """Resets the internal state of the processor."""
158
+ self._prev_enabled = False
159
+ self.reference_ee_pose = None
160
+ self._command_when_disabled = None
161
+
162
+ def transform_features(
163
+ self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
164
+ ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
165
+ for feat in [
166
+ "enabled",
167
+ "target_x",
168
+ "target_y",
169
+ "target_z",
170
+ "target_wx",
171
+ "target_wy",
172
+ "target_wz",
173
+ "gripper_vel",
174
+ ]:
175
+ features[PipelineFeatureType.ACTION].pop(f"{feat}", None)
176
+
177
+ for feat in ["x", "y", "z", "wx", "wy", "wz", "gripper_vel"]:
178
+ features[PipelineFeatureType.ACTION][f"ee.{feat}"] = PolicyFeature(
179
+ type=FeatureType.ACTION, shape=(1,)
180
+ )
181
+
182
+ return features
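A small numeric sketch of the pose composition performed above (reference pose combined with the scaled delta; standalone code, not tied to the processor state):

    import numpy as np
    from lerobot.utils.rotation import Rotation  # same helper as above

    ref = np.eye(4)                               # reference EE pose (identity for the sketch)
    step = {"x": 0.01, "y": 0.01, "z": 0.01}      # metres per unit of command
    tx, ty, tz = 1.0, 0.0, -0.5                   # unit-less operator command
    wx, wy, wz = 0.0, 0.0, 0.1                    # axis-angle rotation command (rad)

    desired = np.eye(4)
    desired[:3, :3] = ref[:3, :3] @ Rotation.from_rotvec([wx, wy, wz]).as_matrix()
    desired[:3, 3] = ref[:3, 3] + np.array([tx * step["x"], ty * step["y"], tz * step["z"]])
    # -> position [0.01, 0.0, -0.005], orientation rotated 0.1 rad about z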
183
+
184
+
185
+ @ProcessorStepRegistry.register("ee_bounds_and_safety")
186
+ @dataclass
187
+ class EEBoundsAndSafety(RobotActionProcessorStep):
188
+ """
189
+ Clips the end-effector pose to predefined bounds and checks for unsafe jumps.
190
+
191
+ This step ensures that the target end-effector pose remains within a safe operational workspace.
192
+ It also moderates the command to prevent large, sudden movements between consecutive steps.
193
+
194
+ Attributes:
195
+ end_effector_bounds: A dictionary with "min" and "max" keys for position clipping.
196
+ max_ee_step_m: The maximum allowed change in position (in meters) between steps.
197
+ _last_pos: Internal state storing the last commanded position.
198
+ """
199
+
200
+ end_effector_bounds: dict
201
+ max_ee_step_m: float = 0.05
202
+ _last_pos: np.ndarray | None = field(default=None, init=False, repr=False)
203
+
204
+ def action(self, action: RobotAction) -> RobotAction:
205
+ x = action["ee.x"]
206
+ y = action["ee.y"]
207
+ z = action["ee.z"]
208
+ wx = action["ee.wx"]
209
+ wy = action["ee.wy"]
210
+ wz = action["ee.wz"]
211
+ # TODO(Steven): ee.gripper_vel does not need to be bounded
212
+
213
+ if None in (x, y, z, wx, wy, wz):
214
+ raise ValueError(
215
+ "Missing required end-effector pose components: x, y, z, wx, wy, wz must all be present in action"
216
+ )
217
+
218
+ pos = np.array([x, y, z], dtype=float)
219
+ twist = np.array([wx, wy, wz], dtype=float)
220
+
221
+ # Clip position
222
+ pos = np.clip(pos, self.end_effector_bounds["min"], self.end_effector_bounds["max"])
223
+
224
+ # Check for jumps in position
225
+ if self._last_pos is not None:
226
+ dpos = pos - self._last_pos
227
+ n = float(np.linalg.norm(dpos))
228
+ if n > self.max_ee_step_m and n > 0:
229
+ pos = self._last_pos + dpos * (self.max_ee_step_m / n)
230
+ raise ValueError(f"EE jump {n:.3f}m > {self.max_ee_step_m}m")
231
+
232
+ self._last_pos = pos
233
+
234
+ action["ee.x"] = float(pos[0])
235
+ action["ee.y"] = float(pos[1])
236
+ action["ee.z"] = float(pos[2])
237
+ action["ee.wx"] = float(twist[0])
238
+ action["ee.wy"] = float(twist[1])
239
+ action["ee.wz"] = float(twist[2])
240
+ return action
241
+
242
+ def reset(self):
243
+ """Resets the last known position and orientation."""
244
+ self._last_pos = None
245
+
246
+ def transform_features(
247
+ self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
248
+ ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
249
+ return features
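For intuition, the step limiting above rescales the commanded displacement so that its norm never exceeds `max_ee_step_m` (plain numpy, illustrative values; note that the processor also raises on such jumps):

    import numpy as np

    last_pos = np.array([0.20, 0.00, 0.15])
    pos = np.array([0.32, 0.00, 0.15])        # 12 cm requested jump
    max_ee_step_m = 0.05
    dpos = pos - last_pos
    n = float(np.linalg.norm(dpos))           # 0.12
    if n > max_ee_step_m:
        pos = last_pos + dpos * (max_ee_step_m / n)
    # pos -> [0.25, 0.00, 0.15]: the jump is capped at 5 cm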
250
+
251
+
252
+ @ProcessorStepRegistry.register("inverse_kinematics_ee_to_joints")
253
+ @dataclass
254
+ class InverseKinematicsEEToJoints(RobotActionProcessorStep):
255
+ """
256
+ Computes desired joint positions from a target end-effector pose using inverse kinematics (IK).
257
+
258
+ This step translates a Cartesian command (position and orientation of the end-effector) into
259
+ the corresponding joint-space commands for each motor.
260
+
261
+ Attributes:
262
+ kinematics: The robot's kinematic model for inverse kinematics.
263
+ motor_names: A list of motor names for which to compute joint positions.
264
+ q_curr: Internal state storing the last joint positions, used as an initial guess for the IK solver.
265
+ initial_guess_current_joints: If True, use the robot's current joint state as the IK guess.
266
+ If False, use the solution from the previous step.
267
+ """
268
+
269
+ kinematics: RobotKinematics
270
+ motor_names: list[str]
271
+ q_curr: np.ndarray | None = field(default=None, init=False, repr=False)
272
+ initial_guess_current_joints: bool = True
273
+
274
+ def action(self, action: RobotAction) -> RobotAction:
275
+ x = action.pop("ee.x")
276
+ y = action.pop("ee.y")
277
+ z = action.pop("ee.z")
278
+ wx = action.pop("ee.wx")
279
+ wy = action.pop("ee.wy")
280
+ wz = action.pop("ee.wz")
281
+ gripper_pos = action.pop("ee.gripper_pos")
282
+
283
+ if None in (x, y, z, wx, wy, wz, gripper_pos):
284
+ raise ValueError(
285
+ "Missing required end-effector pose components: ee.x, ee.y, ee.z, ee.wx, ee.wy, ee.wz, ee.gripper_pos must all be present in action"
286
+ )
287
+
288
+ observation = self.transition.get(TransitionKey.OBSERVATION).copy()
289
+ if observation is None:
290
+ raise ValueError("Joints observation is require for computing robot kinematics")
291
+
292
+ q_raw = np.array(
293
+ [float(v) for k, v in observation.items() if isinstance(k, str) and k.endswith(".pos")],
294
+ dtype=float,
295
+ )
296
+ if q_raw is None:
297
+ raise ValueError("Joints observation is require for computing robot kinematics")
298
+
299
+ if self.initial_guess_current_joints: # Use current joints as initial guess
300
+ self.q_curr = q_raw
301
+ else: # Use previous ik solution as initial guess
302
+ if self.q_curr is None:
303
+ self.q_curr = q_raw
304
+
305
+ # Build desired 4x4 transform from pos + rotvec (twist)
306
+ t_des = np.eye(4, dtype=float)
307
+ t_des[:3, :3] = Rotation.from_rotvec([wx, wy, wz]).as_matrix()
308
+ t_des[:3, 3] = [x, y, z]
309
+
310
+ # Compute inverse kinematics
311
+ q_target = self.kinematics.inverse_kinematics(self.q_curr, t_des)
312
+ self.q_curr = q_target
313
+
314
+ # TODO: This is sensitive to the order of the motor_names -> q_target mapping
315
+ for i, name in enumerate(self.motor_names):
316
+ if name != "gripper":
317
+ action[f"{name}.pos"] = float(q_target[i])
318
+ else:
319
+ action["gripper.pos"] = float(gripper_pos)
320
+
321
+ return action
322
+
323
+ def transform_features(
324
+ self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
325
+ ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
326
+ for feat in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]:
327
+ features[PipelineFeatureType.ACTION].pop(f"ee.{feat}", None)
328
+
329
+ for name in self.motor_names:
330
+ features[PipelineFeatureType.ACTION][f"{name}.pos"] = PolicyFeature(
331
+ type=FeatureType.ACTION, shape=(1,)
332
+ )
333
+
334
+ return features
335
+
336
+ def reset(self):
337
+ """Resets the initial guess for the IK solver."""
338
+ self.q_curr = None
339
+
340
+
341
+ @ProcessorStepRegistry.register("gripper_velocity_to_joint")
342
+ @dataclass
343
+ class GripperVelocityToJoint(RobotActionProcessorStep):
344
+ """
345
+ Converts a gripper velocity command into a target gripper joint position.
346
+
347
+ This step integrates a normalized velocity command over time to produce a position command,
348
+ taking the current gripper position as a starting point. It also supports a discrete mode
349
+ where integer actions map to open, close, or no-op.
350
+
351
+ Attributes:
352
353
+ speed_factor: A scaling factor to convert the normalized velocity command to a position change.
354
+ clip_min: The minimum allowed gripper joint position.
355
+ clip_max: The maximum allowed gripper joint position.
356
+ discrete_gripper: If True, treat the input action as discrete (0: open, 1: close, 2: stay).
357
+ """
358
+
359
+ speed_factor: float = 20.0
360
+ clip_min: float = 0.0
361
+ clip_max: float = 100.0
362
+ discrete_gripper: bool = False
363
+
364
+ def action(self, action: RobotAction) -> RobotAction:
365
+ observation = self.transition.get(TransitionKey.OBSERVATION).copy()
366
+
367
+ gripper_vel = action.pop("ee.gripper_vel")
368
+
369
+ if observation is None:
370
+ raise ValueError("Joints observation is require for computing robot kinematics")
371
+
372
+ q_raw = np.array(
373
+ [float(v) for k, v in observation.items() if isinstance(k, str) and k.endswith(".pos")],
374
+ dtype=float,
375
+ )
376
+ if q_raw is None:
377
+ raise ValueError("Joints observation is require for computing robot kinematics")
378
+
379
+ if self.discrete_gripper:
380
+ # Discrete gripper actions are in [0, 1, 2]
381
+ # 0: open, 1: close, 2: stay
382
+ # We need to shift them to [-1, 0, 1] and then scale them to clip_max
383
+ gripper_vel = (gripper_vel - 1) * self.clip_max
384
+
385
+ # Compute desired gripper position
386
+ delta = gripper_vel * float(self.speed_factor)
387
+ # TODO: This assumes gripper is the last specified joint in the robot
388
+ gripper_pos = float(np.clip(q_raw[-1] + delta, self.clip_min, self.clip_max))
389
+ action["ee.gripper_pos"] = gripper_pos
390
+
391
+ return action
392
+
393
+ def transform_features(
394
+ self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
395
+ ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
396
+ features[PipelineFeatureType.ACTION].pop("ee.gripper_vel", None)
397
+ features[PipelineFeatureType.ACTION]["ee.gripper_pos"] = PolicyFeature(
398
+ type=FeatureType.ACTION, shape=(1,)
399
+ )
400
+
401
+ return features
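In continuous mode the step simply integrates the velocity command into a clipped position target; with illustrative numbers:

    import numpy as np

    current_gripper_pos = 35.0     # last joint in the observation vector
    gripper_vel = 0.8              # normalized command from the teleoperator
    speed_factor = 20.0
    gripper_pos = float(np.clip(current_gripper_pos + gripper_vel * speed_factor, 0.0, 100.0))
    # -> 51.0, emitted as action["ee.gripper_pos"]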
402
+
403
+
404
+ def compute_forward_kinematics_joints_to_ee(
405
+ joints: dict[str, Any], kinematics: RobotKinematics, motor_names: list[str]
406
+ ) -> dict[str, Any]:
407
+ motor_joint_values = [joints[f"{n}.pos"] for n in motor_names]
408
+
409
+ q = np.array(motor_joint_values, dtype=float)
410
+ t = kinematics.forward_kinematics(q)
411
+ pos = t[:3, 3]
412
+ tw = Rotation.from_matrix(t[:3, :3]).as_rotvec()
413
+ gripper_pos = joints["gripper.pos"]
414
+ for n in motor_names:
415
+ joints.pop(f"{n}.pos")
416
+ joints["ee.x"] = float(pos[0])
417
+ joints["ee.y"] = float(pos[1])
418
+ joints["ee.z"] = float(pos[2])
419
+ joints["ee.wx"] = float(tw[0])
420
+ joints["ee.wy"] = float(tw[1])
421
+ joints["ee.wz"] = float(tw[2])
422
+ joints["ee.gripper_pos"] = float(gripper_pos)
423
+ return joints
424
+
425
+
426
+ @ProcessorStepRegistry.register("forward_kinematics_joints_to_ee_observation")
427
+ @dataclass
428
+ class ForwardKinematicsJointsToEEObservation(ObservationProcessorStep):
429
+ """
430
+ Computes the end-effector pose from joint positions using forward kinematics (FK).
431
+
432
+ This step is typically used to add the robot's Cartesian pose to the observation space,
433
+ which can be useful for visualization or as an input to a policy.
434
+
435
+ Attributes:
436
+ kinematics: The robot's kinematic model.
437
+ """
438
+
439
+ kinematics: RobotKinematics
440
+ motor_names: list[str]
441
+
442
+ def observation(self, observation: RobotObservation) -> RobotObservation:
443
+ return compute_forward_kinematics_joints_to_ee(observation, self.kinematics, self.motor_names)
444
+
445
+ def transform_features(
446
+ self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
447
+ ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
448
+ # We only use the ee pose in the dataset, so we don't need the joint positions
449
+ for n in self.motor_names:
450
+ features[PipelineFeatureType.OBSERVATION].pop(f"{n}.pos", None)
451
+ # We specify the dataset features of this step that we want to be stored in the dataset
452
+ for k in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]:
453
+ features[PipelineFeatureType.OBSERVATION][f"ee.{k}"] = PolicyFeature(
454
+ type=FeatureType.STATE, shape=(1,)
455
+ )
456
+ return features
457
+
458
+
459
+ @ProcessorStepRegistry.register("forward_kinematics_joints_to_ee_action")
460
+ @dataclass
461
+ class ForwardKinematicsJointsToEEAction(RobotActionProcessorStep):
462
+ """
463
+ Computes the end-effector pose from joint positions using forward kinematics (FK).
464
+
465
+ This step is typically used to add the robot's Cartesian pose to the observation space,
466
+ which can be useful for visualization or as an input to a policy.
467
+
468
+ Attributes:
469
+ kinematics: The robot's kinematic model.
470
+ """
471
+
472
+ kinematics: RobotKinematics
473
+ motor_names: list[str]
474
+
475
+ def action(self, action: RobotAction) -> RobotAction:
476
+ return compute_forward_kinematics_joints_to_ee(action, self.kinematics, self.motor_names)
477
+
478
+ def transform_features(
479
+ self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
480
+ ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
481
+ # We only use the ee pose in the dataset, so we don't need the joint positions
482
+ for n in self.motor_names:
483
+ features[PipelineFeatureType.ACTION].pop(f"{n}.pos", None)
484
+ # We specify the dataset features of this step that we want to be stored in the dataset
485
+ for k in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]:
486
+ features[PipelineFeatureType.ACTION][f"ee.{k}"] = PolicyFeature(
487
+ type=FeatureType.STATE, shape=(1,)
488
+ )
489
+ return features
490
+
491
+
492
+ @ProcessorStepRegistry.register(name="forward_kinematics_joints_to_ee")
493
+ @dataclass
494
+ class ForwardKinematicsJointsToEE(ProcessorStep):
495
+ kinematics: RobotKinematics
496
+ motor_names: list[str]
497
+
498
+ def __post_init__(self):
499
+ self.joints_to_ee_action_processor = ForwardKinematicsJointsToEEAction(
500
+ kinematics=self.kinematics, motor_names=self.motor_names
501
+ )
502
+ self.joints_to_ee_observation_processor = ForwardKinematicsJointsToEEObservation(
503
+ kinematics=self.kinematics, motor_names=self.motor_names
504
+ )
505
+
506
+ def __call__(self, transition: EnvTransition) -> EnvTransition:
507
+ if transition.get(TransitionKey.ACTION) is not None:
508
+ transition = self.joints_to_ee_action_processor(transition)
509
+ if transition.get(TransitionKey.OBSERVATION) is not None:
510
+ transition = self.joints_to_ee_observation_processor(transition)
511
+ return transition
512
+
513
+ def transform_features(
514
+ self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
515
+ ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
516
+ if features[PipelineFeatureType.ACTION] is not None:
517
+ features = self.joints_to_ee_action_processor.transform_features(features)
518
+ if features[PipelineFeatureType.OBSERVATION] is not None:
519
+ features = self.joints_to_ee_observation_processor.transform_features(features)
520
+ return features
521
+
522
+
523
+ @ProcessorStepRegistry.register("inverse_kinematics_rl_step")
524
+ @dataclass
525
+ class InverseKinematicsRLStep(ProcessorStep):
526
+ """
527
+ Computes desired joint positions from a target end-effector pose using inverse kinematics (IK).
528
+
529
+ This is modified from the InverseKinematicsEEToJoints step to be used in the RL pipeline.
530
+ """
531
+
532
+ kinematics: RobotKinematics
533
+ motor_names: list[str]
534
+ q_curr: np.ndarray | None = field(default=None, init=False, repr=False)
535
+ initial_guess_current_joints: bool = True
536
+
537
+ def __call__(self, transition: EnvTransition) -> EnvTransition:
538
+ new_transition = dict(transition)
539
+ action = new_transition.get(TransitionKey.ACTION)
540
+ if action is None:
541
+ raise ValueError("Action is required for InverseKinematicsEEToJoints")
542
+ action = dict(action)
543
+
544
+ x = action.pop("ee.x")
545
+ y = action.pop("ee.y")
546
+ z = action.pop("ee.z")
547
+ wx = action.pop("ee.wx")
548
+ wy = action.pop("ee.wy")
549
+ wz = action.pop("ee.wz")
550
+ gripper_pos = action.pop("ee.gripper_pos")
551
+
552
+ if None in (x, y, z, wx, wy, wz, gripper_pos):
553
+ raise ValueError(
554
+ "Missing required end-effector pose components: ee.x, ee.y, ee.z, ee.wx, ee.wy, ee.wz, ee.gripper_pos must all be present in action"
555
+ )
556
+
557
+ observation = new_transition.get(TransitionKey.OBSERVATION)
558
+ if observation is None:
559
+ raise ValueError("Joints observation is required for computing robot kinematics")
560
+
561
+ q_raw = np.array(
562
+ [float(v) for k, v in observation.items() if isinstance(k, str) and k.endswith(".pos")],
563
+ dtype=float,
564
+ )
565
+ if q_raw.size == 0:
566
+ raise ValueError("No joint position ('.pos') values found in observation for computing robot kinematics")
567
+
568
+ if self.initial_guess_current_joints: # Use current joints as initial guess
569
+ self.q_curr = q_raw
570
+ else: # Use previous ik solution as initial guess
571
+ if self.q_curr is None:
572
+ self.q_curr = q_raw
573
+
574
+ # Build desired 4x4 transform from pos + rotvec (twist)
575
+ t_des = np.eye(4, dtype=float)
576
+ t_des[:3, :3] = Rotation.from_rotvec([wx, wy, wz]).as_matrix()
577
+ t_des[:3, 3] = [x, y, z]
578
+
579
+ # Compute inverse kinematics
580
+ q_target = self.kinematics.inverse_kinematics(self.q_curr, t_des)
581
+ self.q_curr = q_target
582
+
583
+ # TODO: This is sensitive to the order of the motor_names -> q_target mapping
584
+ for i, name in enumerate(self.motor_names):
585
+ if name != "gripper":
586
+ action[f"{name}.pos"] = float(q_target[i])
587
+ else:
588
+ action["gripper.pos"] = float(gripper_pos)
589
+
590
+ new_transition[TransitionKey.ACTION] = action
591
+ complementary_data = new_transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
592
+ complementary_data["IK_solution"] = q_target
593
+ new_transition[TransitionKey.COMPLEMENTARY_DATA] = complementary_data
594
+ return new_transition
595
+
596
+ def transform_features(
597
+ self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
598
+ ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
599
+ for feat in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]:
600
+ features[PipelineFeatureType.ACTION].pop(f"ee.{feat}", None)
601
+
602
+ for name in self.motor_names:
603
+ features[PipelineFeatureType.ACTION][f"{name}.pos"] = PolicyFeature(
604
+ type=FeatureType.ACTION, shape=(1,)
605
+ )
606
+
607
+ return features
608
+
609
+ def reset(self):
610
+ """Resets the initial guess for the IK solver."""
611
+ self.q_curr = None
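The `initial_guess_current_joints` flag above toggles between seeding the IK solve with the robot's current joints or with the previous IK solution (`q_curr`). The toy below is a generic planar 2-link example, not lerobot's `RobotKinematics` solver, but it shows why warm-starting matters for an iterative solver: a seed near the previous solution converges in fewer iterations than a cold start.

```python
# Generic, self-contained illustration of warm-starting an iterative IK solve
# (planar 2-link arm, damped least squares). Not the solver used by RobotKinematics.
import numpy as np

L1, L2 = 0.3, 0.25  # link lengths (m), arbitrary

def fk(q):
    return np.array([
        L1 * np.cos(q[0]) + L2 * np.cos(q[0] + q[1]),
        L1 * np.sin(q[0]) + L2 * np.sin(q[0] + q[1]),
    ])

def jacobian(q):
    s1, c1 = np.sin(q[0]), np.cos(q[0])
    s12, c12 = np.sin(q[0] + q[1]), np.cos(q[0] + q[1])
    return np.array([[-L1 * s1 - L2 * s12, -L2 * s12],
                     [ L1 * c1 + L2 * c12,  L2 * c12]])

def ik(target, q0, tol=1e-6, max_iters=200, damping=1e-3):
    q = np.array(q0, dtype=float)
    for i in range(max_iters):
        err = target - fk(q)
        if np.linalg.norm(err) < tol:
            return q, i
        j = jacobian(q)
        # damped least-squares update: dq = J^T (J J^T + lambda I)^-1 err
        q += j.T @ np.linalg.solve(j @ j.T + damping * np.eye(2), err)
    return q, max_iters

target = fk(np.array([0.7, 0.4]))           # a reachable pose
_, n_cold = ik(target, q0=[0.0, 0.0])       # cold start
_, n_warm = ik(target, q0=[0.65, 0.35])     # warm start near the previous solution
print(f"cold start: {n_cold} iterations, warm start: {n_warm} iterations")
```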
lerobot/src/lerobot/robots/so_follower/so100.md ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../../../docs/source/so100.mdx
lerobot/src/lerobot/robots/so_follower/so101.md ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../../../docs/source/so101.mdx
lerobot/src/lerobot/robots/so_follower/so_follower.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import logging
18
+ import time
19
+ from functools import cached_property
20
+ from typing import TypeAlias
21
+
22
+ from lerobot.cameras.utils import make_cameras_from_configs
23
+ from lerobot.motors import Motor, MotorCalibration, MotorNormMode
24
+ from lerobot.motors.feetech import (
25
+ FeetechMotorsBus,
26
+ OperatingMode,
27
+ )
28
+ from lerobot.processor import RobotAction, RobotObservation
29
+ from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
30
+
31
+ from ..robot import Robot
32
+ from ..utils import ensure_safe_goal_position
33
+ from .config_so_follower import SOFollowerRobotConfig
34
+
35
+ logger = logging.getLogger(__name__)
36
+
37
+
38
+ class SOFollower(Robot):
39
+ """
40
+ Generic SO follower base implementing common functionality for SO-100/101/10X.
41
+ Designed to be subclassed with a per-hardware-model `config_class` and `name`.
42
+ """
43
+
44
+ config_class = SOFollowerRobotConfig
45
+ name = "so_follower"
46
+
47
+ def __init__(self, config: SOFollowerRobotConfig):
48
+ super().__init__(config)
49
+ self.config = config
50
+ # choose normalization mode depending on config if available
51
+ norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100
52
+ self.bus = FeetechMotorsBus(
53
+ port=self.config.port,
54
+ motors={
55
+ "shoulder_pan": Motor(1, "sts3215", norm_mode_body),
56
+ "shoulder_lift": Motor(2, "sts3215", norm_mode_body),
57
+ "elbow_flex": Motor(3, "sts3215", norm_mode_body),
58
+ "wrist_flex": Motor(4, "sts3215", norm_mode_body),
59
+ "wrist_roll": Motor(5, "sts3215", norm_mode_body),
60
+ "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100),
61
+ },
62
+ calibration=self.calibration,
63
+ )
64
+ self.cameras = make_cameras_from_configs(config.cameras)
65
+
66
+ @property
67
+ def _motors_ft(self) -> dict[str, type]:
68
+ return {f"{motor}.pos": float for motor in self.bus.motors}
69
+
70
+ @property
71
+ def _cameras_ft(self) -> dict[str, tuple]:
72
+ return {
73
+ cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
74
+ }
75
+
76
+ @cached_property
77
+ def observation_features(self) -> dict[str, type | tuple]:
78
+ return {**self._motors_ft, **self._cameras_ft}
79
+
80
+ @cached_property
81
+ def action_features(self) -> dict[str, type]:
82
+ return self._motors_ft
83
+
84
+ @property
85
+ def is_connected(self) -> bool:
86
+ return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
87
+
88
+ @check_if_already_connected
89
+ def connect(self, calibrate: bool = True) -> None:
90
+ """
91
+ We assume that at connection time, arm is in a rest position,
92
+ and torque can be safely disabled to run calibration.
93
+ """
94
+
95
+ self.bus.connect()
96
+ if not self.is_calibrated and calibrate:
97
+ logger.info(
98
+ "Mismatch between calibration values in the motor and the calibration file or no calibration file found"
99
+ )
100
+ self.calibrate()
101
+
102
+ for cam in self.cameras.values():
103
+ cam.connect()
104
+
105
+ self.configure()
106
+ logger.info(f"{self} connected.")
107
+
108
+ @property
109
+ def is_calibrated(self) -> bool:
110
+ return self.bus.is_calibrated
111
+
112
+ def calibrate(self) -> None:
113
+ if self.calibration:
114
+ # Calibration file exists, ask user whether to use it or run new calibration
115
+ user_input = input(
116
+ f"Press ENTER to use provided calibration file associated with the id {self.id}, or type 'c' and press ENTER to run calibration: "
117
+ )
118
+ if user_input.strip().lower() != "c":
119
+ logger.info(f"Writing calibration file associated with the id {self.id} to the motors")
120
+ self.bus.write_calibration(self.calibration)
121
+ return
122
+
123
+ logger.info(f"\nRunning calibration of {self}")
124
+ self.bus.disable_torque()
125
+ for motor in self.bus.motors:
126
+ self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
127
+
128
+ input(f"Move {self} to the middle of its range of motion and press ENTER....")
129
+ homing_offsets = self.bus.set_half_turn_homings()
130
+
131
+ # Record ranges of motion for all joints except the full-turn wrist_roll, whose range is fixed below.
132
+ full_turn_motor = "wrist_roll"
133
+ unknown_range_motors = [motor for motor in self.bus.motors if motor != full_turn_motor]
134
+ print(
135
+ f"Move all joints except '{full_turn_motor}' sequentially through their "
136
+ "entire ranges of motion.\nRecording positions. Press ENTER to stop..."
137
+ )
138
+ range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
139
+ range_mins[full_turn_motor] = 0
140
+ range_maxes[full_turn_motor] = 4095
141
+
142
+ self.calibration = {}
143
+ for motor, m in self.bus.motors.items():
144
+ self.calibration[motor] = MotorCalibration(
145
+ id=m.id,
146
+ drive_mode=0,
147
+ homing_offset=homing_offsets[motor],
148
+ range_min=range_mins[motor],
149
+ range_max=range_maxes[motor],
150
+ )
151
+
152
+ self.bus.write_calibration(self.calibration)
153
+ self._save_calibration()
154
+ print("Calibration saved to", self.calibration_fpath)
155
+
156
+ def configure(self) -> None:
157
+ with self.bus.torque_disabled():
158
+ self.bus.configure_motors()
159
+ for motor in self.bus.motors:
160
+ self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
161
+ # Set P_Coefficient to lower value to avoid shakiness (Default is 32)
162
+ self.bus.write("P_Coefficient", motor, 16)
163
+ # Set I_Coefficient and D_Coefficient to default value 0 and 32
164
+ self.bus.write("I_Coefficient", motor, 0)
165
+ self.bus.write("D_Coefficient", motor, 32)
166
+
167
+ if motor == "gripper":
168
+ self.bus.write("Max_Torque_Limit", motor, 500) # 50% of max torque to avoid burnout
169
+ self.bus.write("Protection_Current", motor, 250) # 50% of max current to avoid burnout
170
+ self.bus.write("Overload_Torque", motor, 25) # 25% torque when overloaded
171
+
172
+ def setup_motors(self) -> None:
173
+ for motor in reversed(self.bus.motors):
174
+ input(f"Connect the controller board to the '{motor}' motor only and press enter.")
175
+ self.bus.setup_motor(motor)
176
+ print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
177
+
178
+ @check_if_not_connected
179
+ def get_observation(self) -> RobotObservation:
180
+ # Read arm position
181
+ start = time.perf_counter()
182
+ obs_dict = self.bus.sync_read("Present_Position")
183
+ obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
184
+ dt_ms = (time.perf_counter() - start) * 1e3
185
+ logger.debug(f"{self} read state: {dt_ms:.1f}ms")
186
+
187
+ # Capture images from cameras
188
+ for cam_key, cam in self.cameras.items():
189
+ start = time.perf_counter()
190
+ obs_dict[cam_key] = cam.async_read()
191
+ dt_ms = (time.perf_counter() - start) * 1e3
192
+ logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
193
+
194
+ return obs_dict
195
+
196
+ @check_if_not_connected
197
+ def send_action(self, action: RobotAction) -> RobotAction:
198
+ """Command arm to move to a target joint configuration.
199
+
200
+ The relative action magnitude may be clipped depending on the configuration parameter
201
+ `max_relative_target`. In this case, the action sent differs from the original action.
202
+ Thus, this function always returns the action actually sent.
203
+
204
+ Raises:
205
+ RobotDeviceNotConnectedError: if robot is not connected.
206
+
207
+ Returns:
208
+ RobotAction: the action sent to the motors, potentially clipped.
209
+ """
210
+
211
+ goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}
212
+
213
+ # Cap goal position when too far away from present position.
214
+ # /!\ Slower fps expected due to reading from the follower.
215
+ if self.config.max_relative_target is not None:
216
+ present_pos = self.bus.sync_read("Present_Position")
217
+ goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()}
218
+ goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target)
219
+
220
+ # Send goal position to the arm
221
+ self.bus.sync_write("Goal_Position", goal_pos)
222
+ return {f"{motor}.pos": val for motor, val in goal_pos.items()}
223
+
224
+ @check_if_not_connected
225
+ def disconnect(self):
226
+ self.bus.disconnect(self.config.disable_torque_on_disconnect)
227
+ for cam in self.cameras.values():
228
+ cam.disconnect()
229
+
230
+ logger.info(f"{self} disconnected.")
231
+
232
+
233
+ SO100Follower: TypeAlias = SOFollower
234
+ SO101Follower: TypeAlias = SOFollower
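A minimal usage sketch for the class above, for hardware-connected setups. The `id` and `port` values are placeholders, and the import paths simply mirror the file layout in this diff; check `config_so_follower.py` for the full set of config fields.

```python
# Hedged usage sketch (requires a connected SO-10x arm); values below are examples, not defaults.
from lerobot.robots.so_follower.config_so_follower import SOFollowerRobotConfig
from lerobot.robots.so_follower.so_follower import SOFollower

config = SOFollowerRobotConfig(id="my_follower", port="/dev/ttyACM0")  # hypothetical id/port
robot = SOFollower(config)
robot.connect(calibrate=True)  # runs calibration if no valid calibration is found

obs = robot.get_observation()  # {"shoulder_pan.pos": float, ..., "<camera_name>": image array}
action = {k: v for k, v in obs.items() if k.endswith(".pos")}  # hold the current pose
sent = robot.send_action(action)  # may be clipped by config.max_relative_target

robot.disconnect()
```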
lerobot/src/lerobot/robots/unitree_g1/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from .config_unitree_g1 import UnitreeG1Config
18
+ from .unitree_g1 import UnitreeG1
lerobot/src/lerobot/robots/unitree_g1/config_unitree_g1.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from dataclasses import dataclass, field
18
+
19
+ from lerobot.cameras import CameraConfig
20
+
21
+ from ..config import RobotConfig
22
+
23
+ _GAINS: dict[str, dict[str, list[float]]] = {
24
+ "left_leg": {
25
+ "kp": [150, 150, 150, 300, 40, 40],
26
+ "kd": [2, 2, 2, 4, 2, 2],
27
+ }, # pitch, roll, yaw, knee, ankle_pitch, ankle_roll
28
+ "right_leg": {"kp": [150, 150, 150, 300, 40, 40], "kd": [2, 2, 2, 4, 2, 2]},
29
+ "waist": {"kp": [250, 250, 250], "kd": [5, 5, 5]}, # yaw, roll, pitch
30
+ "left_arm": {"kp": [80, 80, 80, 80], "kd": [3, 3, 3, 3]}, # shoulder_pitch/roll/yaw, elbow
31
+ "left_wrist": {"kp": [40, 40, 40], "kd": [1.5, 1.5, 1.5]}, # roll, pitch, yaw
32
+ "right_arm": {"kp": [80, 80, 80, 80], "kd": [3, 3, 3, 3]},
33
+ "right_wrist": {"kp": [40, 40, 40], "kd": [1.5, 1.5, 1.5]},
34
+ "other": {"kp": [80, 80, 80, 80, 80, 80], "kd": [3, 3, 3, 3, 3, 3]},
35
+ }
36
+
37
+
38
+ def _build_gains() -> tuple[list[float], list[float]]:
39
+ """Build kp and kd lists from body-part groupings."""
40
+ kp = [v for g in _GAINS.values() for v in g["kp"]]
41
+ kd = [v for g in _GAINS.values() for v in g["kd"]]
42
+ return kp, kd
43
+
44
+
45
+ _DEFAULT_KP, _DEFAULT_KD = _build_gains()
46
+
47
+
48
+ @RobotConfig.register_subclass("unitree_g1")
49
+ @dataclass
50
+ class UnitreeG1Config(RobotConfig):
51
+ kp: list[float] = field(default_factory=lambda: _DEFAULT_KP.copy())
52
+ kd: list[float] = field(default_factory=lambda: _DEFAULT_KD.copy())
53
+
54
+ # Default joint positions
55
+ default_positions: list[float] = field(default_factory=lambda: [0.0] * 29)
56
+
57
+ # Control loop timestep
58
+ control_dt: float = 1.0 / 250.0 # 250Hz
59
+
60
+ # Launch mujoco simulation
61
+ is_simulation: bool = True
62
+
63
+ # Socket config for ZMQ bridge
64
+ robot_ip: str = "192.168.123.164" # default G1 IP
65
+
66
+ # Cameras (ZMQ-based remote cameras)
67
+ cameras: dict[str, CameraConfig] = field(default_factory=dict)
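The flattened `kp`/`kd` vectors built from `_GAINS` are ordered by body-part group, and that ordering is what lines them up with the motor indices defined in `g1_utils.py`. The short sketch below only recomputes the index range each group occupies; the group sizes are copied from `_GAINS` above.

```python
# Sketch: index ranges covered by each gain group after flattening (matches G1_29_JointIndex
# for the first 29 entries; the trailing "other" block pads the vector to 35 motors).
group_sizes = {
    "left_leg": 6, "right_leg": 6, "waist": 3,
    "left_arm": 4, "left_wrist": 3, "right_arm": 4, "right_wrist": 3, "other": 6,
}
start = 0
for group, n in group_sizes.items():
    print(f"{group}: motor indices {start}..{start + n - 1}")
    start += n
print("total:", start)  # 35, i.e. NUM_MOTORS in g1_utils.py
```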
lerobot/src/lerobot/robots/unitree_g1/g1_utils.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from enum import IntEnum
18
+
19
+ # ruff: noqa: N801, N815
20
+
21
+ NUM_MOTORS = 35
22
+
23
+
24
+ class G1_29_JointArmIndex(IntEnum):
25
+ # Left arm
26
+ kLeftShoulderPitch = 15
27
+ kLeftShoulderRoll = 16
28
+ kLeftShoulderYaw = 17
29
+ kLeftElbow = 18
30
+ kLeftWristRoll = 19
31
+ kLeftWristPitch = 20
32
+ kLeftWristyaw = 21
33
+
34
+ # Right arm
35
+ kRightShoulderPitch = 22
36
+ kRightShoulderRoll = 23
37
+ kRightShoulderYaw = 24
38
+ kRightElbow = 25
39
+ kRightWristRoll = 26
40
+ kRightWristPitch = 27
41
+ kRightWristYaw = 28
42
+
43
+
44
+ class G1_29_JointIndex(IntEnum):
45
+ # Left leg
46
+ kLeftHipPitch = 0
47
+ kLeftHipRoll = 1
48
+ kLeftHipYaw = 2
49
+ kLeftKnee = 3
50
+ kLeftAnklePitch = 4
51
+ kLeftAnkleRoll = 5
52
+
53
+ # Right leg
54
+ kRightHipPitch = 6
55
+ kRightHipRoll = 7
56
+ kRightHipYaw = 8
57
+ kRightKnee = 9
58
+ kRightAnklePitch = 10
59
+ kRightAnkleRoll = 11
60
+
61
+ kWaistYaw = 12
62
+ kWaistRoll = 13
63
+ kWaistPitch = 14
64
+
65
+ # Left arm
66
+ kLeftShoulderPitch = 15
67
+ kLeftShoulderRoll = 16
68
+ kLeftShoulderYaw = 17
69
+ kLeftElbow = 18
70
+ kLeftWristRoll = 19
71
+ kLeftWristPitch = 20
72
+ kLeftWristyaw = 21
73
+
74
+ # Right arm
75
+ kRightShoulderPitch = 22
76
+ kRightShoulderRoll = 23
77
+ kRightShoulderYaw = 24
78
+ kRightElbow = 25
79
+ kRightWristRoll = 26
80
+ kRightWristPitch = 27
81
+ kRightWristYaw = 28
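A small sketch of how these enums are typically used: picking the 14 arm joints out of a full 35-motor state vector. The state layout assumed here is one value per motor index, matching the `motor_state` list produced by `lowstate_to_dict` in `run_g1_server.py`.

```python
# Sketch: select the arm joints (indices 15..28) from a full-body state of NUM_MOTORS values.
import numpy as np

from lerobot.robots.unitree_g1.g1_utils import G1_29_JointArmIndex, NUM_MOTORS  # path per this diff

q_all = np.zeros(NUM_MOTORS)  # placeholder; in practice, motor_state[i]["q"] for each motor i
arm_ids = [joint.value for joint in G1_29_JointArmIndex]
q_arms = q_all[arm_ids]
print(f"{len(arm_ids)} arm joints at indices {arm_ids}")
```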
lerobot/src/lerobot/robots/unitree_g1/run_g1_server.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ DDS-to-ZMQ bridge server for Unitree G1 robot.
19
+
20
+ This server runs on the robot and forwards:
21
+ - Robot state (LowState) from DDS to ZMQ (for remote clients)
22
+ - Robot commands (LowCmd) from ZMQ to DDS (from remote clients)
23
+
24
+ Uses JSON for secure serialization instead of pickle.
25
+ """
26
+
27
+ import base64
28
+ import contextlib
29
+ import json
30
+ import threading
31
+ import time
32
+ from typing import Any
33
+
34
+ import zmq
35
+ from unitree_sdk2py.comm.motion_switcher.motion_switcher_client import MotionSwitcherClient
36
+ from unitree_sdk2py.core.channel import ChannelFactoryInitialize, ChannelPublisher, ChannelSubscriber
37
+ from unitree_sdk2py.idl.default import unitree_hg_msg_dds__LowCmd_
38
+ from unitree_sdk2py.idl.unitree_hg.msg.dds_ import LowCmd_ as hg_LowCmd, LowState_ as hg_LowState
39
+ from unitree_sdk2py.utils.crc import CRC
40
+
41
+ # DDS topic names follow Unitree SDK naming conventions
42
+ # ruff: noqa: N816
43
+ kTopicLowCommand_Debug = "rt/lowcmd" # action to robot
44
+ kTopicLowState = "rt/lowstate" # observation from robot
45
+
46
+ LOWCMD_PORT = 6000
47
+ LOWSTATE_PORT = 6001
48
+ NUM_MOTORS = 35
49
+
50
+
51
+ def lowstate_to_dict(msg: hg_LowState) -> dict[str, Any]:
52
+ """Convert LowState SDK message to a JSON-serializable dictionary."""
53
+ motor_states = []
54
+ for i in range(NUM_MOTORS):
55
+ temp = msg.motor_state[i].temperature
56
+ avg_temp = float(sum(temp) / len(temp)) if isinstance(temp, list) else float(temp)
57
+ motor_states.append(
58
+ {
59
+ "q": float(msg.motor_state[i].q),
60
+ "dq": float(msg.motor_state[i].dq),
61
+ "tau_est": float(msg.motor_state[i].tau_est),
62
+ "temperature": avg_temp,
63
+ }
64
+ )
65
+
66
+ return {
67
+ "motor_state": motor_states,
68
+ "imu_state": {
69
+ "quaternion": [float(x) for x in msg.imu_state.quaternion],
70
+ "gyroscope": [float(x) for x in msg.imu_state.gyroscope],
71
+ "accelerometer": [float(x) for x in msg.imu_state.accelerometer],
72
+ "rpy": [float(x) for x in msg.imu_state.rpy],
73
+ "temperature": float(msg.imu_state.temperature),
74
+ },
75
+ # Encode bytes as base64 for JSON compatibility
76
+ "wireless_remote": base64.b64encode(bytes(msg.wireless_remote)).decode("ascii"),
77
+ "mode_machine": int(msg.mode_machine),
78
+ }
79
+
80
+
81
+ def dict_to_lowcmd(data: dict[str, Any]) -> hg_LowCmd:
82
+ """Convert dictionary back to LowCmd SDK message."""
83
+ cmd = unitree_hg_msg_dds__LowCmd_()
84
+ cmd.mode_pr = data.get("mode_pr", 0)
85
+ cmd.mode_machine = data.get("mode_machine", 0)
86
+
87
+ for i, motor_data in enumerate(data.get("motor_cmd", [])):
88
+ cmd.motor_cmd[i].mode = motor_data.get("mode", 0)
89
+ cmd.motor_cmd[i].q = motor_data.get("q", 0.0)
90
+ cmd.motor_cmd[i].dq = motor_data.get("dq", 0.0)
91
+ cmd.motor_cmd[i].kp = motor_data.get("kp", 0.0)
92
+ cmd.motor_cmd[i].kd = motor_data.get("kd", 0.0)
93
+ cmd.motor_cmd[i].tau = motor_data.get("tau", 0.0)
94
+
95
+ return cmd
96
+
97
+
98
+ def state_forward_loop(
99
+ lowstate_sub: ChannelSubscriber,
100
+ lowstate_sock: zmq.Socket,
101
+ state_period: float,
102
+ shutdown_event: threading.Event,
103
+ ) -> None:
104
+ """Read observation from DDS and forward to ZMQ clients."""
105
+ last_state_time = 0.0
106
+
107
+ while not shutdown_event.is_set():
108
+ # read from DDS
109
+ msg = lowstate_sub.Read()
110
+ if msg is None:
111
+ continue
112
+
113
+ now = time.time()
114
+ # optional downsampling (if robot dds rate > state_period)
115
+ if now - last_state_time >= state_period:
116
+ # Convert to dict and serialize with JSON
117
+ state_dict = lowstate_to_dict(msg)
118
+ payload = json.dumps({"topic": kTopicLowState, "data": state_dict}).encode("utf-8")
119
+ # if no subscribers / tx buffer full, just drop
120
+ with contextlib.suppress(zmq.Again):
121
+ lowstate_sock.send(payload, zmq.NOBLOCK)
122
+ last_state_time = now
123
+
124
+
125
+ def cmd_forward_loop(
126
+ lowcmd_sock: zmq.Socket,
127
+ lowcmd_pub_debug: ChannelPublisher,
128
+ crc: CRC,
129
+ ) -> None:
130
+ """Receive commands from ZMQ and forward to DDS."""
131
+ while True:
132
+ try:
133
+ payload = lowcmd_sock.recv()
134
+ except zmq.ContextTerminated:
135
+ break
136
+ msg_dict = json.loads(payload.decode("utf-8"))
137
+
138
+ topic = msg_dict.get("topic", "")
139
+ cmd_data = msg_dict.get("data", {})
140
+
141
+ # Reconstruct LowCmd object from dict
142
+ cmd = dict_to_lowcmd(cmd_data)
143
+
144
+ # recompute crc
145
+ cmd.crc = crc.Crc(cmd)
146
+
147
+ if topic == kTopicLowCommand_Debug:
148
+ lowcmd_pub_debug.Write(cmd)
149
+
150
+
151
+ def main() -> None:
152
+ """Main entry point for the robot server bridge."""
153
+ # initialize DDS
154
+ ChannelFactoryInitialize(0)
155
+
156
+ # stop all active publishers on the robot
157
+ msc = MotionSwitcherClient()
158
+ msc.SetTimeout(5.0)
159
+ msc.Init()
160
+
161
+ status, result = msc.CheckMode()
162
+ while result is not None and "name" in result and result["name"]:
163
+ msc.ReleaseMode()
164
+ status, result = msc.CheckMode()
165
+ time.sleep(1.0)
166
+
167
+ crc = CRC()
168
+
169
+ # initialize DDS publisher
170
+ lowcmd_pub_debug = ChannelPublisher(kTopicLowCommand_Debug, hg_LowCmd)
171
+ lowcmd_pub_debug.Init()
172
+
173
+ # initialize DDS subscriber
174
+ lowstate_sub = ChannelSubscriber(kTopicLowState, hg_LowState)
175
+ lowstate_sub.Init()
176
+
177
+ # initialize ZMQ
178
+ ctx = zmq.Context.instance()
179
+
180
+ # receive commands from remote client
181
+ lowcmd_sock = ctx.socket(zmq.PULL)
182
+ lowcmd_sock.bind(f"tcp://0.0.0.0:{LOWCMD_PORT}")
183
+
184
+ # publish state to remote clients
185
+ lowstate_sock = ctx.socket(zmq.PUB)
186
+ lowstate_sock.bind(f"tcp://0.0.0.0:{LOWSTATE_PORT}")
187
+
188
+ state_period = 0.002 # ~500 hz
189
+ shutdown_event = threading.Event()
190
+
191
+ # start observation forwarding in background thread
192
+ t_state = threading.Thread(
193
+ target=state_forward_loop,
194
+ args=(lowstate_sub, lowstate_sock, state_period, shutdown_event),
195
+ )
196
+ t_state.start()
197
+
198
+ print("bridge running (lowstate -> zmq, lowcmd -> dds)")
199
+
200
+ # run command forwarding in main thread
201
+ try:
202
+ cmd_forward_loop(lowcmd_sock, lowcmd_pub_debug, crc)
203
+ except KeyboardInterrupt:
204
+ print("shutting down bridge...")
205
+ finally:
206
+ shutdown_event.set()
207
+ ctx.term() # terminates blocking zmq.recv() calls
208
+ t_state.join(timeout=2.0)
209
+
210
+
211
+ if __name__ == "__main__":
212
+ main()
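For completeness, here is a hedged sketch of the client side of the JSON-over-ZMQ protocol this bridge exposes: subscribe to LowState on port 6001 and push LowCmd dictionaries to port 6000. The message layout mirrors `lowstate_to_dict()` / `dict_to_lowcmd()` above, the robot IP is the default from `UnitreeG1Config`, and the all-zero command (kp, kd, and tau all zero) is intentionally inert; adjust with care on real hardware.

```python
# Client-side sketch of the bridge protocol (run run_g1_server.py on the robot first).
import json

import zmq

ROBOT_IP = "192.168.123.164"  # default from UnitreeG1Config; adjust for your setup

ctx = zmq.Context.instance()

# Subscribe to LowState messages published by the bridge
state_sock = ctx.socket(zmq.SUB)
state_sock.connect(f"tcp://{ROBOT_IP}:6001")
state_sock.setsockopt_string(zmq.SUBSCRIBE, "")

# Push LowCmd messages to the bridge's PULL socket
cmd_sock = ctx.socket(zmq.PUSH)
cmd_sock.connect(f"tcp://{ROBOT_IP}:6000")

# Read one state message and inspect the first motor
state = json.loads(state_sock.recv().decode("utf-8"))
print("first motor q:", state["data"]["motor_state"][0]["q"])

# Send an all-zero command for 35 motors (zero gains and torque, so it commands nothing)
cmd = {
    "topic": "rt/lowcmd",
    "data": {
        "mode_pr": 0,
        "mode_machine": state["data"]["mode_machine"],
        "motor_cmd": [
            {"mode": 1, "q": 0.0, "dq": 0.0, "kp": 0.0, "kd": 0.0, "tau": 0.0} for _ in range(35)
        ],
    },
}
cmd_sock.send(json.dumps(cmd).encode("utf-8"))
```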