diff --git a/lerobot/src/lerobot/cameras/opencv/__init__.py b/lerobot/src/lerobot/cameras/opencv/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bb7c12a7aa99a2f615ebe326dcc72226d7f48485 --- /dev/null +++ b/lerobot/src/lerobot/cameras/opencv/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .camera_opencv import OpenCVCamera +from .configuration_opencv import OpenCVCameraConfig + +__all__ = ["OpenCVCamera", "OpenCVCameraConfig"] diff --git a/lerobot/src/lerobot/cameras/opencv/camera_opencv.py b/lerobot/src/lerobot/cameras/opencv/camera_opencv.py new file mode 100644 index 0000000000000000000000000000000000000000..2026bad42e9ff325560d748669742ffb00ee5168 --- /dev/null +++ b/lerobot/src/lerobot/cameras/opencv/camera_opencv.py @@ -0,0 +1,541 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Provides the OpenCVCamera class for capturing frames from cameras using OpenCV. +""" + +import logging +import math +import os +import platform +import time +from pathlib import Path +from threading import Event, Lock, Thread +from typing import Any + +from numpy.typing import NDArray # type: ignore # TODO: add type stubs for numpy.typing + +# Fix MSMF hardware transform compatibility for Windows before importing cv2 +if platform.system() == "Windows" and "OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS" not in os.environ: + os.environ["OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS"] = "0" +import cv2 # type: ignore # TODO: add type stubs for OpenCV + +from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError + +from ..camera import Camera +from ..utils import get_cv2_backend, get_cv2_rotation +from .configuration_opencv import ColorMode, OpenCVCameraConfig + +# NOTE(Steven): The maximum opencv device index depends on your operating system. For instance, +# if you have 3 cameras, they should be associated with indices 0, 1, and 2. This is the case +# on macOS. On Ubuntu, however, the indices can be arbitrary, such as 6, 16, or 23. +# When you change the USB port or reboot the computer, the operating system might +# treat the same cameras as new devices. Thus we use a higher upper bound when searching for indices. +MAX_OPENCV_INDEX = 60 + +logger = logging.getLogger(__name__) + + +class OpenCVCamera(Camera): + """ + Manages camera interactions using OpenCV for efficient frame recording.
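The index-scanning note above can be illustrated with plain OpenCV; a hedged, standalone sketch (the probe loop and the cap of 10 are illustrative only — `find_cameras()` below implements the real scan up to `MAX_OPENCV_INDEX`):
```python
import cv2

# Probe which device indices answer; release each handle immediately.
available = []
for index in range(10):  # small cap for speed; this module scans up to 60
    cap = cv2.VideoCapture(index)
    if cap.isOpened():
        available.append(index)
    cap.release()
print(f"Cameras responding: {available}")
```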
+ + This class provides a high-level interface to connect to, configure, and read + frames from cameras compatible with OpenCV's VideoCapture. It supports both + synchronous and asynchronous frame reading. + + An OpenCVCamera instance requires a camera index (e.g., 0) or a device path + (e.g., '/dev/video0' on Linux). Camera indices can be unstable across reboots + or port changes, especially on Linux. Use the provided utility script to find + available camera indices or paths: + ```bash + lerobot-find-cameras opencv + ``` + + The camera's default settings (FPS, resolution, color mode) are used unless + overridden in the configuration. + + Example: + ```python + from lerobot.cameras.opencv import OpenCVCamera + from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig, ColorMode, Cv2Rotation + + # Basic usage with camera index 0 + config = OpenCVCameraConfig(index_or_path=0) + camera = OpenCVCamera(config) + camera.connect() + + # Read 1 frame synchronously + color_image = camera.read() + print(color_image.shape) + + # Read 1 frame asynchronously + async_image = camera.async_read() + + # When done, properly disconnect the camera using + camera.disconnect() + + # Example with custom settings + custom_config = OpenCVCameraConfig( + index_or_path='/dev/video0', # Or use an index + fps=30, + width=1280, + height=720, + color_mode=ColorMode.RGB, + rotation=Cv2Rotation.ROTATE_90 + ) + custom_camera = OpenCVCamera(custom_config) + # ... connect, read, disconnect ... + ``` + """ + + def __init__(self, config: OpenCVCameraConfig): + """ + Initializes the OpenCVCamera instance. + + Args: + config: The configuration settings for the camera. + """ + super().__init__(config) + + self.config = config + self.index_or_path = config.index_or_path + + self.fps = config.fps + self.color_mode = config.color_mode + self.warmup_s = config.warmup_s + + self.videocapture: cv2.VideoCapture | None = None + + self.thread: Thread | None = None + self.stop_event: Event | None = None + self.frame_lock: Lock = Lock() + self.latest_frame: NDArray[Any] | None = None + self.new_frame_event: Event = Event() + + self.rotation: int | None = get_cv2_rotation(config.rotation) + self.backend: int = get_cv2_backend() + + if self.height and self.width: + self.capture_width, self.capture_height = self.width, self.height + if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]: + self.capture_width, self.capture_height = self.height, self.width + + def __str__(self) -> str: + return f"{self.__class__.__name__}({self.index_or_path})" + + @property + def is_connected(self) -> bool: + """Checks if the camera is currently connected and opened.""" + return isinstance(self.videocapture, cv2.VideoCapture) and self.videocapture.isOpened() + + def connect(self, warmup: bool = True) -> None: + """ + Connects to the OpenCV camera specified in the configuration. + + Initializes the OpenCV VideoCapture object, sets desired camera properties + (FPS, width, height), and performs initial checks. + + Raises: + DeviceAlreadyConnectedError: If the camera is already connected. + ConnectionError: If the specified camera index/path is not found or the camera is found but fails to open. + RuntimeError: If the camera opens but fails to apply requested FPS/resolution settings. + """ + if self.is_connected: + raise DeviceAlreadyConnectedError(f"{self} is already connected.") + + # Use 1 thread for OpenCV operations to avoid potential conflicts or + # blocking in multi-threaded applications, especially during data collection.
cv2.setNumThreads(1) + + self.videocapture = cv2.VideoCapture(self.index_or_path, self.backend) + + if not self.videocapture.isOpened(): + self.videocapture.release() + self.videocapture = None + raise ConnectionError( + f"Failed to open {self}. Run `lerobot-find-cameras opencv` to find available cameras." + ) + + self._configure_capture_settings() + + if warmup: + start_time = time.time() + while time.time() - start_time < self.warmup_s: + self.read() + time.sleep(0.1) + + logger.info(f"{self} connected.") + + def _configure_capture_settings(self) -> None: + """ + Applies the FOURCC, FPS, width, and height settings from the configuration to the connected camera. + + This method attempts to set the camera properties via OpenCV. It checks if + the camera successfully applied the settings and raises an error if not. + FOURCC is set first (if specified) as it can affect the available FPS and resolution options. + The values come from the instance configuration (`self.config.fourcc`, `self.fps`, + `self.width`, `self.height`); any value that is None is skipped or auto-detected + from the device defaults. + + Raises: + RuntimeError: If the camera fails to set any of the specified properties + to the requested value. + DeviceNotConnectedError: If the camera is not connected when attempting + to configure settings. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"Cannot configure settings for {self} as it is not connected.") + + # Set FOURCC first (if specified) as it can affect available FPS/resolution options + if self.config.fourcc is not None: + self._validate_fourcc() + if self.videocapture is None: + raise DeviceNotConnectedError(f"{self} videocapture is not initialized") + + default_width = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_WIDTH))) + default_height = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_HEIGHT))) + + if self.width is None or self.height is None: + self.width, self.height = default_width, default_height + self.capture_width, self.capture_height = default_width, default_height + if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]: + self.width, self.height = default_height, default_width + self.capture_width, self.capture_height = default_width, default_height + else: + self._validate_width_and_height() + + if self.fps is None: + self.fps = self.videocapture.get(cv2.CAP_PROP_FPS) + else: + self._validate_fps() + + def _validate_fps(self) -> None: + """Validates and sets the camera's frames per second (FPS).""" + + if self.videocapture is None: + raise DeviceNotConnectedError(f"{self} videocapture is not initialized") + + if self.fps is None: + raise ValueError(f"{self} FPS is not set") + + success = self.videocapture.set(cv2.CAP_PROP_FPS, float(self.fps)) + actual_fps = self.videocapture.get(cv2.CAP_PROP_FPS) + # Use math.isclose for robust float comparison + if not success or not math.isclose(self.fps, actual_fps, rel_tol=1e-3): + raise RuntimeError(f"{self} failed to set fps={self.fps} ({actual_fps=}).") + + def _validate_fourcc(self) -> None: + """Validates and sets the camera's FOURCC code.""" + + fourcc_code = cv2.VideoWriter_fourcc(*self.config.fourcc) + + if self.videocapture is None: + raise DeviceNotConnectedError(f"{self} videocapture is not initialized") + + success = self.videocapture.set(cv2.CAP_PROP_FOURCC, fourcc_code) + actual_fourcc_code = self.videocapture.get(cv2.CAP_PROP_FOURCC) + + # Convert actual
FOURCC code back to string for comparison + actual_fourcc_code_int = int(actual_fourcc_code) + actual_fourcc = "".join([chr((actual_fourcc_code_int >> 8 * i) & 0xFF) for i in range(4)]) + + if not success or actual_fourcc != self.config.fourcc: + logger.warning( + f"{self} failed to set fourcc={self.config.fourcc} (actual={actual_fourcc}, success={success}). " + f"Continuing with default format." + ) + + def _validate_width_and_height(self) -> None: + """Validates and sets the camera's frame capture width and height.""" + + if self.videocapture is None: + raise DeviceNotConnectedError(f"{self} videocapture is not initialized") + + if self.capture_width is None or self.capture_height is None: + raise ValueError(f"{self} capture_width or capture_height is not set") + + width_success = self.videocapture.set(cv2.CAP_PROP_FRAME_WIDTH, float(self.capture_width)) + height_success = self.videocapture.set(cv2.CAP_PROP_FRAME_HEIGHT, float(self.capture_height)) + + actual_width = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_WIDTH))) + if not width_success or self.capture_width != actual_width: + raise RuntimeError( + f"{self} failed to set capture_width={self.capture_width} ({actual_width=}, {width_success=})." + ) + + actual_height = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_HEIGHT))) + if not height_success or self.capture_height != actual_height: + raise RuntimeError( + f"{self} failed to set capture_height={self.capture_height} ({actual_height=}, {height_success=})." + ) + + @staticmethod + def find_cameras() -> list[dict[str, Any]]: + """ + Detects available OpenCV cameras connected to the system. + + On Linux, it scans '/dev/video*' paths. On other systems (like macOS, Windows), + it checks indices from 0 up to `MAX_OPENCV_INDEX`. + + Returns: + List[Dict[str, Any]]: A list of dictionaries, + where each dictionary contains 'type', 'id' (port index or path), + and the default profile properties (width, height, fps, format). + """ + found_cameras_info = [] + + targets_to_scan: list[str | int] + if platform.system() == "Linux": + possible_paths = sorted(Path("/dev").glob("video*"), key=lambda p: p.name) + targets_to_scan = [str(p) for p in possible_paths] + else: + targets_to_scan = [int(i) for i in range(MAX_OPENCV_INDEX)] + + for target in targets_to_scan: + camera = cv2.VideoCapture(target) + if camera.isOpened(): + default_width = int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)) + default_height = int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)) + default_fps = camera.get(cv2.CAP_PROP_FPS) + default_format = camera.get(cv2.CAP_PROP_FORMAT) + + # Get FOURCC code and convert to string + default_fourcc_code = camera.get(cv2.CAP_PROP_FOURCC) + default_fourcc_code_int = int(default_fourcc_code) + default_fourcc = "".join([chr((default_fourcc_code_int >> 8 * i) & 0xFF) for i in range(4)]) + + camera_info = { + "name": f"OpenCV Camera @ {target}", + "type": "OpenCV", + "id": target, + "backend_api": camera.getBackendName(), + "default_stream_profile": { + "format": default_format, + "fourcc": default_fourcc, + "width": default_width, + "height": default_height, + "fps": default_fps, + }, + } + + found_cameras_info.append(camera_info) + camera.release() + + return found_cameras_info + + def read(self, color_mode: ColorMode | None = None) -> NDArray[Any]: + """ + Reads a single frame synchronously from the camera. + + This is a blocking call. It waits for the next available frame from the + camera hardware via OpenCV. 
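A hedged usage sketch for the synchronous path described above: pacing `read()` calls to estimate the effective frame rate (`camera` is assumed to be any camera instance from this module; the loop bound is arbitrary):
```python
import time

camera.connect()
n = 30
t0 = time.perf_counter()
for _ in range(n):
    frame = camera.read()
print(f"~{n / (time.perf_counter() - t0):.1f} FPS, frame shape {frame.shape}")
camera.disconnect()
```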
+ + Args: + color_mode (Optional[ColorMode]): If specified, overrides the default + color mode (`self.color_mode`) for this read operation (e.g., + request RGB even if default is BGR). + + Returns: + np.ndarray: The captured frame as a NumPy array in the format + (height, width, channels), using the specified or default + color mode and applying any configured rotation. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + RuntimeError: If reading the frame from the camera fails or if the + received frame dimensions don't match expectations before rotation. + ValueError: If an invalid `color_mode` is requested. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + start_time = time.perf_counter() + + if self.videocapture is None: + raise DeviceNotConnectedError(f"{self} videocapture is not initialized") + + ret, frame = self.videocapture.read() + + if not ret or frame is None: + raise RuntimeError(f"{self} read failed (status={ret}).") + + processed_frame = self._postprocess_image(frame, color_mode) + + read_duration_ms = (time.perf_counter() - start_time) * 1e3 + logger.debug(f"{self} read took: {read_duration_ms:.1f}ms") + + return processed_frame + + def _postprocess_image(self, image: NDArray[Any], color_mode: ColorMode | None = None) -> NDArray[Any]: + """ + Applies color conversion, dimension validation, and rotation to a raw frame. + + Args: + image (np.ndarray): The raw image frame (expected BGR format from OpenCV). + color_mode (Optional[ColorMode]): The target color mode (RGB or BGR). If None, + uses the instance's default `self.color_mode`. + + Returns: + np.ndarray: The processed image frame. + + Raises: + ValueError: If the requested `color_mode` is invalid. + RuntimeError: If the raw frame dimensions do not match the configured + `width` and `height`. + """ + requested_color_mode = self.color_mode if color_mode is None else color_mode + + if requested_color_mode not in (ColorMode.RGB, ColorMode.BGR): + raise ValueError( + f"Invalid color mode '{requested_color_mode}'. Expected {ColorMode.RGB} or {ColorMode.BGR}." + ) + + h, w, c = image.shape + + if h != self.capture_height or w != self.capture_width: + raise RuntimeError( + f"{self} frame width={w} or height={h} do not match configured width={self.capture_width} or height={self.capture_height}." + ) + + if c != 3: + raise RuntimeError(f"{self} frame channels={c} do not match expected 3 channels (RGB/BGR).") + + processed_image = image + if requested_color_mode == ColorMode.RGB: + processed_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + + if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_180]: + processed_image = cv2.rotate(processed_image, self.rotation) + + return processed_image + + def _read_loop(self) -> None: + """ + Internal loop run by the background thread for asynchronous reading. + + On each iteration: + 1. Reads a color frame + 2. Stores result in latest_frame (thread-safe) + 3. Sets new_frame_event to notify listeners + + Stops on DeviceNotConnectedError, logs other errors and continues. 
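In outline, the handoff between this loop and `async_read()` follows the latest-frame pattern below (a standalone, hedged sketch, not the class's actual code):
```python
from threading import Event, Lock

frame_lock, new_frame_event = Lock(), Event()
latest_frame = None

def publish(frame):
    """Producer side: store the frame under the lock, then wake waiters."""
    global latest_frame
    with frame_lock:
        latest_frame = frame
    new_frame_event.set()

def consume(timeout_s: float = 0.2):
    """Consumer side: wait for a fresh frame, then take it under the lock."""
    if not new_frame_event.wait(timeout_s):
        raise TimeoutError("no frame within timeout")
    with frame_lock:
        new_frame_event.clear()
        return latest_frame
```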
+ """ + if self.stop_event is None: + raise RuntimeError(f"{self}: stop_event is not initialized before starting read loop.") + + while not self.stop_event.is_set(): + try: + color_image = self.read() + + with self.frame_lock: + self.latest_frame = color_image + self.new_frame_event.set() + + except DeviceNotConnectedError: + break + except Exception as e: + logger.warning(f"Error reading frame in background thread for {self}: {e}") + + def _start_read_thread(self) -> None: + """Starts or restarts the background read thread if it's not running.""" + if self.thread is not None and self.thread.is_alive(): + self.thread.join(timeout=0.1) + if self.stop_event is not None: + self.stop_event.set() + + self.stop_event = Event() + self.thread = Thread(target=self._read_loop, args=(), name=f"{self}_read_loop") + self.thread.daemon = True + self.thread.start() + + def _stop_read_thread(self) -> None: + """Signals the background read thread to stop and waits for it to join.""" + if self.stop_event is not None: + self.stop_event.set() + + if self.thread is not None and self.thread.is_alive(): + self.thread.join(timeout=2.0) + + self.thread = None + self.stop_event = None + + def async_read(self, timeout_ms: float = 200) -> NDArray[Any]: + """ + Reads the latest available frame asynchronously. + + This method retrieves the most recent frame captured by the background + read thread. It does not block waiting for the camera hardware directly, + but may wait up to timeout_ms for the background thread to provide a frame. + + Args: + timeout_ms (float): Maximum time in milliseconds to wait for a frame + to become available. Defaults to 200ms (0.2 seconds). + + Returns: + np.ndarray: The latest captured frame as a NumPy array in the format + (height, width, channels), processed according to configuration. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + TimeoutError: If no frame becomes available within the specified timeout. + RuntimeError: If an unexpected error occurs. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + if self.thread is None or not self.thread.is_alive(): + self._start_read_thread() + + if not self.new_frame_event.wait(timeout=timeout_ms / 1000.0): + thread_alive = self.thread is not None and self.thread.is_alive() + raise TimeoutError( + f"Timed out waiting for frame from camera {self} after {timeout_ms} ms. " + f"Read thread alive: {thread_alive}." + ) + + with self.frame_lock: + frame = self.latest_frame + self.new_frame_event.clear() + + if frame is None: + raise RuntimeError(f"Internal error: Event set but no frame available for {self}.") + + return frame + + def disconnect(self) -> None: + """ + Disconnects from the camera and cleans up resources. + + Stops the background read thread (if running) and releases the OpenCV + VideoCapture object. + + Raises: + DeviceNotConnectedError: If the camera is already disconnected. 
+ """ + if not self.is_connected and self.thread is None: + raise DeviceNotConnectedError(f"{self} not connected.") + + if self.thread is not None: + self._stop_read_thread() + + if self.videocapture is not None: + self.videocapture.release() + self.videocapture = None + + logger.info(f"{self} disconnected.") diff --git a/lerobot/src/lerobot/cameras/opencv/configuration_opencv.py b/lerobot/src/lerobot/cameras/opencv/configuration_opencv.py new file mode 100644 index 0000000000000000000000000000000000000000..88ce873432972b561fa7d68062d4b50c7d3efd04 --- /dev/null +++ b/lerobot/src/lerobot/cameras/opencv/configuration_opencv.py @@ -0,0 +1,85 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from pathlib import Path + +from ..configs import CameraConfig, ColorMode, Cv2Rotation + +__all__ = ["OpenCVCameraConfig", "ColorMode", "Cv2Rotation"] + + +@CameraConfig.register_subclass("opencv") +@dataclass +class OpenCVCameraConfig(CameraConfig): + """Configuration class for OpenCV-based camera devices or video files. + + This class provides configuration options for cameras accessed through OpenCV, + supporting both physical camera devices and video files. It includes settings + for resolution, frame rate, color mode, and image rotation. + + Example configurations: + ```python + # Basic configurations + OpenCVCameraConfig(0, 30, 1280, 720) # 1280x720 @ 30FPS + OpenCVCameraConfig(/dev/video4, 60, 640, 480) # 640x480 @ 60FPS + + # Advanced configurations with FOURCC format + OpenCVCameraConfig(128422271347, 30, 640, 480, rotation=Cv2Rotation.ROTATE_90, fourcc="MJPG") # With 90° rotation and MJPG format + OpenCVCameraConfig(0, 30, 1280, 720, fourcc="YUYV") # With YUYV format + ``` + + Attributes: + index_or_path: Either an integer representing the camera device index, + or a Path object pointing to a video file. + fps: Requested frames per second for the color stream. + width: Requested frame width in pixels for the color stream. + height: Requested frame height in pixels for the color stream. + color_mode: Color mode for image output (RGB or BGR). Defaults to RGB. + rotation: Image rotation setting (0°, 90°, 180°, or 270°). Defaults to no rotation. + warmup_s: Time reading frames before returning from connect (in seconds) + fourcc: FOURCC code for video format (e.g., "MJPG", "YUYV", "I420"). Defaults to None (auto-detect). + + Note: + - Only 3-channel color output (RGB/BGR) is currently supported. + - FOURCC codes must be 4-character strings (e.g., "MJPG", "YUYV"). Some common FOUCC codes: https://learn.microsoft.com/en-us/windows/win32/medfound/video-fourccs#fourcc-constants + - Setting FOURCC can help achieve higher frame rates on some cameras. 
+ """ + + index_or_path: int | Path + color_mode: ColorMode = ColorMode.RGB + rotation: Cv2Rotation = Cv2Rotation.NO_ROTATION + warmup_s: int = 1 + fourcc: str | None = None + + def __post_init__(self) -> None: + if self.color_mode not in (ColorMode.RGB, ColorMode.BGR): + raise ValueError( + f"`color_mode` is expected to be {ColorMode.RGB.value} or {ColorMode.BGR.value}, but {self.color_mode} is provided." + ) + + if self.rotation not in ( + Cv2Rotation.NO_ROTATION, + Cv2Rotation.ROTATE_90, + Cv2Rotation.ROTATE_180, + Cv2Rotation.ROTATE_270, + ): + raise ValueError( + f"`rotation` is expected to be in {(Cv2Rotation.NO_ROTATION, Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_180, Cv2Rotation.ROTATE_270)}, but {self.rotation} is provided." + ) + + if self.fourcc is not None and (not isinstance(self.fourcc, str) or len(self.fourcc) != 4): + raise ValueError( + f"`fourcc` must be a 4-character string (e.g., 'MJPG', 'YUYV'), but '{self.fourcc}' is provided." + ) diff --git a/lerobot/src/lerobot/cameras/reachy2_camera/__init__.py b/lerobot/src/lerobot/cameras/reachy2_camera/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc9d87f781dd48b14349d5d22fa5d2cf31367430 --- /dev/null +++ b/lerobot/src/lerobot/cameras/reachy2_camera/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .configuration_reachy2_camera import Reachy2CameraConfig +from .reachy2_camera import Reachy2Camera diff --git a/lerobot/src/lerobot/cameras/reachy2_camera/configuration_reachy2_camera.py b/lerobot/src/lerobot/cameras/reachy2_camera/configuration_reachy2_camera.py new file mode 100644 index 0000000000000000000000000000000000000000..ba1535042d03483ad15cbf7450d098b1de4a3140 --- /dev/null +++ b/lerobot/src/lerobot/cameras/reachy2_camera/configuration_reachy2_camera.py @@ -0,0 +1,80 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +from ..configs import CameraConfig, ColorMode + +__all__ = ["CameraConfig", "ColorMode", "Reachy2CameraConfig"] + + +@CameraConfig.register_subclass("reachy2_camera") +@dataclass +class Reachy2CameraConfig(CameraConfig): + """Configuration class for Reachy 2 camera devices. + + This class provides configuration options for Reachy 2 cameras, + supporting both the teleop and depth cameras. 
It includes settings + for resolution, frame rate, color mode, and the selection of the cameras. + + Example configurations: + ```python + # Basic configurations + Reachy2CameraConfig( + name="teleop", + image_type="left", + ip_address="192.168.0.200", # IP address of the robot + port=50065, # Port of the camera server + width=640, + height=480, + fps=30, # Not configurable for Reachy 2 cameras + color_mode=ColorMode.RGB, + ) # Left teleop camera, 640x480 @ 30FPS + ``` + + Attributes: + name: Name of the camera device. Can be "teleop" or "depth". + image_type: Type of image stream. For "teleop" camera, can be "left" or "right". + For "depth" camera, can be "rgb" or "depth". (depth is not supported yet) + fps: Requested frames per second for the color stream. Not configurable for Reachy 2 cameras. + width: Requested frame width in pixels for the color stream. + height: Requested frame height in pixels for the color stream. + color_mode: Color mode for image output (RGB or BGR). Defaults to RGB. + ip_address: IP address of the robot. Defaults to "localhost". + port: Port number for the camera server. Defaults to 50065. + + Note: + - Only 3-channel color output (RGB/BGR) is currently supported. + """ + + name: str + image_type: str + color_mode: ColorMode = ColorMode.RGB + ip_address: str | None = "localhost" + port: int = 50065 + + def __post_init__(self) -> None: + if self.name not in ["teleop", "depth"]: + raise ValueError(f"`name` is expected to be 'teleop' or 'depth', but {self.name} is provided.") + if (self.name == "teleop" and self.image_type not in ["left", "right"]) or ( + self.name == "depth" and self.image_type not in ["rgb", "depth"] + ): + raise ValueError( + f"`image_type` is expected to be 'left' or 'right' for teleop camera, and 'rgb' or 'depth' for depth camera, but {self.image_type} is provided." + ) + + if self.color_mode not in ["rgb", "bgr"]: + raise ValueError( + f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided." + ) diff --git a/lerobot/src/lerobot/cameras/reachy2_camera/reachy2_camera.py b/lerobot/src/lerobot/cameras/reachy2_camera/reachy2_camera.py new file mode 100644 index 0000000000000000000000000000000000000000..b681d0f2a90f05609d87dcfcf6ba720926f70aef --- /dev/null +++ b/lerobot/src/lerobot/cameras/reachy2_camera/reachy2_camera.py @@ -0,0 +1,220 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Provides the Reachy2Camera class for capturing frames from Reachy 2 cameras using Reachy 2's CameraManager. 
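Since the class below ships without a usage example, a hedged sketch mirroring the config example in `configuration_reachy2_camera.py` (the IP address is a placeholder):
```python
from lerobot.cameras.reachy2_camera import Reachy2Camera, Reachy2CameraConfig

config = Reachy2CameraConfig(
    name="teleop",
    image_type="left",
    ip_address="192.168.0.200",  # placeholder robot IP
    width=640,
    height=480,
    fps=30,
)
camera = Reachy2Camera(config)
camera.connect()
frame = camera.read()  # (480, 640, 3), RGB by default
camera.disconnect()
```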
+""" + +from __future__ import annotations + +import logging +import os +import platform +import time +from typing import TYPE_CHECKING, Any + +from numpy.typing import NDArray # type: ignore # TODO: add type stubs for numpy.typing + +# Fix MSMF hardware transform compatibility for Windows before importing cv2 +if platform.system() == "Windows" and "OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS" not in os.environ: + os.environ["OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS"] = "0" +import cv2 # type: ignore # TODO: add type stubs for OpenCV +import numpy as np # type: ignore # TODO: add type stubs for numpy + +from lerobot.utils.import_utils import _reachy2_sdk_available + +if TYPE_CHECKING or _reachy2_sdk_available: + from reachy2_sdk.media.camera import CameraView + from reachy2_sdk.media.camera_manager import CameraManager +else: + CameraManager = None + + class CameraView: + LEFT = 0 + RIGHT = 1 + + +from lerobot.utils.errors import DeviceNotConnectedError + +from ..camera import Camera +from .configuration_reachy2_camera import ColorMode, Reachy2CameraConfig + +logger = logging.getLogger(__name__) + + +class Reachy2Camera(Camera): + """ + Manages Reachy 2 camera using Reachy 2 CameraManager. + + This class provides a high-level interface to connect to, configure, and read + frames from Reachy 2 cameras. It supports both synchronous and asynchronous + frame reading. + + An Reachy2Camera instance requires a camera name (e.g., "teleop") and an image + type (e.g., "left") to be specified in the configuration. + + The camera's default settings (FPS, resolution, color mode) are used unless + overridden in the configuration. + """ + + def __init__(self, config: Reachy2CameraConfig): + """ + Initializes the Reachy2Camera instance. + + Args: + config: The configuration settings for the camera. + """ + super().__init__(config) + + self.config = config + + self.color_mode = config.color_mode + + self.cam_manager: CameraManager | None = None + + def __str__(self) -> str: + return f"{self.__class__.__name__}({self.config.name}, {self.config.image_type})" + + @property + def is_connected(self) -> bool: + """Checks if the camera is currently connected and opened.""" + if self.config.name == "teleop": + return bool( + self.cam_manager._grpc_connected and self.cam_manager.teleop if self.cam_manager else False + ) + elif self.config.name == "depth": + return bool( + self.cam_manager._grpc_connected and self.cam_manager.depth if self.cam_manager else False + ) + else: + raise ValueError(f"Invalid camera name '{self.config.name}'. Expected 'teleop' or 'depth'.") + + def connect(self, warmup: bool = True) -> None: + """ + Connects to the Reachy2 CameraManager as specified in the configuration. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + """ + self.cam_manager = CameraManager(host=self.config.ip_address, port=self.config.port) + if self.cam_manager is None: + raise DeviceNotConnectedError(f"Could not connect to {self}.") + self.cam_manager.initialize_cameras() + + logger.info(f"{self} connected.") + + @staticmethod + def find_cameras() -> list[dict[str, Any]]: + """ + Detection not implemented for Reachy2 cameras. + """ + raise NotImplementedError("Camera detection is not implemented for Reachy2 cameras.") + + def read(self, color_mode: ColorMode | None = None) -> NDArray[Any]: + """ + Reads a single frame synchronously from the camera. + + This is a blocking call. 
+ + Args: + color_mode (Optional[ColorMode]): If specified, overrides the default + color mode (`self.color_mode`) for this read operation (e.g., + request RGB even if default is BGR). + + Returns: + np.ndarray: The captured frame as a NumPy array in the format + (height, width, channels), using the specified or default + color mode. + """ + start_time = time.perf_counter() + + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + if self.cam_manager is None: + raise DeviceNotConnectedError(f"{self} is not connected.") + + frame: NDArray[Any] = np.empty((0, 0, 3), dtype=np.uint8) + + if self.config.name == "teleop" and hasattr(self.cam_manager, "teleop"): + if self.config.image_type == "left": + frame = self.cam_manager.teleop.get_frame( + CameraView.LEFT, size=(self.config.width, self.config.height) + )[0] + elif self.config.image_type == "right": + frame = self.cam_manager.teleop.get_frame( + CameraView.RIGHT, size=(self.config.width, self.config.height) + )[0] + elif self.config.name == "depth" and hasattr(self.cam_manager, "depth"): + if self.config.image_type == "depth": + frame = self.cam_manager.depth.get_depth_frame()[0] + elif self.config.image_type == "rgb": + frame = self.cam_manager.depth.get_frame(size=(self.config.width, self.config.height))[0] + else: + raise ValueError(f"Invalid camera name '{self.config.name}'. Expected 'teleop' or 'depth'.") + + if frame is None: + return np.empty((0, 0, 3), dtype=np.uint8) + + requested_color_mode = self.config.color_mode if color_mode is None else color_mode + if requested_color_mode == "rgb": + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + + read_duration_ms = (time.perf_counter() - start_time) * 1e3 + logger.debug(f"{self} read took: {read_duration_ms:.1f}ms") + + return frame + + def async_read(self, timeout_ms: float = 200) -> NDArray[Any]: + """ + Reads the latest available frame. + + This method retrieves the most recent frame available in Reachy 2's low-level software. + + Args: + timeout_ms (float): Unused; present for API compatibility with other cameras. + This implementation performs a synchronous read. + + Returns: + np.ndarray: The latest captured frame as a NumPy array in the format + (height, width, channels), processed according to configuration. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + RuntimeError: If an unexpected error occurs. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + frame = self.read() + + if frame is None: + raise RuntimeError(f"Internal error: No frame available for {self}.") + + return frame + + def disconnect(self) -> None: + """ + Disconnects from Reachy 2's camera manager. + + Raises: + DeviceNotConnectedError: If the camera is already disconnected. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} not connected.") + + if self.cam_manager is not None: + self.cam_manager.disconnect() + + logger.info(f"{self} disconnected.") diff --git a/lerobot/src/lerobot/cameras/realsense/__init__.py b/lerobot/src/lerobot/cameras/realsense/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bc5184a99bc33c17d0c759dc5a561ce800f5a278 --- /dev/null +++ b/lerobot/src/lerobot/cameras/realsense/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .camera_realsense import RealSenseCamera +from .configuration_realsense import RealSenseCameraConfig diff --git a/lerobot/src/lerobot/cameras/realsense/camera_realsense.py b/lerobot/src/lerobot/cameras/realsense/camera_realsense.py new file mode 100644 index 0000000000000000000000000000000000000000..e4b8c3164c7f9b44bd2ed24a73ddfdf7d1961d6d --- /dev/null +++ b/lerobot/src/lerobot/cameras/realsense/camera_realsense.py @@ -0,0 +1,568 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Provides the RealSenseCamera class for capturing frames from Intel RealSense cameras. +""" + +import logging +import time +from threading import Event, Lock, Thread +from typing import Any + +import cv2 # type: ignore # TODO: add type stubs for OpenCV +import numpy as np # type: ignore # TODO: add type stubs for numpy +from numpy.typing import NDArray # type: ignore # TODO: add type stubs for numpy.typing + +try: + import pyrealsense2 as rs # type: ignore # TODO: add type stubs for pyrealsense2 +except Exception as e: + logging.info(f"Could not import realsense: {e}") + +from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError + +from ..camera import Camera +from ..configs import ColorMode +from ..utils import get_cv2_rotation +from .configuration_realsense import RealSenseCameraConfig + +logger = logging.getLogger(__name__) + + +class RealSenseCamera(Camera): + """ + Manages interactions with Intel RealSense cameras for frame and depth recording. + + This class provides an interface similar to `OpenCVCamera` but tailored for + RealSense devices, leveraging the `pyrealsense2` library. It uses the camera's + unique serial number for identification, offering more stability than device + indices, especially on Linux. It also supports capturing depth maps alongside + color frames. + + Use the provided utility script to list available cameras (serial numbers) and their default stream profiles: + ```bash + lerobot-find-cameras realsense + ``` + + A `RealSenseCamera` instance requires a configuration object specifying the + camera's serial number or a unique device name. If using the name, ensure only + one camera with that name is connected. + + The camera's default settings (FPS, resolution, color mode) from the stream + profile are used unless overridden in the configuration.
+ + Example: + ```python + from lerobot.cameras.realsense import RealSenseCamera, RealSenseCameraConfig + from lerobot.cameras import ColorMode, Cv2Rotation + + # Basic usage with serial number + config = RealSenseCameraConfig(serial_number_or_name="0123456789") # Replace with actual SN + camera = RealSenseCamera(config) + camera.connect() + + # Read 1 frame synchronously + color_image = camera.read() + print(color_image.shape) + + # Read 1 frame asynchronously + async_image = camera.async_read() + + # When done, properly disconnect the camera using + camera.disconnect() + + # Example with depth capture and custom settings + custom_config = RealSenseCameraConfig( + serial_number_or_name="0123456789", # Replace with actual SN + fps=30, + width=1280, + height=720, + color_mode=ColorMode.BGR, # Request BGR output + rotation=Cv2Rotation.NO_ROTATION, + use_depth=True + ) + depth_camera = RealSenseCamera(custom_config) + depth_camera.connect() + + # Read 1 depth frame + depth_map = depth_camera.read_depth() + + # Example using a unique camera name + name_config = RealSenseCameraConfig(serial_number_or_name="Intel RealSense D435") # If unique + name_camera = RealSenseCamera(name_config) + # ... connect, read, disconnect ... + ``` + """ + + def __init__(self, config: RealSenseCameraConfig): + """ + Initializes the RealSenseCamera instance. + + Args: + config: The configuration settings for the camera. + """ + + super().__init__(config) + + self.config = config + + if config.serial_number_or_name.isdigit(): + self.serial_number = config.serial_number_or_name + else: + self.serial_number = self._find_serial_number_from_name(config.serial_number_or_name) + + self.fps = config.fps + self.color_mode = config.color_mode + self.use_depth = config.use_depth + self.warmup_s = config.warmup_s + + self.rs_pipeline: rs.pipeline | None = None + self.rs_profile: rs.pipeline_profile | None = None + + self.thread: Thread | None = None + self.stop_event: Event | None = None + self.frame_lock: Lock = Lock() + self.latest_frame: NDArray[Any] | None = None + self.new_frame_event: Event = Event() + + self.rotation: int | None = get_cv2_rotation(config.rotation) + + if self.height and self.width: + self.capture_width, self.capture_height = self.width, self.height + if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]: + self.capture_width, self.capture_height = self.height, self.width + + def __str__(self) -> str: + return f"{self.__class__.__name__}({self.serial_number})" + + @property + def is_connected(self) -> bool: + """Checks if the camera pipeline is started and streams are active.""" + return self.rs_pipeline is not None and self.rs_profile is not None + + def connect(self, warmup: bool = True) -> None: + """ + Connects to the RealSense camera specified in the configuration. + + Initializes the RealSense pipeline, configures the required streams (color + and optionally depth), starts the pipeline, and validates the actual stream settings. + + Raises: + DeviceAlreadyConnectedError: If the camera is already connected. + ValueError: If the configuration is invalid (e.g., missing serial/name, name not unique). + ConnectionError: If the camera is found but fails to start the pipeline or no RealSense devices are detected at all. + RuntimeError: If the pipeline starts but fails to apply requested settings. 
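For orientation, a hedged sketch of the bare `pyrealsense2` calls this method wraps (the serial number and stream mode are placeholders):
```python
import pyrealsense2 as rs

pipeline = rs.pipeline()
cfg = rs.config()
cfg.enable_device("0123456789")  # placeholder serial number
cfg.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
profile = pipeline.start(cfg)

ok, frames = pipeline.try_wait_for_frames(timeout_ms=200)
if ok:
    color = frames.get_color_frame()
pipeline.stop()
```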
+ """ + if self.is_connected: + raise DeviceAlreadyConnectedError(f"{self} is already connected.") + + self.rs_pipeline = rs.pipeline() + rs_config = rs.config() + self._configure_rs_pipeline_config(rs_config) + + try: + self.rs_profile = self.rs_pipeline.start(rs_config) + except RuntimeError as e: + self.rs_profile = None + self.rs_pipeline = None + raise ConnectionError( + f"Failed to open {self}.Run `lerobot-find-cameras realsense` to find available cameras." + ) from e + + self._configure_capture_settings() + + if warmup: + time.sleep( + 1 + ) # NOTE(Steven): RS cameras need a bit of time to warm up before the first read. If we don't wait, the first read from the warmup will raise. + start_time = time.time() + while time.time() - start_time < self.warmup_s: + self.read() + time.sleep(0.1) + + logger.info(f"{self} connected.") + + @staticmethod + def find_cameras() -> list[dict[str, Any]]: + """ + Detects available Intel RealSense cameras connected to the system. + + Returns: + List[Dict[str, Any]]: A list of dictionaries, + where each dictionary contains 'type', 'id' (serial number), 'name', + firmware version, USB type, and other available specs, and the default profile properties (width, height, fps, format). + + Raises: + OSError: If pyrealsense2 is not installed. + ImportError: If pyrealsense2 is not installed. + """ + found_cameras_info = [] + context = rs.context() + devices = context.query_devices() + + for device in devices: + camera_info = { + "name": device.get_info(rs.camera_info.name), + "type": "RealSense", + "id": device.get_info(rs.camera_info.serial_number), + "firmware_version": device.get_info(rs.camera_info.firmware_version), + "usb_type_descriptor": device.get_info(rs.camera_info.usb_type_descriptor), + "physical_port": device.get_info(rs.camera_info.physical_port), + "product_id": device.get_info(rs.camera_info.product_id), + "product_line": device.get_info(rs.camera_info.product_line), + } + + # Get stream profiles for each sensor + sensors = device.query_sensors() + for sensor in sensors: + profiles = sensor.get_stream_profiles() + + for profile in profiles: + if profile.is_video_stream_profile() and profile.is_default(): + vprofile = profile.as_video_stream_profile() + stream_info = { + "stream_type": vprofile.stream_name(), + "format": vprofile.format().name, + "width": vprofile.width(), + "height": vprofile.height(), + "fps": vprofile.fps(), + } + camera_info["default_stream_profile"] = stream_info + + found_cameras_info.append(camera_info) + + return found_cameras_info + + def _find_serial_number_from_name(self, name: str) -> str: + """Finds the serial number for a given unique camera name.""" + camera_infos = self.find_cameras() + found_devices = [cam for cam in camera_infos if str(cam["name"]) == name] + + if not found_devices: + available_names = [cam["name"] for cam in camera_infos] + raise ValueError( + f"No RealSense camera found with name '{name}'. Available camera names: {available_names}" + ) + + if len(found_devices) > 1: + serial_numbers = [dev["serial_number"] for dev in found_devices] + raise ValueError( + f"Multiple RealSense cameras found with name '{name}'. " + f"Please use a unique serial number instead. 
Found SNs: {serial_numbers}" + ) + + serial_number = str(found_devices[0]["id"]) + return serial_number + + def _configure_rs_pipeline_config(self, rs_config: Any) -> None: + """Creates and configures the RealSense pipeline configuration object.""" + rs_config.enable_device(self.serial_number) + + if self.width and self.height and self.fps: + rs_config.enable_stream( + rs.stream.color, self.capture_width, self.capture_height, rs.format.rgb8, self.fps + ) + if self.use_depth: + rs_config.enable_stream( + rs.stream.depth, self.capture_width, self.capture_height, rs.format.z16, self.fps + ) + else: + rs_config.enable_stream(rs.stream.color) + if self.use_depth: + rs_config.enable_stream(rs.stream.depth) + + def _configure_capture_settings(self) -> None: + """Sets fps, width, and height from device stream if not already configured. + + Uses the color stream profile to update unset attributes. Handles rotation by + swapping width/height when needed. Original capture dimensions are always stored. + + Raises: + DeviceNotConnectedError: If device is not connected. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"Cannot validate settings for {self} as it is not connected.") + + if self.rs_profile is None: + raise RuntimeError(f"{self}: rs_profile must be initialized before use.") + + stream = self.rs_profile.get_stream(rs.stream.color).as_video_stream_profile() + + if self.fps is None: + self.fps = stream.fps() + + if self.width is None or self.height is None: + actual_width = int(round(stream.width())) + actual_height = int(round(stream.height())) + if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]: + self.width, self.height = actual_height, actual_width + self.capture_width, self.capture_height = actual_width, actual_height + else: + self.width, self.height = actual_width, actual_height + self.capture_width, self.capture_height = actual_width, actual_height + + def read_depth(self, timeout_ms: int = 200) -> NDArray[Any]: + """ + Reads a single frame (depth) synchronously from the camera. + + This is a blocking call. It waits for a coherent set of frames (depth) + from the camera hardware via the RealSense pipeline. + + Args: + timeout_ms (int): Maximum time in milliseconds to wait for a frame. Defaults to 200ms. + + Returns: + np.ndarray: The depth map as a NumPy array (height, width) + of type `np.uint16` (raw depth values in millimeters) and rotation. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + RuntimeError: If reading frames from the pipeline fails or frames are invalid. + """ + + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + if not self.use_depth: + raise RuntimeError( + f"Failed to capture depth frame with read_depth(): the depth stream is not enabled for {self}."
+ ) + + start_time = time.perf_counter() + + if self.rs_pipeline is None: + raise RuntimeError(f"{self}: rs_pipeline must be initialized before use.") + + ret, frame = self.rs_pipeline.try_wait_for_frames(timeout_ms=timeout_ms) + + if not ret or frame is None: + raise RuntimeError(f"{self} read_depth failed (status={ret}).") + + depth_frame = frame.get_depth_frame() + depth_map = np.asanyarray(depth_frame.get_data()) + + depth_map_processed = self._postprocess_image(depth_map, depth_frame=True) + + read_duration_ms = (time.perf_counter() - start_time) * 1e3 + logger.debug(f"{self} read took: {read_duration_ms:.1f}ms") + + return depth_map_processed + + def read(self, color_mode: ColorMode | None = None, timeout_ms: int = 200) -> NDArray[Any]: + """ + Reads a single frame (color) synchronously from the camera. + + This is a blocking call. It waits for a coherent set of frames (color) + from the camera hardware via the RealSense pipeline. + + Args: + timeout_ms (int): Maximum time in milliseconds to wait for a frame. Defaults to 200ms. + + Returns: + np.ndarray: The captured color frame as a NumPy array + (height, width, channels), processed according to `color_mode` and rotation. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + RuntimeError: If reading frames from the pipeline fails or frames are invalid. + ValueError: If an invalid `color_mode` is requested. + """ + + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + start_time = time.perf_counter() + + if self.rs_pipeline is None: + raise RuntimeError(f"{self}: rs_pipeline must be initialized before use.") + + ret, frame = self.rs_pipeline.try_wait_for_frames(timeout_ms=timeout_ms) + + if not ret or frame is None: + raise RuntimeError(f"{self} read failed (status={ret}).") + + color_frame = frame.get_color_frame() + color_image_raw = np.asanyarray(color_frame.get_data()) + + color_image_processed = self._postprocess_image(color_image_raw, color_mode) + + read_duration_ms = (time.perf_counter() - start_time) * 1e3 + logger.debug(f"{self} read took: {read_duration_ms:.1f}ms") + + return color_image_processed + + def _postprocess_image( + self, image: NDArray[Any], color_mode: ColorMode | None = None, depth_frame: bool = False + ) -> NDArray[Any]: + """ + Applies color conversion, dimension validation, and rotation to a raw color frame. + + Args: + image (np.ndarray): The raw image frame (expected RGB format from RealSense). + color_mode (Optional[ColorMode]): The target color mode (RGB or BGR). If None, + uses the instance's default `self.color_mode`. + + Returns: + np.ndarray: The processed image frame according to `self.color_mode` and `self.rotation`. + + Raises: + ValueError: If the requested `color_mode` is invalid. + RuntimeError: If the raw frame dimensions do not match the configured + `width` and `height`. + """ + + if color_mode and color_mode not in (ColorMode.RGB, ColorMode.BGR): + raise ValueError( + f"Invalid requested color mode '{color_mode}'. Expected {ColorMode.RGB} or {ColorMode.BGR}." + ) + + if depth_frame: + h, w = image.shape + else: + h, w, c = image.shape + + if c != 3: + raise RuntimeError(f"{self} frame channels={c} do not match expected 3 channels (RGB/BGR).") + + if h != self.capture_height or w != self.capture_width: + raise RuntimeError( + f"{self} frame width={w} or height={h} do not match configured width={self.capture_width} or height={self.capture_height}." 
+ ) + + processed_image = image + if self.color_mode == ColorMode.BGR: + processed_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + + if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_180]: + processed_image = cv2.rotate(processed_image, self.rotation) + + return processed_image + + def _read_loop(self) -> None: + """ + Internal loop run by the background thread for asynchronous reading. + + On each iteration: + 1. Reads a color frame with 500ms timeout + 2. Stores result in latest_frame (thread-safe) + 3. Sets new_frame_event to notify listeners + + Stops on DeviceNotConnectedError, logs other errors and continues. + """ + if self.stop_event is None: + raise RuntimeError(f"{self}: stop_event is not initialized before starting read loop.") + + while not self.stop_event.is_set(): + try: + color_image = self.read(timeout_ms=500) + + with self.frame_lock: + self.latest_frame = color_image + self.new_frame_event.set() + + except DeviceNotConnectedError: + break + except Exception as e: + logger.warning(f"Error reading frame in background thread for {self}: {e}") + + def _start_read_thread(self) -> None: + """Starts or restarts the background read thread if it's not running.""" + if self.thread is not None and self.thread.is_alive(): + self.thread.join(timeout=0.1) + if self.stop_event is not None: + self.stop_event.set() + + self.stop_event = Event() + self.thread = Thread(target=self._read_loop, args=(), name=f"{self}_read_loop") + self.thread.daemon = True + self.thread.start() + + def _stop_read_thread(self) -> None: + """Signals the background read thread to stop and waits for it to join.""" + if self.stop_event is not None: + self.stop_event.set() + + if self.thread is not None and self.thread.is_alive(): + self.thread.join(timeout=2.0) + + self.thread = None + self.stop_event = None + + # NOTE(Steven): Missing implementation for depth for now + def async_read(self, timeout_ms: float = 200) -> NDArray[Any]: + """ + Reads the latest available frame data (color) asynchronously. + + This method retrieves the most recent color frame captured by the background + read thread. It does not block waiting for the camera hardware directly, + but may wait up to timeout_ms for the background thread to provide a frame. + + Args: + timeout_ms (float): Maximum time in milliseconds to wait for a frame + to become available. Defaults to 200ms (0.2 seconds). + + Returns: + np.ndarray: + The latest captured frame data (color image), processed according to configuration. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + TimeoutError: If no frame data becomes available within the specified timeout. + RuntimeError: If the background thread died unexpectedly or another error occurs. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + if self.thread is None or not self.thread.is_alive(): + self._start_read_thread() + + if not self.new_frame_event.wait(timeout=timeout_ms / 1000.0): + thread_alive = self.thread is not None and self.thread.is_alive() + raise TimeoutError( + f"Timed out waiting for frame from camera {self} after {timeout_ms} ms. " + f"Read thread alive: {thread_alive}." + ) + + with self.frame_lock: + frame = self.latest_frame + self.new_frame_event.clear() + + if frame is None: + raise RuntimeError(f"Internal error: Event set but no frame available for {self}.") + + return frame + + def disconnect(self) -> None: + """ + Disconnects from the camera, stops the pipeline, and cleans up resources. 
+ + Stops the background read thread (if running) and stops the RealSense pipeline. + + Raises: + DeviceNotConnectedError: If the camera is already disconnected (pipeline not running). + """ + + if not self.is_connected and self.thread is None: + raise DeviceNotConnectedError( + f"Attempted to disconnect {self}, but it appears already disconnected." + ) + + if self.thread is not None: + self._stop_read_thread() + + if self.rs_pipeline is not None: + self.rs_pipeline.stop() + self.rs_pipeline = None + self.rs_profile = None + + logger.info(f"{self} disconnected.") diff --git a/lerobot/src/lerobot/cameras/realsense/configuration_realsense.py b/lerobot/src/lerobot/cameras/realsense/configuration_realsense.py new file mode 100644 index 0000000000000000000000000000000000000000..e981b35341e004a528c8bfeac9ef2c0f0542fdd4 --- /dev/null +++ b/lerobot/src/lerobot/cameras/realsense/configuration_realsense.py @@ -0,0 +1,82 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +from ..configs import CameraConfig, ColorMode, Cv2Rotation + + +@CameraConfig.register_subclass("intelrealsense") +@dataclass +class RealSenseCameraConfig(CameraConfig): + """Configuration class for Intel RealSense cameras. + + This class provides specialized configuration options for Intel RealSense cameras, + including support for depth sensing and device identification via serial number or name. + + Example configurations for Intel RealSense D405: + ```python + # Basic configurations + RealSenseCameraConfig("0123456789", 30, 1280, 720) # 1280x720 @ 30FPS + RealSenseCameraConfig("0123456789", 60, 640, 480) # 640x480 @ 60FPS + + # Advanced configurations + RealSenseCameraConfig("0123456789", 30, 640, 480, use_depth=True) # With depth sensing + RealSenseCameraConfig("0123456789", 30, 640, 480, rotation=Cv2Rotation.ROTATE_90) # With 90° rotation + ``` + + Attributes: + fps: Requested frames per second for the color stream. + width: Requested frame width in pixels for the color stream. + height: Requested frame height in pixels for the color stream. + serial_number_or_name: Unique serial number or human-readable name to identify the camera. + color_mode: Color mode for image output (RGB or BGR). Defaults to RGB. + use_depth: Whether to enable depth stream. Defaults to False. + rotation: Image rotation setting (0°, 90°, 180°, or 270°). Defaults to no rotation. + warmup_s: Time reading frames before returning from connect (in seconds) + + Note: + - Either name or serial_number must be specified. + - Depth stream configuration (if enabled) will use the same FPS as the color stream. + - The actual resolution and FPS may be adjusted by the camera to the nearest supported mode. + - For `fps`, `width` and `height`, either all of them need to be set, or none of them. 
+ """ + + serial_number_or_name: str + color_mode: ColorMode = ColorMode.RGB + use_depth: bool = False + rotation: Cv2Rotation = Cv2Rotation.NO_ROTATION + warmup_s: int = 1 + + def __post_init__(self) -> None: + if self.color_mode not in (ColorMode.RGB, ColorMode.BGR): + raise ValueError( + f"`color_mode` is expected to be {ColorMode.RGB.value} or {ColorMode.BGR.value}, but {self.color_mode} is provided." + ) + + if self.rotation not in ( + Cv2Rotation.NO_ROTATION, + Cv2Rotation.ROTATE_90, + Cv2Rotation.ROTATE_180, + Cv2Rotation.ROTATE_270, + ): + raise ValueError( + f"`rotation` is expected to be in {(Cv2Rotation.NO_ROTATION, Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_180, Cv2Rotation.ROTATE_270)}, but {self.rotation} is provided." + ) + + values = (self.fps, self.width, self.height) + if any(v is not None for v in values) and any(v is None for v in values): + raise ValueError( + "For `fps`, `width` and `height`, either all of them need to be set, or none of them." + ) diff --git a/lerobot/src/lerobot/cameras/zmq/__init__.py b/lerobot/src/lerobot/cameras/zmq/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..963a16ba2b04aed1d55942f6f0afcdf1114c4e2a --- /dev/null +++ b/lerobot/src/lerobot/cameras/zmq/__init__.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python + +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .camera_zmq import ZMQCamera +from .configuration_zmq import ZMQCameraConfig + +__all__ = ["ZMQCamera", "ZMQCameraConfig"] diff --git a/lerobot/src/lerobot/cameras/zmq/camera_zmq.py b/lerobot/src/lerobot/cameras/zmq/camera_zmq.py new file mode 100644 index 0000000000000000000000000000000000000000..d561aa8431d8bd21bb46fa1b92f65233eea899dc --- /dev/null +++ b/lerobot/src/lerobot/cameras/zmq/camera_zmq.py @@ -0,0 +1,235 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""
+ZMQCamera - Captures frames from remote cameras via ZeroMQ, using a JSON protocol with
+the following format:
+ {
+ "timestamps": {"camera_name": float},
+ "images": {"camera_name": "<base64-encoded JPEG>"}
+ }
+"""
+
+import base64
+import json
+import logging
+import time
+from threading import Event, Lock, Thread
+from typing import Any
+
+import cv2
+import numpy as np
+from numpy.typing import NDArray
+
+from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
+
+from ..camera import Camera
+from ..configs import ColorMode
+from .configuration_zmq import ZMQCameraConfig
+
+logger = logging.getLogger(__name__)
+
+
+class ZMQCamera(Camera):
+ """
+ Example usage:
+ ```python
+ from lerobot.cameras.zmq import ZMQCamera, ZMQCameraConfig
+
+ config = ZMQCameraConfig(server_address="192.168.123.164", port=5555, camera_name="head_camera")
+ camera = ZMQCamera(config)
+ camera.connect()
+ frame = camera.read()
+ camera.disconnect()
+ ```
+ """
+
+ def __init__(self, config: ZMQCameraConfig):
+ super().__init__(config)
+ import zmq
+
+ self.config = config
+ self.server_address = config.server_address
+ self.port = config.port
+ self.camera_name = config.camera_name
+ self.color_mode = config.color_mode
+ self.timeout_ms = config.timeout_ms
+
+ self.context: zmq.Context | None = None
+ self.socket: zmq.Socket | None = None
+ self._connected = False
+
+ self.thread: Thread | None = None
+ self.stop_event: Event | None = None
+ self.frame_lock: Lock = Lock()
+ self.latest_frame: NDArray[Any] | None = None
+ self.new_frame_event: Event = Event()
+
+ def __str__(self) -> str:
+ return f"ZMQCamera({self.camera_name}@{self.server_address}:{self.port})"
+
+ @property
+ def is_connected(self) -> bool:
+ return self._connected and self.context is not None and self.socket is not None
+
+ def connect(self, warmup: bool = True) -> None:
+ """Connect to ZMQ camera server."""
+ if self.is_connected:
+ raise DeviceAlreadyConnectedError(f"{self} is already connected.")
+
+ logger.info(f"Connecting to {self}...")
+
+ try:
+ import zmq
+
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.SUB)
+ self.socket.setsockopt_string(zmq.SUBSCRIBE, "")
+ self.socket.setsockopt(zmq.RCVTIMEO, self.timeout_ms)
+ self.socket.setsockopt(zmq.CONFLATE, True)
+ self.socket.connect(f"tcp://{self.server_address}:{self.port}")
+ self._connected = True
+
+ # Auto-detect resolution
+ if self.width is None or self.height is None:
+ h, w = self.read().shape[:2]
+ self.height = h
+ self.width = w
+ logger.info(f"{self} resolution: {w}x{h}")
+
+ logger.info(f"{self} connected.")
+
+ if warmup:
+ time.sleep(0.1)
+
+ except Exception as e:
+ self._cleanup()
+ raise RuntimeError(f"Failed to connect to {self}: {e}") from e
+
+ def _cleanup(self):
+ """Clean up ZMQ resources."""
+ self._connected = False
+ if self.socket:
+ self.socket.close()
+ self.socket = None
+ if self.context:
+ self.context.term()
+ self.context = None
+
+ @staticmethod
+ def find_cameras() -> list[dict[str, Any]]:
+ """ZMQ cameras require manual configuration (server address/port)."""
+ return []
+
+ def read(self, color_mode: ColorMode | None = None) -> NDArray[Any]:
+ """
+ Read a single frame from the ZMQ camera.
+
+ Args:
+ color_mode: Optional per-call override of the configured color mode.
+ Defaults to the camera's configured `color_mode` when None.
+
+ Returns:
+ np.ndarray: Decoded frame (height, width, 3) in the requested color order.
+ """
+ if not self.is_connected or self.socket is None:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ try:
+ message = self.socket.recv_string()
+ except Exception as e:
+ if type(e).__name__ == "Again":
+ raise TimeoutError(f"{self} timeout after {self.timeout_ms}ms") from e
+ raise
+
+ # Decode JSON message
+ data = json.loads(message)
+
+ if "images" not in data:
+ raise RuntimeError(f"{self} invalid message: missing 'images' key")
+
+ images = data["images"]
+
+ # Get image by camera name or first available
+ if self.camera_name in images:
+ img_b64 = images[self.camera_name]
+ elif images:
+ img_b64 = next(iter(images.values()))
+ else:
+ raise RuntimeError(f"{self} no images in message")
+
+ # Decode base64 JPEG
+ img_bytes = base64.b64decode(img_b64)
+ frame = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), cv2.IMREAD_COLOR)
+
+ if frame is None:
+ raise RuntimeError(f"{self} failed to decode image")
+
+ # The bundled image server encodes RGB arrays with cv2.imencode (which assumes
+ # BGR), so cv2.imdecode swaps the channels back and the frame arrives in RGB
+ # order. Convert only when BGR is explicitly requested.
+ requested_color_mode = self.color_mode if color_mode is None else color_mode
+ if requested_color_mode == ColorMode.BGR:
+ frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
+
+ return frame
+
+ def _read_loop(self) -> None:
+ while self.stop_event and not self.stop_event.is_set():
+ try:
+ frame = self.read()
+ with self.frame_lock:
+ self.latest_frame = frame
+ self.new_frame_event.set()
+ except DeviceNotConnectedError:
+ break
+ except TimeoutError:
+ pass
+ except Exception as e:
+ logger.warning(f"Read error: {e}")
+
+ def _start_read_thread(self) -> None:
+ if self.thread and self.thread.is_alive():
+ return
+ self.stop_event = Event()
+ self.thread = Thread(target=self._read_loop, daemon=True)
+ self.thread.start()
+
+ def _stop_read_thread(self) -> None:
+ if self.stop_event:
+ self.stop_event.set()
+ if self.thread and self.thread.is_alive():
+ self.thread.join(timeout=2.0)
+ self.thread = None
+ self.stop_event = None
+
+ def async_read(self, timeout_ms: float = 10000) -> NDArray[Any]:
+ """Read the latest frame from the background read thread, waiting up to `timeout_ms` for one to arrive."""
+ if not self.is_connected:
+ raise DeviceNotConnectedError(f"{self} is not connected.")
+
+ if not self.thread or not self.thread.is_alive():
+ self._start_read_thread()
+
+ if not self.new_frame_event.wait(timeout=timeout_ms / 1000.0):
+ raise TimeoutError(f"{self} async_read timeout after {timeout_ms}ms")
+
+ with self.frame_lock:
+ frame = self.latest_frame
+ self.new_frame_event.clear()
+
+ if frame is None:
+ raise RuntimeError(f"{self} no frame available")
+
+ return frame
+
+ def disconnect(self) -> None:
+ """Disconnect from ZMQ camera."""
+ if not self.is_connected and not self.thread:
+ raise DeviceNotConnectedError(f"{self} not connected.")
+
+ self._stop_read_thread()
+ self._cleanup()
+ logger.info(f"{self} disconnected.")
diff --git a/lerobot/src/lerobot/cameras/zmq/configuration_zmq.py b/lerobot/src/lerobot/cameras/zmq/configuration_zmq.py
new file mode 100644
index 0000000000000000000000000000000000000000..569e37fd30622af9fb983ebd2896a71f969adfb8
--- /dev/null
+++ b/lerobot/src/lerobot/cameras/zmq/configuration_zmq.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Copyright 2026 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +from ..configs import CameraConfig, ColorMode + +__all__ = ["ZMQCameraConfig", "ColorMode"] + + +@CameraConfig.register_subclass("zmq") +@dataclass +class ZMQCameraConfig(CameraConfig): + server_address: str + port: int = 5555 + camera_name: str = "zmq_camera" + color_mode: ColorMode = ColorMode.RGB + timeout_ms: int = 5000 + + def __post_init__(self) -> None: + if self.color_mode not in (ColorMode.RGB, ColorMode.BGR): + raise ValueError( + f"`color_mode` is expected to be {ColorMode.RGB.value} or {ColorMode.BGR.value}, but {self.color_mode} is provided." + ) + + if self.timeout_ms <= 0: + raise ValueError(f"`timeout_ms` must be positive, but {self.timeout_ms} is provided.") + + if not self.server_address: + raise ValueError("`server_address` cannot be empty.") + + if self.port <= 0 or self.port > 65535: + raise ValueError(f"`port` must be between 1 and 65535, but {self.port} is provided.") diff --git a/lerobot/src/lerobot/cameras/zmq/image_server.py b/lerobot/src/lerobot/cameras/zmq/image_server.py new file mode 100644 index 0000000000000000000000000000000000000000..87436bb7b84dd9fe07a1b48d9a8aa30b79e50a55 --- /dev/null +++ b/lerobot/src/lerobot/cameras/zmq/image_server.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python + +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Streams camera images over ZMQ. +Uses lerobot's OpenCVCamera for capture, encodes images to base64 and sends them over ZMQ. 
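+
+Messages are JSON objects of the form:
+ {
+ "timestamps": {"<camera_name>": <float unix time>},
+ "images": {"<camera_name>": "<base64-encoded JPEG>"}
+ }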
+""" + +import base64 +import contextlib +import json +import logging +import time +from collections import deque + +import cv2 +import numpy as np +import zmq + +from lerobot.cameras.configs import ColorMode +from lerobot.cameras.opencv import OpenCVCamera, OpenCVCameraConfig + +logger = logging.getLogger(__name__) + + +def encode_image(image: np.ndarray, quality: int = 80) -> str: + """Encode RGB image to base64 JPEG string.""" + _, buffer = cv2.imencode(".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), quality]) + return base64.b64encode(buffer).decode("utf-8") + + +class ImageServer: + def __init__(self, config: dict, port: int = 5555): + self.fps = config.get("fps", 30) + self.cameras: dict[str, OpenCVCamera] = {} + + for name, cfg in config.get("cameras", {}).items(): + shape = cfg.get("shape", [480, 640]) + cam_config = OpenCVCameraConfig( + index_or_path=cfg.get("device_id", 0), + fps=self.fps, + width=shape[1], + height=shape[0], + color_mode=ColorMode.RGB, + ) + camera = OpenCVCamera(cam_config) + camera.connect() + self.cameras[name] = camera + logger.info(f"Camera {name}: {shape[1]}x{shape[0]}") + + # ZMQ PUB socket + self.context = zmq.Context() + self.socket = self.context.socket(zmq.PUB) + self.socket.setsockopt(zmq.SNDHWM, 20) + self.socket.setsockopt(zmq.LINGER, 0) + self.socket.bind(f"tcp://*:{port}") + + logger.info(f"ImageServer running on port {port}") + + def run(self): + frame_count = 0 + frame_times = deque(maxlen=60) + + try: + while True: + t0 = time.time() + + # Build message + message = {"timestamps": {}, "images": {}} + for name, cam in self.cameras.items(): + frame = cam.read() # Returns RGB + message["timestamps"][name] = time.time() + message["images"][name] = encode_image(frame) + + # Send as JSON string (suppress if buffer full) + with contextlib.suppress(zmq.Again): + self.socket.send_string(json.dumps(message), zmq.NOBLOCK) + + frame_count += 1 + frame_times.append(time.time() - t0) + + if frame_count % 60 == 0: + logger.debug(f"FPS: {len(frame_times) / sum(frame_times):.1f}") + + sleep = (1.0 / self.fps) - (time.time() - t0) + if sleep > 0: + time.sleep(sleep) + + except KeyboardInterrupt: + pass + finally: + for cam in self.cameras.values(): + cam.disconnect() + self.socket.close() + self.context.term() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + config = {"fps": 30, "cameras": {"head_camera": {"device_id": 4, "shape": [480, 640]}}} + ImageServer(config, port=5555).run() diff --git a/lerobot/src/lerobot/data_processing/sarm_annotations/__init__.py b/lerobot/src/lerobot/data_processing/sarm_annotations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2a07d1b051e63468ab496d3325eb3c74b7ccc22d --- /dev/null +++ b/lerobot/src/lerobot/data_processing/sarm_annotations/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/lerobot/src/lerobot/data_processing/sarm_annotations/subtask_annotation.py b/lerobot/src/lerobot/data_processing/sarm_annotations/subtask_annotation.py
new file mode 100644
index 0000000000000000000000000000000000000000..99e403cefd5043826aece3273acfaa9220e6ac99
--- /dev/null
+++ b/lerobot/src/lerobot/data_processing/sarm_annotations/subtask_annotation.py
@@ -0,0 +1,1202 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+SARM Subtask Annotation using local GPU (Qwen3-VL).
+
+This script implements the annotation approach from the SARM paper using local GPU inference:
+"SARM: Stage-Aware Reward Modeling for Long Horizon Robot Manipulation"
+Paper: https://arxiv.org/pdf/2509.25358
+
+What it does:
+1. Takes videos from a LeRobot dataset
+2. Uses Qwen3-VL running locally on GPU to identify when subtasks occur
+3. Saves subtask timestamps to the dataset metadata
+4. Optionally pushes the annotated dataset to HuggingFace Hub
+
+SARM trains reward models that predict:
+ - Stage: Which subtask is currently being executed (discrete classification)
+ - Progress: How far along the subtask we are (continuous 0-1)
+
+Supports three annotation modes:
+ 1. No annotations (no args): Auto-creates a single sparse "task" stage covering the full episode.
+ Use with SARM config annotation_mode="single_stage" for simple tasks.
+
+ 2. Dense-only (--dense-only --dense-subtasks): Dense annotations from VLM, auto-generated
+ single sparse "task" stage. Use with annotation_mode="dense_only".
+
+ 3. Dual mode (--sparse-subtasks + --dense-subtasks): Both sparse and dense annotations
+ from VLM. Use with annotation_mode="dual".
+
+Requirements:
+ - GPU with sufficient VRAM (16GB+ recommended for 30B model)
+ - `pip install transformers torch qwen-vl-utils`
+
+Run with:
+```bash
+python src/lerobot/data_processing/sarm_annotations/subtask_annotation.py \
+ --repo-id your-username/your-dataset \
+ --sparse-subtasks "Do ..."
\ + --dense-subtasks "Do task 1, Do task 2, Do task 3" \ + --video-key observation.images.base \ + --push-to-hub +``` +""" + +import argparse +import json +import multiprocessing as mp +import random +import re +import subprocess +import tempfile +import textwrap +import time +from concurrent.futures import ProcessPoolExecutor, as_completed +from pathlib import Path +from typing import Any + +import cv2 +import numpy as np +import pandas as pd +import torch +from pydantic import BaseModel, Field +from transformers import AutoProcessor, Qwen3VLMoeForConditionalGeneration + +from lerobot.datasets.lerobot_dataset import LeRobotDataset + + +# Pydantic Models for SARM Subtask Annotation +class Timestamp(BaseModel): + """Timestamp in MM:SS or SS format""" + + start: str = Field(description="Start timestamp (MM:SS or just seconds)") + end: str = Field(description="End timestamp (MM:SS or just seconds)") + + +class Subtask(BaseModel): + """Individual subtask/stage - must use EXACT names from provided list""" + + name: str = Field(description="Subtask name - MUST match one from the predefined list exactly") + timestamps: Timestamp + + +class SubtaskAnnotation(BaseModel): + """Complete annotation for a robot manipulation episode""" + + subtasks: list[Subtask] = Field(description="List of all subtasks in temporal order") + + +def compute_temporal_proportions( + annotations: dict[int, Any], fps: int = 30, subtask_order: list[str] | None = None +) -> dict[str, float]: + """ + Compute dataset-level temporal proportions (priors) for each subtask. + + Implements SARM Paper Formula (1): ᾱ_k = (1/M) × Σ_i (L_{i,k} / T_i) + + Args: + annotations: Dict mapping episode index to SubtaskAnnotation object. + fps: Frames per second (unused, kept for API compatibility) + subtask_order: Optional list defining the output order of subtasks. + + Returns: + Dict mapping subtask name to its temporal proportion (ᾱ_k), ordered by subtask_order if provided. 
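+
+ Example:
+ With two episodes where "grasp" occupies 2s of a 10s episode and 3s of
+ another 10s episode, its proportion is (0.2 + 0.3) / 2 = 0.25 before the
+ final renormalization over all subtasks.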
+ """ + subtask_proportions: dict[str, list[float]] = {} + + for annotation in annotations.values(): + total_duration = 0 + durations: dict[str, int] = {} + + for subtask in annotation.subtasks: + start_parts = subtask.timestamps.start.split(":") + end_parts = subtask.timestamps.end.split(":") + + start_seconds = ( + int(start_parts[0]) * 60 + int(start_parts[1]) + if len(start_parts) == 2 + else int(start_parts[0]) + ) + end_seconds = ( + int(end_parts[0]) * 60 + int(end_parts[1]) if len(end_parts) == 2 else int(end_parts[0]) + ) + + duration = end_seconds - start_seconds + durations[subtask.name] = duration + total_duration += duration + + if total_duration > 0: + for name, duration in durations.items(): + if name not in subtask_proportions: + subtask_proportions[name] = [] + subtask_proportions[name].append(duration / total_duration) + + if not subtask_proportions: + return {} + + avg_proportions = {name: sum(props) / len(props) for name, props in subtask_proportions.items()} + + total = sum(avg_proportions.values()) + if total > 0: + avg_proportions = {name: prop / total for name, prop in avg_proportions.items()} + + # Reorder according to subtask_order if provided + if subtask_order: + avg_proportions = { + name: avg_proportions.get(name, 0.0) for name in subtask_order if name in avg_proportions + } + + return avg_proportions + + +def create_sarm_prompt(subtask_list: list[str]) -> str: + subtask_str = "\n".join([f" - {name}" for name in subtask_list]) + + return textwrap.dedent(f"""\ + # Role + You are a Robotics Vision System specializing in temporal action localization for robot manipulation. Your job is to segment a single demonstration video into distinct, non-overlapping atomic actions from a fixed subtask list. + + # Subtask Label Set (Closed Vocabulary) + You must strictly identify the video segments using ONLY the following labels. Do not create new labels or modify existing ones: + + [ + {subtask_str} + ] + + The video shows one successful execution of all subtasks in a logical order. + + # Ground-Truth Semantics (Very Important) + Use **visual state changes** to define when a subtask starts and ends. Do NOT assume equal durations for the subtasks. + + - A subtask **starts** at the first frame where the robot's motion clearly initiates that subtask. + - A subtask **ends** at the first frame where that specific action is visually completed and the manipulated object reaches a temporary, stable configuration. + + If there are short pauses or micro-motions that don't clearly correspond to a new subtask, they belong to the **current** subtask. + + # Hard Constraints & Logic + 1. **Continuous Coverage (No Gaps):** + - The entire video duration from "00:00" to the final timestamp must be covered by subtasks. + - There can be no gaps between subtasks. + - If there is any idle or ambiguous time between clear actions, extend the *preceding* subtask to cover it. + + 2. **Boundary Consistency:** + - The `"end"` timestamp of one subtask must be exactly equal to the `"start"` timestamp of the next subtask. + - Boundaries must coincide with a real visual state transition, not just a convenient time split. + + 3. **Chronological Order, One Occurrence Each:** + - This is a single successful demonstration. + - Each subtask from the vocabulary appears **exactly once**, in the correct logical order. + - **Durations may be very different** between subtasks. Never assume they are similar lengths. Base all boundaries only on the video. + + 4. 
**Reject Uniform Segmentation (Important):**
+ - Do NOT simply divide the video into equal or nearly equal time chunks.
+ - If your boundaries would result in subtasks with similar durations (e.g. all around 5 seconds), treat this as evidence that your segmentation is wrong and refine the boundaries.
+ - Only use nearly equal durations if the video truly shows each subtask taking the same amount of time (this is very rare).
+
+ 5. **Timestamps:**
+ - Timestamps must be in `"MM:SS"` format.
+ - The first subtask always starts at `"00:00"`.
+ - The last subtask ends at the final visible frame of the video.
+
+ # Step 1 — Textual Timeline (must do this first)
+ First, write an extensive and detailed textual timeline describing what happens in the video with approximate timestamps.
+ For each subtask, include:
+ - its name
+ - an approximate start and end time,
+ - a description of the visual event at the boundary (e.g. "shirt fully folded to the left", "robot rotates folded shirt 90 degrees").
+
+ Format this as a bullet list.
+
+ # Step 2 — JSON Output (final answer)
+ After the textual timeline, output **only** valid JSON with this structure.
+ The JSON **must** be consistent with the textual timeline above:
+
+ {{
+ "subtasks": [
+ {{
+ "name": "EXACT_NAME_FROM_LIST",
+ "timestamps": {{
+ "start": "MM:SS",
+ "end": "MM:SS"
+ }}
+ }},
+ {{
+ "name": "EXACT_NAME_FROM_LIST",
+ "timestamps": {{
+ "start": "MM:SS",
+ "end": "MM:SS"
+ }}
+ }}
+ ]
+ }}
+
+ Do not add any extra keys to the JSON.
+ """)
+
+
+class VideoAnnotator:
+ """Annotates robot manipulation videos using local Qwen3-VL model on GPU"""
+
+ def __init__(
+ self,
+ subtask_list: list[str],
+ model_name: str = "Qwen/Qwen3-VL-30B-A3B-Instruct",
+ device: str = "cuda",
+ torch_dtype: torch.dtype = torch.bfloat16,
+ model: Qwen3VLMoeForConditionalGeneration | None = None,  # noqa: F821
+ processor: AutoProcessor | None = None,  # noqa: F821
+ ):
+ """
+ Initialize the video annotator with local model.
+
+ Args:
+ subtask_list: List of allowed subtask names (for consistency)
+ model_name: Hugging Face model name (default: Qwen/Qwen3-VL-30B-A3B-Instruct)
+ device: Device to use (cuda, cpu)
+ torch_dtype: Data type for model (bfloat16, float16, float32)
+ model: Pre-loaded model instance (optional, to share between annotators)
+ processor: Pre-loaded processor instance (optional, to share between annotators)
+ """
+ self.subtask_list = subtask_list
+ self.prompt = create_sarm_prompt(subtask_list)
+ self.device = device
+
+ # Use provided model/processor or load new ones
+ if model is not None and processor is not None:
+ self.model = model
+ self.processor = processor
+ print(f"Using shared model on {device}")
+ else:
+ from transformers import AutoProcessor, Qwen3VLMoeForConditionalGeneration
+
+ print(f"Loading model: {model_name}...")
+
+ self.model = Qwen3VLMoeForConditionalGeneration.from_pretrained(
+ model_name, torch_dtype=torch_dtype, device_map=device, trust_remote_code=True
+ )
+
+ self.processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
+
+ print(f"Model loaded successfully on {device}")
+
+ def extract_episode_segment(
+ self, file_path: Path, start_timestamp: float, end_timestamp: float, target_fps: int = 1
+ ) -> Path:
+ """
+ Extract a specific episode segment from concatenated video.
+ Uses minimal compression to preserve quality for local inference.
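+ The segment is re-encoded with libx264 (ultrafast preset, CRF 23), audio
+ stripped, and resampled to `target_fps`.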
+ + Args: + file_path: Path to the concatenated video file + start_timestamp: Starting timestamp in seconds (within this video file) + end_timestamp: Ending timestamp in seconds (within this video file) + target_fps: Target FPS (default: 1 for faster processing) + + Returns: + Path to extracted video file + """ + # Create temporary file for extracted video + with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp_file: + tmp_path = Path(tmp_file.name) + + try: + # Check if ffmpeg is available + subprocess.run( # nosec B607 + ["ffmpeg", "-version"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True + ) + except (subprocess.CalledProcessError, FileNotFoundError) as err: + raise RuntimeError("ffmpeg not found, cannot extract episode segment") from err + + try: + # Calculate duration + duration = end_timestamp - start_timestamp + + print(f"Extracting episode: {start_timestamp:.1f}s-{end_timestamp:.1f}s ({duration:.1f}s)") + + # Use ffmpeg to extract segment with minimal quality loss + cmd = [ + "ffmpeg", + "-i", + str(file_path), + "-ss", + str(start_timestamp), + "-t", + str(duration), + "-r", + str(target_fps), + "-c:v", + "libx264", + "-preset", + "ultrafast", + "-crf", + "23", + "-an", + "-y", + str(tmp_path), + ] + + subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) + + # Verify the output file was created and is not empty + if not tmp_path.exists() or tmp_path.stat().st_size == 0: + print("Video extraction failed (0 bytes) - skipping episode") + if tmp_path.exists(): + tmp_path.unlink() + raise RuntimeError("FFmpeg produced empty video file") + + # Show extraction results + file_size_mb = tmp_path.stat().st_size / (1024 * 1024) + + # Fail if file is too small (< 100KB likely means extraction failed) + if file_size_mb < 0.1: + print(f"Extracted video too small ({file_size_mb:.2f}MB) - skipping episode") + tmp_path.unlink() + raise RuntimeError(f"Video extraction produced invalid file ({file_size_mb:.2f}MB)") + + print(f"Extracted: {file_size_mb:.1f}MB ({target_fps} FPS)") + + return tmp_path + + except subprocess.CalledProcessError as e: + raise RuntimeError(f"ffmpeg failed ({e})") from e + + def annotate( + self, + file_path: str | Path, + fps: int, + start_timestamp: float = 0.0, + end_timestamp: float | None = None, + max_retries: int = 3, + ) -> SubtaskAnnotation: + """Annotate a video segment using local GPU.""" + from qwen_vl_utils import process_vision_info + + file_path = Path(file_path) + + if end_timestamp is None: + cap = cv2.VideoCapture(str(file_path)) + end_timestamp = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) / (cap.get(cv2.CAP_PROP_FPS) or 1) + cap.release() + + duration = end_timestamp - start_timestamp + duration_str = f"{int(duration // 60):02d}:{int(duration % 60):02d}" + + extracted_path = self.extract_episode_segment(file_path, start_timestamp, end_timestamp, 1) + is_extracted = extracted_path != file_path + + try: + messages = [ + {"role": "system", "content": [{"type": "text", "text": self.prompt}]}, + { + "role": "user", + "content": [ + {"type": "video", "video": str(extracted_path), "fps": 1.0}, + { + "type": "text", + "text": f"Video is {duration_str} (~{duration:.1f}s). 
Follow instructions.", + }, + ], + }, + ] + + for attempt in range(max_retries): + try: + text = self.processor.apply_chat_template( + messages, tokenize=False, add_generation_prompt=True + ) + image_inputs, video_inputs = process_vision_info(messages) + inputs = self.processor( + text=[text], + images=image_inputs, + videos=video_inputs, + padding=True, + return_tensors="pt", + ).to(self.device) + + with torch.no_grad(): + generated_ids = self.model.generate( + **inputs, max_new_tokens=1024, do_sample=True, temperature=0.7 + ) + + response = self.processor.batch_decode( + [out[len(inp) :] for inp, out in zip(inputs.input_ids, generated_ids, strict=True)], + skip_special_tokens=True, + )[0].strip() + + # Extract JSON + if "```json" in response: + response = response.split("```json")[1].split("```")[0] + elif "```" in response: + response = response.split("```")[1].split("```")[0] + + try: + return SubtaskAnnotation.model_validate(json.loads(response)) + except json.JSONDecodeError: + match = re.search(r"\{.*\}", response, re.DOTALL) + if match: + return SubtaskAnnotation.model_validate(json.loads(match.group())) + raise ValueError("No JSON found") from None + except Exception as e: + if attempt == max_retries - 1: + raise RuntimeError(f"Failed after {max_retries} attempts") from e + time.sleep(1) + finally: + if is_extracted and extracted_path.exists(): + extracted_path.unlink() + + +def display_annotation(annotation: SubtaskAnnotation, episode_idx: int, fps: int, prefix: str = ""): + """Display annotation summary.""" + subtask_summary = ", ".join( + f"{s.name}({s.timestamps.start}-{s.timestamps.end})" for s in annotation.subtasks + ) + print(f"Episode {episode_idx} {prefix}: {len(annotation.subtasks)} subtasks - {subtask_summary}") + + +def timestamp_to_seconds(timestamp: str) -> float: + """Convert MM:SS or SS timestamp to seconds""" + parts = timestamp.split(":") + if len(parts) == 2: + return int(parts[0]) * 60 + int(parts[1]) + else: + return int(parts[0]) + + +def extract_frame(video_path: Path, timestamp: float) -> np.ndarray | None: + """Extract a single frame from video at given timestamp.""" + cap = cv2.VideoCapture(str(video_path)) + if not cap.isOpened(): + return None + cap.set(cv2.CAP_PROP_POS_MSEC, timestamp * 1000) + ret, frame = cap.read() + cap.release() + return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) if ret else None + + +def draw_timeline(ax, subtasks, total_duration, colors): + """Draw a timeline with color-coded subtask segments.""" + import matplotlib.patches as mpatches + + bar_height, bar_y = 0.6, 0.5 + + for i, subtask in enumerate(subtasks): + start = timestamp_to_seconds(subtask.timestamps.start) + end = timestamp_to_seconds(subtask.timestamps.end) + color = colors[i % len(colors)] + + rect = mpatches.FancyBboxPatch( + (start, bar_y - bar_height / 2), + end - start, + bar_height, + boxstyle="round,pad=0.02,rounding_size=0.1", + facecolor=color, + edgecolor="white", + linewidth=1.5, + alpha=0.85, + ) + ax.add_patch(rect) + + # Add label if segment is wide enough + duration = end - start + if duration > total_duration * 0.06: + ax.text( + (start + end) / 2, + bar_y, + subtask.name, + ha="center", + va="center", + fontsize=8, + fontweight="bold", + color="white", + rotation=0 if duration > total_duration * 0.12 else 45, + ) + + if i > 0: + ax.axvline(x=start, ymin=0.1, ymax=0.9, color="white", linestyle="--", linewidth=1.5, alpha=0.7) + + ax.axvline(x=0, ymin=0.1, ymax=0.9, color="#00ff00", linestyle="-", linewidth=2, alpha=0.9) + if subtasks: + ax.axvline( + 
x=timestamp_to_seconds(subtasks[-1].timestamps.end), + ymin=0.1, + ymax=0.9, + color="white", + linestyle="--", + linewidth=1.5, + alpha=0.7, + ) + + ax.set_xlim(-total_duration * 0.02, total_duration * 1.02) + ax.set_ylim(-0.1, 1.1) + ax.set_xlabel("Time (seconds)", fontsize=10, color="white", labelpad=5) + for spine in ["top", "right", "left"]: + ax.spines[spine].set_visible(False) + ax.spines["bottom"].set_color("#444444") + ax.tick_params(axis="x", colors="#888888", labelsize=8) + ax.tick_params(axis="y", left=False, labelleft=False) + + +def visualize_episode( + ep_idx: int, + annotation: SubtaskAnnotation, + video_path: Path, + video_start: float, + video_end: float, + output_path: Path, + video_key: str, + ann_type: str, +): + """Create visualization for a single episode with frames and timeline.""" + import matplotlib.pyplot as plt + + if annotation is None: + print(f"No {ann_type} annotation for episode {ep_idx}") + return + + subtasks = annotation.subtasks + if not subtasks: + print(f"No subtasks for episode {ep_idx}") + return + + colors = plt.cm.tab10(np.linspace(0, 1, max(len(subtasks), 10))) + total_duration = timestamp_to_seconds(subtasks[-1].timestamps.end) + + # Extract middle frame from each subtask + sample_frames, frame_times = [], [] + for subtask in subtasks: + start = timestamp_to_seconds(subtask.timestamps.start) + end = timestamp_to_seconds(subtask.timestamps.end) + mid = (start + end) / 2 + frame_times.append(mid) + sample_frames.append(extract_frame(video_path, video_start + mid)) + + # Create figure + fig_width = max(16, len(subtasks) * 2.5) + fig = plt.figure(figsize=(fig_width, 10)) + fig.patch.set_facecolor("#1a1a2e") + + gs = fig.add_gridspec( + 2, + max(len(subtasks), 1), + height_ratios=[2, 1], + hspace=0.3, + wspace=0.1, + left=0.05, + right=0.95, + top=0.88, + bottom=0.1, + ) + + fig.suptitle( + f"Episode {ep_idx} - {ann_type.capitalize()} Annotations", + fontsize=18, + fontweight="bold", + color="white", + y=0.96, + ) + fig.text( + 0.5, + 0.91, + f"Camera: {video_key} | Duration: {video_end - video_start:.1f}s | {len(subtasks)} subtasks", + ha="center", + fontsize=11, + color="#888888", + ) + + # Plot frames + for i, (frame, subtask) in enumerate(zip(sample_frames, subtasks, strict=True)): + ax = fig.add_subplot(gs[0, i]) + ax.set_facecolor("#16213e") + if frame is not None: + ax.imshow(frame) + else: + ax.text( + 0.5, 0.5, "N/A", ha="center", va="center", fontsize=12, color="white", transform=ax.transAxes + ) + ax.set_title(subtask.name, fontsize=10, fontweight="bold", color=colors[i % len(colors)], pad=8) + ax.axis("off") + ax.text( + 0.5, + -0.08, + f"t={frame_times[i]:.1f}s", + ha="center", + fontsize=9, + color="#888888", + transform=ax.transAxes, + ) + + # Plot timeline + ax_timeline = fig.add_subplot(gs[1, :]) + ax_timeline.set_facecolor("#16213e") + draw_timeline(ax_timeline, subtasks, total_duration, colors) + + output_path.parent.mkdir(parents=True, exist_ok=True) + plt.savefig(output_path, dpi=150, facecolor=fig.get_facecolor(), edgecolor="none", bbox_inches="tight") + plt.close() + print(f"Saved: {output_path}") + + +def visualize_annotations( + dataset: LeRobotDataset, + sparse_annotations: dict[int, SubtaskAnnotation], + dense_annotations: dict[int, SubtaskAnnotation] | None, + video_key: str, + output_dir: Path, + num_episodes: int = 5, + annotation_type: str = "sparse", + episode_indices: list[int] | None = None, +): + """ + Visualize subtask annotations for a set of episodes. 
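+ Saves one PNG per episode (and per annotation type in "both" mode) named
+ `episode_XXXX_<type>.png` under `output_dir`.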
+ + Args: + dataset: LeRobotDataset instance + sparse_annotations: Dict mapping episode index to sparse annotations + dense_annotations: Dict mapping episode index to dense annotations (or None) + video_key: Camera/video key to use + output_dir: Directory to save visualization images + num_episodes: Number of episodes to visualize (ignored if episode_indices provided) + annotation_type: "sparse", "dense", or "both" + episode_indices: Specific episode indices to visualize (optional) + """ + # Determine available episodes based on annotation type + if annotation_type == "sparse": + available = set(sparse_annotations.keys()) + elif annotation_type == "dense": + available = set(dense_annotations.keys()) if dense_annotations else set() + else: # both + sparse_set = set(sparse_annotations.keys()) + dense_set = set(dense_annotations.keys()) if dense_annotations else set() + available = sparse_set | dense_set + + if not available: + print("Error: No annotations found to visualize.") + return + + # Select episodes to visualize + if episode_indices: + episodes = sorted([e for e in episode_indices if e in available]) + missing = set(episode_indices) - available + if missing: + print(f"Episodes not found in annotations: {sorted(missing)}") + else: + episodes = sorted(random.sample(list(available), min(num_episodes, len(available)))) + print(f"Visualizing {len(episodes)} episodes: {episodes}") + output_dir.mkdir(parents=True, exist_ok=True) + + # Generate visualizations + for i, ep_idx in enumerate(episodes, 1): + print(f"Processing episode {ep_idx} ({i}/{len(episodes)})") + video_path = dataset.root / dataset.meta.get_video_file_path(ep_idx, video_key) + if not video_path.exists(): + print(f"Video not found: {video_path}") + continue + + video_start = float(dataset.meta.episodes[f"videos/{video_key}/from_timestamp"][ep_idx]) + video_end = float(dataset.meta.episodes[f"videos/{video_key}/to_timestamp"][ep_idx]) + + if annotation_type == "both": + # Visualize both sparse and dense + for ann_type, annotations in [("sparse", sparse_annotations), ("dense", dense_annotations)]: + if annotations and ep_idx in annotations: + output_path = output_dir / f"episode_{ep_idx:04d}_{ann_type}.png" + visualize_episode( + ep_idx, + annotations.get(ep_idx), + video_path, + video_start, + video_end, + output_path, + video_key, + ann_type, + ) + else: + annotations = sparse_annotations if annotation_type == "sparse" else dense_annotations + if annotations and ep_idx in annotations: + output_path = output_dir / f"episode_{ep_idx:04d}_{annotation_type}.png" + visualize_episode( + ep_idx, + annotations.get(ep_idx), + video_path, + video_start, + video_end, + output_path, + video_key, + annotation_type, + ) + + print(f"Visualizations saved to: {output_dir.absolute()}") + + +def save_annotations_to_dataset( + dataset_path: Path, annotations: dict[int, SubtaskAnnotation], fps: int, prefix: str = "sparse" +): + """Save annotations to LeRobot dataset parquet format.""" + from lerobot.datasets.utils import DEFAULT_EPISODES_PATH, load_episodes + + episodes_dataset = load_episodes(dataset_path) + if not episodes_dataset or len(episodes_dataset) == 0: + return + + episodes_df = episodes_dataset.to_pandas() + cols = [ + f"{prefix}_{c}" + for c in [ + "subtask_names", + "subtask_start_times", + "subtask_end_times", + "subtask_start_frames", + "subtask_end_frames", + ] + ] + for col in cols: + episodes_df[col] = None + + for ep_idx, ann in annotations.items(): + if ep_idx >= len(episodes_df): + continue + names, starts, ends, 
start_frames, end_frames = [], [], [], [], [] + for s in ann.subtasks: + names.append(s.name) + st, et = timestamp_to_seconds(s.timestamps.start), timestamp_to_seconds(s.timestamps.end) + starts.append(st) + ends.append(et) + start_frames.append(int(st * fps)) + end_frames.append(int(et * fps)) + episodes_df.at[ep_idx, cols[0]] = names + episodes_df.at[ep_idx, cols[1]] = starts + episodes_df.at[ep_idx, cols[2]] = ends + episodes_df.at[ep_idx, cols[3]] = start_frames + episodes_df.at[ep_idx, cols[4]] = end_frames + + # Group by file and write + for ep_idx in episodes_df.index: + key = ( + episodes_df.loc[ep_idx, "meta/episodes/chunk_index"], + episodes_df.loc[ep_idx, "meta/episodes/file_index"], + ) + path = dataset_path / DEFAULT_EPISODES_PATH.format(chunk_index=key[0], file_index=key[1]) + if path.exists(): + file_df = pd.read_parquet(path) + for col in cols + ( + [ + "subtask_names", + "subtask_start_times", + "subtask_end_times", + "subtask_start_frames", + "subtask_end_frames", + ] + if prefix == "sparse" + else [] + ): + if col not in file_df.columns: + file_df[col] = None + if ep_idx in annotations: + for col in cols: + file_df.at[ep_idx, col] = episodes_df.loc[ep_idx, col] + if prefix == "sparse": # Legacy columns + for i, legacy in enumerate( + [ + "subtask_names", + "subtask_start_times", + "subtask_end_times", + "subtask_start_frames", + "subtask_end_frames", + ] + ): + file_df.at[ep_idx, legacy] = episodes_df.loc[ep_idx, cols[i]] + file_df.to_parquet(path, engine="pyarrow", compression="snappy") + + +def generate_auto_sparse_annotations( + dataset: LeRobotDataset, episode_indices: list[int], video_key: str +) -> dict[int, SubtaskAnnotation]: + """Auto-generate single 'task' stage annotations for all episodes.""" + annotations = {} + for ep_idx in episode_indices: + start = float(dataset.meta.episodes[f"videos/{video_key}/from_timestamp"][ep_idx]) + end = float(dataset.meta.episodes[f"videos/{video_key}/to_timestamp"][ep_idx]) + duration = end - start + end_str = f"{int(duration // 60):02d}:{int(duration % 60):02d}" + annotations[ep_idx] = SubtaskAnnotation( + subtasks=[Subtask(name="task", timestamps=Timestamp(start="00:00", end=end_str))] + ) + return annotations + + +def load_annotations_from_dataset(dataset_path: Path, prefix: str = "sparse") -> dict[int, SubtaskAnnotation]: + """Load annotations from LeRobot dataset parquet files.""" + from lerobot.datasets.utils import load_episodes + + episodes_dataset = load_episodes(dataset_path) + if not episodes_dataset or len(episodes_dataset) == 0: + return {} + + col_names = f"{prefix}_subtask_names" + col_start = f"{prefix}_subtask_start_times" + col_end = f"{prefix}_subtask_end_times" + + # Fall back to legacy columns for sparse + if col_names not in episodes_dataset.column_names: + if prefix == "sparse" and "subtask_names" in episodes_dataset.column_names: + col_names, col_start, col_end = "subtask_names", "subtask_start_times", "subtask_end_times" + else: + return {} + + df = episodes_dataset.to_pandas() + annotations = {} + for ep_idx in df.index: + names = df.loc[ep_idx, col_names] + if names is None or (isinstance(names, float) and pd.isna(names)): + continue + starts, ends = df.loc[ep_idx, col_start], df.loc[ep_idx, col_end] + annotations[int(ep_idx)] = SubtaskAnnotation( + subtasks=[ + Subtask( + name=n, + timestamps=Timestamp( + start=f"{int(s) // 60:02d}:{int(s) % 60:02d}", + end=f"{int(e) // 60:02d}:{int(e) % 60:02d}", + ), + ) + for n, s, e in zip(names, starts, ends, strict=True) + ] + ) + return annotations + + +def 
process_single_episode( + ep_idx: int, + dataset_root: Path, + dataset_meta, + video_key: str, + fps: int, + annotator: VideoAnnotator, +) -> tuple[int, SubtaskAnnotation | None, str | None]: + """Process a single episode annotation.""" + try: + video_path = dataset_root / dataset_meta.get_video_file_path(ep_idx, video_key) + if not video_path.exists(): + return ep_idx, None, f"Video not found: {video_path}" + + start = float(dataset_meta.episodes[f"videos/{video_key}/from_timestamp"][ep_idx]) + end = float(dataset_meta.episodes[f"videos/{video_key}/to_timestamp"][ep_idx]) + return ep_idx, annotator.annotate(video_path, fps, start, end), None + except Exception as e: + return ep_idx, None, str(e) + + +def worker_process_episodes( + worker_id: int, + gpu_id: int, + episode_indices: list[int], + repo_id: str, + video_key: str, + sparse_subtask_list: list[str], + dense_subtask_list: list[str] | None, + model_name: str, + torch_dtype: torch.dtype, +) -> tuple[dict, dict | None]: + """Worker for parallel processing across GPUs.""" + device = f"cuda:{gpu_id}" + dataset = LeRobotDataset(repo_id, download_videos=False) + + sparse_annotator = VideoAnnotator(sparse_subtask_list, model_name, device, torch_dtype) + dense_annotator = ( + VideoAnnotator( + dense_subtask_list, + model_name, + device, + torch_dtype, + sparse_annotator.model, + sparse_annotator.processor, + ) + if dense_subtask_list + else None + ) + + sparse_annotations, dense_annotations = {}, {} if dense_subtask_list else None + + for ep_idx in episode_indices: + _, sparse_ann, err = process_single_episode( + ep_idx, dataset.root, dataset.meta, video_key, dataset.fps, sparse_annotator + ) + if sparse_ann: + sparse_annotations[ep_idx] = sparse_ann + + if dense_annotator: + _, dense_ann, _ = process_single_episode( + ep_idx, dataset.root, dataset.meta, video_key, dataset.fps, dense_annotator + ) + if dense_ann: + dense_annotations[ep_idx] = dense_ann + + return sparse_annotations, dense_annotations + + +def main(): + parser = argparse.ArgumentParser(description="SARM-style subtask annotation using local GPU (Qwen3-VL)") + parser.add_argument("--repo-id", type=str, required=True, help="HuggingFace dataset repository ID") + parser.add_argument( + "--sparse-subtasks", type=str, default=None, help="Comma-separated sparse subtask names" + ) + parser.add_argument( + "--dense-subtasks", type=str, default=None, help="Comma-separated dense subtask names" + ) + parser.add_argument( + "--dense-only", action="store_true", help="Dense-only mode with auto-generated sparse 'task' stage" + ) + parser.add_argument("--episodes", type=int, nargs="+", default=None, help="Episode indices to annotate") + parser.add_argument("--model", type=str, default="Qwen/Qwen3-VL-30B-A3B-Instruct", help="VLM model") + parser.add_argument("--skip-existing", action="store_true", help="Skip already annotated episodes") + parser.add_argument("--video-key", type=str, default=None, help="Video key (default: first available)") + parser.add_argument("--push-to-hub", action="store_true", help="Push to HuggingFace Hub") + parser.add_argument("--output-repo-id", type=str, default=None, help="Output repo ID for push") + parser.add_argument("--device", type=str, default="cuda", help="Device (cuda/cpu)") + parser.add_argument("--dtype", type=str, default="bfloat16", choices=["bfloat16", "float16", "float32"]) + parser.add_argument("--num-workers", type=int, default=1, help="Parallel workers for multi-GPU") + parser.add_argument("--gpu-ids", type=int, nargs="+", default=None, help="GPU 
IDs to use") + # Visualization options + parser.add_argument( + "--visualize-only", + action="store_true", + help="Only visualize existing annotations (no generation)", + ) + parser.add_argument( + "--num-visualizations", + type=int, + default=5, + help="Number of episodes to visualize (default: 5)", + ) + parser.add_argument( + "--visualize-type", + type=str, + default="sparse", + choices=["sparse", "dense", "both"], + help="Type of annotations to visualize (default: sparse)", + ) + parser.add_argument( + "--output-dir", + type=str, + default="./subtask_viz", + help="Output directory for visualizations (default: ./subtask_viz)", + ) + + args = parser.parse_args() + + # Load dataset first (needed for both annotation and visualization) + print(f"Loading dataset: {args.repo_id}") + dataset = LeRobotDataset(args.repo_id, download_videos=True) + fps = dataset.fps + + if not dataset.meta.video_keys: + raise ValueError("No video keys found") + + video_key = ( + args.video_key if args.video_key in (dataset.meta.video_keys or []) else dataset.meta.video_keys[0] + ) + print(f"Using camera: {video_key}, FPS: {fps}") + + # Handle visualization-only mode + if args.visualize_only: + print("Visualization-only mode") + sparse_annotations = load_annotations_from_dataset(dataset.root, prefix="sparse") + dense_annotations = load_annotations_from_dataset(dataset.root, prefix="dense") + + if not sparse_annotations and not dense_annotations: + return print("Error: No annotations found. Run annotation first.") + + print(f"Found {len(sparse_annotations)} sparse, {len(dense_annotations)} dense annotations") + + visualize_annotations( + dataset=dataset, + sparse_annotations=sparse_annotations, + dense_annotations=dense_annotations if dense_annotations else None, + video_key=video_key, + output_dir=Path(args.output_dir), + num_episodes=args.num_visualizations, + annotation_type=args.visualize_type, + episode_indices=args.episodes, + ) + return + + # Validate arguments for annotation mode + if args.dense_only and not args.dense_subtasks: + return print("Error: --dense-only requires --dense-subtasks") + if args.dense_subtasks and not args.sparse_subtasks and not args.dense_only: + return print("Error: --dense-subtasks requires --sparse-subtasks or --dense-only") + + sparse_subtask_list = ( + [s.strip() for s in args.sparse_subtasks.split(",")] if args.sparse_subtasks else None + ) + dense_subtask_list = [s.strip() for s in args.dense_subtasks.split(",")] if args.dense_subtasks else None + auto_sparse = sparse_subtask_list is None + dense_mode = dense_subtask_list is not None + torch_dtype = {"bfloat16": torch.bfloat16, "float16": torch.float16, "float32": torch.float32}[args.dtype] + + # Determine episodes + episode_indices = args.episodes or list(range(dataset.meta.total_episodes)) + + existing_annotations = load_annotations_from_dataset(dataset.root, prefix="sparse") + if args.skip_existing: + episode_indices = [ep for ep in episode_indices if ep not in existing_annotations] + + if not episode_indices: + return print("All episodes already annotated!") + print(f"Annotating {len(episode_indices)} episodes") + + # GPU setup + gpu_ids = args.gpu_ids or list( + range(min(args.num_workers, torch.cuda.device_count() if torch.cuda.is_available() else 1)) + ) + args.num_workers = len(gpu_ids) + + sparse_annotations = existing_annotations.copy() + dense_annotations = {} if dense_mode else None + + # Auto-sparse mode + if auto_sparse: + sparse_annotations.update(generate_auto_sparse_annotations(dataset, episode_indices, 
video_key)) + save_annotations_to_dataset(dataset.root, sparse_annotations, fps, prefix="sparse") + print(f"Auto-generated {len(episode_indices)} sparse 'task' annotations") + + # VLM annotation (for sparse if not auto, and for dense) + need_vlm = (not auto_sparse) or dense_mode + + if need_vlm: + if args.num_workers > 1 and not auto_sparse: + # Parallel processing + print(f"Parallel processing with {args.num_workers} workers") + episodes_per_worker = [[] for _ in range(args.num_workers)] + for i, ep_idx in enumerate(episode_indices): + episodes_per_worker[i % args.num_workers].append(ep_idx) + + with ProcessPoolExecutor( + max_workers=args.num_workers, mp_context=mp.get_context("spawn") + ) as executor: + futures = [ + executor.submit( + worker_process_episodes, + w, + gpu_ids[w], + episodes_per_worker[w], + args.repo_id, + video_key, + sparse_subtask_list, + dense_subtask_list, + args.model, + torch_dtype, + ) + for w in range(args.num_workers) + if episodes_per_worker[w] + ] + + for future in as_completed(futures): + try: + worker_sparse, worker_dense = future.result() + sparse_annotations.update(worker_sparse) + if dense_mode and worker_dense: + dense_annotations.update(worker_dense) + save_annotations_to_dataset(dataset.root, sparse_annotations, fps, prefix="sparse") + if dense_mode: + save_annotations_to_dataset(dataset.root, dense_annotations, fps, prefix="dense") + except Exception as e: + raise RuntimeError(f"Worker failed: {e}") from e + else: + # Sequential processing + sparse_annotator = ( + VideoAnnotator(sparse_subtask_list, args.model, args.device, torch_dtype) + if not auto_sparse and sparse_subtask_list + else None + ) + dense_annotator = ( + VideoAnnotator( + dense_subtask_list, + args.model, + args.device, + torch_dtype, + sparse_annotator.model if sparse_annotator else None, + sparse_annotator.processor if sparse_annotator else None, + ) + if dense_mode + else None + ) + + for i, ep_idx in enumerate(episode_indices): + print(f"Episode {ep_idx} ({i + 1}/{len(episode_indices)})") + + if sparse_annotator: + _, sparse_ann, err = process_single_episode( + ep_idx, dataset.root, dataset.meta, video_key, fps, sparse_annotator + ) + if sparse_ann: + sparse_annotations[ep_idx] = sparse_ann + save_annotations_to_dataset(dataset.root, sparse_annotations, fps, prefix="sparse") + elif err: + print(f"Sparse failed: {err}") + + if dense_annotator: + _, dense_ann, err = process_single_episode( + ep_idx, dataset.root, dataset.meta, video_key, fps, dense_annotator + ) + if dense_ann: + dense_annotations[ep_idx] = dense_ann + save_annotations_to_dataset(dataset.root, dense_annotations, fps, prefix="dense") + elif err: + print(f"Dense failed: {err}") + + # Save temporal proportions + def save_proportions(annotations, prefix, subtask_list=None, is_auto=False): + props: dict[str, float] = ( + {"task": 1.0} if is_auto else compute_temporal_proportions(annotations, fps, subtask_list) + ) + path = dataset.root / "meta" / f"temporal_proportions_{prefix}.json" + path.parent.mkdir(parents=True, exist_ok=True) + with open(path, "w") as f: + json.dump(props, f, indent=2) + print(f"Saved {prefix} temporal proportions") + + save_proportions(sparse_annotations, "sparse", sparse_subtask_list, auto_sparse) + if dense_mode and dense_annotations: + save_proportions(dense_annotations, "dense", dense_subtask_list) + + print(f"\nComplete! 
{len(sparse_annotations)} sparse, {len(dense_annotations or {})} dense annotations")
+
+ # Visualize annotations after generation
+ if args.num_visualizations > 0:
+ print(f"\nGenerating {args.num_visualizations} visualizations...")
+ visualize_type = "both" if dense_mode else "sparse"
+ visualize_annotations(
+ dataset=dataset,
+ sparse_annotations=sparse_annotations,
+ dense_annotations=dense_annotations,
+ video_key=video_key,
+ output_dir=Path(args.output_dir),
+ num_episodes=args.num_visualizations,
+ annotation_type=visualize_type,
+ )
+
+ if args.push_to_hub:
+ try:
+ # NOTE: LeRobotDataset.push_to_hub pushes to the dataset's own repo_id;
+ # --output-repo-id is accepted on the CLI but not applied here.
+ dataset.push_to_hub(push_videos=True)
+ print(f"Pushed to {args.repo_id}")
+ except Exception as e:
+ print(f"Push failed: {e}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lerobot/src/lerobot/datasets/push_dataset_to_hub/utils.py b/lerobot/src/lerobot/datasets/push_dataset_to_hub/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..970196d378b033c34b56a6750a87f3611ba3f968
--- /dev/null
+++ b/lerobot/src/lerobot/datasets/push_dataset_to_hub/utils.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datasets
+import torch
+
+
+# TODO(aliberts): remove
+def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> dict[str, torch.Tensor]:
+ """
+ Calculate episode data index for the provided HuggingFace Dataset. Relies on episode_index column of hf_dataset.
+
+ Parameters:
+ - hf_dataset (datasets.Dataset): A HuggingFace dataset containing the episode index.
+
+ Returns:
+ - episode_data_index: A dictionary containing the data index for each episode. The dictionary has two keys:
+ - "from": A tensor containing the starting index of each episode.
+ - "to": A tensor containing the ending index of each episode.
+ """
+ episode_data_index = {"from": [], "to": []}
+
+ current_episode = None
+ """
+ The episode_index is a list of integers, each representing the episode index of the corresponding example.
+ For instance, the following is a valid episode_index:
+ [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]
+
+ Below, we iterate through the episode_index and populate the episode_data_index dictionary with the starting and
For the episode_index above, the episode_data_index dictionary will look like this: + { + "from": [0, 3, 7], + "to": [3, 7, 12] + } + """ + if len(hf_dataset) == 0: + episode_data_index = { + "from": torch.tensor([]), + "to": torch.tensor([]), + } + return episode_data_index + for idx, episode_idx in enumerate(hf_dataset["episode_index"]): + if episode_idx != current_episode: + # We encountered a new episode, so we append its starting location to the "from" list + episode_data_index["from"].append(idx) + # If this is not the first episode, we append the ending location of the previous episode to the "to" list + if current_episode is not None: + episode_data_index["to"].append(idx) + # Let's keep track of the current episode index + current_episode = episode_idx + else: + # We are still in the same episode, so there is nothing for us to do here + pass + # We have reached the end of the dataset, so we append the ending location of the last episode to the "to" list + episode_data_index["to"].append(idx + 1) + + for k in ["from", "to"]: + episode_data_index[k] = torch.tensor(episode_data_index[k]) + + return episode_data_index diff --git a/lerobot/src/lerobot/datasets/v30/augment_dataset_quantile_stats.py b/lerobot/src/lerobot/datasets/v30/augment_dataset_quantile_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..83a60c7442f4db0a9173f716ac765dcc208f5e66 --- /dev/null +++ b/lerobot/src/lerobot/datasets/v30/augment_dataset_quantile_stats.py @@ -0,0 +1,260 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This script augments existing LeRobot datasets with quantile statistics. + +Most datasets created before the quantile feature was added do not contain +quantile statistics (q01, q10, q50, q90, q99) in their metadata. This script: + +1. Loads an existing LeRobot dataset in v3.0 format +2. Checks if it already contains quantile statistics +3. If missing, computes quantile statistics for all features +4. Updates the dataset metadata with the new quantile statistics + +Usage: + +```bash +python src/lerobot/datasets/v30/augment_dataset_quantile_stats.py \ + --repo-id=lerobot/pusht \ +``` +""" + +import argparse +import concurrent.futures +import logging +from pathlib import Path + +import numpy as np +import torch +from huggingface_hub import HfApi +from requests import HTTPError +from tqdm import tqdm + +from lerobot.datasets.compute_stats import DEFAULT_QUANTILES, aggregate_stats, get_feature_stats +from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset +from lerobot.datasets.utils import write_stats +from lerobot.utils.utils import init_logging + + +def has_quantile_stats(stats: dict[str, dict] | None, quantile_list_keys: list[str] | None = None) -> bool: + """Check if dataset statistics already contain quantile information. 
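+
+    A minimal illustrative sketch (the nested stats layout shown here is an
+    assumption for demonstration, not taken from a real dataset):
+
+    ```python
+    has_quantile_stats({"observation.state": {"mean": [0.0], "q01": [-1.2]}})  # True
+    has_quantile_stats({"observation.state": {"mean": [0.0], "std": [1.0]}})  # False
+    ```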
+ + Args: + stats: Dataset statistics dictionary + + Returns: + True if quantile statistics are present, False otherwise + """ + if quantile_list_keys is None: + quantile_list_keys = [f"q{int(q * 100):02d}" for q in DEFAULT_QUANTILES] + + if stats is None: + return False + + for feature_stats in stats.values(): + if any(q_key in feature_stats for q_key in quantile_list_keys): + return True + + return False + + +def process_single_episode(dataset: LeRobotDataset, episode_idx: int) -> dict: + """Process a single episode and return its statistics. + + Args: + dataset: The LeRobot dataset + episode_idx: Index of the episode to process + + Returns: + Dictionary containing episode statistics + """ + logging.info(f"Computing stats for episode {episode_idx}") + + start_idx = dataset.meta.episodes[episode_idx]["dataset_from_index"] + end_idx = dataset.meta.episodes[episode_idx]["dataset_to_index"] + + collected_data: dict[str, list] = {} + for idx in range(start_idx, end_idx): + item = dataset[idx] + for key, value in item.items(): + if key not in dataset.features: + continue + + if key not in collected_data: + collected_data[key] = [] + collected_data[key].append(value) + + ep_stats = {} + for key, data_list in collected_data.items(): + if dataset.features[key]["dtype"] == "string": + continue + + data = torch.stack(data_list).cpu().numpy() + if dataset.features[key]["dtype"] in ["image", "video"]: + if data.dtype == np.uint8: + data = data.astype(np.float32) / 255.0 + + axes_to_reduce = (0, 2, 3) + keepdims = True + else: + axes_to_reduce = 0 + keepdims = data.ndim == 1 + + ep_stats[key] = get_feature_stats( + data, axis=axes_to_reduce, keepdims=keepdims, quantile_list=DEFAULT_QUANTILES + ) + + if dataset.features[key]["dtype"] in ["image", "video"]: + ep_stats[key] = { + k: v if k == "count" else np.squeeze(v, axis=0) for k, v in ep_stats[key].items() + } + + return ep_stats + + +def compute_quantile_stats_for_dataset(dataset: LeRobotDataset) -> dict[str, dict]: + """Compute quantile statistics for all episodes in the dataset. + + Args: + dataset: The LeRobot dataset to compute statistics for + + Returns: + Dictionary containing aggregated statistics with quantiles + + Note: + Video decoding operations are not thread-safe, so we process episodes sequentially + when video keys are present. For datasets without videos, we use parallel processing + with ThreadPoolExecutor for better performance. 
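+
+    Example (an illustrative sketch; the repo id and feature key are placeholders):
+
+    ```python
+    dataset = LeRobotDataset("lerobot/pusht")
+    stats = compute_quantile_stats_for_dataset(dataset)
+    print(stats["observation.state"]["q50"])  # per-feature quantiles, e.g. q01...q99
+    ```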
+ """ + logging.info(f"Computing quantile statistics for dataset with {dataset.num_episodes} episodes") + + episode_stats_list = [] + has_videos = len(dataset.meta.video_keys) > 0 + + if has_videos: + logging.info("Dataset contains video keys - using sequential processing for thread safety") + for episode_idx in tqdm(range(dataset.num_episodes), desc="Processing episodes"): + ep_stats = process_single_episode(dataset, episode_idx) + episode_stats_list.append(ep_stats) + else: + logging.info("Dataset has no video keys - using parallel processing for better performance") + max_workers = min(dataset.num_episodes, 16) + + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_episode = { + executor.submit(process_single_episode, dataset, episode_idx): episode_idx + for episode_idx in range(dataset.num_episodes) + } + + episode_results = {} + with tqdm(total=dataset.num_episodes, desc="Processing episodes") as pbar: + for future in concurrent.futures.as_completed(future_to_episode): + episode_idx = future_to_episode[future] + ep_stats = future.result() + episode_results[episode_idx] = ep_stats + pbar.update(1) + + for episode_idx in range(dataset.num_episodes): + if episode_idx in episode_results: + episode_stats_list.append(episode_results[episode_idx]) + + if not episode_stats_list: + raise ValueError("No episode data found for computing statistics") + + logging.info(f"Aggregating statistics from {len(episode_stats_list)} episodes") + return aggregate_stats(episode_stats_list) + + +def augment_dataset_with_quantile_stats( + repo_id: str, + root: str | Path | None = None, + overwrite: bool = False, +) -> None: + """Augment a dataset with quantile statistics if they are missing. + + Args: + repo_id: Repository ID of the dataset + root: Local root directory for the dataset + overwrite: Overwrite existing quantile statistics if they already exist + """ + logging.info(f"Loading dataset: {repo_id}") + dataset = LeRobotDataset( + repo_id=repo_id, + root=root, + ) + + if not overwrite and has_quantile_stats(dataset.meta.stats): + logging.info("Dataset already contains quantile statistics. No action needed.") + return + + logging.info("Dataset does not contain quantile statistics. Computing them now...") + + new_stats = compute_quantile_stats_for_dataset(dataset) + + logging.info("Updating dataset metadata with new quantile statistics") + dataset.meta.stats = new_stats + + write_stats(new_stats, dataset.meta.root) + + logging.info("Successfully updated dataset with quantile statistics") + dataset.push_to_hub() + + hub_api = HfApi() + try: + hub_api.delete_tag(repo_id, tag=CODEBASE_VERSION, repo_type="dataset") + except HTTPError as e: + logging.info(f"tag={CODEBASE_VERSION} probably doesn't exist. 
Skipping exception ({e})") + pass + hub_api.create_tag(repo_id, tag=CODEBASE_VERSION, revision=None, repo_type="dataset") + + +def main(): + """Main function to run the augmentation script.""" + parser = argparse.ArgumentParser(description="Augment LeRobot dataset with quantile statistics") + + parser.add_argument( + "--repo-id", + type=str, + required=True, + help="Repository ID of the dataset (e.g., 'lerobot/pusht')", + ) + + parser.add_argument( + "--root", + type=str, + help="Local root directory for the dataset", + ) + parser.add_argument( + "--overwrite", + action="store_true", + help="Overwrite existing quantile statistics if they already exist", + ) + + args = parser.parse_args() + root = Path(args.root) if args.root else None + + init_logging() + + augment_dataset_with_quantile_stats( + repo_id=args.repo_id, + root=root, + overwrite=args.overwrite, + ) + + +if __name__ == "__main__": + main() diff --git a/lerobot/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py b/lerobot/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py new file mode 100644 index 0000000000000000000000000000000000000000..b3198053ba7c6773fbdc93ef4dd6bdb4ba50c524 --- /dev/null +++ b/lerobot/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py @@ -0,0 +1,571 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 2.1 to +3.0. It will: + +- Generate per-episodes stats and writes them in `episodes_stats.jsonl` +- Check consistency between these new stats and the old ones. +- Remove the deprecated `stats.json`. +- Update codebase_version in `info.json`. +- Push this new version to the hub on the 'main' branch and tags it with "v3.0". 
+ +Usage: + +Convert a dataset from the hub: +```bash +python src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py \ + --repo-id=lerobot/pusht +``` + +Convert a local dataset (works in place): +```bash +python src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py \ + --repo-id=lerobot/pusht \ + --root=/path/to/local/dataset/directory + --push-to-hub=false +``` + +""" + +import argparse +import logging +import shutil +from pathlib import Path +from typing import Any + +import jsonlines +import pandas as pd +import pyarrow as pa +import tqdm +from datasets import Dataset, Features, Image +from huggingface_hub import HfApi, snapshot_download +from requests import HTTPError + +from lerobot.datasets.compute_stats import aggregate_stats +from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset +from lerobot.datasets.utils import ( + DEFAULT_CHUNK_SIZE, + DEFAULT_DATA_FILE_SIZE_IN_MB, + DEFAULT_DATA_PATH, + DEFAULT_VIDEO_FILE_SIZE_IN_MB, + DEFAULT_VIDEO_PATH, + LEGACY_EPISODES_PATH, + LEGACY_EPISODES_STATS_PATH, + LEGACY_TASKS_PATH, + cast_stats_to_numpy, + flatten_dict, + get_file_size_in_mb, + get_parquet_file_size_in_mb, + get_parquet_num_frames, + load_info, + update_chunk_file_indices, + write_episodes, + write_info, + write_stats, + write_tasks, +) +from lerobot.datasets.video_utils import concatenate_video_files, get_video_duration_in_s +from lerobot.utils.constants import HF_LEROBOT_HOME +from lerobot.utils.utils import init_logging + +V21 = "v2.1" +V30 = "v3.0" + +""" +------------------------- +OLD +data/chunk-000/episode_000000.parquet + +NEW +data/chunk-000/file_000.parquet +------------------------- +OLD +videos/chunk-000/CAMERA/episode_000000.mp4 + +NEW +videos/CAMERA/chunk-000/file_000.mp4 +------------------------- +OLD +episodes.jsonl +{"episode_index": 1, "tasks": ["Put the blue block in the green bowl"], "length": 266} + +NEW +meta/episodes/chunk-000/episodes_000.parquet +episode_index | video_chunk_index | video_file_index | data_chunk_index | data_file_index | tasks | length +------------------------- +OLD +tasks.jsonl +{"task_index": 1, "task": "Put the blue block in the green bowl"} + +NEW +meta/tasks/chunk-000/file_000.parquet +task_index | task +------------------------- +OLD +episodes_stats.jsonl + +NEW +meta/episodes_stats/chunk-000/file_000.parquet +episode_index | mean | std | min | max +------------------------- +UPDATE +meta/info.json +------------------------- +""" + + +def load_jsonlines(fpath: Path) -> list[Any]: + with jsonlines.open(fpath, "r") as reader: + return list(reader) + + +def legacy_load_episodes(local_dir: Path) -> dict: + episodes = load_jsonlines(local_dir / LEGACY_EPISODES_PATH) + return {item["episode_index"]: item for item in sorted(episodes, key=lambda x: x["episode_index"])} + + +def legacy_load_episodes_stats(local_dir: Path) -> dict: + episodes_stats = load_jsonlines(local_dir / LEGACY_EPISODES_STATS_PATH) + return { + item["episode_index"]: cast_stats_to_numpy(item["stats"]) + for item in sorted(episodes_stats, key=lambda x: x["episode_index"]) + } + + +def legacy_load_tasks(local_dir: Path) -> tuple[dict, dict]: + tasks = load_jsonlines(local_dir / LEGACY_TASKS_PATH) + tasks = {item["task_index"]: item["task"] for item in sorted(tasks, key=lambda x: x["task_index"])} + task_to_task_index = {task: task_index for task_index, task in tasks.items()} + return tasks, task_to_task_index + + +def validate_local_dataset_version(local_path: Path) -> None: + """Validate that the local dataset has the expected v2.1 version.""" 
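+    # Illustrative example of the field this check reads from meta/info.json:
+    # {"codebase_version": "v2.1", ...}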
+ info = load_info(local_path) + dataset_version = info.get("codebase_version", "unknown") + if dataset_version != V21: + raise ValueError( + f"Local dataset has codebase version '{dataset_version}', expected '{V21}'. " + f"This script is specifically for converting v2.1 datasets to v3.0." + ) + + +def convert_tasks(root, new_root): + logging.info(f"Converting tasks from {root} to {new_root}") + tasks, _ = legacy_load_tasks(root) + task_indices = tasks.keys() + task_strings = tasks.values() + df_tasks = pd.DataFrame({"task_index": task_indices}, index=task_strings) + write_tasks(df_tasks, new_root) + + +def concat_data_files(paths_to_cat, new_root, chunk_idx, file_idx, image_keys): + # TODO(rcadene): to save RAM use Dataset.from_parquet(file) and concatenate_datasets + dataframes = [pd.read_parquet(file) for file in paths_to_cat] + # Concatenate all DataFrames along rows + concatenated_df = pd.concat(dataframes, ignore_index=True) + + path = new_root / DEFAULT_DATA_PATH.format(chunk_index=chunk_idx, file_index=file_idx) + path.parent.mkdir(parents=True, exist_ok=True) + + if len(image_keys) > 0: + schema = pa.Schema.from_pandas(concatenated_df) + features = Features.from_arrow_schema(schema) + for key in image_keys: + features[key] = Image() + schema = features.arrow_schema + else: + schema = None + + concatenated_df.to_parquet(path, index=False, schema=schema) + + +def convert_data(root: Path, new_root: Path, data_file_size_in_mb: int): + data_dir = root / "data" + ep_paths = sorted(data_dir.glob("*/*.parquet")) + + image_keys = get_image_keys(root) + + ep_idx = 0 + chunk_idx = 0 + file_idx = 0 + size_in_mb = 0 + num_frames = 0 + paths_to_cat = [] + episodes_metadata = [] + + logging.info(f"Converting data files from {len(ep_paths)} episodes") + + for ep_path in tqdm.tqdm(ep_paths, desc="convert data files"): + ep_size_in_mb = get_parquet_file_size_in_mb(ep_path) + ep_num_frames = get_parquet_num_frames(ep_path) + ep_metadata = { + "episode_index": ep_idx, + "data/chunk_index": chunk_idx, + "data/file_index": file_idx, + "dataset_from_index": num_frames, + "dataset_to_index": num_frames + ep_num_frames, + } + size_in_mb += ep_size_in_mb + num_frames += ep_num_frames + episodes_metadata.append(ep_metadata) + ep_idx += 1 + + if size_in_mb < data_file_size_in_mb: + paths_to_cat.append(ep_path) + continue + + if paths_to_cat: + concat_data_files(paths_to_cat, new_root, chunk_idx, file_idx, image_keys) + + # Reset for the next file + size_in_mb = ep_size_in_mb + paths_to_cat = [ep_path] + + chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, DEFAULT_CHUNK_SIZE) + + # Write remaining data if any + if paths_to_cat: + concat_data_files(paths_to_cat, new_root, chunk_idx, file_idx, image_keys) + + return episodes_metadata + + +def get_video_keys(root): + info = load_info(root) + features = info["features"] + video_keys = [key for key, ft in features.items() if ft["dtype"] == "video"] + return video_keys + + +def get_image_keys(root): + info = load_info(root) + features = info["features"] + image_keys = [key for key, ft in features.items() if ft["dtype"] == "image"] + return image_keys + + +def convert_videos(root: Path, new_root: Path, video_file_size_in_mb: int): + logging.info(f"Converting videos from {root} to {new_root}") + + video_keys = get_video_keys(root) + if len(video_keys) == 0: + return None + + video_keys = sorted(video_keys) + + eps_metadata_per_cam = [] + for camera in video_keys: + eps_metadata = convert_videos_of_camera(root, new_root, camera, 
video_file_size_in_mb)
+        eps_metadata_per_cam.append(eps_metadata)
+
+    num_eps_per_cam = [len(eps_cam_map) for eps_cam_map in eps_metadata_per_cam]
+    if len(set(num_eps_per_cam)) != 1:
+        raise ValueError(f"Not all cameras have the same number of episodes ({num_eps_per_cam}).")
+
+    episodes_metadata = []
+    num_cameras = len(video_keys)
+    num_episodes = num_eps_per_cam[0]
+    for ep_idx in tqdm.tqdm(range(num_episodes), desc="convert videos"):
+        # Sanity check
+        ep_ids = [eps_metadata_per_cam[cam_idx][ep_idx]["episode_index"] for cam_idx in range(num_cameras)]
+        ep_ids += [ep_idx]
+        if len(set(ep_ids)) != 1:
+            raise ValueError(f"All episode indices need to match ({ep_ids}).")
+
+        ep_dict = {}
+        for cam_idx in range(num_cameras):
+            ep_dict.update(eps_metadata_per_cam[cam_idx][ep_idx])
+        episodes_metadata.append(ep_dict)
+
+    return episodes_metadata
+
+
+def convert_videos_of_camera(root: Path, new_root: Path, video_key: str, video_file_size_in_mb: int):
+    # Access old paths to mp4
+    videos_dir = root / "videos"
+    ep_paths = sorted(videos_dir.glob(f"*/{video_key}/*.mp4"))
+
+    ep_idx = 0
+    chunk_idx = 0
+    file_idx = 0
+    size_in_mb = 0
+    duration_in_s = 0.0
+    paths_to_cat = []
+    episodes_metadata = []
+
+    for ep_path in tqdm.tqdm(ep_paths, desc=f"convert videos of {video_key}"):
+        ep_size_in_mb = get_file_size_in_mb(ep_path)
+        ep_duration_in_s = get_video_duration_in_s(ep_path)
+
+        # Check if adding this episode would exceed the limit
+        if size_in_mb + ep_size_in_mb >= video_file_size_in_mb and len(paths_to_cat) > 0:
+            # Size limit would be exceeded, save current accumulation WITHOUT this episode
+            concatenate_video_files(
+                paths_to_cat,
+                new_root
+                / DEFAULT_VIDEO_PATH.format(video_key=video_key, chunk_index=chunk_idx, file_index=file_idx),
+            )
+
+            # Update episodes metadata for the file we just saved
+            for i, _ in enumerate(paths_to_cat):
+                past_ep_idx = ep_idx - len(paths_to_cat) + i
+                episodes_metadata[past_ep_idx][f"videos/{video_key}/chunk_index"] = chunk_idx
+                episodes_metadata[past_ep_idx][f"videos/{video_key}/file_index"] = file_idx
+
+            # Move to next file and start fresh with current episode
+            chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, DEFAULT_CHUNK_SIZE)
+            size_in_mb = 0
+            duration_in_s = 0.0
+            paths_to_cat = []
+
+        # Add current episode metadata
+        ep_metadata = {
+            "episode_index": ep_idx,
+            f"videos/{video_key}/chunk_index": chunk_idx,  # Will be updated when file is saved
+            f"videos/{video_key}/file_index": file_idx,  # Will be updated when file is saved
+            f"videos/{video_key}/from_timestamp": duration_in_s,
+            f"videos/{video_key}/to_timestamp": duration_in_s + ep_duration_in_s,
+        }
+        episodes_metadata.append(ep_metadata)
+
+        # Add current episode to accumulation
+        paths_to_cat.append(ep_path)
+        size_in_mb += ep_size_in_mb
+        duration_in_s += ep_duration_in_s
+        ep_idx += 1
+
+    # Write remaining videos if any
+    if paths_to_cat:
+        concatenate_video_files(
+            paths_to_cat,
+            new_root
+            / DEFAULT_VIDEO_PATH.format(video_key=video_key, chunk_index=chunk_idx, file_index=file_idx),
+        )
+
+        # Update episodes metadata for the final file
+        for i, _ in enumerate(paths_to_cat):
+            past_ep_idx = ep_idx - len(paths_to_cat) + i
+            episodes_metadata[past_ep_idx][f"videos/{video_key}/chunk_index"] = chunk_idx
+            episodes_metadata[past_ep_idx][f"videos/{video_key}/file_index"] = file_idx
+
+    return episodes_metadata
+
+
+def generate_episode_metadata_dict(
+    episodes_legacy_metadata, episodes_metadata, episodes_stats, episodes_videos=None
+):
+    num_episodes = len(episodes_metadata)
+    episodes_legacy_metadata_vals = list(episodes_legacy_metadata.values())
+    episodes_stats_vals = list(episodes_stats.values())
+    episodes_stats_keys = list(episodes_stats.keys())
+
+    for i in range(num_episodes):
+        ep_legacy_metadata = episodes_legacy_metadata_vals[i]
+        ep_metadata = episodes_metadata[i]
+        ep_stats = episodes_stats_vals[i]
+
+        ep_ids_set = {
+            ep_legacy_metadata["episode_index"],
+            ep_metadata["episode_index"],
+            episodes_stats_keys[i],
+        }
+
+        if episodes_videos is None:
+            ep_video = {}
+        else:
+            ep_video = episodes_videos[i]
+            ep_ids_set.add(ep_video["episode_index"])
+
+        if len(ep_ids_set) != 1:
+            raise ValueError(f"Episode indices do not match across metadata sources ({ep_ids_set}).")
+
+        ep_dict = {**ep_metadata, **ep_video, **ep_legacy_metadata, **flatten_dict({"stats": ep_stats})}
+        ep_dict["meta/episodes/chunk_index"] = 0
+        ep_dict["meta/episodes/file_index"] = 0
+        yield ep_dict
+
+
+def convert_episodes_metadata(root, new_root, episodes_metadata, episodes_video_metadata=None):
+    logging.info(f"Converting episodes metadata from {root} to {new_root}")
+
+    episodes_legacy_metadata = legacy_load_episodes(root)
+    episodes_stats = legacy_load_episodes_stats(root)
+
+    num_eps_set = {len(episodes_legacy_metadata), len(episodes_metadata)}
+    if episodes_video_metadata is not None:
+        num_eps_set.add(len(episodes_video_metadata))
+
+    if len(num_eps_set) != 1:
+        raise ValueError(f"Number of episodes is not the same ({num_eps_set}).")
+
+    ds_episodes = Dataset.from_generator(
+        lambda: generate_episode_metadata_dict(
+            episodes_legacy_metadata, episodes_metadata, episodes_stats, episodes_video_metadata
+        )
+    )
+    write_episodes(ds_episodes, new_root)
+
+    stats = aggregate_stats(list(episodes_stats.values()))
+    write_stats(stats, new_root)
+
+
+def convert_info(root, new_root, data_file_size_in_mb, video_file_size_in_mb):
+    info = load_info(root)
+    info["codebase_version"] = V30
+    del info["total_chunks"]
+    del info["total_videos"]
+    info["data_files_size_in_mb"] = data_file_size_in_mb
+    info["video_files_size_in_mb"] = video_file_size_in_mb
+    info["data_path"] = DEFAULT_DATA_PATH
+    info["video_path"] = DEFAULT_VIDEO_PATH if info["video_path"] is not None else None
+    info["fps"] = int(info["fps"])
+    logging.info(f"Converting info from {root} to {new_root}")
+    for key in info["features"]:
+        if info["features"][key]["dtype"] == "video":
+            # already has fps in video_info
+            continue
+        info["features"][key]["fps"] = info["fps"]
+    write_info(info, new_root)
+
+
+def convert_dataset(
+    repo_id: str,
+    branch: str | None = None,
+    data_file_size_in_mb: int | None = None,
+    video_file_size_in_mb: int | None = None,
+    root: str | Path | None = None,
+    push_to_hub: bool = True,
+    force_conversion: bool = False,
+):
+    if data_file_size_in_mb is None:
+        data_file_size_in_mb = DEFAULT_DATA_FILE_SIZE_IN_MB
+    if video_file_size_in_mb is None:
+        video_file_size_in_mb = DEFAULT_VIDEO_FILE_SIZE_IN_MB
+
+    # First check if the dataset already has a v3.0 version
+    if root is None and not force_conversion:
+        try:
+            print("Trying to download v3.0 version of the dataset from the hub...")
+            snapshot_download(repo_id, repo_type="dataset", revision=V30, local_dir=HF_LEROBOT_HOME / repo_id)
+            return
+        except Exception:
+            print("Dataset does not have an uploaded v3.0 version. 
Continuing with conversion.") + + # Set root based on whether local dataset path is provided + use_local_dataset = False + root = HF_LEROBOT_HOME / repo_id if root is None else Path(root) / repo_id + if root.exists(): + validate_local_dataset_version(root) + use_local_dataset = True + print(f"Using local dataset at {root}") + + old_root = root.parent / f"{root.name}_old" + new_root = root.parent / f"{root.name}_v30" + + # Handle old_root cleanup if both old_root and root exist + if old_root.is_dir() and root.is_dir(): + shutil.rmtree(str(root)) + shutil.move(str(old_root), str(root)) + + if new_root.is_dir(): + shutil.rmtree(new_root) + + if not use_local_dataset: + snapshot_download( + repo_id, + repo_type="dataset", + revision=V21, + local_dir=root, + ) + + convert_info(root, new_root, data_file_size_in_mb, video_file_size_in_mb) + convert_tasks(root, new_root) + episodes_metadata = convert_data(root, new_root, data_file_size_in_mb) + episodes_videos_metadata = convert_videos(root, new_root, video_file_size_in_mb) + convert_episodes_metadata(root, new_root, episodes_metadata, episodes_videos_metadata) + + shutil.move(str(root), str(old_root)) + shutil.move(str(new_root), str(root)) + + if push_to_hub: + hub_api = HfApi() + try: + hub_api.delete_tag(repo_id, tag=CODEBASE_VERSION, repo_type="dataset") + except HTTPError as e: + print(f"tag={CODEBASE_VERSION} probably doesn't exist. Skipping exception ({e})") + pass + hub_api.delete_files( + delete_patterns=["data/chunk*/episode_*", "meta/*.jsonl", "videos/chunk*"], + repo_id=repo_id, + revision=branch, + repo_type="dataset", + ) + hub_api.create_tag(repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset") + + LeRobotDataset(repo_id).push_to_hub() + + +if __name__ == "__main__": + init_logging() + parser = argparse.ArgumentParser() + parser.add_argument( + "--repo-id", + type=str, + required=True, + help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset " + "(e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).", + ) + parser.add_argument( + "--branch", + type=str, + default=None, + help="Repo branch to push your dataset. Defaults to the main branch.", + ) + parser.add_argument( + "--data-file-size-in-mb", + type=int, + default=None, + help="File size in MB. Defaults to 100 for data and 500 for videos.", + ) + parser.add_argument( + "--video-file-size-in-mb", + type=int, + default=None, + help="File size in MB. Defaults to 100 for data and 500 for videos.", + ) + parser.add_argument( + "--root", + type=str, + default=None, + help="Local directory to use for downloading/writing the dataset.", + ) + parser.add_argument( + "--push-to-hub", + type=lambda input: input.lower() == "true", + default=True, + help="Push the converted dataset to the hub.", + ) + parser.add_argument( + "--force-conversion", + action="store_true", + help="Force conversion even if the dataset already has a v3.0 version.", + ) + + args = parser.parse_args() + convert_dataset(**vars(args)) diff --git a/lerobot/src/lerobot/motors/dynamixel/__init__.py b/lerobot/src/lerobot/motors/dynamixel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..38b770cd6a0947395f4c0f6e2f37bbdc8120d965 --- /dev/null +++ b/lerobot/src/lerobot/motors/dynamixel/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .dynamixel import DriveMode, DynamixelMotorsBus, OperatingMode, TorqueMode +from .tables import * diff --git a/lerobot/src/lerobot/motors/dynamixel/dynamixel.py b/lerobot/src/lerobot/motors/dynamixel/dynamixel.py new file mode 100644 index 0000000000000000000000000000000000000000..fbc63fef8e999e1756dd8c05784f0c4dce545d39 --- /dev/null +++ b/lerobot/src/lerobot/motors/dynamixel/dynamixel.py @@ -0,0 +1,264 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO(aliberts): Should we implement FastSyncRead/Write? +# https://github.com/ROBOTIS-GIT/DynamixelSDK/pull/643 +# https://github.com/ROBOTIS-GIT/DynamixelSDK/releases/tag/3.8.2 +# https://emanual.robotis.com/docs/en/dxl/protocol2/#fast-sync-read-0x8a +# -> Need to check compatibility across models + +import logging +from copy import deepcopy +from enum import Enum + +from lerobot.motors.encoding_utils import decode_twos_complement, encode_twos_complement + +from ..motors_bus import Motor, MotorCalibration, MotorsBus, NameOrID, Value, get_address +from .tables import ( + AVAILABLE_BAUDRATES, + MODEL_BAUDRATE_TABLE, + MODEL_CONTROL_TABLE, + MODEL_ENCODING_TABLE, + MODEL_NUMBER_TABLE, + MODEL_RESOLUTION, +) + +PROTOCOL_VERSION = 2.0 +DEFAULT_BAUDRATE = 1_000_000 +DEFAULT_TIMEOUT_MS = 1000 + +NORMALIZED_DATA = ["Goal_Position", "Present_Position"] + +logger = logging.getLogger(__name__) + + +class OperatingMode(Enum): + # DYNAMIXEL only controls current(torque) regardless of speed and position. This mode is ideal for a + # gripper or a system that only uses current(torque) control or a system that has additional + # velocity/position controllers. + CURRENT = 0 + + # This mode controls velocity. This mode is identical to the Wheel Mode(endless) from existing DYNAMIXEL. + # This mode is ideal for wheel-type robots. + VELOCITY = 1 + + # This mode controls position. This mode is identical to the Joint Mode from existing DYNAMIXEL. Operating + # position range is limited by the Max Position Limit(48) and the Min Position Limit(52). This mode is + # ideal for articulated robots that each joint rotates less than 360 degrees. + POSITION = 3 + + # This mode controls position. This mode is identical to the Multi-turn Position Control from existing + # DYNAMIXEL. 512 turns are supported(-256[rev] ~ 256[rev]). This mode is ideal for multi-turn wrists or + # conveyor systems or a system that requires an additional reduction gear. 
Note that Max Position
+    # Limit(48), Min Position Limit(52) are not used on Extended Position Control Mode.
+    EXTENDED_POSITION = 4
+
+    # This mode controls both position and current(torque). Up to 512 turns are supported (-256[rev] ~
+    # 256[rev]). This mode is ideal for a system that requires both position and current control such as
+    # articulated robots or grippers.
+    CURRENT_POSITION = 5
+
+    # This mode directly controls PWM output. (Voltage Control Mode)
+    PWM = 16
+
+
+class DriveMode(Enum):
+    NON_INVERTED = 0
+    INVERTED = 1
+
+
+class TorqueMode(Enum):
+    ENABLED = 1
+    DISABLED = 0
+
+
+def _split_into_byte_chunks(value: int, length: int) -> list[int]:
+    import dynamixel_sdk as dxl
+
+    if length == 1:
+        data = [value]
+    elif length == 2:
+        data = [dxl.DXL_LOBYTE(value), dxl.DXL_HIBYTE(value)]
+    elif length == 4:
+        data = [
+            dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
+            dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)),
+            dxl.DXL_LOBYTE(dxl.DXL_HIWORD(value)),
+            dxl.DXL_HIBYTE(dxl.DXL_HIWORD(value)),
+        ]
+    else:
+        # Guard against silently returning an unbound value for unsupported lengths.
+        raise ValueError(f"Unexpected byte length: {length}. Expected 1, 2 or 4.")
+    return data
+
+
+class DynamixelMotorsBus(MotorsBus):
+    """
+    The Dynamixel implementation for a MotorsBus. It relies on the python dynamixel sdk to communicate with
+    the motors. For more info, see the Dynamixel SDK Documentation:
+    https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20
+    """
+
+    apply_drive_mode = False
+    available_baudrates = deepcopy(AVAILABLE_BAUDRATES)
+    default_baudrate = DEFAULT_BAUDRATE
+    default_timeout = DEFAULT_TIMEOUT_MS
+    model_baudrate_table = deepcopy(MODEL_BAUDRATE_TABLE)
+    model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE)
+    model_encoding_table = deepcopy(MODEL_ENCODING_TABLE)
+    model_number_table = deepcopy(MODEL_NUMBER_TABLE)
+    model_resolution_table = deepcopy(MODEL_RESOLUTION)
+    normalized_data = deepcopy(NORMALIZED_DATA)
+
+    def __init__(
+        self,
+        port: str,
+        motors: dict[str, Motor],
+        calibration: dict[str, MotorCalibration] | None = None,
+    ):
+        super().__init__(port, motors, calibration)
+        import dynamixel_sdk as dxl
+
+        self.port_handler = dxl.PortHandler(self.port)
+        self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION)
+        self.sync_reader = dxl.GroupSyncRead(self.port_handler, self.packet_handler, 0, 0)
+        self.sync_writer = dxl.GroupSyncWrite(self.port_handler, self.packet_handler, 0, 0)
+        self._comm_success = dxl.COMM_SUCCESS
+        self._no_error = 0x00
+
+    def _assert_protocol_is_compatible(self, instruction_name: str) -> None:
+        pass
+
+    def _handshake(self) -> None:
+        self._assert_motors_exist()
+
+    def _find_single_motor(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
+        model = self.motors[motor].model
+        search_baudrates = (
+            [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model]
+        )
+
+        for baudrate in search_baudrates:
+            self.set_baudrate(baudrate)
+            id_model = self.broadcast_ping()
+            if id_model:
+                found_id, found_model = next(iter(id_model.items()))
+                expected_model_nb = self.model_number_table[model]
+                if found_model != expected_model_nb:
+                    raise RuntimeError(
+                        f"Found one motor on {baudrate=} with id={found_id} but it has a "
+                        f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. "
+                        f"Make sure you are only connected to the '{motor}' motor (model '{model}')."
+                    )
+                return baudrate, found_id
+
+        raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. 
Make sure it is connected.") + + def configure_motors(self, return_delay_time=0) -> None: + # By default, Dynamixel motors have a 500µs delay response time (corresponding to a value of 250 on + # the 'Return_Delay_Time' address). We ensure this is reduced to the minimum of 2µs (value of 0). + for motor in self.motors: + self.write("Return_Delay_Time", motor, return_delay_time) + + @property + def is_calibrated(self) -> bool: + return self.calibration == self.read_calibration() + + def read_calibration(self) -> dict[str, MotorCalibration]: + offsets = self.sync_read("Homing_Offset", normalize=False) + mins = self.sync_read("Min_Position_Limit", normalize=False) + maxes = self.sync_read("Max_Position_Limit", normalize=False) + drive_modes = self.sync_read("Drive_Mode", normalize=False) + + calibration = {} + for motor, m in self.motors.items(): + calibration[motor] = MotorCalibration( + id=m.id, + drive_mode=drive_modes[motor], + homing_offset=offsets[motor], + range_min=mins[motor], + range_max=maxes[motor], + ) + + return calibration + + def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None: + for motor, calibration in calibration_dict.items(): + self.write("Homing_Offset", motor, calibration.homing_offset) + self.write("Min_Position_Limit", motor, calibration.range_min) + self.write("Max_Position_Limit", motor, calibration.range_max) + + if cache: + self.calibration = calibration_dict + + def disable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: + for motor in self._get_motors_list(motors): + self.write("Torque_Enable", motor, TorqueMode.DISABLED.value, num_retry=num_retry) + + def _disable_torque(self, motor_id: int, model: str, num_retry: int = 0) -> None: + addr, length = get_address(self.model_ctrl_table, model, "Torque_Enable") + self._write(addr, length, motor_id, TorqueMode.DISABLED.value, num_retry=num_retry) + + def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: + for motor in self._get_motors_list(motors): + self.write("Torque_Enable", motor, TorqueMode.ENABLED.value, num_retry=num_retry) + + def _encode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]: + for id_ in ids_values: + model = self._id_to_model(id_) + encoding_table = self.model_encoding_table.get(model) + if encoding_table and data_name in encoding_table: + n_bytes = encoding_table[data_name] + ids_values[id_] = encode_twos_complement(ids_values[id_], n_bytes) + + return ids_values + + def _decode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]: + for id_ in ids_values: + model = self._id_to_model(id_) + encoding_table = self.model_encoding_table.get(model) + if encoding_table and data_name in encoding_table: + n_bytes = encoding_table[data_name] + ids_values[id_] = decode_twos_complement(ids_values[id_], n_bytes) + + return ids_values + + def _get_half_turn_homings(self, positions: dict[NameOrID, Value]) -> dict[NameOrID, Value]: + """ + On Dynamixel Motors: + Present_Position = Actual_Position + Homing_Offset + """ + half_turn_homings = {} + for motor, pos in positions.items(): + model = self._get_motor_model(motor) + max_res = self.model_resolution_table[model] - 1 + half_turn_homings[motor] = int(max_res / 2) - pos + + return half_turn_homings + + def _split_into_byte_chunks(self, value: int, length: int) -> list[int]: + return _split_into_byte_chunks(value, length) + + def broadcast_ping(self, num_retry: int = 0, raise_on_error: bool = 
False) -> dict[int, int] | None:
+        for n_try in range(1 + num_retry):
+            data_list, comm = self.packet_handler.broadcastPing(self.port_handler)
+            if self._is_comm_success(comm):
+                break
+            logger.debug(f"Broadcast ping failed on port '{self.port}' ({n_try=})")
+            logger.debug(self.packet_handler.getTxRxResult(comm))
+
+        if not self._is_comm_success(comm):
+            if raise_on_error:
+                raise ConnectionError(self.packet_handler.getTxRxResult(comm))
+
+            return
+
+        return {id_: data[0] for id_, data in data_list.items()}
diff --git a/lerobot/src/lerobot/motors/dynamixel/tables.py b/lerobot/src/lerobot/motors/dynamixel/tables.py
new file mode 100644
index 0000000000000000000000000000000000000000..904cc3ae1a529017c0b7d33a47cd41c6806e1524
--- /dev/null
+++ b/lerobot/src/lerobot/motors/dynamixel/tables.py
@@ -0,0 +1,199 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(Steven): Consider doing the following:
+# from enum import Enum
+# class MyControlTableKey(Enum):
+#     ID = "ID"
+#     GOAL_SPEED = "Goal_Speed"
+#     ...
+#
+# MY_CONTROL_TABLE = {
+#     MyControlTableKey.ID.value: (5, 1),
+#     MyControlTableKey.GOAL_SPEED.value: (46, 2),
+#     ...
+# }
+# This allows me to do:
+# bus.write(MyControlTableKey.GOAL_SPEED, ...)
+# Instead of:
+# bus.write("Goal_Speed", ...)
+# This is important for two reasons:
+# 1. The linter will tell me if I'm trying to use an invalid key, instead of me realizing when I get the RuntimeError
+# 2. 
We can change the value of the MyControlTableKey enums without impacting the client code + + +# {data_name: (address, size_byte)} +# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#control-table +X_SERIES_CONTROL_TABLE = { + "Model_Number": (0, 2), + "Model_Information": (2, 4), + "Firmware_Version": (6, 1), + "ID": (7, 1), + "Baud_Rate": (8, 1), + "Return_Delay_Time": (9, 1), + "Drive_Mode": (10, 1), + "Operating_Mode": (11, 1), + "Secondary_ID": (12, 1), + "Protocol_Type": (13, 1), + "Homing_Offset": (20, 4), + "Moving_Threshold": (24, 4), + "Temperature_Limit": (31, 1), + "Max_Voltage_Limit": (32, 2), + "Min_Voltage_Limit": (34, 2), + "PWM_Limit": (36, 2), + "Current_Limit": (38, 2), + "Acceleration_Limit": (40, 4), + "Velocity_Limit": (44, 4), + "Max_Position_Limit": (48, 4), + "Min_Position_Limit": (52, 4), + "Shutdown": (63, 1), + "Torque_Enable": (64, 1), + "LED": (65, 1), + "Status_Return_Level": (68, 1), + "Registered_Instruction": (69, 1), + "Hardware_Error_Status": (70, 1), + "Velocity_I_Gain": (76, 2), + "Velocity_P_Gain": (78, 2), + "Position_D_Gain": (80, 2), + "Position_I_Gain": (82, 2), + "Position_P_Gain": (84, 2), + "Feedforward_2nd_Gain": (88, 2), + "Feedforward_1st_Gain": (90, 2), + "Bus_Watchdog": (98, 1), + "Goal_PWM": (100, 2), + "Goal_Current": (102, 2), + "Goal_Velocity": (104, 4), + "Profile_Acceleration": (108, 4), + "Profile_Velocity": (112, 4), + "Goal_Position": (116, 4), + "Realtime_Tick": (120, 2), + "Moving": (122, 1), + "Moving_Status": (123, 1), + "Present_PWM": (124, 2), + "Present_Current": (126, 2), + "Present_Velocity": (128, 4), + "Present_Position": (132, 4), + "Velocity_Trajectory": (136, 4), + "Position_Trajectory": (140, 4), + "Present_Input_Voltage": (144, 2), + "Present_Temperature": (146, 1), +} + +# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#baud-rate8 +X_SERIES_BAUDRATE_TABLE = { + 9_600: 0, + 57_600: 1, + 115_200: 2, + 1_000_000: 3, + 2_000_000: 4, + 3_000_000: 5, + 4_000_000: 6, +} + +# {data_name: size_byte} +X_SERIES_ENCODINGS_TABLE = { + "Homing_Offset": X_SERIES_CONTROL_TABLE["Homing_Offset"][1], + "Goal_PWM": X_SERIES_CONTROL_TABLE["Goal_PWM"][1], + "Goal_Current": X_SERIES_CONTROL_TABLE["Goal_Current"][1], + "Goal_Velocity": X_SERIES_CONTROL_TABLE["Goal_Velocity"][1], + "Goal_Position": X_SERIES_CONTROL_TABLE["Goal_Position"][1], + "Present_Position": X_SERIES_CONTROL_TABLE["Present_Position"][1], + "Present_PWM": X_SERIES_CONTROL_TABLE["Present_PWM"][1], + "Present_Current": X_SERIES_CONTROL_TABLE["Present_Current"][1], + "Present_Velocity": X_SERIES_CONTROL_TABLE["Present_Velocity"][1], +} + +MODEL_ENCODING_TABLE = { + "x_series": X_SERIES_ENCODINGS_TABLE, + "xl330-m077": X_SERIES_ENCODINGS_TABLE, + "xl330-m288": X_SERIES_ENCODINGS_TABLE, + "xl430-w250": X_SERIES_ENCODINGS_TABLE, + "xm430-w350": X_SERIES_ENCODINGS_TABLE, + "xm540-w270": X_SERIES_ENCODINGS_TABLE, + "xc430-w150": X_SERIES_ENCODINGS_TABLE, +} + +# {model: model_resolution} +# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#specifications +MODEL_RESOLUTION = { + "x_series": 4096, + "xl330-m077": 4096, + "xl330-m288": 4096, + "xl430-w250": 4096, + "xm430-w350": 4096, + "xm540-w270": 4096, + "xc430-w150": 4096, +} + +# {model: model_number} +# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#control-table-of-eeprom-area +MODEL_NUMBER_TABLE = { + "xl330-m077": 1190, + "xl330-m288": 1200, + "xl430-w250": 1060, + "xm430-w350": 1020, + "xm540-w270": 1120, + "xc430-w150": 1070, +} + +# {model: available_operating_modes} +# 
https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#operating-mode11 +MODEL_OPERATING_MODES = { + "xl330-m077": [0, 1, 3, 4, 5, 16], + "xl330-m288": [0, 1, 3, 4, 5, 16], + "xl430-w250": [1, 3, 4, 16], + "xm430-w350": [0, 1, 3, 4, 5, 16], + "xm540-w270": [0, 1, 3, 4, 5, 16], + "xc430-w150": [1, 3, 4, 16], +} + +MODEL_CONTROL_TABLE = { + "x_series": X_SERIES_CONTROL_TABLE, + "xl330-m077": X_SERIES_CONTROL_TABLE, + "xl330-m288": X_SERIES_CONTROL_TABLE, + "xl430-w250": X_SERIES_CONTROL_TABLE, + "xm430-w350": X_SERIES_CONTROL_TABLE, + "xm540-w270": X_SERIES_CONTROL_TABLE, + "xc430-w150": X_SERIES_CONTROL_TABLE, +} + +MODEL_BAUDRATE_TABLE = { + "x_series": X_SERIES_BAUDRATE_TABLE, + "xl330-m077": X_SERIES_BAUDRATE_TABLE, + "xl330-m288": X_SERIES_BAUDRATE_TABLE, + "xl430-w250": X_SERIES_BAUDRATE_TABLE, + "xm430-w350": X_SERIES_BAUDRATE_TABLE, + "xm540-w270": X_SERIES_BAUDRATE_TABLE, + "xc430-w150": X_SERIES_BAUDRATE_TABLE, +} + +AVAILABLE_BAUDRATES = [ + 9_600, + 19_200, + 38_400, + 57_600, + 115_200, + 230_400, + 460_800, + 500_000, + 576_000, + 921_600, + 1_000_000, + 1_152_000, + 2_000_000, + 2_500_000, + 3_000_000, + 3_500_000, + 4_000_000, +] diff --git a/lerobot/src/lerobot/motors/feetech/__init__.py b/lerobot/src/lerobot/motors/feetech/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..33992c51d2ceb32f2480d8d3a727f9a9d75bab5d --- /dev/null +++ b/lerobot/src/lerobot/motors/feetech/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .feetech import DriveMode, FeetechMotorsBus, OperatingMode, TorqueMode +from .tables import * diff --git a/lerobot/src/lerobot/motors/feetech/feetech.py b/lerobot/src/lerobot/motors/feetech/feetech.py new file mode 100644 index 0000000000000000000000000000000000000000..98cde209c44158a244e4da03e756d2774fa0fb93 --- /dev/null +++ b/lerobot/src/lerobot/motors/feetech/feetech.py @@ -0,0 +1,455 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import logging
+from copy import deepcopy
+from enum import Enum
+from pprint import pformat
+
+from lerobot.motors.encoding_utils import decode_sign_magnitude, encode_sign_magnitude
+
+from ..motors_bus import Motor, MotorCalibration, MotorsBus, NameOrID, Value, get_address
+from .tables import (
+    FIRMWARE_MAJOR_VERSION,
+    FIRMWARE_MINOR_VERSION,
+    MODEL_BAUDRATE_TABLE,
+    MODEL_CONTROL_TABLE,
+    MODEL_ENCODING_TABLE,
+    MODEL_NUMBER,
+    MODEL_NUMBER_TABLE,
+    MODEL_PROTOCOL,
+    MODEL_RESOLUTION,
+    SCAN_BAUDRATES,
+)
+
+DEFAULT_PROTOCOL_VERSION = 0
+DEFAULT_BAUDRATE = 1_000_000
+DEFAULT_TIMEOUT_MS = 1000
+
+NORMALIZED_DATA = ["Goal_Position", "Present_Position"]
+
+logger = logging.getLogger(__name__)
+
+
+class OperatingMode(Enum):
+    # position servo mode
+    POSITION = 0
+    # The motor is in constant speed mode, which is controlled by parameter 0x2e, and the highest bit 15 is
+    # the direction bit
+    VELOCITY = 1
+    # PWM open-loop speed regulation mode, with parameter 0x2c running time parameter control, bit11 as
+    # direction bit
+    PWM = 2
+    # In step servo mode, the number of step progress is represented by parameter 0x2a, and the highest bit 15
+    # is the direction bit
+    STEP = 3
+
+
+class DriveMode(Enum):
+    NON_INVERTED = 0
+    INVERTED = 1
+
+
+class TorqueMode(Enum):
+    ENABLED = 1
+    DISABLED = 0
+
+
+def _split_into_byte_chunks(value: int, length: int) -> list[int]:
+    import scservo_sdk as scs
+
+    if length == 1:
+        data = [value]
+    elif length == 2:
+        data = [scs.SCS_LOBYTE(value), scs.SCS_HIBYTE(value)]
+    elif length == 4:
+        data = [
+            scs.SCS_LOBYTE(scs.SCS_LOWORD(value)),
+            scs.SCS_HIBYTE(scs.SCS_LOWORD(value)),
+            scs.SCS_LOBYTE(scs.SCS_HIWORD(value)),
+            scs.SCS_HIBYTE(scs.SCS_HIWORD(value)),
+        ]
+    else:
+        # Guard against silently returning an unbound value for unsupported lengths.
+        raise ValueError(f"Unexpected byte length: {length}. Expected 1, 2 or 4.")
+    return data
+
+
+def patch_setPacketTimeout(self, packet_length):  # noqa: N802
+    """
+    HACK: This patches the PortHandler behavior to set the correct packet timeouts.
+
+    It fixes https://gitee.com/ftservo/SCServoSDK/issues/IBY2S6
+    The bug is fixed on the official Feetech SDK repo (https://gitee.com/ftservo/FTServo_Python)
+    but because that version is not published on PyPI, we rely on the (unofficial) one that is, which needs
+    patching.
+    """
+    self.packet_start_time = self.getCurrentTime()
+    self.packet_timeout = (self.tx_time_per_byte * packet_length) + (self.tx_time_per_byte * 3.0) + 50
+
+
+class FeetechMotorsBus(MotorsBus):
+    """
+    The FeetechMotorsBus class allows efficient reading from and writing to the attached motors. It relies
+    on the python feetech sdk to communicate with the motors, which is itself based on the dynamixel sdk. 
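+
+    Example (a minimal sketch; the port, motor name, id and model are illustrative
+    placeholders, and the `Motor`/`MotorNormMode` import path is assumed):
+
+    ```python
+    from lerobot.motors import Motor, MotorNormMode
+    from lerobot.motors.feetech import FeetechMotorsBus
+
+    bus = FeetechMotorsBus(
+        port="/dev/ttyACM0",
+        motors={"gripper": Motor(id=1, model="sts3215", norm_mode=MotorNormMode.RANGE_0_100)},
+    )
+    bus.connect()
+    position = bus.read("Present_Position", "gripper", normalize=False)
+    bus.disconnect()
+    ```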
+ """ + + apply_drive_mode = True + available_baudrates = deepcopy(SCAN_BAUDRATES) + default_baudrate = DEFAULT_BAUDRATE + default_timeout = DEFAULT_TIMEOUT_MS + model_baudrate_table = deepcopy(MODEL_BAUDRATE_TABLE) + model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE) + model_encoding_table = deepcopy(MODEL_ENCODING_TABLE) + model_number_table = deepcopy(MODEL_NUMBER_TABLE) + model_resolution_table = deepcopy(MODEL_RESOLUTION) + normalized_data = deepcopy(NORMALIZED_DATA) + + def __init__( + self, + port: str, + motors: dict[str, Motor], + calibration: dict[str, MotorCalibration] | None = None, + protocol_version: int = DEFAULT_PROTOCOL_VERSION, + ): + super().__init__(port, motors, calibration) + self.protocol_version = protocol_version + self._assert_same_protocol() + import scservo_sdk as scs + + self.port_handler = scs.PortHandler(self.port) + # HACK: monkeypatch + self.port_handler.setPacketTimeout = patch_setPacketTimeout.__get__( + self.port_handler, scs.PortHandler + ) + self.packet_handler = scs.PacketHandler(protocol_version) + self.sync_reader = scs.GroupSyncRead(self.port_handler, self.packet_handler, 0, 0) + self.sync_writer = scs.GroupSyncWrite(self.port_handler, self.packet_handler, 0, 0) + self._comm_success = scs.COMM_SUCCESS + self._no_error = 0x00 + + if any(MODEL_PROTOCOL[model] != self.protocol_version for model in self.models): + raise ValueError(f"Some motors are incompatible with protocol_version={self.protocol_version}") + + def _assert_same_protocol(self) -> None: + if any(MODEL_PROTOCOL[model] != self.protocol_version for model in self.models): + raise RuntimeError("Some motors use an incompatible protocol.") + + def _assert_protocol_is_compatible(self, instruction_name: str) -> None: + if instruction_name == "sync_read" and self.protocol_version == 1: + raise NotImplementedError( + "'Sync Read' is not available with Feetech motors using Protocol 1. Use 'Read' sequentially instead." + ) + if instruction_name == "broadcast_ping" and self.protocol_version == 1: + raise NotImplementedError( + "'Broadcast Ping' is not available with Feetech motors using Protocol 1. Use 'Ping' sequentially instead." + ) + + def _assert_same_firmware(self) -> None: + firmware_versions = self._read_firmware_version(self.ids, raise_on_error=True) + if len(set(firmware_versions.values())) != 1: + raise RuntimeError( + "Some Motors use different firmware versions:" + f"\n{pformat(firmware_versions)}\n" + "Update their firmware first using Feetech's software. " + "Visit https://www.feetechrc.com/software." 
+            )
+
+    def _handshake(self) -> None:
+        self._assert_motors_exist()
+        self._assert_same_firmware()
+
+    def _find_single_motor(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
+        if self.protocol_version == 0:
+            return self._find_single_motor_p0(motor, initial_baudrate)
+        else:
+            return self._find_single_motor_p1(motor, initial_baudrate)
+
+    def _find_single_motor_p0(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
+        model = self.motors[motor].model
+        search_baudrates = (
+            [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model]
+        )
+        expected_model_nb = self.model_number_table[model]
+
+        for baudrate in search_baudrates:
+            self.set_baudrate(baudrate)
+            id_model = self.broadcast_ping()
+            if id_model:
+                found_id, found_model = next(iter(id_model.items()))
+                if found_model != expected_model_nb:
+                    raise RuntimeError(
+                        f"Found one motor on {baudrate=} with id={found_id} but it has a "
+                        f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. "
+                        f"Make sure you are only connected to the '{motor}' motor (model '{model}')."
+                    )
+                return baudrate, found_id
+
+        raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. Make sure it is connected.")
+
+    def _find_single_motor_p1(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
+        import scservo_sdk as scs
+
+        model = self.motors[motor].model
+        search_baudrates = (
+            [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model]
+        )
+        expected_model_nb = self.model_number_table[model]
+
+        for baudrate in search_baudrates:
+            self.set_baudrate(baudrate)
+            for id_ in range(scs.MAX_ID + 1):
+                found_model = self.ping(id_)
+                if found_model is not None:
+                    if found_model != expected_model_nb:
+                        raise RuntimeError(
+                            f"Found one motor on {baudrate=} with id={id_} but it has a "
+                            f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. "
+                            f"Make sure you are only connected to the '{motor}' motor (model '{model}')."
+                        )
+                    return baudrate, id_
+
+        raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. Make sure it is connected.")
+
+    def configure_motors(self, return_delay_time=0, maximum_acceleration=254, acceleration=254) -> None:
+        for motor in self.motors:
+            # By default, Feetech motors have a 500µs delay response time (corresponding to a value of 250 on
+            # the 'Return_Delay_Time' address). We ensure this is reduced to the minimum of 2µs (value of 0).
+            self.write("Return_Delay_Time", motor, return_delay_time)
+            # Set 'Maximum_Acceleration' to 254 to speed up acceleration and deceleration of the motors. 
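+            # (Both values can be tuned via the `maximum_acceleration` and `acceleration`
+            # arguments of this method.)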
+ if self.protocol_version == 0: + self.write("Maximum_Acceleration", motor, maximum_acceleration) + self.write("Acceleration", motor, acceleration) + + @property + def is_calibrated(self) -> bool: + motors_calibration = self.read_calibration() + if set(motors_calibration) != set(self.calibration): + return False + + same_ranges = all( + self.calibration[motor].range_min == cal.range_min + and self.calibration[motor].range_max == cal.range_max + for motor, cal in motors_calibration.items() + ) + if self.protocol_version == 1: + return same_ranges + + same_offsets = all( + self.calibration[motor].homing_offset == cal.homing_offset + for motor, cal in motors_calibration.items() + ) + return same_ranges and same_offsets + + def read_calibration(self) -> dict[str, MotorCalibration]: + offsets, mins, maxes = {}, {}, {} + for motor in self.motors: + mins[motor] = self.read("Min_Position_Limit", motor, normalize=False) + maxes[motor] = self.read("Max_Position_Limit", motor, normalize=False) + offsets[motor] = ( + self.read("Homing_Offset", motor, normalize=False) if self.protocol_version == 0 else 0 + ) + + calibration = {} + for motor, m in self.motors.items(): + calibration[motor] = MotorCalibration( + id=m.id, + drive_mode=0, + homing_offset=offsets[motor], + range_min=mins[motor], + range_max=maxes[motor], + ) + + return calibration + + def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None: + for motor, calibration in calibration_dict.items(): + if self.protocol_version == 0: + self.write("Homing_Offset", motor, calibration.homing_offset) + self.write("Min_Position_Limit", motor, calibration.range_min) + self.write("Max_Position_Limit", motor, calibration.range_max) + + if cache: + self.calibration = calibration_dict + + def _get_half_turn_homings(self, positions: dict[NameOrID, Value]) -> dict[NameOrID, Value]: + """ + On Feetech Motors: + Present_Position = Actual_Position - Homing_Offset + """ + half_turn_homings = {} + for motor, pos in positions.items(): + model = self._get_motor_model(motor) + max_res = self.model_resolution_table[model] - 1 + half_turn_homings[motor] = pos - int(max_res / 2) + + return half_turn_homings + + def disable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: + for motor in self._get_motors_list(motors): + self.write("Torque_Enable", motor, TorqueMode.DISABLED.value, num_retry=num_retry) + self.write("Lock", motor, 0, num_retry=num_retry) + + def _disable_torque(self, motor_id: int, model: str, num_retry: int = 0) -> None: + addr, length = get_address(self.model_ctrl_table, model, "Torque_Enable") + self._write(addr, length, motor_id, TorqueMode.DISABLED.value, num_retry=num_retry) + addr, length = get_address(self.model_ctrl_table, model, "Lock") + self._write(addr, length, motor_id, 0, num_retry=num_retry) + + def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: + for motor in self._get_motors_list(motors): + self.write("Torque_Enable", motor, TorqueMode.ENABLED.value, num_retry=num_retry) + self.write("Lock", motor, 1, num_retry=num_retry) + + def _encode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]: + for id_ in ids_values: + model = self._id_to_model(id_) + encoding_table = self.model_encoding_table.get(model) + if encoding_table and data_name in encoding_table: + sign_bit = encoding_table[data_name] + ids_values[id_] = encode_sign_magnitude(ids_values[id_], sign_bit) + + return ids_values + + def 
_decode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]: + for id_ in ids_values: + model = self._id_to_model(id_) + encoding_table = self.model_encoding_table.get(model) + if encoding_table and data_name in encoding_table: + sign_bit = encoding_table[data_name] + ids_values[id_] = decode_sign_magnitude(ids_values[id_], sign_bit) + + return ids_values + + def _split_into_byte_chunks(self, value: int, length: int) -> list[int]: + return _split_into_byte_chunks(value, length) + + def _broadcast_ping(self) -> tuple[dict[int, int], int]: + import scservo_sdk as scs + + data_list = {} + + status_length = 6 + + rx_length = 0 + wait_length = status_length * scs.MAX_ID + + txpacket = [0] * 6 + + tx_time_per_byte = (1000.0 / self.port_handler.getBaudRate()) * 10.0 + + txpacket[scs.PKT_ID] = scs.BROADCAST_ID + txpacket[scs.PKT_LENGTH] = 2 + txpacket[scs.PKT_INSTRUCTION] = scs.INST_PING + + result = self.packet_handler.txPacket(self.port_handler, txpacket) + if result != scs.COMM_SUCCESS: + self.port_handler.is_using = False + return data_list, result + + # set rx timeout + self.port_handler.setPacketTimeoutMillis((wait_length * tx_time_per_byte) + (3.0 * scs.MAX_ID) + 16.0) + + rxpacket = [] + while not self.port_handler.isPacketTimeout() and rx_length < wait_length: + rxpacket += self.port_handler.readPort(wait_length - rx_length) + rx_length = len(rxpacket) + + self.port_handler.is_using = False + + if rx_length == 0: + return data_list, scs.COMM_RX_TIMEOUT + + while True: + if rx_length < status_length: + return data_list, scs.COMM_RX_CORRUPT + + # find packet header + for idx in range(0, (rx_length - 1)): + if (rxpacket[idx] == 0xFF) and (rxpacket[idx + 1] == 0xFF): + break + + if idx == 0: # found at the beginning of the packet + # calculate checksum + checksum = 0 + for idx in range(2, status_length - 1): # except header & checksum + checksum += rxpacket[idx] + + checksum = ~checksum & 0xFF + if rxpacket[status_length - 1] == checksum: + result = scs.COMM_SUCCESS + data_list[rxpacket[scs.PKT_ID]] = rxpacket[scs.PKT_ERROR] + + del rxpacket[0:status_length] + rx_length = rx_length - status_length + + if rx_length == 0: + return data_list, result + else: + result = scs.COMM_RX_CORRUPT + # remove header (0xFF 0xFF) + del rxpacket[0:2] + rx_length = rx_length - 2 + else: + # remove unnecessary packets + del rxpacket[0:idx] + rx_length = rx_length - idx + + def broadcast_ping(self, num_retry: int = 0, raise_on_error: bool = False) -> dict[int, int] | None: + self._assert_protocol_is_compatible("broadcast_ping") + for n_try in range(1 + num_retry): + ids_status, comm = self._broadcast_ping() + if self._is_comm_success(comm): + break + logger.debug(f"Broadcast ping failed on port '{self.port}' ({n_try=})") + logger.debug(self.packet_handler.getTxRxResult(comm)) + + if not self._is_comm_success(comm): + if raise_on_error: + raise ConnectionError(self.packet_handler.getTxRxResult(comm)) + return + + ids_errors = {id_: status for id_, status in ids_status.items() if self._is_error(status)} + if ids_errors: + display_dict = {id_: self.packet_handler.getRxPacketError(err) for id_, err in ids_errors.items()} + logger.error(f"Some motors found returned an error status:\n{pformat(display_dict, indent=4)}") + + return self._read_model_number(list(ids_status), raise_on_error) + + def _read_firmware_version(self, motor_ids: list[int], raise_on_error: bool = False) -> dict[int, str]: + firmware_versions = {} + for id_ in motor_ids: + firm_ver_major, comm, error = self._read( + 
*FIRMWARE_MAJOR_VERSION, id_, raise_on_error=raise_on_error
+            )
+            if not self._is_comm_success(comm) or self._is_error(error):
+                continue
+
+            firm_ver_minor, comm, error = self._read(
+                *FIRMWARE_MINOR_VERSION, id_, raise_on_error=raise_on_error
+            )
+            if not self._is_comm_success(comm) or self._is_error(error):
+                continue
+
+            firmware_versions[id_] = f"{firm_ver_major}.{firm_ver_minor}"
+
+        return firmware_versions
+
+    def _read_model_number(self, motor_ids: list[int], raise_on_error: bool = False) -> dict[int, int]:
+        model_numbers = {}
+        for id_ in motor_ids:
+            model_nb, comm, error = self._read(*MODEL_NUMBER, id_, raise_on_error=raise_on_error)
+            if not self._is_comm_success(comm) or self._is_error(error):
+                continue
+
+            model_numbers[id_] = model_nb
+
+        return model_numbers
diff --git a/lerobot/src/lerobot/motors/feetech/tables.py b/lerobot/src/lerobot/motors/feetech/tables.py
new file mode 100644
index 0000000000000000000000000000000000000000..e26d24226275d0330254ca4b1ab028d7b7bfa850
--- /dev/null
+++ b/lerobot/src/lerobot/motors/feetech/tables.py
@@ -0,0 +1,256 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FIRMWARE_MAJOR_VERSION = (0, 1)
+FIRMWARE_MINOR_VERSION = (1, 1)
+MODEL_NUMBER = (3, 2)
+
+# TODO(Steven): Consider doing the following:
+# from enum import Enum
+# class MyControlTableKey(Enum):
+#     ID = "ID"
+#     GOAL_SPEED = "Goal_Speed"
+#     ...
+#
+# MY_CONTROL_TABLE = {
+#     MyControlTableKey.ID.value: (5, 1)
+#     MyControlTableKey.GOAL_SPEED.value: (46, 2)
+#     ...
+# }
+# This allows me to do:
+# bus.write(MyControlTableKey.GOAL_SPEED, ...)
+# Instead of:
+# bus.write("Goal_Speed", ...)
+# This is important for two reasons:
+# 1. The linter will tell me if I'm trying to use an invalid key, instead of me realizing when I get the RuntimeError
+# 2. We can change the value of the MyControlTableKey enums without impacting the client code
+
+# data_name: (address, size_byte)
+# http://doc.feetech.cn/#/prodinfodownload?srcType=FT-SMS-STS-emanual-229f4476422d4059abfb1cb0
+STS_SMS_SERIES_CONTROL_TABLE = {
+    # EPROM
+    "Firmware_Major_Version": FIRMWARE_MAJOR_VERSION,  # read-only
+    "Firmware_Minor_Version": FIRMWARE_MINOR_VERSION,  # read-only
+    "Model_Number": MODEL_NUMBER,  # read-only
+    "ID": (5, 1),
+    "Baud_Rate": (6, 1),
+    "Return_Delay_Time": (7, 1),
+    "Response_Status_Level": (8, 1),
+    "Min_Position_Limit": (9, 2),
+    "Max_Position_Limit": (11, 2),
+    "Max_Temperature_Limit": (13, 1),
+    "Max_Voltage_Limit": (14, 1),
+    "Min_Voltage_Limit": (15, 1),
+    "Max_Torque_Limit": (16, 2),
+    "Phase": (18, 1),
+    "Unloading_Condition": (19, 1),
+    "LED_Alarm_Condition": (20, 1),
+    "P_Coefficient": (21, 1),
+    "D_Coefficient": (22, 1),
+    "I_Coefficient": (23, 1),
+    "Minimum_Startup_Force": (24, 2),
+    "CW_Dead_Zone": (26, 1),
+    "CCW_Dead_Zone": (27, 1),
+    "Protection_Current": (28, 2),
+    "Angular_Resolution": (30, 1),
+    "Homing_Offset": (31, 2),
+    "Operating_Mode": (33, 1),
+    "Protective_Torque": (34, 1),
+    "Protection_Time": (35, 1),
+    "Overload_Torque": (36, 1),
+    "Velocity_closed_loop_P_proportional_coefficient": (37, 1),
+    "Over_Current_Protection_Time": (38, 1),
+    "Velocity_closed_loop_I_integral_coefficient": (39, 1),
+    # SRAM
+    "Torque_Enable": (40, 1),
+    "Acceleration": (41, 1),
+    "Goal_Position": (42, 2),
+    "Goal_Time": (44, 2),
+    "Goal_Velocity": (46, 2),
+    "Torque_Limit": (48, 2),
+    "Lock": (55, 1),
+    "Present_Position": (56, 2),  # read-only
+    "Present_Velocity": (58, 2),  # read-only
+    "Present_Load": (60, 2),  # read-only
+    "Present_Voltage": (62, 1),  # read-only
+    "Present_Temperature": (63, 1),  # read-only
+    "Status": (65, 1),  # read-only
+    "Moving": (66, 1),  # read-only
+    "Present_Current": (69, 2),  # read-only
+    "Goal_Position_2": (71, 2),  # read-only
+    # Factory
+    "Moving_Velocity": (80, 1),
+    "Moving_Velocity_Threshold": (80, 1),
+    "DTs": (81, 1),  # (ms)
+    "Velocity_Unit_factor": (82, 1),
+    "Hts": (83, 1),  # (ns) valid for firmware >= 2.54, other versions keep 0
+    "Maximum_Velocity_Limit": (84, 1),
+    "Maximum_Acceleration": (85, 1),
+    "Acceleration_Multiplier": (86, 1),  # Acceleration multiplier in effect when acceleration is 0
+}
+
+# http://doc.feetech.cn/#/prodinfodownload?srcType=FT-SCSCL-emanual-cbcc8ab2e3384282a01d4bf3
+SCS_SERIES_CONTROL_TABLE = {
+    # EPROM
+    "Firmware_Major_Version": FIRMWARE_MAJOR_VERSION,  # read-only
+    "Firmware_Minor_Version": FIRMWARE_MINOR_VERSION,  # read-only
+    "Model_Number": MODEL_NUMBER,  # read-only
+    "ID": (5, 1),
+    "Baud_Rate": (6, 1),
+    "Return_Delay_Time": (7, 1),
+    "Response_Status_Level": (8, 1),
+    "Min_Position_Limit": (9, 2),
+    "Max_Position_Limit": (11, 2),
+    "Max_Temperature_Limit": (13, 1),
+    "Max_Voltage_Limit": (14, 1),
+    "Min_Voltage_Limit": (15, 1),
+    "Max_Torque_Limit": (16, 2),
+    "Phase": (18, 1),
+    "Unloading_Condition": (19, 1),
+    "LED_Alarm_Condition": (20, 1),
+    "P_Coefficient": (21, 1),
+    "D_Coefficient": (22, 1),
+    "I_Coefficient": (23, 1),
+    "Minimum_Startup_Force": (24, 2),
+    "CW_Dead_Zone": (26, 1),
+    "CCW_Dead_Zone": (27, 1),
+    "Protective_Torque": (37, 1),
+    "Protection_Time": (38, 1),
+    # SRAM
+    "Torque_Enable": (40, 1),
+    "Acceleration": (41, 1),
+    "Goal_Position": (42, 2),
+    "Running_Time": (44, 2),
+    "Goal_Velocity": (46, 2),
+    "Lock": (48, 1),
+    "Present_Position": (56, 2),  # read-only
+    "Present_Velocity": (58, 2),  # read-only
+    "Present_Load": (60, 2),  # read-only
+    "Present_Voltage": (62, 1),  # read-only
+    "Present_Temperature": (63, 1),  # read-only
+    "Sync_Write_Flag": (64, 1),  # read-only
+    "Status": (65, 1),  # read-only
+    "Moving": (66, 1),  # read-only
+    # Factory
+    "PWM_Maximum_Step": (78, 1),
+    "Moving_Velocity_Threshold*50": (79, 1),
+    "DTs": (80, 1),  # (ms)
+    "Minimum_Velocity_Limit*50": (81, 1),
+    "Maximum_Velocity_Limit*50": (82, 1),
+    "Acceleration_2": (83, 1),  # don't know what that is
+}
+
+STS_SMS_SERIES_BAUDRATE_TABLE = {
+    1_000_000: 0,
+    500_000: 1,
+    250_000: 2,
+    128_000: 3,
+    115_200: 4,
+    57_600: 5,
+    38_400: 6,
+    19_200: 7,
+}
+
+SCS_SERIES_BAUDRATE_TABLE = {
+    1_000_000: 0,
+    500_000: 1,
+    250_000: 2,
+    128_000: 3,
+    115_200: 4,
+    57_600: 5,
+    38_400: 6,
+    19_200: 7,
+}
+
+MODEL_CONTROL_TABLE = {
+    "sts_series": STS_SMS_SERIES_CONTROL_TABLE,
+    "scs_series": SCS_SERIES_CONTROL_TABLE,
+    "sms_series": STS_SMS_SERIES_CONTROL_TABLE,
+    "sts3215": STS_SMS_SERIES_CONTROL_TABLE,
+    "sts3250": STS_SMS_SERIES_CONTROL_TABLE,
+    "scs0009": SCS_SERIES_CONTROL_TABLE,
+    "sm8512bl": STS_SMS_SERIES_CONTROL_TABLE,
+}
+
+MODEL_RESOLUTION = {
+    "sts_series": 4096,
+    "sms_series": 4096,
+    "scs_series": 1024,
+    "sts3215": 4096,
+    "sts3250": 4096,
+    "sm8512bl": 4096,
+    "scs0009": 1024,
+}
+
+MODEL_BAUDRATE_TABLE = {
+    "sts_series": STS_SMS_SERIES_BAUDRATE_TABLE,
+    "sms_series": STS_SMS_SERIES_BAUDRATE_TABLE,
+    "scs_series": SCS_SERIES_BAUDRATE_TABLE,
+    "sm8512bl": STS_SMS_SERIES_BAUDRATE_TABLE,
+    "sts3215": STS_SMS_SERIES_BAUDRATE_TABLE,
+    "sts3250": STS_SMS_SERIES_BAUDRATE_TABLE,
+    "scs0009": SCS_SERIES_BAUDRATE_TABLE,
+}
+
+# Sign-Magnitude encoding bits
+STS_SMS_SERIES_ENCODINGS_TABLE = {
+    "Homing_Offset": 11,
+    "Goal_Position": 15,
+    "Goal_Velocity": 15,
+    "Goal_Speed": 15,
+    "Present_Position": 15,
+    "Present_Velocity": 15,
+    "Present_Speed": 15,
+}
+
+MODEL_ENCODING_TABLE = {
+    "sts_series": STS_SMS_SERIES_ENCODINGS_TABLE,
+    "sms_series": STS_SMS_SERIES_ENCODINGS_TABLE,
+    "scs_series": {},
+    "sts3215": STS_SMS_SERIES_ENCODINGS_TABLE,
+    "sts3250": STS_SMS_SERIES_ENCODINGS_TABLE,
+    "sm8512bl": STS_SMS_SERIES_ENCODINGS_TABLE,
+    "scs0009": {},
+}
+
+SCAN_BAUDRATES = [
+    4_800,
+    9_600,
+    14_400,
+    19_200,
+    38_400,
+    57_600,
+    115_200,
+    128_000,
+    250_000,
+    500_000,
+    1_000_000,
+]
+
+MODEL_NUMBER_TABLE = {
+    "sts3215": 777,
+    "sts3250": 2825,
+    "sm8512bl": 11272,
+    "scs0009": 1284,
+}
+
+MODEL_PROTOCOL = {
+    "sts_series": 0,
+    "sms_series": 0,
+    "scs_series": 1,
+    "sts3215": 0,
+    "sts3250": 0,
+    "sm8512bl": 0,
+    "scs0009": 1,
+}
diff --git a/lerobot/src/lerobot/policies/act/README.md b/lerobot/src/lerobot/policies/act/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..04602009852778a28be44647b6e7ba445dae3a95
--- /dev/null
+++ b/lerobot/src/lerobot/policies/act/README.md
@@ -0,0 +1 @@
+../../../../docs/source/policy_act_README.md
\ No newline at end of file
diff --git a/lerobot/src/lerobot/policies/act/configuration_act.py b/lerobot/src/lerobot/policies/act/configuration_act.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c6fdf4275d6a5d40a7eebe443c8a756d3f0fc21
--- /dev/null
+++ b/lerobot/src/lerobot/policies/act/configuration_act.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python

+# Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from dataclasses import dataclass, field
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import NormalizationMode
+from lerobot.optim.optimizers import AdamWConfig
+
+
+@PreTrainedConfig.register_subclass("act")
+@dataclass
+class ACTConfig(PreTrainedConfig):
+    """Configuration class for the Action Chunking Transformers policy.
+
+    Defaults are configured for training on bimanual Aloha tasks like "insertion" or "transfer".
+
+    The parameters you will most likely need to change are the ones which depend on the environment / sensors.
+    Those are: `input_shapes` and `output_shapes`.
+
+    Notes on the inputs and outputs:
+        - Either:
+            - At least one key starting with "observation.image" is required as an input.
+              AND/OR
+            - The key "observation.environment_state" is required as input.
+        - If there are multiple keys beginning with "observation.images." they are treated as multiple camera
+          views. Right now we only support all images having the same shape.
+        - May optionally work without an "observation.state" key for the proprioceptive robot state.
+        - "action" is required as an output key.
+
+    Args:
+        n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the
+            current step and additional steps going back).
+        chunk_size: The size of the action prediction "chunks" in units of environment steps.
+        n_action_steps: The number of action steps to run in the environment for one invocation of the policy.
+            This should be no greater than the chunk size. For example, if the chunk size is 100, you may
+            set this to 50. This would mean that the model predicts 100 steps worth of actions, runs 50 in the
+            environment, and throws the other 50 out.
+        input_shapes: A dictionary defining the shapes of the input data for the policy. The key represents
+            the input data name, and the value is a list indicating the dimensions of the corresponding data.
+            For example, "observation.image" refers to an input from a camera with dimensions [3, 96, 96],
+            indicating it has three color channels and 96x96 resolution. Importantly, `input_shapes` doesn't
+            include batch dimension or temporal dimension.
+        output_shapes: A dictionary defining the shapes of the output data for the policy. The key represents
+            the output data name, and the value is a list indicating the dimensions of the corresponding data.
+            For example, "action" refers to an output shape of [14], indicating 14-dimensional actions.
+            Importantly, `output_shapes` doesn't include batch dimension or temporal dimension.
+        input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"),
+            and the value specifies the normalization mode to apply. The two available modes are "mean_std"
+            which subtracts the mean and divides by the standard deviation and "min_max" which rescales to a
+            [-1, 1] range.
+        output_normalization_modes: Similar dictionary as `input_normalization_modes`, but to unnormalize to the
+            original scale. Note that this is also used for normalizing the training targets.
+        vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
+        pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
+            `None` means no pretrained weights.
+        replace_final_stride_with_dilation: Whether to replace the ResNet's final 2x2 stride with a dilated
+            convolution.
+        pre_norm: Whether to use "pre-norm" in the transformer blocks.
+        dim_model: The transformer blocks' main hidden dimension.
+        n_heads: The number of heads to use in the transformer blocks' multi-head attention.
+        dim_feedforward: The dimension to expand the transformer's hidden dimension to in the feed-forward
+            layers.
+        feedforward_activation: The activation to use in the transformer block's feed-forward layers.
+        n_encoder_layers: The number of transformer layers to use for the transformer encoder.
+        n_decoder_layers: The number of transformer layers to use for the transformer decoder.
+        use_vae: Whether to use a variational objective during training. This introduces another transformer
+            which is used as the VAE's encoder (not to be confused with the transformer encoder - see
+            documentation in the policy class).
+        latent_dim: The VAE's latent dimension.
+        n_vae_encoder_layers: The number of transformer layers to use for the VAE's encoder.
+        temporal_ensemble_coeff: Coefficient for the exponential weighting scheme to apply for temporal
+            ensembling. Defaults to None which means temporal ensembling is not used. `n_action_steps` must be
+            1 when using this feature, as inference needs to happen at every step to form an ensemble. For
+            more information on how ensembling works, please see `ACTTemporalEnsembler`.
+        dropout: Dropout to use in the transformer layers (see code for details).
+        kl_weight: The weight to use for the KL-divergence component of the loss if the variational objective
+            is enabled. Loss is then calculated as: `reconstruction_loss + kl_weight * kld_loss`.
+    """
+
+    # Input / output structure.
+    n_obs_steps: int = 1
+    chunk_size: int = 100
+    n_action_steps: int = 100
+
+    normalization_mapping: dict[str, NormalizationMode] = field(
+        default_factory=lambda: {
+            "VISUAL": NormalizationMode.MEAN_STD,
+            "STATE": NormalizationMode.MEAN_STD,
+            "ACTION": NormalizationMode.MEAN_STD,
+        }
+    )
+
+    # Architecture.
+    # Vision backbone.
+    vision_backbone: str = "resnet18"
+    pretrained_backbone_weights: str | None = "ResNet18_Weights.IMAGENET1K_V1"
+    replace_final_stride_with_dilation: bool = False
+    # Transformer layers.
+    pre_norm: bool = False
+    dim_model: int = 512
+    n_heads: int = 8
+    dim_feedforward: int = 3200
+    feedforward_activation: str = "relu"
+    n_encoder_layers: int = 4
+    # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
+    # that means only the first layer is used. Here we match the original implementation by setting this to 1.
+    # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
+    n_decoder_layers: int = 1
+    # VAE.
+    use_vae: bool = True
+    latent_dim: int = 32
+    n_vae_encoder_layers: int = 4
+
+    # Inference.
+    # Note: the value used in ACT when temporal ensembling is enabled is 0.01.
+    temporal_ensemble_coeff: float | None = None
+
+    # Training and loss computation.
+    dropout: float = 0.1
+    kl_weight: float = 10.0
+
+    # Training preset
+    optimizer_lr: float = 1e-5
+    optimizer_weight_decay: float = 1e-4
+    optimizer_lr_backbone: float = 1e-5
+
+    def __post_init__(self):
+        super().__post_init__()
+
+        """Input validation (not exhaustive)."""
+        if not self.vision_backbone.startswith("resnet"):
+            raise ValueError(
+                f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}."
+            )
+        if self.temporal_ensemble_coeff is not None and self.n_action_steps > 1:
+            raise NotImplementedError(
+                "`n_action_steps` must be 1 when using temporal ensembling. This is "
+                "because the policy needs to be queried every step to compute the ensembled action."
+            )
+        if self.n_action_steps > self.chunk_size:
+            raise ValueError(
+                f"The chunk size is the upper bound for the number of action steps per model invocation. Got "
+                f"{self.n_action_steps} for `n_action_steps` and {self.chunk_size} for `chunk_size`."
+            )
+        if self.n_obs_steps != 1:
+            raise ValueError(
+                f"Multiple observation steps not handled yet. Got `n_obs_steps={self.n_obs_steps}`"
+            )
+
+    def get_optimizer_preset(self) -> AdamWConfig:
+        return AdamWConfig(
+            lr=self.optimizer_lr,
+            weight_decay=self.optimizer_weight_decay,
+        )
+
+    def get_scheduler_preset(self) -> None:
+        return None
+
+    def validate_features(self) -> None:
+        if not self.image_features and not self.env_state_feature:
+            raise ValueError("You must provide at least one image or the environment state among the inputs.")
+
+    @property
+    def observation_delta_indices(self) -> None:
+        return None
+
+    @property
+    def action_delta_indices(self) -> list:
+        return list(range(self.chunk_size))
+
+    @property
+    def reward_delta_indices(self) -> None:
+        return None
diff --git a/lerobot/src/lerobot/policies/act/modeling_act.py b/lerobot/src/lerobot/policies/act/modeling_act.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c67af9caf15d5deafcdb5a0d3b5feafc5dd72e3
--- /dev/null
+++ b/lerobot/src/lerobot/policies/act/modeling_act.py
@@ -0,0 +1,746 @@
+#!/usr/bin/env python

+# Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Action Chunking Transformer Policy
+
+As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://huggingface.co/papers/2304.13705).
+The majority of changes here involve removing unused code, unifying naming, and adding helpful comments.
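+
+A minimal usage sketch (hypothetical: it assumes an `ACTConfig` whose `input_features` and
+`output_features` have already been populated, e.g. from dataset metadata, so that
+`validate_features()` passes):
+
+```python
+from lerobot.policies.act.configuration_act import ACTConfig
+from lerobot.policies.act.modeling_act import ACTPolicy
+
+config = ACTConfig(chunk_size=100, n_action_steps=100)  # feature specs assumed set elsewhere
+policy = ACTPolicy(config)
+policy.reset()  # call whenever the environment is reset
+
+# `batch` maps input feature keys (e.g. "observation.state", "observation.images.<cam>") to
+# tensors, as described in `ACT.forward` below.
+action = policy.select_action(batch)  # one (batch_size, action_dim) action per call
+```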
+""" + +import math +from collections import deque +from collections.abc import Callable +from itertools import chain + +import einops +import numpy as np +import torch +import torch.nn.functional as F # noqa: N812 +import torchvision +from torch import Tensor, nn +from torchvision.models._utils import IntermediateLayerGetter +from torchvision.ops.misc import FrozenBatchNorm2d + +from lerobot.policies.act.configuration_act import ACTConfig +from lerobot.policies.pretrained import PreTrainedPolicy +from lerobot.utils.constants import ACTION, OBS_ENV_STATE, OBS_IMAGES, OBS_STATE + + +class ACTPolicy(PreTrainedPolicy): + """ + Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost + Hardware (paper: https://huggingface.co/papers/2304.13705, code: https://github.com/tonyzhaozh/act) + """ + + config_class = ACTConfig + name = "act" + + def __init__( + self, + config: ACTConfig, + **kwargs, + ): + """ + Args: + config: Policy configuration class instance or None, in which case the default instantiation of + the configuration class is used. + """ + super().__init__(config) + config.validate_features() + self.config = config + + self.model = ACT(config) + + if config.temporal_ensemble_coeff is not None: + self.temporal_ensembler = ACTTemporalEnsembler(config.temporal_ensemble_coeff, config.chunk_size) + + self.reset() + + def get_optim_params(self) -> dict: + # TODO(aliberts, rcadene): As of now, lr_backbone == lr + # Should we remove this and just `return self.parameters()`? + return [ + { + "params": [ + p + for n, p in self.named_parameters() + if not n.startswith("model.backbone") and p.requires_grad + ] + }, + { + "params": [ + p + for n, p in self.named_parameters() + if n.startswith("model.backbone") and p.requires_grad + ], + "lr": self.config.optimizer_lr_backbone, + }, + ] + + def reset(self): + """This should be called whenever the environment is reset.""" + if self.config.temporal_ensemble_coeff is not None: + self.temporal_ensembler.reset() + else: + self._action_queue = deque([], maxlen=self.config.n_action_steps) + + @torch.no_grad() + def select_action(self, batch: dict[str, Tensor]) -> Tensor: + """Select a single action given environment observations. + + This method wraps `select_actions` in order to return one action at a time for execution in the + environment. It works by managing the actions in a queue and only calling `select_actions` when the + queue is empty. + """ + self.eval() # keeping the policy in eval mode as it could be set to train mode while queue is consumed + + if self.config.temporal_ensemble_coeff is not None: + actions = self.predict_action_chunk(batch) + action = self.temporal_ensembler.update(actions) + return action + + # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by + # querying the policy. + if len(self._action_queue) == 0: + actions = self.predict_action_chunk(batch)[:, : self.config.n_action_steps] + + # `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue + # effectively has shape (n_action_steps, batch_size, *), hence the transpose. 
+ self._action_queue.extend(actions.transpose(0, 1)) + return self._action_queue.popleft() + + @torch.no_grad() + def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: + """Predict a chunk of actions given environment observations.""" + self.eval() + + if self.config.image_features: + batch = dict(batch) # shallow copy so that adding a key doesn't modify the original + batch[OBS_IMAGES] = [batch[key] for key in self.config.image_features] + + actions = self.model(batch)[0] + return actions + + def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: + """Run the batch through the model and compute the loss for training or validation.""" + if self.config.image_features: + batch = dict(batch) # shallow copy so that adding a key doesn't modify the original + batch[OBS_IMAGES] = [batch[key] for key in self.config.image_features] + + actions_hat, (mu_hat, log_sigma_x2_hat) = self.model(batch) + + l1_loss = ( + F.l1_loss(batch[ACTION], actions_hat, reduction="none") * ~batch["action_is_pad"].unsqueeze(-1) + ).mean() + + loss_dict = {"l1_loss": l1_loss.item()} + if self.config.use_vae: + # Calculate Dₖₗ(latent_pdf || standard_normal). Note: After computing the KL-divergence for + # each dimension independently, we sum over the latent dimension to get the total + # KL-divergence per batch element, then take the mean over the batch. + # (See App. B of https://huggingface.co/papers/1312.6114 for more details). + mean_kld = ( + (-0.5 * (1 + log_sigma_x2_hat - mu_hat.pow(2) - (log_sigma_x2_hat).exp())).sum(-1).mean() + ) + loss_dict["kld_loss"] = mean_kld.item() + loss = l1_loss + mean_kld * self.config.kl_weight + else: + loss = l1_loss + + return loss, loss_dict + + +class ACTTemporalEnsembler: + def __init__(self, temporal_ensemble_coeff: float, chunk_size: int) -> None: + """Temporal ensembling as described in Algorithm 2 of https://huggingface.co/papers/2304.13705. + + The weights are calculated as wᵢ = exp(-temporal_ensemble_coeff * i) where w₀ is the oldest action. + They are then normalized to sum to 1 by dividing by Σwᵢ. Here's some intuition around how the + coefficient works: + - Setting it to 0 uniformly weighs all actions. + - Setting it positive gives more weight to older actions. + - Setting it negative gives more weight to newer actions. + NOTE: The default value for `temporal_ensemble_coeff` used by the original ACT work is 0.01. This + results in older actions being weighed more highly than newer actions (the experiments documented in + https://github.com/huggingface/lerobot/pull/319 hint at why highly weighing new actions might be + detrimental: doing so aggressively may diminish the benefits of action chunking). + + Here we use an online method for computing the average rather than caching a history of actions in + order to compute the average offline. 
For a simple 1D sequence it looks something like:
+
+        ```
+        import torch
+
+        seq = torch.linspace(8, 8.5, 100)
+        print(seq)
+
+        m = 0.01
+        exp_weights = torch.exp(-m * torch.arange(len(seq)))
+        print(exp_weights)
+
+        # Calculate offline
+        avg = (exp_weights * seq).sum() / exp_weights.sum()
+        print("offline", avg)
+
+        # Calculate online
+        for i, item in enumerate(seq):
+            if i == 0:
+                avg = item
+                continue
+            avg *= exp_weights[:i].sum()
+            avg += item * exp_weights[i]
+            avg /= exp_weights[: i + 1].sum()
+            print("online", avg)
+        ```
+        """
+        self.chunk_size = chunk_size
+        self.ensemble_weights = torch.exp(-temporal_ensemble_coeff * torch.arange(chunk_size))
+        self.ensemble_weights_cumsum = torch.cumsum(self.ensemble_weights, dim=0)
+        self.reset()
+
+    def reset(self):
+        """Resets the online computation variables."""
+        self.ensembled_actions = None
+        # (chunk_size,) count of how many actions are in the ensemble for each time step in the sequence.
+        self.ensembled_actions_count = None
+
+    def update(self, actions: Tensor) -> Tensor:
+        """
+        Takes a (batch, chunk_size, action_dim) sequence of actions, updates the temporal ensemble for all
+        time steps, and pops/returns the next batch of actions in the sequence.
+        """
+        self.ensemble_weights = self.ensemble_weights.to(device=actions.device)
+        self.ensemble_weights_cumsum = self.ensemble_weights_cumsum.to(device=actions.device)
+        if self.ensembled_actions is None:
+            # Initializes `self.ensembled_actions` to the sequence of actions predicted during the first
+            # time step of the episode.
+            self.ensembled_actions = actions.clone()
+            # Note: The last dimension is unsqueezed to make sure we can broadcast properly for tensor
+            # operations later.
+            self.ensembled_actions_count = torch.ones(
+                (self.chunk_size, 1), dtype=torch.long, device=self.ensembled_actions.device
+            )
+        else:
+            # self.ensembled_actions will have shape (batch_size, chunk_size - 1, action_dim). Compute
+            # the online update for those entries.
+            self.ensembled_actions *= self.ensemble_weights_cumsum[self.ensembled_actions_count - 1]
+            self.ensembled_actions += actions[:, :-1] * self.ensemble_weights[self.ensembled_actions_count]
+            self.ensembled_actions /= self.ensemble_weights_cumsum[self.ensembled_actions_count]
+            self.ensembled_actions_count = torch.clamp(self.ensembled_actions_count + 1, max=self.chunk_size)
+            # The last action, which has no prior online average, needs to get concatenated onto the end.
+            self.ensembled_actions = torch.cat([self.ensembled_actions, actions[:, -1:]], dim=1)
+            self.ensembled_actions_count = torch.cat(
+                [self.ensembled_actions_count, torch.ones_like(self.ensembled_actions_count[-1:])]
+            )
+        # "Consume" the first action.
+        action, self.ensembled_actions, self.ensembled_actions_count = (
+            self.ensembled_actions[:, 0],
+            self.ensembled_actions[:, 1:],
+            self.ensembled_actions_count[1:],
+        )
+        return action
+
+
+class ACT(nn.Module):
+    """Action Chunking Transformer: The underlying neural network for ACTPolicy.
+
+    Note: In this code we use the terms `vae_encoder`, `encoder`, `decoder`. The meanings are as follows.
+        - The `vae_encoder` is, as per the literature around variational auto-encoders (VAE), the part of the
+          model that encodes the target data (a sequence of actions), and the condition (the robot
+          joint-space).
+        - A transformer with an `encoder` (not the VAE encoder) and `decoder` (not the VAE decoder) with
+          cross-attention is used as the VAE decoder. 
For these terms, we drop the `vae_` prefix because we + have an option to train this model without the variational objective (in which case we drop the + `vae_encoder` altogether, and nothing about this model has anything to do with a VAE). + + Transformer + Used alone for inference + (acts as VAE decoder + during training) + ┌───────────────────────┐ + │ Outputs │ + │ ▲ │ + │ ┌─────►┌───────┐ │ + ┌──────┐ │ │ │Transf.│ │ + │ │ │ ├─────►│decoder│ │ + ┌────┴────┐ │ │ │ │ │ │ + │ │ │ │ ┌───┴───┬─►│ │ │ + │ VAE │ │ │ │ │ └───────┘ │ + │ encoder │ │ │ │Transf.│ │ + │ │ │ │ │encoder│ │ + └───▲─────┘ │ │ │ │ │ + │ │ │ └▲──▲─▲─┘ │ + │ │ │ │ │ │ │ + inputs └─────┼──┘ │ image emb. │ + │ state emb. │ + └───────────────────────┘ + """ + + def __init__(self, config: ACTConfig): + # BERT style VAE encoder with input tokens [cls, robot_state, *action_sequence]. + # The cls token forms parameters of the latent's distribution (like this [*means, *log_variances]). + super().__init__() + self.config = config + + if self.config.use_vae: + self.vae_encoder = ACTEncoder(config, is_vae_encoder=True) + self.vae_encoder_cls_embed = nn.Embedding(1, config.dim_model) + # Projection layer for joint-space configuration to hidden dimension. + if self.config.robot_state_feature: + self.vae_encoder_robot_state_input_proj = nn.Linear( + self.config.robot_state_feature.shape[0], config.dim_model + ) + # Projection layer for action (joint-space target) to hidden dimension. + self.vae_encoder_action_input_proj = nn.Linear( + self.config.action_feature.shape[0], + config.dim_model, + ) + # Projection layer from the VAE encoder's output to the latent distribution's parameter space. + self.vae_encoder_latent_output_proj = nn.Linear(config.dim_model, config.latent_dim * 2) + # Fixed sinusoidal positional embedding for the input to the VAE encoder. Unsqueeze for batch + # dimension. + num_input_token_encoder = 1 + config.chunk_size + if self.config.robot_state_feature: + num_input_token_encoder += 1 + self.register_buffer( + "vae_encoder_pos_enc", + create_sinusoidal_pos_embedding(num_input_token_encoder, config.dim_model).unsqueeze(0), + ) + + # Backbone for image feature extraction. + if self.config.image_features: + backbone_model = getattr(torchvision.models, config.vision_backbone)( + replace_stride_with_dilation=[False, False, config.replace_final_stride_with_dilation], + weights=config.pretrained_backbone_weights, + norm_layer=FrozenBatchNorm2d, + ) + # Note: The assumption here is that we are using a ResNet model (and hence layer4 is the final + # feature map). + # Note: The forward method of this returns a dict: {"feature_map": output}. + self.backbone = IntermediateLayerGetter(backbone_model, return_layers={"layer4": "feature_map"}) + + # Transformer (acts as VAE decoder when training with the variational objective). + self.encoder = ACTEncoder(config) + self.decoder = ACTDecoder(config) + + # Transformer encoder input projections. The tokens will be structured like + # [latent, (robot_state), (env_state), (image_feature_map_pixels)]. 
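+        # Each input below is projected to `dim_model`, so every token entering the transformer
+        # encoder has the same width regardless of its source modality.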
+ if self.config.robot_state_feature: + self.encoder_robot_state_input_proj = nn.Linear( + self.config.robot_state_feature.shape[0], config.dim_model + ) + if self.config.env_state_feature: + self.encoder_env_state_input_proj = nn.Linear( + self.config.env_state_feature.shape[0], config.dim_model + ) + self.encoder_latent_input_proj = nn.Linear(config.latent_dim, config.dim_model) + if self.config.image_features: + self.encoder_img_feat_input_proj = nn.Conv2d( + backbone_model.fc.in_features, config.dim_model, kernel_size=1 + ) + # Transformer encoder positional embeddings. + n_1d_tokens = 1 # for the latent + if self.config.robot_state_feature: + n_1d_tokens += 1 + if self.config.env_state_feature: + n_1d_tokens += 1 + self.encoder_1d_feature_pos_embed = nn.Embedding(n_1d_tokens, config.dim_model) + if self.config.image_features: + self.encoder_cam_feat_pos_embed = ACTSinusoidalPositionEmbedding2d(config.dim_model // 2) + + # Transformer decoder. + # Learnable positional embedding for the transformer's decoder (in the style of DETR object queries). + self.decoder_pos_embed = nn.Embedding(config.chunk_size, config.dim_model) + + # Final action regression head on the output of the transformer's decoder. + self.action_head = nn.Linear(config.dim_model, self.config.action_feature.shape[0]) + + self._reset_parameters() + + def _reset_parameters(self): + """Xavier-uniform initialization of the transformer parameters as in the original code.""" + for p in chain(self.encoder.parameters(), self.decoder.parameters()): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tensor] | tuple[None, None]]: + """A forward pass through the Action Chunking Transformer (with optional VAE encoder). + + `batch` should have the following structure: + { + [robot_state_feature] (optional): (B, state_dim) batch of robot states. + + [image_features]: (B, n_cameras, C, H, W) batch of images. + AND/OR + [env_state_feature]: (B, env_dim) batch of environment states. + + [action_feature] (optional, only if training with VAE): (B, chunk_size, action dim) batch of actions. + } + + Returns: + (B, chunk_size, action_dim) batch of action sequences + Tuple containing the latent PDF's parameters (mean, log(σ²)) both as (B, L) tensors where L is the + latent dimension. + """ + if self.config.use_vae and self.training: + assert ACTION in batch, ( + "actions must be provided when using the variational objective in training mode." + ) + + batch_size = batch[OBS_IMAGES][0].shape[0] if OBS_IMAGES in batch else batch[OBS_ENV_STATE].shape[0] + + # Prepare the latent for input to the transformer encoder. + if self.config.use_vae and ACTION in batch and self.training: + # Prepare the input to the VAE encoder: [cls, *joint_space_configuration, *action_sequence]. + cls_embed = einops.repeat( + self.vae_encoder_cls_embed.weight, "1 d -> b 1 d", b=batch_size + ) # (B, 1, D) + if self.config.robot_state_feature: + robot_state_embed = self.vae_encoder_robot_state_input_proj(batch[OBS_STATE]) + robot_state_embed = robot_state_embed.unsqueeze(1) # (B, 1, D) + action_embed = self.vae_encoder_action_input_proj(batch[ACTION]) # (B, S, D) + + if self.config.robot_state_feature: + vae_encoder_input = [cls_embed, robot_state_embed, action_embed] # (B, S+2, D) + else: + vae_encoder_input = [cls_embed, action_embed] + vae_encoder_input = torch.cat(vae_encoder_input, axis=1) + + # Prepare fixed positional embedding. 
+ # Note: detach() shouldn't be necessary but leaving it the same as the original code just in case. + pos_embed = self.vae_encoder_pos_enc.clone().detach() # (1, S+2, D) + + # Prepare key padding mask for the transformer encoder. We have 1 or 2 extra tokens at the start of the + # sequence depending whether we use the input states or not (cls and robot state) + # False means not a padding token. + cls_joint_is_pad = torch.full( + (batch_size, 2 if self.config.robot_state_feature else 1), + False, + device=batch[OBS_STATE].device, + ) + key_padding_mask = torch.cat( + [cls_joint_is_pad, batch["action_is_pad"]], axis=1 + ) # (bs, seq+1 or 2) + + # Forward pass through VAE encoder to get the latent PDF parameters. + cls_token_out = self.vae_encoder( + vae_encoder_input.permute(1, 0, 2), + pos_embed=pos_embed.permute(1, 0, 2), + key_padding_mask=key_padding_mask, + )[0] # select the class token, with shape (B, D) + latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out) + mu = latent_pdf_params[:, : self.config.latent_dim] + # This is 2log(sigma). Done this way to match the original implementation. + log_sigma_x2 = latent_pdf_params[:, self.config.latent_dim :] + + # Sample the latent with the reparameterization trick. + latent_sample = mu + log_sigma_x2.div(2).exp() * torch.randn_like(mu) + else: + # When not using the VAE encoder, we set the latent to be all zeros. + mu = log_sigma_x2 = None + # TODO(rcadene, alexander-soare): remove call to `.to` to speedup forward ; precompute and use buffer + latent_sample = torch.zeros([batch_size, self.config.latent_dim], dtype=torch.float32).to( + batch[OBS_STATE].device + ) + + # Prepare transformer encoder inputs. + encoder_in_tokens = [self.encoder_latent_input_proj(latent_sample)] + encoder_in_pos_embed = list(self.encoder_1d_feature_pos_embed.weight.unsqueeze(1)) + # Robot state token. + if self.config.robot_state_feature: + encoder_in_tokens.append(self.encoder_robot_state_input_proj(batch[OBS_STATE])) + # Environment state token. + if self.config.env_state_feature: + encoder_in_tokens.append(self.encoder_env_state_input_proj(batch[OBS_ENV_STATE])) + + if self.config.image_features: + # For a list of images, the H and W may vary but H*W is constant. + # NOTE: If modifying this section, verify on MPS devices that + # gradients remain stable (no explosions or NaNs). + for img in batch[OBS_IMAGES]: + cam_features = self.backbone(img)["feature_map"] + cam_pos_embed = self.encoder_cam_feat_pos_embed(cam_features).to(dtype=cam_features.dtype) + cam_features = self.encoder_img_feat_input_proj(cam_features) + + # Rearrange features to (sequence, batch, dim). + cam_features = einops.rearrange(cam_features, "b c h w -> (h w) b c") + cam_pos_embed = einops.rearrange(cam_pos_embed, "b c h w -> (h w) b c") + + # Extend immediately instead of accumulating and concatenating + # Convert to list to extend properly + encoder_in_tokens.extend(list(cam_features)) + encoder_in_pos_embed.extend(list(cam_pos_embed)) + + # Stack all tokens along the sequence dimension. + encoder_in_tokens = torch.stack(encoder_in_tokens, axis=0) + encoder_in_pos_embed = torch.stack(encoder_in_pos_embed, axis=0) + + # Forward pass through the transformer modules. 
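+        # `encoder_in_tokens` is (S, B, dim_model). The decoder is queried with a zero tensor of
+        # shape (chunk_size, B, dim_model); its positional information comes entirely from the
+        # learned `decoder_pos_embed` (DETR-style object queries).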
+ encoder_out = self.encoder(encoder_in_tokens, pos_embed=encoder_in_pos_embed) + # TODO(rcadene, alexander-soare): remove call to `device` ; precompute and use buffer + decoder_in = torch.zeros( + (self.config.chunk_size, batch_size, self.config.dim_model), + dtype=encoder_in_pos_embed.dtype, + device=encoder_in_pos_embed.device, + ) + decoder_out = self.decoder( + decoder_in, + encoder_out, + encoder_pos_embed=encoder_in_pos_embed, + decoder_pos_embed=self.decoder_pos_embed.weight.unsqueeze(1), + ) + + # Move back to (B, S, C). + decoder_out = decoder_out.transpose(0, 1) + + actions = self.action_head(decoder_out) + + return actions, (mu, log_sigma_x2) + + +class ACTEncoder(nn.Module): + """Convenience module for running multiple encoder layers, maybe followed by normalization.""" + + def __init__(self, config: ACTConfig, is_vae_encoder: bool = False): + super().__init__() + self.is_vae_encoder = is_vae_encoder + num_layers = config.n_vae_encoder_layers if self.is_vae_encoder else config.n_encoder_layers + self.layers = nn.ModuleList([ACTEncoderLayer(config) for _ in range(num_layers)]) + self.norm = nn.LayerNorm(config.dim_model) if config.pre_norm else nn.Identity() + + def forward( + self, x: Tensor, pos_embed: Tensor | None = None, key_padding_mask: Tensor | None = None + ) -> Tensor: + for layer in self.layers: + x = layer(x, pos_embed=pos_embed, key_padding_mask=key_padding_mask) + x = self.norm(x) + return x + + +class ACTEncoderLayer(nn.Module): + def __init__(self, config: ACTConfig): + super().__init__() + self.self_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout) + + # Feed forward layers. + self.linear1 = nn.Linear(config.dim_model, config.dim_feedforward) + self.dropout = nn.Dropout(config.dropout) + self.linear2 = nn.Linear(config.dim_feedforward, config.dim_model) + + self.norm1 = nn.LayerNorm(config.dim_model) + self.norm2 = nn.LayerNorm(config.dim_model) + self.dropout1 = nn.Dropout(config.dropout) + self.dropout2 = nn.Dropout(config.dropout) + + self.activation = get_activation_fn(config.feedforward_activation) + self.pre_norm = config.pre_norm + + def forward(self, x, pos_embed: Tensor | None = None, key_padding_mask: Tensor | None = None) -> Tensor: + skip = x + if self.pre_norm: + x = self.norm1(x) + q = k = x if pos_embed is None else x + pos_embed + x = self.self_attn(q, k, value=x, key_padding_mask=key_padding_mask) + x = x[0] # note: [0] to select just the output, not the attention weights + x = skip + self.dropout1(x) + if self.pre_norm: + skip = x + x = self.norm2(x) + else: + x = self.norm1(x) + skip = x + x = self.linear2(self.dropout(self.activation(self.linear1(x)))) + x = skip + self.dropout2(x) + if not self.pre_norm: + x = self.norm2(x) + return x + + +class ACTDecoder(nn.Module): + def __init__(self, config: ACTConfig): + """Convenience module for running multiple decoder layers followed by normalization.""" + super().__init__() + self.layers = nn.ModuleList([ACTDecoderLayer(config) for _ in range(config.n_decoder_layers)]) + self.norm = nn.LayerNorm(config.dim_model) + + def forward( + self, + x: Tensor, + encoder_out: Tensor, + decoder_pos_embed: Tensor | None = None, + encoder_pos_embed: Tensor | None = None, + ) -> Tensor: + for layer in self.layers: + x = layer( + x, encoder_out, decoder_pos_embed=decoder_pos_embed, encoder_pos_embed=encoder_pos_embed + ) + if self.norm is not None: + x = self.norm(x) + return x + + +class ACTDecoderLayer(nn.Module): + def __init__(self, config: ACTConfig): + super().__init__() 
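+        # Self-attention over the decoder queries, then cross-attention into the encoder output;
+        # see `forward` below for how the two are chained.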
+        self.self_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout)
+        self.multihead_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout)
+
+        # Feed forward layers.
+        self.linear1 = nn.Linear(config.dim_model, config.dim_feedforward)
+        self.dropout = nn.Dropout(config.dropout)
+        self.linear2 = nn.Linear(config.dim_feedforward, config.dim_model)
+
+        self.norm1 = nn.LayerNorm(config.dim_model)
+        self.norm2 = nn.LayerNorm(config.dim_model)
+        self.norm3 = nn.LayerNorm(config.dim_model)
+        self.dropout1 = nn.Dropout(config.dropout)
+        self.dropout2 = nn.Dropout(config.dropout)
+        self.dropout3 = nn.Dropout(config.dropout)
+
+        self.activation = get_activation_fn(config.feedforward_activation)
+        self.pre_norm = config.pre_norm
+
+    def maybe_add_pos_embed(self, tensor: Tensor, pos_embed: Tensor | None) -> Tensor:
+        return tensor if pos_embed is None else tensor + pos_embed
+
+    def forward(
+        self,
+        x: Tensor,
+        encoder_out: Tensor,
+        decoder_pos_embed: Tensor | None = None,
+        encoder_pos_embed: Tensor | None = None,
+    ) -> Tensor:
+        """
+        Args:
+            x: (Decoder Sequence, Batch, Channel) tensor of input tokens.
+            encoder_out: (Encoder Sequence, B, C) output features from the last layer of the encoder we are
+                cross-attending with.
+            encoder_pos_embed: (ES, 1, C) positional embedding for keys (from the encoder).
+            decoder_pos_embed: (DS, 1, C) positional embedding for the queries (from the decoder).
+        Returns:
+            (DS, B, C) tensor of decoder output features.
+        """
+        skip = x
+        if self.pre_norm:
+            x = self.norm1(x)
+        q = k = self.maybe_add_pos_embed(x, decoder_pos_embed)
+        x = self.self_attn(q, k, value=x)[0]  # select just the output, not the attention weights
+        x = skip + self.dropout1(x)
+        if self.pre_norm:
+            skip = x
+            x = self.norm2(x)
+        else:
+            x = self.norm1(x)
+            skip = x
+        x = self.multihead_attn(
+            query=self.maybe_add_pos_embed(x, decoder_pos_embed),
+            key=self.maybe_add_pos_embed(encoder_out, encoder_pos_embed),
+            value=encoder_out,
+        )[0]  # select just the output, not the attention weights
+        x = skip + self.dropout2(x)
+        if self.pre_norm:
+            skip = x
+            x = self.norm3(x)
+        else:
+            x = self.norm2(x)
+            skip = x
+        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
+        x = skip + self.dropout3(x)
+        if not self.pre_norm:
+            x = self.norm3(x)
+        return x
+
+
+def create_sinusoidal_pos_embedding(num_positions: int, dimension: int) -> Tensor:
+    """1D sinusoidal positional embeddings as in Attention is All You Need.
+
+    Args:
+        num_positions: Number of token positions required.
+    Returns: (num_positions, dimension) position embeddings (the first dimension indexes the positions).
+
+    """
+
+    def get_position_angle_vec(position):
+        return [position / np.power(10000, 2 * (hid_j // 2) / dimension) for hid_j in range(dimension)]
+
+    sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(num_positions)])
+    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
+    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
+    return torch.from_numpy(sinusoid_table).float()
+
+
+class ACTSinusoidalPositionEmbedding2d(nn.Module):
+    """2D sinusoidal positional embeddings similar to what's presented in Attention Is All You Need.
+
+    The variation is that the position indices are normalized in [0, 2π] (not quite: the lower bound is 1/H
+    for the vertical direction, and 1/W for the horizontal direction).
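+
+    Concretely, for channel pair (2i, 2i + 1) and a normalized position p, the embedding is
+    (sin(p / T^(2i / D)), cos(p / T^(2i / D))) with temperature T = 10000 and D = `dimension`,
+    computed separately for the y and x directions and concatenated along the channel dimension.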
+    """
+
+    def __init__(self, dimension: int):
+        """
+        Args:
+            dimension: The desired dimension of the embeddings.
+        """
+        super().__init__()
+        self.dimension = dimension
+        self._two_pi = 2 * math.pi
+        self._eps = 1e-6
+        # Inverse "common ratio" for the geometric progression in sinusoid frequencies.
+        self._temperature = 10000
+
+    def forward(self, x: Tensor) -> Tensor:
+        """
+        Args:
+            x: A (B, C, H, W) batch of 2D feature maps to generate the embeddings for.
+        Returns:
+            A (1, C, H, W) batch of corresponding sinusoidal positional embeddings.
+        """
+        not_mask = torch.ones_like(x[0, :1])  # (1, H, W)
+        # Note: These are like range(1, H+1) and range(1, W+1) respectively, but in most implementations
+        # they would be range(0, H) and range(0, W). Keeping it as is to match the original code.
+        y_range = not_mask.cumsum(1, dtype=torch.float32)
+        x_range = not_mask.cumsum(2, dtype=torch.float32)
+
+        # "Normalize" the position index such that it ranges in [0, 2π].
+        # Note: Adding epsilon on the denominator should not be needed as all values of y_range and x_range
+        # are non-zero by construction. This is an artifact of the original code.
+        y_range = y_range / (y_range[:, -1:, :] + self._eps) * self._two_pi
+        x_range = x_range / (x_range[:, :, -1:] + self._eps) * self._two_pi
+
+        inverse_frequency = self._temperature ** (
+            2 * (torch.arange(self.dimension, dtype=torch.float32, device=x.device) // 2) / self.dimension
+        )
+
+        x_range = x_range.unsqueeze(-1) / inverse_frequency  # (1, H, W, 1)
+        y_range = y_range.unsqueeze(-1) / inverse_frequency  # (1, H, W, 1)
+
+        # Note: this stack then flatten operation results in interleaved sine and cosine terms.
+        # pos_embed_x and pos_embed_y are (1, H, W, C // 2).
+        pos_embed_x = torch.stack((x_range[..., 0::2].sin(), x_range[..., 1::2].cos()), dim=-1).flatten(3)
+        pos_embed_y = torch.stack((y_range[..., 0::2].sin(), y_range[..., 1::2].cos()), dim=-1).flatten(3)
+        pos_embed = torch.cat((pos_embed_y, pos_embed_x), dim=3).permute(0, 3, 1, 2)  # (1, C, H, W)
+
+        return pos_embed
+
+
+def get_activation_fn(activation: str) -> Callable:
+    """Return an activation function given a string."""
+    if activation == "relu":
+        return F.relu
+    if activation == "gelu":
+        return F.gelu
+    if activation == "glu":
+        return F.glu
+    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
diff --git a/lerobot/src/lerobot/policies/act/processor_act.py b/lerobot/src/lerobot/policies/act/processor_act.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dedf8a99dc60ed2bafe862646e69f77021bcf9e
--- /dev/null
+++ b/lerobot/src/lerobot/policies/act/processor_act.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python

+# Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
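+
+"""Pre- and post-processing pipelines for the ACT policy.
+
+A hypothetical usage sketch (the exact pipeline call signature is an assumption and may differ):
+
+```python
+preprocessor, postprocessor = make_act_pre_post_processors(config, dataset_stats)
+model_inputs = preprocessor(raw_batch)  # rename, add batch dim, move to device, normalize
+action = postprocessor(policy_action)  # unnormalize, move back to CPU
+```
+"""
+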
+from typing import Any + +import torch + +from lerobot.policies.act.configuration_act import ACTConfig +from lerobot.processor import ( + AddBatchDimensionProcessorStep, + DeviceProcessorStep, + NormalizerProcessorStep, + PolicyAction, + PolicyProcessorPipeline, + RenameObservationsProcessorStep, + UnnormalizerProcessorStep, +) +from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action +from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME + + +def make_act_pre_post_processors( + config: ACTConfig, + dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None, +) -> tuple[ + PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], + PolicyProcessorPipeline[PolicyAction, PolicyAction], +]: + """Creates the pre- and post-processing pipelines for the ACT policy. + + The pre-processing pipeline handles normalization, batching, and device placement for the model inputs. + The post-processing pipeline handles unnormalization and moves the model outputs back to the CPU. + + Args: + config (ACTConfig): The ACT policy configuration object. + dataset_stats (dict[str, dict[str, torch.Tensor]] | None): A dictionary containing dataset + statistics (e.g., mean and std) used for normalization. Defaults to None. + + Returns: + tuple[PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], PolicyProcessorPipeline[PolicyAction, PolicyAction]]: A tuple containing the + pre-processor pipeline and the post-processor pipeline. + """ + + input_steps = [ + RenameObservationsProcessorStep(rename_map={}), + AddBatchDimensionProcessorStep(), + DeviceProcessorStep(device=config.device), + NormalizerProcessorStep( + features={**config.input_features, **config.output_features}, + norm_map=config.normalization_mapping, + stats=dataset_stats, + device=config.device, + ), + ] + output_steps = [ + UnnormalizerProcessorStep( + features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats + ), + DeviceProcessorStep(device="cpu"), + ] + + return ( + PolicyProcessorPipeline[dict[str, Any], dict[str, Any]]( + steps=input_steps, + name=POLICY_PREPROCESSOR_DEFAULT_NAME, + ), + PolicyProcessorPipeline[PolicyAction, PolicyAction]( + steps=output_steps, + name=POLICY_POSTPROCESSOR_DEFAULT_NAME, + to_transition=policy_action_to_transition, + to_output=transition_to_policy_action, + ), + ) diff --git a/lerobot/src/lerobot/policies/diffusion/configuration_diffusion.py b/lerobot/src/lerobot/policies/diffusion/configuration_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..0aab8040daa399926107bb14e69edddce3f2544c --- /dev/null +++ b/lerobot/src/lerobot/policies/diffusion/configuration_diffusion.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python + +# Copyright 2024 Columbia Artificial Intelligence, Robotics Lab, +# and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
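+
+"""Configuration for DiffusionPolicy.
+
+A hypothetical instantiation sketch (the values below are illustrative, not recommendations):
+
+```python
+config = DiffusionConfig(horizon=16, n_action_steps=8, down_dims=(512, 1024, 2048))
+# `horizon` must be divisible by 2 ** len(down_dims) (the U-Net's total downsampling factor);
+# here 16 % 8 == 0, so `__post_init__` accepts it.
+```
+"""
+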
+from dataclasses import dataclass, field
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import NormalizationMode
+from lerobot.optim.optimizers import AdamConfig
+from lerobot.optim.schedulers import DiffuserSchedulerConfig
+
+
+@PreTrainedConfig.register_subclass("diffusion")
+@dataclass
+class DiffusionConfig(PreTrainedConfig):
+    """Configuration class for DiffusionPolicy.
+
+    Defaults are configured for training with PushT providing proprioceptive and single camera observations.
+
+    The parameters you will most likely need to change are the ones which depend on the environment / sensors.
+    Those are: `input_shapes` and `output_shapes`.
+
+    Notes on the inputs and outputs:
+        - "observation.state" is required as an input key.
+        - Either:
+            - At least one key starting with "observation.image" is required as an input.
+              AND/OR
+            - The key "observation.environment_state" is required as input.
+        - If there are multiple keys beginning with "observation.image" they are treated as multiple camera
+          views. Right now we only support all images having the same shape.
+        - "action" is required as an output key.
+
+    Args:
+        n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the
+            current step and additional steps going back).
+        horizon: Diffusion model action prediction size as detailed in `DiffusionPolicy.select_action`.
+        n_action_steps: The number of action steps to run in the environment for one invocation of the policy.
+            See `DiffusionPolicy.select_action` for more details.
+        input_shapes: A dictionary defining the shapes of the input data for the policy. The key represents
+            the input data name, and the value is a list indicating the dimensions of the corresponding data.
+            For example, "observation.image" refers to an input from a camera with dimensions [3, 96, 96],
+            indicating it has three color channels and 96x96 resolution. Importantly, `input_shapes` doesn't
+            include batch dimension or temporal dimension.
+        output_shapes: A dictionary defining the shapes of the output data for the policy. The key represents
+            the output data name, and the value is a list indicating the dimensions of the corresponding data.
+            For example, "action" refers to an output shape of [14], indicating 14-dimensional actions.
+            Importantly, `output_shapes` doesn't include batch dimension or temporal dimension.
+        input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"),
+            and the value specifies the normalization mode to apply. The two available modes are "mean_std"
+            which subtracts the mean and divides by the standard deviation and "min_max" which rescales to a
+            [-1, 1] range.
+        output_normalization_modes: Similar dictionary as `input_normalization_modes`, but to unnormalize to the
+            original scale. Note that this is also used for normalizing the training targets.
+        vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
+        crop_shape: (H, W) shape to crop images to as a preprocessing step for the vision backbone. Must fit
+            within the image size. If None, no cropping is done.
+        crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval
+            mode).
+        pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
+            `None` means no pretrained weights.
+        use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
+            The group sizes are set to be about 16 (to be precise, feature_dim // 16).
+        spatial_softmax_num_keypoints: Number of keypoints for SpatialSoftmax.
+        use_separate_rgb_encoder_per_camera: Whether to use a separate RGB encoder for each camera view.
+        down_dims: Feature dimension for each stage of temporal downsampling in the diffusion modeling Unet.
+            You may provide a variable number of dimensions, thereby also controlling the degree of
+            downsampling.
+        kernel_size: The convolutional kernel size of the diffusion modeling Unet.
+        n_groups: Number of groups used in the group norm of the Unet's convolutional blocks.
+        diffusion_step_embed_dim: The Unet is conditioned on the diffusion timestep via a small non-linear
+            network. This is the output dimension of that network, i.e., the embedding dimension.
+        use_film_scale_modulation: FiLM (https://huggingface.co/papers/1709.07871) is used for the Unet conditioning.
+            Bias modulation is used by default, while this parameter indicates whether to also use scale
+            modulation.
+        noise_scheduler_type: Name of the noise scheduler to use. Supported options: ["DDPM", "DDIM"].
+        num_train_timesteps: Number of diffusion steps for the forward diffusion schedule.
+        beta_schedule: Name of the diffusion beta schedule as per DDPMScheduler from Hugging Face diffusers.
+        beta_start: Beta value for the first forward-diffusion step.
+        beta_end: Beta value for the last forward-diffusion step.
+        prediction_type: The type of prediction that the diffusion modeling Unet makes. Choose from "epsilon"
+            or "sample". These have equivalent outcomes from a latent variable modeling perspective, but
+            "epsilon" has been shown to work better in many deep neural network settings.
+        clip_sample: Whether to clip the sample to [-`clip_sample_range`, +`clip_sample_range`] for each
+            denoising step at inference time. WARNING: you will need to make sure your action-space is
+            normalized to fit within this range.
+        clip_sample_range: The magnitude of the clipping range as described above.
+        num_inference_steps: Number of reverse diffusion steps to use at inference time (steps are evenly
+            spaced). If not provided, this defaults to the same as `num_train_timesteps`.
+        do_mask_loss_for_padding: Whether to mask the loss when there are copy-padded actions. See
+            `LeRobotDataset` and `load_previous_and_future_frames` for more information. Note, this defaults
+            to False as the original Diffusion Policy implementation does the same.
+    """
+
+    # Inputs / output structure.
+    n_obs_steps: int = 2
+    horizon: int = 16
+    n_action_steps: int = 8
+
+    normalization_mapping: dict[str, NormalizationMode] = field(
+        default_factory=lambda: {
+            "VISUAL": NormalizationMode.MEAN_STD,
+            "STATE": NormalizationMode.MIN_MAX,
+            "ACTION": NormalizationMode.MIN_MAX,
+        }
+    )
+
+    # The original implementation doesn't sample frames for the last 7 steps,
+    # which avoids excessive padding and leads to improved training results.
+    drop_n_last_frames: int = 7  # horizon - n_action_steps - n_obs_steps + 1
+
+    # Architecture / modeling.
+    # Vision backbone.
+    vision_backbone: str = "resnet18"
+    crop_shape: tuple[int, int] | None = (84, 84)
+    crop_is_random: bool = True
+    pretrained_backbone_weights: str | None = None
+    use_group_norm: bool = True
+    spatial_softmax_num_keypoints: int = 32
+    use_separate_rgb_encoder_per_camera: bool = False
+    # Unet.
+    down_dims: tuple[int, ...]
= (512, 1024, 2048)
+    kernel_size: int = 5
+    n_groups: int = 8
+    diffusion_step_embed_dim: int = 128
+    use_film_scale_modulation: bool = True
+    # Noise scheduler.
+    noise_scheduler_type: str = "DDPM"
+    num_train_timesteps: int = 100
+    beta_schedule: str = "squaredcos_cap_v2"
+    beta_start: float = 0.0001
+    beta_end: float = 0.02
+    prediction_type: str = "epsilon"
+    clip_sample: bool = True
+    clip_sample_range: float = 1.0
+
+    # Inference
+    num_inference_steps: int | None = None
+
+    # Loss computation
+    do_mask_loss_for_padding: bool = False
+
+    # Training presets
+    optimizer_lr: float = 1e-4
+    optimizer_betas: tuple = (0.95, 0.999)
+    optimizer_eps: float = 1e-8
+    optimizer_weight_decay: float = 1e-6
+    scheduler_name: str = "cosine"
+    scheduler_warmup_steps: int = 500
+
+    def __post_init__(self):
+        super().__post_init__()
+
+        # Input validation (not exhaustive).
+        if not self.vision_backbone.startswith("resnet"):
+            raise ValueError(
+                f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}."
+            )
+
+        supported_prediction_types = ["epsilon", "sample"]
+        if self.prediction_type not in supported_prediction_types:
+            raise ValueError(
+                f"`prediction_type` must be one of {supported_prediction_types}. Got {self.prediction_type}."
+            )
+        supported_noise_schedulers = ["DDPM", "DDIM"]
+        if self.noise_scheduler_type not in supported_noise_schedulers:
+            raise ValueError(
+                f"`noise_scheduler_type` must be one of {supported_noise_schedulers}. "
+                f"Got {self.noise_scheduler_type}."
+            )
+
+        # Check that the horizon size and U-Net downsampling are compatible.
+        # The U-Net downsamples by 2 with each stage.
+        downsampling_factor = 2 ** len(self.down_dims)
+        if self.horizon % downsampling_factor != 0:
+            raise ValueError(
+                "The horizon should be an integer multiple of the downsampling factor (which is determined "
+                f"by `len(down_dims)`). Got {self.horizon=} and {self.down_dims=}"
+            )
+
+    def get_optimizer_preset(self) -> AdamConfig:
+        return AdamConfig(
+            lr=self.optimizer_lr,
+            betas=self.optimizer_betas,
+            eps=self.optimizer_eps,
+            weight_decay=self.optimizer_weight_decay,
+        )
+
+    def get_scheduler_preset(self) -> DiffuserSchedulerConfig:
+        return DiffuserSchedulerConfig(
+            name=self.scheduler_name,
+            num_warmup_steps=self.scheduler_warmup_steps,
+        )
+
+    def validate_features(self) -> None:
+        if len(self.image_features) == 0 and self.env_state_feature is None:
+            raise ValueError("You must provide at least one image or the environment state among the inputs.")
+
+        if self.crop_shape is not None:
+            for key, image_ft in self.image_features.items():
+                if self.crop_shape[0] > image_ft.shape[1] or self.crop_shape[1] > image_ft.shape[2]:
+                    raise ValueError(
+                        f"`crop_shape` should fit within the image shapes. Got {self.crop_shape} "
+                        f"for `crop_shape` and {image_ft.shape} for "
+                        f"`{key}`."
+                    )
+
+        # Check that all input images have the same shape.
+        if len(self.image_features) > 0:
+            first_image_key, first_image_ft = next(iter(self.image_features.items()))
+            for key, image_ft in self.image_features.items():
+                if image_ft.shape != first_image_ft.shape:
+                    raise ValueError(
+                        f"`{key}` does not match `{first_image_key}`, but we expect all image shapes to match."
+                    )
+
+    @property
+    def observation_delta_indices(self) -> list:
+        return list(range(1 - self.n_obs_steps, 1))
+
+    @property
+    def action_delta_indices(self) -> list:
+        return list(range(1 - self.n_obs_steps, 1 - self.n_obs_steps + self.horizon))
+
+    @property
+    def reward_delta_indices(self) -> None:
+        return None
diff --git a/lerobot/src/lerobot/policies/diffusion/modeling_diffusion.py b/lerobot/src/lerobot/policies/diffusion/modeling_diffusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3f22ae991a9b38c4ac0e13cd024fe14c731c58a
--- /dev/null
+++ b/lerobot/src/lerobot/policies/diffusion/modeling_diffusion.py
@@ -0,0 +1,764 @@
+#!/usr/bin/env python
+
+# Copyright 2024 Columbia Artificial Intelligence, Robotics Lab,
+# and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Diffusion Policy as per "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion"
+
+TODO(alexander-soare):
+  - Remove reliance on diffusers for DDPMScheduler and LR scheduler.
+"""
+
+import math
+from collections import deque
+from collections.abc import Callable
+
+import einops
+import numpy as np
+import torch
+import torch.nn.functional as F  # noqa: N812
+import torchvision
+from diffusers.schedulers.scheduling_ddim import DDIMScheduler
+from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
+from torch import Tensor, nn
+
+from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies.utils import (
+    get_device_from_parameters,
+    get_dtype_from_parameters,
+    get_output_shape,
+    populate_queues,
+)
+from lerobot.utils.constants import ACTION, OBS_ENV_STATE, OBS_IMAGES, OBS_STATE
+
+
+class DiffusionPolicy(PreTrainedPolicy):
+    """
+    Diffusion Policy as per "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion"
+    (paper: https://huggingface.co/papers/2303.04137, code: https://github.com/real-stanford/diffusion_policy).
+    """
+
+    config_class = DiffusionConfig
+    name = "diffusion"
+
+    def __init__(
+        self,
+        config: DiffusionConfig,
+        **kwargs,
+    ):
+        """
+        Args:
+            config: Policy configuration class instance.
+        """
+        super().__init__(config)
+        config.validate_features()
+        self.config = config
+
+        # Queues are populated during rollout of the policy; they contain the n latest observations and actions.
+        self._queues = None
+
+        self.diffusion = DiffusionModel(config)
+
+        self.reset()
+
+    def get_optim_params(self) -> dict:
+        return self.diffusion.parameters()
+
+    def reset(self):
+        """Clear observation and action queues.
Should be called on `env.reset()`."""
+        self._queues = {
+            OBS_STATE: deque(maxlen=self.config.n_obs_steps),
+            ACTION: deque(maxlen=self.config.n_action_steps),
+        }
+        if self.config.image_features:
+            self._queues[OBS_IMAGES] = deque(maxlen=self.config.n_obs_steps)
+        if self.config.env_state_feature:
+            self._queues[OBS_ENV_STATE] = deque(maxlen=self.config.n_obs_steps)
+
+    @torch.no_grad()
+    def predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
+        """Predict a chunk of actions given environment observations."""
+        # Stack the n latest observations from the queue.
+        batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues}
+        actions = self.diffusion.generate_actions(batch, noise=noise)
+
+        return actions
+
+    @torch.no_grad()
+    def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
+        """Select a single action given environment observations.
+
+        This method handles caching a history of observations and an action trajectory generated by the
+        underlying diffusion model. Here's how it works:
+          - `n_obs_steps` steps worth of observations are cached (for the first steps, the observation is
+            copied `n_obs_steps` times to fill the cache).
+          - The diffusion model generates `horizon` steps worth of actions.
+          - `n_action_steps` worth of actions are actually kept for execution, starting from the current step.
+        Schematically this looks like:
+            ----------------------------------------------------------------------------------------------
+            (legend: o = n_obs_steps, h = horizon, a = n_action_steps)
+            |timestep            | n-o+1 | n-o+2 | ..... | n     | ..... | n+a-1 | n+a   | ..... | n-o+h |
+            |observation is used | YES   | YES   | YES   | YES   | NO    | NO    | NO    | NO    | NO    |
+            |action is generated | YES   | YES   | YES   | YES   | YES   | YES   | YES   | YES   | YES   |
+            |action is used      | NO    | NO    | NO    | YES   | YES   | YES   | NO    | NO    | NO    |
+            ----------------------------------------------------------------------------------------------
+        Note that this means we require: `n_action_steps <= horizon - n_obs_steps + 1`. Also, note that
+        "horizon" may not be the best name to describe what the variable actually means, because this period
+        is actually measured from the first observation which (if `n_obs_steps` > 1) happened in the past.
+        """
+        # NOTE: for offline evaluation, we have the action in the batch, so we need to pop it out.
+        if ACTION in batch:
+            batch.pop(ACTION)
+
+        if self.config.image_features:
+            batch = dict(batch)  # shallow copy so that adding a key doesn't modify the original
+            batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4)
+
+        # NOTE: It's important that this happens after stacking the images into a single key.
+ self._queues = populate_queues(self._queues, batch) + + if len(self._queues[ACTION]) == 0: + actions = self.predict_action_chunk(batch, noise=noise) + self._queues[ACTION].extend(actions.transpose(0, 1)) + + action = self._queues[ACTION].popleft() + return action + + def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, None]: + """Run the batch through the model and compute the loss for training or validation.""" + if self.config.image_features: + batch = dict(batch) # shallow copy so that adding a key doesn't modify the original + batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) + loss = self.diffusion.compute_loss(batch) + # no output_dict so returning None + return loss, None + + +def _make_noise_scheduler(name: str, **kwargs: dict) -> DDPMScheduler | DDIMScheduler: + """ + Factory for noise scheduler instances of the requested type. All kwargs are passed + to the scheduler. + """ + if name == "DDPM": + return DDPMScheduler(**kwargs) + elif name == "DDIM": + return DDIMScheduler(**kwargs) + else: + raise ValueError(f"Unsupported noise scheduler type {name}") + + +class DiffusionModel(nn.Module): + def __init__(self, config: DiffusionConfig): + super().__init__() + self.config = config + + # Build observation encoders (depending on which observations are provided). + global_cond_dim = self.config.robot_state_feature.shape[0] + if self.config.image_features: + num_images = len(self.config.image_features) + if self.config.use_separate_rgb_encoder_per_camera: + encoders = [DiffusionRgbEncoder(config) for _ in range(num_images)] + self.rgb_encoder = nn.ModuleList(encoders) + global_cond_dim += encoders[0].feature_dim * num_images + else: + self.rgb_encoder = DiffusionRgbEncoder(config) + global_cond_dim += self.rgb_encoder.feature_dim * num_images + if self.config.env_state_feature: + global_cond_dim += self.config.env_state_feature.shape[0] + + self.unet = DiffusionConditionalUnet1d(config, global_cond_dim=global_cond_dim * config.n_obs_steps) + + self.noise_scheduler = _make_noise_scheduler( + config.noise_scheduler_type, + num_train_timesteps=config.num_train_timesteps, + beta_start=config.beta_start, + beta_end=config.beta_end, + beta_schedule=config.beta_schedule, + clip_sample=config.clip_sample, + clip_sample_range=config.clip_sample_range, + prediction_type=config.prediction_type, + ) + + if config.num_inference_steps is None: + self.num_inference_steps = self.noise_scheduler.config.num_train_timesteps + else: + self.num_inference_steps = config.num_inference_steps + + # ========= inference ============ + def conditional_sample( + self, + batch_size: int, + global_cond: Tensor | None = None, + generator: torch.Generator | None = None, + noise: Tensor | None = None, + ) -> Tensor: + device = get_device_from_parameters(self) + dtype = get_dtype_from_parameters(self) + + # Sample prior. + sample = ( + noise + if noise is not None + else torch.randn( + size=(batch_size, self.config.horizon, self.config.action_feature.shape[0]), + dtype=dtype, + device=device, + generator=generator, + ) + ) + + self.noise_scheduler.set_timesteps(self.num_inference_steps) + + for t in self.noise_scheduler.timesteps: + # Predict model output. 
+ model_output = self.unet( + sample, + torch.full(sample.shape[:1], t, dtype=torch.long, device=sample.device), + global_cond=global_cond, + ) + # Compute previous image: x_t -> x_t-1 + sample = self.noise_scheduler.step(model_output, t, sample, generator=generator).prev_sample + + return sample + + def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor: + """Encode image features and concatenate them all together along with the state vector.""" + batch_size, n_obs_steps = batch[OBS_STATE].shape[:2] + global_cond_feats = [batch[OBS_STATE]] + # Extract image features. + if self.config.image_features: + if self.config.use_separate_rgb_encoder_per_camera: + # Combine batch and sequence dims while rearranging to make the camera index dimension first. + images_per_camera = einops.rearrange(batch[OBS_IMAGES], "b s n ... -> n (b s) ...") + img_features_list = torch.cat( + [ + encoder(images) + for encoder, images in zip(self.rgb_encoder, images_per_camera, strict=True) + ] + ) + # Separate batch and sequence dims back out. The camera index dim gets absorbed into the + # feature dim (effectively concatenating the camera features). + img_features = einops.rearrange( + img_features_list, "(n b s) ... -> b s (n ...)", b=batch_size, s=n_obs_steps + ) + else: + # Combine batch, sequence, and "which camera" dims before passing to shared encoder. + img_features = self.rgb_encoder( + einops.rearrange(batch[OBS_IMAGES], "b s n ... -> (b s n) ...") + ) + # Separate batch dim and sequence dim back out. The camera index dim gets absorbed into the + # feature dim (effectively concatenating the camera features). + img_features = einops.rearrange( + img_features, "(b s n) ... -> b s (n ...)", b=batch_size, s=n_obs_steps + ) + global_cond_feats.append(img_features) + + if self.config.env_state_feature: + global_cond_feats.append(batch[OBS_ENV_STATE]) + + # Concatenate features then flatten to (B, global_cond_dim). + return torch.cat(global_cond_feats, dim=-1).flatten(start_dim=1) + + def generate_actions(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: + """ + This function expects `batch` to have: + { + "observation.state": (B, n_obs_steps, state_dim) + + "observation.images": (B, n_obs_steps, num_cameras, C, H, W) + AND/OR + "observation.environment_state": (B, n_obs_steps, environment_dim) + } + """ + batch_size, n_obs_steps = batch[OBS_STATE].shape[:2] + assert n_obs_steps == self.config.n_obs_steps + + # Encode image features and concatenate them all together along with the state vector. + global_cond = self._prepare_global_conditioning(batch) # (B, global_cond_dim) + + # run sampling + actions = self.conditional_sample(batch_size, global_cond=global_cond, noise=noise) + + # Extract `n_action_steps` steps worth of actions (from the current observation). + start = n_obs_steps - 1 + end = start + self.config.n_action_steps + actions = actions[:, start:end] + + return actions + + def compute_loss(self, batch: dict[str, Tensor]) -> Tensor: + """ + This function expects `batch` to have (at least): + { + "observation.state": (B, n_obs_steps, state_dim) + + "observation.images": (B, n_obs_steps, num_cameras, C, H, W) + AND/OR + "observation.environment_state": (B, n_obs_steps, environment_dim) + + "action": (B, horizon, action_dim) + "action_is_pad": (B, horizon) + } + """ + # Input validation. 
+ assert set(batch).issuperset({OBS_STATE, ACTION, "action_is_pad"}) + assert OBS_IMAGES in batch or OBS_ENV_STATE in batch + n_obs_steps = batch[OBS_STATE].shape[1] + horizon = batch[ACTION].shape[1] + assert horizon == self.config.horizon + assert n_obs_steps == self.config.n_obs_steps + + # Encode image features and concatenate them all together along with the state vector. + global_cond = self._prepare_global_conditioning(batch) # (B, global_cond_dim) + + # Forward diffusion. + trajectory = batch[ACTION] + # Sample noise to add to the trajectory. + eps = torch.randn(trajectory.shape, device=trajectory.device) + # Sample a random noising timestep for each item in the batch. + timesteps = torch.randint( + low=0, + high=self.noise_scheduler.config.num_train_timesteps, + size=(trajectory.shape[0],), + device=trajectory.device, + ).long() + # Add noise to the clean trajectories according to the noise magnitude at each timestep. + noisy_trajectory = self.noise_scheduler.add_noise(trajectory, eps, timesteps) + + # Run the denoising network (that might denoise the trajectory, or attempt to predict the noise). + pred = self.unet(noisy_trajectory, timesteps, global_cond=global_cond) + + # Compute the loss. + # The target is either the original trajectory, or the noise. + if self.config.prediction_type == "epsilon": + target = eps + elif self.config.prediction_type == "sample": + target = batch[ACTION] + else: + raise ValueError(f"Unsupported prediction type {self.config.prediction_type}") + + loss = F.mse_loss(pred, target, reduction="none") + + # Mask loss wherever the action is padded with copies (edges of the dataset trajectory). + if self.config.do_mask_loss_for_padding: + if "action_is_pad" not in batch: + raise ValueError( + "You need to provide 'action_is_pad' in the batch when " + f"{self.config.do_mask_loss_for_padding=}." + ) + in_episode_bound = ~batch["action_is_pad"] + loss = loss * in_episode_bound.unsqueeze(-1) + + return loss.mean() + + +class SpatialSoftmax(nn.Module): + """ + Spatial Soft Argmax operation described in "Deep Spatial Autoencoders for Visuomotor Learning" by Finn et al. + (https://huggingface.co/papers/1509.06113). A minimal port of the robomimic implementation. + + At a high level, this takes 2D feature maps (from a convnet/ViT) and returns the "center of mass" + of activations of each channel, i.e., keypoints in the image space for the policy to focus on. + + Example: take feature maps of size (512x10x12). We generate a grid of normalized coordinates (10x12x2): + ----------------------------------------------------- + | (-1., -1.) | (-0.82, -1.) | ... | (1., -1.) | + | (-1., -0.78) | (-0.82, -0.78) | ... | (1., -0.78) | + | ... | ... | ... | ... | + | (-1., 1.) | (-0.82, 1.) | ... | (1., 1.) | + ----------------------------------------------------- + This is achieved by applying channel-wise softmax over the activations (512x120) and computing the dot + product with the coordinates (120x2) to get expected points of maximal activation (512x2). + + The example above results in 512 keypoints (corresponding to the 512 input channels). We can optionally + provide num_kp != None to control the number of keypoints. This is achieved by a first applying a learnable + linear mapping (in_channels, H, W) -> (num_kp, H, W). + """ + + def __init__(self, input_shape, num_kp=None): + """ + Args: + input_shape (list): (C, H, W) input feature map shape. + num_kp (int): number of keypoints in output. If None, output will have the same number of channels as input. 
+ """ + super().__init__() + + assert len(input_shape) == 3 + self._in_c, self._in_h, self._in_w = input_shape + + if num_kp is not None: + self.nets = torch.nn.Conv2d(self._in_c, num_kp, kernel_size=1) + self._out_c = num_kp + else: + self.nets = None + self._out_c = self._in_c + + # we could use torch.linspace directly but that seems to behave slightly differently than numpy + # and causes a small degradation in pc_success of pre-trained models. + pos_x, pos_y = np.meshgrid(np.linspace(-1.0, 1.0, self._in_w), np.linspace(-1.0, 1.0, self._in_h)) + pos_x = torch.from_numpy(pos_x.reshape(self._in_h * self._in_w, 1)).float() + pos_y = torch.from_numpy(pos_y.reshape(self._in_h * self._in_w, 1)).float() + # register as buffer so it's moved to the correct device. + self.register_buffer("pos_grid", torch.cat([pos_x, pos_y], dim=1)) + + def forward(self, features: Tensor) -> Tensor: + """ + Args: + features: (B, C, H, W) input feature maps. + Returns: + (B, K, 2) image-space coordinates of keypoints. + """ + if self.nets is not None: + features = self.nets(features) + + # [B, K, H, W] -> [B * K, H * W] where K is number of keypoints + features = features.reshape(-1, self._in_h * self._in_w) + # 2d softmax normalization + attention = F.softmax(features, dim=-1) + # [B * K, H * W] x [H * W, 2] -> [B * K, 2] for spatial coordinate mean in x and y dimensions + expected_xy = attention @ self.pos_grid + # reshape to [B, K, 2] + feature_keypoints = expected_xy.view(-1, self._out_c, 2) + + return feature_keypoints + + +class DiffusionRgbEncoder(nn.Module): + """Encodes an RGB image into a 1D feature vector. + + Includes the ability to normalize and crop the image first. + """ + + def __init__(self, config: DiffusionConfig): + super().__init__() + # Set up optional preprocessing. + if config.crop_shape is not None: + self.do_crop = True + # Always use center crop for eval + self.center_crop = torchvision.transforms.CenterCrop(config.crop_shape) + if config.crop_is_random: + self.maybe_random_crop = torchvision.transforms.RandomCrop(config.crop_shape) + else: + self.maybe_random_crop = self.center_crop + else: + self.do_crop = False + + # Set up backbone. + backbone_model = getattr(torchvision.models, config.vision_backbone)( + weights=config.pretrained_backbone_weights + ) + # Note: This assumes that the layer4 feature map is children()[-3] + # TODO(alexander-soare): Use a safer alternative. + self.backbone = nn.Sequential(*(list(backbone_model.children())[:-2])) + if config.use_group_norm: + if config.pretrained_backbone_weights: + raise ValueError( + "You can't replace BatchNorm in a pretrained model without ruining the weights!" + ) + self.backbone = _replace_submodules( + root_module=self.backbone, + predicate=lambda x: isinstance(x, nn.BatchNorm2d), + func=lambda x: nn.GroupNorm(num_groups=x.num_features // 16, num_channels=x.num_features), + ) + + # Set up pooling and final layers. + # Use a dry run to get the feature map shape. + # The dummy input should take the number of image channels from `config.image_features` and it should + # use the height and width from `config.crop_shape` if it is provided, otherwise it should use the + # height and width from `config.image_features`. + + # Note: we have a check in the config class to make sure all images have the same shape. 
+        images_shape = next(iter(config.image_features.values())).shape
+        dummy_shape_h_w = config.crop_shape if config.crop_shape is not None else images_shape[1:]
+        dummy_shape = (1, images_shape[0], *dummy_shape_h_w)
+        feature_map_shape = get_output_shape(self.backbone, dummy_shape)[1:]
+
+        self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints)
+        self.feature_dim = config.spatial_softmax_num_keypoints * 2
+        self.out = nn.Linear(config.spatial_softmax_num_keypoints * 2, self.feature_dim)
+        self.relu = nn.ReLU()
+
+    def forward(self, x: Tensor) -> Tensor:
+        """
+        Args:
+            x: (B, C, H, W) image tensor with pixel values in [0, 1].
+        Returns:
+            (B, D) image feature.
+        """
+        # Preprocess: maybe crop (if it was set up in the __init__).
+        if self.do_crop:
+            if self.training:  # noqa: SIM108
+                x = self.maybe_random_crop(x)
+            else:
+                # Always use center crop for eval.
+                x = self.center_crop(x)
+        # Extract backbone feature.
+        x = torch.flatten(self.pool(self.backbone(x)), start_dim=1)
+        # Final linear layer with non-linearity.
+        x = self.relu(self.out(x))
+        return x
+
+
+def _replace_submodules(
+    root_module: nn.Module, predicate: Callable[[nn.Module], bool], func: Callable[[nn.Module], nn.Module]
+) -> nn.Module:
+    """
+    Args:
+        root_module: The module for which the submodules need to be replaced.
+        predicate: Takes a module as an argument and must return True if that module is to be replaced.
+        func: Takes a module as an argument and returns a new module to replace it with.
+    Returns:
+        The root module with its submodules replaced.
+    """
+    if predicate(root_module):
+        return func(root_module)
+
+    replace_list = [k.split(".") for k, m in root_module.named_modules(remove_duplicate=True) if predicate(m)]
+    for *parents, k in replace_list:
+        parent_module = root_module
+        if len(parents) > 0:
+            parent_module = root_module.get_submodule(".".join(parents))
+        if isinstance(parent_module, nn.Sequential):
+            src_module = parent_module[int(k)]
+        else:
+            src_module = getattr(parent_module, k)
+        tgt_module = func(src_module)
+        if isinstance(parent_module, nn.Sequential):
+            parent_module[int(k)] = tgt_module
+        else:
+            setattr(parent_module, k, tgt_module)
+    # Verify that all modules matching the predicate (e.g. all BatchNorm layers) were replaced.
+    assert not any(predicate(m) for _, m in root_module.named_modules(remove_duplicate=True))
+    return root_module
+
+
+class DiffusionSinusoidalPosEmb(nn.Module):
+    """1D sinusoidal positional embeddings as in Attention is All You Need."""
+
+    def __init__(self, dim: int):
+        super().__init__()
+        self.dim = dim
+
+    def forward(self, x: Tensor) -> Tensor:
+        device = x.device
+        half_dim = self.dim // 2
+        emb = math.log(10000) / (half_dim - 1)
+        emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
+        emb = x.unsqueeze(-1) * emb.unsqueeze(0)
+        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+        return emb
+
+
+class DiffusionConv1dBlock(nn.Module):
+    """Conv1d --> GroupNorm --> Mish"""
+
+    def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8):
+        super().__init__()
+
+        self.block = nn.Sequential(
+            nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2),
+            nn.GroupNorm(n_groups, out_channels),
+            nn.Mish(),
+        )
+
+    def forward(self, x):
+        return self.block(x)
+
+
+class DiffusionConditionalUnet1d(nn.Module):
+    """A 1D convolutional UNet with FiLM modulation for conditioning.
+
+    Note: this removes local conditioning as compared to the original diffusion policy code.
+ """ + + def __init__(self, config: DiffusionConfig, global_cond_dim: int): + super().__init__() + + self.config = config + + # Encoder for the diffusion timestep. + self.diffusion_step_encoder = nn.Sequential( + DiffusionSinusoidalPosEmb(config.diffusion_step_embed_dim), + nn.Linear(config.diffusion_step_embed_dim, config.diffusion_step_embed_dim * 4), + nn.Mish(), + nn.Linear(config.diffusion_step_embed_dim * 4, config.diffusion_step_embed_dim), + ) + + # The FiLM conditioning dimension. + cond_dim = config.diffusion_step_embed_dim + global_cond_dim + + # In channels / out channels for each downsampling block in the Unet's encoder. For the decoder, we + # just reverse these. + in_out = [(config.action_feature.shape[0], config.down_dims[0])] + list( + zip(config.down_dims[:-1], config.down_dims[1:], strict=True) + ) + + # Unet encoder. + common_res_block_kwargs = { + "cond_dim": cond_dim, + "kernel_size": config.kernel_size, + "n_groups": config.n_groups, + "use_film_scale_modulation": config.use_film_scale_modulation, + } + self.down_modules = nn.ModuleList([]) + for ind, (dim_in, dim_out) in enumerate(in_out): + is_last = ind >= (len(in_out) - 1) + self.down_modules.append( + nn.ModuleList( + [ + DiffusionConditionalResidualBlock1d(dim_in, dim_out, **common_res_block_kwargs), + DiffusionConditionalResidualBlock1d(dim_out, dim_out, **common_res_block_kwargs), + # Downsample as long as it is not the last block. + nn.Conv1d(dim_out, dim_out, 3, 2, 1) if not is_last else nn.Identity(), + ] + ) + ) + + # Processing in the middle of the auto-encoder. + self.mid_modules = nn.ModuleList( + [ + DiffusionConditionalResidualBlock1d( + config.down_dims[-1], config.down_dims[-1], **common_res_block_kwargs + ), + DiffusionConditionalResidualBlock1d( + config.down_dims[-1], config.down_dims[-1], **common_res_block_kwargs + ), + ] + ) + + # Unet decoder. + self.up_modules = nn.ModuleList([]) + for ind, (dim_out, dim_in) in enumerate(reversed(in_out[1:])): + is_last = ind >= (len(in_out) - 1) + self.up_modules.append( + nn.ModuleList( + [ + # dim_in * 2, because it takes the encoder's skip connection as well + DiffusionConditionalResidualBlock1d(dim_in * 2, dim_out, **common_res_block_kwargs), + DiffusionConditionalResidualBlock1d(dim_out, dim_out, **common_res_block_kwargs), + # Upsample as long as it is not the last block. + nn.ConvTranspose1d(dim_out, dim_out, 4, 2, 1) if not is_last else nn.Identity(), + ] + ) + ) + + self.final_conv = nn.Sequential( + DiffusionConv1dBlock(config.down_dims[0], config.down_dims[0], kernel_size=config.kernel_size), + nn.Conv1d(config.down_dims[0], config.action_feature.shape[0], 1), + ) + + def forward(self, x: Tensor, timestep: Tensor | int, global_cond=None) -> Tensor: + """ + Args: + x: (B, T, input_dim) tensor for input to the Unet. + timestep: (B,) tensor of (timestep_we_are_denoising_from - 1). + global_cond: (B, global_cond_dim) + output: (B, T, input_dim) + Returns: + (B, T, input_dim) diffusion model prediction. + """ + # For 1D convolutions we'll need feature dimension first. + x = einops.rearrange(x, "b t d -> b d t") + + timesteps_embed = self.diffusion_step_encoder(timestep) + + # If there is a global conditioning feature, concatenate it to the timestep embedding. + if global_cond is not None: + global_feature = torch.cat([timesteps_embed, global_cond], axis=-1) + else: + global_feature = timesteps_embed + + # Run encoder, keeping track of skip features to pass to the decoder. 
+ encoder_skip_features: list[Tensor] = [] + for resnet, resnet2, downsample in self.down_modules: + x = resnet(x, global_feature) + x = resnet2(x, global_feature) + encoder_skip_features.append(x) + x = downsample(x) + + for mid_module in self.mid_modules: + x = mid_module(x, global_feature) + + # Run decoder, using the skip features from the encoder. + for resnet, resnet2, upsample in self.up_modules: + x = torch.cat((x, encoder_skip_features.pop()), dim=1) + x = resnet(x, global_feature) + x = resnet2(x, global_feature) + x = upsample(x) + + x = self.final_conv(x) + + x = einops.rearrange(x, "b d t -> b t d") + return x + + +class DiffusionConditionalResidualBlock1d(nn.Module): + """ResNet style 1D convolutional block with FiLM modulation for conditioning.""" + + def __init__( + self, + in_channels: int, + out_channels: int, + cond_dim: int, + kernel_size: int = 3, + n_groups: int = 8, + # Set to True to do scale modulation with FiLM as well as bias modulation (defaults to False meaning + # FiLM just modulates bias). + use_film_scale_modulation: bool = False, + ): + super().__init__() + + self.use_film_scale_modulation = use_film_scale_modulation + self.out_channels = out_channels + + self.conv1 = DiffusionConv1dBlock(in_channels, out_channels, kernel_size, n_groups=n_groups) + + # FiLM modulation (https://huggingface.co/papers/1709.07871) outputs per-channel bias and (maybe) scale. + cond_channels = out_channels * 2 if use_film_scale_modulation else out_channels + self.cond_encoder = nn.Sequential(nn.Mish(), nn.Linear(cond_dim, cond_channels)) + + self.conv2 = DiffusionConv1dBlock(out_channels, out_channels, kernel_size, n_groups=n_groups) + + # A final convolution for dimension matching the residual (if needed). + self.residual_conv = ( + nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels else nn.Identity() + ) + + def forward(self, x: Tensor, cond: Tensor) -> Tensor: + """ + Args: + x: (B, in_channels, T) + cond: (B, cond_dim) + Returns: + (B, out_channels, T) + """ + out = self.conv1(x) + + # Get condition embedding. Unsqueeze for broadcasting to `out`, resulting in (B, out_channels, 1). + cond_embed = self.cond_encoder(cond).unsqueeze(-1) + if self.use_film_scale_modulation: + # Treat the embedding as a list of scales and biases. + scale = cond_embed[:, : self.out_channels] + bias = cond_embed[:, self.out_channels :] + out = scale * out + bias + else: + # Treat the embedding as biases. + out = out + cond_embed + + out = self.conv2(out) + out = out + self.residual_conv(x) + return out diff --git a/lerobot/src/lerobot/robots/lekiwi/__init__.py b/lerobot/src/lerobot/robots/lekiwi/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c4f4724cfdb1a3893a227c0fe470f90beea52ed7 --- /dev/null +++ b/lerobot/src/lerobot/robots/lekiwi/__init__.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
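To make the FiLM modulation used by `DiffusionConditionalResidualBlock1d` above concrete, here is a tiny standalone sketch of the scale-and-bias path. It is plain PyTorch with invented shapes, not code from the patch:

```python
import torch

B, C, T = 2, 8, 16                     # batch, out_channels, horizon
out = torch.randn(B, C, T)             # activations coming out of conv1
cond_embed = torch.randn(B, 2 * C, 1)  # cond_encoder output, unsqueezed to broadcast over T

# With use_film_scale_modulation=True, the first C channels act as per-channel
# scales and the last C channels as per-channel biases.
scale, bias = cond_embed[:, :C], cond_embed[:, C:]
film_out = scale * out + bias
assert film_out.shape == (B, C, T)

# With use_film_scale_modulation=False, cond_encoder would emit a (B, C, 1)
# embedding instead, applied as a pure bias: out + cond_embed.
```

The unsqueeze to `(B, 2C, 1)` is what lets a single conditioning vector modulate every timestep of the action trajectory identically.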
+ +from .config_lekiwi import LeKiwiClientConfig, LeKiwiConfig +from .lekiwi import LeKiwi +from .lekiwi_client import LeKiwiClient diff --git a/lerobot/src/lerobot/robots/lekiwi/lekiwi.py b/lerobot/src/lerobot/robots/lekiwi/lekiwi.py new file mode 100644 index 0000000000000000000000000000000000000000..6345d6b06d9440c546b85716a987b9f7f78b407a --- /dev/null +++ b/lerobot/src/lerobot/robots/lekiwi/lekiwi.py @@ -0,0 +1,417 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import time +from functools import cached_property +from itertools import chain +from typing import Any + +import numpy as np + +from lerobot.cameras.utils import make_cameras_from_configs +from lerobot.motors import Motor, MotorCalibration, MotorNormMode +from lerobot.motors.feetech import ( + FeetechMotorsBus, + OperatingMode, +) +from lerobot.processor import RobotAction, RobotObservation +from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected + +from ..robot import Robot +from ..utils import ensure_safe_goal_position +from .config_lekiwi import LeKiwiConfig + +logger = logging.getLogger(__name__) + + +class LeKiwi(Robot): + """ + The robot includes a three omniwheel mobile base and a remote follower arm. + The leader arm is connected locally (on the laptop) and its joint positions are recorded and then + forwarded to the remote follower arm (after applying a safety clamp). + In parallel, keyboard teleoperation is used to generate raw velocity commands for the wheels. 
+ """ + + config_class = LeKiwiConfig + name = "lekiwi" + + def __init__(self, config: LeKiwiConfig): + super().__init__(config) + self.config = config + norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100 + self.bus = FeetechMotorsBus( + port=self.config.port, + motors={ + # arm + "arm_shoulder_pan": Motor(1, "sts3215", norm_mode_body), + "arm_shoulder_lift": Motor(2, "sts3215", norm_mode_body), + "arm_elbow_flex": Motor(3, "sts3215", norm_mode_body), + "arm_wrist_flex": Motor(4, "sts3215", norm_mode_body), + "arm_wrist_roll": Motor(5, "sts3215", norm_mode_body), + "arm_gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100), + # base + "base_left_wheel": Motor(7, "sts3215", MotorNormMode.RANGE_M100_100), + "base_back_wheel": Motor(8, "sts3215", MotorNormMode.RANGE_M100_100), + "base_right_wheel": Motor(9, "sts3215", MotorNormMode.RANGE_M100_100), + }, + calibration=self.calibration, + ) + self.arm_motors = [motor for motor in self.bus.motors if motor.startswith("arm")] + self.base_motors = [motor for motor in self.bus.motors if motor.startswith("base")] + self.cameras = make_cameras_from_configs(config.cameras) + + @property + def _state_ft(self) -> dict[str, type]: + return dict.fromkeys( + ( + "arm_shoulder_pan.pos", + "arm_shoulder_lift.pos", + "arm_elbow_flex.pos", + "arm_wrist_flex.pos", + "arm_wrist_roll.pos", + "arm_gripper.pos", + "x.vel", + "y.vel", + "theta.vel", + ), + float, + ) + + @property + def _cameras_ft(self) -> dict[str, tuple]: + return { + cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras + } + + @cached_property + def observation_features(self) -> dict[str, type | tuple]: + return {**self._state_ft, **self._cameras_ft} + + @cached_property + def action_features(self) -> dict[str, type]: + return self._state_ft + + @property + def is_connected(self) -> bool: + return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values()) + + @check_if_already_connected + def connect(self, calibrate: bool = True) -> None: + self.bus.connect() + if not self.is_calibrated and calibrate: + logger.info( + "Mismatch between calibration values in the motor and the calibration file or no calibration file found" + ) + self.calibrate() + + for cam in self.cameras.values(): + cam.connect() + + self.configure() + logger.info(f"{self} connected.") + + @property + def is_calibrated(self) -> bool: + return self.bus.is_calibrated + + def calibrate(self) -> None: + if self.calibration: + # Calibration file exists, ask user whether to use it or run new calibration + user_input = input( + f"Press ENTER to use provided calibration file associated with the id {self.id}, or type 'c' and press ENTER to run calibration: " + ) + if user_input.strip().lower() != "c": + logger.info(f"Writing calibration file associated with the id {self.id} to the motors") + self.bus.write_calibration(self.calibration) + return + logger.info(f"\nRunning calibration of {self}") + + motors = self.arm_motors + self.base_motors + + self.bus.disable_torque(self.arm_motors) + for name in self.arm_motors: + self.bus.write("Operating_Mode", name, OperatingMode.POSITION.value) + + input("Move robot to the middle of its range of motion and press ENTER....") + homing_offsets = self.bus.set_half_turn_homings(self.arm_motors) + + homing_offsets.update(dict.fromkeys(self.base_motors, 0)) + + full_turn_motor = [ + motor for motor in motors if any(keyword in motor for keyword in ["wheel", "wrist_roll"]) + ] + 
unknown_range_motors = [motor for motor in motors if motor not in full_turn_motor]
+
+        print(
+            f"Move all arm joints except '{full_turn_motor}' sequentially through their "
+            "entire ranges of motion.\nRecording positions. Press ENTER to stop..."
+        )
+        range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
+        for name in full_turn_motor:
+            range_mins[name] = 0
+            range_maxes[name] = 4095
+
+        self.calibration = {}
+        for name, motor in self.bus.motors.items():
+            self.calibration[name] = MotorCalibration(
+                id=motor.id,
+                drive_mode=0,
+                homing_offset=homing_offsets[name],
+                range_min=range_mins[name],
+                range_max=range_maxes[name],
+            )
+
+        self.bus.write_calibration(self.calibration)
+        self._save_calibration()
+        print("Calibration saved to", self.calibration_fpath)
+
+    def configure(self):
+        # Set up arm actuators (position mode).
+        # We assume that at connection time, the arm is in a rest position,
+        # and torque can be safely disabled to run calibration.
+        self.bus.disable_torque()
+        self.bus.configure_motors()
+        for name in self.arm_motors:
+            self.bus.write("Operating_Mode", name, OperatingMode.POSITION.value)
+            # Set P_Coefficient to a lower value to avoid shakiness (default is 32).
+            self.bus.write("P_Coefficient", name, 16)
+            # Set I_Coefficient and D_Coefficient to the default values 0 and 32.
+            self.bus.write("I_Coefficient", name, 0)
+            self.bus.write("D_Coefficient", name, 32)
+
+        for name in self.base_motors:
+            self.bus.write("Operating_Mode", name, OperatingMode.VELOCITY.value)
+
+        self.bus.enable_torque()
+
+    def setup_motors(self) -> None:
+        for motor in chain(reversed(self.arm_motors), reversed(self.base_motors)):
+            input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+            self.bus.setup_motor(motor)
+            print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+    @staticmethod
+    def _degps_to_raw(degps: float) -> int:
+        steps_per_deg = 4096.0 / 360.0
+        speed_in_steps = degps * steps_per_deg
+        speed_int = int(round(speed_in_steps))
+        # Cap the value to fit within the signed 16-bit range (-32768 to 32767).
+        if speed_int > 0x7FFF:
+            speed_int = 0x7FFF  # 32767 -> maximum positive value
+        elif speed_int < -0x8000:
+            speed_int = -0x8000  # -32768 -> minimum negative value
+        return speed_int
+
+    @staticmethod
+    def _raw_to_degps(raw_speed: int) -> float:
+        steps_per_deg = 4096.0 / 360.0
+        magnitude = raw_speed
+        degps = magnitude / steps_per_deg
+        return degps
+
+    def _body_to_wheel_raw(
+        self,
+        x: float,
+        y: float,
+        theta: float,
+        wheel_radius: float = 0.05,
+        base_radius: float = 0.125,
+        max_raw: int = 3000,
+    ) -> dict:
+        """
+        Convert desired body-frame velocities into raw wheel commands.
+
+        Parameters:
+            x           : Linear velocity in x (m/s).
+            y           : Linear velocity in y (m/s).
+            theta       : Rotational velocity (deg/s).
+            wheel_radius: Radius of each wheel (meters).
+            base_radius : Distance from the center of rotation to each wheel (meters).
+            max_raw     : Maximum allowed raw command (ticks) per wheel.
+
+        Returns:
+            A dictionary with raw wheel commands:
+                {"base_left_wheel": value, "base_back_wheel": value, "base_right_wheel": value}.
+
+        Notes:
+            - Internally, the method converts `theta` to rad/s for the kinematics.
+            - The raw command is computed from the wheel's angular speed in deg/s
+              using `_degps_to_raw()`. If any command exceeds `max_raw`, all commands
+              are scaled down proportionally.
+        """
+        # Convert rotational velocity from deg/s to rad/s.
+        theta_rad = theta * (np.pi / 180.0)
+        # Create the body velocity vector [x, y, theta_rad].
+        velocity_vector = np.array([x, y, theta_rad])
+
+        # Define the wheel mounting angles with a -90° offset.
+        angles = np.radians(np.array([240, 0, 120]) - 90)
+        # Build the kinematic matrix: each row maps body velocities to a wheel's linear speed.
+        # The third column (base_radius) accounts for the effect of rotation.
+        m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])
+
+        # Compute each wheel's linear speed (m/s) and then its angular speed (rad/s).
+        wheel_linear_speeds = m.dot(velocity_vector)
+        wheel_angular_speeds = wheel_linear_speeds / wheel_radius
+
+        # Convert wheel angular speeds from rad/s to deg/s.
+        wheel_degps = wheel_angular_speeds * (180.0 / np.pi)
+
+        # Scale all commands down proportionally if any would exceed max_raw.
+        steps_per_deg = 4096.0 / 360.0
+        raw_floats = [abs(degps) * steps_per_deg for degps in wheel_degps]
+        max_raw_computed = max(raw_floats)
+        if max_raw_computed > max_raw:
+            scale = max_raw / max_raw_computed
+            wheel_degps = wheel_degps * scale
+
+        # Convert each wheel's angular speed (deg/s) to a raw integer.
+        wheel_raw = [self._degps_to_raw(deg) for deg in wheel_degps]
+
+        return {
+            "base_left_wheel": wheel_raw[0],
+            "base_back_wheel": wheel_raw[1],
+            "base_right_wheel": wheel_raw[2],
+        }
+
+    def _wheel_raw_to_body(
+        self,
+        left_wheel_speed,
+        back_wheel_speed,
+        right_wheel_speed,
+        wheel_radius: float = 0.05,
+        base_radius: float = 0.125,
+    ) -> dict[str, Any]:
+        """
+        Convert raw wheel speed feedback back into body-frame velocities.
+
+        Parameters:
+            left_wheel_speed, back_wheel_speed, right_wheel_speed:
+                          Raw speed values read back from the wheel motors.
+            wheel_radius: Radius of each wheel (meters).
+            base_radius : Distance from the robot center to each wheel (meters).
+
+        Returns:
+            A dict with body-frame velocities: "x.vel" and "y.vel" in m/s, "theta.vel" in deg/s.
+        """
+
+        # Convert each raw reading back to an angular speed in deg/s.
+        wheel_degps = np.array(
+            [
+                self._raw_to_degps(left_wheel_speed),
+                self._raw_to_degps(back_wheel_speed),
+                self._raw_to_degps(right_wheel_speed),
+            ]
+        )
+
+        # Convert from deg/s to rad/s.
+        wheel_radps = wheel_degps * (np.pi / 180.0)
+        # Compute each wheel's linear speed (m/s) from its angular speed.
+        wheel_linear_speeds = wheel_radps * wheel_radius
+
+        # Define the wheel mounting angles with a -90° offset.
+        angles = np.radians(np.array([240, 0, 120]) - 90)
+        m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])
+
+        # Solve the inverse kinematics: body_velocity = M⁻¹ · wheel_linear_speeds.
+ m_inv = np.linalg.inv(m) + velocity_vector = m_inv.dot(wheel_linear_speeds) + x, y, theta_rad = velocity_vector + theta = theta_rad * (180.0 / np.pi) + return { + "x.vel": x, + "y.vel": y, + "theta.vel": theta, + } # m/s and deg/s + + @check_if_not_connected + def get_observation(self) -> RobotObservation: + # Read actuators position for arm and vel for base + start = time.perf_counter() + arm_pos = self.bus.sync_read("Present_Position", self.arm_motors) + base_wheel_vel = self.bus.sync_read("Present_Velocity", self.base_motors) + + base_vel = self._wheel_raw_to_body( + base_wheel_vel["base_left_wheel"], + base_wheel_vel["base_back_wheel"], + base_wheel_vel["base_right_wheel"], + ) + + arm_state = {f"{k}.pos": v for k, v in arm_pos.items()} + + obs_dict = {**arm_state, **base_vel} + + dt_ms = (time.perf_counter() - start) * 1e3 + logger.debug(f"{self} read state: {dt_ms:.1f}ms") + + # Capture images from cameras + for cam_key, cam in self.cameras.items(): + start = time.perf_counter() + obs_dict[cam_key] = cam.async_read() + dt_ms = (time.perf_counter() - start) * 1e3 + logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") + + return obs_dict + + @check_if_not_connected + def send_action(self, action: RobotAction) -> RobotAction: + """Command lekiwi to move to a target joint configuration. + + The relative action magnitude may be clipped depending on the configuration parameter + `max_relative_target`. In this case, the action sent differs from original action. + Thus, this function always returns the action actually sent. + + Raises: + RobotDeviceNotConnectedError: if robot is not connected. + + Returns: + RobotAction: the action sent to the motors, potentially clipped. + """ + + arm_goal_pos = {k: v for k, v in action.items() if k.endswith(".pos")} + base_goal_vel = {k: v for k, v in action.items() if k.endswith(".vel")} + + base_wheel_goal_vel = self._body_to_wheel_raw( + base_goal_vel["x.vel"], base_goal_vel["y.vel"], base_goal_vel["theta.vel"] + ) + + # Cap goal position when too far away from present position. + # /!\ Slower fps expected due to reading from the follower. + if self.config.max_relative_target is not None: + present_pos = self.bus.sync_read("Present_Position", self.arm_motors) + goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in arm_goal_pos.items()} + arm_safe_goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target) + arm_goal_pos = arm_safe_goal_pos + + # Send goal position to the actuators + arm_goal_pos_raw = {k.replace(".pos", ""): v for k, v in arm_goal_pos.items()} + self.bus.sync_write("Goal_Position", arm_goal_pos_raw) + self.bus.sync_write("Goal_Velocity", base_wheel_goal_vel) + + return {**arm_goal_pos, **base_goal_vel} + + def stop_base(self): + self.bus.sync_write("Goal_Velocity", dict.fromkeys(self.base_motors, 0), num_retry=5) + logger.info("Base motors stopped") + + @check_if_not_connected + def disconnect(self): + self.stop_base() + self.bus.disconnect(self.config.disable_torque_on_disconnect) + for cam in self.cameras.values(): + cam.disconnect() + + logger.info(f"{self} disconnected.") diff --git a/lerobot/src/lerobot/robots/lekiwi/lekiwi_client.py b/lerobot/src/lerobot/robots/lekiwi/lekiwi_client.py new file mode 100644 index 0000000000000000000000000000000000000000..4305602de7f8575d41331c40a4e77cb1c608f046 --- /dev/null +++ b/lerobot/src/lerobot/robots/lekiwi/lekiwi_client.py @@ -0,0 +1,335 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO(aliberts, Steven, Pepijn): use gRPC calls instead of zmq? + +import base64 +import json +import logging +from functools import cached_property + +import cv2 +import numpy as np + +from lerobot.processor import RobotAction, RobotObservation +from lerobot.utils.constants import ACTION, OBS_STATE +from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected +from lerobot.utils.errors import DeviceNotConnectedError + +from ..robot import Robot +from .config_lekiwi import LeKiwiClientConfig + + +class LeKiwiClient(Robot): + config_class = LeKiwiClientConfig + name = "lekiwi_client" + + def __init__(self, config: LeKiwiClientConfig): + import zmq + + self._zmq = zmq + super().__init__(config) + self.config = config + self.id = config.id + self.robot_type = config.type + + self.remote_ip = config.remote_ip + self.port_zmq_cmd = config.port_zmq_cmd + self.port_zmq_observations = config.port_zmq_observations + + self.teleop_keys = config.teleop_keys + + self.polling_timeout_ms = config.polling_timeout_ms + self.connect_timeout_s = config.connect_timeout_s + + self.zmq_context = None + self.zmq_cmd_socket = None + self.zmq_observation_socket = None + + self.last_frames = {} + + self.last_remote_state = {} + + # Define three speed levels and a current index + self.speed_levels = [ + {"xy": 0.1, "theta": 30}, # slow + {"xy": 0.2, "theta": 60}, # medium + {"xy": 0.3, "theta": 90}, # fast + ] + self.speed_index = 0 # Start at slow + + self._is_connected = False + self.logs = {} + + @cached_property + def _state_ft(self) -> dict[str, type]: + return dict.fromkeys( + ( + "arm_shoulder_pan.pos", + "arm_shoulder_lift.pos", + "arm_elbow_flex.pos", + "arm_wrist_flex.pos", + "arm_wrist_roll.pos", + "arm_gripper.pos", + "x.vel", + "y.vel", + "theta.vel", + ), + float, + ) + + @cached_property + def _state_order(self) -> tuple[str, ...]: + return tuple(self._state_ft.keys()) + + @cached_property + def _cameras_ft(self) -> dict[str, tuple[int, int, int]]: + return {name: (cfg.height, cfg.width, 3) for name, cfg in self.config.cameras.items()} + + @cached_property + def observation_features(self) -> dict[str, type | tuple]: + return {**self._state_ft, **self._cameras_ft} + + @cached_property + def action_features(self) -> dict[str, type]: + return self._state_ft + + @property + def is_connected(self) -> bool: + return self._is_connected + + @property + def is_calibrated(self) -> bool: + pass + + @check_if_already_connected + def connect(self) -> None: + """Establishes ZMQ sockets with the remote mobile robot""" + + zmq = self._zmq + self.zmq_context = zmq.Context() + self.zmq_cmd_socket = self.zmq_context.socket(zmq.PUSH) + zmq_cmd_locator = f"tcp://{self.remote_ip}:{self.port_zmq_cmd}" + self.zmq_cmd_socket.connect(zmq_cmd_locator) + self.zmq_cmd_socket.setsockopt(zmq.CONFLATE, 1) + + self.zmq_observation_socket = self.zmq_context.socket(zmq.PULL) + zmq_observations_locator = f"tcp://{self.remote_ip}:{self.port_zmq_observations}" + 
self.zmq_observation_socket.connect(zmq_observations_locator) + self.zmq_observation_socket.setsockopt(zmq.CONFLATE, 1) + + poller = zmq.Poller() + poller.register(self.zmq_observation_socket, zmq.POLLIN) + socks = dict(poller.poll(self.connect_timeout_s * 1000)) + if self.zmq_observation_socket not in socks or socks[self.zmq_observation_socket] != zmq.POLLIN: + raise DeviceNotConnectedError("Timeout waiting for LeKiwi Host to connect expired.") + + self._is_connected = True + + def calibrate(self) -> None: + pass + + def _poll_and_get_latest_message(self) -> str | None: + """Polls the ZMQ socket for a limited time and returns the latest message string.""" + zmq = self._zmq + poller = zmq.Poller() + poller.register(self.zmq_observation_socket, zmq.POLLIN) + + try: + socks = dict(poller.poll(self.polling_timeout_ms)) + except zmq.ZMQError as e: + logging.error(f"ZMQ polling error: {e}") + return None + + if self.zmq_observation_socket not in socks: + logging.info("No new data available within timeout.") + return None + + last_msg = None + while True: + try: + msg = self.zmq_observation_socket.recv_string(zmq.NOBLOCK) + last_msg = msg + except zmq.Again: + break + + if last_msg is None: + logging.warning("Poller indicated data, but failed to retrieve message.") + + return last_msg + + def _parse_observation_json(self, obs_string: str) -> RobotObservation | None: + """Parses the JSON observation string.""" + try: + return json.loads(obs_string) + except json.JSONDecodeError as e: + logging.error(f"Error decoding JSON observation: {e}") + return None + + def _decode_image_from_b64(self, image_b64: str) -> np.ndarray | None: + """Decodes a base64 encoded image string to an OpenCV image.""" + if not image_b64: + return None + try: + jpg_data = base64.b64decode(image_b64) + np_arr = np.frombuffer(jpg_data, dtype=np.uint8) + frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) + if frame is None: + logging.warning("cv2.imdecode returned None for an image.") + return frame + except (TypeError, ValueError) as e: + logging.error(f"Error decoding base64 image data: {e}") + return None + + def _remote_state_from_obs( + self, observation: RobotObservation + ) -> tuple[dict[str, np.ndarray], RobotObservation]: + """Extracts frames, and state from the parsed observation.""" + + flat_state = {key: observation.get(key, 0.0) for key in self._state_order} + + state_vec = np.array([flat_state[key] for key in self._state_order], dtype=np.float32) + + obs_dict: RobotObservation = {**flat_state, OBS_STATE: state_vec} + + # Decode images + current_frames: dict[str, np.ndarray] = {} + for cam_name, image_b64 in observation.items(): + if cam_name not in self._cameras_ft: + continue + frame = self._decode_image_from_b64(image_b64) + if frame is not None: + current_frames[cam_name] = frame + + return current_frames, obs_dict + + def _get_data(self) -> tuple[dict[str, np.ndarray], RobotObservation]: + """ + Polls the video socket for the latest observation data. + + Attempts to retrieve and decode the latest message within a short timeout. + If successful, updates and returns the new frames, speed, and arm state. + If no new data arrives or decoding fails, returns the last known values. + """ + + # 1. Get the latest message string from the socket + latest_message_str = self._poll_and_get_latest_message() + + # 2. If no message, return cached data + if latest_message_str is None: + return self.last_frames, self.last_remote_state + + # 3. 
Parse the JSON message
+        observation = self._parse_observation_json(latest_message_str)
+
+        # 4. If JSON parsing failed, return cached data
+        if observation is None:
+            return self.last_frames, self.last_remote_state
+
+        # 5. Process the valid observation data
+        try:
+            new_frames, new_state = self._remote_state_from_obs(observation)
+        except Exception as e:
+            logging.error(f"Error processing observation data, serving last observation: {e}")
+            return self.last_frames, self.last_remote_state
+
+        self.last_frames = new_frames
+        self.last_remote_state = new_state
+
+        return new_frames, new_state
+
+    @check_if_not_connected
+    def get_observation(self) -> RobotObservation:
+        """
+        Capture observations from the remote robot: current follower arm positions,
+        present wheel speeds (converted to body-frame velocities: x, y, theta),
+        and camera frames. Everything is received over ZMQ.
+        """
+
+        frames, obs_dict = self._get_data()
+
+        # Loop over each configured camera
+        for cam_name, frame in frames.items():
+            if frame is None:
+                logging.warning("Frame is None")
+                frame = np.zeros((640, 480, 3), dtype=np.uint8)
+            obs_dict[cam_name] = frame
+
+        return obs_dict
+
+    def _from_keyboard_to_base_action(self, pressed_keys: np.ndarray):
+        # Speed control
+        if self.teleop_keys["speed_up"] in pressed_keys:
+            self.speed_index = min(self.speed_index + 1, 2)
+        if self.teleop_keys["speed_down"] in pressed_keys:
+            self.speed_index = max(self.speed_index - 1, 0)
+        speed_setting = self.speed_levels[self.speed_index]
+        xy_speed = speed_setting["xy"]  # e.g. 0.1, 0.2, or 0.3
+        theta_speed = speed_setting["theta"]  # e.g. 30, 60, or 90
+
+        x_cmd = 0.0  # m/s forward/backward
+        y_cmd = 0.0  # m/s lateral
+        theta_cmd = 0.0  # deg/s rotation
+
+        if self.teleop_keys["forward"] in pressed_keys:
+            x_cmd += xy_speed
+        if self.teleop_keys["backward"] in pressed_keys:
+            x_cmd -= xy_speed
+        if self.teleop_keys["left"] in pressed_keys:
+            y_cmd += xy_speed
+        if self.teleop_keys["right"] in pressed_keys:
+            y_cmd -= xy_speed
+        if self.teleop_keys["rotate_left"] in pressed_keys:
+            theta_cmd += theta_speed
+        if self.teleop_keys["rotate_right"] in pressed_keys:
+            theta_cmd -= theta_speed
+        return {
+            "x.vel": x_cmd,
+            "y.vel": y_cmd,
+            "theta.vel": theta_cmd,
+        }
+
+    def configure(self):
+        pass
+
+    @check_if_not_connected
+    def send_action(self, action: RobotAction) -> RobotAction:
+        """Command LeKiwi to move to a target joint configuration. Translates to motor space and sends it over ZMQ.
+
+        Args:
+            action (RobotAction): dictionary containing the goal positions for the motors.
+
+        Raises:
+            DeviceNotConnectedError: if the robot is not connected.
+
+        Returns:
+            RobotAction: the action sent to the motors.
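+
+        Example (a minimal sketch, assuming a connected `LeKiwiClient` named `robot`
+        and made-up goal values):
+            ```python
+            action = {"arm_shoulder_pan.pos": 10.0, "x.vel": 0.2, "theta.vel": 30.0}
+            sent = robot.send_action(action)
+            # Keys omitted from the action default to 0.0 in the returned dict, and
+            # sent[ACTION] holds the full state-ordered numpy vector.
+            ```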
+ """ + + self.zmq_cmd_socket.send_string(json.dumps(action)) # action is in motor space + + # TODO(Steven): Remove the np conversion when it is possible to record a non-numpy array value + actions = np.array([action.get(k, 0.0) for k in self._state_order], dtype=np.float32) + + action_sent = {key: actions[i] for i, key in enumerate(self._state_order)} + action_sent[ACTION] = actions + return action_sent + + @check_if_not_connected + def disconnect(self): + """Cleans ZMQ comms""" + + self.zmq_observation_socket.close() + self.zmq_cmd_socket.close() + self.zmq_context.term() + self._is_connected = False diff --git a/lerobot/src/lerobot/robots/lekiwi/lekiwi_host.py b/lerobot/src/lerobot/robots/lekiwi/lekiwi_host.py new file mode 100644 index 0000000000000000000000000000000000000000..741005a8867c47fc55563dffe5edba86654cc8fb --- /dev/null +++ b/lerobot/src/lerobot/robots/lekiwi/lekiwi_host.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import json +import logging +import time +from dataclasses import dataclass, field + +import cv2 +import draccus +import zmq + +from .config_lekiwi import LeKiwiConfig, LeKiwiHostConfig +from .lekiwi import LeKiwi + + +@dataclass +class LeKiwiServerConfig: + """Configuration for the LeKiwi host script.""" + + robot: LeKiwiConfig = field(default_factory=LeKiwiConfig) + host: LeKiwiHostConfig = field(default_factory=LeKiwiHostConfig) + + +class LeKiwiHost: + def __init__(self, config: LeKiwiHostConfig): + self.zmq_context = zmq.Context() + self.zmq_cmd_socket = self.zmq_context.socket(zmq.PULL) + self.zmq_cmd_socket.setsockopt(zmq.CONFLATE, 1) + self.zmq_cmd_socket.bind(f"tcp://*:{config.port_zmq_cmd}") + + self.zmq_observation_socket = self.zmq_context.socket(zmq.PUSH) + self.zmq_observation_socket.setsockopt(zmq.CONFLATE, 1) + self.zmq_observation_socket.bind(f"tcp://*:{config.port_zmq_observations}") + + self.connection_time_s = config.connection_time_s + self.watchdog_timeout_ms = config.watchdog_timeout_ms + self.max_loop_freq_hz = config.max_loop_freq_hz + + def disconnect(self): + self.zmq_observation_socket.close() + self.zmq_cmd_socket.close() + self.zmq_context.term() + + +@draccus.wrap() +def main(cfg: LeKiwiServerConfig): + logging.info("Configuring LeKiwi") + robot = LeKiwi(cfg.robot) + + logging.info("Connecting LeKiwi") + robot.connect() + + logging.info("Starting HostAgent") + host = LeKiwiHost(cfg.host) + + last_cmd_time = time.time() + watchdog_active = False + logging.info("Waiting for commands...") + try: + # Business logic + start = time.perf_counter() + duration = 0 + while duration < host.connection_time_s: + loop_start_time = time.time() + try: + msg = host.zmq_cmd_socket.recv_string(zmq.NOBLOCK) + data = dict(json.loads(msg)) + _action_sent = robot.send_action(data) + last_cmd_time = time.time() + watchdog_active = False + except zmq.Again: + if not watchdog_active: + logging.warning("No command available") + 
except Exception as e: + logging.error("Message fetching failed: %s", e) + + now = time.time() + if (now - last_cmd_time > host.watchdog_timeout_ms / 1000) and not watchdog_active: + logging.warning( + f"Command not received for more than {host.watchdog_timeout_ms} milliseconds. Stopping the base." + ) + watchdog_active = True + robot.stop_base() + + last_observation = robot.get_observation() + + # Encode ndarrays to base64 strings + for cam_key, _ in robot.cameras.items(): + ret, buffer = cv2.imencode( + ".jpg", last_observation[cam_key], [int(cv2.IMWRITE_JPEG_QUALITY), 90] + ) + if ret: + last_observation[cam_key] = base64.b64encode(buffer).decode("utf-8") + else: + last_observation[cam_key] = "" + + # Send the observation to the remote agent + try: + host.zmq_observation_socket.send_string(json.dumps(last_observation), flags=zmq.NOBLOCK) + except zmq.Again: + logging.info("Dropping observation, no client connected") + + # Ensure a short sleep to avoid overloading the CPU. + elapsed = time.time() - loop_start_time + + time.sleep(max(1 / host.max_loop_freq_hz - elapsed, 0)) + duration = time.perf_counter() - start + print("Cycle time reached.") + + except KeyboardInterrupt: + print("Keyboard interrupt received. Exiting...") + finally: + print("Shutting down Lekiwi Host.") + robot.disconnect() + host.disconnect() + + logging.info("Finished LeKiwi cleanly") + + +if __name__ == "__main__": + main() diff --git a/lerobot/src/lerobot/robots/omx_follower/__init__.py b/lerobot/src/lerobot/robots/omx_follower/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..86a431cfcbeb91b5b255d952cc6bffcf64c3f753 --- /dev/null +++ b/lerobot/src/lerobot/robots/omx_follower/__init__.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# OMX is a fully open-source robot from ROBOTIS. +# More information at: https://ai.robotis.com/omx/introduction_omx.html + +from .config_omx_follower import OmxFollowerConfig +from .omx_follower import OmxFollower diff --git a/lerobot/src/lerobot/robots/omx_follower/config_omx_follower.py b/lerobot/src/lerobot/robots/omx_follower/config_omx_follower.py new file mode 100644 index 0000000000000000000000000000000000000000..f64788fbdb0f4fdfb6001dd6b106d14c990349d0 --- /dev/null +++ b/lerobot/src/lerobot/robots/omx_follower/config_omx_follower.py @@ -0,0 +1,39 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field + +from lerobot.cameras import CameraConfig + +from ..config import RobotConfig + + +@RobotConfig.register_subclass("omx_follower") +@dataclass +class OmxFollowerConfig(RobotConfig): + # Port to connect to the arm + port: str + + disable_torque_on_disconnect: bool = True + + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a dictionary that maps motor + # names to the max_relative_target value for that motor. + max_relative_target: float | dict[str, float] | None = None + + # cameras + cameras: dict[str, CameraConfig] = field(default_factory=dict) + + # Set to `True` for backward compatibility with previous policies/dataset + use_degrees: bool = False diff --git a/lerobot/src/lerobot/robots/omx_follower/omx_follower.py b/lerobot/src/lerobot/robots/omx_follower/omx_follower.py new file mode 100644 index 0000000000000000000000000000000000000000..ca34997b7731f458632971e71a8ef6e6aab87ee2 --- /dev/null +++ b/lerobot/src/lerobot/robots/omx_follower/omx_follower.py @@ -0,0 +1,219 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import logging
+import time
+from functools import cached_property
+
+from lerobot.cameras.utils import make_cameras_from_configs
+from lerobot.motors import Motor, MotorCalibration, MotorNormMode
+from lerobot.motors.dynamixel import (
+    DriveMode,
+    DynamixelMotorsBus,
+    OperatingMode,
+)
+from lerobot.processor import RobotAction, RobotObservation
+from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
+
+from ..robot import Robot
+from ..utils import ensure_safe_goal_position
+from .config_omx_follower import OmxFollowerConfig
+
+logger = logging.getLogger(__name__)
+
+
+class OmxFollower(Robot):
+    """
+    [OMX](https://github.com/ROBOTIS-GIT/open_manipulator), a fully open-source manipulator
+    developed by Woojin Wie and Junha Cha from [ROBOTIS](https://ai.robotis.com/).
+    """
+
+    config_class = OmxFollowerConfig
+    name = "omx_follower"
+
+    def __init__(self, config: OmxFollowerConfig):
+        super().__init__(config)
+        self.config = config
+        norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100
+        self.bus = DynamixelMotorsBus(
+            port=self.config.port,
+            motors={
+                "shoulder_pan": Motor(11, "xl430-w250", norm_mode_body),
+                "shoulder_lift": Motor(12, "xl430-w250", norm_mode_body),
+                "elbow_flex": Motor(13, "xl430-w250", norm_mode_body),
+                "wrist_flex": Motor(14, "xl330-m288", norm_mode_body),
+                "wrist_roll": Motor(15, "xl330-m288", norm_mode_body),
+                "gripper": Motor(16, "xl330-m288", MotorNormMode.RANGE_0_100),
+            },
+            calibration=self.calibration,
+        )
+        self.cameras = make_cameras_from_configs(config.cameras)
+
+    @property
+    def _motors_ft(self) -> dict[str, type]:
+        return {f"{motor}.pos": float for motor in self.bus.motors}
+
+    @property
+    def _cameras_ft(self) -> dict[str, tuple]:
+        return {
+            cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
+        }
+
+    @cached_property
+    def observation_features(self) -> dict[str, type | tuple]:
+        return {**self._motors_ft, **self._cameras_ft}
+
+    @cached_property
+    def action_features(self) -> dict[str, type]:
+        return self._motors_ft
+
+    @property
+    def is_connected(self) -> bool:
+        return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
+
+    @check_if_already_connected
+    def connect(self, calibrate: bool = True) -> None:
+        """
+        For OMX robots that come pre-calibrated:
+        - If default calibration from package doesn't match motors, read from motors and save
+        - This allows using pre-calibrated robots without manual calibration
+        - If no calibration file exists, use factory default values (homing_offset=0, range_min=0, range_max=4095)
+        """
+
+        self.bus.connect()
+        if not self.is_calibrated and calibrate:
+            logger.info(
+                "Mismatch between calibration values in the motor and the calibration file or no calibration file found"
+            )
+            self.calibrate()
+
+        for cam in self.cameras.values():
+            cam.connect()
+
+        self.configure()
+        logger.info(f"{self} connected.")
+
+    @property
+    def is_calibrated(self) -> bool:
+        return self.bus.is_calibrated
+
+    def calibrate(self) -> None:
+        self.bus.disable_torque()
+        logger.info(f"\nUsing factory default calibration values for {self}")
+        logger.info(f"\nWriting default configuration of {self} to the motors")
+        for motor in self.bus.motors:
+            self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
+
+        for motor in self.bus.motors:
+            self.bus.write("Drive_Mode", motor, DriveMode.NON_INVERTED.value)
+
+        self.calibration = {}
+        for motor, m in self.bus.motors.items():
+ 
self.calibration[motor] = MotorCalibration(
+                id=m.id,
+                drive_mode=0,
+                homing_offset=0,
+                range_min=0,
+                range_max=4095,
+            )
+
+        self.bus.write_calibration(self.calibration)
+        self._save_calibration()
+        logger.info(f"Calibration saved to {self.calibration_fpath}")
+
+    def configure(self) -> None:
+        with self.bus.torque_disabled():
+            self.bus.configure_motors()
+            # Use 'extended position mode' for all motors except gripper, because in joint mode the servos
+            # can't rotate more than 360 degrees (from 0 to 4095). Since mistakes can happen while assembling
+            # the arm, you could otherwise end up with a servo at position 0 or 4095 at a crucial point.
+            for motor in self.bus.motors:
+                if motor != "gripper":
+                    self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
+
+            # Use 'position control current based' for the gripper so that it is limited by the current
+            # limit. For the follower gripper, it means it can grasp an object without forcing too much,
+            # even though its goal position is a complete grasp (both gripper fingers are commanded to close
+            # and touch). For the leader gripper, it means we can use it as a physical trigger, since we can
+            # force with our finger to make it move, and it will move back to its original target position
+            # when we release the force.
+            self.bus.write("Operating_Mode", "gripper", OperatingMode.CURRENT_POSITION.value)
+
+            # Set better PID values to close the gap between recorded states and actions
+            # TODO(rcadene): Implement an automatic procedure to set optimal PID values for each motor
+            self.bus.write("Position_P_Gain", "elbow_flex", 1500)
+            self.bus.write("Position_I_Gain", "elbow_flex", 0)
+            self.bus.write("Position_D_Gain", "elbow_flex", 600)
+
+    def setup_motors(self) -> None:
+        for motor in reversed(self.bus.motors):
+            input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+            self.bus.setup_motor(motor)
+            print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+    @check_if_not_connected
+    def get_observation(self) -> RobotObservation:
+        # Read arm position
+        start = time.perf_counter()
+        obs_dict = self.bus.sync_read("Present_Position")
+        obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
+        dt_ms = (time.perf_counter() - start) * 1e3
+        logger.debug(f"{self} read state: {dt_ms:.1f}ms")
+
+        # Capture images from cameras
+        for cam_key, cam in self.cameras.items():
+            start = time.perf_counter()
+            obs_dict[cam_key] = cam.async_read()
+            dt_ms = (time.perf_counter() - start) * 1e3
+            logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
+
+        return obs_dict
+
+    @check_if_not_connected
+    def send_action(self, action: RobotAction) -> RobotAction:
+        """Command arm to move to a target joint configuration.
+
+        The relative action magnitude may be clipped depending on the configuration parameter
+        `max_relative_target`. In this case, the action sent differs from the original action.
+        Thus, this function always returns the action actually sent.
+
+        Args:
+            action (RobotAction): The goal positions for the motors.
+
+        Returns:
+            RobotAction: The action sent to the motors, potentially clipped.
+        """
+
+        goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}
+
+        # Cap goal position when too far away from present position.
+        # /!\ Slower fps expected due to reading from the follower. 
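+        # For example (hypothetical values): with max_relative_target=5.0 and a motor
+        # currently at 100.0, a goal of 112.0 would be clamped to 105.0 by
+        # ensure_safe_goal_position before being written to the bus.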
+ if self.config.max_relative_target is not None: + present_pos = self.bus.sync_read("Present_Position") + goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()} + goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target) + + # Send goal position to the arm + self.bus.sync_write("Goal_Position", goal_pos) + return {f"{motor}.pos": val for motor, val in goal_pos.items()} + + @check_if_not_connected + def disconnect(self): + self.bus.disconnect(self.config.disable_torque_on_disconnect) + for cam in self.cameras.values(): + cam.disconnect() + + logger.info(f"{self} disconnected.") diff --git a/lerobot/src/lerobot/robots/reachy2/__init__.py b/lerobot/src/lerobot/robots/reachy2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef9d3c4764d32fe38461980a832e838d27bdc4b8 --- /dev/null +++ b/lerobot/src/lerobot/robots/reachy2/__init__.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .configuration_reachy2 import Reachy2RobotConfig +from .robot_reachy2 import ( + REACHY2_ANTENNAS_JOINTS, + REACHY2_L_ARM_JOINTS, + REACHY2_NECK_JOINTS, + REACHY2_R_ARM_JOINTS, + REACHY2_VEL, + Reachy2Robot, +) diff --git a/lerobot/src/lerobot/robots/reachy2/configuration_reachy2.py b/lerobot/src/lerobot/robots/reachy2/configuration_reachy2.py new file mode 100644 index 0000000000000000000000000000000000000000..e67145ca5058856b0e2d4474b812eebc64e45e84 --- /dev/null +++ b/lerobot/src/lerobot/robots/reachy2/configuration_reachy2.py @@ -0,0 +1,117 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field + +from lerobot.cameras import CameraConfig +from lerobot.cameras.configs import ColorMode +from lerobot.cameras.reachy2_camera import Reachy2CameraConfig + +from ..config import RobotConfig + + +@RobotConfig.register_subclass("reachy2") +@dataclass +class Reachy2RobotConfig(RobotConfig): + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors. + max_relative_target: float | None = None + + # IP address of the Reachy 2 robot + ip_address: str | None = "localhost" + # Port of the Reachy 2 robot + port: int = 50065 + + # If True, turn_off_smoothly() will be sent to the robot before disconnecting. 
+ disable_torque_on_disconnect: bool = False + + # Tag for external commands control + # Set to True if you use an external commands system to control the robot, + # such as the official teleoperation application: https://github.com/pollen-robotics/Reachy2Teleoperation + # If True, robot.send_action() will not send commands to the robot. + use_external_commands: bool = False + + # Robot parts + # Set to False to not add the corresponding joints part to the robot list of joints. + # By default, all parts are set to True. + with_mobile_base: bool = True + with_l_arm: bool = True + with_r_arm: bool = True + with_neck: bool = True + with_antennas: bool = True + + # Robot cameras + # Set to True if you want to use the corresponding cameras in the observations. + # By default, no camera is used. + with_left_teleop_camera: bool = False + with_right_teleop_camera: bool = False + with_torso_camera: bool = False + + # Camera parameters + camera_width: int = 640 + camera_height: int = 480 + + # For cameras other than the 3 default Reachy 2 cameras. + cameras: dict[str, CameraConfig] = field(default_factory=dict) + + def __post_init__(self) -> None: + # Add cameras with same ip_address as the robot + if self.with_left_teleop_camera: + self.cameras["teleop_left"] = Reachy2CameraConfig( + name="teleop", + image_type="left", + ip_address=self.ip_address, + port=self.port, + width=self.camera_width, + height=self.camera_height, + fps=30, # Not configurable for Reachy 2 cameras + color_mode=ColorMode.RGB, + ) + if self.with_right_teleop_camera: + self.cameras["teleop_right"] = Reachy2CameraConfig( + name="teleop", + image_type="right", + ip_address=self.ip_address, + port=self.port, + width=self.camera_width, + height=self.camera_height, + fps=30, # Not configurable for Reachy 2 cameras + color_mode=ColorMode.RGB, + ) + if self.with_torso_camera: + self.cameras["torso_rgb"] = Reachy2CameraConfig( + name="depth", + image_type="rgb", + ip_address=self.ip_address, + port=self.port, + width=self.camera_width, + height=self.camera_height, + fps=30, # Not configurable for Reachy 2 cameras + color_mode=ColorMode.RGB, + ) + + super().__post_init__() + + if not ( + self.with_mobile_base + or self.with_l_arm + or self.with_r_arm + or self.with_neck + or self.with_antennas + ): + raise ValueError( + "No Reachy2Robot part used.\n" + "At least one part of the robot must be set to True " + "(with_mobile_base, with_l_arm, with_r_arm, with_neck, with_antennas)" + ) diff --git a/lerobot/src/lerobot/robots/reachy2/robot_reachy2.py b/lerobot/src/lerobot/robots/reachy2/robot_reachy2.py new file mode 100644 index 0000000000000000000000000000000000000000..a59fd13190ba56ddd6108593fe74701a169a007b --- /dev/null +++ b/lerobot/src/lerobot/robots/reachy2/robot_reachy2.py @@ -0,0 +1,235 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import time +from typing import TYPE_CHECKING, Any + +from lerobot.cameras.utils import make_cameras_from_configs +from lerobot.processor import RobotAction, RobotObservation +from lerobot.utils.import_utils import _reachy2_sdk_available + +from ..robot import Robot +from ..utils import ensure_safe_goal_position +from .configuration_reachy2 import Reachy2RobotConfig + +if TYPE_CHECKING or _reachy2_sdk_available: + from reachy2_sdk import ReachySDK +else: + ReachySDK = None + +# {lerobot_keys: reachy2_sdk_keys} +REACHY2_NECK_JOINTS = { + "neck_yaw.pos": "head.neck.yaw", + "neck_pitch.pos": "head.neck.pitch", + "neck_roll.pos": "head.neck.roll", +} + +REACHY2_ANTENNAS_JOINTS = { + "l_antenna.pos": "head.l_antenna", + "r_antenna.pos": "head.r_antenna", +} + +REACHY2_R_ARM_JOINTS = { + "r_shoulder_pitch.pos": "r_arm.shoulder.pitch", + "r_shoulder_roll.pos": "r_arm.shoulder.roll", + "r_elbow_yaw.pos": "r_arm.elbow.yaw", + "r_elbow_pitch.pos": "r_arm.elbow.pitch", + "r_wrist_roll.pos": "r_arm.wrist.roll", + "r_wrist_pitch.pos": "r_arm.wrist.pitch", + "r_wrist_yaw.pos": "r_arm.wrist.yaw", + "r_gripper.pos": "r_arm.gripper", +} + +REACHY2_L_ARM_JOINTS = { + "l_shoulder_pitch.pos": "l_arm.shoulder.pitch", + "l_shoulder_roll.pos": "l_arm.shoulder.roll", + "l_elbow_yaw.pos": "l_arm.elbow.yaw", + "l_elbow_pitch.pos": "l_arm.elbow.pitch", + "l_wrist_roll.pos": "l_arm.wrist.roll", + "l_wrist_pitch.pos": "l_arm.wrist.pitch", + "l_wrist_yaw.pos": "l_arm.wrist.yaw", + "l_gripper.pos": "l_arm.gripper", +} + +REACHY2_VEL = { + "mobile_base.vx": "vx", + "mobile_base.vy": "vy", + "mobile_base.vtheta": "vtheta", +} + + +class Reachy2Robot(Robot): + """ + [Reachy 2](https://www.pollen-robotics.com/reachy/), by Pollen Robotics. + """ + + config_class = Reachy2RobotConfig + name = "reachy2" + + def __init__(self, config: Reachy2RobotConfig): + super().__init__(config) + + self.config = config + self.robot_type = self.config.type + self.use_external_commands = self.config.use_external_commands + + self.reachy: None | ReachySDK = None + self.cameras = make_cameras_from_configs(config.cameras) + + self.logs: dict[str, float] = {} + + self.joints_dict: dict[str, str] = self._generate_joints_dict() + + @property + def observation_features(self) -> dict[str, Any]: + return {**self.motors_features, **self.camera_features} + + @property + def action_features(self) -> dict[str, type]: + return self.motors_features + + @property + def camera_features(self) -> dict[str, tuple[int | None, int | None, int]]: + return {cam: (self.cameras[cam].height, self.cameras[cam].width, 3) for cam in self.cameras} + + @property + def motors_features(self) -> dict[str, type]: + if self.config.with_mobile_base: + return { + **dict.fromkeys( + self.joints_dict.keys(), + float, + ), + **dict.fromkeys( + REACHY2_VEL.keys(), + float, + ), + } + else: + return dict.fromkeys(self.joints_dict.keys(), float) + + @property + def is_connected(self) -> bool: + return self.reachy.is_connected() if self.reachy is not None else False + + def connect(self, calibrate: bool = False) -> None: + self.reachy = ReachySDK(self.config.ip_address) + if not self.is_connected: + raise ConnectionError() + + for cam in self.cameras.values(): + cam.connect() + + self.configure() + + def configure(self) -> None: + if self.reachy is not None: + self.reachy.turn_on() + self.reachy.reset_default_limits() + + @property + def is_calibrated(self) -> bool: + return True + + def calibrate(self) -> None: + pass + + def 
_generate_joints_dict(self) -> dict[str, str]: + joints = {} + if self.config.with_neck: + joints.update(REACHY2_NECK_JOINTS) + if self.config.with_l_arm: + joints.update(REACHY2_L_ARM_JOINTS) + if self.config.with_r_arm: + joints.update(REACHY2_R_ARM_JOINTS) + if self.config.with_antennas: + joints.update(REACHY2_ANTENNAS_JOINTS) + return joints + + def _get_state(self) -> dict[str, float]: + if self.reachy is not None: + pos_dict = {k: self.reachy.joints[v].present_position for k, v in self.joints_dict.items()} + if not self.config.with_mobile_base: + return pos_dict + vel_dict = {k: self.reachy.mobile_base.odometry[v] for k, v in REACHY2_VEL.items()} + return {**pos_dict, **vel_dict} + else: + return {} + + def get_observation(self) -> RobotObservation: + obs_dict: RobotObservation = {} + + # Read Reachy 2 state + before_read_t = time.perf_counter() + obs_dict.update(self._get_state()) + self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t + + # Capture images from cameras + for cam_key, cam in self.cameras.items(): + obs_dict[cam_key] = cam.async_read() + + return obs_dict + + def send_action(self, action: RobotAction) -> RobotAction: + if self.reachy is not None: + if not self.is_connected: + raise ConnectionError() + + before_write_t = time.perf_counter() + + vel = {} + goal_pos = {} + for key, val in action.items(): + if key not in self.joints_dict: + if key not in REACHY2_VEL: + raise KeyError(f"Key '{key}' is not a valid motor key in Reachy 2.") + else: + vel[REACHY2_VEL[key]] = float(val) + else: + if not self.use_external_commands and self.config.max_relative_target is not None: + goal_pos[key] = float(val) + goal_present_pos = { + key: ( + goal_pos[key], + self.reachy.joints[self.joints_dict[key]].present_position, + ) + } + safe_goal_pos = ensure_safe_goal_position( + goal_present_pos, float(self.config.max_relative_target) + ) + val = safe_goal_pos[key] + self.reachy.joints[self.joints_dict[key]].goal_position = float(val) + + if self.config.with_mobile_base: + self.reachy.mobile_base.set_goal_speed(vel["vx"], vel["vy"], vel["vtheta"]) + + # We don't send the goal positions if we control Reachy 2 externally + if not self.use_external_commands: + self.reachy.send_goal_positions() + if self.config.with_mobile_base: + self.reachy.mobile_base.send_speed_command() + + self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t + return action + + def disconnect(self) -> None: + if self.reachy is not None: + for cam in self.cameras.values(): + cam.disconnect() + if self.config.disable_torque_on_disconnect: + self.reachy.turn_off_smoothly() + self.reachy.disconnect() diff --git a/lerobot/src/lerobot/robots/so_follower/__init__.py b/lerobot/src/lerobot/robots/so_follower/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dfe0e340d78350d4de2c7f9bd0cc35a7684aaca6 --- /dev/null +++ b/lerobot/src/lerobot/robots/so_follower/__init__.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python + +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from .config_so_follower import ( + SO100FollowerConfig, + SO101FollowerConfig, + SOFollowerConfig, + SOFollowerRobotConfig, +) +from .so_follower import SO100Follower, SO101Follower, SOFollower diff --git a/lerobot/src/lerobot/robots/so_follower/config_so_follower.py b/lerobot/src/lerobot/robots/so_follower/config_so_follower.py new file mode 100644 index 0000000000000000000000000000000000000000..4a9f7bf3ec0e6a6be700a73968d12d1cf4e2fc89 --- /dev/null +++ b/lerobot/src/lerobot/robots/so_follower/config_so_follower.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import TypeAlias + +from lerobot.cameras import CameraConfig + +from ..config import RobotConfig + + +@dataclass +class SOFollowerConfig: + """Base configuration class for SO Follower robots.""" + + # Port to connect to the arm + port: str + + disable_torque_on_disconnect: bool = True + + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a dictionary that maps motor + # names to the max_relative_target value for that motor. + max_relative_target: float | dict[str, float] | None = None + + # cameras + cameras: dict[str, CameraConfig] = field(default_factory=dict) + + # Set to `True` for backward compatibility with previous policies/dataset + use_degrees: bool = False + + +@RobotConfig.register_subclass("so101_follower") +@RobotConfig.register_subclass("so100_follower") +@dataclass +class SOFollowerRobotConfig(RobotConfig, SOFollowerConfig): + pass + + +SO100FollowerConfig: TypeAlias = SOFollowerRobotConfig +SO101FollowerConfig: TypeAlias = SOFollowerRobotConfig diff --git a/lerobot/src/lerobot/robots/so_follower/robot_kinematic_processor.py b/lerobot/src/lerobot/robots/so_follower/robot_kinematic_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..2d7b21ec4a4c75cf82dfe3abd6697c66bd7bed8f --- /dev/null +++ b/lerobot/src/lerobot/robots/so_follower/robot_kinematic_processor.py @@ -0,0 +1,611 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dataclasses import dataclass, field +from typing import Any + +import numpy as np + +from lerobot.configs.types import FeatureType, PipelineFeatureType, PolicyFeature +from lerobot.model.kinematics import RobotKinematics +from lerobot.processor import ( + EnvTransition, + ObservationProcessorStep, + ProcessorStep, + ProcessorStepRegistry, + RobotAction, + RobotActionProcessorStep, + RobotObservation, + TransitionKey, +) +from lerobot.utils.rotation import Rotation + + +@ProcessorStepRegistry.register("ee_reference_and_delta") +@dataclass +class EEReferenceAndDelta(RobotActionProcessorStep): + """ + Computes a target end-effector pose from a relative delta command. + + This step takes a desired change in position and orientation (`target_*`) and applies it to a + reference end-effector pose to calculate an absolute target pose. The reference pose is derived + from the current robot joint positions using forward kinematics. + + The processor can operate in two modes: + 1. `use_latched_reference=True`: The reference pose is "latched" or saved at the moment the action + is first enabled. Subsequent commands are relative to this fixed reference. + 2. `use_latched_reference=False`: The reference pose is updated to the robot's current pose at + every step. + + Attributes: + kinematics: The robot's kinematic model for forward kinematics. + end_effector_step_sizes: A dictionary scaling the input delta commands. + motor_names: A list of motor names required for forward kinematics. + use_latched_reference: If True, latch the reference pose on enable; otherwise, always use the + current pose as the reference. + reference_ee_pose: Internal state storing the latched reference pose. + _prev_enabled: Internal state to detect the rising edge of the enable signal. + _command_when_disabled: Internal state to hold the last command while disabled. 
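+
+    Example (a minimal sketch; `kin` is assumed to be a configured `RobotKinematics`
+    instance and the step sizes are made-up values):
+        ```python
+        step = EEReferenceAndDelta(
+            kinematics=kin,
+            end_effector_step_sizes={"x": 0.01, "y": 0.01, "z": 0.01},
+            motor_names=["shoulder_pan", "shoulder_lift", "elbow_flex",
+                         "wrist_flex", "wrist_roll", "gripper"],
+        )
+        # With enabled=True and target_x=1.0, the commanded "ee.x" moves 1 cm along x
+        # relative to the latched reference pose.
+        ```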
+    """
+
+    kinematics: RobotKinematics
+    end_effector_step_sizes: dict
+    motor_names: list[str]
+    use_latched_reference: bool = (
+        True  # If True, latch reference on enable; if False, always use current pose
+    )
+    use_ik_solution: bool = False
+
+    reference_ee_pose: np.ndarray | None = field(default=None, init=False, repr=False)
+    _prev_enabled: bool = field(default=False, init=False, repr=False)
+    _command_when_disabled: np.ndarray | None = field(default=None, init=False, repr=False)
+
+    def action(self, action: RobotAction) -> RobotAction:
+        observation = self.transition.get(TransitionKey.OBSERVATION)
+
+        if observation is None:
+            raise ValueError("Joints observation is required for computing robot kinematics")
+
+        observation = observation.copy()
+
+        if self.use_ik_solution and "IK_solution" in self.transition.get(TransitionKey.COMPLEMENTARY_DATA):
+            q_raw = self.transition.get(TransitionKey.COMPLEMENTARY_DATA)["IK_solution"]
+        else:
+            q_raw = np.array(
+                [
+                    float(v)
+                    for k, v in observation.items()
+                    if isinstance(k, str)
+                    and k.endswith(".pos")
+                    and k.removesuffix(".pos") in self.motor_names
+                ],
+                dtype=float,
+            )
+
+        if len(q_raw) == 0:
+            raise ValueError("Joints observation is required for computing robot kinematics")
+
+        # Current pose from FK on measured joints
+        t_curr = self.kinematics.forward_kinematics(q_raw)
+
+        enabled = bool(action.pop("enabled"))
+        tx = float(action.pop("target_x"))
+        ty = float(action.pop("target_y"))
+        tz = float(action.pop("target_z"))
+        wx = float(action.pop("target_wx"))
+        wy = float(action.pop("target_wy"))
+        wz = float(action.pop("target_wz"))
+        gripper_vel = float(action.pop("gripper_vel"))
+
+        desired = None
+
+        if enabled:
+            ref = t_curr
+            if self.use_latched_reference:
+                # Latched reference mode: latch reference at the rising edge
+                if not self._prev_enabled or self.reference_ee_pose is None:
+                    self.reference_ee_pose = t_curr.copy()
+                ref = self.reference_ee_pose if self.reference_ee_pose is not None else t_curr
+
+            delta_p = np.array(
+                [
+                    tx * self.end_effector_step_sizes["x"],
+                    ty * self.end_effector_step_sizes["y"],
+                    tz * self.end_effector_step_sizes["z"],
+                ],
+                dtype=float,
+            )
+            r_abs = Rotation.from_rotvec([wx, wy, wz]).as_matrix()
+            desired = np.eye(4, dtype=float)
+            desired[:3, :3] = ref[:3, :3] @ r_abs
+            desired[:3, 3] = ref[:3, 3] + delta_p
+
+            self._command_when_disabled = desired.copy()
+        else:
+            # While disabled, keep sending the same command to avoid drift.
+            if self._command_when_disabled is None:
+                # If we've never had an enabled command yet, freeze current FK pose once. 
+ self._command_when_disabled = t_curr.copy() + desired = self._command_when_disabled.copy() + + # Write action fields + pos = desired[:3, 3] + tw = Rotation.from_matrix(desired[:3, :3]).as_rotvec() + action["ee.x"] = float(pos[0]) + action["ee.y"] = float(pos[1]) + action["ee.z"] = float(pos[2]) + action["ee.wx"] = float(tw[0]) + action["ee.wy"] = float(tw[1]) + action["ee.wz"] = float(tw[2]) + action["ee.gripper_vel"] = gripper_vel + + self._prev_enabled = enabled + return action + + def reset(self): + """Resets the internal state of the processor.""" + self._prev_enabled = False + self.reference_ee_pose = None + self._command_when_disabled = None + + def transform_features( + self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]] + ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]: + for feat in [ + "enabled", + "target_x", + "target_y", + "target_z", + "target_wx", + "target_wy", + "target_wz", + "gripper_vel", + ]: + features[PipelineFeatureType.ACTION].pop(f"{feat}", None) + + for feat in ["x", "y", "z", "wx", "wy", "wz", "gripper_vel"]: + features[PipelineFeatureType.ACTION][f"ee.{feat}"] = PolicyFeature( + type=FeatureType.ACTION, shape=(1,) + ) + + return features + + +@ProcessorStepRegistry.register("ee_bounds_and_safety") +@dataclass +class EEBoundsAndSafety(RobotActionProcessorStep): + """ + Clips the end-effector pose to predefined bounds and checks for unsafe jumps. + + This step ensures that the target end-effector pose remains within a safe operational workspace. + It also moderates the command to prevent large, sudden movements between consecutive steps. + + Attributes: + end_effector_bounds: A dictionary with "min" and "max" keys for position clipping. + max_ee_step_m: The maximum allowed change in position (in meters) between steps. + _last_pos: Internal state storing the last commanded position. 
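+
+    Example (a minimal sketch with made-up workspace bounds):
+        ```python
+        step = EEBoundsAndSafety(
+            end_effector_bounds={"min": [-0.3, -0.3, 0.0], "max": [0.3, 0.3, 0.4]},
+            max_ee_step_m=0.05,
+        )
+        # An "ee.z" command of 0.5 is clipped to 0.4; a positional jump larger than
+        # 5 cm between consecutive steps raises a ValueError.
+        ```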
+    """
+
+    end_effector_bounds: dict
+    max_ee_step_m: float = 0.05
+    _last_pos: np.ndarray | None = field(default=None, init=False, repr=False)
+
+    def action(self, action: RobotAction) -> RobotAction:
+        x = action.get("ee.x")
+        y = action.get("ee.y")
+        z = action.get("ee.z")
+        wx = action.get("ee.wx")
+        wy = action.get("ee.wy")
+        wz = action.get("ee.wz")
+        # TODO(Steven): ee.gripper_vel does not need to be bounded
+
+        if None in (x, y, z, wx, wy, wz):
+            raise ValueError(
+                "Missing required end-effector pose components: x, y, z, wx, wy, wz must all be present in action"
+            )
+
+        pos = np.array([x, y, z], dtype=float)
+        twist = np.array([wx, wy, wz], dtype=float)
+
+        # Clip position
+        pos = np.clip(pos, self.end_effector_bounds["min"], self.end_effector_bounds["max"])
+
+        # Check for jumps in position
+        if self._last_pos is not None:
+            dpos = pos - self._last_pos
+            n = float(np.linalg.norm(dpos))
+            if n > self.max_ee_step_m:
+                raise ValueError(f"EE jump {n:.3f}m > {self.max_ee_step_m}m")
+
+        self._last_pos = pos
+
+        action["ee.x"] = float(pos[0])
+        action["ee.y"] = float(pos[1])
+        action["ee.z"] = float(pos[2])
+        action["ee.wx"] = float(twist[0])
+        action["ee.wy"] = float(twist[1])
+        action["ee.wz"] = float(twist[2])
+        return action
+
+    def reset(self):
+        """Resets the last known end-effector position."""
+        self._last_pos = None
+
+    def transform_features(
+        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
+    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
+        return features
+
+
+@ProcessorStepRegistry.register("inverse_kinematics_ee_to_joints")
+@dataclass
+class InverseKinematicsEEToJoints(RobotActionProcessorStep):
+    """
+    Computes desired joint positions from a target end-effector pose using inverse kinematics (IK).
+
+    This step translates a Cartesian command (position and orientation of the end-effector) into
+    the corresponding joint-space commands for each motor.
+
+    Attributes:
+        kinematics: The robot's kinematic model for inverse kinematics.
+        motor_names: A list of motor names for which to compute joint positions.
+        q_curr: Internal state storing the last joint positions, used as an initial guess for the IK solver.
+        initial_guess_current_joints: If True, use the robot's current joint state as the IK guess.
+            If False, use the solution from the previous step. 
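+
+    Example (a minimal sketch; `kin` is assumed to be a configured `RobotKinematics`
+    instance for the arm):
+        ```python
+        step = InverseKinematicsEEToJoints(
+            kinematics=kin,
+            motor_names=["shoulder_pan", "shoulder_lift", "elbow_flex",
+                         "wrist_flex", "wrist_roll", "gripper"],
+        )
+        # Consumes the "ee.*" keys of the action and replaces them with one
+        # "<motor>.pos" key per motor, e.g. "shoulder_pan.pos".
+        ```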
+    """
+
+    kinematics: RobotKinematics
+    motor_names: list[str]
+    q_curr: np.ndarray | None = field(default=None, init=False, repr=False)
+    initial_guess_current_joints: bool = True
+
+    def action(self, action: RobotAction) -> RobotAction:
+        x = action.pop("ee.x", None)
+        y = action.pop("ee.y", None)
+        z = action.pop("ee.z", None)
+        wx = action.pop("ee.wx", None)
+        wy = action.pop("ee.wy", None)
+        wz = action.pop("ee.wz", None)
+        gripper_pos = action.pop("ee.gripper_pos", None)
+
+        if None in (x, y, z, wx, wy, wz, gripper_pos):
+            raise ValueError(
+                "Missing required end-effector pose components: ee.x, ee.y, ee.z, ee.wx, ee.wy, ee.wz, ee.gripper_pos must all be present in action"
+            )
+
+        observation = self.transition.get(TransitionKey.OBSERVATION)
+        if observation is None:
+            raise ValueError("Joints observation is required for computing robot kinematics")
+        observation = observation.copy()
+
+        q_raw = np.array(
+            [float(v) for k, v in observation.items() if isinstance(k, str) and k.endswith(".pos")],
+            dtype=float,
+        )
+        if len(q_raw) == 0:
+            raise ValueError("Joints observation is required for computing robot kinematics")
+
+        if self.initial_guess_current_joints:  # Use current joints as initial guess
+            self.q_curr = q_raw
+        else:  # Use previous ik solution as initial guess
+            if self.q_curr is None:
+                self.q_curr = q_raw
+
+        # Build desired 4x4 transform from pos + rotvec (twist)
+        t_des = np.eye(4, dtype=float)
+        t_des[:3, :3] = Rotation.from_rotvec([wx, wy, wz]).as_matrix()
+        t_des[:3, 3] = [x, y, z]
+
+        # Compute inverse kinematics
+        q_target = self.kinematics.inverse_kinematics(self.q_curr, t_des)
+        self.q_curr = q_target
+
+        # TODO: This is sensitive to the order of the motor_names -> q_target mapping
+        for i, name in enumerate(self.motor_names):
+            if name != "gripper":
+                action[f"{name}.pos"] = float(q_target[i])
+            else:
+                action["gripper.pos"] = float(gripper_pos)
+
+        return action
+
+    def transform_features(
+        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
+    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
+        for feat in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]:
+            features[PipelineFeatureType.ACTION].pop(f"ee.{feat}", None)
+
+        for name in self.motor_names:
+            features[PipelineFeatureType.ACTION][f"{name}.pos"] = PolicyFeature(
+                type=FeatureType.ACTION, shape=(1,)
+            )
+
+        return features
+
+    def reset(self):
+        """Resets the initial guess for the IK solver."""
+        self.q_curr = None
+
+
+@ProcessorStepRegistry.register("gripper_velocity_to_joint")
+@dataclass
+class GripperVelocityToJoint(RobotActionProcessorStep):
+    """
+    Converts a gripper velocity command into a target gripper joint position.
+
+    This step integrates a normalized velocity command over time to produce a position command,
+    taking the current gripper position as a starting point. It also supports a discrete mode
+    where integer actions map to close, stay, or open.
+
+    Attributes:
+        speed_factor: A scaling factor to convert the normalized velocity command to a position change.
+        clip_min: The minimum allowed gripper joint position.
+        clip_max: The maximum allowed gripper joint position.
+        discrete_gripper: If True, treat the input action as discrete (0: close, 1: stay, 2: open). 
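+
+    Example (a minimal sketch with made-up values):
+        ```python
+        step = GripperVelocityToJoint(speed_factor=20.0)
+        # With the gripper currently at 50.0, an "ee.gripper_vel" command of 0.5
+        # yields "ee.gripper_pos" = 50.0 + 0.5 * 20.0 = 60.0, clipped to
+        # [clip_min, clip_max].
+        ```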
+    """
+
+    speed_factor: float = 20.0
+    clip_min: float = 0.0
+    clip_max: float = 100.0
+    discrete_gripper: bool = False
+
+    def action(self, action: RobotAction) -> RobotAction:
+        observation = self.transition.get(TransitionKey.OBSERVATION)
+
+        gripper_vel = action.pop("ee.gripper_vel")
+
+        if observation is None:
+            raise ValueError("Joints observation is required for computing robot kinematics")
+
+        observation = observation.copy()
+
+        q_raw = np.array(
+            [float(v) for k, v in observation.items() if isinstance(k, str) and k.endswith(".pos")],
+            dtype=float,
+        )
+        if len(q_raw) == 0:
+            raise ValueError("Joints observation is required for computing robot kinematics")
+
+        if self.discrete_gripper:
+            # Discrete gripper actions are in [0, 1, 2]
+            # 0: close, 1: stay, 2: open
+            # We need to shift them to [-1, 0, 1] and then scale them to clip_max
+            gripper_vel = (gripper_vel - 1) * self.clip_max
+
+        # Compute desired gripper position
+        delta = gripper_vel * float(self.speed_factor)
+        # TODO: This assumes gripper is the last specified joint in the robot
+        gripper_pos = float(np.clip(q_raw[-1] + delta, self.clip_min, self.clip_max))
+        action["ee.gripper_pos"] = gripper_pos
+
+        return action
+
+    def transform_features(
+        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
+    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
+        features[PipelineFeatureType.ACTION].pop("ee.gripper_vel", None)
+        features[PipelineFeatureType.ACTION]["ee.gripper_pos"] = PolicyFeature(
+            type=FeatureType.ACTION, shape=(1,)
+        )
+
+        return features
+
+
+def compute_forward_kinematics_joints_to_ee(
+    joints: dict[str, Any], kinematics: RobotKinematics, motor_names: list[str]
+) -> dict[str, Any]:
+    motor_joint_values = [joints[f"{n}.pos"] for n in motor_names]
+
+    q = np.array(motor_joint_values, dtype=float)
+    t = kinematics.forward_kinematics(q)
+    pos = t[:3, 3]
+    tw = Rotation.from_matrix(t[:3, :3]).as_rotvec()
+    gripper_pos = joints["gripper.pos"]
+    for n in motor_names:
+        joints.pop(f"{n}.pos")
+    joints["ee.x"] = float(pos[0])
+    joints["ee.y"] = float(pos[1])
+    joints["ee.z"] = float(pos[2])
+    joints["ee.wx"] = float(tw[0])
+    joints["ee.wy"] = float(tw[1])
+    joints["ee.wz"] = float(tw[2])
+    joints["ee.gripper_pos"] = float(gripper_pos)
+    return joints
+
+
+@ProcessorStepRegistry.register("forward_kinematics_joints_to_ee_observation")
+@dataclass
+class ForwardKinematicsJointsToEEObservation(ObservationProcessorStep):
+    """
+    Computes the end-effector pose from joint positions using forward kinematics (FK).
+
+    This step is typically used to add the robot's Cartesian pose to the observation space,
+    which can be useful for visualization or as an input to a policy.
+
+    Attributes:
+        kinematics: The robot's kinematic model.
+        motor_names: The motor names whose joint positions are consumed by the FK computation. 
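+
+    Example (a minimal sketch; `kin` is assumed to be a configured `RobotKinematics`
+    instance):
+        ```python
+        step = ForwardKinematicsJointsToEEObservation(
+            kinematics=kin,
+            motor_names=["shoulder_pan", "shoulder_lift", "elbow_flex",
+                         "wrist_flex", "wrist_roll", "gripper"],
+        )
+        # Replaces the "<motor>.pos" keys of the observation with "ee.x" ... "ee.wz"
+        # and "ee.gripper_pos".
+        ```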
+    """
+
+    kinematics: RobotKinematics
+    motor_names: list[str]
+
+    def observation(self, observation: RobotObservation) -> RobotObservation:
+        return compute_forward_kinematics_joints_to_ee(observation, self.kinematics, self.motor_names)
+
+    def transform_features(
+        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
+    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
+        # We only use the ee pose in the dataset, so we don't need the joint positions
+        for n in self.motor_names:
+            features[PipelineFeatureType.OBSERVATION].pop(f"{n}.pos", None)
+        # We specify the dataset features of this step that we want to be stored in the dataset
+        for k in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]:
+            features[PipelineFeatureType.OBSERVATION][f"ee.{k}"] = PolicyFeature(
+                type=FeatureType.STATE, shape=(1,)
+            )
+        return features
+
+
+@ProcessorStepRegistry.register("forward_kinematics_joints_to_ee_action")
+@dataclass
+class ForwardKinematicsJointsToEEAction(RobotActionProcessorStep):
+    """
+    Computes the end-effector pose from joint positions using forward kinematics (FK).
+
+    This step is typically used to store the action as a Cartesian end-effector pose in the
+    dataset, which can be useful for visualization or for training policies in end-effector space.
+
+    Attributes:
+        kinematics: The robot's kinematic model.
+        motor_names: The motor names whose joint positions are consumed by the FK computation.
+    """
+
+    kinematics: RobotKinematics
+    motor_names: list[str]
+
+    def action(self, action: RobotAction) -> RobotAction:
+        return compute_forward_kinematics_joints_to_ee(action, self.kinematics, self.motor_names)
+
+    def transform_features(
+        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
+    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
+        # We only use the ee pose in the dataset, so we don't need the joint positions
+        for n in self.motor_names:
+            features[PipelineFeatureType.ACTION].pop(f"{n}.pos", None)
+        # We specify the dataset features of this step that we want to be stored in the dataset
+        for k in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]:
+            features[PipelineFeatureType.ACTION][f"ee.{k}"] = PolicyFeature(
+                type=FeatureType.STATE, shape=(1,)
+            )
+        return features
+
+
+@ProcessorStepRegistry.register(name="forward_kinematics_joints_to_ee")
+@dataclass
+class ForwardKinematicsJointsToEE(ProcessorStep):
+    kinematics: RobotKinematics
+    motor_names: list[str]
+
+    def __post_init__(self):
+        self.joints_to_ee_action_processor = ForwardKinematicsJointsToEEAction(
+            kinematics=self.kinematics, motor_names=self.motor_names
+        )
+        self.joints_to_ee_observation_processor = ForwardKinematicsJointsToEEObservation(
+            kinematics=self.kinematics, motor_names=self.motor_names
+        )
+
+    def __call__(self, transition: EnvTransition) -> EnvTransition:
+        if transition.get(TransitionKey.ACTION) is not None:
+            transition = self.joints_to_ee_action_processor(transition)
+        if transition.get(TransitionKey.OBSERVATION) is not None:
+            transition = self.joints_to_ee_observation_processor(transition)
+        return transition
+
+    def transform_features(
+        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
+    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
+        if features[PipelineFeatureType.ACTION] is not None:
+            features = self.joints_to_ee_action_processor.transform_features(features)
+        if features[PipelineFeatureType.OBSERVATION] is not None:
+            features = self.joints_to_ee_observation_processor.transform_features(features)
+        return features
+
+
+@ProcessorStepRegistry.register("inverse_kinematics_rl_step")
+@dataclass
+class 
InverseKinematicsRLStep(ProcessorStep):
+    """
+    Computes desired joint positions from a target end-effector pose using inverse kinematics (IK).
+
+    This is modified from the InverseKinematicsEEToJoints step to be used in the RL pipeline.
+    """
+
+    kinematics: RobotKinematics
+    motor_names: list[str]
+    q_curr: np.ndarray | None = field(default=None, init=False, repr=False)
+    initial_guess_current_joints: bool = True
+
+    def __call__(self, transition: EnvTransition) -> EnvTransition:
+        new_transition = dict(transition)
+        action = new_transition.get(TransitionKey.ACTION)
+        if action is None:
+            raise ValueError("Action is required for InverseKinematicsRLStep")
+        action = dict(action)
+
+        x = action.pop("ee.x", None)
+        y = action.pop("ee.y", None)
+        z = action.pop("ee.z", None)
+        wx = action.pop("ee.wx", None)
+        wy = action.pop("ee.wy", None)
+        wz = action.pop("ee.wz", None)
+        gripper_pos = action.pop("ee.gripper_pos", None)
+
+        if None in (x, y, z, wx, wy, wz, gripper_pos):
+            raise ValueError(
+                "Missing required end-effector pose components: ee.x, ee.y, ee.z, ee.wx, ee.wy, ee.wz, ee.gripper_pos must all be present in action"
+            )
+
+        observation = new_transition.get(TransitionKey.OBSERVATION)
+        if observation is None:
+            raise ValueError("Joints observation is required for computing robot kinematics")
+        observation = observation.copy()
+
+        q_raw = np.array(
+            [float(v) for k, v in observation.items() if isinstance(k, str) and k.endswith(".pos")],
+            dtype=float,
+        )
+        if len(q_raw) == 0:
+            raise ValueError("Joints observation is required for computing robot kinematics")
+
+        if self.initial_guess_current_joints:  # Use current joints as initial guess
+            self.q_curr = q_raw
+        else:  # Use previous ik solution as initial guess
+            if self.q_curr is None:
+                self.q_curr = q_raw
+
+        # Build desired 4x4 transform from pos + rotvec (twist)
+        t_des = np.eye(4, dtype=float)
+        t_des[:3, :3] = Rotation.from_rotvec([wx, wy, wz]).as_matrix()
+        t_des[:3, 3] = [x, y, z]
+
+        # Compute inverse kinematics
+        q_target = self.kinematics.inverse_kinematics(self.q_curr, t_des)
+        self.q_curr = q_target
+
+        # TODO: This is sensitive to the order of the motor_names -> q_target mapping
+        for i, name in enumerate(self.motor_names):
+            if name != "gripper":
+                action[f"{name}.pos"] = float(q_target[i])
+            else:
+                action["gripper.pos"] = float(gripper_pos)
+
+        new_transition[TransitionKey.ACTION] = action
+        complementary_data = new_transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
+        complementary_data["IK_solution"] = q_target
+        new_transition[TransitionKey.COMPLEMENTARY_DATA] = complementary_data
+        return new_transition
+
+    def transform_features(
+        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
+    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
+        for feat in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]:
+            features[PipelineFeatureType.ACTION].pop(f"ee.{feat}", None)
+
+        for name in self.motor_names:
+            features[PipelineFeatureType.ACTION][f"{name}.pos"] = PolicyFeature(
+                type=FeatureType.ACTION, shape=(1,)
+            )
+
+        return features
+
+    def reset(self):
+        """Resets the initial guess for the IK solver."""
+        self.q_curr = None
diff --git a/lerobot/src/lerobot/robots/so_follower/so100.md b/lerobot/src/lerobot/robots/so_follower/so100.md
new file mode 100644
index 0000000000000000000000000000000000000000..ad1154e75a74a496aa74cb1ac1b545238d5174e4
--- /dev/null
+++ b/lerobot/src/lerobot/robots/so_follower/so100.md
@@ -0,0 +1 @@
+../../../../docs/source/so100.mdx
\ No newline at end of file
diff --git a/lerobot/src/lerobot/robots/so_follower/so101.md 
diff --git a/lerobot/src/lerobot/robots/so_follower/so100.md b/lerobot/src/lerobot/robots/so_follower/so100.md
new file mode 100644
index 0000000000000000000000000000000000000000..ad1154e75a74a496aa74cb1ac1b545238d5174e4
--- /dev/null
+++ b/lerobot/src/lerobot/robots/so_follower/so100.md
@@ -0,0 +1 @@
+../../../../docs/source/so100.mdx
\ No newline at end of file
diff --git a/lerobot/src/lerobot/robots/so_follower/so101.md b/lerobot/src/lerobot/robots/so_follower/so101.md
new file mode 100644
index 0000000000000000000000000000000000000000..27b89266029afbf0aa59be195cc0b4b6ee93ac26
--- /dev/null
+++ b/lerobot/src/lerobot/robots/so_follower/so101.md
@@ -0,0 +1 @@
+../../../../docs/source/so101.mdx
\ No newline at end of file
diff --git a/lerobot/src/lerobot/robots/so_follower/so_follower.py b/lerobot/src/lerobot/robots/so_follower/so_follower.py
new file mode 100644
index 0000000000000000000000000000000000000000..1060b1ef892431666ca2ff6cd697fc1ca1660c32
--- /dev/null
+++ b/lerobot/src/lerobot/robots/so_follower/so_follower.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python
+
+# Copyright 2026 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from functools import cached_property
+from typing import TypeAlias
+
+from lerobot.cameras.utils import make_cameras_from_configs
+from lerobot.motors import Motor, MotorCalibration, MotorNormMode
+from lerobot.motors.feetech import (
+    FeetechMotorsBus,
+    OperatingMode,
+)
+from lerobot.processor import RobotAction, RobotObservation
+from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
+
+from ..robot import Robot
+from ..utils import ensure_safe_goal_position
+from .config_so_follower import SOFollowerRobotConfig
+
+logger = logging.getLogger(__name__)
+
+
+class SOFollower(Robot):
+    """
+    Generic SO follower base implementing common functionality for SO-100/101/10X.
+    Designed to be subclassed with a per-hardware-model `config_class` and `name`.
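+
+    Example (a minimal usage sketch; the port and id values are illustrative):
+        ```python
+        from lerobot.robots.so_follower.config_so_follower import SOFollowerRobotConfig
+        from lerobot.robots.so_follower.so_follower import SOFollower
+
+        config = SOFollowerRobotConfig(port="/dev/ttyACM0", id="my_follower")
+        robot = SOFollower(config)
+        robot.connect()
+
+        obs = robot.get_observation()
+        # Nudge one joint by 5 normalized units (degrees if use_degrees=True)
+        robot.send_action({"shoulder_pan.pos": obs["shoulder_pan.pos"] + 5.0})
+
+        robot.disconnect()
+        ```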
+ """ + + config_class = SOFollowerRobotConfig + name = "so_follower" + + def __init__(self, config: SOFollowerRobotConfig): + super().__init__(config) + self.config = config + # choose normalization mode depending on config if available + norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100 + self.bus = FeetechMotorsBus( + port=self.config.port, + motors={ + "shoulder_pan": Motor(1, "sts3215", norm_mode_body), + "shoulder_lift": Motor(2, "sts3215", norm_mode_body), + "elbow_flex": Motor(3, "sts3215", norm_mode_body), + "wrist_flex": Motor(4, "sts3215", norm_mode_body), + "wrist_roll": Motor(5, "sts3215", norm_mode_body), + "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100), + }, + calibration=self.calibration, + ) + self.cameras = make_cameras_from_configs(config.cameras) + + @property + def _motors_ft(self) -> dict[str, type]: + return {f"{motor}.pos": float for motor in self.bus.motors} + + @property + def _cameras_ft(self) -> dict[str, tuple]: + return { + cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras + } + + @cached_property + def observation_features(self) -> dict[str, type | tuple]: + return {**self._motors_ft, **self._cameras_ft} + + @cached_property + def action_features(self) -> dict[str, type]: + return self._motors_ft + + @property + def is_connected(self) -> bool: + return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values()) + + @check_if_already_connected + def connect(self, calibrate: bool = True) -> None: + """ + We assume that at connection time, arm is in a rest position, + and torque can be safely disabled to run calibration. + """ + + self.bus.connect() + if not self.is_calibrated and calibrate: + logger.info( + "Mismatch between calibration values in the motor and the calibration file or no calibration file found" + ) + self.calibrate() + + for cam in self.cameras.values(): + cam.connect() + + self.configure() + logger.info(f"{self} connected.") + + @property + def is_calibrated(self) -> bool: + return self.bus.is_calibrated + + def calibrate(self) -> None: + if self.calibration: + # Calibration file exists, ask user whether to use it or run new calibration + user_input = input( + f"Press ENTER to use provided calibration file associated with the id {self.id}, or type 'c' and press ENTER to run calibration: " + ) + if user_input.strip().lower() != "c": + logger.info(f"Writing calibration file associated with the id {self.id} to the motors") + self.bus.write_calibration(self.calibration) + return + + logger.info(f"\nRunning calibration of {self}") + self.bus.disable_torque() + for motor in self.bus.motors: + self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value) + + input(f"Move {self} to the middle of its range of motion and press ENTER....") + homing_offsets = self.bus.set_half_turn_homings() + + # Attempt to call record_ranges_of_motion with a reduced motor set when appropriate. + full_turn_motor = "wrist_roll" + unknown_range_motors = [motor for motor in self.bus.motors if motor != full_turn_motor] + print( + f"Move all joints except '{full_turn_motor}' sequentially through their " + "entire ranges of motion.\nRecording positions. Press ENTER to stop..." 
+        )
+        range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
+        range_mins[full_turn_motor] = 0
+        range_maxes[full_turn_motor] = 4095
+
+        self.calibration = {}
+        for motor, m in self.bus.motors.items():
+            self.calibration[motor] = MotorCalibration(
+                id=m.id,
+                drive_mode=0,
+                homing_offset=homing_offsets[motor],
+                range_min=range_mins[motor],
+                range_max=range_maxes[motor],
+            )
+
+        self.bus.write_calibration(self.calibration)
+        self._save_calibration()
+        print("Calibration saved to", self.calibration_fpath)
+
+    def configure(self) -> None:
+        with self.bus.torque_disabled():
+            self.bus.configure_motors()
+            for motor in self.bus.motors:
+                self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+                # Set P_Coefficient to a lower value to avoid shakiness (default is 32)
+                self.bus.write("P_Coefficient", motor, 16)
+                # Set I_Coefficient and D_Coefficient to their default values 0 and 32
+                self.bus.write("I_Coefficient", motor, 0)
+                self.bus.write("D_Coefficient", motor, 32)
+
+                if motor == "gripper":
+                    self.bus.write("Max_Torque_Limit", motor, 500)  # 50% of max torque to avoid burnout
+                    self.bus.write("Protection_Current", motor, 250)  # 50% of max current to avoid burnout
+                    self.bus.write("Overload_Torque", motor, 25)  # 25% torque when overloaded
+
+    def setup_motors(self) -> None:
+        for motor in reversed(self.bus.motors):
+            input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+            self.bus.setup_motor(motor)
+            print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+    @check_if_not_connected
+    def get_observation(self) -> RobotObservation:
+        # Read arm position
+        start = time.perf_counter()
+        obs_dict = self.bus.sync_read("Present_Position")
+        obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
+        dt_ms = (time.perf_counter() - start) * 1e3
+        logger.debug(f"{self} read state: {dt_ms:.1f}ms")
+
+        # Capture images from cameras
+        for cam_key, cam in self.cameras.items():
+            start = time.perf_counter()
+            obs_dict[cam_key] = cam.async_read()
+            dt_ms = (time.perf_counter() - start) * 1e3
+            logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
+
+        return obs_dict
+
+    @check_if_not_connected
+    def send_action(self, action: RobotAction) -> RobotAction:
+        """Command arm to move to a target joint configuration.
+
+        The relative action magnitude may be clipped depending on the configuration parameter
+        `max_relative_target`. In that case, the action sent differs from the original action.
+        This function therefore always returns the action actually sent.
+
+        Raises:
+            DeviceNotConnectedError: if the robot is not connected.
+
+        Returns:
+            RobotAction: the action sent to the motors, potentially clipped.
+        """
+
+        goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}
+
+        # Cap goal position when too far away from present position.
+        # /!\ Slower FPS expected due to reading from the follower.
+ if self.config.max_relative_target is not None: + present_pos = self.bus.sync_read("Present_Position") + goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()} + goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target) + + # Send goal position to the arm + self.bus.sync_write("Goal_Position", goal_pos) + return {f"{motor}.pos": val for motor, val in goal_pos.items()} + + @check_if_not_connected + def disconnect(self): + self.bus.disconnect(self.config.disable_torque_on_disconnect) + for cam in self.cameras.values(): + cam.disconnect() + + logger.info(f"{self} disconnected.") + + +SO100Follower: TypeAlias = SOFollower +SO101Follower: TypeAlias = SOFollower diff --git a/lerobot/src/lerobot/robots/unitree_g1/__init__.py b/lerobot/src/lerobot/robots/unitree_g1/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7feb128bed5c2a8b57e6c33798dcbdb8b53c1229 --- /dev/null +++ b/lerobot/src/lerobot/robots/unitree_g1/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .config_unitree_g1 import UnitreeG1Config +from .unitree_g1 import UnitreeG1 diff --git a/lerobot/src/lerobot/robots/unitree_g1/config_unitree_g1.py b/lerobot/src/lerobot/robots/unitree_g1/config_unitree_g1.py new file mode 100644 index 0000000000000000000000000000000000000000..bdd2bfa9be06581e77d6dc7f2846326d0504e3e9 --- /dev/null +++ b/lerobot/src/lerobot/robots/unitree_g1/config_unitree_g1.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from dataclasses import dataclass, field
+
+from lerobot.cameras import CameraConfig
+
+from ..config import RobotConfig
+
+_GAINS: dict[str, dict[str, list[float]]] = {
+    "left_leg": {
+        "kp": [150, 150, 150, 300, 40, 40],
+        "kd": [2, 2, 2, 4, 2, 2],
+    },  # pitch, roll, yaw, knee, ankle_pitch, ankle_roll
+    "right_leg": {"kp": [150, 150, 150, 300, 40, 40], "kd": [2, 2, 2, 4, 2, 2]},
+    "waist": {"kp": [250, 250, 250], "kd": [5, 5, 5]},  # yaw, roll, pitch
+    "left_arm": {"kp": [80, 80, 80, 80], "kd": [3, 3, 3, 3]},  # shoulder_pitch/roll/yaw, elbow
+    "left_wrist": {"kp": [40, 40, 40], "kd": [1.5, 1.5, 1.5]},  # roll, pitch, yaw
+    "right_arm": {"kp": [80, 80, 80, 80], "kd": [3, 3, 3, 3]},
+    "right_wrist": {"kp": [40, 40, 40], "kd": [1.5, 1.5, 1.5]},
+    "other": {"kp": [80, 80, 80, 80, 80, 80], "kd": [3, 3, 3, 3, 3, 3]},
+}
+
+
+def _build_gains() -> tuple[list[float], list[float]]:
+    """Build kp and kd lists from body-part groupings."""
+    kp = [v for g in _GAINS.values() for v in g["kp"]]
+    kd = [v for g in _GAINS.values() for v in g["kd"]]
+    return kp, kd
+
+
+_DEFAULT_KP, _DEFAULT_KD = _build_gains()
+
+
+@RobotConfig.register_subclass("unitree_g1")
+@dataclass
+class UnitreeG1Config(RobotConfig):
+    kp: list[float] = field(default_factory=lambda: _DEFAULT_KP.copy())
+    kd: list[float] = field(default_factory=lambda: _DEFAULT_KD.copy())
+
+    # Default joint positions
+    default_positions: list[float] = field(default_factory=lambda: [0.0] * 29)
+
+    # Control loop timestep
+    control_dt: float = 1.0 / 250.0  # 250Hz
+
+    # Launch mujoco simulation
+    is_simulation: bool = True
+
+    # Socket config for ZMQ bridge
+    robot_ip: str = "192.168.123.164"  # default G1 IP
+
+    # Cameras (ZMQ-based remote cameras)
+    cameras: dict[str, CameraConfig] = field(default_factory=dict)
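Since the per-group gains are flattened in declaration order, the resulting lists line up with the DDS motor indexing rather than with the 29 actuated joints. A quick sanity-check sketch (assuming the base `RobotConfig` fields all have defaults):

```python
# Group sizes: 6 + 6 + 3 + 4 + 3 + 4 + 3 + 6 = 35 DDS motor slots.
from lerobot.robots.unitree_g1.config_unitree_g1 import UnitreeG1Config

config = UnitreeG1Config(is_simulation=True)
assert len(config.kp) == len(config.kd) == 35
assert len(config.default_positions) == 29  # the 29 actuated joints
```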
diff --git a/lerobot/src/lerobot/robots/unitree_g1/g1_utils.py b/lerobot/src/lerobot/robots/unitree_g1/g1_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa0e637b18e372ddd58aff11960b84caa0c2bc8d
--- /dev/null
+++ b/lerobot/src/lerobot/robots/unitree_g1/g1_utils.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import IntEnum
+
+# ruff: noqa: N801, N815
+
+NUM_MOTORS = 35
+
+
+class G1_29_JointArmIndex(IntEnum):
+    # Left arm
+    kLeftShoulderPitch = 15
+    kLeftShoulderRoll = 16
+    kLeftShoulderYaw = 17
+    kLeftElbow = 18
+    kLeftWristRoll = 19
+    kLeftWristPitch = 20
+    kLeftWristYaw = 21
+
+    # Right arm
+    kRightShoulderPitch = 22
+    kRightShoulderRoll = 23
+    kRightShoulderYaw = 24
+    kRightElbow = 25
+    kRightWristRoll = 26
+    kRightWristPitch = 27
+    kRightWristYaw = 28
+
+
+class G1_29_JointIndex(IntEnum):
+    # Left leg
+    kLeftHipPitch = 0
+    kLeftHipRoll = 1
+    kLeftHipYaw = 2
+    kLeftKnee = 3
+    kLeftAnklePitch = 4
+    kLeftAnkleRoll = 5
+
+    # Right leg
+    kRightHipPitch = 6
+    kRightHipRoll = 7
+    kRightHipYaw = 8
+    kRightKnee = 9
+    kRightAnklePitch = 10
+    kRightAnkleRoll = 11
+
+    kWaistYaw = 12
+    kWaistRoll = 13
+    kWaistPitch = 14
+
+    # Left arm
+    kLeftShoulderPitch = 15
+    kLeftShoulderRoll = 16
+    kLeftShoulderYaw = 17
+    kLeftElbow = 18
+    kLeftWristRoll = 19
+    kLeftWristPitch = 20
+    kLeftWristYaw = 21
+
+    # Right arm
+    kRightShoulderPitch = 22
+    kRightShoulderRoll = 23
+    kRightShoulderYaw = 24
+    kRightElbow = 25
+    kRightWristRoll = 26
+    kRightWristPitch = 27
+    kRightWristYaw = 28
diff --git a/lerobot/src/lerobot/robots/unitree_g1/run_g1_server.py b/lerobot/src/lerobot/robots/unitree_g1/run_g1_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..8406d0cd60e30cd227a1f517d5587e0fbce6c755
--- /dev/null
+++ b/lerobot/src/lerobot/robots/unitree_g1/run_g1_server.py
@@ -0,0 +1,212 @@
+#!/usr/bin/env python3
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+DDS-to-ZMQ bridge server for Unitree G1 robot.
+
+This server runs on the robot and forwards:
+- Robot state (LowState) from DDS to ZMQ (for remote clients)
+- Robot commands (LowCmd) from ZMQ to DDS (from remote clients)
+
+Uses JSON for secure serialization instead of pickle.
+""" + +import base64 +import contextlib +import json +import threading +import time +from typing import Any + +import zmq +from unitree_sdk2py.comm.motion_switcher.motion_switcher_client import MotionSwitcherClient +from unitree_sdk2py.core.channel import ChannelFactoryInitialize, ChannelPublisher, ChannelSubscriber +from unitree_sdk2py.idl.default import unitree_hg_msg_dds__LowCmd_ +from unitree_sdk2py.idl.unitree_hg.msg.dds_ import LowCmd_ as hg_LowCmd, LowState_ as hg_LowState +from unitree_sdk2py.utils.crc import CRC + +# DDS topic names follow Unitree SDK naming conventions +# ruff: noqa: N816 +kTopicLowCommand_Debug = "rt/lowcmd" # action to robot +kTopicLowState = "rt/lowstate" # observation from robot + +LOWCMD_PORT = 6000 +LOWSTATE_PORT = 6001 +NUM_MOTORS = 35 + + +def lowstate_to_dict(msg: hg_LowState) -> dict[str, Any]: + """Convert LowState SDK message to a JSON-serializable dictionary.""" + motor_states = [] + for i in range(NUM_MOTORS): + temp = msg.motor_state[i].temperature + avg_temp = float(sum(temp) / len(temp)) if isinstance(temp, list) else float(temp) + motor_states.append( + { + "q": float(msg.motor_state[i].q), + "dq": float(msg.motor_state[i].dq), + "tau_est": float(msg.motor_state[i].tau_est), + "temperature": avg_temp, + } + ) + + return { + "motor_state": motor_states, + "imu_state": { + "quaternion": [float(x) for x in msg.imu_state.quaternion], + "gyroscope": [float(x) for x in msg.imu_state.gyroscope], + "accelerometer": [float(x) for x in msg.imu_state.accelerometer], + "rpy": [float(x) for x in msg.imu_state.rpy], + "temperature": float(msg.imu_state.temperature), + }, + # Encode bytes as base64 for JSON compatibility + "wireless_remote": base64.b64encode(bytes(msg.wireless_remote)).decode("ascii"), + "mode_machine": int(msg.mode_machine), + } + + +def dict_to_lowcmd(data: dict[str, Any]) -> hg_LowCmd: + """Convert dictionary back to LowCmd SDK message.""" + cmd = unitree_hg_msg_dds__LowCmd_() + cmd.mode_pr = data.get("mode_pr", 0) + cmd.mode_machine = data.get("mode_machine", 0) + + for i, motor_data in enumerate(data.get("motor_cmd", [])): + cmd.motor_cmd[i].mode = motor_data.get("mode", 0) + cmd.motor_cmd[i].q = motor_data.get("q", 0.0) + cmd.motor_cmd[i].dq = motor_data.get("dq", 0.0) + cmd.motor_cmd[i].kp = motor_data.get("kp", 0.0) + cmd.motor_cmd[i].kd = motor_data.get("kd", 0.0) + cmd.motor_cmd[i].tau = motor_data.get("tau", 0.0) + + return cmd + + +def state_forward_loop( + lowstate_sub: ChannelSubscriber, + lowstate_sock: zmq.Socket, + state_period: float, + shutdown_event: threading.Event, +) -> None: + """Read observation from DDS and forward to ZMQ clients.""" + last_state_time = 0.0 + + while not shutdown_event.is_set(): + # read from DDS + msg = lowstate_sub.Read() + if msg is None: + continue + + now = time.time() + # optional downsampling (if robot dds rate > state_period) + if now - last_state_time >= state_period: + # Convert to dict and serialize with JSON + state_dict = lowstate_to_dict(msg) + payload = json.dumps({"topic": kTopicLowState, "data": state_dict}).encode("utf-8") + # if no subscribers / tx buffer full, just drop + with contextlib.suppress(zmq.Again): + lowstate_sock.send(payload, zmq.NOBLOCK) + last_state_time = now + + +def cmd_forward_loop( + lowcmd_sock: zmq.Socket, + lowcmd_pub_debug: ChannelPublisher, + crc: CRC, +) -> None: + """Receive commands from ZMQ and forward to DDS.""" + while True: + try: + payload = lowcmd_sock.recv() + except zmq.ContextTerminated: + break + msg_dict = json.loads(payload.decode("utf-8")) + 
+        topic = msg_dict.get("topic", "")
+        cmd_data = msg_dict.get("data", {})
+
+        # Reconstruct LowCmd object from dict
+        cmd = dict_to_lowcmd(cmd_data)
+
+        # recompute crc
+        cmd.crc = crc.Crc(cmd)
+
+        if topic == kTopicLowCommand_Debug:
+            lowcmd_pub_debug.Write(cmd)
+
+
+def main() -> None:
+    """Main entry point for the robot server bridge."""
+    # initialize DDS
+    ChannelFactoryInitialize(0)
+
+    # stop all active publishers on the robot
+    msc = MotionSwitcherClient()
+    msc.SetTimeout(5.0)
+    msc.Init()
+
+    _, result = msc.CheckMode()
+    while result is not None and "name" in result and result["name"]:
+        msc.ReleaseMode()
+        _, result = msc.CheckMode()
+        time.sleep(1.0)
+
+    crc = CRC()
+
+    # initialize DDS publisher
+    lowcmd_pub_debug = ChannelPublisher(kTopicLowCommand_Debug, hg_LowCmd)
+    lowcmd_pub_debug.Init()
+
+    # initialize DDS subscriber
+    lowstate_sub = ChannelSubscriber(kTopicLowState, hg_LowState)
+    lowstate_sub.Init()
+
+    # initialize ZMQ
+    ctx = zmq.Context.instance()
+
+    # receive commands from remote client
+    lowcmd_sock = ctx.socket(zmq.PULL)
+    lowcmd_sock.bind(f"tcp://0.0.0.0:{LOWCMD_PORT}")
+
+    # publish state to remote clients
+    lowstate_sock = ctx.socket(zmq.PUB)
+    lowstate_sock.bind(f"tcp://0.0.0.0:{LOWSTATE_PORT}")
+
+    state_period = 0.002  # ~500 Hz
+    shutdown_event = threading.Event()
+
+    # start observation forwarding in background thread
+    t_state = threading.Thread(
+        target=state_forward_loop,
+        args=(lowstate_sub, lowstate_sock, state_period, shutdown_event),
+    )
+    t_state.start()
+
+    print("bridge running (lowstate -> zmq, lowcmd -> dds)")
+
+    # run command forwarding in main thread
+    try:
+        cmd_forward_loop(lowcmd_sock, lowcmd_pub_debug, crc)
+    except KeyboardInterrupt:
+        print("shutting down bridge...")
+    finally:
+        shutdown_event.set()
+        ctx.term()  # terminates blocking zmq.recv() calls
+        t_state.join(timeout=2.0)
+
+
+if __name__ == "__main__":
+    main()
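The wire format is plain JSON over two ZMQ sockets, so any client can talk to the bridge. A minimal sketch that reads one state message (the IP is the `UnitreeG1Config` default; adjust to your robot):

```python
# Minimal bridge-client sketch; the ports and IP match the defaults above.
import json

import zmq

ctx = zmq.Context.instance()
sock = ctx.socket(zmq.SUB)
sock.setsockopt(zmq.CONFLATE, 1)  # keep only the latest state
sock.connect("tcp://192.168.123.164:6001")  # LOWSTATE_PORT
sock.setsockopt_string(zmq.SUBSCRIBE, "")

msg = json.loads(sock.recv().decode("utf-8"))
assert msg["topic"] == "rt/lowstate"
print(msg["data"]["imu_state"]["rpy"])  # [roll, pitch, yaw] in radians
```

diff --git a/lerobot/src/lerobot/robots/unitree_g1/unitree_g1.py b/lerobot/src/lerobot/robots/unitree_g1/unitree_g1.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cc9e8b560afad0de08869607d837888942d980f
--- /dev/null
+++ b/lerobot/src/lerobot/robots/unitree_g1/unitree_g1.py
@@ -0,0 +1,432 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.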
+ +import logging +import struct +import threading +import time +from dataclasses import dataclass, field +from functools import cached_property +from typing import Any + +import numpy as np + +from lerobot.cameras.utils import make_cameras_from_configs +from lerobot.envs.factory import make_env +from lerobot.processor import RobotAction, RobotObservation +from lerobot.robots.unitree_g1.g1_utils import G1_29_JointIndex + +from ..robot import Robot +from .config_unitree_g1 import UnitreeG1Config + +logger = logging.getLogger(__name__) + +# DDS topic names follow Unitree SDK naming conventions +# ruff: noqa: N816 +kTopicLowCommand_Debug = "rt/lowcmd" +kTopicLowState = "rt/lowstate" + + +@dataclass +class MotorState: + q: float | None = None # position + dq: float | None = None # velocity + tau_est: float | None = None # estimated torque + temperature: float | None = None # motor temperature + + +@dataclass +class IMUState: + quaternion: np.ndarray | None = None # [w, x, y, z] + gyroscope: np.ndarray | None = None # [x, y, z] angular velocity (rad/s) + accelerometer: np.ndarray | None = None # [x, y, z] linear acceleration (m/s²) + rpy: np.ndarray | None = None # [roll, pitch, yaw] (rad) + temperature: float | None = None # IMU temperature + + +# g1 observation class +@dataclass +class G1_29_LowState: # noqa: N801 + motor_state: list[MotorState] = field(default_factory=lambda: [MotorState() for _ in G1_29_JointIndex]) + imu_state: IMUState = field(default_factory=IMUState) + wireless_remote: Any = None # Raw wireless remote data + mode_machine: int = 0 # Robot mode + + +class UnitreeG1(Robot): + config_class = UnitreeG1Config + name = "unitree_g1" + + # unitree remote controller + class RemoteController: + def __init__(self): + self.lx = 0 + self.ly = 0 + self.rx = 0 + self.ry = 0 + self.button = [0] * 16 + + def set(self, data): + # wireless_remote + keys = struct.unpack("H", data[2:4])[0] + for i in range(16): + self.button[i] = (keys & (1 << i)) >> i + self.lx = struct.unpack("f", data[4:8])[0] + self.rx = struct.unpack("f", data[8:12])[0] + self.ry = struct.unpack("f", data[12:16])[0] + self.ly = struct.unpack("f", data[20:24])[0] + + def __init__(self, config: UnitreeG1Config): + super().__init__(config) + + logger.info("Initialize UnitreeG1...") + + self.config = config + self.control_dt = config.control_dt + + # Initialize cameras config (ZMQ-based) - actual connection in connect() + self._cameras = make_cameras_from_configs(config.cameras) + + # Import channel classes based on mode + if config.is_simulation: + from unitree_sdk2py.core.channel import ( + ChannelFactoryInitialize, + ChannelPublisher, + ChannelSubscriber, + ) + else: + from lerobot.robots.unitree_g1.unitree_sdk2_socket import ( + ChannelFactoryInitialize, + ChannelPublisher, + ChannelSubscriber, + ) + + # Store for use in connect() + self._ChannelFactoryInitialize = ChannelFactoryInitialize + self._ChannelPublisher = ChannelPublisher + self._ChannelSubscriber = ChannelSubscriber + + # Initialize state variables + self.sim_env = None + self._env_wrapper = None + self._lowstate = None + self._shutdown_event = threading.Event() + self.subscribe_thread = None + self.remote_controller = self.RemoteController() + + def _subscribe_motor_state(self): # polls robot state @ 250Hz + while not self._shutdown_event.is_set(): + start_time = time.time() + + # Step simulation if in simulation mode + if self.config.is_simulation and self.sim_env is not None: + self.sim_env.step() + + msg = self.lowstate_subscriber.Read() + if msg is not 
None:
+                lowstate = G1_29_LowState()
+
+                # Capture motor states using the joint index
+                for joint in G1_29_JointIndex:
+                    lowstate.motor_state[joint].q = msg.motor_state[joint].q
+                    lowstate.motor_state[joint].dq = msg.motor_state[joint].dq
+                    lowstate.motor_state[joint].tau_est = msg.motor_state[joint].tau_est
+                    lowstate.motor_state[joint].temperature = msg.motor_state[joint].temperature
+
+                # Capture IMU state
+                lowstate.imu_state.quaternion = list(msg.imu_state.quaternion)
+                lowstate.imu_state.gyroscope = list(msg.imu_state.gyroscope)
+                lowstate.imu_state.accelerometer = list(msg.imu_state.accelerometer)
+                lowstate.imu_state.rpy = list(msg.imu_state.rpy)
+                lowstate.imu_state.temperature = msg.imu_state.temperature
+
+                # Capture wireless remote data
+                lowstate.wireless_remote = msg.wireless_remote
+
+                # Capture mode_machine
+                lowstate.mode_machine = msg.mode_machine
+
+                self._lowstate = lowstate
+
+            current_time = time.time()
+            all_t_elapsed = current_time - start_time
+            sleep_time = max(0, (self.control_dt - all_t_elapsed))  # maintain constant control dt
+            time.sleep(sleep_time)
+
+    @cached_property
+    def action_features(self) -> dict[str, type]:
+        return {f"{G1_29_JointIndex(motor).name}.q": float for motor in G1_29_JointIndex}
+
+    def calibrate(self) -> None:  # robot is already calibrated
+        pass
+
+    def configure(self) -> None:
+        pass
+
+    def connect(self, calibrate: bool = True) -> None:  # connect to DDS
+        from unitree_sdk2py.idl.default import unitree_hg_msg_dds__LowCmd_
+        from unitree_sdk2py.idl.unitree_hg.msg.dds_ import (
+            LowCmd_ as hg_LowCmd,
+            LowState_ as hg_LowState,
+        )
+        from unitree_sdk2py.utils.crc import CRC
+
+        # Initialize DDS channel and simulation environment
+        if self.config.is_simulation:
+            self._ChannelFactoryInitialize(0, "lo")
+            self._env_wrapper = make_env("lerobot/unitree-g1-mujoco", trust_remote_code=True)
+            # Extract the actual gym env from the dict structure
+            self.sim_env = self._env_wrapper["hub_env"][0].envs[0]
+        else:
+            self._ChannelFactoryInitialize(0)
+
+        # Initialize direct motor control interface
+        self.lowcmd_publisher = self._ChannelPublisher(kTopicLowCommand_Debug, hg_LowCmd)
+        self.lowcmd_publisher.Init()
+        self.lowstate_subscriber = self._ChannelSubscriber(kTopicLowState, hg_LowState)
+        self.lowstate_subscriber.Init()
+
+        # Start subscribe thread to read robot state
+        self.subscribe_thread = threading.Thread(target=self._subscribe_motor_state)
+        self.subscribe_thread.start()
+
+        # Connect cameras
+        for cam in self._cameras.values():
+            if not cam.is_connected:
+                cam.connect()
+
+        logger.info(f"Connected {len(self._cameras)} camera(s).")
+
+        # Initialize lowcmd message
+        self.crc = CRC()
+        self.msg = unitree_hg_msg_dds__LowCmd_()
+        self.msg.mode_pr = 0
+
+        # Wait for the first state message to arrive
+        logger.info("[UnitreeG1] Waiting for robot state...")
+        while self._lowstate is None:
+            time.sleep(0.01)
+        lowstate = self._lowstate
+        logger.info("[UnitreeG1] Connected to robot.")
+        self.msg.mode_machine = lowstate.mode_machine
+
+        # Initialize all motors with unified kp/kd from config
+        self.kp = np.array(self.config.kp, dtype=np.float32)
+        self.kd = np.array(self.config.kd, dtype=np.float32)
+
+        for joint in G1_29_JointIndex:
+            self.msg.motor_cmd[joint].mode = 1
+            self.msg.motor_cmd[joint].kp = self.kp[joint.value]
+            self.msg.motor_cmd[joint].kd = self.kd[joint.value]
+            self.msg.motor_cmd[joint].q = lowstate.motor_state[joint.value].q
+
+    def disconnect(self):
+        # Signal thread to stop and unblock any waits
+        self._shutdown_event.set()
+
+        # Wait for subscribe
thread to finish + if self.subscribe_thread is not None: + self.subscribe_thread.join(timeout=2.0) + if self.subscribe_thread.is_alive(): + logger.warning("Subscribe thread did not stop cleanly") + + # Close simulation environment + if self.config.is_simulation and self.sim_env is not None: + try: + # Force-kill the image publish subprocess first to avoid long waits + if hasattr(self.sim_env, "simulator") and hasattr(self.sim_env.simulator, "sim_env"): + sim_env_inner = self.sim_env.simulator.sim_env + if hasattr(sim_env_inner, "image_publish_process"): + proc = sim_env_inner.image_publish_process + if proc.process and proc.process.is_alive(): + logger.info("Force-terminating image publish subprocess...") + proc.stop_event.set() + proc.process.terminate() + proc.process.join(timeout=1) + if proc.process.is_alive(): + proc.process.kill() + self.sim_env.close() + except Exception as e: + logger.warning(f"Error closing sim_env: {e}") + self.sim_env = None + self._env_wrapper = None + + # Disconnect cameras + for cam in self._cameras.values(): + cam.disconnect() + + def get_observation(self) -> RobotObservation: + lowstate = self._lowstate + if lowstate is None: + return {} + + obs = {} + + # Motors - q, dq, tau for all joints + for motor in G1_29_JointIndex: + name = motor.name + idx = motor.value + obs[f"{name}.q"] = lowstate.motor_state[idx].q + obs[f"{name}.dq"] = lowstate.motor_state[idx].dq + obs[f"{name}.tau"] = lowstate.motor_state[idx].tau_est + + # IMU - gyroscope + if lowstate.imu_state.gyroscope: + obs["imu.gyro.x"] = lowstate.imu_state.gyroscope[0] + obs["imu.gyro.y"] = lowstate.imu_state.gyroscope[1] + obs["imu.gyro.z"] = lowstate.imu_state.gyroscope[2] + + # IMU - accelerometer + if lowstate.imu_state.accelerometer: + obs["imu.accel.x"] = lowstate.imu_state.accelerometer[0] + obs["imu.accel.y"] = lowstate.imu_state.accelerometer[1] + obs["imu.accel.z"] = lowstate.imu_state.accelerometer[2] + + # IMU - quaternion + if lowstate.imu_state.quaternion: + obs["imu.quat.w"] = lowstate.imu_state.quaternion[0] + obs["imu.quat.x"] = lowstate.imu_state.quaternion[1] + obs["imu.quat.y"] = lowstate.imu_state.quaternion[2] + obs["imu.quat.z"] = lowstate.imu_state.quaternion[3] + + # IMU - rpy + if lowstate.imu_state.rpy: + obs["imu.rpy.roll"] = lowstate.imu_state.rpy[0] + obs["imu.rpy.pitch"] = lowstate.imu_state.rpy[1] + obs["imu.rpy.yaw"] = lowstate.imu_state.rpy[2] + + # Controller - parse wireless_remote and add to obs + if lowstate.wireless_remote and len(lowstate.wireless_remote) >= 24: + self.remote_controller.set(lowstate.wireless_remote) + obs["remote.buttons"] = self.remote_controller.button.copy() + obs["remote.lx"] = self.remote_controller.lx + obs["remote.ly"] = self.remote_controller.ly + obs["remote.rx"] = self.remote_controller.rx + obs["remote.ry"] = self.remote_controller.ry + + # Cameras - read images from ZMQ cameras + for cam_name, cam in self._cameras.items(): + obs[cam_name] = cam.async_read() + + return obs + + @property + def is_calibrated(self) -> bool: + return True + + @property + def is_connected(self) -> bool: + return self._lowstate is not None + + @property + def _motors_ft(self) -> dict[str, type]: + return {f"{G1_29_JointIndex(motor).name}.q": float for motor in G1_29_JointIndex} + + @property + def cameras(self) -> dict: + return self._cameras + + @property + def _cameras_ft(self) -> dict[str, tuple]: + return { + cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras + } + + @cached_property + def 
observation_features(self) -> dict[str, type | tuple]:
+        return {**self._motors_ft, **self._cameras_ft}
+
+    def send_action(self, action: RobotAction) -> RobotAction:
+        for motor in G1_29_JointIndex:
+            key = f"{motor.name}.q"
+            if key in action:
+                self.msg.motor_cmd[motor.value].q = action[key]
+                # NOTE: the LowCmd motor command field is `dq` (see dict_to_lowcmd above)
+                self.msg.motor_cmd[motor.value].dq = 0
+                self.msg.motor_cmd[motor.value].kp = self.kp[motor.value]
+                self.msg.motor_cmd[motor.value].kd = self.kd[motor.value]
+                self.msg.motor_cmd[motor.value].tau = 0
+
+        self.msg.crc = self.crc.Crc(self.msg)
+        self.lowcmd_publisher.Write(self.msg)
+        return action
+
+    def get_gravity_orientation(self, quaternion):
+        """Get gravity orientation from quaternion."""
+        qw = quaternion[0]
+        qx = quaternion[1]
+        qy = quaternion[2]
+        qz = quaternion[3]
+
+        gravity_orientation = np.zeros(3)
+        gravity_orientation[0] = 2 * (-qz * qx + qw * qy)
+        gravity_orientation[1] = -2 * (qz * qy + qw * qx)
+        gravity_orientation[2] = 1 - 2 * (qw * qw + qz * qz)
+        return gravity_orientation
+
+    def reset(
+        self,
+        control_dt: float | None = None,
+        default_positions: list[float] | None = None,
+    ) -> None:  # move robot to default position
+        if control_dt is None:
+            control_dt = self.config.control_dt
+        if default_positions is None:
+            default_positions = np.array(self.config.default_positions, dtype=np.float32)
+
+        if self.config.is_simulation and self.sim_env is not None:
+            self.sim_env.reset()
+
+            for motor in G1_29_JointIndex:
+                self.msg.motor_cmd[motor.value].q = default_positions[motor.value]
+                self.msg.motor_cmd[motor.value].dq = 0
+                self.msg.motor_cmd[motor.value].kp = self.kp[motor.value]
+                self.msg.motor_cmd[motor.value].kd = self.kd[motor.value]
+                self.msg.motor_cmd[motor.value].tau = 0
+            self.msg.crc = self.crc.Crc(self.msg)
+            self.lowcmd_publisher.Write(self.msg)
+        else:
+            total_time = 3.0
+            num_steps = int(total_time / control_dt)
+
+            # get current state
+            obs = self.get_observation()
+
+            # record current positions
+            init_dof_pos = np.zeros(29, dtype=np.float32)
+            for motor in G1_29_JointIndex:
+                init_dof_pos[motor.value] = obs[f"{motor.name}.q"]
+
+            # Interpolate to default position
+            for step in range(num_steps):
+                start_time = time.time()
+
+                alpha = (step + 1) / num_steps  # reach the target exactly on the last step
+                action_dict = {}
+                for motor in G1_29_JointIndex:
+                    target_pos = default_positions[motor.value]
+                    interp_pos = init_dof_pos[motor.value] * (1 - alpha) + target_pos * alpha
+                    action_dict[f"{motor.name}.q"] = float(interp_pos)
+
+                self.send_action(action_dict)
+
+                # Maintain constant control rate
+                elapsed = time.time() - start_time
+                sleep_time = max(0, control_dt - elapsed)
+                time.sleep(sleep_time)
+
+        logger.info("Reached default position")
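A hedged end-to-end sketch of driving the robot class above in simulation mode (assumes the MuJoCo env dependency is installed; joint key names come from `G1_29_JointIndex`):

```python
# Sketch only; set is_simulation=False plus robot_ip for real hardware.
from lerobot.robots.unitree_g1 import UnitreeG1, UnitreeG1Config

config = UnitreeG1Config(is_simulation=True)
robot = UnitreeG1(config)
robot.connect()

robot.reset()  # interpolate to the default joint positions
obs = robot.get_observation()

# Hold the current left elbow position; action keys follow G1_29_JointIndex names
robot.send_action({"kLeftElbow.q": obs["kLeftElbow.q"]})
robot.disconnect()
```

diff --git a/lerobot/src/lerobot/robots/unitree_g1/unitree_sdk2_socket.py b/lerobot/src/lerobot/robots/unitree_g1/unitree_sdk2_socket.py
new file mode 100644
index 0000000000000000000000000000000000000000..ede193dfb0a696830bc875ee0824c818700ca36a
--- /dev/null
+++ b/lerobot/src/lerobot/robots/unitree_g1/unitree_sdk2_socket.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.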
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import json +from typing import Any + +import zmq + +from lerobot.robots.unitree_g1.config_unitree_g1 import UnitreeG1Config + +_ctx: zmq.Context | None = None +_lowcmd_sock: zmq.Socket | None = None +_lowstate_sock: zmq.Socket | None = None + +LOWCMD_PORT = 6000 +LOWSTATE_PORT = 6001 + +# DDS topic names follow Unitree SDK naming conventions +# ruff: noqa: N816 +kTopicLowCommand_Debug = "rt/lowcmd" + + +class LowStateMsg: + """ + Wrapper class that mimics the Unitree SDK LowState_ message structure. + + Reconstructs the message from deserialized JSON data to maintain + compatibility with existing code that expects SDK message objects. + """ + + class MotorState: + """Motor state data for a single joint.""" + + def __init__(self, data: dict[str, Any]) -> None: + self.q: float = data.get("q", 0.0) + self.dq: float = data.get("dq", 0.0) + self.tau_est: float = data.get("tau_est", 0.0) + self.temperature: float = data.get("temperature", 0.0) + + class IMUState: + """IMU sensor data.""" + + def __init__(self, data: dict[str, Any]) -> None: + self.quaternion: list[float] = data.get("quaternion", [1.0, 0.0, 0.0, 0.0]) + self.gyroscope: list[float] = data.get("gyroscope", [0.0, 0.0, 0.0]) + self.accelerometer: list[float] = data.get("accelerometer", [0.0, 0.0, 0.0]) + self.rpy: list[float] = data.get("rpy", [0.0, 0.0, 0.0]) + self.temperature: float = data.get("temperature", 0.0) + + def __init__(self, data: dict[str, Any]) -> None: + """Initialize from deserialized JSON data.""" + self.motor_state = [self.MotorState(m) for m in data.get("motor_state", [])] + self.imu_state = self.IMUState(data.get("imu_state", {})) + # Decode base64-encoded wireless_remote bytes + wireless_b64 = data.get("wireless_remote", "") + self.wireless_remote: bytes = base64.b64decode(wireless_b64) if wireless_b64 else b"" + self.mode_machine: int = data.get("mode_machine", 0) + + +def lowcmd_to_dict(topic: str, msg: Any) -> dict[str, Any]: + """Convert LowCmd message to a JSON-serializable dictionary.""" + motor_cmds = [] + # Iterate over all motor commands in the message + for i in range(len(msg.motor_cmd)): + motor_cmds.append( + { + "mode": int(msg.motor_cmd[i].mode), + "q": float(msg.motor_cmd[i].q), + "dq": float(msg.motor_cmd[i].dq), + "kp": float(msg.motor_cmd[i].kp), + "kd": float(msg.motor_cmd[i].kd), + "tau": float(msg.motor_cmd[i].tau), + } + ) + + return { + "topic": topic, + "data": { + "mode_pr": int(msg.mode_pr), + "mode_machine": int(msg.mode_machine), + "motor_cmd": motor_cmds, + }, + } + + +def ChannelFactoryInitialize(*args: Any, **kwargs: Any) -> None: # noqa: N802 + """ + Initialize ZMQ sockets for robot communication. + + This function mimics the Unitree SDK's ChannelFactoryInitialize but uses + ZMQ sockets to connect to the robot server bridge instead of DDS. 
+ """ + global _ctx, _lowcmd_sock, _lowstate_sock + + # read socket config + config = UnitreeG1Config() + robot_ip = config.robot_ip + + ctx = zmq.Context.instance() + _ctx = ctx + + # lowcmd: send robot commands + lowcmd_sock = ctx.socket(zmq.PUSH) + lowcmd_sock.setsockopt(zmq.CONFLATE, 1) # keep only last message + lowcmd_sock.connect(f"tcp://{robot_ip}:{LOWCMD_PORT}") + _lowcmd_sock = lowcmd_sock + + # lowstate: receive robot observations + lowstate_sock = ctx.socket(zmq.SUB) + lowstate_sock.setsockopt(zmq.CONFLATE, 1) # keep only last message + lowstate_sock.connect(f"tcp://{robot_ip}:{LOWSTATE_PORT}") + lowstate_sock.setsockopt_string(zmq.SUBSCRIBE, "") + _lowstate_sock = lowstate_sock + + +class ChannelPublisher: + """ZMQ-based publisher that sends commands to the robot server.""" + + def __init__(self, topic: str, msg_type: type) -> None: + self.topic = topic + self.msg_type = msg_type + + def Init(self) -> None: # noqa: N802 + """Initialize the publisher (no-op for ZMQ).""" + pass + + def Write(self, msg: Any) -> None: # noqa: N802 + """Serialize and send a command message to the robot.""" + if _lowcmd_sock is None: + raise RuntimeError("ChannelFactoryInitialize must be called first") + + payload = json.dumps(lowcmd_to_dict(self.topic, msg)).encode("utf-8") + _lowcmd_sock.send(payload) + + +class ChannelSubscriber: + """ZMQ-based subscriber that receives state from the robot server.""" + + def __init__(self, topic: str, msg_type: type) -> None: + self.topic = topic + self.msg_type = msg_type + + def Init(self) -> None: # noqa: N802 + """Initialize the subscriber (no-op for ZMQ).""" + pass + + def Read(self) -> LowStateMsg: # noqa: N802 + """Receive and deserialize a state message from the robot.""" + if _lowstate_sock is None: + raise RuntimeError("ChannelFactoryInitialize must be called first") + + payload = _lowstate_sock.recv() + msg_dict = json.loads(payload.decode("utf-8")) + return LowStateMsg(msg_dict.get("data", {})) diff --git a/lerobot/src/lerobot/teleoperators/__init__.py b/lerobot/src/lerobot/teleoperators/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..12f533c34973614f548d432a75a600047f5b42c0 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/__init__.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .config import TeleoperatorConfig +from .teleoperator import Teleoperator +from .utils import TeleopEvents, make_teleoperator_from_config diff --git a/lerobot/src/lerobot/teleoperators/bi_so_leader/__init__.py b/lerobot/src/lerobot/teleoperators/bi_so_leader/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..79bf42811ae859d52c620b0047771c8dfc648fa6 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/bi_so_leader/__init__.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python + +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .bi_so_leader import BiSOLeader, BiSOLeaderConfig diff --git a/lerobot/src/lerobot/teleoperators/bi_so_leader/bi_so_leader.py b/lerobot/src/lerobot/teleoperators/bi_so_leader/bi_so_leader.py new file mode 100644 index 0000000000000000000000000000000000000000..66d6c2ca536618681632ef7b56eee9b110b7cb45 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/bi_so_leader/bi_so_leader.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from functools import cached_property + +from lerobot.teleoperators.so_leader import SOLeaderTeleopConfig +from lerobot.utils.decorators import check_if_not_connected + +from ..so_leader import SOLeader +from ..teleoperator import Teleoperator +from .config_bi_so_leader import BiSOLeaderConfig + +logger = logging.getLogger(__name__) + + +class BiSOLeader(Teleoperator): + """ + [Bimanual SO Leader Arms](https://github.com/TheRobotStudio/SO-ARM100) designed by TheRobotStudio + """ + + config_class = BiSOLeaderConfig + name = "bi_so_leader" + + def __init__(self, config: BiSOLeaderConfig): + super().__init__(config) + self.config = config + + left_arm_config = SOLeaderTeleopConfig( + id=f"{config.id}_left" if config.id else None, + calibration_dir=config.calibration_dir, + port=config.left_arm_config.port, + ) + + right_arm_config = SOLeaderTeleopConfig( + id=f"{config.id}_right" if config.id else None, + calibration_dir=config.calibration_dir, + port=config.right_arm_config.port, + ) + + self.left_arm = SOLeader(left_arm_config) + self.right_arm = SOLeader(right_arm_config) + + @cached_property + def action_features(self) -> dict[str, type]: + left_arm_features = self.left_arm.action_features + right_arm_features = self.right_arm.action_features + + return { + **{f"left_{k}": v for k, v in left_arm_features.items()}, + **{f"right_{k}": v for k, v in right_arm_features.items()}, + } + + @cached_property + def feedback_features(self) -> dict[str, type]: + return {} + + @property + def is_connected(self) -> bool: + return self.left_arm.is_connected and self.right_arm.is_connected + + def connect(self, calibrate: bool = True) -> None: + self.left_arm.connect(calibrate) + self.right_arm.connect(calibrate) + + @property + def is_calibrated(self) -> bool: + return self.left_arm.is_calibrated and self.right_arm.is_calibrated + + def calibrate(self) -> None: + self.left_arm.calibrate() + 
self.right_arm.calibrate()
+
+    def configure(self) -> None:
+        self.left_arm.configure()
+        self.right_arm.configure()
+
+    def setup_motors(self) -> None:
+        self.left_arm.setup_motors()
+        self.right_arm.setup_motors()
+
+    @check_if_not_connected
+    def get_action(self) -> dict[str, float]:
+        action_dict = {}
+
+        # Add "left_" prefix
+        left_action = self.left_arm.get_action()
+        action_dict.update({f"left_{key}": value for key, value in left_action.items()})
+
+        # Add "right_" prefix
+        right_action = self.right_arm.get_action()
+        action_dict.update({f"right_{key}": value for key, value in right_action.items()})
+
+        return action_dict
+
+    def send_feedback(self, feedback: dict[str, float]) -> None:
+        # TODO: Implement force feedback
+        raise NotImplementedError
+
+    def disconnect(self) -> None:
+        self.left_arm.disconnect()
+        self.right_arm.disconnect()
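A short usage sketch for the bimanual leader (the ports are illustrative; `SOLeaderConfig` is assumed to accept a `port` field, as read by `BiSOLeader` above):

```python
# Illustrative ports; keys in the returned action are "left_"/"right_" prefixed.
from lerobot.teleoperators.bi_so_leader import BiSOLeader, BiSOLeaderConfig
from lerobot.teleoperators.so_leader import SOLeaderConfig

config = BiSOLeaderConfig(
    id="bi_leader",
    left_arm_config=SOLeaderConfig(port="/dev/ttyACM0"),
    right_arm_config=SOLeaderConfig(port="/dev/ttyACM1"),
)
teleop = BiSOLeader(config)
teleop.connect()
action = teleop.get_action()  # e.g. {"left_shoulder_pan.pos": ..., "right_gripper.pos": ...}
teleop.disconnect()
```

diff --git a/lerobot/src/lerobot/teleoperators/bi_so_leader/config_bi_so_leader.py b/lerobot/src/lerobot/teleoperators/bi_so_leader/config_bi_so_leader.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebc67dce720cbecdd59dedda714949b8e0f91fc5
--- /dev/null
+++ b/lerobot/src/lerobot/teleoperators/bi_so_leader/config_bi_so_leader.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+
+from lerobot.teleoperators.so_leader import SOLeaderConfig
+
+from ..config import TeleoperatorConfig
+
+
+@TeleoperatorConfig.register_subclass("bi_so_leader")
+@dataclass
+class BiSOLeaderConfig(TeleoperatorConfig):
+    """Configuration class for Bi SO Leader teleoperators."""
+
+    left_arm_config: SOLeaderConfig
+    right_arm_config: SOLeaderConfig
diff --git a/lerobot/src/lerobot/teleoperators/gamepad/__init__.py b/lerobot/src/lerobot/teleoperators/gamepad/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cd2ef3a167f70fac6edb6a178958480bf67d2f4
--- /dev/null
+++ b/lerobot/src/lerobot/teleoperators/gamepad/__init__.py
@@ -0,0 +1,18 @@
+# !/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.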
+ +from .configuration_gamepad import GamepadTeleopConfig +from .teleop_gamepad import GamepadTeleop diff --git a/lerobot/src/lerobot/teleoperators/gamepad/configuration_gamepad.py b/lerobot/src/lerobot/teleoperators/gamepad/configuration_gamepad.py new file mode 100644 index 0000000000000000000000000000000000000000..c89b45c9755969343722fb98ad8b0113e8e73686 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/gamepad/configuration_gamepad.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +from ..config import TeleoperatorConfig + + +@TeleoperatorConfig.register_subclass("gamepad") +@dataclass +class GamepadTeleopConfig(TeleoperatorConfig): + use_gripper: bool = True diff --git a/lerobot/src/lerobot/teleoperators/gamepad/gamepad_utils.py b/lerobot/src/lerobot/teleoperators/gamepad/gamepad_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3bb8d03818d2cf086640e29ae9c0c8c631fc80ba --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/gamepad/gamepad_utils.py @@ -0,0 +1,460 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from ..utils import TeleopEvents + + +class InputController: + """Base class for input controllers that generate motion deltas.""" + + def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0): + """ + Initialize the controller. 
+
+        Args:
+            x_step_size: Step size for movement along the x axis, in meters
+            y_step_size: Step size for movement along the y axis, in meters
+            z_step_size: Step size for movement along the z axis, in meters
+        """
+        self.x_step_size = x_step_size
+        self.y_step_size = y_step_size
+        self.z_step_size = z_step_size
+        self.running = True
+        self.episode_end_status = None  # None, "success", or "failure"
+        self.intervention_flag = False
+        self.open_gripper_command = False
+        self.close_gripper_command = False
+
+    def start(self):
+        """Start the controller and initialize resources."""
+        pass
+
+    def stop(self):
+        """Stop the controller and release resources."""
+        pass
+
+    def get_deltas(self):
+        """Get the current movement deltas (dx, dy, dz) in meters."""
+        return 0.0, 0.0, 0.0
+
+    def update(self):
+        """Update controller state - call this once per frame."""
+        pass
+
+    def __enter__(self):
+        """Support for use in 'with' statements."""
+        self.start()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """Ensure resources are released when exiting 'with' block."""
+        self.stop()
+
+    def get_episode_end_status(self):
+        """
+        Get the current episode end status.
+
+        Returns:
+            None if episode should continue, "success" or "failure" otherwise
+        """
+        status = self.episode_end_status
+        self.episode_end_status = None  # Reset after reading
+        return status
+
+    def should_intervene(self):
+        """Return True if intervention flag was set."""
+        return self.intervention_flag
+
+    def gripper_command(self):
+        """Return the current gripper command."""
+        if self.open_gripper_command == self.close_gripper_command:
+            return "stay"
+        elif self.open_gripper_command:
+            return "open"
+        elif self.close_gripper_command:
+            return "close"
+
+
+class KeyboardController(InputController):
+    """Generate motion deltas from keyboard input."""
+
+    def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0):
+        super().__init__(x_step_size, y_step_size, z_step_size)
+        self.key_states = {
+            "forward_x": False,
+            "backward_x": False,
+            "forward_y": False,
+            "backward_y": False,
+            "forward_z": False,
+            "backward_z": False,
+            "quit": False,
+            "success": False,
+            "failure": False,
+        }
+        self.listener = None
+
+    def start(self):
+        """Start the keyboard listener."""
+        from pynput import keyboard
+
+        def on_press(key):
+            try:
+                if key == keyboard.Key.up:
+                    self.key_states["forward_x"] = True
+                elif key == keyboard.Key.down:
+                    self.key_states["backward_x"] = True
+                elif key == keyboard.Key.left:
+                    self.key_states["forward_y"] = True
+                elif key == keyboard.Key.right:
+                    self.key_states["backward_y"] = True
+                elif key == keyboard.Key.shift:
+                    self.key_states["backward_z"] = True
+                elif key == keyboard.Key.shift_r:
+                    self.key_states["forward_z"] = True
+                elif key == keyboard.Key.esc:
+                    self.key_states["quit"] = True
+                    self.running = False
+                    return False
+                elif key == keyboard.Key.enter:
+                    self.key_states["success"] = True
+                    self.episode_end_status = TeleopEvents.SUCCESS
+                elif key == keyboard.Key.backspace:
+                    self.key_states["failure"] = True
+                    self.episode_end_status = TeleopEvents.FAILURE
+            except AttributeError:
+                pass
+
+        def on_release(key):
+            try:
+                if key == keyboard.Key.up:
+                    self.key_states["forward_x"] = False
+                elif key == keyboard.Key.down:
+                    self.key_states["backward_x"] = False
+                elif key == keyboard.Key.left:
+                    self.key_states["forward_y"] = False
+                elif key == keyboard.Key.right:
+                    self.key_states["backward_y"] = False
+                elif key == keyboard.Key.shift:
+                    self.key_states["backward_z"] = False
+                elif key == keyboard.Key.shift_r:
+                    self.key_states["forward_z"] = False
+                elif key == keyboard.Key.enter:
+                    self.key_states["success"] = False
+                elif key == keyboard.Key.backspace:
+                    self.key_states["failure"] = False
+            except AttributeError:
+                pass
+
+        self.listener = keyboard.Listener(on_press=on_press, on_release=on_release)
+        self.listener.start()
+
+        print("Keyboard controls:")
+        print(" Arrow keys: Move in X-Y plane")
+        print(" Shift and Shift_R: Move in Z axis")
+        print(" Enter: End episode with SUCCESS")
+        print(" Backspace: End episode with FAILURE")
+        print(" ESC: Exit")
+
+    def stop(self):
+        """Stop the keyboard listener."""
+        if self.listener and self.listener.is_alive():
+            self.listener.stop()
+
+    def get_deltas(self):
+        """Get the current movement deltas from keyboard state."""
+        delta_x = delta_y = delta_z = 0.0
+
+        if self.key_states["forward_x"]:
+            delta_x += self.x_step_size
+        if self.key_states["backward_x"]:
+            delta_x -= self.x_step_size
+        if self.key_states["forward_y"]:
+            delta_y += self.y_step_size
+        if self.key_states["backward_y"]:
+            delta_y -= self.y_step_size
+        if self.key_states["forward_z"]:
+            delta_z += self.z_step_size
+        if self.key_states["backward_z"]:
+            delta_z -= self.z_step_size
+
+        return delta_x, delta_y, delta_z
+
+
+class GamepadController(InputController):
+    """Generate motion deltas from gamepad input."""
+
+    def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0, deadzone=0.1):
+        super().__init__(x_step_size, y_step_size, z_step_size)
+        self.deadzone = deadzone
+        self.joystick = None
+        self.intervention_flag = False
+
+    def start(self):
+        """Initialize pygame and the gamepad."""
+        import pygame
+
+        pygame.init()
+        pygame.joystick.init()
+
+        if pygame.joystick.get_count() == 0:
+            logging.error("No gamepad detected. Please connect a gamepad and try again.")
+            self.running = False
+            return
+
+        self.joystick = pygame.joystick.Joystick(0)
+        self.joystick.init()
+        logging.info(f"Initialized gamepad: {self.joystick.get_name()}")
+
+        print("Gamepad controls:")
+        print(" Left analog stick: Move in X-Y plane")
+        print(" Right analog stick (vertical): Move in Z axis")
+        print(" B/Circle button: Exit")
+        print(" Y/Triangle button: End episode with SUCCESS")
+        print(" A/Cross button: End episode with FAILURE")
+        print(" X/Square button: Rerecord episode")
+
+    def stop(self):
+        """Clean up pygame resources."""
+        import pygame
+
+        if pygame.joystick.get_init():
+            if self.joystick:
+                self.joystick.quit()
+            pygame.joystick.quit()
+        pygame.quit()
+
+    def update(self):
+        """Process pygame events to get fresh gamepad readings."""
+        import pygame
+
+        for event in pygame.event.get():
+            if event.type == pygame.JOYBUTTONDOWN:
+                # Y button (3) for success
+                if event.button == 3:
+                    self.episode_end_status = TeleopEvents.SUCCESS
+                # A button (1) for failure
+                elif event.button == 1:
+                    self.episode_end_status = TeleopEvents.FAILURE
+                # X button (0) for rerecord
+                elif event.button == 0:
+                    self.episode_end_status = TeleopEvents.RERECORD_EPISODE
+
+                # Button 6 for closing gripper
+                elif event.button == 6:
+                    self.close_gripper_command = True
+
+                # Button 7 for opening gripper
+                elif event.button == 7:
+                    self.open_gripper_command = True
+
+            # Reset episode status on button release
+            elif event.type == pygame.JOYBUTTONUP:
+                if event.button in [0, 1, 3]:
+                    self.episode_end_status = None
+
+                elif event.button == 6:
+                    self.close_gripper_command = False
+
+                elif event.button == 7:
+                    self.open_gripper_command = False
+
+        # Check the RB button (typically button 5) for the intervention flag
+        if self.joystick.get_button(5):
+            self.intervention_flag = True
+        else:
+            self.intervention_flag = False
+
+    def get_deltas(self):
+        """Get the current movement deltas from gamepad state."""
+        import pygame
+
+        try:
+            # Read joystick axes
+            # Left stick: axis 0 is horizontal, axis 1 is vertical (often inverted)
+            y_input = self.joystick.get_axis(0)  # Left/right
+            x_input = self.joystick.get_axis(1)  # Up/down
+
+            # Right stick Y (typically axis 3 or 4)
+            z_input = self.joystick.get_axis(3)  # Up/down for Z
+
+            # Apply deadzone to avoid drift
+            x_input = 0 if abs(x_input) < self.deadzone else x_input
+            y_input = 0 if abs(y_input) < self.deadzone else y_input
+            z_input = 0 if abs(z_input) < self.deadzone else z_input
+
+            # Calculate deltas (note: may need to invert axes depending on controller)
+            delta_x = -x_input * self.x_step_size  # Forward/backward
+            delta_y = -y_input * self.y_step_size  # Left/right
+            delta_z = -z_input * self.z_step_size  # Up/down
+
+            return delta_x, delta_y, delta_z
+
+        except pygame.error:
+            logging.error("Error reading gamepad. Is it still connected?")
+            return 0.0, 0.0, 0.0
+
+
+class GamepadControllerHID(InputController):
+    """Generate motion deltas from gamepad input using HIDAPI."""
+
+    def __init__(
+        self,
+        x_step_size=1.0,
+        y_step_size=1.0,
+        z_step_size=1.0,
+        deadzone=0.1,
+    ):
+        """
+        Initialize the HID gamepad controller.
+
+        Args:
+            x_step_size: Base movement step size along the X axis, in meters
+            y_step_size: Base movement step size along the Y axis, in meters
+            z_step_size: Base movement step size along the Z axis, in meters
+            deadzone: Joystick deadzone to prevent drift
+        """
+        super().__init__(x_step_size, y_step_size, z_step_size)
+        self.deadzone = deadzone
+        self.device = None
+        self.device_info = None
+
+        # Movement values (normalized from -1.0 to 1.0)
+        self.left_x = 0.0
+        self.left_y = 0.0
+        self.right_x = 0.0
+        self.right_y = 0.0
+
+        # Button states
+        self.buttons = {}
+
+    def find_device(self):
+        """Look for the gamepad device by vendor and product ID."""
+        import hid
+
+        devices = hid.enumerate()
+        for device in devices:
+            # product_string can be None for some devices, so guard before matching
+            device_name = device["product_string"] or ""
+            if any(controller in device_name for controller in ["Logitech", "Xbox", "PS4", "PS5"]):
+                return device
+
+        logging.error(
+            "No supported gamepad found. Check the connection, or add your controller's HID product string to the list above."
+        )
+        return None
+
+    def start(self):
+        """Connect to the gamepad using HIDAPI."""
+        import hid
+
+        self.device_info = self.find_device()
+        if not self.device_info:
+            self.running = False
+            return
+
+        try:
+            logging.info(f"Connecting to gamepad at path: {self.device_info['path']}")
+            self.device = hid.device()
+            self.device.open_path(self.device_info["path"])
+            self.device.set_nonblocking(1)
+
+            manufacturer = self.device.get_manufacturer_string()
+            product = self.device.get_product_string()
+            logging.info(f"Connected to {manufacturer} {product}")
+
+            logging.info("Gamepad controls (HID mode):")
+            logging.info("  Left analog stick: Move in X-Y plane")
+            logging.info("  Right analog stick: Move in Z axis (vertical)")
+            logging.info("  Y/Triangle button: End episode with SUCCESS")
+            logging.info("  X/Square button: End episode with FAILURE")
+            logging.info("  A/Cross button: Rerecord episode")
+
+        except OSError as e:
+            logging.error(f"Error opening gamepad: {e}")
+            logging.error("You might need to run this with sudo/admin privileges on some systems")
+            self.running = False
+
+    def stop(self):
+        """Close the HID device connection."""
+        if self.device:
+            self.device.close()
+            self.device = None
+
+    def update(self):
+        """
+        Read and process the latest gamepad data.
+
+        Due to an issue with HIDAPI, the device has to be read several times in a row to get a stable reading.
+        """
+        for _ in range(10):
+            self._update()
+
+    def _update(self):
+        """Read and process the latest gamepad data."""
+        if not self.device or not self.running:
+            return
+
+        try:
+            # Read data from the gamepad
+            data = self.device.read(64)
+            # Interpret gamepad data - this will vary by controller model
+            # These offsets are for the Logitech RumblePad 2
+            if data and len(data) >= 8:
+                # Normalize joystick values from 0-255 to -1.0-1.0
+                self.left_y = (data[1] - 128) / 128.0
+                self.left_x = (data[2] - 128) / 128.0
+                self.right_x = (data[3] - 128) / 128.0
+                self.right_y = (data[4] - 128) / 128.0
+
+                # Apply deadzone
+                self.left_y = 0 if abs(self.left_y) < self.deadzone else self.left_y
+                self.left_x = 0 if abs(self.left_x) < self.deadzone else self.left_x
+                self.right_x = 0 if abs(self.right_x) < self.deadzone else self.right_x
+                self.right_y = 0 if abs(self.right_y) < self.deadzone else self.right_y
+
+                # Parse button states (byte 5 in the Logitech RumblePad 2)
+                buttons = data[5]
+
+                # RB (bit 1 of data[6]) sets the intervention flag
+                self.intervention_flag = data[6] in [2, 6, 10, 14]
+
+                # RT (bit 3 of data[6]) opens the gripper
+                self.open_gripper_command = data[6] in [8, 10, 12]
+
+                # LT (bit 2 of data[6]) closes the gripper
+                self.close_gripper_command = data[6] in [4, 6, 12]
+
+                # Check if Y/Triangle button (bit 7) is pressed for saving
+                # Check if X/Square button (bit 5) is pressed for failure
+                # Check if A/Cross button (bit 4) is pressed for rerecording
+                if buttons & 1 << 7:
+                    self.episode_end_status = TeleopEvents.SUCCESS
+                elif buttons & 1 << 5:
+                    self.episode_end_status = TeleopEvents.FAILURE
+                elif buttons & 1 << 4:
+                    self.episode_end_status = TeleopEvents.RERECORD_EPISODE
+                else:
+                    self.episode_end_status = None
+
+        except OSError as e:
+            logging.error(f"Error reading from gamepad: {e}")
+
+    def get_deltas(self):
+        """Get the current movement deltas from gamepad state."""
+        # Calculate deltas - invert as needed based on controller orientation
+        delta_x = -self.left_x * self.x_step_size  # Forward/backward
+        delta_y = -self.left_y * self.y_step_size  # Left/right
+        delta_z = -self.right_y * self.z_step_size  # Up/down
+
+        return delta_x, delta_y, delta_z
diff --git a/lerobot/src/lerobot/teleoperators/gamepad/teleop_gamepad.py b/lerobot/src/lerobot/teleoperators/gamepad/teleop_gamepad.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4d81e97e056d0856bee29fe2921fcf609b3f749
--- /dev/null
+++ b/lerobot/src/lerobot/teleoperators/gamepad/teleop_gamepad.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
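+
+"""
+Gamepad teleoperator producing end-effector deltas and gripper commands.
+
+Example (a minimal usage sketch; it assumes a compatible gamepad is plugged in
+and that the `GamepadTeleopConfig` defaults suffice):
+```python
+from lerobot.teleoperators.gamepad.configuration_gamepad import GamepadTeleopConfig
+from lerobot.teleoperators.gamepad.teleop_gamepad import GamepadTeleop
+
+teleop = GamepadTeleop(GamepadTeleopConfig())
+teleop.connect()
+action = teleop.get_action()  # {"delta_x": ..., "delta_y": ..., "delta_z": ...} plus "gripper" if config.use_gripper
+teleop.disconnect()
+```
+"""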
+
+import sys
+from enum import IntEnum
+from typing import Any
+
+import numpy as np
+
+from lerobot.processor import RobotAction
+from lerobot.utils.decorators import check_if_not_connected
+
+from ..teleoperator import Teleoperator
+from ..utils import TeleopEvents
+from .configuration_gamepad import GamepadTeleopConfig
+
+
+class GripperAction(IntEnum):
+    CLOSE = 0
+    STAY = 1
+    OPEN = 2
+
+
+gripper_action_map = {
+    "close": GripperAction.CLOSE.value,
+    "open": GripperAction.OPEN.value,
+    "stay": GripperAction.STAY.value,
+}
+
+
+class GamepadTeleop(Teleoperator):
+    """
+    Teleop class to use gamepad inputs for control.
+    """
+
+    config_class = GamepadTeleopConfig
+    name = "gamepad"
+
+    def __init__(self, config: GamepadTeleopConfig):
+        super().__init__(config)
+        self.config = config
+        self.robot_type = config.type
+
+        self.gamepad = None
+
+    @property
+    def action_features(self) -> dict:
+        if self.config.use_gripper:
+            return {
+                "dtype": "float32",
+                "shape": (4,),
+                "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2, "gripper": 3},
+            }
+        else:
+            return {
+                "dtype": "float32",
+                "shape": (3,),
+                "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2},
+            }
+
+    @property
+    def feedback_features(self) -> dict:
+        return {}
+
+    def connect(self) -> None:
+        # NOTE: On macOS, pygame doesn't reliably detect input from some controllers,
+        # so we fall back to the hidapi implementation there
+        if sys.platform == "darwin":
+            from .gamepad_utils import GamepadControllerHID as Gamepad
+        else:
+            from .gamepad_utils import GamepadController as Gamepad
+
+        self.gamepad = Gamepad()
+        self.gamepad.start()
+
+    @check_if_not_connected
+    def get_action(self) -> RobotAction:
+        # Update the controller to get fresh inputs
+        self.gamepad.update()
+
+        # Get movement deltas from the controller
+        delta_x, delta_y, delta_z = self.gamepad.get_deltas()
+
+        # Create action from gamepad input
+        gamepad_action = np.array([delta_x, delta_y, delta_z], dtype=np.float32)
+
+        action_dict = {
+            "delta_x": gamepad_action[0],
+            "delta_y": gamepad_action[1],
+            "delta_z": gamepad_action[2],
+        }
+
+        # Default gripper action is to stay
+        gripper_action = GripperAction.STAY.value
+        if self.config.use_gripper:
+            gripper_command = self.gamepad.gripper_command()
+            gripper_action = gripper_action_map[gripper_command]
+            action_dict["gripper"] = gripper_action
+
+        return action_dict
+
+    def get_teleop_events(self) -> dict[str, Any]:
+        """
+        Get extra control events from the gamepad such as intervention status,
+        episode termination, success indicators, etc.
+ + Returns: + Dictionary containing: + - is_intervention: bool - Whether human is currently intervening + - terminate_episode: bool - Whether to terminate the current episode + - success: bool - Whether the episode was successful + - rerecord_episode: bool - Whether to rerecord the episode + """ + if self.gamepad is None: + return { + TeleopEvents.IS_INTERVENTION: False, + TeleopEvents.TERMINATE_EPISODE: False, + TeleopEvents.SUCCESS: False, + TeleopEvents.RERECORD_EPISODE: False, + } + + # Update gamepad state to get fresh inputs + self.gamepad.update() + + # Check if intervention is active + is_intervention = self.gamepad.should_intervene() + + # Get episode end status + episode_end_status = self.gamepad.get_episode_end_status() + terminate_episode = episode_end_status in [ + TeleopEvents.RERECORD_EPISODE, + TeleopEvents.FAILURE, + ] + success = episode_end_status == TeleopEvents.SUCCESS + rerecord_episode = episode_end_status == TeleopEvents.RERECORD_EPISODE + + return { + TeleopEvents.IS_INTERVENTION: is_intervention, + TeleopEvents.TERMINATE_EPISODE: terminate_episode, + TeleopEvents.SUCCESS: success, + TeleopEvents.RERECORD_EPISODE: rerecord_episode, + } + + def disconnect(self) -> None: + """Disconnect from the gamepad.""" + if self.gamepad is not None: + self.gamepad.stop() + self.gamepad = None + + @property + def is_connected(self) -> bool: + """Check if gamepad is connected.""" + return self.gamepad is not None + + def calibrate(self) -> None: + """Calibrate the gamepad.""" + # No calibration needed for gamepad + pass + + def is_calibrated(self) -> bool: + """Check if gamepad is calibrated.""" + # Gamepad doesn't require calibration + return True + + def configure(self) -> None: + """Configure the gamepad.""" + # No additional configuration needed + pass + + def send_feedback(self, feedback: dict) -> None: + """Send feedback to the gamepad.""" + # Gamepad doesn't support feedback + pass diff --git a/lerobot/src/lerobot/teleoperators/homunculus/__init__.py b/lerobot/src/lerobot/teleoperators/homunculus/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0ba020db84d453629cca474038cc80dc367c9e5 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/homunculus/__init__.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .config_homunculus import HomunculusArmConfig, HomunculusGloveConfig +from .homunculus_arm import HomunculusArm +from .homunculus_glove import HomunculusGlove +from .joints_translation import homunculus_glove_to_hope_jr_hand diff --git a/lerobot/src/lerobot/teleoperators/homunculus/config_homunculus.py b/lerobot/src/lerobot/teleoperators/homunculus/config_homunculus.py new file mode 100644 index 0000000000000000000000000000000000000000..a346803a56a40e58f58349692ed6ccaa56b398f9 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/homunculus/config_homunculus.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +from ..config import TeleoperatorConfig + + +@TeleoperatorConfig.register_subclass("homunculus_glove") +@dataclass +class HomunculusGloveConfig(TeleoperatorConfig): + port: str # Port to connect to the glove + side: str # "left" / "right" + baud_rate: int = 115_200 + + def __post_init__(self): + if self.side not in ["right", "left"]: + raise ValueError(self.side) + + +@TeleoperatorConfig.register_subclass("homunculus_arm") +@dataclass +class HomunculusArmConfig(TeleoperatorConfig): + port: str # Port to connect to the arm + baud_rate: int = 115_200 diff --git a/lerobot/src/lerobot/teleoperators/homunculus/homunculus_arm.py b/lerobot/src/lerobot/teleoperators/homunculus/homunculus_arm.py new file mode 100644 index 0000000000000000000000000000000000000000..8945522ac9a682e41a6153594f1ab6557839ed06 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/homunculus/homunculus_arm.py @@ -0,0 +1,313 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import threading +from collections import deque +from pprint import pformat + +import serial + +from lerobot.motors.motors_bus import MotorCalibration, MotorNormMode +from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected +from lerobot.utils.utils import enter_pressed, move_cursor_up + +from ..teleoperator import Teleoperator +from .config_homunculus import HomunculusArmConfig + +logger = logging.getLogger(__name__) + + +class HomunculusArm(Teleoperator): + """ + Homunculus Arm designed by Hugging Face. 
+ """ + + config_class = HomunculusArmConfig + name = "homunculus_arm" + + def __init__(self, config: HomunculusArmConfig): + super().__init__(config) + self.config = config + self.serial = serial.Serial(config.port, config.baud_rate, timeout=1) + self.serial_lock = threading.Lock() + + self.joints = { + "shoulder_pitch": MotorNormMode.RANGE_M100_100, + "shoulder_yaw": MotorNormMode.RANGE_M100_100, + "shoulder_roll": MotorNormMode.RANGE_M100_100, + "elbow_flex": MotorNormMode.RANGE_M100_100, + "wrist_roll": MotorNormMode.RANGE_M100_100, + "wrist_yaw": MotorNormMode.RANGE_M100_100, + "wrist_pitch": MotorNormMode.RANGE_M100_100, + } + n = 50 + # EMA parameters --------------------------------------------------- + self.n: int = n + self.alpha: float = 2 / (n + 1) + # one deque *per joint* so we can inspect raw history if needed + self._buffers: dict[str, deque[int]] = { + joint: deque(maxlen=n) + for joint in ( + "shoulder_pitch", + "shoulder_yaw", + "shoulder_roll", + "elbow_flex", + "wrist_roll", + "wrist_yaw", + "wrist_pitch", + ) + } + # running EMA value per joint – lazily initialised on first read + self._ema: dict[str, float | None] = dict.fromkeys(self._buffers) + + self._state: dict[str, float] | None = None + self.new_state_event = threading.Event() + self.stop_event = threading.Event() + self.thread = threading.Thread(target=self._read_loop, daemon=True, name=f"{self} _read_loop") + self.state_lock = threading.Lock() + + @property + def action_features(self) -> dict: + return {f"{joint}.pos": float for joint in self.joints} + + @property + def feedback_features(self) -> dict: + return {} + + @property + def is_connected(self) -> bool: + with self.serial_lock: + return self.serial.is_open and self.thread.is_alive() + + @check_if_already_connected + def connect(self, calibrate: bool = True) -> None: + if not self.serial.is_open: + self.serial.open() + self.thread.start() + + # wait for the thread to ramp up & 1st state to be ready + if not self.new_state_event.wait(timeout=2): + raise TimeoutError(f"{self}: Timed out waiting for state after 2s.") + + if not self.is_calibrated and calibrate: + self.calibrate() + + logger.info(f"{self} connected.") + + @property + def is_calibrated(self) -> bool: + return self.calibration_fpath.is_file() + + def calibrate(self) -> None: + print( + "\nMove all joints through their entire range of motion." + "\nRecording positions. Press ENTER to stop..." + ) + range_mins, range_maxes = self._record_ranges_of_motion() + + self.calibration = {} + for id_, joint in enumerate(self.joints): + self.calibration[joint] = MotorCalibration( + id=id_, + drive_mode=0, + homing_offset=0, + range_min=range_mins[joint], + range_max=range_maxes[joint], + ) + + self._save_calibration() + print("Calibration saved to", self.calibration_fpath) + + # TODO(Steven): This function is copy/paste from the `HomunculusGlove` class. Consider moving it to an utility to reduce duplicated code. + def _record_ranges_of_motion( + self, joints: list[str] | None = None, display_values: bool = True + ) -> tuple[dict[str, int], dict[str, int]]: + """Interactively record the min/max encoder values of each joint. + + Move the joints while the method streams live positions. Press :kbd:`Enter` to finish. + + Args: + joints (list[str] | None, optional): Joints to record. Defaults to every joint (`None`). + display_values (bool, optional): When `True` (default) a live table is printed to the console. + + Raises: + TypeError: `joints` is not `None` or a list. 
+            ValueError: any joint's recorded min and max are the same.
+
+        Returns:
+            tuple[dict[str, int], dict[str, int]]: Two dictionaries *mins* and *maxes* with the extreme values
+                observed for each joint.
+        """
+        if joints is None:
+            joints = list(self.joints)
+        elif not isinstance(joints, list):
+            raise TypeError(joints)
+
+        display_len = max(len(key) for key in joints)
+
+        start_positions = self._read(joints, normalize=False)
+        mins = start_positions.copy()
+        maxes = start_positions.copy()
+
+        user_pressed_enter = False
+        while not user_pressed_enter:
+            positions = self._read(joints, normalize=False)
+            mins = {joint: int(min(positions[joint], min_)) for joint, min_ in mins.items()}
+            maxes = {joint: int(max(positions[joint], max_)) for joint, max_ in maxes.items()}
+
+            if display_values:
+                print("\n-------------------------------------------")
+                print(f"{'NAME':<{display_len}} | {'MIN':>6} | {'POS':>6} | {'MAX':>6}")
+                for joint in joints:
+                    print(
+                        f"{joint:<{display_len}} | {mins[joint]:>6} | {positions[joint]:>6} | {maxes[joint]:>6}"
+                    )
+
+            if enter_pressed():
+                user_pressed_enter = True
+
+            if display_values and not user_pressed_enter:
+                # Move cursor up to overwrite the previous output
+                move_cursor_up(len(joints) + 3)
+
+        same_min_max = [joint for joint in joints if mins[joint] == maxes[joint]]
+        if same_min_max:
+            raise ValueError(f"Some joints have the same min and max values:\n{pformat(same_min_max)}")
+
+        return mins, maxes
+
+    def configure(self) -> None:
+        pass
+
+    # TODO(Steven): This function is copy/paste from the `HomunculusGlove` class. Consider moving it to an utility to reduce duplicated code.
+    def _normalize(self, values: dict[str, int]) -> dict[str, float]:
+        if not self.calibration:
+            raise RuntimeError(f"{self} has no calibration registered.")
+
+        normalized_values = {}
+        for joint, val in values.items():
+            min_ = self.calibration[joint].range_min
+            max_ = self.calibration[joint].range_max
+            drive_mode = self.calibration[joint].drive_mode
+            bounded_val = min(max_, max(min_, val))
+
+            if self.joints[joint] is MotorNormMode.RANGE_M100_100:
+                norm = (((bounded_val - min_) / (max_ - min_)) * 200) - 100
+                normalized_values[joint] = -norm if drive_mode else norm
+            elif self.joints[joint] is MotorNormMode.RANGE_0_100:
+                norm = ((bounded_val - min_) / (max_ - min_)) * 100
+                normalized_values[joint] = 100 - norm if drive_mode else norm
+
+        return normalized_values
+
+    def _apply_ema(self, raw: dict[str, int]) -> dict[str, float]:
+        """Update buffers & running EMA values; return smoothed dict."""
+        smoothed: dict[str, float] = {}
+        for joint, value in raw.items():
+            # maintain raw history
+            self._buffers[joint].append(value)
+
+            # initialise on first run
+            if self._ema[joint] is None:
+                self._ema[joint] = float(value)
+            else:
+                self._ema[joint] = self.alpha * value + (1 - self.alpha) * self._ema[joint]
+
+            smoothed[joint] = self._ema[joint]
+        return smoothed
+
+    def _read(
+        self, joints: list[str] | None = None, normalize: bool = True, timeout: float = 1
+    ) -> dict[str, int | float]:
+        """
+        Return the most recent joint values produced by the background read thread,
+        optionally applying calibration.
+        """
+        if not self.new_state_event.wait(timeout=timeout):
+            raise TimeoutError(f"{self}: Timed out waiting for state after {timeout}s.")
+
+        with self.state_lock:
+            state = self._state
+
+        self.new_state_event.clear()
+
+        if state is None:
+            raise RuntimeError(f"{self} Internal error: Event set but no state available.")
+
+        if joints is not None:
+            state = {k: v for k, v in state.items() if k in joints}
+
+        if normalize:
+            state = self._normalize(state)
+
+        state = self._apply_ema(state)
+
+        return state
+
+    def _read_loop(self):
+        """
+        Continuously read from the serial buffer in a background thread and publish the latest state to the
+        main thread via a lock-protected shared variable and an event.
+        """
+        while not self.stop_event.is_set():
+            try:
+                raw_values = None
+                with self.serial_lock:
+                    if self.serial.in_waiting > 0:
+                        lines = []
+                        while self.serial.in_waiting > 0:
+                            line = self.serial.read_until().decode("utf-8").strip()
+                            if line:
+                                lines.append(line.split(" "))
+
+                        if lines:
+                            raw_values = lines[-1]
+
+                if raw_values is None or len(raw_values) != 21:  # 16 raw + 5 angle values
+                    continue
+
+                joint_angles = {
+                    "shoulder_pitch": int(raw_values[19]),
+                    "shoulder_yaw": int(raw_values[18]),
+                    "shoulder_roll": int(raw_values[20]),
+                    "elbow_flex": int(raw_values[17]),
+                    "wrist_roll": int(raw_values[16]),
+                    "wrist_yaw": int(raw_values[1]),
+                    "wrist_pitch": int(raw_values[0]),
+                }
+
+                with self.state_lock:
+                    self._state = joint_angles
+                self.new_state_event.set()
+
+            except Exception as e:
+                logger.debug(f"Error reading frame in background thread for {self}: {e}")
+
+    @check_if_not_connected
+    def get_action(self) -> dict[str, float]:
+        joint_positions = self._read()
+        return {f"{joint}.pos": pos for joint, pos in joint_positions.items()}
+
+    def send_feedback(self, feedback: dict[str, float]) -> None:
+        raise NotImplementedError
+
+    @check_if_not_connected
+    def disconnect(self) -> None:
+        self.stop_event.set()
+        self.thread.join(timeout=1)
+        self.serial.close()
+        logger.info(f"{self} disconnected.")
diff --git a/lerobot/src/lerobot/teleoperators/homunculus/homunculus_glove.py b/lerobot/src/lerobot/teleoperators/homunculus/homunculus_glove.py
new file mode 100644
index 0000000000000000000000000000000000000000..7484f439edb1ce4f18c23e10a31df957c77609a8
--- /dev/null
+++ b/lerobot/src/lerobot/teleoperators/homunculus/homunculus_glove.py
@@ -0,0 +1,341 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
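+
+"""
+Homunculus Glove teleoperator.
+
+Raw encoder values streamed over serial are smoothed with an exponential
+moving average (EMA) before normalization. With the window n = 10 used below,
+alpha = 2 / (n + 1) ≈ 0.18, and each update follows the recurrence:
+
+    ema = alpha * value + (1 - alpha) * ema
+"""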
+ +import logging +import threading +from collections import deque +from pprint import pformat + +import serial + +from lerobot.motors import MotorCalibration +from lerobot.motors.motors_bus import MotorNormMode +from lerobot.teleoperators.homunculus.joints_translation import homunculus_glove_to_hope_jr_hand +from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected +from lerobot.utils.utils import enter_pressed, move_cursor_up + +from ..teleoperator import Teleoperator +from .config_homunculus import HomunculusGloveConfig + +logger = logging.getLogger(__name__) + +LEFT_HAND_INVERSIONS = [ + "thumb_cmc", + "index_dip", + "middle_mcp_abduction", + "middle_dip", + "pinky_mcp_abduction", + "pinky_dip", +] + +RIGHT_HAND_INVERSIONS = [ + "thumb_mcp", + "thumb_cmc", + "thumb_pip", + "thumb_dip", + "index_mcp_abduction", + # "index_dip", + "middle_mcp_abduction", + # "middle_dip", + "ring_mcp_abduction", + "ring_mcp_flexion", + # "ring_dip", + "pinky_mcp_abduction", +] + + +class HomunculusGlove(Teleoperator): + """ + Homunculus Glove designed by NepYope & Hugging Face. + """ + + config_class = HomunculusGloveConfig + name = "homunculus_glove" + + def __init__(self, config: HomunculusGloveConfig): + super().__init__(config) + self.config = config + self.serial = serial.Serial(config.port, config.baud_rate, timeout=1) + self.serial_lock = threading.Lock() + + self.joints = { + "thumb_cmc": MotorNormMode.RANGE_0_100, + "thumb_mcp": MotorNormMode.RANGE_0_100, + "thumb_pip": MotorNormMode.RANGE_0_100, + "thumb_dip": MotorNormMode.RANGE_0_100, + "index_mcp_abduction": MotorNormMode.RANGE_M100_100, + "index_mcp_flexion": MotorNormMode.RANGE_0_100, + "index_dip": MotorNormMode.RANGE_0_100, + "middle_mcp_abduction": MotorNormMode.RANGE_M100_100, + "middle_mcp_flexion": MotorNormMode.RANGE_0_100, + "middle_dip": MotorNormMode.RANGE_0_100, + "ring_mcp_abduction": MotorNormMode.RANGE_M100_100, + "ring_mcp_flexion": MotorNormMode.RANGE_0_100, + "ring_dip": MotorNormMode.RANGE_0_100, + "pinky_mcp_abduction": MotorNormMode.RANGE_M100_100, + "pinky_mcp_flexion": MotorNormMode.RANGE_0_100, + "pinky_dip": MotorNormMode.RANGE_0_100, + } + self.inverted_joints = RIGHT_HAND_INVERSIONS if config.side == "right" else LEFT_HAND_INVERSIONS + + n = 10 + # EMA parameters --------------------------------------------------- + self.n: int = n + self.alpha: float = 2 / (n + 1) + # one deque *per joint* so we can inspect raw history if needed + self._buffers: dict[str, deque[int]] = {joint: deque(maxlen=n) for joint in self.joints} + # running EMA value per joint – lazily initialised on first read + self._ema: dict[str, float | None] = dict.fromkeys(self._buffers) + + self._state: dict[str, float] | None = None + self.new_state_event = threading.Event() + self.stop_event = threading.Event() + self.thread = threading.Thread(target=self._read_loop, daemon=True, name=f"{self} _read_loop") + self.state_lock = threading.Lock() + + @property + def action_features(self) -> dict: + return {f"{joint}.pos": float for joint in self.joints} + + @property + def feedback_features(self) -> dict: + return {} + + @property + def is_connected(self) -> bool: + with self.serial_lock: + return self.serial.is_open and self.thread.is_alive() + + @check_if_already_connected + def connect(self, calibrate: bool = True) -> None: + if not self.serial.is_open: + self.serial.open() + self.thread.start() + + # wait for the thread to ramp up & 1st state to be ready + if not self.new_state_event.wait(timeout=2): + raise 
TimeoutError(f"{self}: Timed out waiting for state after 2s.") + + if not self.is_calibrated and calibrate: + self.calibrate() + + logger.info(f"{self} connected.") + + @property + def is_calibrated(self) -> bool: + return self.calibration_fpath.is_file() + + def calibrate(self) -> None: + range_mins, range_maxes = {}, {} + for finger in ["thumb", "index", "middle", "ring", "pinky"]: + print( + f"\nMove {finger} through its entire range of motion." + "\nRecording positions. Press ENTER to stop..." + ) + finger_joints = [joint for joint in self.joints if joint.startswith(finger)] + finger_mins, finger_maxes = self._record_ranges_of_motion(finger_joints) + range_mins.update(finger_mins) + range_maxes.update(finger_maxes) + + self.calibration = {} + for id_, joint in enumerate(self.joints): + self.calibration[joint] = MotorCalibration( + id=id_, + drive_mode=1 if joint in self.inverted_joints else 0, + homing_offset=0, + range_min=range_mins[joint], + range_max=range_maxes[joint], + ) + + self._save_calibration() + print("Calibration saved to", self.calibration_fpath) + + # TODO(Steven): This function is copy/paste from the `HomunculusArm` class. Consider moving it to an utility to reduce duplicated code. + def _record_ranges_of_motion( + self, joints: list[str] | None = None, display_values: bool = True + ) -> tuple[dict[str, int], dict[str, int]]: + """Interactively record the min/max encoder values of each joint. + + Move the joints while the method streams live positions. Press :kbd:`Enter` to finish. + + Args: + joints (list[str] | None, optional): Joints to record. Defaults to every joint (`None`). + display_values (bool, optional): When `True` (default) a live table is printed to the console. + + Raises: + TypeError: `joints` is not `None` or a list. + ValueError: any joint's recorded min and max are the same. + + Returns: + tuple[dict[str, int], dict[str, int]]: Two dictionaries *mins* and *maxes* with the extreme values + observed for each joint. + """ + if joints is None: + joints = list(self.joints) + elif not isinstance(joints, list): + raise TypeError(joints) + + display_len = max(len(key) for key in joints) + + start_positions = self._read(joints, normalize=False) + mins = start_positions.copy() + maxes = start_positions.copy() + + user_pressed_enter = False + while not user_pressed_enter: + positions = self._read(joints, normalize=False) + mins = {joint: int(min(positions[joint], min_)) for joint, min_ in mins.items()} + maxes = {joint: int(max(positions[joint], max_)) for joint, max_ in maxes.items()} + + if display_values: + print("\n-------------------------------------------") + print(f"{'NAME':<{display_len}} | {'MIN':>6} | {'POS':>6} | {'MAX':>6}") + for joint in joints: + print( + f"{joint:<{display_len}} | {mins[joint]:>6} | {positions[joint]:>6} | {maxes[joint]:>6}" + ) + + if enter_pressed(): + user_pressed_enter = True + + if display_values and not user_pressed_enter: + # Move cursor up to overwrite the previous output + move_cursor_up(len(joints) + 3) + + same_min_max = [joint for joint in joints if mins[joint] == maxes[joint]] + if same_min_max: + raise ValueError(f"Some joints have the same min and max values:\n{pformat(same_min_max)}") + + return mins, maxes + + def configure(self) -> None: + pass + + # TODO(Steven): This function is copy/paste from the `HomunculusArm` class. Consider moving it to an utility to reduce duplicated code. 
+    def _normalize(self, values: dict[str, int]) -> dict[str, float]:
+        if not self.calibration:
+            raise RuntimeError(f"{self} has no calibration registered.")
+
+        normalized_values = {}
+        for joint, val in values.items():
+            min_ = self.calibration[joint].range_min
+            max_ = self.calibration[joint].range_max
+            drive_mode = self.calibration[joint].drive_mode
+            bounded_val = min(max_, max(min_, val))
+
+            if self.joints[joint] is MotorNormMode.RANGE_M100_100:
+                norm = (((bounded_val - min_) / (max_ - min_)) * 200) - 100
+                normalized_values[joint] = -norm if drive_mode else norm
+            elif self.joints[joint] is MotorNormMode.RANGE_0_100:
+                norm = ((bounded_val - min_) / (max_ - min_)) * 100
+                normalized_values[joint] = 100 - norm if drive_mode else norm
+
+        return normalized_values
+
+    def _apply_ema(self, raw: dict[str, int]) -> dict[str, int]:
+        """Update buffers & running EMA values; return smoothed dict as integers."""
+        smoothed: dict[str, int] = {}
+        for joint, value in raw.items():
+            # maintain raw history
+            self._buffers[joint].append(value)
+
+            # initialise on first run
+            if self._ema[joint] is None:
+                self._ema[joint] = float(value)
+            else:
+                self._ema[joint] = self.alpha * value + (1 - self.alpha) * self._ema[joint]
+
+            # Convert back to int for compatibility with normalization
+            smoothed[joint] = int(round(self._ema[joint]))
+        return smoothed
+
+    def _read(
+        self, joints: list[str] | None = None, normalize: bool = True, timeout: float = 1
+    ) -> dict[str, int | float]:
+        """
+        Return the most recent joint values produced by the background read thread,
+        optionally applying calibration.
+        """
+        if not self.new_state_event.wait(timeout=timeout):
+            raise TimeoutError(f"{self}: Timed out waiting for state after {timeout}s.")
+
+        with self.state_lock:
+            state = self._state
+
+        self.new_state_event.clear()
+
+        if state is None:
+            raise RuntimeError(f"{self} Internal error: Event set but no state available.")
+
+        if joints is not None:
+            state = {k: v for k, v in state.items() if k in joints}
+
+        # Apply EMA smoothing to raw values first
+        state = self._apply_ema(state)
+
+        # Then normalize if requested
+        if normalize:
+            state = self._normalize(state)
+
+        return state
+
+    def _read_loop(self):
+        """
+        Continuously read from the serial buffer in a background thread and publish the latest state to the
+        main thread via a lock-protected shared variable and an event.
+ """ + while not self.stop_event.is_set(): + try: + positions = None + with self.serial_lock: + if self.serial.in_waiting > 0: + lines = [] + while self.serial.in_waiting > 0: + line = self.serial.read_until().decode("utf-8").strip() + if line: + lines.append(line.split(" ")) + + if lines: + positions = lines[-1] + + if positions is None or len(positions) != len(self.joints): + continue + + joint_positions = {joint: int(pos) for joint, pos in zip(self.joints, positions, strict=True)} + + with self.state_lock: + self._state = joint_positions + self.new_state_event.set() + + except Exception as e: + logger.debug(f"Error reading frame in background thread for {self}: {e}") + + @check_if_not_connected + def get_action(self) -> dict[str, float]: + joint_positions = self._read() + return homunculus_glove_to_hope_jr_hand( + {f"{joint}.pos": pos for joint, pos in joint_positions.items()} + ) + + def send_feedback(self, feedback: dict[str, float]) -> None: + raise NotImplementedError + + @check_if_not_connected + def disconnect(self) -> None: + self.stop_event.set() + self.thread.join(timeout=1) + self.serial.close() + logger.info(f"{self} disconnected.") diff --git a/lerobot/src/lerobot/teleoperators/homunculus/joints_translation.py b/lerobot/src/lerobot/teleoperators/homunculus/joints_translation.py new file mode 100644 index 0000000000000000000000000000000000000000..913943c9ae4fd24b8736fad8075e563f2e46315b --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/homunculus/joints_translation.py @@ -0,0 +1,63 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +INDEX_SPLAY = 0.3 +MIDDLE_SPLAY = 0.3 +RING_SPLAY = 0.3 +PINKY_SPLAY = 0.5 + + +def get_ulnar_flexion(flexion: float, abduction: float, splay: float): + return -abduction * splay + flexion * (1 - splay) + + +def get_radial_flexion(flexion: float, abduction: float, splay: float): + return abduction * splay + flexion * (1 - splay) + + +def homunculus_glove_to_hope_jr_hand(glove_action: dict[str, float]) -> dict[str, float]: + return { + "thumb_cmc.pos": glove_action["thumb_cmc.pos"], + "thumb_mcp.pos": glove_action["thumb_mcp.pos"], + "thumb_pip.pos": glove_action["thumb_pip.pos"], + "thumb_dip.pos": glove_action["thumb_dip.pos"], + "index_radial_flexor.pos": get_radial_flexion( + glove_action["index_mcp_flexion.pos"], glove_action["index_mcp_abduction.pos"], INDEX_SPLAY + ), + "index_ulnar_flexor.pos": get_ulnar_flexion( + glove_action["index_mcp_flexion.pos"], glove_action["index_mcp_abduction.pos"], INDEX_SPLAY + ), + "index_pip_dip.pos": glove_action["index_dip.pos"], + "middle_radial_flexor.pos": get_radial_flexion( + glove_action["middle_mcp_flexion.pos"], glove_action["middle_mcp_abduction.pos"], MIDDLE_SPLAY + ), + "middle_ulnar_flexor.pos": get_ulnar_flexion( + glove_action["middle_mcp_flexion.pos"], glove_action["middle_mcp_abduction.pos"], MIDDLE_SPLAY + ), + "middle_pip_dip.pos": glove_action["middle_dip.pos"], + "ring_radial_flexor.pos": get_radial_flexion( + glove_action["ring_mcp_flexion.pos"], glove_action["ring_mcp_abduction.pos"], RING_SPLAY + ), + "ring_ulnar_flexor.pos": get_ulnar_flexion( + glove_action["ring_mcp_flexion.pos"], glove_action["ring_mcp_abduction.pos"], RING_SPLAY + ), + "ring_pip_dip.pos": glove_action["ring_dip.pos"], + "pinky_radial_flexor.pos": get_radial_flexion( + glove_action["pinky_mcp_flexion.pos"], glove_action["pinky_mcp_abduction.pos"], PINKY_SPLAY + ), + "pinky_ulnar_flexor.pos": get_ulnar_flexion( + glove_action["pinky_mcp_flexion.pos"], glove_action["pinky_mcp_abduction.pos"], PINKY_SPLAY + ), + "pinky_pip_dip.pos": glove_action["pinky_dip.pos"], + } diff --git a/lerobot/src/lerobot/teleoperators/keyboard/__init__.py b/lerobot/src/lerobot/teleoperators/keyboard/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4676a13ef041bfe5d45fb960848e99927e914bf9 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/keyboard/__init__.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .configuration_keyboard import ( + KeyboardEndEffectorTeleopConfig, + KeyboardRoverTeleopConfig, + KeyboardTeleopConfig, +) +from .teleop_keyboard import KeyboardEndEffectorTeleop, KeyboardRoverTeleop, KeyboardTeleop + +__all__ = [ + "KeyboardTeleopConfig", + "KeyboardTeleop", + "KeyboardEndEffectorTeleopConfig", + "KeyboardEndEffectorTeleop", + "KeyboardRoverTeleopConfig", + "KeyboardRoverTeleop", +] diff --git a/lerobot/src/lerobot/teleoperators/keyboard/configuration_keyboard.py b/lerobot/src/lerobot/teleoperators/keyboard/configuration_keyboard.py new file mode 100644 index 0000000000000000000000000000000000000000..953e12b7ecfc5635e49dfe5911ba5ffa2c8cbca3 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/keyboard/configuration_keyboard.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Configuration for keyboard teleoperators.""" + +from dataclasses import dataclass + +from ..config import TeleoperatorConfig + + +@TeleoperatorConfig.register_subclass("keyboard") +@dataclass +class KeyboardTeleopConfig(TeleoperatorConfig): + """KeyboardTeleopConfig""" + + # TODO(Steven): Consider setting in here the keys that we want to capture/listen + + +@TeleoperatorConfig.register_subclass("keyboard_ee") +@dataclass +class KeyboardEndEffectorTeleopConfig(KeyboardTeleopConfig): + """Configuration for keyboard end-effector teleoperator. + + Used for controlling robot end-effectors with keyboard inputs. + + Attributes: + use_gripper: Whether to include gripper control in actions + """ + + use_gripper: bool = True + + +@TeleoperatorConfig.register_subclass("keyboard_rover") +@dataclass +class KeyboardRoverTeleopConfig(TeleoperatorConfig): + """Configuration for keyboard rover teleoperator. + + Used for controlling mobile robots like EarthRover Mini Plus with WASD controls. 
+ + Attributes: + linear_speed: Default linear velocity magnitude (-1 to 1 range for SDK robots) + angular_speed: Default angular velocity magnitude (-1 to 1 range for SDK robots) + speed_increment: Amount to increase/decrease speed with +/- keys + turn_assist_ratio: Forward motion multiplier when turning with A/D keys (0.0-1.0) + angular_speed_ratio: Ratio of angular to linear speed for synchronized adjustments + min_linear_speed: Minimum linear speed when decreasing (prevents zero speed) + min_angular_speed: Minimum angular speed when decreasing (prevents zero speed) + """ + + linear_speed: float = 1.0 + angular_speed: float = 1.0 + speed_increment: float = 0.1 + turn_assist_ratio: float = 0.3 + angular_speed_ratio: float = 0.6 + min_linear_speed: float = 0.1 + min_angular_speed: float = 0.05 diff --git a/lerobot/src/lerobot/teleoperators/keyboard/teleop_keyboard.py b/lerobot/src/lerobot/teleoperators/keyboard/teleop_keyboard.py new file mode 100644 index 0000000000000000000000000000000000000000..d900eaef1b0358cb105729baa8005bc0d620f8e2 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/keyboard/teleop_keyboard.py @@ -0,0 +1,432 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +import sys +import time +from queue import Queue +from typing import Any + +from lerobot.processor import RobotAction +from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected + +from ..teleoperator import Teleoperator +from ..utils import TeleopEvents +from .configuration_keyboard import ( + KeyboardEndEffectorTeleopConfig, + KeyboardRoverTeleopConfig, + KeyboardTeleopConfig, +) + +PYNPUT_AVAILABLE = True +try: + if ("DISPLAY" not in os.environ) and ("linux" in sys.platform): + logging.info("No DISPLAY set. Skipping pynput import.") + raise ImportError("pynput blocked intentionally due to no display.") + + from pynput import keyboard +except ImportError: + keyboard = None + PYNPUT_AVAILABLE = False +except Exception as e: + keyboard = None + PYNPUT_AVAILABLE = False + logging.info(f"Could not import pynput: {e}") + + +class KeyboardTeleop(Teleoperator): + """ + Teleop class to use keyboard inputs for control. 
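+
+    Example (a minimal sketch; it requires `pynput`, which needs display/input
+    access on the host):
+    ```python
+    from lerobot.teleoperators.keyboard import KeyboardTeleop, KeyboardTeleopConfig
+
+    teleop = KeyboardTeleop(KeyboardTeleopConfig())
+    teleop.connect()
+    pressed = teleop.get_action()  # keys currently held down, e.g. {"w": None}
+    teleop.disconnect()
+    ```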
+ """ + + config_class = KeyboardTeleopConfig + name = "keyboard" + + def __init__(self, config: KeyboardTeleopConfig): + super().__init__(config) + self.config = config + self.robot_type = config.type + + self.event_queue = Queue() + self.current_pressed = {} + self.listener = None + self.logs = {} + + @property + def action_features(self) -> dict: + return { + "dtype": "float32", + "shape": (len(self.arm),), + "names": {"motors": list(self.arm.motors)}, + } + + @property + def feedback_features(self) -> dict: + return {} + + @property + def is_connected(self) -> bool: + return PYNPUT_AVAILABLE and isinstance(self.listener, keyboard.Listener) and self.listener.is_alive() + + @property + def is_calibrated(self) -> bool: + pass + + @check_if_already_connected + def connect(self) -> None: + if PYNPUT_AVAILABLE: + logging.info("pynput is available - enabling local keyboard listener.") + self.listener = keyboard.Listener( + on_press=self._on_press, + on_release=self._on_release, + ) + self.listener.start() + else: + logging.info("pynput not available - skipping local keyboard listener.") + self.listener = None + + def calibrate(self) -> None: + pass + + def _on_press(self, key): + if hasattr(key, "char"): + self.event_queue.put((key.char, True)) + + def _on_release(self, key): + if hasattr(key, "char"): + self.event_queue.put((key.char, False)) + if key == keyboard.Key.esc: + logging.info("ESC pressed, disconnecting.") + self.disconnect() + + def _drain_pressed_keys(self): + while not self.event_queue.empty(): + key_char, is_pressed = self.event_queue.get_nowait() + self.current_pressed[key_char] = is_pressed + + def configure(self): + pass + + @check_if_not_connected + def get_action(self) -> RobotAction: + before_read_t = time.perf_counter() + + self._drain_pressed_keys() + + # Generate action based on current key states + action = {key for key, val in self.current_pressed.items() if val} + self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t + + return dict.fromkeys(action, None) + + def send_feedback(self, feedback: dict[str, Any]) -> None: + pass + + @check_if_not_connected + def disconnect(self) -> None: + if self.listener is not None: + self.listener.stop() + + +class KeyboardEndEffectorTeleop(KeyboardTeleop): + """ + Teleop class to use keyboard inputs for end effector control. + Designed to be used with the `So100FollowerEndEffector` robot. 
+ """ + + config_class = KeyboardEndEffectorTeleopConfig + name = "keyboard_ee" + + def __init__(self, config: KeyboardEndEffectorTeleopConfig): + super().__init__(config) + self.config = config + self.misc_keys_queue = Queue() + + @property + def action_features(self) -> dict: + if self.config.use_gripper: + return { + "dtype": "float32", + "shape": (4,), + "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2, "gripper": 3}, + } + else: + return { + "dtype": "float32", + "shape": (3,), + "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2}, + } + + @check_if_not_connected + def get_action(self) -> RobotAction: + self._drain_pressed_keys() + delta_x = 0.0 + delta_y = 0.0 + delta_z = 0.0 + gripper_action = 1.0 + + # Generate action based on current key states + for key, val in self.current_pressed.items(): + if key == keyboard.Key.up: + delta_y = -int(val) + elif key == keyboard.Key.down: + delta_y = int(val) + elif key == keyboard.Key.left: + delta_x = int(val) + elif key == keyboard.Key.right: + delta_x = -int(val) + elif key == keyboard.Key.shift: + delta_z = -int(val) + elif key == keyboard.Key.shift_r: + delta_z = int(val) + elif key == keyboard.Key.ctrl_r: + # Gripper actions are expected to be between 0 (close), 1 (stay), 2 (open) + gripper_action = int(val) + 1 + elif key == keyboard.Key.ctrl_l: + gripper_action = int(val) - 1 + elif val: + # If the key is pressed, add it to the misc_keys_queue + # this will record key presses that are not part of the delta_x, delta_y, delta_z + # this is useful for retrieving other events like interventions for RL, episode success, etc. + self.misc_keys_queue.put(key) + + self.current_pressed.clear() + + action_dict = { + "delta_x": delta_x, + "delta_y": delta_y, + "delta_z": delta_z, + } + + if self.config.use_gripper: + action_dict["gripper"] = gripper_action + + return action_dict + + def get_teleop_events(self) -> dict[str, Any]: + """ + Get extra control events from the keyboard such as intervention status, + episode termination, success indicators, etc. 
+ + Keyboard mappings: + - Any movement keys pressed = intervention active + - 's' key = success (terminate episode successfully) + - 'r' key = rerecord episode (terminate and rerecord) + - 'q' key = quit episode (terminate without success) + + Returns: + Dictionary containing: + - is_intervention: bool - Whether human is currently intervening + - terminate_episode: bool - Whether to terminate the current episode + - success: bool - Whether the episode was successful + - rerecord_episode: bool - Whether to rerecord the episode + """ + if not self.is_connected: + return { + TeleopEvents.IS_INTERVENTION: False, + TeleopEvents.TERMINATE_EPISODE: False, + TeleopEvents.SUCCESS: False, + TeleopEvents.RERECORD_EPISODE: False, + } + + # Check if any movement keys are currently pressed (indicates intervention) + movement_keys = [ + keyboard.Key.up, + keyboard.Key.down, + keyboard.Key.left, + keyboard.Key.right, + keyboard.Key.shift, + keyboard.Key.shift_r, + keyboard.Key.ctrl_r, + keyboard.Key.ctrl_l, + ] + is_intervention = any(self.current_pressed.get(key, False) for key in movement_keys) + + # Check for episode control commands from misc_keys_queue + terminate_episode = False + success = False + rerecord_episode = False + + # Process any pending misc keys + while not self.misc_keys_queue.empty(): + key = self.misc_keys_queue.get_nowait() + if key == "s": + success = True + elif key == "r": + terminate_episode = True + rerecord_episode = True + elif key == "q": + terminate_episode = True + success = False + + return { + TeleopEvents.IS_INTERVENTION: is_intervention, + TeleopEvents.TERMINATE_EPISODE: terminate_episode, + TeleopEvents.SUCCESS: success, + TeleopEvents.RERECORD_EPISODE: rerecord_episode, + } + + +class KeyboardRoverTeleop(KeyboardTeleop): + """ + Keyboard teleoperator for mobile robots like EarthRover Mini Plus. 
+ + Provides intuitive WASD-style controls for driving a mobile robot: + - Linear movement (forward/backward) + - Angular movement (turning/rotation) + - Speed adjustment + - Emergency stop + + Keyboard Controls: + Movement: + - W: Move forward + - S: Move backward + - A: Turn left (with forward motion) + - D: Turn right (with forward motion) + - Q: Rotate left in place + - E: Rotate right in place + - X: Emergency stop + + Speed Control: + - +/=: Increase speed + - -: Decrease speed + + System: + - ESC: Disconnect teleoperator + + Attributes: + config: Teleoperator configuration + current_linear_speed: Current linear velocity magnitude + current_angular_speed: Current angular velocity magnitude + + Example: + ```python + from lerobot.teleoperators.keyboard import KeyboardRoverTeleop, KeyboardRoverTeleopConfig + + teleop = KeyboardRoverTeleop( + KeyboardRoverTeleopConfig(linear_speed=1.0, angular_speed=1.0, speed_increment=0.1) + ) + teleop.connect() + + while teleop.is_connected: + action = teleop.get_action() + robot.send_action(action) + ``` + """ + + config_class = KeyboardRoverTeleopConfig + name = "keyboard_rover" + + def __init__(self, config: KeyboardRoverTeleopConfig): + super().__init__(config) + # Add rover-specific speed settings + self.current_linear_speed = config.linear_speed + self.current_angular_speed = config.angular_speed + + @property + def action_features(self) -> dict: + """Return action format for rover (linear and angular velocities).""" + return { + "linear.vel": float, + "angular.vel": float, + } + + @property + def is_calibrated(self) -> bool: + """Rover teleop doesn't require calibration.""" + return True + + def _drain_pressed_keys(self): + """Update current_pressed state from event queue without clearing held keys""" + while not self.event_queue.empty(): + key_char, is_pressed = self.event_queue.get_nowait() + if is_pressed: + self.current_pressed[key_char] = True + else: + # Only remove key if it's being released + self.current_pressed.pop(key_char, None) + + @check_if_not_connected + def get_action(self) -> RobotAction: + """ + Get the current action based on pressed keys. 
+ + Returns: + RobotAction with 'linear.vel' and 'angular.vel' keys + """ + before_read_t = time.perf_counter() + + self._drain_pressed_keys() + + linear_velocity = 0.0 + angular_velocity = 0.0 + + # Check which keys are currently pressed (not released) + active_keys = {key for key, is_pressed in self.current_pressed.items() if is_pressed} + + # Linear movement (W/S) - these take priority + if "w" in active_keys: + linear_velocity = self.current_linear_speed + elif "s" in active_keys: + linear_velocity = -self.current_linear_speed + + # Turning (A/D/Q/E) + if "d" in active_keys: + angular_velocity = -self.current_angular_speed + if linear_velocity == 0: # If not moving forward/back, add slight forward motion + linear_velocity = self.current_linear_speed * self.config.turn_assist_ratio + elif "a" in active_keys: + angular_velocity = self.current_angular_speed + if linear_velocity == 0: # If not moving forward/back, add slight forward motion + linear_velocity = self.current_linear_speed * self.config.turn_assist_ratio + elif "q" in active_keys: + angular_velocity = self.current_angular_speed + linear_velocity = 0 # Rotate in place + elif "e" in active_keys: + angular_velocity = -self.current_angular_speed + linear_velocity = 0 # Rotate in place + + # Stop (X) - overrides everything + if "x" in active_keys: + linear_velocity = 0 + angular_velocity = 0 + + # Speed adjustment + if "+" in active_keys or "=" in active_keys: + self.current_linear_speed += self.config.speed_increment + self.current_angular_speed += self.config.speed_increment * self.config.angular_speed_ratio + logging.info( + f"Speed increased: linear={self.current_linear_speed:.2f}, angular={self.current_angular_speed:.2f}" + ) + if "-" in active_keys: + self.current_linear_speed = max( + self.config.min_linear_speed, self.current_linear_speed - self.config.speed_increment + ) + self.current_angular_speed = max( + self.config.min_angular_speed, + self.current_angular_speed - self.config.speed_increment * self.config.angular_speed_ratio, + ) + logging.info( + f"Speed decreased: linear={self.current_linear_speed:.2f}, angular={self.current_angular_speed:.2f}" + ) + + self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t + + return { + "linear.vel": linear_velocity, + "angular.vel": angular_velocity, + } diff --git a/lerobot/src/lerobot/teleoperators/koch_leader/__init__.py b/lerobot/src/lerobot/teleoperators/koch_leader/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6cab71363020e1026760b075d1101fc5f7b884f7 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/koch_leader/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .config_koch_leader import KochLeaderConfig +from .koch_leader import KochLeader diff --git a/lerobot/src/lerobot/teleoperators/koch_leader/config_koch_leader.py b/lerobot/src/lerobot/teleoperators/koch_leader/config_koch_leader.py new file mode 100644 index 0000000000000000000000000000000000000000..d8023c910ce8f0a1a42c1630d7b987f145239719 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/koch_leader/config_koch_leader.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +from ..config import TeleoperatorConfig + + +@TeleoperatorConfig.register_subclass("koch_leader") +@dataclass +class KochLeaderConfig(TeleoperatorConfig): + # Port to connect to the arm + port: str + + # Sets the arm in torque mode with the gripper motor set to this value. This makes it possible to squeeze + # the gripper and have it spring back to an open position on its own. + gripper_open_pos: float = 50.0 diff --git a/lerobot/src/lerobot/teleoperators/koch_leader/koch_leader.py b/lerobot/src/lerobot/teleoperators/koch_leader/koch_leader.py new file mode 100644 index 0000000000000000000000000000000000000000..e796c20d04f00d133e69591ac6c7c06e480772da --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/koch_leader/koch_leader.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
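+
+"""
+Koch leader arm teleoperator.
+
+Example (a minimal usage sketch; the serial port is hypothetical and the
+import path assumes the package exports shown in its `__init__.py`):
+```python
+from lerobot.teleoperators.koch_leader import KochLeader, KochLeaderConfig
+
+leader = KochLeader(KochLeaderConfig(port="/dev/ttyACM0"))
+leader.connect()  # runs calibration first if needed
+action = leader.get_action()  # {"shoulder_pan.pos": ..., ..., "gripper.pos": ...}
+leader.disconnect()
+```
+"""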
+ +import logging +import time + +from lerobot.motors import Motor, MotorCalibration, MotorNormMode +from lerobot.motors.dynamixel import ( + DriveMode, + DynamixelMotorsBus, + OperatingMode, +) +from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected + +from ..teleoperator import Teleoperator +from .config_koch_leader import KochLeaderConfig + +logger = logging.getLogger(__name__) + + +class KochLeader(Teleoperator): + """ + - [Koch v1.0](https://github.com/AlexanderKoch-Koch/low_cost_robot), with and without the wrist-to-elbow + expansion, developed by Alexander Koch from [Tau Robotics](https://tau-robotics.com) + - [Koch v1.1](https://github.com/jess-moss/koch-v1-1) developed by Jess Moss + """ + + config_class = KochLeaderConfig + name = "koch_leader" + + def __init__(self, config: KochLeaderConfig): + super().__init__(config) + self.config = config + self.bus = DynamixelMotorsBus( + port=self.config.port, + motors={ + "shoulder_pan": Motor(1, "xl330-m077", MotorNormMode.RANGE_M100_100), + "shoulder_lift": Motor(2, "xl330-m077", MotorNormMode.RANGE_M100_100), + "elbow_flex": Motor(3, "xl330-m077", MotorNormMode.RANGE_M100_100), + "wrist_flex": Motor(4, "xl330-m077", MotorNormMode.RANGE_M100_100), + "wrist_roll": Motor(5, "xl330-m077", MotorNormMode.RANGE_M100_100), + "gripper": Motor(6, "xl330-m077", MotorNormMode.RANGE_0_100), + }, + calibration=self.calibration, + ) + + @property + def action_features(self) -> dict[str, type]: + return {f"{motor}.pos": float for motor in self.bus.motors} + + @property + def feedback_features(self) -> dict[str, type]: + return {} + + @property + def is_connected(self) -> bool: + return self.bus.is_connected + + @check_if_already_connected + def connect(self, calibrate: bool = True) -> None: + self.bus.connect() + if not self.is_calibrated and calibrate: + logger.info( + "Mismatch between calibration values in the motor and the calibration file or no calibration file found" + ) + self.calibrate() + + self.configure() + logger.info(f"{self} connected.") + + @property + def is_calibrated(self) -> bool: + return self.bus.is_calibrated + + def calibrate(self) -> None: + self.bus.disable_torque() + if self.calibration: + # Calibration file exists, ask user whether to use it or run new calibration + user_input = input( + f"Press ENTER to use provided calibration file associated with the id {self.id}, or type 'c' and press ENTER to run calibration: " + ) + if user_input.strip().lower() != "c": + logger.info(f"Writing calibration file associated with the id {self.id} to the motors") + self.bus.write_calibration(self.calibration) + return + logger.info(f"\nRunning calibration of {self}") + for motor in self.bus.motors: + self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value) + + self.bus.write("Drive_Mode", "elbow_flex", DriveMode.INVERTED.value) + drive_modes = {motor: 1 if motor == "elbow_flex" else 0 for motor in self.bus.motors} + + input(f"Move {self} to the middle of its range of motion and press ENTER....") + homing_offsets = self.bus.set_half_turn_homings() + + full_turn_motors = ["shoulder_pan", "wrist_roll"] + unknown_range_motors = [motor for motor in self.bus.motors if motor not in full_turn_motors] + print( + f"Move all joints except {full_turn_motors} sequentially through their " + "entire ranges of motion.\nRecording positions. Press ENTER to stop..." 
+        )
+        range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
+        for motor in full_turn_motors:
+            range_mins[motor] = 0
+            range_maxes[motor] = 4095
+
+        self.calibration = {}
+        for motor, m in self.bus.motors.items():
+            self.calibration[motor] = MotorCalibration(
+                id=m.id,
+                drive_mode=drive_modes[motor],
+                homing_offset=homing_offsets[motor],
+                range_min=range_mins[motor],
+                range_max=range_maxes[motor],
+            )
+
+        self.bus.write_calibration(self.calibration)
+        self._save_calibration()
+        logger.info(f"Calibration saved to {self.calibration_fpath}")
+
+    def configure(self) -> None:
+        self.bus.disable_torque()
+        self.bus.configure_motors()
+        for motor in self.bus.motors:
+            if motor != "gripper":
+                # Use 'extended position mode' for all motors except gripper, because in joint mode the servos
+                # can't rotate more than 360 degrees (from 0 to 4095). Also, if a mistake is made while
+                # assembling the arm, you could end up with a servo positioned at 0 or 4095 at a crucial
+                # point of its range of motion.
+                self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
+
+        # Use 'position control current based' for the gripper so that it is limited by its current limit.
+        # For the follower gripper, this means it can grasp an object without forcing too much even though
+        # its goal position is a complete grasp (both gripper fingers are commanded to join and touch).
+        # For the leader gripper, it means we can use it as a physical trigger: we can push it with a finger
+        # to make it move, and it will move back to its original target position when we release the force.
+        self.bus.write("Operating_Mode", "gripper", OperatingMode.CURRENT_POSITION.value)
+        # Set gripper's goal pos in current position mode so that we can use it as a trigger.
+        self.bus.enable_torque("gripper")
+        if self.is_calibrated:
+            self.bus.write("Goal_Position", "gripper", self.config.gripper_open_pos)
+
+    def setup_motors(self) -> None:
+        for motor in reversed(self.bus.motors):
+            input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+            self.bus.setup_motor(motor)
+            print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+    @check_if_not_connected
+    def get_action(self) -> dict[str, float]:
+        start = time.perf_counter()
+        action = self.bus.sync_read("Present_Position")
+        action = {f"{motor}.pos": val for motor, val in action.items()}
+        dt_ms = (time.perf_counter() - start) * 1e3
+        logger.debug(f"{self} read action: {dt_ms:.1f}ms")
+        return action
+
+    def send_feedback(self, feedback: dict[str, float]) -> None:
+        # TODO(rcadene, aliberts): Implement force feedback
+        raise NotImplementedError
+
+    @check_if_not_connected
+    def disconnect(self) -> None:
+        self.bus.disconnect()
+        logger.info(f"{self} disconnected.")
diff --git a/lerobot/src/lerobot/teleoperators/omx_leader/__init__.py b/lerobot/src/lerobot/teleoperators/omx_leader/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..efbe335ee16646ce6d044f0a1ef3712f5674ba4f
--- /dev/null
+++ b/lerobot/src/lerobot/teleoperators/omx_leader/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .config_omx_leader import OmxLeaderConfig +from .omx_leader import OmxLeader diff --git a/lerobot/src/lerobot/teleoperators/omx_leader/config_omx_leader.py b/lerobot/src/lerobot/teleoperators/omx_leader/config_omx_leader.py new file mode 100644 index 0000000000000000000000000000000000000000..cbb83e66a7f81f6d22a8aaef744422da7775183f --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/omx_leader/config_omx_leader.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +from ..config import TeleoperatorConfig + + +@TeleoperatorConfig.register_subclass("omx_leader") +@dataclass +class OmxLeaderConfig(TeleoperatorConfig): + # Port to connect to the arm + port: str + + # Sets the arm in torque mode with the gripper motor set to this value. This makes it possible to squeeze + # the gripper and have it spring back to an open position on its own. + gripper_open_pos: float = 37.0 diff --git a/lerobot/src/lerobot/teleoperators/omx_leader/omx_leader.py b/lerobot/src/lerobot/teleoperators/omx_leader/omx_leader.py new file mode 100644 index 0000000000000000000000000000000000000000..63d0408ee894d74da665065cc7127849ff817a8d --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/omx_leader/omx_leader.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
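Unlike the Koch leader above, the OMX leader defined below writes a fixed, factory-default calibration instead of recording joint ranges interactively. A sketch of the per-motor entry its `calibrate()` builds (field values taken from the code below; the variable name is illustrative):

```python
from lerobot.motors import MotorCalibration

# drive_mode is 1 only for the (inverted) gripper; the factory zero is trusted
# (homing_offset=0), and the full 0-4095 range (one revolution in Dynamixel
# position units) is used for every joint.
example_entry = MotorCalibration(
    id=1,
    drive_mode=0,
    homing_offset=0,
    range_min=0,
    range_max=4095,
)
```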
+
+import logging
+import time
+
+from lerobot.motors import Motor, MotorCalibration, MotorNormMode
+from lerobot.motors.dynamixel import (
+    DriveMode,
+    DynamixelMotorsBus,
+    OperatingMode,
+)
+from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
+
+from ..teleoperator import Teleoperator
+from .config_omx_leader import OmxLeaderConfig
+
+logger = logging.getLogger(__name__)
+
+
+class OmxLeader(Teleoperator):
+    """
+    - [OMX](https://github.com/ROBOTIS-GIT/open_manipulator),
+      developed by Woojin Wie and Junha Cha from [ROBOTIS](https://ai.robotis.com/)
+    """
+
+    config_class = OmxLeaderConfig
+    name = "omx_leader"
+
+    def __init__(self, config: OmxLeaderConfig):
+        super().__init__(config)
+        self.config = config
+        self.bus = DynamixelMotorsBus(
+            port=self.config.port,
+            motors={
+                "shoulder_pan": Motor(1, "xl330-m288", MotorNormMode.RANGE_M100_100),
+                "shoulder_lift": Motor(2, "xl330-m288", MotorNormMode.RANGE_M100_100),
+                "elbow_flex": Motor(3, "xl330-m288", MotorNormMode.RANGE_M100_100),
+                "wrist_flex": Motor(4, "xl330-m288", MotorNormMode.RANGE_M100_100),
+                "wrist_roll": Motor(5, "xl330-m288", MotorNormMode.RANGE_M100_100),
+                "gripper": Motor(6, "xl330-m077", MotorNormMode.RANGE_0_100),
+            },
+            calibration=self.calibration,
+        )
+
+    @property
+    def action_features(self) -> dict[str, type]:
+        return {f"{motor}.pos": float for motor in self.bus.motors}
+
+    @property
+    def feedback_features(self) -> dict[str, type]:
+        return {}
+
+    @property
+    def is_connected(self) -> bool:
+        return self.bus.is_connected
+
+    @check_if_already_connected
+    def connect(self, calibrate: bool = True) -> None:
+        self.bus.connect()
+        if not self.is_calibrated and calibrate:
+            logger.info(
+                "Mismatch between calibration values in the motor and the calibration file or no calibration file found"
+            )
+            self.calibrate()
+
+        self.configure()
+        logger.info(f"{self} connected.")
+
+    @property
+    def is_calibrated(self) -> bool:
+        return self.bus.is_calibrated
+
+    def calibrate(self) -> None:
+        self.bus.disable_torque()
+        logger.info(f"\nUsing factory default calibration values for {self}")
+        logger.info(f"\nWriting default configuration of {self} to the motors")
+        for motor in self.bus.motors:
+            self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
+
+        for motor in self.bus.motors:
+            if motor == "gripper":
+                self.bus.write("Drive_Mode", motor, DriveMode.INVERTED.value)
+            else:
+                self.bus.write("Drive_Mode", motor, DriveMode.NON_INVERTED.value)
+        drive_modes = {motor: 1 if motor == "gripper" else 0 for motor in self.bus.motors}
+
+        self.calibration = {}
+        for motor, m in self.bus.motors.items():
+            self.calibration[motor] = MotorCalibration(
+                id=m.id,
+                drive_mode=drive_modes[motor],
+                homing_offset=0,
+                range_min=0,
+                range_max=4095,
+            )
+
+        self.bus.write_calibration(self.calibration)
+        self._save_calibration()
+        logger.info(f"Calibration saved to {self.calibration_fpath}")
+
+    def configure(self) -> None:
+        self.bus.disable_torque()
+        self.bus.configure_motors()
+        for motor in self.bus.motors:
+            if motor != "gripper":
+                # Use 'extended position mode' for all motors except gripper, because in joint mode the servos
+                # can't rotate more than 360 degrees (from 0 to 4095). Also, if a mistake is made while
+                # assembling the arm, you could end up with a servo positioned at 0 or 4095 at a crucial
+                # point of its range of motion.
+                self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)
+
+        # Use 'position control current based' for the gripper so that it is limited by its current limit.
+        # For the follower gripper, this means it can grasp an object without forcing too much even though
+        # its goal position is a complete grasp (both gripper fingers are commanded to join and touch).
+        # For the leader gripper, it means we can use it as a physical trigger: we can push it with a finger
+        # to make it move, and it will move back to its original target position when we release the force.
+        self.bus.write("Operating_Mode", "gripper", OperatingMode.CURRENT_POSITION.value)
+        # Set gripper's goal pos in current position mode so that we can use it as a trigger.
+        self.bus.enable_torque("gripper")
+        if self.is_calibrated:
+            self.bus.write("Goal_Position", "gripper", self.config.gripper_open_pos)
+
+    def setup_motors(self) -> None:
+        for motor in reversed(self.bus.motors):
+            input(f"Connect the controller board to the '{motor}' motor only and press enter.")
+            self.bus.setup_motor(motor)
+            print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")
+
+    @check_if_not_connected
+    def get_action(self) -> dict[str, float]:
+        start = time.perf_counter()
+        action = self.bus.sync_read("Present_Position")
+        action = {f"{motor}.pos": val for motor, val in action.items()}
+        dt_ms = (time.perf_counter() - start) * 1e3
+        logger.debug(f"{self} read action: {dt_ms:.1f}ms")
+        return action
+
+    def send_feedback(self, feedback: dict[str, float]) -> None:
+        # TODO(rcadene, aliberts): Implement force feedback
+        raise NotImplementedError
+
+    @check_if_not_connected
+    def disconnect(self) -> None:
+        self.bus.disconnect()
+        logger.info(f"{self} disconnected.")
diff --git a/lerobot/src/lerobot/teleoperators/phone/__init__.py b/lerobot/src/lerobot/teleoperators/phone/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..967a7b3b4ceaac6bb975b06b2fe7c0bc744b3779
--- /dev/null
+++ b/lerobot/src/lerobot/teleoperators/phone/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .config_phone import PhoneConfig
+from .teleop_phone import Phone
diff --git a/lerobot/src/lerobot/teleoperators/phone/config_phone.py b/lerobot/src/lerobot/teleoperators/phone/config_phone.py
new file mode 100644
index 0000000000000000000000000000000000000000..121042b1ba4457e1cdee1f4525d206e31e18f0e7
--- /dev/null
+++ b/lerobot/src/lerobot/teleoperators/phone/config_phone.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+from enum import Enum
+
+import numpy as np
+
+from ..config import TeleoperatorConfig
+
+
+class PhoneOS(Enum):
+    ANDROID = "android"
+    IOS = "ios"
+
+
+@TeleoperatorConfig.register_subclass("phone")
+@dataclass
+class PhoneConfig(TeleoperatorConfig):
+    phone_os: PhoneOS = PhoneOS.IOS
+    camera_offset = np.array(
+        [0.0, -0.02, 0.04]
+    )  # iPhone 14 Pro camera is 2cm off center and 4cm above center
diff --git a/lerobot/src/lerobot/teleoperators/phone/phone_processor.py b/lerobot/src/lerobot/teleoperators/phone/phone_processor.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f3fa76f0f3989f63e1f4a4344761f05c34c5776
--- /dev/null
+++ b/lerobot/src/lerobot/teleoperators/phone/phone_processor.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+
+from lerobot.configs.types import FeatureType, PipelineFeatureType, PolicyFeature
+from lerobot.processor import ProcessorStepRegistry, RobotAction, RobotActionProcessorStep
+from lerobot.teleoperators.phone.config_phone import PhoneOS
+
+
+@ProcessorStepRegistry.register("map_phone_action_to_robot_action")
+@dataclass
+class MapPhoneActionToRobotAction(RobotActionProcessorStep):
+    """
+    Maps calibrated phone pose actions to standardized robot action inputs.
+
+    This processor step acts as a bridge between the phone teleoperator's output
+    and the robot's expected action format. It remaps the phone's 6-DoF pose
+    (position and rotation) to the robot's target end-effector pose, applying
+    necessary axis inversions and swaps. It also interprets platform-specific
+    button presses to generate a gripper command.
+
+    Attributes:
+        platform: The operating system of the phone (iOS or Android), used
+            to determine the correct button mappings for the gripper.
+    """
+
+    # TODO(Steven): Gripper vel could be output of phone_teleop directly
+    platform: PhoneOS
+    _enabled_prev: bool = field(default=False, init=False, repr=False)
+
+    def action(self, action: RobotAction) -> RobotAction:
+        """
+        Processes the phone action dictionary to create a robot action dictionary.
+
+        Args:
+            action: The input action dictionary from the phone teleoperator.
+
+        Returns:
+            A new action dictionary formatted for the robot controller.
+
+        Raises:
+            ValueError: If 'pos' or 'rot' keys are missing from the input action.
+ """ + # Pop them from the action + enabled = bool(action.pop("phone.enabled")) + pos = action.pop("phone.pos") + rot = action.pop("phone.rot") + inputs = action.pop("phone.raw_inputs") + + if pos is None or rot is None: + raise ValueError("pos and rot must be present in action") + + rotvec = rot.as_rotvec() # Absolute orientation as rotvec + + # Map certain inputs to certain actions + if self.platform == PhoneOS.IOS: + gripper_vel = float(inputs.get("a3", 0.0)) + else: + a = float(inputs.get("reservedButtonA", 0.0)) + b = float(inputs.get("reservedButtonB", 0.0)) + gripper_vel = ( + a - b + ) # Positive if a is pressed, negative if b is pressed, 0 if both or neither are pressed + + # For some actions we need to invert the axis + action["enabled"] = enabled + action["target_x"] = -pos[1] if enabled else 0.0 + action["target_y"] = pos[0] if enabled else 0.0 + action["target_z"] = pos[2] if enabled else 0.0 + action["target_wx"] = rotvec[1] if enabled else 0.0 + action["target_wy"] = rotvec[0] if enabled else 0.0 + action["target_wz"] = -rotvec[2] if enabled else 0.0 + action["gripper_vel"] = gripper_vel # Still send gripper action when disabled + return action + + def transform_features( + self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]] + ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]: + for feat in ["enabled", "pos", "rot", "raw_inputs"]: + features[PipelineFeatureType.ACTION].pop(f"phone.{feat}", None) + + for feat in [ + "enabled", + "target_x", + "target_y", + "target_z", + "target_wx", + "target_wy", + "target_wz", + "gripper_vel", + ]: + features[PipelineFeatureType.ACTION][f"{feat}"] = PolicyFeature( + type=FeatureType.ACTION, shape=(1,) + ) + + return features diff --git a/lerobot/src/lerobot/teleoperators/phone/teleop_phone.py b/lerobot/src/lerobot/teleoperators/phone/teleop_phone.py new file mode 100644 index 0000000000000000000000000000000000000000..a13d78a7576ecf388b9f57ba2a3328710bc5d61e --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/phone/teleop_phone.py @@ -0,0 +1,415 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
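A hypothetical round-trip through `MapPhoneActionToRobotAction` above, showing where the axis swaps land. The input values are made up, the rotation is the identity, and the iOS button layout is assumed (analog channel `a3` drives the gripper):

```python
import numpy as np

from lerobot.teleoperators.phone.config_phone import PhoneOS
from lerobot.teleoperators.phone.phone_processor import MapPhoneActionToRobotAction
from lerobot.utils.rotation import Rotation

step = MapPhoneActionToRobotAction(platform=PhoneOS.IOS)
phone_action = {
    "phone.enabled": True,
    "phone.pos": np.array([0.10, 0.02, 0.05]),
    "phone.rot": Rotation.from_quat(np.array([0.0, 0.0, 0.0, 1.0])),  # identity, xyzw
    "phone.raw_inputs": {"a3": 0.5},
}
robot_action = step.action(phone_action)
# target_x == -0.02 (phone y, negated), target_y == 0.10 (phone x),
# target_z == 0.05, all target_w* == 0.0, gripper_vel == 0.5
```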
+
+# Docs:
+# hebi: https://docs.hebi.us/tools.html#mobile-io
+# teleop: https://github.com/SpesRobotics/teleop
+
+import logging
+import threading
+import time
+
+import hebi
+import numpy as np
+from teleop import Teleop
+
+from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS
+from lerobot.teleoperators.teleoperator import Teleoperator
+from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
+from lerobot.utils.rotation import Rotation
+
+logger = logging.getLogger(__name__)
+
+
+class BasePhone:
+    _enabled: bool = False
+    _calib_pos: np.ndarray | None = None
+    _calib_rot_inv: Rotation | None = None
+
+    def _reapply_position_calibration(self, pos: np.ndarray) -> None:
+        self._calib_pos = pos.copy()
+
+    @property
+    def is_calibrated(self) -> bool:
+        return (self._calib_pos is not None) and (self._calib_rot_inv is not None)
+
+    @property
+    def action_features(self) -> dict[str, type]:
+        return {
+            "phone.pos": np.ndarray,  # shape (3,)
+            "phone.rot": Rotation,  # scipy.spatial.transform.Rotation
+            "phone.raw_inputs": dict,  # analogs/buttons or webXR meta
+            "phone.enabled": bool,
+        }
+
+    @property
+    def feedback_features(self) -> dict[str, type]:
+        # No haptic or other feedback implemented yet
+        return {}
+
+    def configure(self) -> None:
+        # No additional configuration required for phone teleop
+        pass
+
+    def send_feedback(self, feedback: dict[str, float]) -> None:
+        # We could add haptic feedback (vibrations) here, but it's not implemented yet
+        raise NotImplementedError
+
+
+class IOSPhone(BasePhone, Teleoperator):
+    name = "ios_phone"
+
+    def __init__(self, config: PhoneConfig):
+        super().__init__(config)
+        self.config = config
+        self._group = None
+
+    @property
+    def is_connected(self) -> bool:
+        return self._group is not None
+
+    @check_if_already_connected
+    def connect(self) -> None:
+        logger.info("Connecting to iPhone, make sure to open the HEBI Mobile I/O app.")
+        lookup = hebi.Lookup()
+        time.sleep(2.0)
+        group = lookup.get_group_from_names(["HEBI"], ["mobileIO"])
+        if group is None:
+            raise RuntimeError("Mobile I/O not found — check name/family settings in the app.")
+        self._group = group
+        logger.info(f"{self} connected to HEBI group with {group.size} module(s).")
+
+        self.calibrate()
+
+    def calibrate(self) -> None:
+        print(
+            "Hold the phone so that: top edge points forward in same direction as the robot (robot +x) and screen points up (robot +z)"
+        )
+        print("Press and hold B1 in the HEBI Mobile I/O app to capture this pose...\n")
+        position, rotation = self._wait_for_capture_trigger()
+        self._calib_pos = position.copy()
+        self._calib_rot_inv = rotation.inv()
+        self._enabled = False
+        print("Calibration done\n")
+
+    def _wait_for_capture_trigger(self) -> tuple[np.ndarray, Rotation]:
+        """
+        Blocks execution until the calibration trigger is detected from the iOS device.
+
+        This method enters a loop, continuously reading the phone's state. It waits for the user to press
+        and hold the 'B1' button in the HEBI Mobile I/O app. Once B1 is pressed, the loop breaks and
+        returns the phone's pose at that exact moment.
+
+        Returns:
+            A tuple containing the position (np.ndarray) and rotation (Rotation) of the phone at the
+            moment the trigger was activated.
+ """ + while True: + has_pose, position, rotation, fb_pose = self._read_current_pose() + if not has_pose: + time.sleep(0.01) + continue + + io = getattr(fb_pose, "io", None) + button_b = getattr(io, "b", None) if io is not None else None + button_b1_pressed = False + if button_b is not None: + button_b1_pressed = bool(button_b.get_int(1)) + if button_b1_pressed: + return position, rotation + + time.sleep(0.01) + + def _read_current_pose(self) -> tuple[bool, np.ndarray | None, Rotation | None, object | None]: + """ + Reads the instantaneous 6-DoF pose from the connected iOS device via the HEBI SDK. + + This method fetches the latest feedback packet from the HEBI group, extracts the ARKit + position and orientation, and converts them into a standard format. It also applies a + configured camera offset to adjust the pose from the camera's frame to the phone's + physical frame. + + Returns: + A tuple containing: + - A boolean indicating if a valid pose was successfully read. + - The 3D position as a NumPy array, or None if not available. + - The orientation as a `Rotation` object, or None if not available. + - The raw HEBI feedback object for accessing other data like button presses. + """ + fbk = self._group.get_next_feedback() + pose = fbk[0] + ar_pos = getattr(pose, "ar_position", None) + ar_quat = getattr(pose, "ar_orientation", None) + if ar_pos is None or ar_quat is None: + return False, None, None, None + # HEBI provides orientation in w, x, y, z format. + # Scipy's Rotation expects x, y, z, w. + quat_xyzw = np.concatenate((ar_quat[1:], [ar_quat[0]])) # wxyz to xyzw + rot = Rotation.from_quat(quat_xyzw) + pos = ar_pos - rot.apply(self.config.camera_offset) + return True, pos, rot, pose + + @check_if_not_connected + def get_action(self) -> dict: + has_pose, raw_position, raw_rotation, fb_pose = self._read_current_pose() + if not has_pose or not self.is_calibrated: + return {} + + # Collect raw inputs (B1 / analogs on iOS, move/scale on Android) + raw_inputs: dict[str, float | int | bool] = {} + io = getattr(fb_pose, "io", None) + if io is not None: + bank_a, bank_b = io.a, io.b + if bank_a: + for ch in range(1, 9): + if bank_a.has_float(ch): + raw_inputs[f"a{ch}"] = float(bank_a.get_float(ch)) + if bank_b: + for ch in range(1, 9): + if bank_b.has_int(ch): + raw_inputs[f"b{ch}"] = int(bank_b.get_int(ch)) + elif hasattr(bank_b, "has_bool") and bank_b.has_bool(ch): + raw_inputs[f"b{ch}"] = int(bank_b.get_bool(ch)) + + enable = bool(raw_inputs.get("b1", 0)) + + # Rising edge then re-capture calibration immediately from current raw pose + if enable and not self._enabled: + self._reapply_position_calibration(raw_position) + + # Apply calibration + pos_cal = self._calib_rot_inv.apply(raw_position - self._calib_pos) + rot_cal = self._calib_rot_inv * raw_rotation + + self._enabled = enable + + return { + "phone.pos": pos_cal, + "phone.rot": rot_cal, + "phone.raw_inputs": raw_inputs, + "phone.enabled": self._enabled, + } + + @check_if_not_connected + def disconnect(self) -> None: + self._group = None + + +class AndroidPhone(BasePhone, Teleoperator): + name = "android_phone" + + def __init__(self, config: PhoneConfig): + super().__init__(config) + self.config = config + self._teleop = None + self._teleop_thread = None + self._latest_pose = None + self._latest_message = None + self._android_lock = threading.Lock() + + @property + def is_connected(self) -> bool: + return self._teleop is not None + + @check_if_already_connected + def connect(self) -> None: + logger.info("Starting teleop stream for 
Android...") + self._teleop = Teleop() + self._teleop.subscribe(self._android_callback) + self._teleop_thread = threading.Thread(target=self._teleop.run, daemon=True) + self._teleop_thread.start() + logger.info(f"{self} connected, teleop stream started.") + + self.calibrate() + + def calibrate(self) -> None: + print( + "Hold the phone so that: top edge points forward in same direction as the robot (robot +x) and screen points up (robot +z)" + ) + print("Touch and move on the WebXR page to capture this pose...\n") + + pos, rot = self._wait_for_capture_trigger() + self._calib_pos = pos.copy() + self._calib_rot_inv = rot.inv() + self._enabled = False + print("Calibration done\n") + + def _wait_for_capture_trigger(self) -> tuple[np.ndarray, Rotation]: + """ + Blocks execution until the calibration trigger is detected from the Android device. + + This method enters a loop, continuously checking the latest message received from the WebXR + session. It waits for the user to touch and move their finger on the screen, which generates + a `move` event. Once this event is detected, the loop breaks and returns the phone's current + pose. + + Returns: + A tuple containing the position (np.ndarray) and rotation (Rotation) of the phone at the + moment the trigger was activated. + """ + while True: + with self._android_lock: + msg = self._latest_message or {} + + if bool(msg.get("move", False)): + ok, pos, rot, _pose = self._read_current_pose() + if ok: + return pos, rot + + time.sleep(0.01) + + def _read_current_pose(self) -> tuple[bool, np.ndarray | None, Rotation | None, object | None]: + """ + Reads the latest 6-DoF pose received from the Android device's WebXR session. + + This method accesses the most recent pose data stored by the `_android_callback`. It uses a + thread lock to safely read the shared `_latest_pose` variable. The pose, a 4x4 matrix, is + then decomposed into position and rotation, and the configured camera offset is applied. + + Returns: + A tuple containing: + - A boolean indicating if a valid pose was available. + - The 3D position as a NumPy array, or None if no pose has been received yet. + - The orientation as a `Rotation` object, or None if no pose has been received. + - The raw 4x4 pose matrix as received from the teleop stream. + """ + with self._android_lock: + if self._latest_pose is None: + return False, None, None, None + p = self._latest_pose.copy() + pose = self._latest_pose + rot = Rotation.from_matrix(p[:3, :3]) + pos = p[:3, 3] - rot.apply(self.config.camera_offset) + return True, pos, rot, pose + + def _android_callback(self, pose: np.ndarray, message: dict) -> None: + """ + Callback function to handle incoming data from the Android teleop stream. + + This method is executed by the `teleop` package's subscriber thread whenever a new + pose and message are received from the WebXR session on the Android phone. It updates + the internal state (`_latest_pose` and `_latest_message`) with the new data. + A thread lock is used to ensure that these shared variables are updated atomically, + preventing race conditions with the main thread that reads them. + + Args: + pose: A 4x4 NumPy array representing the phone's transformation matrix. + message: A dictionary containing additional data, such as button presses or touch events. 
+        """
+        with self._android_lock:
+            self._latest_pose = pose
+            self._latest_message = message
+
+    @check_if_not_connected
+    def get_action(self) -> dict:
+        ok, raw_pos, raw_rot, pose = self._read_current_pose()
+        if not ok or not self.is_calibrated:
+            return {}
+
+        # Collect raw inputs (B1 / analogs on iOS, move/scale on Android)
+        raw_inputs: dict[str, float | int | bool] = {}
+        msg = self._latest_message or {}
+        raw_inputs["move"] = bool(msg.get("move", False))
+        raw_inputs["scale"] = float(msg.get("scale", 1.0))
+        raw_inputs["reservedButtonA"] = bool(msg.get("reservedButtonA", False))
+        raw_inputs["reservedButtonB"] = bool(msg.get("reservedButtonB", False))
+
+        enable = bool(raw_inputs.get("move", False))
+
+        # Rising edge then re-capture calibration immediately from current raw pose
+        if enable and not self._enabled:
+            self._reapply_position_calibration(raw_pos)
+
+        # Apply calibration
+        pos_cal = self._calib_rot_inv.apply(raw_pos - self._calib_pos)
+        rot_cal = self._calib_rot_inv * raw_rot
+
+        self._enabled = enable
+
+        return {
+            "phone.pos": pos_cal,
+            "phone.rot": rot_cal,
+            "phone.raw_inputs": raw_inputs,
+            "phone.enabled": self._enabled,
+        }
+
+    @check_if_not_connected
+    def disconnect(self) -> None:
+        self._teleop = None
+        if self._teleop_thread and self._teleop_thread.is_alive():
+            self._teleop_thread.join(timeout=1.0)
+        self._teleop_thread = None
+        self._latest_pose = None
+
+
+class Phone(Teleoperator):
+    """
+    Phone-based teleoperator using ARKit (iOS via the HEBI Mobile I/O app) or the teleop Python package (Android via the WebXR API).
+    For HEBI Mobile I/O we also expose 8 analog (a1-a8) and 8 digital (b1-b8) inputs.
+
+    Press and hold **B1** (iOS) or touch and move on the WebXR page (Android) to enable teleoperation.
+    Calibration captures a reference pose and rotation; each time teleoperation is re-enabled, the position reference is re-captured from the current phone pose.
+ """ + + config_class = PhoneConfig + name = "phone" + + def __init__(self, config: PhoneConfig): + super().__init__(config) + self.config = config + + self._phone_impl: Teleoperator + + if self.config.phone_os == PhoneOS.IOS: + self._phone_impl = IOSPhone(config) + elif self.config.phone_os == PhoneOS.ANDROID: + self._phone_impl = AndroidPhone(config) + else: + raise ValueError(f"Invalid config phone_os: {self.config.phone_os}") + + @property + def is_connected(self) -> bool: + return self._phone_impl.is_connected + + def connect(self) -> None: + return self._phone_impl.connect() + + def calibrate(self) -> None: + return self._phone_impl.calibrate() + + @property + def is_calibrated(self) -> bool: + return self._phone_impl.is_calibrated + + @property + def action_features(self) -> dict[str, type]: + return self._phone_impl.action_features + + @property + def feedback_features(self) -> dict[str, type]: + return self._phone_impl.feedback_features + + def configure(self) -> None: + return self._phone_impl.configure() + + def get_action(self) -> dict: + return self._phone_impl.get_action() + + def send_feedback(self, feedback: dict[str, float]) -> None: + return self._phone_impl.send_feedback(feedback) + + def disconnect(self) -> None: + return self._phone_impl.disconnect() diff --git a/lerobot/src/lerobot/teleoperators/reachy2_teleoperator/config_reachy2_teleoperator.py b/lerobot/src/lerobot/teleoperators/reachy2_teleoperator/config_reachy2_teleoperator.py new file mode 100644 index 0000000000000000000000000000000000000000..eb301450337d4a5d8cf77b4c325c5556ed9ccff9 --- /dev/null +++ b/lerobot/src/lerobot/teleoperators/reachy2_teleoperator/config_reachy2_teleoperator.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
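A minimal usage sketch of the `Phone` wrapper above (iOS path shown; `connect()` blocks in `calibrate()` until B1 is held in the Mobile I/O app):

```python
from lerobot.teleoperators.phone import Phone, PhoneConfig
from lerobot.teleoperators.phone.config_phone import PhoneOS

phone = Phone(PhoneConfig(phone_os=PhoneOS.IOS))
phone.connect()  # discovers the HEBI group, then waits for the calibration pose
action = phone.get_action()
# {} until a pose is available; otherwise phone.pos / phone.rot /
# phone.raw_inputs / phone.enabled, ready for MapPhoneActionToRobotAction
phone.disconnect()
```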
+ +from dataclasses import dataclass + +from ..config import TeleoperatorConfig + + +@TeleoperatorConfig.register_subclass("reachy2_teleoperator") +@dataclass +class Reachy2TeleoperatorConfig(TeleoperatorConfig): + # IP address of the Reachy 2 robot used as teleoperator + ip_address: str | None = "localhost" + + # Whether to use the present position of the joints as actions + # if False, the goal position of the joints will be used + use_present_position: bool = False + + # Which parts of the robot to use + with_mobile_base: bool = True + with_l_arm: bool = True + with_r_arm: bool = True + with_neck: bool = True + with_antennas: bool = True + + def __post_init__(self): + if not ( + self.with_mobile_base + or self.with_l_arm + or self.with_r_arm + or self.with_neck + or self.with_antennas + ): + raise ValueError( + "No Reachy2Teleoperator part used.\n" + "At least one part of the robot must be set to True " + "(with_mobile_base, with_l_arm, with_r_arm, with_neck, with_antennas)" + ) diff --git a/lerobot/src/lerobot/templates/lerobot_modelcard_template.md b/lerobot/src/lerobot/templates/lerobot_modelcard_template.md new file mode 100644 index 0000000000000000000000000000000000000000..48b9b96a02a3acccba9b7e229aec947947958e69 --- /dev/null +++ b/lerobot/src/lerobot/templates/lerobot_modelcard_template.md @@ -0,0 +1,91 @@ +--- +# For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 +# Doc / guide: https://huggingface.co/docs/hub/model-cards +# prettier-ignore +{{card_data}} +--- + +# Model Card for {{ model_name | default("Model ID", true) }} + + + +{% if model_name == "smolvla" %} +[SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. +{% elif model_name == "act" %} +[Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high success rates. +{% elif model_name == "tdmpc" %} +[TD-MPC](https://huggingface.co/papers/2203.04955) combines model-free and model-based approaches to improve sample efficiency and performance in continuous control tasks by using a learned latent dynamics model and terminal value function. +{% elif model_name == "diffusion" %} +[Diffusion Policy](https://huggingface.co/papers/2303.04137) treats visuomotor control as a generative diffusion process, producing smooth, multi-step action trajectories that excel at contact-rich manipulation. +{% elif model_name == "vqbet" %} +[VQ-BET](https://huggingface.co/papers/2403.03181) combines vector-quantised action tokens with Behaviour Transformers to discretise control and achieve data-efficient imitation across diverse skills. +{% elif model_name == "pi0" %} +**π₀ (Pi0)** + +π₀ is a Vision-Language-Action model for general robot control, from Physical Intelligence. The LeRobot implementation is adapted from their open source OpenPI repository. + +**Model Overview** + +π₀ represents a breakthrough in robotics as the first general-purpose robot foundation model developed by Physical Intelligence. 
Unlike traditional robots that are narrow specialists programmed for repetitive motions, π₀ is designed to be a generalist policy that can understand visual inputs, interpret natural language instructions, and control a variety of different robots across diverse tasks.
+
+For more details, see the [Physical Intelligence π₀ blog post](https://www.physicalintelligence.company/blog/pi0).
+{% elif model_name == "pi05" %}
+**π₀.₅ (Pi05) Policy**
+
+π₀.₅ is a Vision-Language-Action model with open-world generalization, from Physical Intelligence. The LeRobot implementation is adapted from their open source OpenPI repository.
+
+**Model Overview**
+
+π₀.₅ represents a significant evolution from π₀, developed by Physical Intelligence to address a big challenge in robotics: open-world generalization. While robots can perform impressive tasks in controlled environments, π₀.₅ is designed to generalize to entirely new environments and situations that were never seen during training.
+
+For more details, see the [Physical Intelligence π₀.₅ blog post](https://www.physicalintelligence.company/blog/pi05).
+{% elif model_name == "sac" %}
+[Soft Actor-Critic (SAC)](https://huggingface.co/papers/1801.01290) is an entropy-regularised actor-critic algorithm offering stable, sample-efficient learning in continuous-control environments.
+{% elif model_name == "reward_classifier" %}
+A reward classifier is a lightweight neural network that scores observations or trajectories for task success, providing a learned reward signal or offline evaluation when explicit rewards are unavailable.
+{% else %}
+_Model type not recognized — please update this template._
+{% endif %}
+
+This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
+See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).
+
+---
+
+## How to Get Started with the Model
+
+For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
+Below is a short version of how to train and run inference/evaluation:
+
+### Train from scratch
+
+```bash
+lerobot-train \
+  --dataset.repo_id=${HF_USER}/ \
+  --policy.type=act \
+  --output_dir=outputs/train/ \
+  --job_name=lerobot_training \
+  --policy.device=cuda \
+  --policy.repo_id=${HF_USER}/ \
+  --wandb.enable=true
+```
+
+_Writes checkpoints to `outputs/train//checkpoints/`._
+
+### Evaluate the policy/run inference
+
+```bash
+lerobot-record \
+  --robot.type=so100_follower \
+  --dataset.repo_id=/eval_ \
+  --policy.path=/ \
+  --episodes=10
+```
+
+Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a local or hub checkpoint.
+
+---
+
+## Model Details
+
+- **License:** {{ license | default("\[More Information Needed]", true) }}
diff --git a/lerobot/src/lerobot/transport/services.proto b/lerobot/src/lerobot/transport/services.proto
new file mode 100644
index 0000000000000000000000000000000000000000..1917bd7d7791d80f124c90769ad527400c74f3c1
--- /dev/null
+++ b/lerobot/src/lerobot/transport/services.proto
@@ -0,0 +1,87 @@
+// Copyright 2024 The HuggingFace Inc. team.
+// All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// To generate the classes for the transport part (services_pb2.py and services_pb2_grpc.py) use the following command:
+//
+// python -m grpc_tools.protoc -I src --python_out=src --grpc_python_out=src src/lerobot/transport/services.proto
+//
+// The command should be launched from the root of the project.
+
+syntax = "proto3";
+
+package transport;
+
+// LearnerService: the Actor calls this to push transitions.
+// The Learner implements this service.
+service LearnerService {
+  // Actor -> Learner to store transitions
+  rpc StreamParameters(Empty) returns (stream Parameters);
+  rpc SendTransitions(stream Transition) returns (Empty);
+  rpc SendInteractions(stream InteractionMessage) returns (Empty);
+  rpc Ready(Empty) returns (Empty);
+}
+
+// AsyncInference: from Robot perspective
+// Robot send observations to & executes action received from a remote Policy server
+service AsyncInference {
+  // Robot -> Policy to share observations with a remote inference server
+  // Policy -> Robot to share actions predicted for given observations
+  rpc SendObservations(stream Observation) returns (Empty);
+  rpc GetActions(Empty) returns (Actions);
+  rpc SendPolicyInstructions(PolicySetup) returns (Empty);
+  rpc Ready(Empty) returns (Empty);
+}
+
+enum TransferState {
+  TRANSFER_UNKNOWN = 0;
+  TRANSFER_BEGIN = 1;
+  TRANSFER_MIDDLE = 2;
+  TRANSFER_END = 3;
+}
+
+// Messages
+message Transition {
+  TransferState transfer_state = 1;
+  bytes data = 2;
+}
+
+message Parameters {
+  TransferState transfer_state = 1;
+  bytes data = 2;
+}
+
+message InteractionMessage {
+  TransferState transfer_state = 1;
+  bytes data = 2;
+}
+
+// Messages
+message Observation {
+  // sent by Robot, to remote Policy
+  TransferState transfer_state = 1; // Observations can be streamed exceeding 4MB of size
+  bytes data = 2;
+}
+
+message Actions {
+  // sent by remote Policy, to Robot
+  bytes data = 1;
+}
+
+message PolicySetup {
+  // sent by Robot to remote server, to init Policy
+  bytes data = 1;
+}
+
+message Empty {}
diff --git a/lerobot/src/lerobot/transport/services_pb2.py b/lerobot/src/lerobot/transport/services_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..5bab324f2b68d93cdb6de229070c01c546c976fb
--- /dev/null
+++ b/lerobot/src/lerobot/transport/services_pb2.py
@@ -0,0 +1,53 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE +# source: lerobot/transport/services.proto +# Protobuf Python Version: 6.31.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 31, + 0, + '', + 'lerobot/transport/services.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n lerobot/transport/services.proto\x12\ttransport\"L\n\nTransition\x12\x30\n\x0etransfer_state\x18\x01 \x01(\x0e\x32\x18.transport.TransferState\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"L\n\nParameters\x12\x30\n\x0etransfer_state\x18\x01 \x01(\x0e\x32\x18.transport.TransferState\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"T\n\x12InteractionMessage\x12\x30\n\x0etransfer_state\x18\x01 \x01(\x0e\x32\x18.transport.TransferState\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"M\n\x0bObservation\x12\x30\n\x0etransfer_state\x18\x01 \x01(\x0e\x32\x18.transport.TransferState\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\x17\n\x07\x41\x63tions\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\"\x1b\n\x0bPolicySetup\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\"\x07\n\x05\x45mpty*`\n\rTransferState\x12\x14\n\x10TRANSFER_UNKNOWN\x10\x00\x12\x12\n\x0eTRANSFER_BEGIN\x10\x01\x12\x13\n\x0fTRANSFER_MIDDLE\x10\x02\x12\x10\n\x0cTRANSFER_END\x10\x03\x32\x81\x02\n\x0eLearnerService\x12=\n\x10StreamParameters\x12\x10.transport.Empty\x1a\x15.transport.Parameters0\x01\x12<\n\x0fSendTransitions\x12\x15.transport.Transition\x1a\x10.transport.Empty(\x01\x12\x45\n\x10SendInteractions\x12\x1d.transport.InteractionMessage\x1a\x10.transport.Empty(\x01\x12+\n\x05Ready\x12\x10.transport.Empty\x1a\x10.transport.Empty2\xf5\x01\n\x0e\x41syncInference\x12>\n\x10SendObservations\x12\x16.transport.Observation\x1a\x10.transport.Empty(\x01\x12\x32\n\nGetActions\x12\x10.transport.Empty\x1a\x12.transport.Actions\x12\x42\n\x16SendPolicyInstructions\x12\x16.transport.PolicySetup\x1a\x10.transport.Empty\x12+\n\x05Ready\x12\x10.transport.Empty\x1a\x10.transport.Emptyb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'lerobot.transport.services_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + _globals['_TRANSFERSTATE']._serialized_start=431 + _globals['_TRANSFERSTATE']._serialized_end=527 + _globals['_TRANSITION']._serialized_start=47 + _globals['_TRANSITION']._serialized_end=123 + _globals['_PARAMETERS']._serialized_start=125 + _globals['_PARAMETERS']._serialized_end=201 + _globals['_INTERACTIONMESSAGE']._serialized_start=203 + _globals['_INTERACTIONMESSAGE']._serialized_end=287 + _globals['_OBSERVATION']._serialized_start=289 + _globals['_OBSERVATION']._serialized_end=366 + _globals['_ACTIONS']._serialized_start=368 + _globals['_ACTIONS']._serialized_end=391 + _globals['_POLICYSETUP']._serialized_start=393 + _globals['_POLICYSETUP']._serialized_end=420 + _globals['_EMPTY']._serialized_start=422 + _globals['_EMPTY']._serialized_end=429 + _globals['_LEARNERSERVICE']._serialized_start=530 + _globals['_LEARNERSERVICE']._serialized_end=787 + 
_globals['_ASYNCINFERENCE']._serialized_start=790 + _globals['_ASYNCINFERENCE']._serialized_end=1035 +# @@protoc_insertion_point(module_scope) diff --git a/lerobot/src/lerobot/transport/services_pb2_grpc.py b/lerobot/src/lerobot/transport/services_pb2_grpc.py new file mode 100644 index 0000000000000000000000000000000000000000..26e5c68186dac26e56570f2d98a752aaf9f1cd39 --- /dev/null +++ b/lerobot/src/lerobot/transport/services_pb2_grpc.py @@ -0,0 +1,442 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +from lerobot.transport import services_pb2 as lerobot_dot_transport_dot_services__pb2 + +GRPC_GENERATED_VERSION = '1.73.1' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in lerobot/transport/services_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + + +class LearnerServiceStub: + """LearnerService: the Actor calls this to push transitions. + The Learner implements this service. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.StreamParameters = channel.unary_stream( + '/transport.LearnerService/StreamParameters', + request_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString, + response_deserializer=lerobot_dot_transport_dot_services__pb2.Parameters.FromString, + _registered_method=True) + self.SendTransitions = channel.stream_unary( + '/transport.LearnerService/SendTransitions', + request_serializer=lerobot_dot_transport_dot_services__pb2.Transition.SerializeToString, + response_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString, + _registered_method=True) + self.SendInteractions = channel.stream_unary( + '/transport.LearnerService/SendInteractions', + request_serializer=lerobot_dot_transport_dot_services__pb2.InteractionMessage.SerializeToString, + response_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString, + _registered_method=True) + self.Ready = channel.unary_unary( + '/transport.LearnerService/Ready', + request_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString, + response_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString, + _registered_method=True) + + +class LearnerServiceServicer: + """LearnerService: the Actor calls this to push transitions. + The Learner implements this service. 
+ """ + + def StreamParameters(self, request, context): + """Actor -> Learner to store transitions + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SendTransitions(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SendInteractions(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Ready(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_LearnerServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'StreamParameters': grpc.unary_stream_rpc_method_handler( + servicer.StreamParameters, + request_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString, + response_serializer=lerobot_dot_transport_dot_services__pb2.Parameters.SerializeToString, + ), + 'SendTransitions': grpc.stream_unary_rpc_method_handler( + servicer.SendTransitions, + request_deserializer=lerobot_dot_transport_dot_services__pb2.Transition.FromString, + response_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString, + ), + 'SendInteractions': grpc.stream_unary_rpc_method_handler( + servicer.SendInteractions, + request_deserializer=lerobot_dot_transport_dot_services__pb2.InteractionMessage.FromString, + response_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString, + ), + 'Ready': grpc.unary_unary_rpc_method_handler( + servicer.Ready, + request_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString, + response_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'transport.LearnerService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('transport.LearnerService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class LearnerService: + """LearnerService: the Actor calls this to push transitions. + The Learner implements this service. 
+ """ + + @staticmethod + def StreamParameters(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/transport.LearnerService/StreamParameters', + lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString, + lerobot_dot_transport_dot_services__pb2.Parameters.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SendTransitions(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_unary( + request_iterator, + target, + '/transport.LearnerService/SendTransitions', + lerobot_dot_transport_dot_services__pb2.Transition.SerializeToString, + lerobot_dot_transport_dot_services__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SendInteractions(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_unary( + request_iterator, + target, + '/transport.LearnerService/SendInteractions', + lerobot_dot_transport_dot_services__pb2.InteractionMessage.SerializeToString, + lerobot_dot_transport_dot_services__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Ready(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/transport.LearnerService/Ready', + lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString, + lerobot_dot_transport_dot_services__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + +class AsyncInferenceStub: + """AsyncInference: from Robot perspective + Robot send observations to & executes action received from a remote Policy server + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+        """
+        self.SendObservations = channel.stream_unary(
+            '/transport.AsyncInference/SendObservations',
+            request_serializer=lerobot_dot_transport_dot_services__pb2.Observation.SerializeToString,
+            response_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
+            _registered_method=True)
+        self.GetActions = channel.unary_unary(
+            '/transport.AsyncInference/GetActions',
+            request_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
+            response_deserializer=lerobot_dot_transport_dot_services__pb2.Actions.FromString,
+            _registered_method=True)
+        self.SendPolicyInstructions = channel.unary_unary(
+            '/transport.AsyncInference/SendPolicyInstructions',
+            request_serializer=lerobot_dot_transport_dot_services__pb2.PolicySetup.SerializeToString,
+            response_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
+            _registered_method=True)
+        self.Ready = channel.unary_unary(
+            '/transport.AsyncInference/Ready',
+            request_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
+            response_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
+            _registered_method=True)
+
+
+class AsyncInferenceServicer:
+    """AsyncInference: from the Robot's perspective.
+    The Robot sends observations to, and executes actions received from, a remote Policy server.
+    """
+
+    def SendObservations(self, request_iterator, context):
+        """Robot -> Policy to share observations with a remote inference server
+        """
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def GetActions(self, request, context):
+        """Policy -> Robot to share actions predicted for given observations
+        """
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def SendPolicyInstructions(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+    def Ready(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+
+def add_AsyncInferenceServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+        'SendObservations': grpc.stream_unary_rpc_method_handler(
+            servicer.SendObservations,
+            request_deserializer=lerobot_dot_transport_dot_services__pb2.Observation.FromString,
+            response_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
+        ),
+        'GetActions': grpc.unary_unary_rpc_method_handler(
+            servicer.GetActions,
+            request_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
+            response_serializer=lerobot_dot_transport_dot_services__pb2.Actions.SerializeToString,
+        ),
+        'SendPolicyInstructions': grpc.unary_unary_rpc_method_handler(
+            servicer.SendPolicyInstructions,
+            request_deserializer=lerobot_dot_transport_dot_services__pb2.PolicySetup.FromString,
+            response_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
+        ),
+        'Ready': grpc.unary_unary_rpc_method_handler(
+            servicer.Ready,
+            request_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
+
response_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'transport.AsyncInference', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('transport.AsyncInference', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class AsyncInference: + """AsyncInference: from Robot perspective + Robot send observations to & executes action received from a remote Policy server + """ + + @staticmethod + def SendObservations(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_unary( + request_iterator, + target, + '/transport.AsyncInference/SendObservations', + lerobot_dot_transport_dot_services__pb2.Observation.SerializeToString, + lerobot_dot_transport_dot_services__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetActions(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/transport.AsyncInference/GetActions', + lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString, + lerobot_dot_transport_dot_services__pb2.Actions.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SendPolicyInstructions(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/transport.AsyncInference/SendPolicyInstructions', + lerobot_dot_transport_dot_services__pb2.PolicySetup.SerializeToString, + lerobot_dot_transport_dot_services__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Ready(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/transport.AsyncInference/Ready', + lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString, + lerobot_dot_transport_dot_services__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/lerobot/src/lerobot/transport/utils.py b/lerobot/src/lerobot/transport/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..23e21b7b3fab6e085f5ec3b707a0e62ad16b74b3 --- /dev/null +++ b/lerobot/src/lerobot/transport/utils.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import json
+import logging
+import pickle  # nosec B403: Safe usage for internal serialization only
+from multiprocessing.synchronize import Event as MpEvent
+from queue import Queue
+from typing import Any
+
+import torch
+
+from lerobot.transport import services_pb2
+from lerobot.utils.transition import Transition
+
+# FIX for protobuf: Assign the enum to a variable and ignore the type error once
+TransferState = services_pb2.TransferState  # type: ignore[attr-defined]
+
+CHUNK_SIZE = 2 * 1024 * 1024  # 2 MB
+MAX_MESSAGE_SIZE = 4 * 1024 * 1024  # 4 MB
+
+
+def bytes_buffer_size(buffer: io.BytesIO) -> int:
+    """Return the total number of bytes in `buffer` and rewind it to the start."""
+    buffer.seek(0, io.SEEK_END)
+    result = buffer.tell()
+    buffer.seek(0)
+    return result
+
+
+def send_bytes_in_chunks(buffer: bytes, message_class: Any, log_prefix: str = "", silent: bool = True):
+    """Yield `message_class` messages that split `buffer` into CHUNK_SIZE pieces,
+    tagging each chunk with its transfer state (BEGIN/MIDDLE/END)."""
+    bytes_buffer: io.BytesIO = io.BytesIO(buffer)
+    size_in_bytes = bytes_buffer_size(bytes_buffer)
+
+    sent_bytes = 0
+
+    logging_method = logging.info if not silent else logging.debug
+
+    logging_method(f"{log_prefix} Buffer size {size_in_bytes / 1024 / 1024:.2f} MB")
+
+    while sent_bytes < size_in_bytes:
+        transfer_state = TransferState.TRANSFER_MIDDLE
+
+        if sent_bytes + CHUNK_SIZE >= size_in_bytes:
+            transfer_state = TransferState.TRANSFER_END
+        elif sent_bytes == 0:
+            transfer_state = TransferState.TRANSFER_BEGIN
+
+        size_to_read = min(CHUNK_SIZE, size_in_bytes - sent_bytes)
+        chunk = bytes_buffer.read(size_to_read)
+
+        yield message_class(transfer_state=transfer_state, data=chunk)
+        sent_bytes += size_to_read
+        logging_method(f"{log_prefix} Sent {sent_bytes}/{size_in_bytes} bytes with state {transfer_state}")
+
+    logging_method(f"{log_prefix} Published {sent_bytes / 1024 / 1024:.2f} MB")
+
+
+def receive_bytes_in_chunks(iterator, queue: Queue | None, shutdown_event: MpEvent, log_prefix: str = ""):
+    """Reassemble chunked messages from `iterator`; put each completed payload on
+    `queue`, or return the first completed payload when `queue` is None."""
+    bytes_buffer = io.BytesIO()
+    step = 0
+
+    logging.info(f"{log_prefix} Starting receiver")
+    for item in iterator:
+        logging.debug(f"{log_prefix} Received item")
+        if shutdown_event.is_set():
+            logging.info(f"{log_prefix} Shutting down receiver")
+            return
+
+        if item.transfer_state == TransferState.TRANSFER_BEGIN:
+            bytes_buffer.seek(0)
+            bytes_buffer.truncate(0)
+            bytes_buffer.write(item.data)
+            logging.debug(f"{log_prefix} Received data at step 0")
+            step = 0
+        elif item.transfer_state == TransferState.TRANSFER_MIDDLE:
+            bytes_buffer.write(item.data)
+            step += 1
+            logging.debug(f"{log_prefix} Received data at step {step}")
+        elif item.transfer_state == TransferState.TRANSFER_END:
+            bytes_buffer.write(item.data)
+            logging.debug(f"{log_prefix} Received data at step end size {bytes_buffer_size(bytes_buffer)}")
+
+            if queue is not None:
+                queue.put(bytes_buffer.getvalue())
+            else:
+                return bytes_buffer.getvalue()
+
+            bytes_buffer.seek(0)
+            bytes_buffer.truncate(0)
+            step = 0
+
+            logging.debug(f"{log_prefix} Queue updated")
+        else:
+            logging.warning(f"{log_prefix} Received unknown transfer state {item.transfer_state}")
+            raise ValueError(f"Received unknown transfer state {item.transfer_state}")
+
+
+def state_to_bytes(state_dict: dict[str, torch.Tensor]) -> bytes:
+    """Serialize a model state dict to bytes for transmission."""
+    bytes_buffer = io.BytesIO()
+
+    torch.save(state_dict, bytes_buffer)
+
+    return bytes_buffer.getvalue()
+
+
+def bytes_to_state_dict(buffer: bytes) -> dict[str, torch.Tensor]:
+    bytes_buffer = io.BytesIO(buffer)
+    bytes_buffer.seek(0)
+    return torch.load(bytes_buffer, weights_only=True)
+
+
+def python_object_to_bytes(python_object: Any) -> bytes:
+    return pickle.dumps(python_object)
+
+
+def bytes_to_python_object(buffer: bytes) -> Any:
+    bytes_buffer = io.BytesIO(buffer)
+    bytes_buffer.seek(0)
+    obj = pickle.load(bytes_buffer)  # nosec B301: Safe usage of pickle.load
+    # Add validation checks here
+    return obj
+
+
+def bytes_to_transitions(buffer: bytes) -> list[Transition]:
+    bytes_buffer = io.BytesIO(buffer)
+    bytes_buffer.seek(0)
+    transitions = torch.load(bytes_buffer, weights_only=True)
+    return transitions
+
+
+def transitions_to_bytes(transitions: list[Transition]) -> bytes:
+    bytes_buffer = io.BytesIO()
+    torch.save(transitions, bytes_buffer)
+    return bytes_buffer.getvalue()
+
+
+def grpc_channel_options(
+    max_receive_message_length: int = MAX_MESSAGE_SIZE,
+    max_send_message_length: int = MAX_MESSAGE_SIZE,
+    enable_retries: bool = True,
+    initial_backoff: str = "0.1s",
+    max_attempts: int = 5,
+    backoff_multiplier: float = 2,
+    max_backoff: str = "2s",
+):
+    """Build gRPC channel options with message-size limits and a default retry policy."""
+    service_config = {
+        "methodConfig": [
+            {
+                "name": [{}],  # Applies to ALL methods in ALL services
+                "retryPolicy": {
+                    "maxAttempts": max_attempts,  # Total attempts, including the first call
+                    "initialBackoff": initial_backoff,  # Delay before the first retry
+                    "maxBackoff": max_backoff,  # Max wait time between retries
+                    "backoffMultiplier": backoff_multiplier,  # Exponential backoff factor
+                    "retryableStatusCodes": [
+                        "UNAVAILABLE",
+                        "DEADLINE_EXCEEDED",
+                    ],  # Retries on network failures
+                },
+            }
+        ]
+    }
+
+    service_config_json = json.dumps(service_config)
+
+    retries_option = 1 if enable_retries else 0
+
+    return [
+        ("grpc.max_receive_message_length", max_receive_message_length),
+        ("grpc.max_send_message_length", max_send_message_length),
+        ("grpc.enable_retries", retries_option),
+        ("grpc.service_config", service_config_json),
+    ]
diff --git a/lerobot/src/lerobot/utils/constants.py b/lerobot/src/lerobot/utils/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..55f003aa1d63b6795ee9dd6c741d3fa8f3c412cc
--- /dev/null
+++ b/lerobot/src/lerobot/utils/constants.py
@@ -0,0 +1,88 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+from pathlib import Path
+
+from huggingface_hub.constants import HF_HOME
+
+# keys
+OBS_STR = "observation"
+OBS_PREFIX = OBS_STR + "."
+OBS_ENV_STATE = OBS_STR + ".environment_state" +OBS_STATE = OBS_STR + ".state" +OBS_IMAGE = OBS_STR + ".image" +OBS_IMAGES = OBS_IMAGE + "s" +OBS_LANGUAGE = OBS_STR + ".language" +OBS_LANGUAGE_TOKENS = OBS_LANGUAGE + ".tokens" +OBS_LANGUAGE_ATTENTION_MASK = OBS_LANGUAGE + ".attention_mask" + +ACTION = "action" +ACTION_PREFIX = ACTION + "." +ACTION_TOKENS = ACTION + ".tokens" +ACTION_TOKEN_MASK = ACTION + ".token_mask" +REWARD = "next.reward" +TRUNCATED = "next.truncated" +DONE = "next.done" +INFO = "info" + +ROBOTS = "robots" +TELEOPERATORS = "teleoperators" + +# files & directories +CHECKPOINTS_DIR = "checkpoints" +LAST_CHECKPOINT_LINK = "last" +PRETRAINED_MODEL_DIR = "pretrained_model" +TRAINING_STATE_DIR = "training_state" +RNG_STATE = "rng_state.safetensors" +TRAINING_STEP = "training_step.json" +OPTIMIZER_STATE = "optimizer_state.safetensors" +OPTIMIZER_PARAM_GROUPS = "optimizer_param_groups.json" +SCHEDULER_STATE = "scheduler_state.json" + +POLICY_PREPROCESSOR_DEFAULT_NAME = "policy_preprocessor" +POLICY_POSTPROCESSOR_DEFAULT_NAME = "policy_postprocessor" + +if "LEROBOT_HOME" in os.environ: + raise ValueError( + f"You have a 'LEROBOT_HOME' environment variable set to '{os.getenv('LEROBOT_HOME')}'.\n" + "'LEROBOT_HOME' is deprecated, please use 'HF_LEROBOT_HOME' instead." + ) + +# cache dir +default_cache_path = Path(HF_HOME) / "lerobot" +HF_LEROBOT_HOME = Path(os.getenv("HF_LEROBOT_HOME", default_cache_path)).expanduser() + +# calibration dir +default_calibration_path = HF_LEROBOT_HOME / "calibration" +HF_LEROBOT_CALIBRATION = Path(os.getenv("HF_LEROBOT_CALIBRATION", default_calibration_path)).expanduser() + + +# streaming datasets +LOOKBACK_BACKTRACKTABLE = 100 +LOOKAHEAD_BACKTRACKTABLE = 100 + +# openpi +OPENPI_ATTENTION_MASK_VALUE = -2.3819763e38 # TODO(pepijn): Modify this when extending support to fp8 models + +# Constants for LIBERO observation keys +LIBERO_KEY_EEF_POS = "robot_state/eef/pos" +LIBERO_KEY_EEF_QUAT = "robot_state/eef/quat" +LIBERO_KEY_EEF_MAT = "robot_state/eef/mat" +LIBERO_KEY_EEF_AXISANGLE = "robot_state/eef/axisangle" +LIBERO_KEY_GRIPPER_QPOS = "robot_state/gripper/qpos" +LIBERO_KEY_GRIPPER_QVEL = "robot_state/gripper/qvel" +LIBERO_KEY_JOINTS_POS = "robot_state/joints/pos" +LIBERO_KEY_JOINTS_VEL = "robot_state/joints/vel" +LIBERO_KEY_PIXELS_AGENTVIEW = "pixels/agentview_image" +LIBERO_KEY_PIXELS_EYE_IN_HAND = "pixels/robot0_eye_in_hand_image" diff --git a/lerobot/src/lerobot/utils/control_utils.py b/lerobot/src/lerobot/utils/control_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ffb75663ef3dc26ad176b31d8f16010f08da7497 --- /dev/null +++ b/lerobot/src/lerobot/utils/control_utils.py @@ -0,0 +1,235 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +######################################################################################## +# Utilities +######################################################################################## + + +import logging +import traceback +from contextlib import nullcontext +from copy import copy +from functools import cache +from typing import Any + +import numpy as np +import torch +from deepdiff import DeepDiff + +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.datasets.utils import DEFAULT_FEATURES +from lerobot.policies.pretrained import PreTrainedPolicy +from lerobot.policies.utils import prepare_observation_for_inference +from lerobot.processor import PolicyAction, PolicyProcessorPipeline +from lerobot.robots import Robot + + +@cache +def is_headless(): + """ + Detects if the Python script is running in a headless environment (e.g., without a display). + + This function attempts to import `pynput`, a library that requires a graphical environment. + If the import fails, it assumes the environment is headless. The result is cached to avoid + re-running the check. + + Returns: + True if the environment is determined to be headless, False otherwise. + """ + try: + import pynput # noqa + + return False + except Exception: + print( + "Error trying to import pynput. Switching to headless mode. " + "As a result, the video stream from the cameras won't be shown, " + "and you won't be able to change the control flow with keyboards. " + "For more info, see traceback below.\n" + ) + traceback.print_exc() + print() + return True + + +def predict_action( + observation: dict[str, np.ndarray], + policy: PreTrainedPolicy, + device: torch.device, + preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], + postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction], + use_amp: bool, + task: str | None = None, + robot_type: str | None = None, +): + """ + Performs a single-step inference to predict a robot action from an observation. + + This function encapsulates the full inference pipeline: + 1. Prepares the observation by converting it to PyTorch tensors and adding a batch dimension. + 2. Runs the preprocessor pipeline on the observation. + 3. Feeds the processed observation to the policy to get a raw action. + 4. Runs the postprocessor pipeline on the raw action. + 5. Formats the final action by removing the batch dimension and moving it to the CPU. + + Args: + observation: A dictionary of NumPy arrays representing the robot's current observation. + policy: The `PreTrainedPolicy` model to use for action prediction. + device: The `torch.device` (e.g., 'cuda' or 'cpu') to run inference on. + preprocessor: The `PolicyProcessorPipeline` for preprocessing observations. + postprocessor: The `PolicyProcessorPipeline` for postprocessing actions. + use_amp: A boolean to enable/disable Automatic Mixed Precision for CUDA inference. + task: An optional string identifier for the task. + robot_type: An optional string identifier for the robot type. + + Returns: + A `torch.Tensor` containing the predicted action, ready for the robot. 
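+
+    Example (a minimal sketch; `robot`, the processor pipelines, and the policy
+    are assumed to be constructed elsewhere):
+    ```python
+    obs = robot.get_observation()
+    action = predict_action(
+        obs, policy, torch.device("cuda"), preprocessor, postprocessor, use_amp=False
+    )
+    ```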
+    """
+    observation = copy(observation)
+    with (
+        torch.inference_mode(),
+        torch.autocast(device_type=device.type) if device.type == "cuda" and use_amp else nullcontext(),
+    ):
+        # Convert to pytorch format: channel first and float32 in [0,1] with batch dimension
+        observation = prepare_observation_for_inference(observation, device, task, robot_type)
+        observation = preprocessor(observation)
+
+        # Compute the next action with the policy
+        # based on the current observation
+        action = policy.select_action(observation)
+
+        action = postprocessor(action)
+
+    return action
+
+
+def init_keyboard_listener():
+    """
+    Initializes a non-blocking keyboard listener for real-time user interaction.
+
+    This function sets up a listener for specific keys (right arrow, left arrow, escape) to control
+    the program flow during execution, such as stopping recording or exiting loops. It gracefully
+    handles headless environments where keyboard listening is not possible.
+
+    Returns:
+        A tuple containing:
+        - The `pynput.keyboard.Listener` instance, or `None` if in a headless environment.
+        - A dictionary of event flags (e.g., `exit_early`) that are set by key presses.
+    """
+    # Allows exiting early while recording an episode or resetting the environment
+    # by tapping the right arrow key '->'. This might require sudo permissions
+    # to allow your terminal to monitor keyboard events.
+    events = {}
+    events["exit_early"] = False
+    events["rerecord_episode"] = False
+    events["stop_recording"] = False
+
+    if is_headless():
+        logging.warning(
+            "Headless environment detected. On-screen cameras display and keyboard inputs will not be available."
+        )
+        listener = None
+        return listener, events
+
+    # Only import pynput if not in a headless environment
+    from pynput import keyboard
+
+    def on_press(key):
+        try:
+            if key == keyboard.Key.right:
+                print("Right arrow key pressed. Exiting loop...")
+                events["exit_early"] = True
+            elif key == keyboard.Key.left:
+                print("Left arrow key pressed. Exiting loop and re-recording the last episode...")
+                events["rerecord_episode"] = True
+                events["exit_early"] = True
+            elif key == keyboard.Key.esc:
+                print("Escape key pressed. Stopping data recording...")
+                events["stop_recording"] = True
+                events["exit_early"] = True
+        except Exception as e:
+            print(f"Error handling key press: {e}")
+
+    listener = keyboard.Listener(on_press=on_press)
+    listener.start()
+
+    return listener, events
+
+
+def sanity_check_dataset_name(repo_id, policy_cfg):
+    """
+    Validates the dataset repository name against the presence of a policy configuration.
+
+    This function enforces a naming convention: a dataset repository ID should start with "eval_"
+    if and only if a policy configuration is provided for evaluation purposes.
+
+    Args:
+        repo_id: The Hugging Face Hub repository ID of the dataset.
+        policy_cfg: The configuration object for the policy, or `None`.
+
+    Raises:
+        ValueError: If the naming convention is violated.
+    """
+    _, dataset_name = repo_id.split("/")
+    # Either repo_id doesn't start with "eval_" and there is no policy,
+    # or repo_id starts with "eval_" and there is a policy.
+
+    # Check if dataset_name starts with "eval_" but policy is missing
+    if dataset_name.startswith("eval_") and policy_cfg is None:
+        raise ValueError(
+            f"Your dataset name begins with 'eval_' ({dataset_name}), but no policy is provided."
+ ) + + # Check if dataset_name does not start with "eval_" but policy is provided + if not dataset_name.startswith("eval_") and policy_cfg is not None: + raise ValueError( + f"Your dataset name does not begin with 'eval_' ({dataset_name}), but a policy is provided ({policy_cfg.type})." + ) + + +def sanity_check_dataset_robot_compatibility( + dataset: LeRobotDataset, robot: Robot, fps: int, features: dict +) -> None: + """ + Checks if a dataset's metadata is compatible with the current robot and recording setup. + + This function compares key metadata fields (`robot_type`, `fps`, and `features`) from the + dataset against the current configuration to ensure that appended data will be consistent. + + Args: + dataset: The `LeRobotDataset` instance to check. + robot: The `Robot` instance representing the current hardware setup. + fps: The current recording frequency (frames per second). + features: The dictionary of features for the current recording session. + + Raises: + ValueError: If any of the checked metadata fields do not match. + """ + fields = [ + ("robot_type", dataset.meta.robot_type, robot.robot_type), + ("fps", dataset.fps, fps), + ("features", dataset.features, {**features, **DEFAULT_FEATURES}), + ] + + mismatches = [] + for field, dataset_value, present_value in fields: + diff = DeepDiff(dataset_value, present_value, exclude_regex_paths=[r".*\['info'\]$"]) + if diff: + mismatches.append(f"{field}: expected {present_value}, got {dataset_value}") + + if mismatches: + raise ValueError( + "Dataset metadata compatibility check failed with mismatches:\n" + "\n".join(mismatches) + ) diff --git a/lerobot/src/lerobot/utils/decorators.py b/lerobot/src/lerobot/utils/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..149b6ed98b957ab8d0f9fd9cc15b4a3fce499ac4 --- /dev/null +++ b/lerobot/src/lerobot/utils/decorators.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python + +# Copyright 2026 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from functools import wraps + +from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError + + +def check_if_not_connected(func): + @wraps(func) + def wrapper(self, *args, **kwargs): + if not self.is_connected: + raise DeviceNotConnectedError( + f"{self.__class__.__name__} is not connected. Run `.connect()` first." + ) + return func(self, *args, **kwargs) + + return wrapper + + +def check_if_already_connected(func): + @wraps(func) + def wrapper(self, *args, **kwargs): + if self.is_connected: + raise DeviceAlreadyConnectedError(f"{self.__class__.__name__} is already connected.") + return func(self, *args, **kwargs) + + return wrapper diff --git a/lerobot/src/lerobot/utils/errors.py b/lerobot/src/lerobot/utils/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..b791eb0b9ae593dc2297a07a0e32fff844b02060 --- /dev/null +++ b/lerobot/src/lerobot/utils/errors.py @@ -0,0 +1,32 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class DeviceNotConnectedError(ConnectionError): + """Exception raised when the device is not connected.""" + + def __init__(self, message="This device is not connected. Try calling `connect()` first."): + self.message = message + super().__init__(self.message) + + +class DeviceAlreadyConnectedError(ConnectionError): + """Exception raised when the device is already connected.""" + + def __init__( + self, + message="This device is already connected. Try not calling `connect()` twice.", + ): + self.message = message + super().__init__(self.message) diff --git a/lerobot/src/lerobot/utils/hub.py b/lerobot/src/lerobot/utils/hub.py new file mode 100644 index 0000000000000000000000000000000000000000..a810068ffc886c918944861b30a4349a298da66b --- /dev/null +++ b/lerobot/src/lerobot/utils/hub.py @@ -0,0 +1,203 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import builtins +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import Any, TypeVar + +from huggingface_hub import HfApi +from huggingface_hub.utils import validate_hf_hub_args + +T = TypeVar("T", bound="HubMixin") + + +class HubMixin: + """ + A Mixin containing the functionality to push an object to the hub. + + This is similar to huggingface_hub.ModelHubMixin but is lighter and makes less assumptions about its + subclasses (in particular, the fact that it's not necessarily a model). + + The inheriting classes must implement '_save_pretrained' and 'from_pretrained'. + """ + + def save_pretrained( + self, + save_directory: str | Path, + *, + repo_id: str | None = None, + push_to_hub: bool = False, + card_kwargs: dict[str, Any] | None = None, + **push_to_hub_kwargs, + ) -> str | None: + """ + Save object in local directory. + + Args: + save_directory (`str` or `Path`): + Path to directory in which the object will be saved. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your object to the Huggingface Hub after saving it. + repo_id (`str`, *optional*): + ID of your repository on the Hub. Used only if `push_to_hub=True`. Will default to the folder name if + not provided. + card_kwargs (`Dict[str, Any]`, *optional*): + Additional arguments passed to the card template to customize the card. + push_to_hub_kwargs: + Additional key word arguments passed along to the [`~HubMixin.push_to_hub`] method. 
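+
+        Example (sketch; the directory and repo id are placeholders):
+        ```python
+        obj.save_pretrained("outputs/run1", push_to_hub=True, repo_id="user/my-policy")
+        ```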
+ Returns: + `str` or `None`: url of the commit on the Hub if `push_to_hub=True`, `None` otherwise. + """ + save_directory = Path(save_directory) + save_directory.mkdir(parents=True, exist_ok=True) + + # save object (weights, files, etc.) + self._save_pretrained(save_directory) + + # push to the Hub if required + if push_to_hub: + if repo_id is None: + repo_id = save_directory.name # Defaults to `save_directory` name + return self.push_to_hub(repo_id=repo_id, card_kwargs=card_kwargs, **push_to_hub_kwargs) + return None + + def _save_pretrained(self, save_directory: Path) -> None: + """ + Overwrite this method in subclass to define how to save your object. + + Args: + save_directory (`str` or `Path`): + Path to directory in which the object files will be saved. + """ + raise NotImplementedError + + @classmethod + @validate_hf_hub_args + def from_pretrained( + cls: builtins.type[T], + pretrained_name_or_path: str | Path, + *, + force_download: bool = False, + resume_download: bool | None = None, + proxies: dict | None = None, + token: str | bool | None = None, + cache_dir: str | Path | None = None, + local_files_only: bool = False, + revision: str | None = None, + **kwargs, + ) -> T: + """ + Download the object from the Huggingface Hub and instantiate it. + + Args: + pretrained_name_or_path (`str`, `Path`): + - Either the `repo_id` (string) of the object hosted on the Hub, e.g. `lerobot/diffusion_pusht`. + - Or a path to a `directory` containing the object files saved using `.save_pretrained`, + e.g., `../path/to/my_model_directory/`. + revision (`str`, *optional*): + Revision on the Hub. Can be a branch name, a git tag or any commit id. + Defaults to the latest commit on `main` branch. + force_download (`bool`, *optional*, defaults to `False`): + Whether to force (re-)downloading the files from the Hub, overriding the existing cache. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on every request. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. By default, it will use the token + cached when running `huggingface-cli login`. + cache_dir (`str`, `Path`, *optional*): + Path to the folder where cached files are stored. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, avoid downloading the file and return the path to the local cached file if it exists. + kwargs (`Dict`, *optional*): + Additional kwargs to pass to the object during initialization. + """ + raise NotImplementedError + + @validate_hf_hub_args + def push_to_hub( + self, + repo_id: str, + *, + commit_message: str | None = None, + private: bool | None = None, + token: str | None = None, + branch: str | None = None, + create_pr: bool | None = None, + allow_patterns: list[str] | str | None = None, + ignore_patterns: list[str] | str | None = None, + delete_patterns: list[str] | str | None = None, + card_kwargs: dict[str, Any] | None = None, + ) -> str: + """ + Upload model checkpoint to the Hub. + + Use `allow_patterns` and `ignore_patterns` to precisely filter which files should be pushed to the hub. Use + `delete_patterns` to delete existing remote files in the same commit. See [`upload_folder`] reference for more + details. + + Args: + repo_id (`str`): + ID of the repository to push to (example: `"username/my-model"`). + commit_message (`str`, *optional*): + Message to commit while pushing. 
+ private (`bool`, *optional*): + Whether the repository created should be private. + If `None` (default), the repo will be public unless the organization's default is private. + token (`str`, *optional*): + The token to use as HTTP bearer authorization for remote files. By default, it will use the token + cached when running `huggingface-cli login`. + branch (`str`, *optional*): + The git branch on which to push the model. This defaults to `"main"`. + create_pr (`boolean`, *optional*): + Whether or not to create a Pull Request from `branch` with that commit. Defaults to `False`. + allow_patterns (`List[str]` or `str`, *optional*): + If provided, only files matching at least one pattern are pushed. + ignore_patterns (`List[str]` or `str`, *optional*): + If provided, files matching any of the patterns are not pushed. + delete_patterns (`List[str]` or `str`, *optional*): + If provided, remote files matching any of the patterns will be deleted from the repo. + card_kwargs (`Dict[str, Any]`, *optional*): + Additional arguments passed to the card template to customize the card. + + Returns: + The url of the commit of your object in the given repository. + """ + api = HfApi(token=token) + repo_id = api.create_repo(repo_id=repo_id, private=private, exist_ok=True).repo_id + + if commit_message is None: + if "Policy" in self.__class__.__name__: + commit_message = "Upload policy" + elif "Config" in self.__class__.__name__: + commit_message = "Upload config" + else: + commit_message = f"Upload {self.__class__.__name__}" + + # Push the files to the repo in a single commit + with TemporaryDirectory(ignore_cleanup_errors=True) as tmp: + saved_path = Path(tmp) / repo_id + self.save_pretrained(saved_path, card_kwargs=card_kwargs) + return api.upload_folder( + repo_id=repo_id, + repo_type="model", + folder_path=saved_path, + commit_message=commit_message, + revision=branch, + create_pr=create_pr, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + delete_patterns=delete_patterns, + ) diff --git a/lerobot/src/lerobot/utils/import_utils.py b/lerobot/src/lerobot/utils/import_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..166cf6426d2284caddde96e33f26303e00c03643 --- /dev/null +++ b/lerobot/src/lerobot/utils/import_utils.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +import importlib.metadata +import logging +from typing import Any + +from draccus.choice_types import ChoiceRegistry + + +def is_package_available(pkg_name: str, return_version: bool = False) -> tuple[bool, str] | bool: + """Copied from https://github.com/huggingface/transformers/blob/main/src/transformers/utils/import_utils.py + Check if the package spec exists and grab its version to avoid importing a local directory. + **Note:** this doesn't work for all packages. 
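+
+    Example (sketch):
+    ```python
+    ok, version = is_package_available("torch", return_version=True)
+    ```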
+ """ + package_exists = importlib.util.find_spec(pkg_name) is not None + package_version = "N/A" + if package_exists: + try: + # Primary method to get the package version + package_version = importlib.metadata.version(pkg_name) + + except importlib.metadata.PackageNotFoundError: + # Fallback method: Only for "torch" and versions containing "dev" + if pkg_name == "torch": + try: + package = importlib.import_module(pkg_name) + temp_version = getattr(package, "__version__", "N/A") + # Check if the version contains "dev" + if "dev" in temp_version: + package_version = temp_version + package_exists = True + else: + package_exists = False + except ImportError: + # If the package can't be imported, it's not available + package_exists = False + elif pkg_name == "grpc": + package = importlib.import_module(pkg_name) + package_version = getattr(package, "__version__", "N/A") + else: + # For packages other than "torch", don't attempt the fallback and set as not available + package_exists = False + logging.debug(f"Detected {pkg_name} version: {package_version}") + if return_version: + return package_exists, package_version + else: + return package_exists + + +_transformers_available = is_package_available("transformers") +_peft_available = is_package_available("peft") +_scipy_available = is_package_available("scipy") +_reachy2_sdk_available = is_package_available("reachy2_sdk") + + +def make_device_from_device_class(config: ChoiceRegistry) -> Any: + """ + Dynamically instantiates an object from its `ChoiceRegistry` configuration. + + This factory uses the module path and class name from the `config` object's + type to locate and instantiate the corresponding device class (not the config). + It derives the device class name by removing a trailing 'Config' from the config + class name and tries a few candidate modules where the device implementation is + commonly located. + """ + if not isinstance(config, ChoiceRegistry): + raise ValueError(f"Config should be an instance of `ChoiceRegistry`, got {type(config)}") + + config_cls = config.__class__ + module_path = config_cls.__module__ # typical: lerobot_teleop_mydevice.config_mydevice + config_name = config_cls.__name__ # typical: MyDeviceConfig + + # Derive device class name (strip "Config") + if not config_name.endswith("Config"): + raise ValueError(f"Config class name '{config_name}' does not end with 'Config'") + + device_class_name = config_name[:-6] # typical: MyDeviceConfig -> MyDevice + + # Build candidate modules to search for the device class + parts = module_path.split(".") + parent_module = ".".join(parts[:-1]) if len(parts) > 1 else module_path + candidates = [ + parent_module, # typical: lerobot_teleop_mydevice + parent_module + "." 
+ device_class_name.lower(), # typical: lerobot_teleop_mydevice.mydevice + ] + + # handle modules named like "config_xxx" -> try replacing that piece with "xxx" + last = parts[-1] if parts else "" + if last.startswith("config_"): + candidates.append(".".join(parts[:-1] + [last.replace("config_", "")])) + + # de-duplicate while preserving order + seen: set[str] = set() + candidates = [c for c in candidates if not (c in seen or seen.add(c))] + + tried: list[str] = [] + for candidate in candidates: + tried.append(candidate) + try: + module = importlib.import_module(candidate) + except ImportError: + continue + + if hasattr(module, device_class_name): + cls = getattr(module, device_class_name) + if callable(cls): + try: + return cls(config) + except TypeError as e: + raise TypeError( + f"Failed to instantiate '{device_class_name}' from module '{candidate}': {e}" + ) from e + + raise ImportError( + f"Could not locate device class '{device_class_name}' for config '{config_name}'. " + f"Tried modules: {tried}. Ensure your device class name is the config class name without " + f"'Config' and that it's importable from one of those modules." + ) + + +def register_third_party_plugins() -> None: + """ + Discover and import third-party LeRobot plugins so they can register themselves. + + This function uses `importlib.metadata` to find packages installed in the environment + (including editable installs) starting with 'lerobot_robot_', 'lerobot_camera_', + 'lerobot_teleoperator_', or 'lerobot_policy_' and imports them. + """ + prefixes = ("lerobot_robot_", "lerobot_camera_", "lerobot_teleoperator_", "lerobot_policy_") + imported: list[str] = [] + failed: list[str] = [] + + def attempt_import(module_name: str): + try: + importlib.import_module(module_name) + imported.append(module_name) + logging.info("Imported third-party plugin: %s", module_name) + except Exception: + logging.exception("Could not import third-party plugin: %s", module_name) + failed.append(module_name) + + for dist in importlib.metadata.distributions(): + dist_name = dist.metadata.get("Name") + if not dist_name: + continue + if dist_name.startswith(prefixes): + attempt_import(dist_name) + + logging.debug("Third-party plugin import summary: imported=%s failed=%s", imported, failed) diff --git a/lerobot/src/lerobot/utils/io_utils.py b/lerobot/src/lerobot/utils/io_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1226772c97f4c9f62047d80a500704e6c17673f0 --- /dev/null +++ b/lerobot/src/lerobot/utils/io_utils.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import warnings +from pathlib import Path +from typing import TypeVar + +import imageio + +JsonLike = str | int | float | bool | None | list["JsonLike"] | dict[str, "JsonLike"] | tuple["JsonLike", ...] 
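+# Recursive alias for any JSON-serializable value; tuples are serialized as JSON
+# lists and converted back to tuples by `deserialize_json_into_object` below.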
+T = TypeVar("T", bound=JsonLike) + + +def write_video(video_path, stacked_frames, fps): + # Filter out DeprecationWarnings raised from pkg_resources + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "pkg_resources is deprecated as an API", category=DeprecationWarning + ) + imageio.mimsave(video_path, stacked_frames, fps=fps) + + +def deserialize_json_into_object(fpath: Path, obj: T) -> T: + """ + Loads the JSON data from `fpath` and recursively fills `obj` with the + corresponding values (strictly matching structure and types). + Tuples in `obj` are expected to be lists in the JSON data, which will be + converted back into tuples. + """ + with open(fpath, encoding="utf-8") as f: + data = json.load(f) + + def _deserialize(target, source): + """ + Recursively overwrite the structure in `target` with data from `source`, + performing strict checks on structure and type. + Returns the updated version of `target` (especially important for tuples). + """ + + # If the target is a dictionary, source must be a dictionary as well. + if isinstance(target, dict): + if not isinstance(source, dict): + raise TypeError(f"Type mismatch: expected dict, got {type(source)}") + + # Check that they have exactly the same set of keys. + if target.keys() != source.keys(): + raise ValueError( + f"Dictionary keys do not match.\nExpected: {target.keys()}, got: {source.keys()}" + ) + + # Recursively update each key. + for k in target: + target[k] = _deserialize(target[k], source[k]) + + return target + + # If the target is a list, source must be a list as well. + elif isinstance(target, list): + if not isinstance(source, list): + raise TypeError(f"Type mismatch: expected list, got {type(source)}") + + # Check length + if len(target) != len(source): + raise ValueError(f"List length mismatch: expected {len(target)}, got {len(source)}") + + # Recursively update each element. + for i in range(len(target)): + target[i] = _deserialize(target[i], source[i]) + + return target + + # If the target is a tuple, the source must be a list in JSON, + # which we'll convert back to a tuple. + elif isinstance(target, tuple): + if not isinstance(source, list): + raise TypeError(f"Type mismatch: expected list (for tuple), got {type(source)}") + + if len(target) != len(source): + raise ValueError(f"Tuple length mismatch: expected {len(target)}, got {len(source)}") + + # Convert each element, forming a new tuple. + converted_items = [] + for t_item, s_item in zip(target, source, strict=False): + converted_items.append(_deserialize(t_item, s_item)) + + # Return a brand new tuple (tuples are immutable in Python). + return tuple(converted_items) + + # Otherwise, we're dealing with a "primitive" (int, float, str, bool, None). + else: + # Check the exact type. If these must match 1:1, do: + if type(target) is not type(source): + raise TypeError(f"Type mismatch: expected {type(target)}, got {type(source)}") + return source + + # Perform the in-place/recursive deserialization + updated_obj = _deserialize(obj, data) + return updated_obj diff --git a/lerobot/src/lerobot/utils/logging_utils.py b/lerobot/src/lerobot/utils/logging_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5b61394ac8239fc11ebe9edbe4c51672848a9f33 --- /dev/null +++ b/lerobot/src/lerobot/utils/logging_utils.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections.abc import Callable +from typing import Any + +from lerobot.utils.utils import format_big_number + + +class AverageMeter: + """ + Computes and stores the average and current value + Adapted from https://github.com/pytorch/examples/blob/main/imagenet/main.py + """ + + def __init__(self, name: str, fmt: str = ":f"): + self.name = name + self.fmt = fmt + self.reset() + + def reset(self) -> None: + self.val = 0.0 + self.avg = 0.0 + self.sum = 0.0 + self.count = 0.0 + + def update(self, val: float, n: int = 1) -> None: + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = "{name}:{avg" + self.fmt + "}" + return fmtstr.format(**self.__dict__) + + +class MetricsTracker: + """ + A helper class to track and log metrics over time. + + Usage pattern: + + ```python + # initialize, potentially with non-zero initial step (e.g. if resuming run) + metrics = {"loss": AverageMeter("loss", ":.3f")} + train_metrics = MetricsTracker(cfg, dataset, metrics, initial_step=step) + + # update metrics derived from step (samples, episodes, epochs) at each training step + train_metrics.step() + + # update various metrics + loss = policy.forward(batch) + train_metrics.loss = loss + + # display current metrics + logging.info(train_metrics) + + # export for wandb + wandb.log(train_metrics.to_dict()) + + # reset averages after logging + train_metrics.reset_averages() + ``` + """ + + __keys__ = [ + "_batch_size", + "_num_frames", + "_avg_samples_per_ep", + "metrics", + "steps", + "samples", + "episodes", + "epochs", + "accelerator", + ] + + def __init__( + self, + batch_size: int, + num_frames: int, + num_episodes: int, + metrics: dict[str, AverageMeter], + initial_step: int = 0, + accelerator: Callable | None = None, + ): + self.__dict__.update(dict.fromkeys(self.__keys__)) + self._batch_size = batch_size + self._num_frames = num_frames + self._avg_samples_per_ep = num_frames / num_episodes + self.metrics = metrics + + self.steps = initial_step + # A sample is an (observation,action) pair, where observation and action + # can be on multiple timestamps. In a batch, we have `batch_size` number of samples. 
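+        # e.g. with batch_size=8, resuming from initial_step=1000 restarts `samples` at 8000 below.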
+ self.samples = self.steps * self._batch_size + self.episodes = self.samples / self._avg_samples_per_ep + self.epochs = self.samples / self._num_frames + self.accelerator = accelerator + + def __getattr__(self, name: str) -> int | dict[str, AverageMeter] | AverageMeter | Any: + if name in self.__dict__: + return self.__dict__[name] + elif name in self.metrics: + return self.metrics[name] + else: + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, name: str, value: Any) -> None: + if name in self.__dict__: + super().__setattr__(name, value) + elif name in self.metrics: + self.metrics[name].update(value) + else: + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def step(self) -> None: + """ + Updates metrics that depend on 'step' for one step. + """ + self.steps += 1 + self.samples += self._batch_size * (self.accelerator.num_processes if self.accelerator else 1) + self.episodes = self.samples / self._avg_samples_per_ep + self.epochs = self.samples / self._num_frames + + def __str__(self) -> str: + display_list = [ + f"step:{format_big_number(self.steps)}", + # number of samples seen during training + f"smpl:{format_big_number(self.samples)}", + # number of episodes seen during training + f"ep:{format_big_number(self.episodes)}", + # number of time all unique samples are seen + f"epch:{self.epochs:.2f}", + *[str(m) for m in self.metrics.values()], + ] + return " ".join(display_list) + + def to_dict(self, use_avg: bool = True) -> dict[str, int | float]: + """ + Returns the current metric values (or averages if `use_avg=True`) as a dict. + """ + return { + "steps": self.steps, + "samples": self.samples, + "episodes": self.episodes, + "epochs": self.epochs, + **{k: m.avg if use_avg else m.val for k, m in self.metrics.items()}, + } + + def reset_averages(self) -> None: + """Resets average meters.""" + for m in self.metrics.values(): + m.reset() diff --git a/lerobot/src/lerobot/utils/rabc.py b/lerobot/src/lerobot/utils/rabc.py new file mode 100644 index 0000000000000000000000000000000000000000..aa3d74045b475f18c76cb29949cae02e93c2d9ec --- /dev/null +++ b/lerobot/src/lerobot/utils/rabc.py @@ -0,0 +1,288 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from pathlib import Path + +import numpy as np +import pandas as pd +import torch +from huggingface_hub import hf_hub_download + + +def resolve_hf_path(path: str | Path) -> Path: + """Resolve a path that may be a HuggingFace URL (hf://datasets/...) 
to a local path.""" + path_str = str(path) + if path_str.startswith("hf://datasets/"): + parts = path_str.replace("hf://datasets/", "").split("/") + repo_id = "/".join(parts[:2]) + filename = "/".join(parts[2:]) + return Path(hf_hub_download(repo_id=repo_id, filename=filename, repo_type="dataset")) + return Path(path) + + +class RABCWeights: + """ + Load precomputed SARM progress values and compute RA-BC weights during training. + + Progress values are loaded from a parquet file (generated by compute_rabc_weights.py). + During training, computes: + - progress_delta = progress[t + chunk_size] - progress[t] + - rabc_weight based on the delta (paper Eq. 8-9) + + Args: + progress_path: Path to parquet file with precomputed progress values + chunk_size: Number of frames ahead for computing progress delta + head_mode: Which SARM head to use ("sparse" or "dense") + kappa: Hard threshold for high-quality samples (default: 0.01) + epsilon: Small constant for numerical stability (default: 1e-6) + fallback_weight: Weight to use for frames without valid delta (default: 1.0) + device: Device to return tensors on + """ + + def __init__( + self, + progress_path: str | Path, + chunk_size: int = 50, + head_mode: str = "sparse", + kappa: float = 0.01, + epsilon: float = 1e-6, + fallback_weight: float = 1.0, + device: torch.device = None, + ): + self.progress_path = resolve_hf_path(progress_path) + self.chunk_size = chunk_size + self.head_mode = head_mode + self.kappa = kappa + self.epsilon = epsilon + self.fallback_weight = fallback_weight + self.device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu") + + # Determine progress column name + self.progress_column = f"progress_{head_mode}" + + # Load progress values + logging.info(f"Loading SARM progress values from {self.progress_path}") + self.df = pd.read_parquet(self.progress_path) + + # Check if the requested head mode column exists + if self.progress_column not in self.df.columns: + available = [c for c in self.df.columns if c.startswith("progress")] + raise ValueError( + f"Column '{self.progress_column}' not found. 
Available progress columns: {available}" + ) + + logging.info(f"Using progress column: {self.progress_column}") + + self.progress_lookup = {} + self.episode_lookup = {} + + for _, row in self.df.iterrows(): + global_idx = int(row["index"]) + progress = row[self.progress_column] + episode_idx = int(row["episode_index"]) + + if not np.isnan(progress): + self.progress_lookup[global_idx] = float(progress) + self.episode_lookup[global_idx] = episode_idx + + # Build episode boundaries for delta computation + self.episode_boundaries = {} + for episode_idx in self.df["episode_index"].unique(): + ep_df = self.df[self.df["episode_index"] == episode_idx] + self.episode_boundaries[int(episode_idx)] = { + "start": int(ep_df["index"].min()), + "end": int(ep_df["index"].max()) + 1, + } + + logging.info(f"Loaded {len(self.progress_lookup)} frame progress values") + logging.info(f"Chunk size for delta computation: {chunk_size}") + + # Compute global statistics for weight computation + self._compute_global_stats() + + def _compute_global_stats(self): + """Compute global mean and std of progress deltas for weight calculation.""" + all_deltas = [] + + for global_idx, progress in self.progress_lookup.items(): + episode_idx = self.episode_lookup.get(global_idx) + if episode_idx is None: + continue + + bounds = self.episode_boundaries.get(episode_idx) + if bounds is None: + continue + + future_idx = global_idx + self.chunk_size + if future_idx >= bounds["end"]: + # Near end of episode: use last frame's progress + future_idx = bounds["end"] - 1 + + future_progress = self.progress_lookup.get(future_idx) + if future_progress is not None: + delta = future_progress - progress + all_deltas.append(delta) + + if all_deltas: + self.delta_mean = max(np.mean(all_deltas), 0.0) + self.delta_std = max(np.std(all_deltas), self.epsilon) + logging.info(f"Progress delta stats: mean={self.delta_mean:.4f}, std={self.delta_std:.4f}") + else: + self.delta_mean = 0.0 + self.delta_std = self.epsilon + logging.warning("No valid progress deltas found, using default stats") + + def compute_batch_weights(self, batch: dict) -> tuple[torch.Tensor, dict]: + """ + Compute RA-BC weights for a batch. + + For each sample: + 1. Get progress at current frame + 2. Get progress at frame + chunk_size (within same episode) + 3. Compute delta = future_progress - current_progress + 4. Compute weight using paper Eq. 
8-9 + + Args: + batch: Training batch containing "index" key with global frame indices + + Returns: + Tuple of: + - Weights tensor (batch_size,) normalized to sum to batch_size + - Stats dict with raw_mean_weight, num_zero_weight, num_full_weight + """ + indices = batch.get("index") + if indices is None: + logging.warning("RA-BC: Batch missing 'index' key, using uniform weights") + batch_size = self._get_batch_size(batch) + return torch.ones(batch_size, device=self.device), {"raw_mean_weight": 1.0} + + # Convert to list of ints + if isinstance(indices, torch.Tensor): + indices = indices.cpu().numpy().tolist() + elif isinstance(indices, np.ndarray): + indices = indices.tolist() + + # Compute deltas and weights for each sample + deltas = [] + for idx in indices: + idx = int(idx) + delta = self._compute_delta(idx) + deltas.append(delta) + + deltas = np.array(deltas, dtype=np.float32) + + # Compute weights from deltas + weights = self._compute_weights(deltas) + + # Compute stats before normalization for logging + raw_mean_weight = float(np.nanmean(weights)) + num_zero_weight = int(np.sum(weights == 0)) + num_full_weight = int(np.sum(weights == 1.0)) + batch_stats = { + "raw_mean_weight": raw_mean_weight, + "num_zero_weight": num_zero_weight, + "num_full_weight": num_full_weight, + } + + weights = torch.tensor(weights, device=self.device, dtype=torch.float32) + + # Normalize to sum to batch_size + batch_size = len(weights) + weight_sum = weights.sum() + self.epsilon + weights = weights * batch_size / weight_sum + + return weights, batch_stats + + def _compute_delta(self, global_idx: int) -> float: + """Compute progress delta for a single frame.""" + current_progress = self.progress_lookup.get(global_idx) + if current_progress is None: + return np.nan + + episode_idx = self.episode_lookup.get(global_idx) + if episode_idx is None: + return np.nan + + bounds = self.episode_boundaries.get(episode_idx) + if bounds is None: + return np.nan + + future_idx = global_idx + self.chunk_size # Δ = chunk_size + if future_idx >= bounds["end"]: + # Near end of episode: use last frame's progress instead + future_idx = bounds["end"] - 1 + + future_progress = self.progress_lookup.get(future_idx) + if future_progress is None: + return np.nan + + return future_progress - current_progress + + def _compute_weights(self, deltas: np.ndarray) -> np.ndarray: + """ + Compute RA-BC weights from progress deltas. + + Following paper Eq. 8-9: + - Soft weight: ˜wi = clip((ri − (µ − 2σ)) / (4σ + ε), 0, 1) + - Final weight: wi = 1{ri > κ} + 1{0 ≤ ri ≤ κ}˜wi + + Returns: + Array of weights + """ + valid_mask = ~np.isnan(deltas) + + # Compute soft weights using global statistics + lower_bound = self.delta_mean - 2 * self.delta_std + soft_weights = (deltas - lower_bound) / (4 * self.delta_std + self.epsilon) + soft_weights = np.clip(soft_weights, 0.0, 1.0) + + # Apply paper's Eq. 
9 + weights = np.zeros_like(deltas, dtype=np.float32) + + # High quality: ri > kappa → weight = 1 + high_quality_mask = deltas > self.kappa + weights[high_quality_mask] = 1.0 + + # Moderate quality: 0 <= ri <= kappa → weight = soft_weight + moderate_mask = (deltas >= 0) & (deltas <= self.kappa) + weights[moderate_mask] = soft_weights[moderate_mask] + + # Negative progress: ri < 0 → weight = 0 (already 0) + # Invalid (NaN): use fallback weight + weights[~valid_mask] = self.fallback_weight + + return weights + + def _get_batch_size(self, batch: dict) -> int: + """Determine batch size from batch.""" + for key in ["action", "index"]: + if key in batch: + val = batch[key] + if isinstance(val, (torch.Tensor, np.ndarray)): + return val.shape[0] + return 1 + + def get_stats(self) -> dict: + """Get statistics.""" + return { + "num_frames": len(self.progress_lookup), + "chunk_size": self.chunk_size, + "head_mode": self.head_mode, + "delta_mean": self.delta_mean, + "delta_std": self.delta_std, + "kappa": self.kappa, + } diff --git a/lerobot/src/lerobot/utils/random_utils.py b/lerobot/src/lerobot/utils/random_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5d9558b173c2b30d6a6bd653cb52518c2770cf05 --- /dev/null +++ b/lerobot/src/lerobot/utils/random_utils.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import random +from collections.abc import Callable, Generator +from contextlib import contextmanager +from pathlib import Path +from typing import Any + +import numpy as np +import torch +from safetensors.torch import load_file, save_file + +from lerobot.datasets.utils import flatten_dict, unflatten_dict +from lerobot.utils.constants import RNG_STATE + + +def serialize_python_rng_state() -> dict[str, torch.Tensor]: + """ + Returns the rng state for `random` in the form of a flat dict[str, torch.Tensor] to be saved using + `safetensors.save_file()` or `torch.save()`. + """ + py_state = random.getstate() + return { + "py_rng_version": torch.tensor([py_state[0]], dtype=torch.int64), + "py_rng_state": torch.tensor(py_state[1], dtype=torch.int64), + } + + +def deserialize_python_rng_state(rng_state_dict: dict[str, torch.Tensor]) -> None: + """ + Restores the rng state for `random` from a dictionary produced by `serialize_python_rng_state()`. + """ + py_state = (rng_state_dict["py_rng_version"].item(), tuple(rng_state_dict["py_rng_state"].tolist()), None) + random.setstate(py_state) + + +def serialize_numpy_rng_state() -> dict[str, torch.Tensor]: + """ + Returns the rng state for `numpy` in the form of a flat dict[str, torch.Tensor] to be saved using + `safetensors.save_file()` or `torch.save()`. 
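+
+    Example (illustrative round-trip):
+
+    ```
+    snapshot = serialize_numpy_rng_state()
+    _ = np.random.rand()  # advances the global numpy RNG
+    deserialize_numpy_rng_state(snapshot)  # numpy RNG restored to the snapshot
+    ```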
+    """
+    np_state = np.random.get_state()
+    # Ensure no breaking changes from numpy
+    assert np_state[0] == "MT19937"
+    return {
+        "np_rng_state_values": torch.tensor(np_state[1], dtype=torch.int64),
+        "np_rng_state_index": torch.tensor([np_state[2]], dtype=torch.int64),
+        "np_rng_has_gauss": torch.tensor([np_state[3]], dtype=torch.int64),
+        "np_rng_cached_gaussian": torch.tensor([np_state[4]], dtype=torch.float32),
+    }
+
+
+def deserialize_numpy_rng_state(rng_state_dict: dict[str, torch.Tensor]) -> None:
+    """
+    Restores the rng state for `numpy` from a dictionary produced by `serialize_numpy_rng_state()`.
+    """
+    np_state = (
+        "MT19937",
+        rng_state_dict["np_rng_state_values"].numpy(),
+        rng_state_dict["np_rng_state_index"].item(),
+        rng_state_dict["np_rng_has_gauss"].item(),
+        rng_state_dict["np_rng_cached_gaussian"].item(),
+    )
+    np.random.set_state(np_state)
+
+
+def serialize_torch_rng_state() -> dict[str, torch.Tensor]:
+    """
+    Returns the rng state for `torch` in the form of a flat dict[str, torch.Tensor] to be saved using
+    `safetensors.save_file()` or `torch.save()`.
+    """
+    torch_rng_state_dict = {"torch_rng_state": torch.get_rng_state()}
+    if torch.cuda.is_available():
+        torch_rng_state_dict["torch_cuda_rng_state"] = torch.cuda.get_rng_state()
+    return torch_rng_state_dict
+
+
+def deserialize_torch_rng_state(rng_state_dict: dict[str, torch.Tensor]) -> None:
+    """
+    Restores the rng state for `torch` from a dictionary produced by `serialize_torch_rng_state()`.
+    """
+    torch.set_rng_state(rng_state_dict["torch_rng_state"])
+    if torch.cuda.is_available() and "torch_cuda_rng_state" in rng_state_dict:
+        torch.cuda.set_rng_state(rng_state_dict["torch_cuda_rng_state"])
+
+
+def serialize_rng_state() -> dict[str, torch.Tensor]:
+    """
+    Returns the rng state for `random`, `numpy`, and `torch`, in the form of a flat
+    dict[str, torch.Tensor] to be saved using `safetensors.save_file()` or `torch.save()`.
+    """
+    py_rng_state_dict = serialize_python_rng_state()
+    np_rng_state_dict = serialize_numpy_rng_state()
+    torch_rng_state_dict = serialize_torch_rng_state()
+
+    return {
+        **py_rng_state_dict,
+        **np_rng_state_dict,
+        **torch_rng_state_dict,
+    }
+
+
+def deserialize_rng_state(rng_state_dict: dict[str, torch.Tensor]) -> None:
+    """
+    Restores the rng state for `random`, `numpy`, and `torch` from a dictionary produced by
+    `serialize_rng_state()`. 
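+
+    Example (illustrative round-trip):
+
+    ```
+    snapshot = serialize_rng_state()
+    _ = torch.rand(1)  # advances the torch RNG
+    deserialize_rng_state(snapshot)  # `random`, `numpy`, and `torch` streams restored
+    ```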
+ """ + py_rng_state_dict = {k: v for k, v in rng_state_dict.items() if k.startswith("py")} + np_rng_state_dict = {k: v for k, v in rng_state_dict.items() if k.startswith("np")} + torch_rng_state_dict = {k: v for k, v in rng_state_dict.items() if k.startswith("torch")} + + deserialize_python_rng_state(py_rng_state_dict) + deserialize_numpy_rng_state(np_rng_state_dict) + deserialize_torch_rng_state(torch_rng_state_dict) + + +def save_rng_state(save_dir: Path) -> None: + rng_state_dict = serialize_rng_state() + flat_rng_state_dict = flatten_dict(rng_state_dict) + save_file(flat_rng_state_dict, save_dir / RNG_STATE) + + +def load_rng_state(save_dir: Path) -> None: + flat_rng_state_dict = load_file(save_dir / RNG_STATE) + rng_state_dict = unflatten_dict(flat_rng_state_dict) + deserialize_rng_state(rng_state_dict) + + +def get_rng_state() -> dict[str, Any]: + """Get the random state for `random`, `numpy`, and `torch`.""" + random_state_dict = { + "random_state": random.getstate(), + "numpy_random_state": np.random.get_state(), + "torch_random_state": torch.random.get_rng_state(), + } + if torch.cuda.is_available(): + random_state_dict["torch_cuda_random_state"] = torch.cuda.random.get_rng_state() + return random_state_dict + + +def set_rng_state(random_state_dict: dict[str, Any]): + """Set the random state for `random`, `numpy`, and `torch`. + + Args: + random_state_dict: A dictionary of the form returned by `get_rng_state`. + """ + random.setstate(random_state_dict["random_state"]) + np.random.set_state(random_state_dict["numpy_random_state"]) + torch.random.set_rng_state(random_state_dict["torch_random_state"]) + if torch.cuda.is_available(): + torch.cuda.random.set_rng_state(random_state_dict["torch_cuda_random_state"]) + + +def set_seed(seed, accelerator: Callable | None = None) -> None: + """Set seed for reproducibility.""" + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + + if accelerator: + from accelerate.utils import set_seed as _accelerate_set_seed + + _accelerate_set_seed(seed) + + +@contextmanager +def seeded_context(seed: int) -> Generator[None, None, None]: + """Set the seed when entering a context, and restore the prior random state at exit. + + Example usage: + + ``` + a = random.random() # produces some random number + with seeded_context(1337): + b = random.random() # produces some other random number + c = random.random() # produces yet another random number, but the same it would have if we never made `b` + ``` + """ + random_state_dict = get_rng_state() + set_seed(seed) + yield None + set_rng_state(random_state_dict) diff --git a/lerobot/src/lerobot/utils/robot_utils.py b/lerobot/src/lerobot/utils/robot_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3fc18ac9c754364ff8cfb52116bd5ca4176909b6 --- /dev/null +++ b/lerobot/src/lerobot/utils/robot_utils.py @@ -0,0 +1,55 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import platform +import time + + +def precise_sleep(seconds: float, spin_threshold: float = 0.010, sleep_margin: float = 0.003): + """ + Wait for `seconds` with better precision than time.sleep alone at the expense of more CPU usage. + + Parameters: + - seconds: duration to wait + - spin_threshold: if remaining <= spin_threshold -> spin; otherwise sleep (seconds). Default 10ms + - sleep_margin: when sleeping leave this much time before deadline to avoid oversleep. Default 3ms + + Note: + The default parameters are chosen to prioritize timing accuracy over CPU usage for the common 30 FPS use case. + """ + if seconds <= 0: + return + + system = platform.system() + # On macOS and Windows the scheduler / sleep granularity can make + # short sleeps inaccurate. Instead of burning CPU for the whole + # duration, sleep for most of the time and spin for the final few + # milliseconds to achieve good accuracy with much lower CPU usage. + if system in ("Darwin", "Windows"): + end_time = time.perf_counter() + seconds + while True: + remaining = end_time - time.perf_counter() + if remaining <= 0: + break + # If there's more than a couple milliseconds left, sleep most + # of the remaining time and leave a small margin for the final spin. + if remaining > spin_threshold: + # Sleep but avoid sleeping past the end by leaving a small margin. + time.sleep(max(remaining - sleep_margin, 0)) + else: + # Final short spin to hit precise timing without long sleeps. + pass + else: + # On Linux time.sleep is accurate enough for most uses + time.sleep(seconds) diff --git a/lerobot/src/lerobot/utils/rotation.py b/lerobot/src/lerobot/utils/rotation.py new file mode 100644 index 0000000000000000000000000000000000000000..48d1ba5fa16307a0331463084e66f65c94151f78 --- /dev/null +++ b/lerobot/src/lerobot/utils/rotation.py @@ -0,0 +1,270 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Custom rotation utilities to replace scipy.spatial.transform.Rotation.""" + +import numpy as np + + +class Rotation: + """ + Custom rotation class that provides a subset of scipy.spatial.transform.Rotation functionality. + + Supports conversions between rotation vectors, rotation matrices, and quaternions. + """ + + def __init__(self, quat: np.ndarray) -> None: + """Initialize rotation from quaternion [x, y, z, w].""" + self._quat = np.asarray(quat, dtype=float) + # Normalize quaternion + norm = np.linalg.norm(self._quat) + if norm > 0: + self._quat = self._quat / norm + + @classmethod + def from_rotvec(cls, rotvec: np.ndarray) -> "Rotation": + """ + Create rotation from rotation vector using Rodrigues' formula. 
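+
+        For a rotation vector ``v`` with angle ``theta = ||v||`` and unit axis
+        ``u = v / theta``, the resulting quaternion is
+        ``q = [u * sin(theta / 2), cos(theta / 2)]`` in (x, y, z, w) order,
+        which is exactly what this method computes.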
+ + Args: + rotvec: Rotation vector [x, y, z] where magnitude is angle in radians + + Returns: + Rotation instance + """ + rotvec = np.asarray(rotvec, dtype=float) + angle = np.linalg.norm(rotvec) + + if angle < 1e-8: + # For very small angles, use identity quaternion + quat = np.array([0.0, 0.0, 0.0, 1.0]) + else: + axis = rotvec / angle + half_angle = angle / 2.0 + sin_half = np.sin(half_angle) + cos_half = np.cos(half_angle) + + # Quaternion [x, y, z, w] + quat = np.array([axis[0] * sin_half, axis[1] * sin_half, axis[2] * sin_half, cos_half]) + + return cls(quat) + + @classmethod + def from_matrix(cls, matrix: np.ndarray) -> "Rotation": + """ + Create rotation from 3x3 rotation matrix. + + Args: + matrix: 3x3 rotation matrix + + Returns: + Rotation instance + """ + matrix = np.asarray(matrix, dtype=float) + + # Shepherd's method for converting rotation matrix to quaternion + trace = np.trace(matrix) + + if trace > 0: + s = np.sqrt(trace + 1.0) * 2 # s = 4 * qw + qw = 0.25 * s + qx = (matrix[2, 1] - matrix[1, 2]) / s + qy = (matrix[0, 2] - matrix[2, 0]) / s + qz = (matrix[1, 0] - matrix[0, 1]) / s + elif matrix[0, 0] > matrix[1, 1] and matrix[0, 0] > matrix[2, 2]: + s = np.sqrt(1.0 + matrix[0, 0] - matrix[1, 1] - matrix[2, 2]) * 2 # s = 4 * qx + qw = (matrix[2, 1] - matrix[1, 2]) / s + qx = 0.25 * s + qy = (matrix[0, 1] + matrix[1, 0]) / s + qz = (matrix[0, 2] + matrix[2, 0]) / s + elif matrix[1, 1] > matrix[2, 2]: + s = np.sqrt(1.0 + matrix[1, 1] - matrix[0, 0] - matrix[2, 2]) * 2 # s = 4 * qy + qw = (matrix[0, 2] - matrix[2, 0]) / s + qx = (matrix[0, 1] + matrix[1, 0]) / s + qy = 0.25 * s + qz = (matrix[1, 2] + matrix[2, 1]) / s + else: + s = np.sqrt(1.0 + matrix[2, 2] - matrix[0, 0] - matrix[1, 1]) * 2 # s = 4 * qz + qw = (matrix[1, 0] - matrix[0, 1]) / s + qx = (matrix[0, 2] + matrix[2, 0]) / s + qy = (matrix[1, 2] + matrix[2, 1]) / s + qz = 0.25 * s + + quat = np.array([qx, qy, qz, qw]) + return cls(quat) + + @classmethod + def from_quat(cls, quat: np.ndarray) -> "Rotation": + """ + Create rotation from quaternion. + + Args: + quat: Quaternion [x, y, z, w] or [w, x, y, z] (specify convention in docstring) + This implementation expects [x, y, z, w] format + + Returns: + Rotation instance + """ + return cls(quat) + + def as_matrix(self) -> np.ndarray: + """ + Convert rotation to 3x3 rotation matrix. + + Returns: + 3x3 rotation matrix + """ + qx, qy, qz, qw = self._quat + + # Compute rotation matrix from quaternion + return np.array( + [ + [1 - 2 * (qy * qy + qz * qz), 2 * (qx * qy - qz * qw), 2 * (qx * qz + qy * qw)], + [2 * (qx * qy + qz * qw), 1 - 2 * (qx * qx + qz * qz), 2 * (qy * qz - qx * qw)], + [2 * (qx * qz - qy * qw), 2 * (qy * qz + qx * qw), 1 - 2 * (qx * qx + qy * qy)], + ], + dtype=float, + ) + + def as_rotvec(self) -> np.ndarray: + """ + Convert rotation to rotation vector. + + Returns: + Rotation vector [x, y, z] where magnitude is angle in radians + """ + qx, qy, qz, qw = self._quat + + # Ensure qw is positive for unique representation + if qw < 0: + qx, qy, qz, qw = -qx, -qy, -qz, -qw + + # Compute angle and axis + angle = 2.0 * np.arccos(np.clip(abs(qw), 0.0, 1.0)) + sin_half_angle = np.sqrt(1.0 - qw * qw) + + if sin_half_angle < 1e-8: + # For very small angles, use linearization: rotvec ≈ 2 * [qx, qy, qz] + return 2.0 * np.array([qx, qy, qz]) + + # Extract axis and scale by angle + axis = np.array([qx, qy, qz]) / sin_half_angle + return angle * axis + + def as_quat(self) -> np.ndarray: + """ + Get quaternion representation. 
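+
+        Components are in (x, y, z, w) order (scalar-last), matching the
+        convention of scipy's ``Rotation.as_quat``.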
+ + Returns: + Quaternion [x, y, z, w] + """ + return self._quat.copy() + + def apply(self, vectors: np.ndarray, inverse: bool = False) -> np.ndarray: + """ + Apply this rotation to a set of vectors. + + This is equivalent to applying the rotation matrix to the vectors: + self.as_matrix() @ vectors (or self.as_matrix().T @ vectors if inverse=True). + + Args: + vectors: Array of shape (3,) or (N, 3) representing vectors in 3D space + inverse: If True, apply the inverse of the rotation. Default is False. + + Returns: + Rotated vectors with shape: + - (3,) if input was single vector with shape (3,) + - (N, 3) in all other cases + """ + vectors = np.asarray(vectors, dtype=float) + original_shape = vectors.shape + + # Handle single vector case - ensure it's 2D for matrix multiplication + if vectors.ndim == 1: + if len(vectors) != 3: + raise ValueError("Single vector must have length 3") + vectors = vectors.reshape(1, 3) + single_vector = True + elif vectors.ndim == 2: + if vectors.shape[1] != 3: + raise ValueError("Vectors must have shape (N, 3)") + single_vector = False + else: + raise ValueError("Vectors must be 1D or 2D array") + + # Get rotation matrix + rotation_matrix = self.as_matrix() + + # Apply inverse if requested (transpose for orthogonal rotation matrices) + if inverse: + rotation_matrix = rotation_matrix.T + + # Apply rotation: (N, 3) @ (3, 3).T -> (N, 3) + rotated_vectors = vectors @ rotation_matrix.T + + # Return original shape for single vector case + if single_vector and original_shape == (3,): + return rotated_vectors.flatten() + + return rotated_vectors + + def inv(self) -> "Rotation": + """ + Invert this rotation. + + Composition of a rotation with its inverse results in an identity transformation. + + Returns: + Rotation instance containing the inverse of this rotation + """ + qx, qy, qz, qw = self._quat + + # For a unit quaternion, the inverse is the conjugate: [-x, -y, -z, w] + inverse_quat = np.array([-qx, -qy, -qz, qw]) + + return Rotation(inverse_quat) + + def __mul__(self, other: "Rotation") -> "Rotation": + """ + Compose this rotation with another rotation using the * operator. + + The composition `r2 * r1` means "apply r1 first, then r2". + This is equivalent to applying rotation matrices: r2.as_matrix() @ r1.as_matrix() + + Args: + other: Another Rotation instance to compose with + + Returns: + Rotation instance representing the composition of rotations + """ + if not isinstance(other, Rotation): + return NotImplemented + + # Get quaternions [x, y, z, w] + x1, y1, z1, w1 = other._quat # Apply first + x2, y2, z2, w2 = self._quat # Apply second + + # Quaternion multiplication: q2 * q1 (apply q1 first, then q2) + composed_quat = np.array( + [ + w2 * x1 + x2 * w1 + y2 * z1 - z2 * y1, # x component + w2 * y1 - x2 * z1 + y2 * w1 + z2 * x1, # y component + w2 * z1 + x2 * y1 - y2 * x1 + z2 * w1, # z component + w2 * w1 - x2 * x1 - y2 * y1 - z2 * z1, # w component + ] + ) + + return Rotation(composed_quat) diff --git a/lerobot/src/lerobot/utils/train_utils.py b/lerobot/src/lerobot/utils/train_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..faf4af837eb0c2b75eaef041379c3c6b1d6ba839 --- /dev/null +++ b/lerobot/src/lerobot/utils/train_utils.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from pathlib import Path + +from torch.optim import Optimizer +from torch.optim.lr_scheduler import LRScheduler + +from lerobot.configs.train import TrainPipelineConfig +from lerobot.datasets.utils import load_json, write_json +from lerobot.optim.optimizers import load_optimizer_state, save_optimizer_state +from lerobot.optim.schedulers import load_scheduler_state, save_scheduler_state +from lerobot.policies.pretrained import PreTrainedPolicy +from lerobot.processor import PolicyProcessorPipeline +from lerobot.utils.constants import ( + CHECKPOINTS_DIR, + LAST_CHECKPOINT_LINK, + PRETRAINED_MODEL_DIR, + TRAINING_STATE_DIR, + TRAINING_STEP, +) +from lerobot.utils.random_utils import load_rng_state, save_rng_state + + +def get_step_identifier(step: int, total_steps: int) -> str: + num_digits = max(6, len(str(total_steps))) + return f"{step:0{num_digits}d}" + + +def get_step_checkpoint_dir(output_dir: Path, total_steps: int, step: int) -> Path: + """Returns the checkpoint sub-directory corresponding to the step number.""" + step_identifier = get_step_identifier(step, total_steps) + return output_dir / CHECKPOINTS_DIR / step_identifier + + +def save_training_step(step: int, save_dir: Path) -> None: + write_json({"step": step}, save_dir / TRAINING_STEP) + + +def load_training_step(save_dir: Path) -> int: + training_step = load_json(save_dir / TRAINING_STEP) + return training_step["step"] + + +def update_last_checkpoint(checkpoint_dir: Path) -> Path: + last_checkpoint_dir = checkpoint_dir.parent / LAST_CHECKPOINT_LINK + if last_checkpoint_dir.is_symlink(): + last_checkpoint_dir.unlink() + relative_target = checkpoint_dir.relative_to(checkpoint_dir.parent) + last_checkpoint_dir.symlink_to(relative_target) + + +def save_checkpoint( + checkpoint_dir: Path, + step: int, + cfg: TrainPipelineConfig, + policy: PreTrainedPolicy, + optimizer: Optimizer, + scheduler: LRScheduler | None = None, + preprocessor: PolicyProcessorPipeline | None = None, + postprocessor: PolicyProcessorPipeline | None = None, +) -> None: + """This function creates the following directory structure: + + 005000/ # training step at checkpoint + ├── pretrained_model/ + │ ├── config.json # policy config + │ ├── model.safetensors # policy weights + │ ├── train_config.json # train config + │ ├── processor.json # processor config (if preprocessor provided) + │ └── step_*.safetensors # processor state files (if any) + └── training_state/ + ├── optimizer_param_groups.json # optimizer param groups + ├── optimizer_state.safetensors # optimizer state + ├── rng_state.safetensors # rng states + ├── scheduler_state.json # scheduler state + └── training_step.json # training step + + Args: + cfg (TrainPipelineConfig): The training config used for this run. + step (int): The training step at that checkpoint. + policy (PreTrainedPolicy): The policy to save. + optimizer (Optimizer | None, optional): The optimizer to save the state from. Defaults to None. + scheduler (LRScheduler | None, optional): The scheduler to save the state from. Defaults to None. + preprocessor: The preprocessor/pipeline to save. Defaults to None. 
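+        postprocessor: The postprocessor/pipeline to save. Defaults to None.
+
+    Example (illustrative sketch; assumes `cfg`, `policy`, and `optimizer` already exist):
+        ```python
+        checkpoint_dir = get_step_checkpoint_dir(cfg.output_dir, cfg.steps, step)
+        save_checkpoint(checkpoint_dir, step, cfg, policy, optimizer)
+        update_last_checkpoint(checkpoint_dir)
+        ```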
+ """ + pretrained_dir = checkpoint_dir / PRETRAINED_MODEL_DIR + policy.save_pretrained(pretrained_dir) + cfg.save_pretrained(pretrained_dir) + if cfg.peft is not None: + # When using PEFT, policy.save_pretrained will only write the adapter weights + config, not the + # policy config which we need for loading the model. In this case we'll write it ourselves. + policy.config.save_pretrained(pretrained_dir) + if preprocessor is not None: + preprocessor.save_pretrained(pretrained_dir) + if postprocessor is not None: + postprocessor.save_pretrained(pretrained_dir) + save_training_state(checkpoint_dir, step, optimizer, scheduler) + + +def save_training_state( + checkpoint_dir: Path, + train_step: int, + optimizer: Optimizer | None = None, + scheduler: LRScheduler | None = None, +) -> None: + """ + Saves the training step, optimizer state, scheduler state, and rng state. + + Args: + save_dir (Path): The directory to save artifacts to. + train_step (int): Current training step. + optimizer (Optimizer | None, optional): The optimizer from which to save the state_dict. + Defaults to None. + scheduler (LRScheduler | None, optional): The scheduler from which to save the state_dict. + Defaults to None. + """ + save_dir = checkpoint_dir / TRAINING_STATE_DIR + save_dir.mkdir(parents=True, exist_ok=True) + save_training_step(train_step, save_dir) + save_rng_state(save_dir) + if optimizer is not None: + save_optimizer_state(optimizer, save_dir) + if scheduler is not None: + save_scheduler_state(scheduler, save_dir) + + +def load_training_state( + checkpoint_dir: Path, optimizer: Optimizer, scheduler: LRScheduler | None +) -> tuple[int, Optimizer, LRScheduler | None]: + """ + Loads the training step, optimizer state, scheduler state, and rng state. + This is used to resume a training run. + + Args: + checkpoint_dir (Path): The checkpoint directory. Should contain a 'training_state' dir. + optimizer (Optimizer): The optimizer to load the state_dict to. + scheduler (LRScheduler | None): The scheduler to load the state_dict to (can be None). + + Raises: + NotADirectoryError: If 'checkpoint_dir' doesn't contain a 'training_state' dir + + Returns: + tuple[int, Optimizer, LRScheduler | None]: training step, optimizer and scheduler with their + state_dict loaded. + """ + training_state_dir = checkpoint_dir / TRAINING_STATE_DIR + if not training_state_dir.is_dir(): + raise NotADirectoryError(training_state_dir) + + load_rng_state(training_state_dir) + step = load_training_step(training_state_dir) + optimizer = load_optimizer_state(optimizer, training_state_dir) + if scheduler is not None: + scheduler = load_scheduler_state(scheduler, training_state_dir) + + return step, optimizer, scheduler diff --git a/lerobot/src/lerobot/utils/transition.py b/lerobot/src/lerobot/utils/transition.py new file mode 100644 index 0000000000000000000000000000000000000000..f09030003ba50b639cadc99ca44bbd6c4df1f3b3 --- /dev/null +++ b/lerobot/src/lerobot/utils/transition.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TypedDict
+
+import torch
+
+from lerobot.utils.constants import ACTION
+
+
+class Transition(TypedDict):
+    state: dict[str, torch.Tensor]
+    action: torch.Tensor
+    reward: float
+    next_state: dict[str, torch.Tensor]
+    done: bool
+    truncated: bool
+    complementary_info: dict[str, torch.Tensor | float | int] | None
+
+
+def move_transition_to_device(transition: Transition, device: str = "cpu") -> Transition:
+    device = torch.device(device)
+    non_blocking = device.type == "cuda"
+
+    # Move state tensors to device
+    transition["state"] = {
+        key: val.to(device, non_blocking=non_blocking) for key, val in transition["state"].items()
+    }
+
+    # Move action to device
+    transition[ACTION] = transition[ACTION].to(device, non_blocking=non_blocking)
+
+    # Move reward and done if they are tensors
+    if isinstance(transition["reward"], torch.Tensor):
+        transition["reward"] = transition["reward"].to(device, non_blocking=non_blocking)
+
+    if isinstance(transition["done"], torch.Tensor):
+        transition["done"] = transition["done"].to(device, non_blocking=non_blocking)
+
+    if isinstance(transition["truncated"], torch.Tensor):
+        transition["truncated"] = transition["truncated"].to(device, non_blocking=non_blocking)
+
+    # Move next_state tensors to device
+    transition["next_state"] = {
+        key: val.to(device, non_blocking=non_blocking) for key, val in transition["next_state"].items()
+    }
+
+    # Move complementary_info tensors if present
+    if transition.get("complementary_info") is not None:
+        for key, val in transition["complementary_info"].items():
+            if isinstance(val, torch.Tensor):
+                transition["complementary_info"][key] = val.to(device, non_blocking=non_blocking)
+            elif isinstance(val, (int, float, bool)):
+                transition["complementary_info"][key] = torch.tensor(val, device=device)
+            else:
+                raise ValueError(f"Unsupported type {type(val)} for complementary_info[{key}]")
+    return transition
+
+
+def move_state_dict_to_device(state_dict, device="cpu"):
+    """
+    Recursively move all tensors in a (potentially) nested
+    dict/list/tuple structure to the given device.
+    """
+    if isinstance(state_dict, torch.Tensor):
+        return state_dict.to(device)
+    elif isinstance(state_dict, dict):
+        return {k: move_state_dict_to_device(v, device=device) for k, v in state_dict.items()}
+    elif isinstance(state_dict, list):
+        return [move_state_dict_to_device(v, device=device) for v in state_dict]
+    elif isinstance(state_dict, tuple):
+        return tuple(move_state_dict_to_device(v, device=device) for v in state_dict)
+    else:
+        return state_dict diff --git a/lerobot/src/lerobot/utils/utils.py b/lerobot/src/lerobot/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..496a0ee337e80831685076ef847df26b6182b247 --- /dev/null +++ b/lerobot/src/lerobot/utils/utils.py @@ -0,0 +1,410 @@ +#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import logging +import os +import platform +import select +import subprocess +import sys +import time +from copy import copy, deepcopy +from datetime import datetime +from pathlib import Path +from statistics import mean + +import numpy as np +import torch +from accelerate import Accelerator +from datasets.utils.logging import disable_progress_bar, enable_progress_bar + + +def inside_slurm(): + """Check whether the python process was launched through slurm""" + # TODO(rcadene): return False for interactive mode `--pty bash` + return "SLURM_JOB_ID" in os.environ + + +def auto_select_torch_device() -> torch.device: + """Tries to select automatically a torch device.""" + if torch.cuda.is_available(): + logging.info("Cuda backend detected, using cuda.") + return torch.device("cuda") + elif torch.backends.mps.is_available(): + logging.info("Metal backend detected, using mps.") + return torch.device("mps") + elif torch.xpu.is_available(): + logging.info("Intel XPU backend detected, using xpu.") + return torch.device("xpu") + else: + logging.warning("No accelerated backend detected. Using default cpu, this will be slow.") + return torch.device("cpu") + + +# TODO(Steven): Remove log. log shouldn't be an argument, this should be handled by the logger level +def get_safe_torch_device(try_device: str, log: bool = False) -> torch.device: + """Given a string, return a torch.device with checks on whether the device is available.""" + try_device = str(try_device) + if try_device.startswith("cuda"): + assert torch.cuda.is_available() + device = torch.device(try_device) + elif try_device == "mps": + assert torch.backends.mps.is_available() + device = torch.device("mps") + elif try_device == "xpu": + assert torch.xpu.is_available() + device = torch.device("xpu") + elif try_device == "cpu": + device = torch.device("cpu") + if log: + logging.warning("Using CPU, this will be slow.") + else: + device = torch.device(try_device) + if log: + logging.warning(f"Using custom {try_device} device.") + return device + + +def get_safe_dtype(dtype: torch.dtype, device: str | torch.device): + """ + mps is currently not compatible with float64 + """ + if isinstance(device, torch.device): + device = device.type + if device == "mps" and dtype == torch.float64: + return torch.float32 + if device == "xpu" and dtype == torch.float64: + if hasattr(torch.xpu, "get_device_capability"): + device_capability = torch.xpu.get_device_capability() + # NOTE: Some Intel XPU devices do not support double precision (FP64). + # The `has_fp64` flag is returned by `torch.xpu.get_device_capability()` + # when available; if False, we fall back to float32 for compatibility. + if not device_capability.get("has_fp64", False): + logging.warning(f"Device {device} does not support float64, using float32 instead.") + return torch.float32 + else: + logging.warning( + f"Device {device} capability check failed. Assuming no support for float64, using float32 instead." + ) + return torch.float32 + return dtype + else: + return dtype + + +def is_torch_device_available(try_device: str) -> bool: + try_device = str(try_device) # Ensure try_device is a string + if try_device.startswith("cuda"): + return torch.cuda.is_available() + elif try_device == "mps": + return torch.backends.mps.is_available() + elif try_device == "xpu": + return torch.xpu.is_available() + elif try_device == "cpu": + return True + else: + raise ValueError(f"Unknown device {try_device}. 
Supported devices are: cuda, mps, xpu or cpu.") + + +def is_amp_available(device: str): + if device in ["cuda", "xpu", "cpu"]: + return True + elif device == "mps": + return False + else: + raise ValueError(f"Unknown device '{device}.") + + +def init_logging( + log_file: Path | None = None, + display_pid: bool = False, + console_level: str = "INFO", + file_level: str = "DEBUG", + accelerator: Accelerator | None = None, +): + """Initialize logging configuration for LeRobot. + + In multi-GPU training, only the main process logs to console to avoid duplicate output. + Non-main processes have console logging suppressed but can still log to file. + + Args: + log_file: Optional file path to write logs to + display_pid: Include process ID in log messages (useful for debugging multi-process) + console_level: Logging level for console output + file_level: Logging level for file output + accelerator: Optional Accelerator instance (for multi-GPU detection) + """ + + def custom_format(record: logging.LogRecord) -> str: + dt = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + fnameline = f"{record.pathname}:{record.lineno}" + pid_str = f"[PID: {os.getpid()}] " if display_pid else "" + return f"{record.levelname} {pid_str}{dt} {fnameline[-15:]:>15} {record.getMessage()}" + + formatter = logging.Formatter() + formatter.format = custom_format + + logger = logging.getLogger() + logger.setLevel(logging.NOTSET) + + # Clear any existing handlers + logger.handlers.clear() + + # Determine if this is a non-main process in distributed training + is_main_process = accelerator.is_main_process if accelerator is not None else True + + # Console logging (main process only) + if is_main_process: + console_handler = logging.StreamHandler() + console_handler.setFormatter(formatter) + console_handler.setLevel(console_level.upper()) + logger.addHandler(console_handler) + else: + # Suppress console output for non-main processes + logger.addHandler(logging.NullHandler()) + logger.setLevel(logging.ERROR) + + if log_file is not None: + file_handler = logging.FileHandler(log_file) + file_handler.setFormatter(formatter) + file_handler.setLevel(file_level.upper()) + logger.addHandler(file_handler) + + +def format_big_number(num, precision=0): + suffixes = ["", "K", "M", "B", "T", "Q"] + divisor = 1000.0 + + for suffix in suffixes: + if abs(num) < divisor: + return f"{num:.{precision}f}{suffix}" + num /= divisor + + return num + + +def say(text: str, blocking: bool = False): + system = platform.system() + + if system == "Darwin": + cmd = ["say", text] + + elif system == "Linux": + cmd = ["spd-say", text] + if blocking: + cmd.append("--wait") + + elif system == "Windows": + cmd = [ + "PowerShell", + "-Command", + "Add-Type -AssemblyName System.Speech; " + f"(New-Object System.Speech.Synthesis.SpeechSynthesizer).Speak('{text}')", + ] + + else: + raise RuntimeError("Unsupported operating system for text-to-speech.") + + if blocking: + subprocess.run(cmd, check=True) + else: + subprocess.Popen(cmd, creationflags=subprocess.CREATE_NO_WINDOW if system == "Windows" else 0) + + +def log_say(text: str, play_sounds: bool = True, blocking: bool = False): + logging.info(text) + + if play_sounds: + say(text, blocking) + + +def get_channel_first_image_shape(image_shape: tuple) -> tuple: + shape = copy(image_shape) + if shape[2] < shape[0] and shape[2] < shape[1]: # (h, w, c) -> (c, h, w) + shape = (shape[2], shape[0], shape[1]) + elif not (shape[0] < shape[1] and shape[0] < shape[2]): + raise ValueError(image_shape) + + return shape + + +def 
has_method(cls: object, method_name: str) -> bool: + return hasattr(cls, method_name) and callable(getattr(cls, method_name)) + + +def is_valid_numpy_dtype_string(dtype_str: str) -> bool: + """ + Return True if a given string can be converted to a numpy dtype. + """ + try: + # Attempt to convert the string to a numpy dtype + np.dtype(dtype_str) + return True + except TypeError: + # If a TypeError is raised, the string is not a valid dtype + return False + + +def enter_pressed() -> bool: + if platform.system() == "Windows": + import msvcrt + + if msvcrt.kbhit(): + key = msvcrt.getch() + return key in (b"\r", b"\n") # enter key + return False + else: + return select.select([sys.stdin], [], [], 0)[0] and sys.stdin.readline().strip() == "" + + +def move_cursor_up(lines): + """Move the cursor up by a specified number of lines.""" + print(f"\033[{lines}A", end="") + + +def get_elapsed_time_in_days_hours_minutes_seconds(elapsed_time_s: float): + days = int(elapsed_time_s // (24 * 3600)) + elapsed_time_s %= 24 * 3600 + hours = int(elapsed_time_s // 3600) + elapsed_time_s %= 3600 + minutes = int(elapsed_time_s // 60) + seconds = elapsed_time_s % 60 + return days, hours, minutes, seconds + + +class SuppressProgressBars: + """ + Context manager to suppress progress bars. + + Example + -------- + ```python + with SuppressProgressBars(): + # Code that would normally show progress bars + ``` + """ + + def __enter__(self): + disable_progress_bar() + + def __exit__(self, exc_type, exc_val, exc_tb): + enable_progress_bar() + + +class TimerManager: + """ + Lightweight utility to measure elapsed time. + + Examples + -------- + ```python + # Example 1: Using context manager + timer = TimerManager("Policy", log=False) + for _ in range(3): + with timer: + time.sleep(0.01) + print(timer.last, timer.fps_avg, timer.percentile(90)) # Prints: 0.01 100.0 0.01 + ``` + + ```python + # Example 2: Using start/stop methods + timer = TimerManager("Policy", log=False) + timer.start() + time.sleep(0.01) + timer.stop() + print(timer.last, timer.fps_avg, timer.percentile(90)) # Prints: 0.01 100.0 0.01 + ``` + """ + + def __init__( + self, + label: str = "Elapsed-time", + log: bool = True, + logger: logging.Logger | None = None, + ): + self.label = label + self.log = log + self.logger = logger + self._start: float | None = None + self._history: list[float] = [] + + def __enter__(self): + return self.start() + + def __exit__(self, exc_type, exc_val, exc_tb): + self.stop() + + def start(self): + self._start = time.perf_counter() + return self + + def stop(self) -> float: + if self._start is None: + raise RuntimeError("Timer was never started.") + elapsed = time.perf_counter() - self._start + self._history.append(elapsed) + self._start = None + if self.log: + if self.logger is not None: + self.logger.info(f"{self.label}: {elapsed:.6f} s") + else: + logging.info(f"{self.label}: {elapsed:.6f} s") + return elapsed + + def reset(self): + self._history.clear() + + @property + def last(self) -> float: + return self._history[-1] if self._history else 0.0 + + @property + def avg(self) -> float: + return mean(self._history) if self._history else 0.0 + + @property + def total(self) -> float: + return sum(self._history) + + @property + def count(self) -> int: + return len(self._history) + + @property + def history(self) -> list[float]: + return deepcopy(self._history) + + @property + def fps_last(self) -> float: + return 0.0 if self.last == 0 else 1.0 / self.last + + @property + def fps_avg(self) -> float: + return 0.0 if self.avg == 0 else 
1.0 / self.avg + + def percentile(self, p: float) -> float: + """ + Return the p-th percentile of recorded times. + """ + if not self._history: + return 0.0 + return float(np.percentile(self._history, p)) + + def fps_percentile(self, p: float) -> float: + """ + FPS corresponding to the p-th percentile time. + """ + val = self.percentile(p) + return 0.0 if val == 0 else 1.0 / val diff --git a/lerobot/src/lerobot/utils/visualization_utils.py b/lerobot/src/lerobot/utils/visualization_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..182623fe4eaaaf164e7389b2e3bfa0ea25c7feab --- /dev/null +++ b/lerobot/src/lerobot/utils/visualization_utils.py @@ -0,0 +1,112 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numbers +import os + +import numpy as np +import rerun as rr + +from lerobot.processor import RobotAction, RobotObservation + +from .constants import ACTION, ACTION_PREFIX, OBS_PREFIX, OBS_STR + + +def init_rerun( + session_name: str = "lerobot_control_loop", ip: str | None = None, port: int | None = None +) -> None: + """ + Initializes the Rerun SDK for visualizing the control loop. + + Args: + session_name: Name of the Rerun session. + ip: Optional IP for connecting to a Rerun server. + port: Optional port for connecting to a Rerun server. + """ + batch_size = os.getenv("RERUN_FLUSH_NUM_BYTES", "8000") + os.environ["RERUN_FLUSH_NUM_BYTES"] = batch_size + rr.init(session_name) + memory_limit = os.getenv("LEROBOT_RERUN_MEMORY_LIMIT", "10%") + if ip and port: + rr.connect_grpc(url=f"rerun+http://{ip}:{port}/proxy") + else: + rr.spawn(memory_limit=memory_limit) + + +def _is_scalar(x): + return isinstance(x, (float | numbers.Real | np.integer | np.floating)) or ( + isinstance(x, np.ndarray) and x.ndim == 0 + ) + + +def log_rerun_data( + observation: RobotObservation | None = None, + action: RobotAction | None = None, + compress_images: bool = False, +) -> None: + """ + Logs observation and action data to Rerun for real-time visualization. + + This function iterates through the provided observation and action dictionaries and sends their contents + to the Rerun viewer. It handles different data types appropriately: + - Scalars values (floats, ints) are logged as `rr.Scalars`. + - 3D NumPy arrays that resemble images (e.g., with 1, 3, or 4 channels first) are transposed + from CHW to HWC format, (optionally) compressed to JPEG and logged as `rr.Image` or `rr.EncodedImage`. + - 1D NumPy arrays are logged as a series of individual scalars, with each element indexed. + - Other multi-dimensional arrays are flattened and logged as individual scalars. + + Keys are automatically namespaced with "observation." or "action." if not already present. + + Args: + observation: An optional dictionary containing observation data to log. + action: An optional dictionary containing action data to log. 
+        compress_images: Whether to compress images before logging. Saves bandwidth and
+            memory at the cost of extra CPU time and some image quality.
+    """
+    if observation:
+        for k, v in observation.items():
+            if v is None:
+                continue
+            key = k if str(k).startswith(OBS_PREFIX) else f"{OBS_STR}.{k}"
+
+            if _is_scalar(v):
+                rr.log(key, rr.Scalars(float(v)))
+            elif isinstance(v, np.ndarray):
+                arr = v
+                # Convert CHW -> HWC when needed
+                if arr.ndim == 3 and arr.shape[0] in (1, 3, 4) and arr.shape[-1] not in (1, 3, 4):
+                    arr = np.transpose(arr, (1, 2, 0))
+                if arr.ndim == 1:
+                    for i, vi in enumerate(arr):
+                        rr.log(f"{key}_{i}", rr.Scalars(float(vi)))
+                else:
+                    img_entity = rr.Image(arr).compress() if compress_images else rr.Image(arr)
+                    rr.log(key, img_entity, static=True)
+
+    if action:
+        for k, v in action.items():
+            if v is None:
+                continue
+            key = k if str(k).startswith(ACTION_PREFIX) else f"{ACTION}.{k}"
+
+            if _is_scalar(v):
+                rr.log(key, rr.Scalars(float(v)))
+            elif isinstance(v, np.ndarray):
+                if v.ndim == 1:
+                    for i, vi in enumerate(v):
+                        rr.log(f"{key}_{i}", rr.Scalars(float(vi)))
+                else:
+                    # Fall back to flattening higher-dimensional arrays
+                    flat = v.flatten()
+                    for i, vi in enumerate(flat):
+                        rr.log(f"{key}_{i}", rr.Scalars(float(vi)))
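+
+
+# Minimal usage sketch (illustrative only; assumes a local Rerun viewer can be
+# spawned and that the observation/action dicts follow the conventions above):
+#
+#   init_rerun(session_name="lerobot_control_loop")
+#   obs = {"wrist_cam": np.zeros((3, 480, 640), dtype=np.uint8), "shoulder_pan.pos": 0.1}
+#   act = {"shoulder_pan.pos": 0.12}
+#   log_rerun_data(observation=obs, action=act, compress_images=True)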