sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
huggingface/lerobot:src/lerobot/cameras/reachy2_camera/configuration_reachy2_camera.py | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from ..configs import CameraConfig, ColorMode
__all__ = ["CameraConfig", "ColorMode", "Reachy2CameraConfig"]
@CameraConfig.register_subclass("reachy2_camera")
@dataclass
class Reachy2CameraConfig(CameraConfig):
    """Configuration for Reachy 2's built-in cameras.

    Selects one video stream of the robot: either eye of the "teleop" head
    camera ("left"/"right") or a stream of the torso "depth" camera
    ("rgb"/"depth"; the depth stream itself is not supported yet). Width,
    height and fps are inherited from `CameraConfig`; fps is fixed by the
    robot and not actually configurable.

    Attributes:
        name: Which camera to use, "teleop" or "depth".
        image_type: Stream selector, validated against `name` (see above).
        color_mode: Output channel order, RGB (default) or BGR.
        ip_address: IP address of the robot. Defaults to "localhost".
        port: Port of the camera server. Defaults to 50065.

    Note:
        - Only 3-channel color output (RGB/BGR) is currently supported.
    """

    name: str
    image_type: str
    color_mode: ColorMode = ColorMode.RGB
    ip_address: str | None = "localhost"
    port: int = 50065

    def __post_init__(self) -> None:
        # Map each camera name to the image types it can serve.
        allowed_types = {"teleop": ("left", "right"), "depth": ("rgb", "depth")}
        if self.name not in allowed_types:
            raise ValueError(f"`name` is expected to be 'teleop' or 'depth', but {self.name} is provided.")
        if self.image_type not in allowed_types[self.name]:
            raise ValueError(
                f"`image_type` is expected to be 'left' or 'right' for teleop camera, and 'rgb' or 'depth' for depth camera, but {self.image_type} is provided."
            )
        # Coerce plain strings (e.g. "rgb") into the ColorMode enum.
        self.color_mode = ColorMode(self.color_mode)
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/cameras/reachy2_camera/configuration_reachy2_camera.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/cameras/reachy2_camera/reachy2_camera.py | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides the Reachy2Camera class for capturing frames from Reachy 2 cameras using Reachy 2's CameraManager.
"""
from __future__ import annotations
import logging
import os
import platform
import time
from typing import TYPE_CHECKING, Any
from numpy.typing import NDArray # type: ignore # TODO: add type stubs for numpy.typing
# Fix MSMF hardware transform compatibility for Windows before importing cv2
# MSMF hardware transforms can break camera opening on Windows; disable them
# unless the user has already configured the variable themselves.
if platform.system() == "Windows" and "OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS" not in os.environ:
    os.environ["OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS"] = "0"
import cv2 # type: ignore # TODO: add type stubs for OpenCV
import numpy as np # type: ignore # TODO: add type stubs for numpy
from lerobot.utils.decorators import check_if_not_connected
from lerobot.utils.import_utils import _reachy2_sdk_available
if TYPE_CHECKING or _reachy2_sdk_available:
    from reachy2_sdk.media.camera import CameraView
    from reachy2_sdk.media.camera_manager import CameraManager
else:
    # Fallback stubs so this module still imports when reachy2_sdk is absent:
    # connecting will fail, but the names remain resolvable.
    CameraManager = None

    class CameraView:
        # Mirrors the SDK's left/right view identifiers used by get_frame().
        LEFT = 0
        RIGHT = 1
from lerobot.utils.errors import DeviceNotConnectedError
from ..camera import Camera
from .configuration_reachy2_camera import ColorMode, Reachy2CameraConfig
logger = logging.getLogger(__name__)
class Reachy2Camera(Camera):
    """
    Manages Reachy 2 camera using Reachy 2 CameraManager.

    This class provides a high-level interface to connect to, configure, and read
    frames from Reachy 2 cameras. It supports both synchronous and asynchronous
    frame reading.

    An Reachy2Camera instance requires a camera name (e.g., "teleop") and an image
    type (e.g., "left") to be specified in the configuration.

    The camera's default settings (FPS, resolution, color mode) are used unless
    overridden in the configuration.
    """

    def __init__(self, config: Reachy2CameraConfig):
        """
        Initializes the Reachy2Camera instance.

        Args:
            config: The configuration settings for the camera.
        """
        super().__init__(config)
        self.config = config
        self.color_mode = config.color_mode
        # Most recent frame and its capture time, cached by read() for read_latest().
        self.latest_frame: NDArray[Any] | None = None
        self.latest_timestamp: float | None = None
        # Created in connect(); stays None while disconnected.
        self.cam_manager: CameraManager | None = None

    def __str__(self) -> str:
        return f"{self.__class__.__name__}({self.config.name}, {self.config.image_type})"

    @property
    def is_connected(self) -> bool:
        """Checks if the camera is currently connected and opened."""
        # NOTE(review): reads the SDK-private `_grpc_connected` attribute — confirm
        # it still exists before bumping the reachy2_sdk dependency.
        if self.config.name == "teleop":
            return bool(
                self.cam_manager._grpc_connected and self.cam_manager.teleop if self.cam_manager else False
            )
        elif self.config.name == "depth":
            return bool(
                self.cam_manager._grpc_connected and self.cam_manager.depth if self.cam_manager else False
            )
        else:
            raise ValueError(f"Invalid camera name '{self.config.name}'. Expected 'teleop' or 'depth'.")

    def connect(self, warmup: bool = True) -> None:
        """
        Connects to the Reachy2 CameraManager as specified in the configuration.

        Args:
            warmup: Unused for Reachy 2 cameras; kept for interface compatibility.

        Raises:
            DeviceNotConnectedError: If the camera is not connected.
        """
        self.cam_manager = CameraManager(host=self.config.ip_address, port=self.config.port)
        if self.cam_manager is None:
            raise DeviceNotConnectedError(f"Could not connect to {self}.")
        self.cam_manager.initialize_cameras()
        logger.info(f"{self} connected.")

    @staticmethod
    def find_cameras() -> list[dict[str, Any]]:
        """
        Detection not implemented for Reachy2 cameras.
        """
        raise NotImplementedError("Camera detection is not implemented for Reachy2 cameras.")

    @check_if_not_connected
    def read(self, color_mode: ColorMode | None = None) -> NDArray[Any]:
        """
        Reads a single frame synchronously from the camera.

        This method retrieves the most recent frame available in Reachy 2's low-level software.

        Args:
            color_mode: Deprecated; passing a value only triggers a warning.
                The configured color mode is always used.

        Returns:
            np.ndarray: The captured frame as a NumPy array in the format
                        (height, width, channels), using the specified or default
                        color mode and applying any configured rotation.

        Raises:
            DeviceNotConnectedError: If the camera is not connected.
            ValueError: If the camera name or configured color mode is invalid.
            RuntimeError: If no frame could be retrieved.
        """
        start_time = time.perf_counter()
        if self.cam_manager is None:
            raise DeviceNotConnectedError(f"{self} is not connected.")
        if color_mode is not None:
            logger.warning(
                f"{self} read() color_mode parameter is deprecated and will be removed in future versions."
            )
        # Empty placeholder; replaced by the frame fetched below.
        frame: NDArray[Any] = np.empty((0, 0, 3), dtype=np.uint8)
        if self.config.name == "teleop" and hasattr(self.cam_manager, "teleop"):
            if self.config.image_type == "left":
                frame = self.cam_manager.teleop.get_frame(
                    CameraView.LEFT, size=(self.config.width, self.config.height)
                )[0]
            elif self.config.image_type == "right":
                frame = self.cam_manager.teleop.get_frame(
                    CameraView.RIGHT, size=(self.config.width, self.config.height)
                )[0]
        elif self.config.name == "depth" and hasattr(self.cam_manager, "depth"):
            if self.config.image_type == "depth":
                frame = self.cam_manager.depth.get_depth_frame()[0]
            elif self.config.image_type == "rgb":
                frame = self.cam_manager.depth.get_frame(size=(self.config.width, self.config.height))[0]
        else:
            raise ValueError(f"Invalid camera name '{self.config.name}'. Expected 'teleop' or 'depth'.")
        if frame is None:
            raise RuntimeError(f"Internal error: No frame available for {self}.")
        if self.color_mode not in (ColorMode.RGB, ColorMode.BGR):
            raise ValueError(
                f"Invalid color mode '{self.color_mode}'. Expected {ColorMode.RGB} or {ColorMode.BGR}."
            )
        # Frames arrive in BGR channel order (hence the BGR2RGB conversion when
        # RGB output is configured).
        if self.color_mode == ColorMode.RGB:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Cache for read_latest().
        self.latest_frame = frame
        self.latest_timestamp = time.perf_counter()
        read_duration_ms = (time.perf_counter() - start_time) * 1e3
        logger.debug(f"{self} read took: {read_duration_ms:.1f}ms")
        return frame

    @check_if_not_connected
    def async_read(self, timeout_ms: float = 200) -> NDArray[Any]:
        """
        Same as read()

        Args:
            timeout_ms: Unused; kept for interface compatibility with other cameras.

        Returns:
            np.ndarray: The latest captured frame as a NumPy array in the format
                        (height, width, channels), processed according to configuration.

        Raises:
            DeviceNotConnectedError: If the camera is not connected.
            TimeoutError: If no frame becomes available within the specified timeout.
            RuntimeError: If an unexpected error occurs.
        """
        return self.read()

    @check_if_not_connected
    def read_latest(self, max_age_ms: int = 500) -> NDArray[Any]:
        """Return the most recent frame captured immediately (Peeking).

        This method is non-blocking and returns whatever is currently in the
        memory buffer. The frame may be stale,
        meaning it could have been captured a while ago (hanging camera scenario e.g.).

        Args:
            max_age_ms: Maximum tolerated age of the cached frame, in milliseconds.

        Returns:
            NDArray: The most recently cached frame image (numpy array).

        Raises:
            TimeoutError: If the latest frame is older than `max_age_ms`.
            DeviceNotConnectedError: If the camera is not connected.
            RuntimeError: If the camera is connected but has not captured any frames yet.
        """
        if self.latest_frame is None or self.latest_timestamp is None:
            raise RuntimeError(f"{self} has not captured any frames yet.")
        age_ms = (time.perf_counter() - self.latest_timestamp) * 1e3
        if age_ms > max_age_ms:
            raise TimeoutError(
                f"{self} latest frame is too old: {age_ms:.1f} ms (max allowed: {max_age_ms} ms)."
            )
        return self.latest_frame

    @check_if_not_connected
    def disconnect(self) -> None:
        """
        Stops the background read thread (if running).

        Raises:
            DeviceNotConnectedError: If the camera is already disconnected.
        """
        if self.cam_manager is not None:
            self.cam_manager.disconnect()
        logger.info(f"{self} disconnected.")
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/cameras/reachy2_camera/reachy2_camera.py",
"license": "Apache License 2.0",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/robots/reachy2/configuration_reachy2.py | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from lerobot.cameras import CameraConfig
from lerobot.cameras.configs import ColorMode
from lerobot.cameras.reachy2_camera import Reachy2CameraConfig
from ..config import RobotConfig
@RobotConfig.register_subclass("reachy2")
@dataclass
class Reachy2RobotConfig(RobotConfig):
    """Configuration for the Reachy 2 robot.

    Selects which robot parts contribute joints, which built-in cameras feed
    the observations, and how the client connects to the robot.
    """

    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors.
    max_relative_target: float | None = None

    # IP address of the Reachy 2 robot
    ip_address: str | None = "localhost"
    # Port of the Reachy 2 robot
    port: int = 50065

    # If True, turn_off_smoothly() will be sent to the robot before disconnecting.
    disable_torque_on_disconnect: bool = False

    # Tag for external commands control
    # Set to True if you use an external commands system to control the robot,
    # such as the official teleoperation application: https://github.com/pollen-robotics/Reachy2Teleoperation
    # If True, robot.send_action() will not send commands to the robot.
    use_external_commands: bool = False

    # Robot parts
    # Set to False to not add the corresponding joints part to the robot list of joints.
    # By default, all parts are set to True.
    with_mobile_base: bool = True
    with_l_arm: bool = True
    with_r_arm: bool = True
    with_neck: bool = True
    with_antennas: bool = True

    # Robot cameras
    # Set to True if you want to use the corresponding cameras in the observations.
    # By default, no camera is used.
    with_left_teleop_camera: bool = False
    with_right_teleop_camera: bool = False
    with_torso_camera: bool = False

    # Camera parameters
    camera_width: int = 640
    camera_height: int = 480

    # For cameras other than the 3 default Reachy 2 cameras.
    cameras: dict[str, CameraConfig] = field(default_factory=dict)

    def __post_init__(self) -> None:
        # Add cameras with same ip_address as the robot
        if self.with_left_teleop_camera:
            self.cameras["teleop_left"] = Reachy2CameraConfig(
                name="teleop",
                image_type="left",
                ip_address=self.ip_address,
                port=self.port,
                width=self.camera_width,
                height=self.camera_height,
                fps=30,  # Not configurable for Reachy 2 cameras
                color_mode=ColorMode.RGB,
            )
        if self.with_right_teleop_camera:
            self.cameras["teleop_right"] = Reachy2CameraConfig(
                name="teleop",
                image_type="right",
                ip_address=self.ip_address,
                port=self.port,
                width=self.camera_width,
                height=self.camera_height,
                fps=30,  # Not configurable for Reachy 2 cameras
                color_mode=ColorMode.RGB,
            )
        if self.with_torso_camera:
            # The torso camera is the RGB stream of the depth camera.
            self.cameras["torso_rgb"] = Reachy2CameraConfig(
                name="depth",
                image_type="rgb",
                ip_address=self.ip_address,
                port=self.port,
                width=self.camera_width,
                height=self.camera_height,
                fps=30,  # Not configurable for Reachy 2 cameras
                color_mode=ColorMode.RGB,
            )
        super().__post_init__()
        # At least one joints-producing part must remain enabled.
        if not (
            self.with_mobile_base
            or self.with_l_arm
            or self.with_r_arm
            or self.with_neck
            or self.with_antennas
        ):
            raise ValueError(
                "No Reachy2Robot part used.\n"
                "At least one part of the robot must be set to True "
                "(with_mobile_base, with_l_arm, with_r_arm, with_neck, with_antennas)"
            )
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/robots/reachy2/configuration_reachy2.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/robots/reachy2/robot_reachy2.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import time
from typing import TYPE_CHECKING, Any
from lerobot.cameras.utils import make_cameras_from_configs
from lerobot.processor import RobotAction, RobotObservation
from lerobot.utils.import_utils import _reachy2_sdk_available
from ..robot import Robot
from ..utils import ensure_safe_goal_position
from .configuration_reachy2 import Reachy2RobotConfig
if TYPE_CHECKING or _reachy2_sdk_available:
from reachy2_sdk import ReachySDK
else:
ReachySDK = None
# {lerobot_keys: reachy2_sdk_keys}
# Mapping tables from LeRobot motor-feature names to the dotted joint paths used
# by reachy2_sdk, grouped per robot part so config flags can opt each one in.
REACHY2_NECK_JOINTS = {
    "neck_yaw.pos": "head.neck.yaw",
    "neck_pitch.pos": "head.neck.pitch",
    "neck_roll.pos": "head.neck.roll",
}

REACHY2_ANTENNAS_JOINTS = {
    "l_antenna.pos": "head.l_antenna",
    "r_antenna.pos": "head.r_antenna",
}

REACHY2_R_ARM_JOINTS = {
    "r_shoulder_pitch.pos": "r_arm.shoulder.pitch",
    "r_shoulder_roll.pos": "r_arm.shoulder.roll",
    "r_elbow_yaw.pos": "r_arm.elbow.yaw",
    "r_elbow_pitch.pos": "r_arm.elbow.pitch",
    "r_wrist_roll.pos": "r_arm.wrist.roll",
    "r_wrist_pitch.pos": "r_arm.wrist.pitch",
    "r_wrist_yaw.pos": "r_arm.wrist.yaw",
    "r_gripper.pos": "r_arm.gripper",
}

REACHY2_L_ARM_JOINTS = {
    "l_shoulder_pitch.pos": "l_arm.shoulder.pitch",
    "l_shoulder_roll.pos": "l_arm.shoulder.roll",
    "l_elbow_yaw.pos": "l_arm.elbow.yaw",
    "l_elbow_pitch.pos": "l_arm.elbow.pitch",
    "l_wrist_roll.pos": "l_arm.wrist.roll",
    "l_wrist_pitch.pos": "l_arm.wrist.pitch",
    "l_wrist_yaw.pos": "l_arm.wrist.yaw",
    "l_gripper.pos": "l_arm.gripper",
}

# Mobile-base velocity feature names mapped to the SDK's odometry keys.
REACHY2_VEL = {
    "mobile_base.vx": "vx",
    "mobile_base.vy": "vy",
    "mobile_base.vtheta": "vtheta",
}
class Reachy2Robot(Robot):
    """
    [Reachy 2](https://www.pollen-robotics.com/reachy/), by Pollen Robotics.

    Wraps the reachy2_sdk client behind the LeRobot `Robot` interface:
    observations are joint positions (plus mobile-base odometry and camera
    frames when enabled) and actions are goal joint positions (plus base
    velocities when enabled).
    """

    config_class = Reachy2RobotConfig
    name = "reachy2"

    def __init__(self, config: Reachy2RobotConfig):
        super().__init__(config)
        self.config = config
        self.robot_type = self.config.type
        self.use_external_commands = self.config.use_external_commands
        # SDK handle; created in connect(), None while disconnected.
        self.reachy: None | ReachySDK = None
        self.cameras = make_cameras_from_configs(config.cameras)
        # Timing diagnostics filled by get_observation()/send_action().
        self.logs: dict[str, float] = {}
        # {lerobot_key: reachy2_sdk_key} for the parts enabled in the config.
        self.joints_dict: dict[str, str] = self._generate_joints_dict()

    @property
    def observation_features(self) -> dict[str, Any]:
        return {**self.motors_features, **self.camera_features}

    @property
    def action_features(self) -> dict[str, type]:
        return self.motors_features

    @property
    def camera_features(self) -> dict[str, tuple[int | None, int | None, int]]:
        # (height, width, channels); 3-channel color images only.
        return {cam: (self.cameras[cam].height, self.cameras[cam].width, 3) for cam in self.cameras}

    @property
    def motors_features(self) -> dict[str, type]:
        # Joint positions, plus mobile-base velocities when the base is enabled.
        if self.config.with_mobile_base:
            return {
                **dict.fromkeys(
                    self.joints_dict.keys(),
                    float,
                ),
                **dict.fromkeys(
                    REACHY2_VEL.keys(),
                    float,
                ),
            }
        else:
            return dict.fromkeys(self.joints_dict.keys(), float)

    @property
    def is_connected(self) -> bool:
        return self.reachy.is_connected() if self.reachy is not None else False

    def connect(self, calibrate: bool = False) -> None:
        # Open the SDK connection, then the cameras, then put the robot in a
        # ready-to-move state.
        self.reachy = ReachySDK(self.config.ip_address)
        if not self.is_connected:
            raise ConnectionError()
        for cam in self.cameras.values():
            cam.connect()
        self.configure()

    def configure(self) -> None:
        if self.reachy is not None:
            # Enable torque and restore the robot's default joint limits.
            self.reachy.turn_on()
            self.reachy.reset_default_limits()

    @property
    def is_calibrated(self) -> bool:
        # Reachy 2 is calibrated on the robot side; nothing to do here.
        return True

    def calibrate(self) -> None:
        pass

    def _generate_joints_dict(self) -> dict[str, str]:
        # Assemble the joint-name mapping from the parts enabled in the config.
        joints = {}
        if self.config.with_neck:
            joints.update(REACHY2_NECK_JOINTS)
        if self.config.with_l_arm:
            joints.update(REACHY2_L_ARM_JOINTS)
        if self.config.with_r_arm:
            joints.update(REACHY2_R_ARM_JOINTS)
        if self.config.with_antennas:
            joints.update(REACHY2_ANTENNAS_JOINTS)
        return joints

    def _get_state(self) -> dict[str, float]:
        # Present joint positions, plus odometry velocities when the base is enabled.
        if self.reachy is not None:
            pos_dict = {k: self.reachy.joints[v].present_position for k, v in self.joints_dict.items()}
            if not self.config.with_mobile_base:
                return pos_dict
            vel_dict = {k: self.reachy.mobile_base.odometry[v] for k, v in REACHY2_VEL.items()}
            return {**pos_dict, **vel_dict}
        else:
            return {}

    def get_observation(self) -> RobotObservation:
        obs_dict: RobotObservation = {}
        # Read Reachy 2 state
        before_read_t = time.perf_counter()
        obs_dict.update(self._get_state())
        self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t
        # Capture images from cameras
        for cam_key, cam in self.cameras.items():
            obs_dict[cam_key] = cam.read_latest()
        return obs_dict

    def send_action(self, action: RobotAction) -> RobotAction:
        """Dispatch an action dict to the robot; returns the action unchanged."""
        if self.reachy is not None:
            if not self.is_connected:
                raise ConnectionError()
            before_write_t = time.perf_counter()
            vel = {}
            goal_pos = {}
            # Split the action into base velocities and joint goal positions.
            for key, val in action.items():
                if key not in self.joints_dict:
                    if key not in REACHY2_VEL:
                        raise KeyError(f"Key '{key}' is not a valid motor key in Reachy 2.")
                    else:
                        vel[REACHY2_VEL[key]] = float(val)
                else:
                    # Clamp relative motion for safety when a cap is configured
                    # (skipped under external command control).
                    if not self.use_external_commands and self.config.max_relative_target is not None:
                        goal_pos[key] = float(val)
                        goal_present_pos = {
                            key: (
                                goal_pos[key],
                                self.reachy.joints[self.joints_dict[key]].present_position,
                            )
                        }
                        safe_goal_pos = ensure_safe_goal_position(
                            goal_present_pos, float(self.config.max_relative_target)
                        )
                        val = safe_goal_pos[key]
                    self.reachy.joints[self.joints_dict[key]].goal_position = float(val)
            if self.config.with_mobile_base:
                # NOTE(review): assumes all three velocity keys are present in
                # the action when the base is enabled — KeyError otherwise.
                self.reachy.mobile_base.set_goal_speed(vel["vx"], vel["vy"], vel["vtheta"])
            # We don't send the goal positions if we control Reachy 2 externally
            if not self.use_external_commands:
                self.reachy.send_goal_positions()
                if self.config.with_mobile_base:
                    self.reachy.mobile_base.send_speed_command()
            self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t
        return action

    def disconnect(self) -> None:
        if self.reachy is not None:
            for cam in self.cameras.values():
                cam.disconnect()
            if self.config.disable_torque_on_disconnect:
                self.reachy.turn_off_smoothly()
            self.reachy.disconnect()
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/robots/reachy2/robot_reachy2.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/teleoperators/reachy2_teleoperator/config_reachy2_teleoperator.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from ..config import TeleoperatorConfig
@TeleoperatorConfig.register_subclass("reachy2_teleoperator")
@dataclass
class Reachy2TeleoperatorConfig(TeleoperatorConfig):
    """Configuration for using a Reachy 2 robot as a teleoperation device.

    Attributes:
        ip_address: IP address of the Reachy 2 robot used as teleoperator.
        use_present_position: Read present joint positions as actions when
            True; otherwise read the goal positions.
        with_mobile_base / with_l_arm / with_r_arm / with_neck / with_antennas:
            Which parts of the robot contribute to the action space.
    """

    # IP address of the Reachy 2 robot used as teleoperator
    ip_address: str | None = "localhost"

    # Whether to use the present position of the joints as actions
    # if False, the goal position of the joints will be used
    use_present_position: bool = False

    # Which parts of the robot to use
    with_mobile_base: bool = True
    with_l_arm: bool = True
    with_r_arm: bool = True
    with_neck: bool = True
    with_antennas: bool = True

    def __post_init__(self):
        # At least one part must be enabled, otherwise there is nothing to read.
        enabled_parts = (
            self.with_mobile_base,
            self.with_l_arm,
            self.with_r_arm,
            self.with_neck,
            self.with_antennas,
        )
        if not any(enabled_parts):
            raise ValueError(
                "No Reachy2Teleoperator part used.\n"
                "At least one part of the robot must be set to True "
                "(with_mobile_base, with_l_arm, with_r_arm, with_neck, with_antennas)"
            )
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/teleoperators/reachy2_teleoperator/config_reachy2_teleoperator.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/teleoperators/reachy2_teleoperator/reachy2_teleoperator.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import logging
import time
from typing import TYPE_CHECKING
from lerobot.utils.import_utils import _reachy2_sdk_available
if TYPE_CHECKING or _reachy2_sdk_available:
from reachy2_sdk import ReachySDK
else:
ReachySDK = None
from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
from lerobot.utils.errors import DeviceNotConnectedError
from ..teleoperator import Teleoperator
from .config_reachy2_teleoperator import Reachy2TeleoperatorConfig
logger = logging.getLogger(__name__)
# {lerobot_keys: reachy2_sdk_keys}
# Mapping tables from LeRobot motor-feature names to the dotted joint paths used
# by reachy2_sdk, grouped per robot part so config flags can opt each one in.
REACHY2_NECK_JOINTS = {
    "neck_yaw.pos": "head.neck.yaw",
    "neck_pitch.pos": "head.neck.pitch",
    "neck_roll.pos": "head.neck.roll",
}

REACHY2_ANTENNAS_JOINTS = {
    "l_antenna.pos": "head.l_antenna",
    "r_antenna.pos": "head.r_antenna",
}

REACHY2_R_ARM_JOINTS = {
    "r_shoulder_pitch.pos": "r_arm.shoulder.pitch",
    "r_shoulder_roll.pos": "r_arm.shoulder.roll",
    "r_elbow_yaw.pos": "r_arm.elbow.yaw",
    "r_elbow_pitch.pos": "r_arm.elbow.pitch",
    "r_wrist_roll.pos": "r_arm.wrist.roll",
    "r_wrist_pitch.pos": "r_arm.wrist.pitch",
    "r_wrist_yaw.pos": "r_arm.wrist.yaw",
    "r_gripper.pos": "r_arm.gripper",
}

REACHY2_L_ARM_JOINTS = {
    "l_shoulder_pitch.pos": "l_arm.shoulder.pitch",
    "l_shoulder_roll.pos": "l_arm.shoulder.roll",
    "l_elbow_yaw.pos": "l_arm.elbow.yaw",
    "l_elbow_pitch.pos": "l_arm.elbow.pitch",
    "l_wrist_roll.pos": "l_arm.wrist.roll",
    "l_wrist_pitch.pos": "l_arm.wrist.pitch",
    "l_wrist_yaw.pos": "l_arm.wrist.yaw",
    "l_gripper.pos": "l_arm.gripper",
}

# Mobile-base velocity feature names mapped to the SDK's velocity keys.
REACHY2_VEL = {
    "mobile_base.vx": "vx",
    "mobile_base.vy": "vy",
    "mobile_base.vtheta": "vtheta",
}
class Reachy2Teleoperator(Teleoperator):
    """
    [Reachy 2](https://www.pollen-robotics.com/reachy/), by Pollen Robotics.

    Exposes a Reachy 2 robot as a teleoperation *input* device: get_action()
    reads the robot's joint positions (present or goal, per config) and,
    optionally, the mobile-base velocities.
    """

    config_class = Reachy2TeleoperatorConfig
    # NOTE(review): differs from the registered subclass id
    # "reachy2_teleoperator" — confirm the mismatch is intentional.
    name = "reachy2_specific"

    def __init__(self, config: Reachy2TeleoperatorConfig):
        super().__init__(config)
        self.config = config
        # SDK handle; created in connect(), None while disconnected.
        self.reachy: None | ReachySDK = None
        # {lerobot_key: reachy2_sdk_key} for the parts enabled in the config.
        self.joints_dict: dict[str, str] = self._generate_joints_dict()

    def _generate_joints_dict(self) -> dict[str, str]:
        # Assemble the joint-name mapping from the parts enabled in the config.
        joints = {}
        if self.config.with_neck:
            joints.update(REACHY2_NECK_JOINTS)
        if self.config.with_l_arm:
            joints.update(REACHY2_L_ARM_JOINTS)
        if self.config.with_r_arm:
            joints.update(REACHY2_R_ARM_JOINTS)
        if self.config.with_antennas:
            joints.update(REACHY2_ANTENNAS_JOINTS)
        return joints

    @property
    def action_features(self) -> dict[str, type]:
        # Joint positions, plus mobile-base velocities when the base is enabled.
        if self.config.with_mobile_base:
            return {
                **dict.fromkeys(
                    self.joints_dict.keys(),
                    float,
                ),
                **dict.fromkeys(
                    REACHY2_VEL.keys(),
                    float,
                ),
            }
        else:
            return dict.fromkeys(self.joints_dict.keys(), float)

    @property
    def feedback_features(self) -> dict[str, type]:
        # This teleoperator has no feedback channel.
        return {}

    @property
    def is_connected(self) -> bool:
        return self.reachy.is_connected() if self.reachy is not None else False

    @check_if_already_connected
    def connect(self, calibrate: bool = True) -> None:
        self.reachy = ReachySDK(self.config.ip_address)
        if not self.is_connected:
            raise DeviceNotConnectedError()
        logger.info(f"{self} connected.")

    @property
    def is_calibrated(self) -> bool:
        # Calibration is handled on the robot side; nothing to do here.
        return True

    def calibrate(self) -> None:
        pass

    def configure(self) -> None:
        pass

    @check_if_not_connected
    def get_action(self) -> dict[str, float]:
        """Read the current joint (and mobile-base) state as an action dict."""
        start = time.perf_counter()
        joint_action: dict[str, float] = {}
        vel_action: dict[str, float] = {}
        # Present position mirrors the physical robot; goal position mirrors the
        # commands it was last given.
        if self.config.use_present_position:
            joint_action = {k: self.reachy.joints[v].present_position for k, v in self.joints_dict.items()}
        else:
            joint_action = {k: self.reachy.joints[v].goal_position for k, v in self.joints_dict.items()}
        if not self.config.with_mobile_base:
            dt_ms = (time.perf_counter() - start) * 1e3
            logger.debug(f"{self} read action: {dt_ms:.1f}ms")
            return joint_action
        if self.config.use_present_position:
            vel_action = {k: self.reachy.mobile_base.odometry[v] for k, v in REACHY2_VEL.items()}
        else:
            vel_action = {k: self.reachy.mobile_base.last_cmd_vel[v] for k, v in REACHY2_VEL.items()}
        dt_ms = (time.perf_counter() - start) * 1e3
        logger.debug(f"{self} read action: {dt_ms:.1f}ms")
        return {**joint_action, **vel_action}

    def send_feedback(self, feedback: dict[str, float]) -> None:
        raise NotImplementedError

    def disconnect(self) -> None:
        if self.is_connected:
            self.reachy.disconnect()
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/teleoperators/reachy2_teleoperator/reachy2_teleoperator.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:tests/cameras/test_reachy2_camera.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
pytest.importorskip("reachy2_sdk")
from lerobot.cameras.reachy2_camera import Reachy2Camera, Reachy2CameraConfig
from lerobot.utils.errors import DeviceNotConnectedError
# (camera name, image type) pairs exercised by the `camera` fixture below.
PARAMS = [
    ("teleop", "left"),
    ("teleop", "right"),
    ("depth", "rgb"),
    # ("depth", "depth"),  # Depth camera is not available yet
]
def _make_cam_manager_mock():
    """Create a MagicMock that mimics reachy2_sdk's CameraManager.

    The mock exposes `teleop` and `depth` sub-cameras whose `get_frame`
    returns a black 480x640 uint8 frame plus a capture timestamp, and
    `connect`/`disconnect` side effects that toggle camera availability.
    """
    manager = MagicMock(name="CameraManagerMock")

    def _grab_frame(*_args, **_kwargs):
        # Fresh black frame + "capture time", like the real SDK call.
        return np.zeros((480, 640, 3), dtype=np.uint8), time.time()

    def _build_sub_cam(label):
        sub = MagicMock(name=label)
        sub.width = 640
        sub.height = 480
        sub.get_frame = MagicMock(side_effect=_grab_frame)
        return sub

    teleop_cam = _build_sub_cam("TeleopCam")
    depth_cam = _build_sub_cam("DepthCam")

    def _connect():
        manager.teleop = teleop_cam
        manager.depth = depth_cam
        manager.is_connected.return_value = True

    def _disconnect():
        manager.teleop = None
        manager.depth = None
        manager.is_connected.return_value = False

    # Start in the connected state.
    _connect()
    manager.connect = MagicMock(side_effect=_connect)
    manager.disconnect = MagicMock(side_effect=_disconnect)
    # Mock methods
    manager.initialize_cameras = MagicMock()
    return manager
@pytest.fixture(
    params=PARAMS,
    # ids=["teleop-left", "teleop-right", "torso-rgb", "torso-depth"],
    ids=["teleop-left", "teleop-right", "torso-rgb"],
)
def camera(request):
    # Build a Reachy2Camera whose CameraManager is replaced by a fresh mock,
    # so no real robot / gRPC connection is needed.
    name, image_type = request.param
    with (
        patch(
            "lerobot.cameras.reachy2_camera.reachy2_camera.CameraManager",
            side_effect=lambda *a, **k: _make_cam_manager_mock(),
        ),
    ):
        config = Reachy2CameraConfig(name=name, image_type=image_type)
        cam = Reachy2Camera(config)
        yield cam
        # Teardown: release the (mocked) manager if the test left it connected.
        if cam.is_connected:
            cam.disconnect()
def test_connect(camera):
    # Connecting should mark the camera connected and initialize the SDK cameras once.
    camera.connect()
    assert camera.is_connected
    camera.cam_manager.initialize_cameras.assert_called_once()
def test_read(camera):
camera.connect()
img = camera.read()
if camera.config.name == "teleop":
camera.cam_manager.teleop.get_frame.assert_called_once()
elif camera.config.name == "depth":
camera.cam_manager.depth.get_frame.assert_called_once()
assert isinstance(img, np.ndarray)
assert img.shape == (480, 640, 3)
def test_disconnect(camera):
camera.connect()
camera.disconnect()
assert not camera.is_connected
def test_async_read(camera):
    """async_read() returns a frame; camera is released even on failure."""
    camera.connect()
    try:
        img = camera.async_read()
        assert isinstance(img, np.ndarray)
    finally:
        # Explicit cleanup so the async reader thread is stopped promptly.
        if camera.is_connected:
            camera.disconnect()


def test_read_before_connect(camera):
    """read() on an unconnected camera raises DeviceNotConnectedError."""
    with pytest.raises(DeviceNotConnectedError):
        _ = camera.read()


def test_disconnect_before_connect(camera):
    """disconnect() on an unconnected camera raises DeviceNotConnectedError."""
    with pytest.raises(DeviceNotConnectedError):
        camera.disconnect()


def test_async_read_before_connect(camera):
    """async_read() on an unconnected camera raises DeviceNotConnectedError."""
    with pytest.raises(DeviceNotConnectedError):
        _ = camera.async_read()
def test_read_latest(camera):
    """read_latest() returns the most recent frame with the same shape as read()."""
    camera.connect()
    frame = camera.read()
    latest = camera.read_latest()
    assert isinstance(latest, np.ndarray)
    assert latest.shape == frame.shape


def test_read_latest_before_connect(camera):
    # camera fixture yields an unconnected camera instance
    with pytest.raises(DeviceNotConnectedError):
        _ = camera.read_latest()


def test_read_latest_high_frequency(camera):
    """Repeated read_latest() calls keep returning valid frames."""
    camera.connect()
    # prime to ensure frames are available
    ref = camera.read()
    for _ in range(20):
        latest = camera.read_latest()
        assert isinstance(latest, np.ndarray)
        assert latest.shape == ref.shape


def test_read_latest_too_old(camera):
    """read_latest() times out when the newest frame exceeds max_age_ms."""
    camera.connect()
    # prime to ensure frames are available
    _ = camera.read()
    with pytest.raises(TimeoutError):
        _ = camera.read_latest(max_age_ms=0)  # immediately too old
def test_wrong_camera_name():
    """An unknown camera name is rejected at config construction time."""
    with pytest.raises(ValueError):
        _ = Reachy2CameraConfig(name="wrong-name", image_type="left")


def test_wrong_image_type():
    """image_type must match the camera: teleop has left/right, depth has rgb."""
    with pytest.raises(ValueError):
        _ = Reachy2CameraConfig(name="teleop", image_type="rgb")
    with pytest.raises(ValueError):
        _ = Reachy2CameraConfig(name="depth", image_type="left")


def test_wrong_color_mode():
    """An unknown color mode is rejected at config construction time."""
    with pytest.raises(ValueError):
        _ = Reachy2CameraConfig(name="teleop", image_type="left", color_mode="wrong-color")
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/cameras/test_reachy2_camera.py",
"license": "Apache License 2.0",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/robots/test_reachy2.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
pytest.importorskip("reachy2_sdk")
from lerobot.robots.reachy2 import (
REACHY2_ANTENNAS_JOINTS,
REACHY2_L_ARM_JOINTS,
REACHY2_NECK_JOINTS,
REACHY2_R_ARM_JOINTS,
REACHY2_VEL,
Reachy2Robot,
Reachy2RobotConfig,
)
# {lerobot_keys: reachy2_sdk_keys}
# Union of every joint the robot can expose; individual parts are toggled
# on/off by the config flags exercised in PARAMS below.
REACHY2_JOINTS = {
    **REACHY2_NECK_JOINTS,
    **REACHY2_ANTENNAS_JOINTS,
    **REACHY2_R_ARM_JOINTS,
    **REACHY2_L_ARM_JOINTS,
}

# Config overrides used to parametrize the fixture; {} is the default config.
PARAMS = [
    {},  # default config
    {"with_mobile_base": False},
    {"with_mobile_base": False, "with_l_arm": False, "with_antennas": False},
    {"with_r_arm": False, "with_neck": False, "with_antennas": False},
    {"use_external_commands": True, "disable_torque_on_disconnect": True},
    {"use_external_commands": True, "with_mobile_base": False, "with_neck": False},
    {"disable_torque_on_disconnect": False},
    {"max_relative_target": 5},
    {"with_right_teleop_camera": False},
    {"with_left_teleop_camera": False, "with_right_teleop_camera": False},
    {"with_left_teleop_camera": False, "with_torso_camera": True},
]
def _make_reachy2_sdk_mock():
    """Build a ReachySDK mock with spy-able joints, odometry and lifecycle.

    Every joint records writes to its goal_position into a shared counter
    (`r._goal_position_set_total`) so tests can verify how many goals were set.
    """

    class JointSpy:
        # Joint stub: readable present_position, goal_position setter that
        # notifies the shared counter via the on_set callback.
        __slots__ = (
            "present_position",
            "_goal_position",
            "_on_set",
        )

        def __init__(self, present_position=0.0, on_set=None):
            self.present_position = present_position
            self._goal_position = present_position
            self._on_set = on_set

        @property
        def goal_position(self):
            return self._goal_position

        @goal_position.setter
        def goal_position(self, v):
            self._goal_position = v
            if self._on_set:
                self._on_set()

    r = MagicMock(name="ReachySDKMock")
    r.is_connected.return_value = True

    def _connect():
        r.is_connected.return_value = True

    def _disconnect():
        r.is_connected.return_value = False

    # Global counter of goal_position sets
    r._goal_position_set_total = 0

    def _on_any_goal_set():
        r._goal_position_set_total += 1

    # Mock joints with some dummy positions
    joints = {
        k: JointSpy(
            present_position=float(i),
            on_set=_on_any_goal_set,
        )
        for i, k in enumerate(REACHY2_JOINTS.values())
    }
    r.joints = joints
    # Mock mobile base with some dummy odometry
    r.mobile_base = MagicMock()
    r.mobile_base.odometry = {
        "x": 0.1,
        "y": -0.2,
        "theta": 21.3,
        "vx": 0.001,
        "vy": 0.002,
        "vtheta": 0.0,
    }
    r.connect = MagicMock(side_effect=_connect)
    r.disconnect = MagicMock(side_effect=_disconnect)
    # Mock methods
    r.turn_on = MagicMock()
    r.reset_default_limits = MagicMock()
    r.send_goal_positions = MagicMock()
    r.turn_off_smoothly = MagicMock()
    r.mobile_base.set_goal_speed = MagicMock()
    r.mobile_base.send_speed_command = MagicMock()
    return r
def _make_reachy2_camera_mock(*args, **kwargs):
cfg = args[0] if args else kwargs.get("config")
name = getattr(cfg, "name", kwargs.get("name", "cam"))
image_type = getattr(cfg, "image_type", kwargs.get("image_type", "cam"))
width = getattr(cfg, "width", kwargs.get("width", 640))
height = getattr(cfg, "height", kwargs.get("height", 480))
cam = MagicMock(name=f"Reachy2CameraMock:{name}")
cam.name = name
cam.image_type = image_type
cam.width = width
cam.height = height
cam.connect = MagicMock()
cam.disconnect = MagicMock()
cam.async_read = MagicMock(side_effect=lambda: np.zeros((height, width, 3), dtype=np.uint8))
cam.read_latest = MagicMock(side_effect=lambda: np.zeros((height, width, 3), dtype=np.uint8))
return cam
@pytest.fixture(params=PARAMS, ids=lambda p: "default" if not p else ",".join(p.keys()))
def reachy2(request):
    """Yield an unconnected Reachy2Robot with mocked SDK and cameras.

    Parametrized over the config overrides in PARAMS; disconnects on
    teardown if the test left the robot connected.
    """
    with (
        patch(
            "lerobot.robots.reachy2.robot_reachy2.ReachySDK",
            side_effect=lambda *a, **k: _make_reachy2_sdk_mock(),
        ),
        patch(
            "lerobot.cameras.reachy2_camera.reachy2_camera.Reachy2Camera",
            side_effect=_make_reachy2_camera_mock,
        ),
    ):
        overrides = request.param
        cfg = Reachy2RobotConfig(ip_address="192.168.0.200", **overrides)
        robot = Reachy2Robot(cfg)
        yield robot
        # Teardown: release the mocked robot if the test did not.
        if robot.is_connected:
            robot.disconnect()
def test_connect_disconnect(reachy2):
    """connect()/disconnect() drive the SDK lifecycle and torque policy."""
    assert not reachy2.is_connected
    reachy2.connect()
    assert reachy2.is_connected
    reachy2.reachy.turn_on.assert_called_once()
    reachy2.reachy.reset_default_limits.assert_called_once()
    reachy2.disconnect()
    assert not reachy2.is_connected
    # Torque is released on disconnect only when the config asks for it.
    if reachy2.config.disable_torque_on_disconnect:
        reachy2.reachy.turn_off_smoothly.assert_called_once()
    else:
        reachy2.reachy.turn_off_smoothly.assert_not_called()
    reachy2.reachy.disconnect.assert_called_once()
def test_get_joints_dict(reachy2):
    """joints_dict exposes exactly the joints of the parts enabled in config.

    The original spelled out one assert per joint per part; this keeps the
    same coverage via a flag -> joint-keys table.
    """
    reachy2.connect()
    # Map each config flag to the lerobot joint keys it controls.
    part_joints = {
        "with_neck": ["neck_yaw.pos", "neck_pitch.pos", "neck_roll.pos"],
        "with_antennas": ["l_antenna.pos", "r_antenna.pos"],
        "with_r_arm": [
            "r_shoulder_pitch.pos",
            "r_shoulder_roll.pos",
            "r_elbow_yaw.pos",
            "r_elbow_pitch.pos",
            "r_wrist_roll.pos",
            "r_wrist_pitch.pos",
            "r_wrist_yaw.pos",
            "r_gripper.pos",
        ],
        "with_l_arm": [
            "l_shoulder_pitch.pos",
            "l_shoulder_roll.pos",
            "l_elbow_yaw.pos",
            "l_elbow_pitch.pos",
            "l_wrist_roll.pos",
            "l_wrist_pitch.pos",
            "l_wrist_yaw.pos",
            "l_gripper.pos",
        ],
    }
    for flag, joints in part_joints.items():
        part_enabled = getattr(reachy2.config, flag)
        for joint in joints:
            if part_enabled:
                assert joint in reachy2.joints_dict
            else:
                assert joint not in reachy2.joints_dict
def test_get_observation(reachy2):
    """get_observation returns joint positions, base velocities and camera frames."""
    reachy2.connect()
    obs = reachy2.get_observation()
    # The observation must contain exactly: enabled joints, mobile-base
    # velocities (when present) and one image per configured camera.
    expected_keys = set(reachy2.joints_dict)
    if reachy2.config.with_mobile_base:
        expected_keys.update(REACHY2_VEL)
    expected_keys.update(reachy2.cameras.keys())
    assert set(obs.keys()) == expected_keys
    # Joint values come straight from the SDK's present_position.
    for motor in reachy2.joints_dict:
        assert obs[motor] == reachy2.reachy.joints[REACHY2_JOINTS[motor]].present_position
    if reachy2.config.with_mobile_base:
        for vel in REACHY2_VEL:
            assert obs[vel] == reachy2.reachy.mobile_base.odometry[REACHY2_VEL[vel]]
    # One (height, width, 3) frame per enabled camera.
    for flag, cam_key in (
        ("with_left_teleop_camera", "teleop_left"),
        ("with_right_teleop_camera", "teleop_right"),
        ("with_torso_camera", "torso_rgb"),
    ):
        if getattr(reachy2.config, flag):
            cam_cfg = reachy2.config.cameras[cam_key]
            assert obs[cam_key].shape == (cam_cfg.height, cam_cfg.width, 3)
def test_send_action(reachy2):
    """send_action writes joint goals (optionally clamped) and base speeds."""
    reachy2.connect()
    # Distinct joint targets (10, 20, ...) and base velocities (0.1, 0.2, ...).
    action = {k: i * 10.0 for i, k in enumerate(reachy2.joints_dict.keys(), start=1)}
    if reachy2.config.with_mobile_base:
        action.update({k: i * 0.1 for i, k in enumerate(REACHY2_VEL.keys(), start=1)})
    previous_present_position = {
        k: reachy2.reachy.joints[REACHY2_JOINTS[k]].present_position for k in reachy2.joints_dict
    }
    returned = reachy2.send_action(action)
    if reachy2.config.max_relative_target is None:
        # Without clamping the action is echoed back unchanged.
        assert returned == action
    # Every joint goal must have been written exactly once.
    assert reachy2.reachy._goal_position_set_total == len(reachy2.joints_dict)
    for motor in reachy2.joints_dict:
        expected_pos = action[motor]
        real_pos = reachy2.reachy.joints[REACHY2_JOINTS[motor]].goal_position
        if reachy2.config.max_relative_target is None:
            assert real_pos == expected_pos
        else:
            # NOTE(review): this clamp check uses `real_pos` on both sides of
            # the equality (inside abs(expected_pos - real_pos)) — it looks
            # like it was meant to use the previous present position; confirm
            # against ensure_safe_goal_position's semantics.
            assert real_pos == previous_present_position[motor] + np.sign(expected_pos) * min(
                abs(expected_pos - real_pos), reachy2.config.max_relative_target
            )
    if reachy2.config.with_mobile_base:
        goal_speed = [i * 0.1 for i, _ in enumerate(REACHY2_VEL.keys(), start=1)]
        reachy2.reachy.mobile_base.set_goal_speed.assert_called_once_with(*goal_speed)
    # With external commands the test harness, not the robot class, is
    # responsible for flushing goals to the SDK.
    if reachy2.config.use_external_commands:
        reachy2.reachy.send_goal_positions.assert_not_called()
        if reachy2.config.with_mobile_base:
            reachy2.reachy.mobile_base.send_speed_command.assert_not_called()
    else:
        reachy2.reachy.send_goal_positions.assert_called_once()
        if reachy2.config.with_mobile_base:
            reachy2.reachy.mobile_base.send_speed_command.assert_called_once()
def test_no_part_declared():
    """A config with every robot part disabled is rejected."""
    with pytest.raises(ValueError):
        _ = Reachy2RobotConfig(
            ip_address="192.168.0.200",
            with_mobile_base=False,
            with_l_arm=False,
            with_r_arm=False,
            with_neck=False,
            with_antennas=False,
        )
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/robots/test_reachy2.py",
"license": "Apache License 2.0",
"lines": 276,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/teleoperators/test_reachy2_teleoperator.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock, patch
import pytest
from lerobot.teleoperators.reachy2_teleoperator import (
REACHY2_ANTENNAS_JOINTS,
REACHY2_L_ARM_JOINTS,
REACHY2_NECK_JOINTS,
REACHY2_R_ARM_JOINTS,
REACHY2_VEL,
Reachy2Teleoperator,
Reachy2TeleoperatorConfig,
)
# {lerobot_keys: reachy2_sdk_keys}
# Union of every joint the teleoperator can expose; parts are toggled by the
# config flags exercised in PARAMS below.
REACHY2_JOINTS = {
    **REACHY2_NECK_JOINTS,
    **REACHY2_ANTENNAS_JOINTS,
    **REACHY2_R_ARM_JOINTS,
    **REACHY2_L_ARM_JOINTS,
}

# Config overrides used to parametrize the fixture; {} is the default config.
PARAMS = [
    {},  # default config
    {"with_mobile_base": False},
    {"with_mobile_base": False, "with_l_arm": False, "with_antennas": False},
    {"with_r_arm": False, "with_neck": False, "with_antennas": False},
    {"with_mobile_base": False, "with_neck": False},
    {"use_present_position": True},
]
def _make_reachy2_sdk_mock():
    """Create a ReachySDK stand-in with canned joint and mobile-base data."""
    sdk = MagicMock(name="ReachySDKMock")
    sdk.is_connected.return_value = True

    def _mark_connected():
        sdk.is_connected.return_value = True

    def _mark_disconnected():
        sdk.is_connected.return_value = False

    # Give every joint distinct, predictable values so tests can tell the
    # present_position (i) apart from the goal_position (i + 0.5).
    sdk.joints = {
        sdk_key: MagicMock(
            present_position=float(index),
            goal_position=float(index) + 0.5,
        )
        for index, sdk_key in enumerate(REACHY2_JOINTS.values())
    }

    # Mobile base exposes both the last commanded velocity and the odometry.
    sdk.mobile_base = MagicMock()
    sdk.mobile_base.last_cmd_vel = {
        "vx": -0.2,
        "vy": 0.2,
        "vtheta": 11.0,
    }
    sdk.mobile_base.odometry = {
        "x": 1.0,
        "y": 2.0,
        "theta": 20.0,
        "vx": 0.1,
        "vy": -0.1,
        "vtheta": 8.0,
    }

    sdk.connect = MagicMock(side_effect=_mark_connected)
    sdk.disconnect = MagicMock(side_effect=_mark_disconnected)
    return sdk
@pytest.fixture(params=PARAMS, ids=lambda p: "default" if not p else ",".join(p.keys()))
def reachy2(request):
    """Yield an unconnected Reachy2Teleoperator backed by a mocked SDK.

    Parametrized over the config overrides in PARAMS; disconnects on
    teardown if the test left the teleoperator connected.
    """
    with (
        patch(
            "lerobot.teleoperators.reachy2_teleoperator.reachy2_teleoperator.ReachySDK",
            side_effect=lambda *a, **k: _make_reachy2_sdk_mock(),
        ),
    ):
        overrides = request.param
        cfg = Reachy2TeleoperatorConfig(ip_address="192.168.0.200", **overrides)
        robot = Reachy2Teleoperator(cfg)
        yield robot
        # Teardown: release the mocked teleoperator if the test did not.
        if robot.is_connected:
            robot.disconnect()
def test_connect_disconnect(reachy2):
    """connect()/disconnect() toggle the connected state and release the SDK."""
    assert not reachy2.is_connected
    reachy2.connect()
    assert reachy2.is_connected
    reachy2.disconnect()
    assert not reachy2.is_connected
    reachy2.reachy.disconnect.assert_called_once()
def test_get_action(reachy2):
    """get_action reads goal positions (or present positions) and base speeds.

    With `use_present_position` the teleoperator reports present positions and
    odometry; otherwise it reports goal positions and the last commanded
    velocity.
    """
    reachy2.connect()
    action = reachy2.get_action()
    expected_keys = set(reachy2.joints_dict)
    if reachy2.config.with_mobile_base:
        expected_keys.update(REACHY2_VEL)
    assert set(action.keys()) == expected_keys
    for motor in reachy2.joints_dict:
        joint = reachy2.reachy.joints[REACHY2_JOINTS[motor]]
        expected = (
            joint.present_position if reachy2.config.use_present_position else joint.goal_position
        )
        assert action[motor] == expected
    if reachy2.config.with_mobile_base:
        # Velocity source mirrors the position source selection above.
        vel_source = (
            reachy2.reachy.mobile_base.odometry
            if reachy2.config.use_present_position
            else reachy2.reachy.mobile_base.last_cmd_vel
        )
        for vel in REACHY2_VEL:
            assert action[vel] == vel_source[REACHY2_VEL[vel]]
def test_no_part_declared():
    """A config with every teleoperator part disabled is rejected."""
    with pytest.raises(ValueError):
        _ = Reachy2TeleoperatorConfig(
            ip_address="192.168.0.200",
            with_mobile_base=False,
            with_l_arm=False,
            with_r_arm=False,
            with_neck=False,
            with_antennas=False,
        )
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/teleoperators/test_reachy2_teleoperator.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:src/lerobot/processor/device_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script defines a processor step for moving environment transition data to a specific torch device and casting
its floating-point precision.
"""
from dataclasses import dataclass
from typing import Any
import torch
from lerobot.configs.types import PipelineFeatureType, PolicyFeature
from lerobot.utils.utils import get_safe_torch_device
from .core import EnvTransition, PolicyAction, TransitionKey
from .pipeline import ProcessorStep, ProcessorStepRegistry
@ProcessorStepRegistry.register("device_processor")
@dataclass
class DeviceProcessorStep(ProcessorStep):
    """
    Processor step to move all tensors within an `EnvTransition` to a specified device and optionally cast their
    floating-point data type.

    This is crucial for preparing data for model training or inference on hardware like GPUs.

    Attributes:
        device: The target device for tensors (e.g., "cpu", "cuda", "cuda:0").
        float_dtype: The target floating-point dtype as a string (e.g., "float32", "float16", "bfloat16").
            If None, the dtype is not changed.
    """

    device: str = "cpu"
    float_dtype: str | None = None

    # Accepted float_dtype strings; "half"/"float"/"double" are aliases for
    # the explicit bit-width names.
    DTYPE_MAPPING = {
        "float16": torch.float16,
        "float32": torch.float32,
        "float64": torch.float64,
        "bfloat16": torch.bfloat16,
        "half": torch.float16,
        "float": torch.float32,
        "double": torch.float64,
    }

    def __post_init__(self):
        """
        Initializes the processor by converting string configurations to torch objects.

        This method sets up the `torch.device`, determines if transfers can be non-blocking, and validates the
        `float_dtype` string, converting it to a `torch.dtype` object.
        """
        self.tensor_device: torch.device = get_safe_torch_device(self.device)
        # NOTE(review): `.type` strips any device index ("cuda:0" -> "cuda"),
        # so a specific GPU selection is NOT preserved in the device string —
        # confirm this normalization is intended.
        self.device = self.tensor_device.type
        # Non-blocking transfers only make sense when moving to a CUDA device.
        self.non_blocking = "cuda" in str(self.device)
        # Validate and convert float_dtype string to torch dtype
        if self.float_dtype is not None:
            if self.float_dtype not in self.DTYPE_MAPPING:
                raise ValueError(
                    f"Invalid float_dtype '{self.float_dtype}'. Available options: {list(self.DTYPE_MAPPING.keys())}"
                )
            self._target_float_dtype = self.DTYPE_MAPPING[self.float_dtype]
        else:
            self._target_float_dtype = None

    def _process_tensor(self, tensor: torch.Tensor) -> torch.Tensor:
        """
        Moves a single tensor to the target device and casts its dtype.

        Handles multi-GPU scenarios by not moving a tensor if it's already on a different CUDA device than
        the target, which is useful when using frameworks like Accelerate.

        Args:
            tensor: The input torch.Tensor.

        Returns:
            The processed tensor on the correct device and with the correct dtype.
        """
        # Determine target device
        if tensor.is_cuda and self.tensor_device.type == "cuda":
            # Both tensor and target are on GPU - preserve tensor's GPU placement.
            # This handles multi-GPU scenarios where Accelerate has already placed
            # tensors on the correct GPU for each process.
            target_device = tensor.device
        else:
            # Either tensor is on CPU, or we're configured for CPU.
            # In both cases, use the configured device.
            target_device = self.tensor_device
        # MPS workaround: Convert float64 to float32 since MPS doesn't support float64
        if target_device.type == "mps" and tensor.dtype == torch.float64:
            tensor = tensor.to(dtype=torch.float32)
        # Only move if necessary
        if tensor.device != target_device:
            tensor = tensor.to(target_device, non_blocking=self.non_blocking)
        # Convert float dtype if specified and tensor is floating point
        if self._target_float_dtype is not None and tensor.is_floating_point():
            tensor = tensor.to(dtype=self._target_float_dtype)
        return tensor

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """
        Applies device and dtype conversion to all tensors in an environment transition.

        It iterates through the transition, finds all `torch.Tensor` objects (including those nested in
        dictionaries like `observation`), and processes them.

        Args:
            transition: The input `EnvTransition` object.

        Returns:
            A new `EnvTransition` object with all tensors moved to the target device and dtype.

        Raises:
            ValueError: If the transition's action is neither None nor a `PolicyAction`.
        """
        new_transition = transition.copy()
        action = new_transition.get(TransitionKey.ACTION)
        if action is not None and not isinstance(action, PolicyAction):
            raise ValueError(f"If action is not None should be a PolicyAction type got {type(action)}")
        # Top-level values that may themselves be tensors.
        simple_tensor_keys = [
            TransitionKey.ACTION,
            TransitionKey.REWARD,
            TransitionKey.DONE,
            TransitionKey.TRUNCATED,
        ]
        # Dict-valued entries whose values may contain tensors.
        dict_tensor_keys = [
            TransitionKey.OBSERVATION,
            TransitionKey.COMPLEMENTARY_DATA,
        ]
        # Process simple, top-level tensors
        for key in simple_tensor_keys:
            value = transition.get(key)
            if isinstance(value, torch.Tensor):
                new_transition[key] = self._process_tensor(value)
        # Process tensors nested within dictionaries
        for key in dict_tensor_keys:
            data_dict = transition.get(key)
            if data_dict is not None:
                new_data_dict = {
                    k: self._process_tensor(v) if isinstance(v, torch.Tensor) else v
                    for k, v in data_dict.items()
                }
                new_transition[key] = new_data_dict
        return new_transition

    def get_config(self) -> dict[str, Any]:
        """
        Returns the serializable configuration of the processor.

        Returns:
            A dictionary containing the device and float_dtype settings.
        """
        return {"device": self.device, "float_dtype": self.float_dtype}

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """
        Returns the input features unchanged.

        Device and dtype transformations do not alter the fundamental definition of the features (e.g., shape).

        Args:
            features: A dictionary of policy features.

        Returns:
            The original dictionary of policy features.
        """
        return features
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/device_processor.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/normalize_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any
import torch
from torch import Tensor
from lerobot.configs.types import FeatureType, NormalizationMode, PipelineFeatureType, PolicyFeature
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.utils.constants import ACTION
from .converters import from_tensor_to_numpy, to_tensor
from .core import EnvTransition, PolicyAction, TransitionKey
from .pipeline import PolicyProcessorPipeline, ProcessorStep, ProcessorStepRegistry, RobotObservation
@dataclass
class _NormalizationMixin:
"""
A mixin class providing core functionality for normalization and unnormalization.
This class manages normalization statistics (`stats`), converts them to tensors for
efficient computation, handles device placement, and implements the logic for
applying normalization transformations (mean/std and min/max). It is designed to
be inherited by concrete `ProcessorStep` implementations and should not be used
directly.
**Stats Override Preservation:**
When stats are explicitly provided during construction (e.g., via overrides in
`DataProcessorPipeline.from_pretrained()`), they are preserved even when
`load_state_dict()` is called. This allows users to override normalization
statistics from saved models while keeping the rest of the model state intact.
Examples:
```python
# Common use case: Override with dataset stats
from lerobot.datasets import LeRobotDataset
dataset = LeRobotDataset("my_dataset")
pipeline = DataProcessorPipeline.from_pretrained(
"model_path", overrides={"normalizer_processor": {"stats": dataset.meta.stats}}
)
# dataset.meta.stats will be used, not the stats from the saved model
# Custom stats override
custom_stats = {"action": {"mean": [0.0], "std": [1.0]}}
pipeline = DataProcessorPipeline.from_pretrained(
"model_path", overrides={"normalizer_processor": {"stats": custom_stats}}
)
```
Attributes:
features: A dictionary mapping feature names to `PolicyFeature` objects, defining
the data structure to be processed.
norm_map: A dictionary mapping `FeatureType` to `NormalizationMode`, specifying
which normalization method to use for each type of feature.
stats: A dictionary containing the normalization statistics (e.g., mean, std,
min, max) for each feature.
device: The PyTorch device on which to store and perform tensor operations.
eps: A small epsilon value to prevent division by zero in normalization
calculations.
normalize_observation_keys: An optional set of keys to selectively apply
normalization to specific observation features.
_tensor_stats: An internal dictionary holding the normalization statistics as
PyTorch tensors.
_stats_explicitly_provided: Internal flag tracking whether stats were explicitly
provided during construction (used for override preservation).
"""
features: dict[str, PolicyFeature]
norm_map: dict[FeatureType, NormalizationMode]
stats: dict[str, dict[str, Any]] | None = None
device: torch.device | str | None = None
dtype: torch.dtype | None = None
eps: float = 1e-8
normalize_observation_keys: set[str] | None = None
_tensor_stats: dict[str, dict[str, Tensor]] = field(default_factory=dict, init=False, repr=False)
_stats_explicitly_provided: bool = field(default=False, init=False, repr=False)
    def __post_init__(self):
        """
        Initializes the mixin after dataclass construction.

        This method handles the robust deserialization of `features` and `norm_map`
        from JSON-compatible formats (where enums become strings and tuples become
        lists) and converts the provided `stats` dictionary into a dictionary of
        tensors (`_tensor_stats`) on the specified device.
        """
        # Track if stats were explicitly provided (not None and not empty)
        self._stats_explicitly_provided = self.stats is not None and bool(self.stats)
        # Robust JSON deserialization handling (guard empty maps).
        if self.features:
            first_val = next(iter(self.features.values()))
            if isinstance(first_val, dict):
                # Feature entries came from JSON: rebuild PolicyFeature objects.
                reconstructed = {}
                for key, ft_dict in self.features.items():
                    reconstructed[key] = PolicyFeature(
                        type=FeatureType(ft_dict["type"]), shape=tuple(ft_dict["shape"])
                    )
                self.features = reconstructed
        # if keys are strings (JSON), rebuild enum map
        if self.norm_map and all(isinstance(k, str) for k in self.norm_map):
            reconstructed = {}
            for ft_type_str, norm_mode_str in self.norm_map.items():
                reconstructed[FeatureType(ft_type_str)] = NormalizationMode(norm_mode_str)
            self.norm_map = reconstructed
        # Convert stats to tensors and move to the target device once during initialization.
        self.stats = self.stats or {}
        if self.dtype is None:
            self.dtype = torch.float32  # default working precision
        self._tensor_stats = to_tensor(self.stats, device=self.device, dtype=self.dtype)
    def to(
        self, device: torch.device | str | None = None, dtype: torch.dtype | None = None
    ) -> _NormalizationMixin:
        """
        Moves the processor's normalization stats to the specified device and/or dtype.

        Arguments left as `None` keep the current setting; the tensor statistics
        are rebuilt from `self.stats` either way.

        Args:
            device: The target PyTorch device, or `None` to keep the current one.
            dtype: The target dtype for the statistics, or `None` to keep the current one.

        Returns:
            The instance of the class, allowing for method chaining.
        """
        if device is not None:
            self.device = device
        if dtype is not None:
            self.dtype = dtype
        self._tensor_stats = to_tensor(self.stats, device=self.device, dtype=self.dtype)
        return self
def state_dict(self) -> dict[str, Tensor]:
"""
Returns the normalization statistics as a flat state dictionary.
All tensors are moved to the CPU before being returned, which is standard practice
for saving state dictionaries.
Returns:
A flat dictionary mapping from `'feature_name.stat_name'` to the
corresponding statistics tensor on the CPU.
"""
flat: dict[str, Tensor] = {}
for key, sub in self._tensor_stats.items():
for stat_name, tensor in sub.items():
flat[f"{key}.{stat_name}"] = tensor.cpu() # Always save to CPU
return flat
    def load_state_dict(self, state: dict[str, Tensor]) -> None:
        """
        Loads normalization statistics from a state dictionary.

        The loaded tensors are moved to the processor's configured device.

        **Stats Override Preservation:**
        If stats were explicitly provided during construction (e.g., via overrides in
        `DataProcessorPipeline.from_pretrained()`), they are preserved and the state
        dictionary is ignored. This allows users to override normalization statistics
        while still loading the rest of the model state.

        Args:
            state: A flat state dictionary with keys in the format
                `'feature_name.stat_name'`.
        """
        # If stats were explicitly provided during construction, preserve them
        if self._stats_explicitly_provided and self.stats is not None:
            # Don't load from state_dict, keep the explicitly provided stats
            # But ensure _tensor_stats is properly initialized
            self._tensor_stats = to_tensor(self.stats, device=self.device, dtype=self.dtype)  # type: ignore[assignment]
            return
        # Normal behavior: load stats from state_dict
        self._tensor_stats.clear()
        for flat_key, tensor in state.items():
            key, stat_name = flat_key.rsplit(".", 1)
            # Load to the processor's configured device.
            # NOTE(review): dtype is hard-coded to float32 here while
            # __post_init__ honors self.dtype — confirm the asymmetry is intended.
            self._tensor_stats.setdefault(key, {})[stat_name] = tensor.to(
                dtype=torch.float32, device=self.device
            )
        # Reconstruct the original stats dict from tensor stats for compatibility with to() method
        # and other functions that rely on self.stats
        self.stats = {}
        for key, tensor_dict in self._tensor_stats.items():
            self.stats[key] = {}
            for stat_name, tensor in tensor_dict.items():
                # Convert tensor back to python/numpy format
                self.stats[key][stat_name] = from_tensor_to_numpy(tensor)
def get_config(self) -> dict[str, Any]:
"""
Returns a serializable dictionary of the processor's configuration.
This method is used when saving the processor to disk, ensuring that its
configuration can be reconstructed later.
Returns:
A JSON-serializable dictionary containing the configuration.
"""
config = {
"eps": self.eps,
"features": {
key: {"type": ft.type.value, "shape": ft.shape} for key, ft in self.features.items()
},
"norm_map": {ft_type.value: norm_mode.value for ft_type, norm_mode in self.norm_map.items()},
}
if self.normalize_observation_keys is not None:
config["normalize_observation_keys"] = sorted(self.normalize_observation_keys)
return config
def _normalize_observation(self, observation: RobotObservation, inverse: bool) -> dict[str, Tensor]:
    """
    Applies (un)normalization to all relevant features in an observation dictionary.

    Args:
        observation: The observation dictionary to process.
        inverse: If `True`, applies unnormalization; otherwise, applies normalization.

    Returns:
        A new observation dictionary with the transformed tensor values.
    """
    # Shallow copy so the caller's dictionary is never mutated in place.
    new_observation = dict(observation)
    for key, feature in self.features.items():
        # Respect the optional allow-list of observation keys, when configured.
        if self.normalize_observation_keys is not None and key not in self.normalize_observation_keys:
            continue
        # Actions are handled by _normalize_action; also skip keys absent from this observation.
        if feature.type != FeatureType.ACTION and key in new_observation:
            # Convert to tensor but preserve original dtype for adaptation logic
            tensor = torch.as_tensor(new_observation[key])
            new_observation[key] = self._apply_transform(tensor, key, feature.type, inverse=inverse)
    return new_observation
def _normalize_action(self, action: Tensor, inverse: bool) -> Tensor:
    """
    Applies (un)normalization to an action tensor.

    Args:
        action: The action tensor to process.
        inverse: If `True`, applies unnormalization; otherwise, applies normalization.

    Returns:
        The transformed action tensor.
    """
    # Delegate to the shared transform using the canonical action key and feature type.
    return self._apply_transform(action, ACTION, FeatureType.ACTION, inverse=inverse)
def _apply_transform(
    self, tensor: Tensor, key: str, feature_type: FeatureType, *, inverse: bool = False
) -> Tensor:
    """
    Core logic to apply a normalization or unnormalization transformation to a tensor.

    This method selects the appropriate normalization mode based on the feature type
    and applies the corresponding mathematical operation.

    Normalization Modes:
        - MEAN_STD: Centers data around zero with unit variance.
        - MIN_MAX: Scales data to [-1, 1] range using actual min/max values.
        - QUANTILES: Scales data to [-1, 1] range using 1st and 99th percentiles (q01/q99).
        - QUANTILE10: Scales data to [-1, 1] range using 10th and 90th percentiles (q10/q90).

    Args:
        tensor: The input tensor to transform.
        key: The feature key corresponding to the tensor.
        feature_type: The `FeatureType` of the tensor.
        inverse: If `True`, applies the inverse transformation (unnormalization).

    Returns:
        The transformed tensor.

    Raises:
        ValueError: If an unsupported normalization mode is encountered, or if the
            stats required by the selected mode are missing.
    """
    norm_mode = self.norm_map.get(feature_type, NormalizationMode.IDENTITY)
    if norm_mode == NormalizationMode.IDENTITY or key not in self._tensor_stats:
        return tensor

    if norm_mode not in (
        NormalizationMode.MEAN_STD,
        NormalizationMode.MIN_MAX,
        NormalizationMode.QUANTILES,
        NormalizationMode.QUANTILE10,
    ):
        raise ValueError(f"Unsupported normalization mode: {norm_mode}")

    # For Accelerate compatibility: ensure stats are on the same device and dtype
    # as the input tensor. (`key in self._tensor_stats` is guaranteed by the early
    # return above, so no extra guard is needed here.)
    first_stat = next(iter(self._tensor_stats[key].values()))
    if first_stat.device != tensor.device or first_stat.dtype != tensor.dtype:
        self.to(device=tensor.device, dtype=tensor.dtype)

    stats = self._tensor_stats[key]

    if norm_mode == NormalizationMode.MEAN_STD:
        mean = stats.get("mean")
        std = stats.get("std")
        if mean is None or std is None:
            raise ValueError(
                "MEAN_STD normalization mode requires mean and std stats, please update the dataset with the correct stats"
            )
        if inverse:
            # NOTE(review): the inverse deliberately mirrors the historical behavior and
            # multiplies by `std` (not `std + eps`), so normalize->unnormalize round-trips
            # only up to eps.
            return tensor * std + mean
        # Avoid division by zero by adding a small epsilon.
        return (tensor - mean) / (std + self.eps)

    # The remaining modes all map a [low, high] range onto [-1, 1] (and back);
    # only the stat names and the error messages differ.
    range_stats: dict[NormalizationMode, tuple[str, str, str]] = {
        NormalizationMode.MIN_MAX: (
            "min",
            "max",
            "MIN_MAX normalization mode requires min and max stats, please update the dataset with the correct stats",
        ),
        NormalizationMode.QUANTILES: (
            "q01",
            "q99",
            "QUANTILES normalization mode requires q01 and q99 stats, please update the dataset with the correct stats using the `augment_dataset_quantile_stats.py` script",
        ),
        NormalizationMode.QUANTILE10: (
            "q10",
            "q90",
            "QUANTILE10 normalization mode requires q10 and q90 stats, please update the dataset with the correct stats using the `augment_dataset_quantile_stats.py` script",
        ),
    }
    low_name, high_name, error_message = range_stats[norm_mode]
    low = stats.get(low_name)
    high = stats.get(high_name)
    if low is None or high is None:
        raise ValueError(error_message)

    denom = high - low
    # When low == high, substitute the denominator with a small epsilon to prevent
    # division by zero. This consistently maps an input equal to `low` to -1,
    # ensuring a stable transformation.
    denom = torch.where(denom == 0, torch.tensor(self.eps, device=tensor.device, dtype=tensor.dtype), denom)
    if inverse:
        # Map from [-1, 1] back to [low, high].
        return (tensor + 1.0) * denom / 2.0 + low
    # Map from [low, high] to [-1, 1].
    return 2.0 * (tensor - low) / denom - 1.0
@dataclass
@ProcessorStepRegistry.register(name="normalizer_processor")
class NormalizerProcessorStep(_NormalizationMixin, ProcessorStep):
    """
    A processor step that applies normalization to observations and actions in a transition.

    This class uses the logic from `_NormalizationMixin` to perform forward normalization
    (e.g., scaling data to have zero mean and unit variance, or to the range [-1, 1]).
    It is typically used in the pre-processing pipeline before feeding data to a policy.
    """

    @classmethod
    def from_lerobot_dataset(
        cls,
        dataset: LeRobotDataset,
        features: dict[str, PolicyFeature],
        norm_map: dict[FeatureType, NormalizationMode],
        *,
        normalize_observation_keys: set[str] | None = None,
        eps: float = 1e-8,
        device: torch.device | str | None = None,
    ) -> NormalizerProcessorStep:
        """
        Creates a `NormalizerProcessorStep` instance using statistics from a `LeRobotDataset`.

        Args:
            dataset: The dataset from which to extract normalization statistics
                (read from `dataset.meta.stats`).
            features: The feature definition for the processor.
            norm_map: The mapping from feature types to normalization modes.
            normalize_observation_keys: An optional set of observation keys to normalize.
            eps: A small epsilon value for numerical stability.
            device: The target device for the processor.

        Returns:
            A new instance of `NormalizerProcessorStep`.
        """
        return cls(
            features=features,
            norm_map=norm_map,
            stats=dataset.meta.stats,
            normalize_observation_keys=normalize_observation_keys,
            eps=eps,
            device=device,
        )

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Return a copy of `transition` with its observation and action normalized.

        Raises:
            ValueError: If the transition carries an action that is not a `PolicyAction`.
        """
        # Shallow copy: the input transition mapping itself is not mutated.
        new_transition = transition.copy()

        # Handle observation normalization.
        observation = new_transition.get(TransitionKey.OBSERVATION)
        if observation is not None:
            new_transition[TransitionKey.OBSERVATION] = self._normalize_observation(
                observation, inverse=False
            )

        # Handle action normalization.
        action = new_transition.get(TransitionKey.ACTION)
        if action is None:
            return new_transition
        if not isinstance(action, PolicyAction):
            raise ValueError(f"Action should be a PolicyAction type got {type(action)}")
        new_transition[TransitionKey.ACTION] = self._normalize_action(action, inverse=False)
        return new_transition

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # Normalization rescales values but leaves feature names, shapes, and types unchanged.
        return features
@dataclass
@ProcessorStepRegistry.register(name="unnormalizer_processor")
class UnnormalizerProcessorStep(_NormalizationMixin, ProcessorStep):
    """
    A processor step that applies unnormalization to observations and actions.

    This class inverts the normalization process, scaling data back to its original
    range. It is typically used in the post-processing pipeline to convert a policy's
    normalized action output into a format that can be executed by a robot or
    environment.
    """

    @classmethod
    def from_lerobot_dataset(
        cls,
        dataset: LeRobotDataset,
        features: dict[str, PolicyFeature],
        norm_map: dict[FeatureType, NormalizationMode],
        *,
        device: torch.device | str | None = None,
    ) -> UnnormalizerProcessorStep:
        """
        Creates an `UnnormalizerProcessorStep` using statistics from a `LeRobotDataset`.

        Args:
            dataset: The dataset from which to extract normalization statistics
                (read from `dataset.meta.stats`).
            features: The feature definition for the processor.
            norm_map: The mapping from feature types to normalization modes.
            device: The target device for the processor.

        Returns:
            A new instance of `UnnormalizerProcessorStep`.
        """
        return cls(features=features, norm_map=norm_map, stats=dataset.meta.stats, device=device)

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Return a copy of `transition` with its observation and action unnormalized.

        Raises:
            ValueError: If the transition carries an action that is not a `PolicyAction`.
        """
        # Shallow copy: the input transition mapping itself is not mutated.
        new_transition = transition.copy()

        # Handle observation unnormalization.
        observation = new_transition.get(TransitionKey.OBSERVATION)
        if observation is not None:
            new_transition[TransitionKey.OBSERVATION] = self._normalize_observation(observation, inverse=True)

        # Handle action unnormalization.
        action = new_transition.get(TransitionKey.ACTION)
        if action is None:
            return new_transition
        if not isinstance(action, PolicyAction):
            raise ValueError(f"Action should be a PolicyAction type got {type(action)}")
        new_transition[TransitionKey.ACTION] = self._normalize_action(action, inverse=True)
        return new_transition

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # Unnormalization rescales values but leaves feature names, shapes, and types unchanged.
        return features
def hotswap_stats(
    policy_processor: PolicyProcessorPipeline, stats: dict[str, dict[str, Any]]
) -> PolicyProcessorPipeline:
    """
    Replaces normalization statistics in an existing `PolicyProcessorPipeline` instance.

    A deep copy of the pipeline is made, and every step deriving from
    `_NormalizationMixin` (normalizer and unnormalizer steps alike) has its stats
    swapped for the provided ones. This lets a trained policy be adapted to a new
    environment or dataset with a different data distribution without rebuilding
    the whole pipeline.

    Args:
        policy_processor: The policy processor pipeline to modify.
        stats: The new dictionary of normalization statistics to apply.

    Returns:
        A new `PolicyProcessorPipeline` instance with the updated statistics.
    """
    updated_pipeline = deepcopy(policy_processor)
    normalization_steps = (s for s in updated_pipeline.steps if isinstance(s, _NormalizationMixin))
    for step in normalization_steps:
        step.stats = stats
        # Rebuild the tensor cache on the step's own device/dtype.
        step._tensor_stats = to_tensor(stats, device=step.device, dtype=step.dtype)  # type: ignore[assignment]
    return updated_pipeline
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/normalize_processor.py",
"license": "Apache License 2.0",
"lines": 467,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/observation_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
import einops
import numpy as np
import torch
from torch import Tensor
from lerobot.configs.types import PipelineFeatureType, PolicyFeature
from lerobot.utils.constants import OBS_ENV_STATE, OBS_IMAGE, OBS_IMAGES, OBS_STATE, OBS_STR
from .pipeline import ObservationProcessorStep, ProcessorStepRegistry
@dataclass
@ProcessorStepRegistry.register(name="observation_processor")
class VanillaObservationProcessorStep(ObservationProcessorStep):
    """
    Processes standard Gymnasium observations into the LeRobot format.

    This step handles both image and state data from a typical observation dictionary,
    preparing it for use in a LeRobot policy.

    **Image Processing:**
    - Converts channel-last (H, W, C), `uint8` images to channel-first (C, H, W),
      `float32` tensors.
    - Normalizes pixel values from the [0, 255] range to [0, 1].
    - Adds a batch dimension if one is not already present.
    - Recognizes a single image under the key `"pixels"` and maps it to
      `"observation.image"`.
    - Recognizes a dictionary of images under the key `"pixels"` and maps them
      to `"observation.images.{camera_name}"`.

    **State Processing:**
    - Maps the `"environment_state"` key to `"observation.environment_state"`.
    - Maps the `"agent_pos"` key to `"observation.state"`.
    - Converts NumPy arrays to PyTorch tensors.
    - Adds a batch dimension if one is not already present.
    """

    def _process_single_image(self, img: np.ndarray) -> Tensor:
        """
        Processes a single NumPy image array into a channel-first, normalized tensor.

        Args:
            img: A NumPy array representing the image, expected to be in channel-last
                (H, W, C) format with a `uint8` dtype.

        Returns:
            A `float32` PyTorch tensor in channel-first (B, C, H, W) format, with
            pixel values normalized to the [0, 1] range.

        Raises:
            ValueError: If the input image does not appear to be in channel-last
                format or is not of `uint8` dtype.
        """
        # Convert to tensor
        img_tensor = torch.from_numpy(img)

        # Add batch dimension if needed
        if img_tensor.ndim == 3:
            img_tensor = img_tensor.unsqueeze(0)

        # Validate image format
        _, h, w, c = img_tensor.shape
        # Heuristic channel-last check: the channel count must be smaller than both
        # spatial dimensions.
        if not (c < h and c < w):
            raise ValueError(f"Expected channel-last images, but got shape {img_tensor.shape}")
        if img_tensor.dtype != torch.uint8:
            raise ValueError(f"Expected torch.uint8 images, but got {img_tensor.dtype}")

        # Convert to channel-first format
        img_tensor = einops.rearrange(img_tensor, "b h w c -> b c h w").contiguous()

        # Convert to float32 and normalize to [0, 1]
        img_tensor = img_tensor.type(torch.float32) / 255.0
        return img_tensor

    def _process_observation(self, observation):
        """
        Processes both image and state observations.
        """
        # Shallow copy so the incoming observation dict is left untouched.
        processed_obs = observation.copy()

        if "pixels" in processed_obs:
            pixels = processed_obs.pop("pixels")
            # A dict of images means multiple cameras; a bare array means one camera.
            if isinstance(pixels, dict):
                imgs = {f"{OBS_IMAGES}.{key}": img for key, img in pixels.items()}
            else:
                imgs = {OBS_IMAGE: pixels}
            for imgkey, img in imgs.items():
                processed_obs[imgkey] = self._process_single_image(img)

        if "environment_state" in processed_obs:
            env_state_np = processed_obs.pop("environment_state")
            env_state = torch.from_numpy(env_state_np).float()
            # Add a batch dimension for unbatched 1-D states.
            if env_state.dim() == 1:
                env_state = env_state.unsqueeze(0)
            processed_obs[OBS_ENV_STATE] = env_state

        if "agent_pos" in processed_obs:
            agent_pos_np = processed_obs.pop("agent_pos")
            agent_pos = torch.from_numpy(agent_pos_np).float()
            # Add a batch dimension for unbatched 1-D states.
            if agent_pos.dim() == 1:
                agent_pos = agent_pos.unsqueeze(0)
            processed_obs[OBS_STATE] = agent_pos

        return processed_obs

    def observation(self, observation):
        """Entry point used by `ObservationProcessorStep`; delegates to `_process_observation`."""
        return self._process_observation(observation)

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """
        Transforms feature keys from the Gym standard to the LeRobot standard.

        This method standardizes the feature dictionary by renaming keys according
        to LeRobot's conventions, ensuring that policies can be constructed correctly.
        It handles various raw key formats, including those with an "observation." prefix.

        **Renaming Rules:**
        - `pixels` or `observation.pixels` -> `observation.image`
        - `pixels.{cam}` or `observation.pixels.{cam}` -> `observation.images.{cam}`
        - `environment_state` or `observation.environment_state` -> `observation.environment_state`
        - `agent_pos` or `observation.agent_pos` -> `observation.state`

        Args:
            features: The policy features dictionary with Gym-style keys.

        Returns:
            The policy features dictionary with standardized LeRobot keys.
        """
        # Build a new features mapping keyed by the same FeatureType buckets
        # We assume callers already placed features in the correct FeatureType.
        new_features: dict[PipelineFeatureType, dict[str, PolicyFeature]] = {ft: {} for ft in features}

        exact_pairs = {
            "pixels": OBS_IMAGE,
            "environment_state": OBS_ENV_STATE,
            "agent_pos": OBS_STATE,
        }
        prefix_pairs = {
            "pixels.": f"{OBS_IMAGES}.",
        }

        # Iterate over all incoming feature buckets and normalize/move each entry
        for src_ft, bucket in features.items():
            for key, feat in list(bucket.items()):
                handled = False

                # Prefix-based rules (e.g. pixels.cam1 -> OBS_IMAGES.cam1)
                for old_prefix, new_prefix in prefix_pairs.items():
                    # The "observation."-prefixed variant is checked first.
                    prefixed_old = f"{OBS_STR}.{old_prefix}"
                    if key.startswith(prefixed_old):
                        suffix = key[len(prefixed_old) :]
                        new_key = f"{new_prefix}{suffix}"
                        new_features[src_ft][new_key] = feat
                        handled = True
                        break
                    if key.startswith(old_prefix):
                        suffix = key[len(old_prefix) :]
                        new_key = f"{new_prefix}{suffix}"
                        new_features[src_ft][new_key] = feat
                        handled = True
                        break
                if handled:
                    continue

                # Exact-name rules (pixels, environment_state, agent_pos)
                for old, new in exact_pairs.items():
                    if key == old or key == f"{OBS_STR}.{old}":
                        new_key = new
                        new_features[src_ft][new_key] = feat
                        handled = True
                        break
                if handled:
                    continue

                # Default: keep key in the same source FeatureType bucket
                new_features[src_ft][key] = feat

        return new_features
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/observation_processor.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/pipeline.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines a generic, sequential data processing pipeline framework, primarily designed for
transforming robotics data (observations, actions, rewards, etc.).
The core components are:
- ProcessorStep: An abstract base class for a single data transformation operation.
- ProcessorStepRegistry: A mechanism to register and retrieve ProcessorStep classes by name.
- DataProcessorPipeline: A class that chains multiple ProcessorStep instances together to form a complete
data processing workflow. It integrates with the Hugging Face Hub for easy sharing and versioning of
pipelines, including their configuration and state.
- Specialized abstract ProcessorStep subclasses (e.g., ObservationProcessorStep, ActionProcessorStep)
to simplify the creation of steps that target specific parts of a data transition.
"""
from __future__ import annotations
import importlib
import json
import os
import re
from abc import ABC, abstractmethod
from collections.abc import Callable, Iterable, Sequence
from copy import deepcopy
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, TypedDict, TypeVar, cast
import torch
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file, save_file
from lerobot.configs.types import PipelineFeatureType, PolicyFeature
from lerobot.utils.hub import HubMixin
from .converters import batch_to_transition, create_transition, transition_to_batch
from .core import EnvAction, EnvTransition, PolicyAction, RobotAction, RobotObservation, TransitionKey
# Generic type variables for pipeline input and output.
TInput = TypeVar("TInput")  # raw input accepted by a DataProcessorPipeline (converted by `to_transition`)
TOutput = TypeVar("TOutput")  # final output produced by a DataProcessorPipeline (built by `to_output`)
class ProcessorStepRegistry:
    """A name -> class lookup table for `ProcessorStep` implementations.

    Registering step classes under string identifiers lets pipelines be rebuilt
    from configuration files without hardcoding imports of each step class.
    """

    _registry: dict[str, type] = {}

    @classmethod
    def register(cls, name: str | None = None):
        """Class decorator that records a `ProcessorStep` under `name`.

        Args:
            name: Identifier to register under. Defaults to the class's `__name__`.

        Returns:
            The decorator, which returns the class unchanged after registering it.

        Raises:
            ValueError: If the chosen name is already taken.
        """

        def decorator(step_class: type) -> type:
            """Registers `step_class` and hands it back untouched."""
            registration_name = step_class.__name__ if name is None else name
            if registration_name in cls._registry:
                raise ValueError(
                    f"Processor step '{registration_name}' is already registered. "
                    f"Use a different name or unregister the existing one first."
                )
            cls._registry[registration_name] = step_class
            # Remember the name on the class so serialization can look it up later.
            step_class._registry_name = registration_name
            return step_class

        return decorator

    @classmethod
    def get(cls, name: str) -> type:
        """Look up a registered processor step class by name.

        Args:
            name: The name the step was registered under.

        Returns:
            The processor step class.

        Raises:
            KeyError: If no step was registered under `name`.
        """
        try:
            return cls._registry[name]
        except KeyError:
            available = list(cls._registry.keys())
            raise KeyError(
                f"Processor step '{name}' not found in registry. "
                f"Available steps: {available}. "
                f"Make sure the step is registered using @ProcessorStepRegistry.register()"
            ) from None

    @classmethod
    def unregister(cls, name: str) -> None:
        """Remove `name` from the registry; a missing name is silently ignored."""
        cls._registry.pop(name, None)

    @classmethod
    def list(cls) -> list[str]:
        """Return the names of all registered processor steps."""
        return [*cls._registry]

    @classmethod
    def clear(cls) -> None:
        """Empty the registry entirely."""
        cls._registry.clear()
class ProcessorStep(ABC):
    """Abstract base class for a single step in a data processing pipeline.

    Each step must implement the `__call__` method to perform its transformation
    on a data transition and the `transform_features` method to describe how it
    alters the shape or type of data features.

    Subclasses can optionally be stateful by implementing `state_dict` and `load_state_dict`.
    """

    # Most recent transition handed to this step; the `transition` property raises
    # until it has been set. NOTE(review): nothing in this chunk shows who assigns
    # it — presumably the pipeline or `__call__` implementations; confirm.
    _current_transition: EnvTransition | None = None

    @property
    def transition(self) -> EnvTransition:
        """Provides access to the most recent transition being processed.

        This is useful for steps that need to access other parts of the transition
        data beyond their primary target (e.g., an action processing step that
        needs to look at the observation).

        Raises:
            ValueError: If accessed before the step has been called with a transition.
        """
        if self._current_transition is None:
            raise ValueError("Transition is not set. Make sure to call the step with a transition first.")
        return self._current_transition

    @abstractmethod
    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Processes an environment transition.

        This method should contain the core logic of the processing step.

        Args:
            transition: The input data transition to be processed.

        Returns:
            The processed transition.
        """
        # Default body is a pass-through; subclasses override with real logic.
        return transition

    def get_config(self) -> dict[str, Any]:
        """Returns the configuration of the step for serialization.

        Returns:
            A JSON-serializable dictionary of configuration parameters.
            The default implementation reports no configuration.
        """
        return {}

    def state_dict(self) -> dict[str, torch.Tensor]:
        """Returns the state of the step (e.g., learned parameters, running means).

        Returns:
            A dictionary mapping state names to tensors. Empty for stateless steps.
        """
        return {}

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """Loads the step's state from a state dictionary.

        Args:
            state: A dictionary of state tensors. The default implementation ignores it.
        """
        return None

    def reset(self) -> None:
        """Resets the internal state of the processor step, if any."""
        return None

    @abstractmethod
    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """Defines how this step modifies the description of pipeline features.

        This method is used to track changes in data shapes, dtypes, or modalities
        as data flows through the pipeline, without needing to process actual data.

        Args:
            features: A dictionary describing the input features for observations, actions, etc.

        Returns:
            A dictionary describing the output features after this step's transformation.
        """
        return features
class ProcessorKwargs(TypedDict, total=False):
    """A TypedDict for optional keyword arguments used in pipeline construction."""

    # Converter from raw input data to the canonical EnvTransition format.
    to_transition: Callable[[dict[str, Any]], EnvTransition] | None
    # Converter from the final EnvTransition to the caller's output format.
    to_output: Callable[[EnvTransition], Any] | None
    # Human-readable pipeline name.
    name: str | None
    # Hooks invoked with (step_index, transition) around each step's execution.
    before_step_hooks: list[Callable[[int, EnvTransition], None]] | None
    after_step_hooks: list[Callable[[int, EnvTransition], None]] | None
class ProcessorMigrationError(Exception):
    """Raised when a model needs migration to the processor format."""

    def __init__(self, model_path: str | Path, migration_command: str, original_error: str):
        """Store migration context and build the user-facing message.

        Args:
            model_path: Path or identifier of the model that failed to load.
            migration_command: The command the user should run to migrate it.
            original_error: The error message that triggered the migration hint.
        """
        self.model_path = model_path
        self.migration_command = migration_command
        self.original_error = original_error
        message = (
            f"Model '{model_path}' requires migration to processor format. "
            f"Run: {migration_command}\n\nOriginal error: {original_error}"
        )
        super().__init__(message)
@dataclass
class DataProcessorPipeline[TInput, TOutput](HubMixin):
    """A sequential pipeline for processing data, integrated with the Hugging Face Hub.

    This class chains together multiple `ProcessorStep` instances to form a complete
    data processing workflow. It's generic, allowing for custom input and output types,
    which are handled by the `to_transition` and `to_output` converters.

    Attributes:
        steps: A sequence of `ProcessorStep` objects that make up the pipeline.
        name: A descriptive name for the pipeline.
        to_transition: A function to convert raw input data into the standardized `EnvTransition` format.
        to_output: A function to convert the final `EnvTransition` into the desired output format.
        before_step_hooks: A list of functions to be called before each step is executed.
        after_step_hooks: A list of functions to be called after each step is executed.
    """

    steps: Sequence[ProcessorStep] = field(default_factory=list)
    name: str = "DataProcessorPipeline"
    # Defaults convert between flat batch dicts and EnvTransition; the `cast` calls
    # only narrow static types and have no runtime effect. Excluded from repr.
    to_transition: Callable[[TInput], EnvTransition] = field(
        default_factory=lambda: cast(Callable[[TInput], EnvTransition], batch_to_transition), repr=False
    )
    to_output: Callable[[EnvTransition], TOutput] = field(
        default_factory=lambda: cast(Callable[[EnvTransition], TOutput], transition_to_batch),
        repr=False,
    )
    # Hooks receive (step_index, transition); excluded from repr to keep it readable.
    before_step_hooks: list[Callable[[int, EnvTransition], None]] = field(default_factory=list, repr=False)
    after_step_hooks: list[Callable[[int, EnvTransition], None]] = field(default_factory=list, repr=False)
def __call__(self, data: TInput) -> TOutput:
    """Processes input data through the full pipeline.

    Args:
        data: The input data to process.

    Returns:
        The processed data in the specified output format.
    """
    # Convert input -> transition, run every step (with hooks), then convert back out.
    transition = self.to_transition(data)
    transformed_transition = self._forward(transition)
    return self.to_output(transformed_transition)
def _forward(self, transition: EnvTransition) -> EnvTransition:
    """Executes all processing steps and hooks in sequence.

    Args:
        transition: The initial `EnvTransition` object.

    Returns:
        The final `EnvTransition` after all steps have been applied.
    """
    for idx, processor_step in enumerate(self.steps):
        # Execute pre-hooks (called with the transition *before* this step runs).
        for hook in self.before_step_hooks:
            hook(idx, transition)

        transition = processor_step(transition)

        # Execute post-hooks (called with the transition *after* this step ran).
        for hook in self.after_step_hooks:
            hook(idx, transition)

    return transition
def step_through(self, data: TInput) -> Iterable[EnvTransition]:
    """Processes data step-by-step, yielding the transition at each stage.

    This is a generator method useful for debugging and inspecting the intermediate
    state of the data as it passes through the pipeline.

    Note:
        Unlike `__call__`, this path runs no before/after step hooks and does not
        apply `to_output` at the end.

    Args:
        data: The input data.

    Yields:
        The `EnvTransition` object, starting with the initial state and then after
        each processing step.
    """
    transition = self.to_transition(data)
    # Yield the initial state before any processing.
    yield transition
    for processor_step in self.steps:
        transition = processor_step(transition)
        yield transition
def _save_pretrained(self, save_directory: Path, **kwargs):
    """Internal method to comply with `HubMixin`'s saving mechanism.

    This method does the actual saving work and is called by HubMixin.save_pretrained.
    It writes one JSON file describing the pipeline (name + per-step config) and one
    `.safetensors` file per stateful step.

    Args:
        save_directory: Directory the files are written into (assumed to exist).
        **kwargs: May contain `config_filename` to override the JSON file name.
    """
    config_filename = kwargs.pop("config_filename", None)

    # Sanitize the pipeline name to create a valid filename prefix.
    sanitized_name = re.sub(r"[^a-zA-Z0-9_]", "_", self.name.lower())
    if config_filename is None:
        config_filename = f"{sanitized_name}.json"

    config: dict[str, Any] = {
        "name": self.name,
        "steps": [],
    }

    # Iterate through each step to build its configuration entry.
    for step_index, processor_step in enumerate(self.steps):
        registry_name = getattr(processor_step.__class__, "_registry_name", None)
        step_entry: dict[str, Any] = {}

        # Prefer registry name for portability, otherwise fall back to full class path.
        if registry_name:
            step_entry["registry_name"] = registry_name
        else:
            step_entry["class"] = (
                f"{processor_step.__class__.__module__}.{processor_step.__class__.__name__}"
            )

        # Save step configuration if `get_config` is implemented.
        if hasattr(processor_step, "get_config"):
            step_entry["config"] = processor_step.get_config()

        # Save step state if `state_dict` is implemented and returns a non-empty dict.
        if hasattr(processor_step, "state_dict"):
            state = processor_step.state_dict()
            if state:
                # Clone tensors to avoid modifying the original state.
                cloned_state = {key: tensor.clone() for key, tensor in state.items()}
                # Create a unique filename for the state file; the step index keeps
                # files distinct even when two steps share a registry name.
                if registry_name:
                    state_filename = f"{sanitized_name}_step_{step_index}_{registry_name}.safetensors"
                else:
                    state_filename = f"{sanitized_name}_step_{step_index}.safetensors"
                save_file(cloned_state, os.path.join(str(save_directory), state_filename))
                step_entry["state_file"] = state_filename

        config["steps"].append(step_entry)

    # Write the main configuration JSON file.
    with open(os.path.join(str(save_directory), config_filename), "w") as file_pointer:
        json.dump(config, file_pointer, indent=2)
def save_pretrained(
self,
save_directory: str | Path | None = None,
*,
repo_id: str | None = None,
push_to_hub: bool = False,
card_kwargs: dict[str, Any] | None = None,
config_filename: str | None = None,
**push_to_hub_kwargs,
):
"""Saves the pipeline's configuration and state to a directory.
This method creates a JSON configuration file that defines the pipeline's structure
(name and steps). For each stateful step, it also saves a `.safetensors` file
containing its state dictionary.
Args:
save_directory: The directory where the pipeline will be saved. If None, saves to
HF_LEROBOT_HOME/processors/{sanitized_pipeline_name}.
repo_id: ID of your repository on the Hub. Used only if `push_to_hub=true`.
push_to_hub: Whether or not to push your object to the Hugging Face Hub after saving it.
card_kwargs: Additional arguments passed to the card template to customize the card.
config_filename: The name of the JSON configuration file. If None, a name is
generated from the pipeline's `name` attribute.
**push_to_hub_kwargs: Additional key word arguments passed along to the push_to_hub method.
"""
if save_directory is None:
# Use default directory in HF_LEROBOT_HOME
from lerobot.utils.constants import HF_LEROBOT_HOME
sanitized_name = re.sub(r"[^a-zA-Z0-9_]", "_", self.name.lower())
save_directory = HF_LEROBOT_HOME / "processors" / sanitized_name
# For direct saves (not through hub), handle config_filename
if not push_to_hub and config_filename is not None:
# Call _save_pretrained directly with config_filename
save_directory = Path(save_directory)
save_directory.mkdir(parents=True, exist_ok=True)
self._save_pretrained(save_directory, config_filename=config_filename)
return None
# Pass config_filename through kwargs for _save_pretrained when using hub
if config_filename is not None:
push_to_hub_kwargs["config_filename"] = config_filename
# Call parent's save_pretrained which will call our _save_pretrained
return super().save_pretrained(
save_directory=save_directory,
repo_id=repo_id,
push_to_hub=push_to_hub,
card_kwargs=card_kwargs,
**push_to_hub_kwargs,
)
@classmethod
def from_pretrained(
cls,
pretrained_model_name_or_path: str | Path,
config_filename: str,
*,
force_download: bool = False,
resume_download: bool | None = None,
proxies: dict[str, str] | None = None,
token: str | bool | None = None,
cache_dir: str | Path | None = None,
local_files_only: bool = False,
revision: str | None = None,
overrides: dict[str, Any] | None = None,
to_transition: Callable[[TInput], EnvTransition] | None = None,
to_output: Callable[[EnvTransition], TOutput] | None = None,
**kwargs,
) -> DataProcessorPipeline[TInput, TOutput]:
"""Loads a pipeline from a local directory, single file, or Hugging Face Hub repository.
This method implements a simplified loading pipeline with intelligent migration detection:
**Simplified Loading Strategy**:
1. **Config Loading** (_load_config):
- **Directory**: Load specified config_filename from directory
- **Single file**: Load file directly (config_filename ignored)
- **Hub repository**: Download specified config_filename from Hub
2. **Config Validation** (_validate_loaded_config):
- Format validation: Ensure config is valid processor format
- Migration detection: Guide users to migrate old LeRobot models
- Clear errors: Provide actionable error messages
3. **Step Construction** (_build_steps_with_overrides):
- Class resolution: Registry lookup or dynamic imports
- Override merging: User parameters override saved config
- State loading: Load .safetensors files for stateful steps
4. **Override Validation** (_validate_overrides_used):
- Ensure all user overrides were applied (catch typos)
- Provide helpful error messages with available keys
**Migration Detection**:
- **Smart detection**: Analyzes JSON files to detect old LeRobot models
- **Precise targeting**: Avoids false positives on other HuggingFace models
- **Clear guidance**: Provides exact migration command to run
- **Error mode**: Always raises ProcessorMigrationError for clear user action
**Loading Examples**:
```python
# Directory loading
pipeline = DataProcessorPipeline.from_pretrained("/models/my_model", config_filename="processor.json")
# Single file loading
pipeline = DataProcessorPipeline.from_pretrained(
"/models/my_model/processor.json", config_filename="processor.json"
)
# Hub loading
pipeline = DataProcessorPipeline.from_pretrained("user/repo", config_filename="processor.json")
# Multiple configs (preprocessor/postprocessor)
preprocessor = DataProcessorPipeline.from_pretrained(
"model", config_filename="policy_preprocessor.json"
)
postprocessor = DataProcessorPipeline.from_pretrained(
"model", config_filename="policy_postprocessor.json"
)
```
**Override System**:
- **Key matching**: Use registry names or class names as override keys
- **Config merging**: User overrides take precedence over saved config
- **Validation**: Ensure all override keys match actual steps (catch typos)
- **Example**: overrides={"NormalizeStep": {"device": "cuda"}}
Args:
pretrained_model_name_or_path: The identifier of the repository on the Hugging Face Hub,
a path to a local directory, or a path to a single config file.
config_filename: The name of the pipeline's JSON configuration file. Always required
to prevent ambiguity when multiple configs exist (e.g., preprocessor vs postprocessor).
force_download: Whether to force (re)downloading the files.
resume_download: Whether to resume a previously interrupted download.
proxies: A dictionary of proxy servers to use.
token: The token to use as HTTP bearer authorization for private Hub repositories.
cache_dir: The path to a specific cache folder to store downloaded files.
local_files_only: If True, avoid downloading files from the Hub.
revision: The specific model version to use (e.g., a branch name, tag name, or commit id).
overrides: A dictionary to override the configuration of specific steps. Keys should
match the step's class name or registry name.
to_transition: A custom function to convert input data to `EnvTransition`.
to_output: A custom function to convert the final `EnvTransition` to the output format.
**kwargs: Additional arguments (not used).
Returns:
An instance of `DataProcessorPipeline` loaded with the specified configuration and state.
Raises:
FileNotFoundError: If the config file cannot be found.
ValueError: If configuration is ambiguous or instantiation fails.
ImportError: If a step's class cannot be imported.
KeyError: If an override key doesn't match any step in the pipeline.
ProcessorMigrationError: If the model requires migration to processor format.
"""
model_id = str(pretrained_model_name_or_path)
hub_download_kwargs = {
"force_download": force_download,
"resume_download": resume_download,
"proxies": proxies,
"token": token,
"cache_dir": cache_dir,
"local_files_only": local_files_only,
"revision": revision,
}
# 1. Load configuration using simplified 3-way logic
loaded_config, base_path = cls._load_config(model_id, config_filename, hub_download_kwargs)
# 2. Validate configuration and handle migration
cls._validate_loaded_config(model_id, loaded_config, config_filename)
# 3. Build steps with overrides
steps, validated_overrides = cls._build_steps_with_overrides(
loaded_config, overrides or {}, model_id, base_path, hub_download_kwargs
)
# 4. Validate that all overrides were used
cls._validate_overrides_used(validated_overrides, loaded_config)
# 5. Construct and return the final pipeline instance
return cls(
steps=steps,
name=loaded_config.get("name", "DataProcessorPipeline"),
to_transition=to_transition or cast(Callable[[TInput], EnvTransition], batch_to_transition),
to_output=to_output or cast(Callable[[EnvTransition], TOutput], transition_to_batch),
)
@classmethod
def _load_config(
cls,
model_id: str,
config_filename: str,
hub_download_kwargs: dict[str, Any],
) -> tuple[dict[str, Any], Path]:
"""Load configuration from local file or Hugging Face Hub.
This method implements a super-simplified 3-way loading strategy:
1. **Local directory**: Load config_filename from directory
- Example: model_id="/models/my_model", config_filename="processor.json"
- Loads: "/models/my_model/processor.json"
2. **Single file**: Load file directly (ignore config_filename)
- Example: model_id="/models/my_model/processor.json"
- Loads: "/models/my_model/processor.json" (config_filename ignored)
3. **Hub repository**: Download config_filename from Hub
- Example: model_id="user/repo", config_filename="processor.json"
- Downloads and loads: config_filename from Hub repo
**Benefits of Explicit config_filename**:
- No auto-detection complexity or edge cases
- No risk of loading wrong config (preprocessor vs postprocessor)
- Consistent behavior across local and Hub usage
- Clear, predictable errors
Args:
model_id: The model identifier (Hub repo ID, local directory, or file path)
config_filename: The explicit config filename to load (always required)
hub_download_kwargs: Parameters for hf_hub_download (tokens, cache, etc.)
Returns:
Tuple of (loaded_config, base_path)
- loaded_config: Parsed JSON config dict (always loaded, never None)
- base_path: Directory containing config file (for state file resolution)
Raises:
FileNotFoundError: If config file cannot be found locally or on Hub
"""
model_path = Path(model_id)
if model_path.is_dir():
# Directory: load specified config from directory
config_path = model_path / config_filename
if not config_path.exists():
# Check for migration before giving clear error
if cls._should_suggest_migration(model_path):
cls._suggest_processor_migration(model_id, f"Config file '{config_filename}' not found")
raise FileNotFoundError(
f"Config file '{config_filename}' not found in directory '{model_id}'"
)
with open(config_path) as f:
return json.load(f), model_path
elif model_path.is_file():
# File: load file directly (config_filename is ignored for single files)
with open(model_path) as f:
return json.load(f), model_path.parent
else:
# Hub: download specified config
try:
config_path = hf_hub_download(
repo_id=model_id,
filename=config_filename,
repo_type="model",
**hub_download_kwargs,
)
with open(config_path) as f:
return json.load(f), Path(config_path).parent
except Exception as e:
raise FileNotFoundError(
f"Could not find '{config_filename}' on the HuggingFace Hub at '{model_id}'"
) from e
@classmethod
def _validate_loaded_config(
cls, model_id: str, loaded_config: dict[str, Any], config_filename: str
) -> None:
"""Validate that a config was loaded and is a valid processor config.
This method validates processor config format with intelligent migration detection:
**Config Format Validation**:
- Use _is_processor_config() to validate structure
- Must have "steps" field with list of step configurations
- Each step needs "class" or "registry_name"
- If validation fails AND local directory: Check for migration need
- If migration needed: Raise ProcessorMigrationError with command
- If no migration: Raise ValueError with helpful error message
**Migration Detection Logic**:
- Only triggered for local directories (not Hub repos)
- Analyzes all JSON files in directory to detect old LeRobot models
- Provides exact migration command with model path
Args:
model_id: The model identifier (used for migration detection)
loaded_config: The loaded config dictionary (guaranteed non-None)
config_filename: The config filename that was loaded (for error messages)
Raises:
ValueError: If config format is invalid
ProcessorMigrationError: If model needs migration to processor format
"""
# Validate that this is actually a processor config
if not cls._is_processor_config(loaded_config):
if Path(model_id).is_dir() and cls._should_suggest_migration(Path(model_id)):
cls._suggest_processor_migration(
model_id,
f"Config file '{config_filename}' is not a valid processor configuration",
)
raise ValueError(
f"Config file '{config_filename}' is not a valid processor configuration. "
f"Expected a config with 'steps' field, but got: {list(loaded_config.keys())}"
)
@classmethod
def _build_steps_with_overrides(
cls,
loaded_config: dict[str, Any],
overrides: dict[str, Any],
model_id: str,
base_path: Path | None,
hub_download_kwargs: dict[str, Any],
) -> tuple[list[ProcessorStep], set[str]]:
"""Build all processor steps with overrides and state loading.
This method orchestrates the complete step construction pipeline:
**For each step in loaded_config["steps"]**:
1. **Class Resolution** (via _resolve_step_class):
- **If "registry_name" exists**: Look up in ProcessorStepRegistry
Example: {"registry_name": "normalize_step"} -> Get registered class
- **Else use "class" field**: Dynamic import from full module path
Example: {"class": "lerobot.processor.normalize.NormalizeStep"}
- **Result**: (step_class, step_key) where step_key is used for overrides
2. **Step Instantiation** (via _instantiate_step):
- **Merge configs**: saved_config + user_overrides
- **Override priority**: User overrides take precedence over saved config
- **Example**: saved={"mean": 0.0}, override={"mean": 1.0} -> final={"mean": 1.0}
- **Result**: Instantiated ProcessorStep object
3. **State Loading** (via _load_step_state):
- **If step has "state_file"**: Load tensor state from .safetensors
- **Local first**: Check base_path/state_file.safetensors
- **Hub fallback**: Download state file if not found locally
- **Optional**: Only load if step has load_state_dict method
4. **Override Tracking**:
- **Track used overrides**: Remove step_key from remaining set
- **Purpose**: Validate all user overrides were applied (detect typos)
**Error Handling**:
- Class resolution errors -> ImportError with helpful message
- Instantiation errors -> ValueError with config details
- State loading errors -> Propagated from load_state_dict
Args:
loaded_config: The loaded processor configuration (must have "steps" field)
overrides: User-provided parameter overrides (keyed by class/registry name)
model_id: The model identifier (needed for Hub state file downloads)
base_path: Local directory path for finding state files
hub_download_kwargs: Parameters for hf_hub_download (tokens, cache, etc.)
Returns:
Tuple of (instantiated_steps_list, unused_override_keys)
- instantiated_steps_list: List of ready-to-use ProcessorStep instances
- unused_override_keys: Override keys that didn't match any step (for validation)
Raises:
ImportError: If a step class cannot be imported or found in registry
ValueError: If a step cannot be instantiated with its configuration
"""
steps: list[ProcessorStep] = []
override_keys = set(overrides.keys())
for step_entry in loaded_config["steps"]:
# 1. Get step class and key
step_class, step_key = cls._resolve_step_class(step_entry)
# 2. Instantiate step with overrides
step_instance = cls._instantiate_step(step_entry, step_class, step_key, overrides)
# 3. Load step state if available
cls._load_step_state(step_instance, step_entry, model_id, base_path, hub_download_kwargs)
# 4. Track used overrides
if step_key in override_keys:
override_keys.discard(step_key)
steps.append(step_instance)
return steps, override_keys
@classmethod
def _resolve_step_class(cls, step_entry: dict[str, Any]) -> tuple[type[ProcessorStep], str]:
"""Resolve step class from registry or import path.
This method implements a two-tier resolution strategy:
**Tier 1: Registry-based resolution** (preferred):
- **If "registry_name" in step_entry**: Look up in ProcessorStepRegistry
- **Advantage**: Faster, no imports needed, guaranteed compatibility
- **Example**: {"registry_name": "normalize_step"} -> Get pre-registered class
- **Error**: KeyError if registry_name not found -> Convert to ImportError
**Tier 2: Dynamic import fallback**:
- **Else use "class" field**: Full module.ClassName import path
- **Process**: Split "module.path.ClassName" into module + class parts
- **Import**: Use importlib.import_module() + getattr()
- **Example**: "lerobot.processor.normalize.NormalizeStep"
a. Import module: "lerobot.processor.normalize"
b. Get class: getattr(module, "NormalizeStep")
- **step_key**: Use class_name ("NormalizeStep") for overrides
**Override Key Strategy**:
- Registry steps: Use registry_name ("normalize_step")
- Import steps: Use class_name ("NormalizeStep")
- This allows users to override with: {"normalize_step": {...}} or {"NormalizeStep": {...}}
**Error Handling**:
- Registry KeyError -> ImportError with registry context
- Import/Attribute errors -> ImportError with helpful suggestions
- All errors include troubleshooting guidance
Args:
step_entry: The step configuration dictionary (must have "registry_name" or "class")
Returns:
Tuple of (step_class, step_key)
- step_class: The resolved ProcessorStep class (ready for instantiation)
- step_key: The key used for user overrides (registry_name or class_name)
Raises:
ImportError: If step class cannot be loaded from registry or import path
"""
if "registry_name" in step_entry:
try:
step_class = ProcessorStepRegistry.get(step_entry["registry_name"])
return step_class, step_entry["registry_name"]
except KeyError as e:
raise ImportError(f"Failed to load processor step from registry. {str(e)}") from e
else:
# Fallback to dynamic import using the full class path
full_class_path = step_entry["class"]
module_path, class_name = full_class_path.rsplit(".", 1)
try:
module = importlib.import_module(module_path)
step_class = getattr(module, class_name)
return step_class, class_name
except (ImportError, AttributeError) as e:
raise ImportError(
f"Failed to load processor step '{full_class_path}'. "
f"Make sure the module '{module_path}' is installed and contains class '{class_name}'. "
f"Consider registering the step using @ProcessorStepRegistry.register() for better portability. "
f"Error: {str(e)}"
) from e
@classmethod
def _instantiate_step(
cls,
step_entry: dict[str, Any],
step_class: type[ProcessorStep],
step_key: str,
overrides: dict[str, Any],
) -> ProcessorStep:
"""Instantiate a single processor step with config overrides.
This method handles the configuration merging and instantiation logic:
**Configuration Merging Strategy**:
1. **Extract saved config**: Get step_entry.get("config", {}) from saved pipeline
- Example: {"config": {"mean": 0.0, "std": 1.0}}
2. **Extract user overrides**: Get overrides.get(step_key, {}) for this step
- Example: overrides = {"NormalizeStep": {"mean": 2.0, "device": "cuda"}}
3. **Merge with priority**: {**saved_cfg, **step_overrides}
- **Override priority**: User values override saved values
- **Result**: {"mean": 2.0, "std": 1.0, "device": "cuda"}
**Instantiation Process**:
- **Call constructor**: step_class(**merged_cfg)
- **Example**: NormalizeStep(mean=2.0, std=1.0, device="cuda")
**Error Handling**:
- **Any exception during instantiation**: Convert to ValueError
- **Include context**: step name, attempted config, original error
- **Purpose**: Help users debug configuration issues
- **Common causes**:
a. Invalid parameter types (str instead of float)
b. Missing required parameters
c. Incompatible parameter combinations
Args:
step_entry: The step configuration from saved config (contains "config" dict)
step_class: The step class to instantiate (already resolved)
step_key: The key used for overrides ("registry_name" or class name)
overrides: User-provided parameter overrides (keyed by step_key)
Returns:
The instantiated processor step (ready for use)
Raises:
ValueError: If step cannot be instantiated, with detailed error context
"""
try:
saved_cfg = step_entry.get("config", {})
step_overrides = overrides.get(step_key, {})
merged_cfg = {**saved_cfg, **step_overrides}
return step_class(**merged_cfg)
except Exception as e:
step_name = step_entry.get("registry_name", step_entry.get("class", "Unknown"))
raise ValueError(
f"Failed to instantiate processor step '{step_name}' with config: {step_entry.get('config', {})}. "
f"Error: {str(e)}"
) from e
@classmethod
def _load_step_state(
cls,
step_instance: ProcessorStep,
step_entry: dict[str, Any],
model_id: str,
base_path: Path | None,
hub_download_kwargs: dict[str, Any],
) -> None:
"""Load state dictionary for a processor step if available.
This method implements conditional state loading with local/Hub fallback:
**Precondition Checks** (early return if not met):
1. **"state_file" in step_entry**: Step config specifies a state file
- **If missing**: Step has no saved state (e.g., stateless transforms)
2. **hasattr(step_instance, "load_state_dict")**: Step supports state loading
- **If missing**: Step doesn't implement state loading (rare)
**State File Resolution Strategy**:
1. **Local file priority**: Check base_path/state_filename exists
- **Advantage**: Faster, no network calls
- **Example**: "/models/my_model/normalize_step_0.safetensors"
- **Use case**: Loading from local saved model directory
2. **Hub download fallback**: Download state file from repository
- **When triggered**: Local file not found or base_path is None
- **Process**: Use hf_hub_download with same parameters as config
- **Example**: Download "normalize_step_0.safetensors" from "user/repo"
- **Result**: Downloaded to local cache, path returned
**State Loading Process**:
- **Load tensors**: Use safetensors.torch.load_file()
- **Apply to step**: Call step_instance.load_state_dict(tensor_dict)
- **In-place modification**: Updates step's internal tensor state
**Common state file examples**:
- "normalize_step_0.safetensors" - normalization statistics
- "custom_step_1.safetensors" - learned parameters
- "tokenizer_step_2.safetensors" - vocabulary embeddings
Args:
step_instance: The step instance to load state into (must have load_state_dict)
step_entry: The step configuration dictionary (may contain "state_file")
model_id: The model identifier (used for Hub downloads if needed)
base_path: Local directory path for finding state files (None for Hub-only)
hub_download_kwargs: Parameters for hf_hub_download (tokens, cache, etc.)
Note:
This method modifies step_instance in-place and returns None.
If state loading fails, exceptions from load_state_dict propagate.
"""
if "state_file" not in step_entry or not hasattr(step_instance, "load_state_dict"):
return
state_filename = step_entry["state_file"]
# Try local file first
if base_path and (base_path / state_filename).exists():
state_path = str(base_path / state_filename)
else:
# Download from Hub
state_path = hf_hub_download(
repo_id=model_id,
filename=state_filename,
repo_type="model",
**hub_download_kwargs,
)
step_instance.load_state_dict(load_file(state_path))
@classmethod
def _validate_overrides_used(
cls, remaining_override_keys: set[str], loaded_config: dict[str, Any]
) -> None:
"""Validate that all provided overrides were used.
This method ensures user overrides are valid to catch typos and configuration errors:
**Validation Logic**:
1. **If remaining_override_keys is empty**: All overrides were used -> Success
- **Early return**: No validation needed
- **Normal case**: User provided correct override keys
2. **If remaining_override_keys has entries**: Some overrides unused -> Error
- **Root cause**: User provided keys that don't match any step
- **Common issues**:
a. Typos in step names ("NormalizStep" vs "NormalizeStep")
b. Using wrong key type (class name vs registry name)
c. Step doesn't exist in saved pipeline
**Helpful Error Generation**:
- **Extract available keys**: Build list of valid override keys from config
a. **Registry steps**: Use "registry_name" directly
b. **Import steps**: Extract class name from "class" field
- Example: "lerobot.processor.normalize.NormalizeStep" -> "NormalizeStep"
- **Error message includes**:
a. Invalid keys provided by user
b. List of valid keys they can use
c. Guidance about registry vs class names
**Override Key Resolution Rules**:
- Steps with "registry_name": Use registry_name for overrides
- Steps with "class": Use final class name for overrides
- Users must match these exact keys in their overrides dict
Args:
remaining_override_keys: Override keys that weren't matched to any step
loaded_config: The loaded processor configuration (contains "steps" list)
Raises:
KeyError: If any override keys were not used, with helpful error message
"""
if not remaining_override_keys:
return
available_keys = [
step.get("registry_name") or step["class"].rsplit(".", 1)[1] for step in loaded_config["steps"]
]
raise KeyError(
f"Override keys {list(remaining_override_keys)} do not match any step in the saved configuration. "
f"Available step keys: {available_keys}. "
f"Make sure override keys match exact step class names or registry names."
)
@classmethod
def _should_suggest_migration(cls, model_path: Path) -> bool:
"""Check if directory has JSON files but no processor configs.
This method implements smart migration detection to avoid false positives:
**Decision Logic**:
1. **No JSON files found**: Return False
- **Reason**: Empty directory or only non-config files
- **Example**: Directory with only .safetensors, .md files
- **Action**: No migration needed
2. **JSON files exist**: Analyze each file
- **Goal**: Determine if ANY file is a valid processor config
- **Process**:
a. Try to parse each .json file
b. Skip files with JSON parse errors (malformed)
c. Check if parsed config passes _is_processor_config()
- **If ANY valid processor found**: Return False (no migration)
- **If NO valid processors found**: Return True (migration needed)
**Examples**:
- **No migration**: ["processor.json", "config.json"] where processor.json is valid
- **Migration needed**: ["config.json", "train.json"] where both are model configs
- **No migration**: [] (empty directory)
- **Migration needed**: ["old_model_config.json"] with old LeRobot format
**Why this works**:
- **Precise detection**: Only suggests migration for actual old LeRobot models
- **Avoids false positives**: Won't trigger on other HuggingFace model types
- **Graceful handling**: Ignores malformed JSON files
Args:
model_path: Path to local directory to analyze
Returns:
True if directory has JSON configs but none are processor configs (migration needed)
False if no JSON files or at least one valid processor config exists
"""
json_files = list(model_path.glob("*.json"))
if len(json_files) == 0:
return False
# Check if any JSON file is a processor config
for json_file in json_files:
try:
with open(json_file) as f:
config = json.load(f)
if cls._is_processor_config(config):
return False # Found at least one processor config, no migration needed
except (json.JSONDecodeError, OSError):
# Skip files that can't be parsed as JSON
continue
# Have JSON files but no processor configs - suggest migration
return True
@classmethod
def _is_processor_config(cls, config: dict) -> bool:
"""Check if config follows DataProcessorPipeline format.
This method validates the processor configuration structure:
**Required Structure Validation**:
1. **"steps" field existence**: Must have top-level "steps" key
- **If missing**: Not a processor config (e.g., model config, train config)
- **Example invalid**: {"type": "act", "hidden_dim": 256}
2. **"steps" field type**: Must be a list, not other types
- **If not list**: Invalid format
- **Example invalid**: {"steps": "some_string"} or {"steps": {"key": "value"}}
3. **Empty steps validation**: Empty list is valid
- **If len(steps) == 0**: Return True immediately
- **Use case**: Empty processor pipeline (no-op)
- **Example valid**: {"name": "EmptyProcessor", "steps": []}
**Individual Step Validation** (for non-empty steps):
For each step in the steps list:
1. **Step type**: Must be a dictionary
- **If not dict**: Invalid step format
- **Example invalid**: ["string_step", 123, true]
2. **Step identifier**: Must have either "class" OR "registry_name"
- **"registry_name"**: Registered step (preferred)
Example: {"registry_name": "normalize_step", "config": {...}}
- **"class"**: Full import path
Example: {"class": "lerobot.processor.normalize.NormalizeStep"}
- **If neither**: Invalid step (can't resolve class)
- **If both**: Also valid (registry_name takes precedence)
**Valid Processor Config Examples**:
- {"steps": []} - Empty processor
- {"steps": [{"registry_name": "normalize"}]} - Registry step
- {"steps": [{"class": "my.module.Step"}]} - Import step
- {"name": "MyProcessor", "steps": [...]} - With name
**Invalid Config Examples**:
- {"type": "act"} - Missing "steps"
- {"steps": "normalize"} - Steps not a list
- {"steps": [{}]} - Step missing class/registry_name
- {"steps": ["string"]} - Step not a dict
Args:
config: The configuration dictionary to validate
Returns:
True if config follows valid DataProcessorPipeline format, False otherwise
"""
# Must have a "steps" field with a list of step configurations
if not isinstance(config.get("steps"), list):
return False
steps = config["steps"]
if len(steps) == 0:
return True # Empty processor is valid
# Each step must be a dict with either "class" or "registry_name"
for step in steps:
if not isinstance(step, dict):
return False
if not ("class" in step or "registry_name" in step):
return False
return True
@classmethod
def _suggest_processor_migration(cls, model_path: str | Path, original_error: str) -> None:
"""Raise migration error when we detect JSON files but no processor configs.
This method is called when migration detection determines that a model
directory contains configuration files but none are valid processor configs.
This typically indicates an old LeRobot model that needs migration.
**When this is called**:
- User tries to load DataProcessorPipeline from local directory
- Directory contains JSON configuration files
- None of the JSON files follow processor config format
- _should_suggest_migration() returned True
**Migration Command Generation**:
- Constructs exact command user needs to run
- Uses the migration script: migrate_policy_normalization.py
- Includes the model path automatically
- Example: "python src/lerobot/processor/migrate_policy_normalization.py --pretrained-path /models/old_model"
**Error Structure**:
- **Always raises**: ProcessorMigrationError (never returns)
- **Includes**: model_path, migration_command, original_error
- **Purpose**: Force user attention to migration need
- **User experience**: Clear actionable error with exact command to run
**Migration Process**:
The suggested command will:
1. Extract normalization stats from old model
2. Create new processor configs (preprocessor + postprocessor)
3. Remove normalization layers from model
4. Save migrated model with processor pipeline
Args:
model_path: Path to the model directory needing migration
original_error: The error that triggered migration detection (for context)
Raises:
ProcessorMigrationError: Always raised (this method never returns normally)
"""
migration_command = (
f"python src/lerobot/processor/migrate_policy_normalization.py --pretrained-path {model_path}"
)
raise ProcessorMigrationError(model_path, migration_command, original_error)
def __len__(self) -> int:
"""Returns the number of steps in the pipeline."""
return len(self.steps)
def __getitem__(self, idx: int | slice) -> ProcessorStep | DataProcessorPipeline[TInput, TOutput]:
"""Retrieves a step or a sub-pipeline by index or slice.
Args:
idx: An integer index or a slice object.
Returns:
A `ProcessorStep` if `idx` is an integer, or a new `DataProcessorPipeline`
containing the sliced steps.
"""
if isinstance(idx, slice):
# Return a new pipeline instance with the sliced steps.
return DataProcessorPipeline(
steps=self.steps[idx],
name=self.name,
to_transition=self.to_transition,
to_output=self.to_output,
before_step_hooks=self.before_step_hooks.copy(),
after_step_hooks=self.after_step_hooks.copy(),
)
return self.steps[idx]
def register_before_step_hook(self, fn: Callable[[int, EnvTransition], None]):
"""Registers a function to be called before each step.
Args:
fn: A callable that accepts the step index and the current transition.
"""
self.before_step_hooks.append(fn)
def unregister_before_step_hook(self, fn: Callable[[int, EnvTransition], None]):
"""Unregisters a 'before_step' hook.
Args:
fn: The exact function object that was previously registered.
Raises:
ValueError: If the hook is not found in the list.
"""
try:
self.before_step_hooks.remove(fn)
except ValueError:
raise ValueError(
f"Hook {fn} not found in before_step_hooks. Make sure to pass the exact same function reference."
) from None
def register_after_step_hook(self, fn: Callable[[int, EnvTransition], None]):
"""Registers a function to be called after each step.
Args:
fn: A callable that accepts the step index and the current transition.
"""
self.after_step_hooks.append(fn)
def unregister_after_step_hook(self, fn: Callable[[int, EnvTransition], None]):
"""Unregisters an 'after_step' hook.
Args:
fn: The exact function object that was previously registered.
Raises:
ValueError: If the hook is not found in the list.
"""
try:
self.after_step_hooks.remove(fn)
except ValueError:
raise ValueError(
f"Hook {fn} not found in after_step_hooks. Make sure to pass the exact same function reference."
) from None
def reset(self):
"""Resets the state of all stateful steps in the pipeline."""
for step in self.steps:
if hasattr(step, "reset"):
step.reset()
def __repr__(self) -> str:
"""Provides a concise string representation of the pipeline."""
step_names = [step.__class__.__name__ for step in self.steps]
if not step_names:
steps_repr = "steps=0: []"
elif len(step_names) <= 3:
steps_repr = f"steps={len(step_names)}: [{', '.join(step_names)}]"
else:
# For long pipelines, show the first, second, and last steps.
displayed = f"{step_names[0]}, {step_names[1]}, ..., {step_names[-1]}"
steps_repr = f"steps={len(step_names)}: [{displayed}]"
parts = [f"name='{self.name}'", steps_repr]
return f"DataProcessorPipeline({', '.join(parts)})"
def __post_init__(self):
"""Validates that all provided steps are instances of `ProcessorStep`."""
for i, step in enumerate(self.steps):
if not isinstance(step, ProcessorStep):
raise TypeError(f"Step {i} ({type(step).__name__}) must inherit from ProcessorStep")
def transform_features(
self, initial_features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
"""Applies feature transformations from all steps sequentially.
This method propagates a feature description dictionary through each step's
`transform_features` method, allowing the pipeline to statically determine
the output feature specification without processing any real data.
Args:
initial_features: A dictionary describing the initial features.
Returns:
The final feature description after all transformations.
"""
features: dict[PipelineFeatureType, dict[str, PolicyFeature]] = deepcopy(initial_features)
for _, step in enumerate(self.steps):
out = step.transform_features(features)
features = out
return features
# Convenience methods for processing individual parts of a transition.
def process_observation(self, observation: RobotObservation) -> RobotObservation:
"""Processes only the observation part of a transition through the pipeline.
Args:
observation: The observation dictionary.
Returns:
The processed observation dictionary.
"""
transition: EnvTransition = create_transition(observation=observation)
transformed_transition = self._forward(transition)
return transformed_transition[TransitionKey.OBSERVATION]
def process_action(
self, action: PolicyAction | RobotAction | EnvAction
) -> PolicyAction | RobotAction | EnvAction:
"""Processes only the action part of a transition through the pipeline.
Args:
action: The action data.
Returns:
The processed action.
"""
transition: EnvTransition = create_transition(action=action)
transformed_transition = self._forward(transition)
return transformed_transition[TransitionKey.ACTION]
def process_reward(self, reward: float | torch.Tensor) -> float | torch.Tensor:
"""Processes only the reward part of a transition through the pipeline.
Args:
reward: The reward value.
Returns:
The processed reward.
"""
transition: EnvTransition = create_transition(reward=reward)
transformed_transition = self._forward(transition)
return transformed_transition[TransitionKey.REWARD]
def process_done(self, done: bool | torch.Tensor) -> bool | torch.Tensor:
"""Processes only the done flag of a transition through the pipeline.
Args:
done: The done flag.
Returns:
The processed done flag.
"""
transition: EnvTransition = create_transition(done=done)
transformed_transition = self._forward(transition)
return transformed_transition[TransitionKey.DONE]
def process_truncated(self, truncated: bool | torch.Tensor) -> bool | torch.Tensor:
"""Processes only the truncated flag of a transition through the pipeline.
Args:
truncated: The truncated flag.
Returns:
The processed truncated flag.
"""
transition: EnvTransition = create_transition(truncated=truncated)
transformed_transition = self._forward(transition)
return transformed_transition[TransitionKey.TRUNCATED]
def process_info(self, info: dict[str, Any]) -> dict[str, Any]:
"""Processes only the info dictionary of a transition through the pipeline.
Args:
info: The info dictionary.
Returns:
The processed info dictionary.
"""
transition: EnvTransition = create_transition(info=info)
transformed_transition = self._forward(transition)
return transformed_transition[TransitionKey.INFO]
def process_complementary_data(self, complementary_data: dict[str, Any]) -> dict[str, Any]:
"""Processes only the complementary data part of a transition through the pipeline.
Args:
complementary_data: The complementary data dictionary.
Returns:
The processed complementary data dictionary.
"""
transition: EnvTransition = create_transition(complementary_data=complementary_data)
transformed_transition = self._forward(transition)
return transformed_transition[TransitionKey.COMPLEMENTARY_DATA]
# Type aliases for semantic clarity.
# Both names alias the same generic implementation; the distinct identifiers
# signal intended usage (robot-side vs. policy-side processing) to readers
# and type checkers without changing runtime behavior.
RobotProcessorPipeline = DataProcessorPipeline[TInput, TOutput]
PolicyProcessorPipeline = DataProcessorPipeline[TInput, TOutput]
class ObservationProcessorStep(ProcessorStep, ABC):
    """Abstract step that rewrites only the observation of a transition."""

    @abstractmethod
    def observation(self, observation: RobotObservation) -> RobotObservation:
        """Transform an observation dictionary.

        Args:
            observation: Observation dict taken from the transition.

        Returns:
            The transformed observation dict.
        """
        ...

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Delegate to `observation()` and write the result back into the transition."""
        updated = transition.copy()
        self._current_transition = updated
        obs = updated.get(TransitionKey.OBSERVATION)
        if obs is None or not isinstance(obs, dict):
            raise ValueError("ObservationProcessorStep requires an observation in the transition.")
        # Hand the subclass a shallow copy so it cannot mutate the source dict.
        updated[TransitionKey.OBSERVATION] = self.observation(obs.copy())
        return updated
class ActionProcessorStep(ProcessorStep, ABC):
    """Abstract step that rewrites only the action of a transition."""

    @abstractmethod
    def action(
        self, action: PolicyAction | RobotAction | EnvAction
    ) -> PolicyAction | RobotAction | EnvAction:
        """Transform an action.

        Args:
            action: Action taken from the transition.

        Returns:
            The transformed action.
        """
        ...

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Delegate to `action()` and store the result back in the transition."""
        updated = transition.copy()
        self._current_transition = updated
        current = updated.get(TransitionKey.ACTION)
        if current is None:
            raise ValueError("ActionProcessorStep requires an action in the transition.")
        updated[TransitionKey.ACTION] = self.action(current)
        return updated
class RobotActionProcessorStep(ProcessorStep, ABC):
    """Abstract step that rewrites a `RobotAction` (a dictionary)."""

    @abstractmethod
    def action(self, action: RobotAction) -> RobotAction:
        """Transform a `RobotAction`.

        Args:
            action: The input `RobotAction` dictionary.

        Returns:
            The transformed `RobotAction`.
        """
        ...

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Validate the action is a dict, then delegate to `action()`."""
        updated = transition.copy()
        self._current_transition = updated
        current = updated.get(TransitionKey.ACTION)
        if current is None or not isinstance(current, dict):
            raise ValueError(f"Action should be a RobotAction type (dict), but got {type(current)}")
        # Pass a shallow copy so the subclass cannot mutate the original dict.
        updated[TransitionKey.ACTION] = self.action(current.copy())
        return updated
class PolicyActionProcessorStep(ProcessorStep, ABC):
    """Abstract step that rewrites a `PolicyAction` (tensor-typed action)."""

    @abstractmethod
    def action(self, action: PolicyAction) -> PolicyAction:
        """Transform a `PolicyAction`.

        Args:
            action: The input `PolicyAction`.

        Returns:
            The transformed `PolicyAction`.
        """
        ...

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Validate the action type, then delegate to `action()`."""
        updated = transition.copy()
        self._current_transition = updated
        current = updated.get(TransitionKey.ACTION)
        if not isinstance(current, PolicyAction):
            raise ValueError(f"Action should be a PolicyAction type (tensor), but got {type(current)}")
        updated[TransitionKey.ACTION] = self.action(current)
        return updated
class RewardProcessorStep(ProcessorStep, ABC):
    """Abstract step that rewrites only the reward of a transition."""

    @abstractmethod
    def reward(self, reward) -> float | torch.Tensor:
        """Transform a reward.

        Args:
            reward: Reward taken from the transition.

        Returns:
            The transformed reward.
        """
        ...

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Delegate to `reward()` and store the result back in the transition."""
        updated = transition.copy()
        self._current_transition = updated
        current = updated.get(TransitionKey.REWARD)
        if current is None:
            raise ValueError("RewardProcessorStep requires a reward in the transition.")
        updated[TransitionKey.REWARD] = self.reward(current)
        return updated
class DoneProcessorStep(ProcessorStep, ABC):
    """Abstract step that rewrites only the 'done' flag of a transition."""

    @abstractmethod
    def done(self, done) -> bool | torch.Tensor:
        """Transform a 'done' flag.

        Args:
            done: 'done' flag taken from the transition.

        Returns:
            The transformed 'done' flag.
        """
        ...

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Delegate to `done()` and store the result back in the transition."""
        updated = transition.copy()
        self._current_transition = updated
        current = updated.get(TransitionKey.DONE)
        if current is None:
            raise ValueError("DoneProcessorStep requires a done flag in the transition.")
        updated[TransitionKey.DONE] = self.done(current)
        return updated
class TruncatedProcessorStep(ProcessorStep, ABC):
    """Abstract step that rewrites only the 'truncated' flag of a transition."""

    @abstractmethod
    def truncated(self, truncated) -> bool | torch.Tensor:
        """Transform a 'truncated' flag.

        Args:
            truncated: 'truncated' flag taken from the transition.

        Returns:
            The transformed 'truncated' flag.
        """
        ...

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Delegate to `truncated()` and store the result back in the transition."""
        updated = transition.copy()
        self._current_transition = updated
        current = updated.get(TransitionKey.TRUNCATED)
        if current is None:
            raise ValueError("TruncatedProcessorStep requires a truncated flag in the transition.")
        updated[TransitionKey.TRUNCATED] = self.truncated(current)
        return updated
class InfoProcessorStep(ProcessorStep, ABC):
    """Abstract step that rewrites only the 'info' dictionary of a transition."""

    @abstractmethod
    def info(self, info) -> dict[str, Any]:
        """Transform an 'info' dictionary.

        Args:
            info: 'info' dict taken from the transition.

        Returns:
            The transformed 'info' dict.
        """
        ...

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Delegate to `info()` and store the result back in the transition."""
        updated = transition.copy()
        self._current_transition = updated
        current = updated.get(TransitionKey.INFO)
        if current is None or not isinstance(current, dict):
            raise ValueError("InfoProcessorStep requires an info dictionary in the transition.")
        # Shallow-copy so the subclass cannot mutate the source dict.
        updated[TransitionKey.INFO] = self.info(current.copy())
        return updated
class ComplementaryDataProcessorStep(ProcessorStep, ABC):
    """Abstract step that rewrites only the 'complementary_data' of a transition."""

    @abstractmethod
    def complementary_data(self, complementary_data) -> dict[str, Any]:
        """Transform a 'complementary_data' dictionary.

        Args:
            complementary_data: 'complementary_data' dict taken from the transition.

        Returns:
            The transformed 'complementary_data' dict.
        """
        ...

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Delegate to `complementary_data()` and store the result back."""
        updated = transition.copy()
        self._current_transition = updated
        current = updated.get(TransitionKey.COMPLEMENTARY_DATA)
        if current is None or not isinstance(current, dict):
            raise ValueError("ComplementaryDataProcessorStep requires complementary data in the transition.")
        # Shallow-copy so the subclass cannot mutate the source dict.
        updated[TransitionKey.COMPLEMENTARY_DATA] = self.complementary_data(current.copy())
        return updated
class IdentityProcessorStep(ProcessorStep):
    """Pass-through step: transitions and features are returned untouched.

    Handy as a placeholder or when debugging pipeline wiring.
    """

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Return `transition` unchanged."""
        return transition

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """Return `features` unchanged."""
        return features
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/pipeline.py",
"license": "Apache License 2.0",
"lines": 1358,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/rename_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any
from lerobot.configs.types import PipelineFeatureType, PolicyFeature
from .pipeline import ObservationProcessorStep, ProcessorStepRegistry
@dataclass
@ProcessorStepRegistry.register(name="rename_observations_processor")
class RenameObservationsProcessorStep(ObservationProcessorStep):
    """Renames observation keys according to a configurable mapping.

    Useful for adapting an environment's native key names to the names
    expected by a LeRobot policy or other downstream consumers.

    Attributes:
        rename_map: Mapping from old key name to new key name. Keys absent
            from the map are passed through with their original names.
    """

    # Old-name -> new-name mapping; empty by default (pure pass-through).
    rename_map: dict[str, str] = field(default_factory=dict)

    def observation(self, observation):
        # Renamed-or-original key, same value; insertion order is preserved.
        return {self.rename_map.get(key, key): value for key, value in observation.items()}

    def get_config(self) -> dict[str, Any]:
        """Serializable configuration for this step."""
        return {"rename_map": self.rename_map}

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """Apply the same renaming to the observation feature specification.

        Keys present in `rename_map` are renamed; all others stay unchanged.
        """
        updated: dict[PipelineFeatureType, dict[str, PolicyFeature]] = dict(features)
        renamed: dict[str, PolicyFeature] = {}
        for key, feature in features[PipelineFeatureType.OBSERVATION].items():
            renamed[self.rename_map.get(key, key)] = feature
        updated[PipelineFeatureType.OBSERVATION] = renamed
        return updated
def rename_stats(stats: dict[str, dict[str, Any]], rename_map: dict[str, str]) -> dict[str, dict[str, Any]]:
    """Return a copy of `stats` whose top-level keys are renamed via `rename_map`.

    Keeps normalization statistics consistent with renamed observation/action
    features. The input dictionary is never mutated: sub-dicts are deep-copied,
    and a `None` sub-dict is replaced by an empty dict.

    Args:
        stats: Nested statistics dict keyed by feature name
            (e.g. `{"observation.state": {"mean": 0.5}}`).
        rename_map: Mapping from old feature names to new feature names; keys
            absent from the map keep their original names.

    Returns:
        A new statistics dictionary with renamed top-level keys, or an empty
        dict when `stats` is falsy.
    """
    if not stats:
        return {}
    return {
        rename_map.get(old_key, old_key): (deepcopy(sub) if sub is not None else {})
        for old_key, sub in stats.items()
    }
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/rename_processor.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:tests/processor/test_batch_conversion.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from lerobot.processor import DataProcessorPipeline, TransitionKey
from lerobot.processor.converters import batch_to_transition, transition_to_batch
from lerobot.utils.constants import ACTION, DONE, OBS_IMAGE, OBS_PREFIX, OBS_STATE, REWARD, TRUNCATED
def _dummy_batch():
    """Build a representative batch in the flat observation.*/next.* key format."""
    batch = {}
    # Two camera streams, consumed from the RNG in left-then-right order.
    for side in ("left", "right"):
        batch[f"{OBS_IMAGE}.{side}"] = torch.randn(1, 3, 128, 128)
    batch[OBS_STATE] = torch.tensor([[0.1, 0.2, 0.3, 0.4]])
    batch[ACTION] = torch.tensor([[0.5]])
    batch[REWARD] = 1.0
    batch[DONE] = False
    batch[TRUNCATED] = False
    batch["info"] = {"key": "value"}
    return batch
def test_observation_grouping_roundtrip():
    """An empty pipeline must return a batch with identical content."""
    pipeline = DataProcessorPipeline([])
    source = _dummy_batch()
    result = pipeline(source)
    # The set of observation.* keys must survive the round trip.
    source_obs = {k for k in source if k.startswith(OBS_PREFIX)}
    result_obs = {k for k in result if k.startswith(OBS_PREFIX)}
    assert source_obs == result_obs
    # Tensor-valued entries are unchanged.
    for key in (f"{OBS_IMAGE}.left", f"{OBS_IMAGE}.right", OBS_STATE, ACTION):
        assert torch.allclose(result[key], source[key])
    # Scalar and dict entries are unchanged.
    for key in (REWARD, DONE, TRUNCATED, "info"):
        assert result[key] == source[key]
def test_batch_to_transition_observation_grouping():
    """batch_to_transition must gather every observation.* entry into one dict."""
    batch = {
        f"{OBS_IMAGE}.top": torch.randn(1, 3, 128, 128),
        f"{OBS_IMAGE}.left": torch.randn(1, 3, 128, 128),
        OBS_STATE: [1, 2, 3, 4],
        ACTION: torch.tensor([0.1, 0.2, 0.3, 0.4]),
        REWARD: 1.5,
        DONE: True,
        TRUNCATED: False,
        "info": {"episode": 42},
    }
    transition = batch_to_transition(batch)
    obs = transition[TransitionKey.OBSERVATION]
    # Grouped observation: a dict holding every observation.* key.
    assert isinstance(obs, dict)
    for key in (f"{OBS_IMAGE}.top", f"{OBS_IMAGE}.left", OBS_STATE):
        assert key in obs
    # Payloads are carried over untouched.
    assert torch.allclose(obs[f"{OBS_IMAGE}.top"], batch[f"{OBS_IMAGE}.top"])
    assert torch.allclose(obs[f"{OBS_IMAGE}.left"], batch[f"{OBS_IMAGE}.left"])
    assert obs[OBS_STATE] == [1, 2, 3, 4]
    # Remaining transition fields.
    assert torch.allclose(transition[TransitionKey.ACTION], torch.tensor([0.1, 0.2, 0.3, 0.4]))
    assert transition[TransitionKey.REWARD] == 1.5
    assert transition[TransitionKey.DONE]
    assert not transition[TransitionKey.TRUNCATED]
    assert transition[TransitionKey.INFO] == {"episode": 42}
    assert transition[TransitionKey.COMPLEMENTARY_DATA] == {}
def test_transition_to_batch_observation_flattening():
    """transition_to_batch must spread the observation dict back into flat keys."""
    obs = {
        f"{OBS_IMAGE}.top": torch.randn(1, 3, 128, 128),
        f"{OBS_IMAGE}.left": torch.randn(1, 3, 128, 128),
        OBS_STATE: [1, 2, 3, 4],
    }
    transition = {
        TransitionKey.OBSERVATION: obs,
        TransitionKey.ACTION: "action_data",
        TransitionKey.REWARD: 1.5,
        TransitionKey.DONE: True,
        TransitionKey.TRUNCATED: False,
        TransitionKey.INFO: {"episode": 42},
        TransitionKey.COMPLEMENTARY_DATA: {},
    }
    batch = transition_to_batch(transition)
    # Every observation key is back at the top level with its value intact.
    for key in (f"{OBS_IMAGE}.top", f"{OBS_IMAGE}.left", OBS_STATE):
        assert key in batch
    assert torch.allclose(batch[f"{OBS_IMAGE}.top"], obs[f"{OBS_IMAGE}.top"])
    assert torch.allclose(batch[f"{OBS_IMAGE}.left"], obs[f"{OBS_IMAGE}.left"])
    assert batch[OBS_STATE] == [1, 2, 3, 4]
    # Non-observation fields are carried over unchanged.
    assert batch[ACTION] == "action_data"
    assert batch[REWARD] == 1.5
    assert batch[DONE]
    assert not batch[TRUNCATED]
    assert batch["info"] == {"episode": 42}
def test_no_observation_keys():
    """Without observation.* keys, the transition's observation is None."""
    batch = {
        ACTION: torch.tensor([1.0, 2.0]),
        REWARD: 2.0,
        DONE: False,
        TRUNCATED: True,
        "info": {"test": "no_obs"},
    }
    transition = batch_to_transition(batch)
    assert transition[TransitionKey.OBSERVATION] is None
    # Other fields are converted normally.
    assert torch.allclose(transition[TransitionKey.ACTION], torch.tensor([1.0, 2.0]))
    assert transition[TransitionKey.REWARD] == 2.0
    assert not transition[TransitionKey.DONE]
    assert transition[TransitionKey.TRUNCATED]
    assert transition[TransitionKey.INFO] == {"test": "no_obs"}
    # The inverse conversion restores every field.
    rebuilt = transition_to_batch(transition)
    assert torch.allclose(rebuilt[ACTION], torch.tensor([1.0, 2.0]))
    assert rebuilt[REWARD] == 2.0
    assert not rebuilt[DONE]
    assert rebuilt[TRUNCATED]
    assert rebuilt["info"] == {"test": "no_obs"}
def test_minimal_batch():
    """A batch with only a state and an action falls back to default fields."""
    batch = {OBS_STATE: "minimal_state", ACTION: torch.tensor([0.5])}
    transition = batch_to_transition(batch)
    assert transition[TransitionKey.OBSERVATION] == {OBS_STATE: "minimal_state"}
    assert torch.allclose(transition[TransitionKey.ACTION], torch.tensor([0.5]))
    # Defaults for everything absent from the batch.
    assert transition[TransitionKey.REWARD] == 0.0
    assert not transition[TransitionKey.DONE]
    assert not transition[TransitionKey.TRUNCATED]
    assert transition[TransitionKey.INFO] == {}
    assert transition[TransitionKey.COMPLEMENTARY_DATA] == {}
    # Round trip back to a batch.
    rebuilt = transition_to_batch(transition)
    assert rebuilt[OBS_STATE] == "minimal_state"
    assert torch.allclose(rebuilt[ACTION], torch.tensor([0.5]))
    assert rebuilt[REWARD] == 0.0
    assert not rebuilt[DONE]
    assert not rebuilt[TRUNCATED]
    assert rebuilt["info"] == {}
def test_empty_batch():
    """An empty batch converts to a transition made entirely of defaults."""
    transition = batch_to_transition({})
    assert transition[TransitionKey.OBSERVATION] is None
    assert transition[TransitionKey.ACTION] is None
    assert transition[TransitionKey.REWARD] == 0.0
    assert not transition[TransitionKey.DONE]
    assert not transition[TransitionKey.TRUNCATED]
    assert transition[TransitionKey.INFO] == {}
    assert transition[TransitionKey.COMPLEMENTARY_DATA] == {}
    # Round trip keeps the defaults.
    rebuilt = transition_to_batch(transition)
    assert rebuilt[ACTION] is None
    assert rebuilt[REWARD] == 0.0
    assert not rebuilt[DONE]
    assert not rebuilt[TRUNCATED]
    assert rebuilt["info"] == {}
def test_complex_nested_observation():
    """Nested dict observations survive a full batch->transition->batch round trip."""
    batch = {
        f"{OBS_IMAGE}.top": {"image": torch.randn(1, 3, 128, 128), "timestamp": 1234567890},
        f"{OBS_IMAGE}.left": {"image": torch.randn(1, 3, 128, 128), "timestamp": 1234567891},
        OBS_STATE: torch.randn(7),
        ACTION: torch.randn(8),
        REWARD: 3.14,
        DONE: False,
        TRUNCATED: True,
        "info": {"episode_length": 200, "success": True},
    }
    rebuilt = transition_to_batch(batch_to_transition(batch))
    # Same observation.* key set on both sides.
    original_keys = {k for k in batch if k.startswith(OBS_PREFIX)}
    rebuilt_keys = {k for k in rebuilt if k.startswith(OBS_PREFIX)}
    assert original_keys == rebuilt_keys
    # Plain tensors are preserved.
    assert torch.allclose(batch[OBS_STATE], rebuilt[OBS_STATE])
    # Tensors nested inside dicts are preserved too.
    for cam in ("top", "left"):
        key = f"{OBS_IMAGE}.{cam}"
        assert torch.allclose(batch[key]["image"], rebuilt[key]["image"])
    assert torch.allclose(batch[ACTION], rebuilt[ACTION])
    # Scalars and the info dict are preserved.
    for key in (REWARD, DONE, TRUNCATED, "info"):
        assert batch[key] == rebuilt[key]
def test_custom_converter():
    """Custom to_transition/to_output converters are honored by the pipeline."""

    def doubling_to_transition(batch):
        # Standard conversion, then double the reward to prove the hook ran.
        tr = batch_to_transition(batch)
        reward = tr.get(TransitionKey.REWARD, 0.0)
        patched = tr.copy()
        patched[TransitionKey.REWARD] = reward * 2 if reward is not None else 0.0
        return patched

    processor = DataProcessorPipeline(
        steps=[], to_transition=doubling_to_transition, to_output=transition_to_batch
    )
    batch = {
        OBS_STATE: torch.randn(1, 4),
        ACTION: torch.randn(1, 2),
        REWARD: 1.0,
        DONE: False,
    }
    result = processor(batch)
    # The custom converter doubled the reward; everything else is untouched.
    assert result[REWARD] == 2.0
    for key in (OBS_STATE, ACTION):
        assert torch.allclose(result[key], batch[key])
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_batch_conversion.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_normalize_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock
import numpy as np
import pytest
import torch
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.processor import (
DataProcessorPipeline,
IdentityProcessorStep,
NormalizerProcessorStep,
TransitionKey,
UnnormalizerProcessorStep,
hotswap_stats,
)
from lerobot.processor.converters import create_transition, identity_transition, to_tensor
from lerobot.utils.constants import ACTION, OBS_IMAGE, OBS_STATE, OBS_STR
from lerobot.utils.utils import auto_select_torch_device
def test_numpy_conversion():
    """NumPy stats arrays are converted to tensors with matching values."""
    raw = {
        OBS_IMAGE: {
            "mean": np.array([0.5, 0.5, 0.5]),
            "std": np.array([0.2, 0.2, 0.2]),
        }
    }
    converted = to_tensor(raw)
    for stat, expected in (("mean", [0.5, 0.5, 0.5]), ("std", [0.2, 0.2, 0.2])):
        value = converted[OBS_IMAGE][stat]
        assert isinstance(value, torch.Tensor)
        assert torch.allclose(value, torch.tensor(expected))
def test_tensor_conversion():
    """Existing tensors come out with float32 dtype."""
    raw = {
        ACTION: {
            "mean": torch.tensor([0.0, 0.0]),
            "std": torch.tensor([1.0, 1.0]),
        }
    }
    converted = to_tensor(raw)
    for stat in ("mean", "std"):
        assert converted[ACTION][stat].dtype == torch.float32
def test_scalar_conversion():
    """Python scalars become 0-d tensors carrying the same value."""
    converted = to_tensor({"reward": {"mean": 0.5, "std": 0.1}})
    assert torch.allclose(converted["reward"]["mean"], torch.tensor(0.5))
    assert torch.allclose(converted["reward"]["std"], torch.tensor(0.1))
def test_list_conversion():
    """Plain Python lists become tensors element-for-element."""
    converted = to_tensor({OBS_STATE: {"min": [0.0, -1.0, -2.0], "max": [1.0, 1.0, 2.0]}})
    assert torch.allclose(converted[OBS_STATE]["min"], torch.tensor([0.0, -1.0, -2.0]))
    assert torch.allclose(converted[OBS_STATE]["max"], torch.tensor([1.0, 1.0, 2.0]))
def test_unsupported_type():
    """A non-numeric stat value must raise a TypeError."""
    bad_stats = {"bad_key": {"mean": "string_value"}}
    with pytest.raises(TypeError, match="Unsupported type"):
        to_tensor(bad_stats)
# Helper functions to create feature maps and norm maps
def _create_observation_features():
    """Feature spec: one 3x96x96 visual input plus a 2-dim state vector."""
    visual = PolicyFeature(FeatureType.VISUAL, (3, 96, 96))
    state = PolicyFeature(FeatureType.STATE, (2,))
    return {OBS_IMAGE: visual, OBS_STATE: state}
def _create_observation_norm_map():
    """Map visual features to MEAN_STD and state features to MIN_MAX."""
    norm_map = {FeatureType.VISUAL: NormalizationMode.MEAN_STD}
    norm_map[FeatureType.STATE] = NormalizationMode.MIN_MAX
    return norm_map
# Fixtures for observation normalisation tests using NormalizerProcessorStep
@pytest.fixture
def observation_stats():
    """Image mean/std stats plus state min/max stats for the normalizer tests."""
    image_stats = {"mean": np.array([0.5, 0.5, 0.5]), "std": np.array([0.2, 0.2, 0.2])}
    state_stats = {"min": np.array([0.0, -1.0]), "max": np.array([1.0, 1.0])}
    return {OBS_IMAGE: image_stats, OBS_STATE: state_stats}
@pytest.fixture
def observation_normalizer(observation_stats):
    """NormalizerProcessorStep configured with observation stats only (no action stats)."""
    return NormalizerProcessorStep(
        features=_create_observation_features(),
        norm_map=_create_observation_norm_map(),
        stats=observation_stats,
    )
def test_mean_std_normalization(observation_normalizer):
    """Visual features are normalized as (x - mean) / std."""
    raw_image = torch.tensor([0.7, 0.5, 0.3])
    obs = {OBS_IMAGE: raw_image, OBS_STATE: torch.tensor([0.5, 0.0])}
    result = observation_normalizer(create_transition(observation=obs))
    # mean=0.5 and std=0.2 come from the observation_stats fixture.
    expected = (raw_image - 0.5) / 0.2
    assert torch.allclose(result[TransitionKey.OBSERVATION][OBS_IMAGE], expected)
def test_min_max_normalization(observation_normalizer):
    """MIN_MAX mode rescales state values from [min, max] onto [-1, 1]."""
    tr = create_transition(observation={OBS_STATE: torch.tensor([0.5, 0.0])})
    result = observation_normalizer(tr)[TransitionKey.OBSERVATION]
    # 0.5 is the midpoint of [0, 1] and 0.0 the midpoint of [-1, 1],
    # so both components land exactly on 0 after rescaling.
    assert torch.allclose(result[OBS_STATE], torch.zeros(2), atol=1e-6)
def test_quantile_normalization():
    """QUANTILES mode maps the [q01, q99] range onto [-1, 1]."""
    features = {"observation.state": PolicyFeature(FeatureType.STATE, (2,))}
    norm_map = {FeatureType.STATE: NormalizationMode.QUANTILES}
    stats = {"observation.state": {"q01": np.array([0.1, -0.8]), "q99": np.array([0.9, 0.8])}}
    step = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    tr = create_transition(observation={"observation.state": torch.tensor([0.5, 0.0])})
    result = step(tr)[TransitionKey.OBSERVATION]
    # Both inputs sit exactly midway between their quantiles, so both map to 0:
    #   2 * (0.5 - 0.1) / (0.9 - 0.1) - 1 = 0 and 2 * (0.0 + 0.8) / 1.6 - 1 = 0
    assert torch.allclose(result["observation.state"], torch.zeros(2), atol=1e-6)
def test_quantile10_normalization():
    """QUANTILE10 mode maps the [q10, q90] range onto [-1, 1]."""
    features = {"observation.state": PolicyFeature(FeatureType.STATE, (2,))}
    norm_map = {FeatureType.STATE: NormalizationMode.QUANTILE10}
    stats = {"observation.state": {"q10": np.array([0.2, -0.6]), "q90": np.array([0.8, 0.6])}}
    step = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    tr = create_transition(observation={"observation.state": torch.tensor([0.5, 0.0])})
    result = step(tr)[TransitionKey.OBSERVATION]
    # Both inputs are the midpoints of their quantile ranges, so both normalize to 0:
    #   2 * (0.5 - 0.2) / (0.8 - 0.2) - 1 = 0 and 2 * (0.0 + 0.6) / 1.2 - 1 = 0
    assert torch.allclose(result["observation.state"], torch.zeros(2), atol=1e-6)
def test_quantile_unnormalization():
    """Normalizing then unnormalizing with QUANTILES recovers the input action."""
    features = {"action": PolicyFeature(FeatureType.ACTION, (2,))}
    norm_map = {FeatureType.ACTION: NormalizationMode.QUANTILES}
    stats = {"action": {"q01": np.array([0.1, -0.8]), "q99": np.array([0.9, 0.8])}}
    forward = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    backward = UnnormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    original = torch.tensor([0.5, 0.0])
    # Round-trip: normalize, then undo it.
    roundtrip = backward(forward(create_transition(action=original)))
    assert torch.allclose(roundtrip[TransitionKey.ACTION], original, atol=1e-6)
def test_quantile_division_by_zero():
    """Identical q01/q99 values must not produce NaN or inf outputs."""
    features = {"observation.state": PolicyFeature(FeatureType.STATE, (1,))}
    norm_map = {FeatureType.STATE: NormalizationMode.QUANTILES}
    # Degenerate stats: both quantiles equal, so the denominator would be zero
    # without the epsilon guard inside the normalizer.
    stats = {"observation.state": {"q01": np.array([0.5]), "q99": np.array([0.5])}}
    step = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    tr = create_transition(observation={"observation.state": torch.tensor([0.5])})
    result = step(tr)[TransitionKey.OBSERVATION]
    assert torch.isfinite(result["observation.state"]).all()
def test_quantile_partial_stats():
    """Missing one of the required quantiles raises a ValueError."""
    features = {"observation.state": PolicyFeature(FeatureType.STATE, (2,))}
    norm_map = {FeatureType.STATE: NormalizationMode.QUANTILES}
    # q99 is deliberately absent, so normalization cannot proceed.
    incomplete_stats = {"observation.state": {"q01": np.array([0.1, -0.8])}}
    step = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=incomplete_stats)
    tr = create_transition(observation={"observation.state": torch.tensor([0.5, 0.0])})
    with pytest.raises(ValueError, match="QUANTILES normalization mode requires q01 and q99 stats"):
        step(tr)
def test_quantile_mixed_with_other_modes():
    """Each feature type uses its own mode: MEAN_STD, QUANTILES and QUANTILE10."""
    features = {
        "observation.image": PolicyFeature(FeatureType.VISUAL, (3,)),
        "observation.state": PolicyFeature(FeatureType.STATE, (2,)),
        "action": PolicyFeature(FeatureType.ACTION, (2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.MEAN_STD,
        FeatureType.STATE: NormalizationMode.QUANTILES,
        FeatureType.ACTION: NormalizationMode.QUANTILE10,
    }
    stats = {
        "observation.image": {"mean": [0.5, 0.5, 0.5], "std": [0.2, 0.2, 0.2]},
        "observation.state": {"q01": [0.1, -0.8], "q99": [0.9, 0.8]},
        "action": {"q10": [0.2, -0.6], "q90": [0.8, 0.6]},
    }
    step = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    raw_image = torch.tensor([0.7, 0.5, 0.3])
    tr = create_transition(
        observation={
            "observation.image": raw_image,
            "observation.state": torch.tensor([0.5, 0.0]),
        },
        action=torch.tensor([0.5, 0.0]),
    )
    result = step(tr)
    obs = result[TransitionKey.OBSERVATION]
    # MEAN_STD: (x - 0.5) / 0.2 elementwise.
    assert torch.allclose(obs["observation.image"], (raw_image - 0.5) / 0.2)
    # QUANTILES / QUANTILE10: both inputs are midway between their quantiles,
    # so they normalize to 0.
    assert torch.allclose(obs["observation.state"], torch.zeros(2), atol=1e-6)
    assert torch.allclose(result[TransitionKey.ACTION], torch.zeros(2), atol=1e-6)
def test_quantile_with_missing_stats():
    """With no stats at all, QUANTILES mode passes values through unchanged."""
    features = {"observation.state": PolicyFeature(FeatureType.STATE, (2,))}
    norm_map = {FeatureType.STATE: NormalizationMode.QUANTILES}
    step = NormalizerProcessorStep(features=features, norm_map=norm_map, stats={})
    raw_state = torch.tensor([0.5, 0.0])
    result = step(create_transition(observation={"observation.state": raw_state}))
    assert torch.allclose(result[TransitionKey.OBSERVATION]["observation.state"], raw_state)
def test_selective_normalization(observation_stats):
    """normalize_observation_keys restricts normalization to the listed keys."""
    step = NormalizerProcessorStep(
        features=_create_observation_features(),
        norm_map=_create_observation_norm_map(),
        stats=observation_stats,
        normalize_observation_keys={OBS_IMAGE},
    )
    raw_image = torch.tensor([0.7, 0.5, 0.3])
    raw_state = torch.tensor([0.5, 0.0])
    result = step(create_transition(observation={OBS_IMAGE: raw_image, OBS_STATE: raw_state}))
    obs = result[TransitionKey.OBSERVATION]
    # The image key is listed, so it gets mean/std normalization...
    assert torch.allclose(obs[OBS_IMAGE], (raw_image - 0.5) / 0.2)
    # ...while the state key is skipped entirely.
    assert torch.allclose(obs[OBS_STATE], raw_state)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_device_compatibility(observation_stats):
    """Normalized outputs stay on the same CUDA device as the inputs."""
    step = NormalizerProcessorStep(
        features=_create_observation_features(),
        norm_map=_create_observation_norm_map(),
        stats=observation_stats,
    )
    cuda_obs = {OBS_IMAGE: torch.tensor([0.7, 0.5, 0.3]).cuda()}
    result = step(create_transition(observation=cuda_obs))
    assert result[TransitionKey.OBSERVATION][OBS_IMAGE].device.type == "cuda"
def test_from_lerobot_dataset():
    """from_lerobot_dataset pulls both observation and action stats from dataset meta."""
    mock_dataset = Mock()
    mock_dataset.meta.stats = {
        OBS_IMAGE: {"mean": [0.5], "std": [0.2]},
        ACTION: {"mean": [0.0], "std": [1.0]},
    }
    features = {
        OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3, 96, 96)),
        ACTION: PolicyFeature(FeatureType.ACTION, (1,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.MEAN_STD,
        FeatureType.ACTION: NormalizationMode.MEAN_STD,
    }
    step = NormalizerProcessorStep.from_lerobot_dataset(mock_dataset, features, norm_map)
    # Both feature groups must be materialized into tensor stats.
    assert OBS_IMAGE in step._tensor_stats
    assert ACTION in step._tensor_stats
def test_state_dict_save_load(observation_normalizer):
    """A normalizer rebuilt from a saved state_dict behaves identically.

    Saves the fitted stats via ``state_dict``, loads them into a fresh
    ``NormalizerProcessorStep`` created with empty stats, and checks that
    both instances produce the same normalized output.
    """
    # Save state (removed a leftover debug print of the state dict).
    state_dict = observation_normalizer.state_dict()
    # Create new normalizer and load state
    features = _create_observation_features()
    norm_map = _create_observation_norm_map()
    new_normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats={})
    new_normalizer.load_state_dict(state_dict)
    # Test that it works the same
    observation = {OBS_IMAGE: torch.tensor([0.7, 0.5, 0.3])}
    transition = create_transition(observation=observation)
    result1 = observation_normalizer(transition)[TransitionKey.OBSERVATION]
    result2 = new_normalizer(transition)[TransitionKey.OBSERVATION]
    assert torch.allclose(result1[OBS_IMAGE], result2[OBS_IMAGE])
# Fixtures for ActionUnnormalizer tests
@pytest.fixture
def action_stats_mean_std():
    """Per-dimension mean/std stats for a 3-dof action."""
    return {"mean": np.array([0.0, 0.0, 0.0]), "std": np.array([1.0, 2.0, 0.5])}
@pytest.fixture
def action_stats_min_max():
    """Per-dimension min/max stats for a 3-dof action."""
    return {"min": np.array([-1.0, -2.0, 0.0]), "max": np.array([1.0, 2.0, 1.0])}
def _create_action_features():
    """Feature map holding a single 3-dof action entry."""
    return {ACTION: PolicyFeature(FeatureType.ACTION, (3,))}
def _create_action_norm_map_mean_std():
    """Norm map applying MEAN_STD to actions."""
    return {FeatureType.ACTION: NormalizationMode.MEAN_STD}
def _create_action_norm_map_min_max():
    """Norm map applying MIN_MAX to actions."""
    return {FeatureType.ACTION: NormalizationMode.MIN_MAX}
def test_mean_std_unnormalization(action_stats_mean_std):
    """The unnormalizer inverts mean/std scaling: x * std + mean."""
    step = UnnormalizerProcessorStep(
        features=_create_action_features(),
        norm_map=_create_action_norm_map_mean_std(),
        stats={ACTION: action_stats_mean_std},
    )
    result = step(create_transition(action=torch.tensor([1.0, -0.5, 2.0])))
    # std = [1, 2, 0.5] and mean = 0, so the output is [1.0, -1.0, 1.0].
    assert torch.allclose(result[TransitionKey.ACTION], torch.tensor([1.0, -1.0, 1.0]))
def test_min_max_unnormalization(action_stats_min_max):
    """The unnormalizer maps [-1, 1] actions back onto their [min, max] ranges."""
    step = UnnormalizerProcessorStep(
        features=_create_action_features(),
        norm_map=_create_action_norm_map_min_max(),
        stats={ACTION: action_stats_min_max},
    )
    result = step(create_transition(action=torch.tensor([0.0, -1.0, 1.0])))
    # (x + 1) / 2 * (max - min) + min per dimension:
    #   0.0 -> midpoint of [-1, 1] = 0.0
    #  -1.0 -> lower bound of [-2, 2] = -2.0
    #   1.0 -> upper bound of [0, 1] = 1.0
    assert torch.allclose(result[TransitionKey.ACTION], torch.tensor([0.0, -2.0, 1.0]))
def test_tensor_action_input(action_stats_mean_std):
    """Tensor actions stay tensors through unnormalization."""
    step = UnnormalizerProcessorStep(
        features=_create_action_features(),
        norm_map=_create_action_norm_map_mean_std(),
        stats={ACTION: action_stats_mean_std},
    )
    result = step(create_transition(action=torch.tensor([1.0, -0.5, 2.0], dtype=torch.float32)))
    out = result[TransitionKey.ACTION]
    assert isinstance(out, torch.Tensor)
    assert torch.allclose(out, torch.tensor([1.0, -1.0, 1.0]))
def test_none_action(action_stats_mean_std):
    """A transition without an action is returned untouched."""
    step = UnnormalizerProcessorStep(
        features=_create_action_features(),
        norm_map=_create_action_norm_map_mean_std(),
        stats={ACTION: action_stats_mean_std},
    )
    empty_transition = create_transition()
    assert step(empty_transition) == empty_transition
def test_action_from_lerobot_dataset():
    """from_lerobot_dataset loads action stats into the unnormalizer."""
    mock_dataset = Mock()
    mock_dataset.meta.stats = {ACTION: {"mean": [0.0], "std": [1.0]}}
    step = UnnormalizerProcessorStep.from_lerobot_dataset(
        mock_dataset,
        {ACTION: PolicyFeature(FeatureType.ACTION, (1,))},
        {FeatureType.ACTION: NormalizationMode.MEAN_STD},
    )
    assert "mean" in step._tensor_stats[ACTION]
# Fixtures for NormalizerProcessorStep tests
@pytest.fixture
def full_stats():
    """Stats covering image (mean/std), state (min/max) and action (mean/std)."""
    return {
        OBS_IMAGE: {"mean": np.array([0.5, 0.5, 0.5]), "std": np.array([0.2, 0.2, 0.2])},
        OBS_STATE: {"min": np.array([0.0, -1.0]), "max": np.array([1.0, 1.0])},
        ACTION: {"mean": np.array([0.0, 0.0]), "std": np.array([1.0, 2.0])},
    }
def _create_full_features():
    """Feature map with image, state and action entries."""
    specs = (
        (OBS_IMAGE, FeatureType.VISUAL, (3, 96, 96)),
        (OBS_STATE, FeatureType.STATE, (2,)),
        (ACTION, FeatureType.ACTION, (2,)),
    )
    return {key: PolicyFeature(ftype, shape) for key, ftype, shape in specs}
def _create_full_norm_map():
    """MEAN_STD for visual and action features, MIN_MAX for state."""
    modes = (
        (FeatureType.VISUAL, NormalizationMode.MEAN_STD),
        (FeatureType.STATE, NormalizationMode.MIN_MAX),
        (FeatureType.ACTION, NormalizationMode.MEAN_STD),
    )
    return dict(modes)
@pytest.fixture
def normalizer_processor(full_stats):
    """NormalizerProcessorStep wired with the full feature/stat set."""
    return NormalizerProcessorStep(
        features=_create_full_features(),
        norm_map=_create_full_norm_map(),
        stats=full_stats,
    )
def test_combined_normalization(normalizer_processor):
    """Observations and actions are normalized together; other fields pass through."""
    raw_image = torch.tensor([0.7, 0.5, 0.3])
    transition = create_transition(
        observation={OBS_IMAGE: raw_image, OBS_STATE: torch.tensor([0.5, 0.0])},
        action=torch.tensor([1.0, -0.5]),
        reward=1.0,
        done=False,
        truncated=False,
        info={},
        complementary_data={},
    )
    result = normalizer_processor(transition)
    # Image: mean/std normalization with mean=0.5, std=0.2.
    assert torch.allclose(result[TransitionKey.OBSERVATION][OBS_IMAGE], (raw_image - 0.5) / 0.2)
    # Action: mean=[0, 0], std=[1, 2] -> [(1 - 0) / 1, (-0.5 - 0) / 2].
    assert torch.allclose(result[TransitionKey.ACTION], torch.tensor([1.0, -0.25]))
    # Reward and done flags are untouched by normalization.
    assert result[TransitionKey.REWARD] == 1.0
    assert not result[TransitionKey.DONE]
def test_processor_from_lerobot_dataset(full_stats):
    """from_lerobot_dataset forwards normalize_observation_keys and loads all stats."""
    mock_dataset = Mock()
    mock_dataset.meta.stats = full_stats
    step = NormalizerProcessorStep.from_lerobot_dataset(
        mock_dataset,
        _create_full_features(),
        _create_full_norm_map(),
        normalize_observation_keys={OBS_IMAGE},
    )
    assert step.normalize_observation_keys == {OBS_IMAGE}
    assert OBS_IMAGE in step._tensor_stats
    assert ACTION in step._tensor_stats
def test_get_config(full_stats):
    """get_config serializes keys, eps, features and norm_map to plain data."""
    step = NormalizerProcessorStep(
        features=_create_full_features(),
        norm_map=_create_full_norm_map(),
        stats=full_stats,
        normalize_observation_keys={OBS_IMAGE},
        eps=1e-6,
    )
    expected_config = {
        "normalize_observation_keys": [OBS_IMAGE],
        "eps": 1e-6,
        "features": {
            OBS_IMAGE: {"type": "VISUAL", "shape": (3, 96, 96)},
            OBS_STATE: {"type": "STATE", "shape": (2,)},
            ACTION: {"type": "ACTION", "shape": (2,)},
        },
        "norm_map": {
            "VISUAL": "MEAN_STD",
            "STATE": "MIN_MAX",
            "ACTION": "MEAN_STD",
        },
    }
    assert step.get_config() == expected_config
def test_integration_with_robot_processor(normalizer_processor):
    """The normalizer step composes into a DataProcessorPipeline."""
    pipeline = DataProcessorPipeline(
        [normalizer_processor], to_transition=identity_transition, to_output=identity_transition
    )
    transition = create_transition(
        observation={
            OBS_IMAGE: torch.tensor([0.7, 0.5, 0.3]),
            OBS_STATE: torch.tensor([0.5, 0.0]),
        },
        action=torch.tensor([1.0, -0.5]),
        reward=1.0,
        done=False,
        truncated=False,
        info={},
        complementary_data={},
    )
    processed = pipeline(transition)
    # Pipeline output preserves the transition structure.
    assert isinstance(processed[TransitionKey.OBSERVATION], dict)
    assert isinstance(processed[TransitionKey.ACTION], torch.Tensor)
# Edge case tests
def test_empty_observation():
    """A transition without observations passes through unchanged."""
    step = NormalizerProcessorStep(
        features={OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3, 96, 96))},
        norm_map={FeatureType.VISUAL: NormalizationMode.MEAN_STD},
        stats={OBS_IMAGE: {"mean": [0.5], "std": [0.2]}},
    )
    empty_transition = create_transition()
    assert step(empty_transition) == empty_transition
def test_empty_stats():
    """With no stats at all, observations come back unchanged."""
    step = NormalizerProcessorStep(
        features={OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3, 96, 96))},
        norm_map={FeatureType.VISUAL: NormalizationMode.MEAN_STD},
        stats={},
    )
    raw = torch.tensor([0.5])
    result = step(create_transition(observation={OBS_IMAGE: raw}))
    assert torch.allclose(result[TransitionKey.OBSERVATION][OBS_IMAGE], raw)
def test_partial_stats():
    """Incomplete MEAN_STD stats (missing std) raise a ValueError."""
    step = NormalizerProcessorStep(
        features={OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3, 96, 96))},
        norm_map={FeatureType.VISUAL: NormalizationMode.MEAN_STD},
        stats={OBS_IMAGE: {"mean": [0.5]}},  # std is deliberately missing
    )
    tr = create_transition(observation={OBS_IMAGE: torch.tensor([0.7])})
    with pytest.raises(ValueError, match="MEAN_STD normalization mode requires mean and std stats"):
        step(tr)[TransitionKey.OBSERVATION]
def test_missing_action_stats_no_error():
    """Datasets without action stats build an unnormalizer lacking the action key."""
    mock_dataset = Mock()
    mock_dataset.meta.stats = {OBS_IMAGE: {"mean": [0.5], "std": [0.2]}}
    step = UnnormalizerProcessorStep.from_lerobot_dataset(
        mock_dataset,
        {OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3, 96, 96))},
        {FeatureType.VISUAL: NormalizationMode.MEAN_STD},
    )
    assert ACTION not in step._tensor_stats
def test_serialization_roundtrip(full_stats):
    """Features and norm_map survive a get_config serialization round-trip.

    Serializes a NormalizerProcessorStep to its config, rebuilds a new step
    from that config, and verifies both steps produce identical outputs and
    expose equivalent transformed features.
    """
    features = _create_full_features()
    norm_map = _create_full_norm_map()
    original_processor = NormalizerProcessorStep(
        features=features,
        norm_map=norm_map,
        stats=full_stats,
        normalize_observation_keys={OBS_IMAGE},
        eps=1e-6,
    )
    # Get config (serialization)
    config = original_processor.get_config()
    # Create a new processor from the config (deserialization)
    new_processor = NormalizerProcessorStep(
        features=config["features"],
        norm_map=config["norm_map"],
        stats=full_stats,
        normalize_observation_keys=set(config["normalize_observation_keys"]),
        eps=config["eps"],
    )
    # Test that both processors work the same way
    observation = {
        OBS_IMAGE: torch.tensor([0.7, 0.5, 0.3]),
        OBS_STATE: torch.tensor([0.5, 0.0]),
    }
    action = torch.tensor([1.0, -0.5])
    transition = create_transition(
        observation=observation,
        action=action,
        reward=1.0,
        done=False,
        truncated=False,
        info={},
        complementary_data={},
    )
    result1 = original_processor(transition)
    result2 = new_processor(transition)
    # Compare results
    assert torch.allclose(
        result1[TransitionKey.OBSERVATION][OBS_IMAGE],
        result2[TransitionKey.OBSERVATION][OBS_IMAGE],
    )
    assert torch.allclose(result1[TransitionKey.ACTION], result2[TransitionKey.ACTION])
    # Verify features and norm_map are correctly reconstructed.
    # Compute each transformed feature map once instead of once per assertion.
    new_features = new_processor.transform_features(features)
    original_features = original_processor.transform_features(features)
    assert new_features.keys() == original_features.keys()
    for key, new_feature in new_features.items():
        assert new_feature.type == original_features[key].type
        assert new_feature.shape == original_features[key].shape
    assert new_processor.norm_map == original_processor.norm_map
# Identity normalization tests
def test_identity_normalization_observations():
    """IDENTITY mode leaves observations untouched while other modes still apply."""
    features = {
        OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3, 96, 96)),
        OBS_STATE: PolicyFeature(FeatureType.STATE, (2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.IDENTITY,
        FeatureType.STATE: NormalizationMode.MEAN_STD,
    }
    stats = {
        OBS_IMAGE: {"mean": [0.5, 0.5, 0.5], "std": [0.2, 0.2, 0.2]},
        OBS_STATE: {"mean": [0.0, 0.0], "std": [1.0, 1.0]},
    }
    step = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    raw_image = torch.tensor([0.7, 0.5, 0.3])
    raw_state = torch.tensor([1.0, -0.5])
    result = step(create_transition(observation={OBS_IMAGE: raw_image, OBS_STATE: raw_state}))
    obs = result[TransitionKey.OBSERVATION]
    # IDENTITY: the image stats are present but ignored.
    assert torch.allclose(obs[OBS_IMAGE], raw_image)
    # MEAN_STD with zero mean and unit std returns the input values.
    assert torch.allclose(obs[OBS_STATE], raw_state)
def test_identity_normalization_actions():
    """IDENTITY mode leaves actions untouched even when stats exist."""
    step = NormalizerProcessorStep(
        features={ACTION: PolicyFeature(FeatureType.ACTION, (2,))},
        norm_map={FeatureType.ACTION: NormalizationMode.IDENTITY},
        stats={ACTION: {"mean": [0.0, 0.0], "std": [1.0, 2.0]}},
    )
    raw_action = torch.tensor([1.0, -0.5])
    result = step(create_transition(action=raw_action))
    assert torch.allclose(result[TransitionKey.ACTION], raw_action)
def test_identity_unnormalization_observations():
    """IDENTITY skips unnormalization while MIN_MAX still maps back to [min, max]."""
    features = {
        OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3, 96, 96)),
        OBS_STATE: PolicyFeature(FeatureType.STATE, (2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.IDENTITY,
        FeatureType.STATE: NormalizationMode.MIN_MAX,
    }
    stats = {
        OBS_IMAGE: {"mean": [0.5, 0.5, 0.5], "std": [0.2, 0.2, 0.2]},
        OBS_STATE: {"min": [-1.0, -1.0], "max": [1.0, 1.0]},
    }
    step = UnnormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    raw_image = torch.tensor([0.7, 0.5, 0.3])
    normalized_state = torch.tensor([0.0, -1.0])  # values already in [-1, 1]
    result = step(
        create_transition(observation={OBS_IMAGE: raw_image, OBS_STATE: normalized_state})
    )
    obs = result[TransitionKey.OBSERVATION]
    # IDENTITY: image passes through untouched.
    assert torch.allclose(obs[OBS_IMAGE], raw_image)
    # MIN_MAX inverse: (x + 1) / 2 * (max - min) + min maps 0.0 -> 0.0 and -1.0 -> -1.0.
    assert torch.allclose(obs[OBS_STATE], torch.tensor([0.0, -1.0]))
def test_identity_unnormalization_actions():
    """IDENTITY mode skips action unnormalization entirely."""
    step = UnnormalizerProcessorStep(
        features={ACTION: PolicyFeature(FeatureType.ACTION, (2,))},
        norm_map={FeatureType.ACTION: NormalizationMode.IDENTITY},
        stats={ACTION: {"min": [-1.0, -2.0], "max": [1.0, 2.0]}},
    )
    normalized_action = torch.tensor([0.5, -0.8])
    result = step(create_transition(action=normalized_action))
    assert torch.allclose(result[TransitionKey.ACTION], normalized_action)
def test_identity_with_missing_stats():
    """IDENTITY mode needs no stats: both directions are pure pass-throughs."""
    features = {
        OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3, 96, 96)),
        ACTION: PolicyFeature(FeatureType.ACTION, (2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.IDENTITY,
        FeatureType.ACTION: NormalizationMode.IDENTITY,
    }
    normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats={})
    unnormalizer = UnnormalizerProcessorStep(features=features, norm_map=norm_map, stats={})
    raw_image = torch.tensor([0.7, 0.5, 0.3])
    raw_action = torch.tensor([1.0, -0.5])
    tr = create_transition(observation={OBS_IMAGE: raw_image}, action=raw_action)
    # Neither direction should raise, and both must return the data unchanged.
    for step in (normalizer, unnormalizer):
        result = step(tr)
        assert torch.allclose(result[TransitionKey.OBSERVATION][OBS_IMAGE], raw_image)
        assert torch.allclose(result[TransitionKey.ACTION], raw_action)
def test_identity_mixed_with_other_modes():
    """IDENTITY, MEAN_STD and MIN_MAX coexist, each keyed by feature type."""
    features = {
        OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3,)),
        OBS_STATE: PolicyFeature(FeatureType.STATE, (2,)),
        ACTION: PolicyFeature(FeatureType.ACTION, (2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.IDENTITY,
        FeatureType.STATE: NormalizationMode.MEAN_STD,
        FeatureType.ACTION: NormalizationMode.MIN_MAX,
    }
    stats = {
        OBS_IMAGE: {"mean": [0.5, 0.5, 0.5], "std": [0.2, 0.2, 0.2]},  # ignored by IDENTITY
        OBS_STATE: {"mean": [0.0, 0.0], "std": [1.0, 1.0]},
        ACTION: {"min": [-1.0, -1.0], "max": [1.0, 1.0]},
    }
    step = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    raw_image = torch.tensor([0.7, 0.5, 0.3])
    raw_state = torch.tensor([1.0, -0.5])
    raw_action = torch.tensor([0.5, 0.0])
    result = step(
        create_transition(
            observation={OBS_IMAGE: raw_image, OBS_STATE: raw_state}, action=raw_action
        )
    )
    obs = result[TransitionKey.OBSERVATION]
    # IDENTITY: image untouched.
    assert torch.allclose(obs[OBS_IMAGE], raw_image)
    # MEAN_STD with zero mean and unit std: (x - 0) / 1 == x.
    assert torch.allclose(obs[OBS_STATE], raw_state)
    # MIN_MAX over [-1, 1]: 2 * (x + 1) / 2 - 1 == x, so the action is unchanged too.
    assert torch.allclose(result[TransitionKey.ACTION], torch.tensor([0.5, 0.0]))
def test_identity_defaults_when_not_in_norm_map():
    """Feature types absent from norm_map default to IDENTITY."""
    features = {
        OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3,)),
        OBS_STATE: PolicyFeature(FeatureType.STATE, (2,)),
    }
    # VISUAL has no entry, so the normalizer should fall back to IDENTITY for it.
    norm_map = {FeatureType.STATE: NormalizationMode.MEAN_STD}
    stats = {
        OBS_IMAGE: {"mean": [0.5, 0.5, 0.5], "std": [0.2, 0.2, 0.2]},
        OBS_STATE: {"mean": [0.0, 0.0], "std": [1.0, 1.0]},
    }
    step = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    raw_image = torch.tensor([0.7, 0.5, 0.3])
    raw_state = torch.tensor([1.0, -0.5])
    result = step(create_transition(observation={OBS_IMAGE: raw_image, OBS_STATE: raw_state}))
    obs = result[TransitionKey.OBSERVATION]
    # Image passes through (defaulted to IDENTITY).
    assert torch.allclose(obs[OBS_IMAGE], raw_image)
    # State is explicitly MEAN_STD; zero mean and unit std keep the values.
    assert torch.allclose(obs[OBS_STATE], raw_state)
def test_identity_roundtrip():
    """Normalize followed by unnormalize under IDENTITY returns the exact input."""
    features = {
        OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3,)),
        ACTION: PolicyFeature(FeatureType.ACTION, (2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.IDENTITY,
        FeatureType.ACTION: NormalizationMode.IDENTITY,
    }
    stats = {
        OBS_IMAGE: {"mean": [0.5, 0.5, 0.5], "std": [0.2, 0.2, 0.2]},
        ACTION: {"min": [-1.0, -1.0], "max": [1.0, 1.0]},
    }
    forward = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    backward = UnnormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    raw_image = torch.tensor([0.7, 0.5, 0.3])
    raw_action = torch.tensor([0.5, -0.2])
    start = create_transition(observation={OBS_IMAGE: raw_image}, action=raw_action)
    roundtrip = backward(forward(start))
    assert torch.allclose(roundtrip[TransitionKey.OBSERVATION][OBS_IMAGE], raw_image)
    assert torch.allclose(roundtrip[TransitionKey.ACTION], raw_action)
def test_identity_config_serialization():
    """Test that IDENTITY mode is properly saved and loaded in config."""
    features = {
        OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3,)),
        ACTION: PolicyFeature(FeatureType.ACTION, (2,)),
    }
    # Mixed modes: the image stays untouched (IDENTITY), the action is standardized.
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.IDENTITY,
        FeatureType.ACTION: NormalizationMode.MEAN_STD,
    }
    stats = {
        OBS_IMAGE: {"mean": [0.5], "std": [0.2]},
        ACTION: {"mean": [0.0, 0.0], "std": [1.0, 1.0]},
    }
    normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    # Get config
    config = normalizer.get_config()
    # Check that IDENTITY is properly serialized (modes are stored as enum-name strings)
    assert config["norm_map"]["VISUAL"] == "IDENTITY"
    assert config["norm_map"]["ACTION"] == "MEAN_STD"
    # Create new processor from config (simulating load); stats are passed separately
    # because they are not part of the serialized config.
    new_normalizer = NormalizerProcessorStep(
        features=config["features"],
        norm_map=config["norm_map"],
        stats=stats,
        eps=config["eps"],
    )
    # Test that both work the same way
    observation = {OBS_IMAGE: torch.tensor([0.7])}
    action = torch.tensor([1.0, -0.5])
    transition = create_transition(observation=observation, action=action)
    result1 = normalizer(transition)
    result2 = new_normalizer(transition)
    # Results should be identical
    assert torch.allclose(
        result1[TransitionKey.OBSERVATION][OBS_IMAGE],
        result2[TransitionKey.OBSERVATION][OBS_IMAGE],
    )
    assert torch.allclose(result1[TransitionKey.ACTION], result2[TransitionKey.ACTION])
# def test_unsupported_normalization_mode_error():
# """Test that unsupported normalization modes raise appropriate errors."""
# features = {OBS_STATE: PolicyFeature(FeatureType.STATE, (2,))}
# # Create an invalid norm_map (this would never happen in practice, but tests error handling)
# from enum import Enum
# class InvalidMode(str, Enum):
# INVALID = "INVALID"
# # We can't actually pass an invalid enum to the processor due to type checking,
# # but we can test the error by manipulating the norm_map after creation
# norm_map = {FeatureType.STATE: NormalizationMode.MEAN_STD}
# stats = {OBS_STATE: {"mean": [0.0, 0.0], "std": [1.0, 1.0]}}
# normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
# # Manually inject an invalid mode to test error handling
# normalizer.norm_map[FeatureType.STATE] = "INVALID_MODE"
# observation = {OBS_STATE: torch.tensor([1.0, -0.5])}
# transition = create_transition(observation=observation)
# with pytest.raises(ValueError, match="Unsupported normalization mode"):
# normalizer(transition)
def test_hotswap_stats_basic_functionality():
    """Test that hotswap_stats correctly updates stats in normalizer/unnormalizer steps."""
    # Create initial stats
    initial_stats = {
        OBS_IMAGE: {"mean": np.array([0.5, 0.5, 0.5]), "std": np.array([0.2, 0.2, 0.2])},
        ACTION: {"mean": np.array([0.0, 0.0]), "std": np.array([1.0, 1.0])},
    }
    # Create new stats for hotswapping
    new_stats = {
        OBS_IMAGE: {"mean": np.array([0.3, 0.3, 0.3]), "std": np.array([0.1, 0.1, 0.1])},
        ACTION: {"mean": np.array([0.1, 0.1]), "std": np.array([0.5, 0.5])},
    }
    # Create features and norm_map
    features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 128, 128)),
        ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.MEAN_STD,
        FeatureType.ACTION: NormalizationMode.MEAN_STD,
    }
    # Create processors
    normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=initial_stats)
    unnormalizer = UnnormalizerProcessorStep(features=features, norm_map=norm_map, stats=initial_stats)
    identity = IdentityProcessorStep()
    # Create robot processor; the identity step is included so the swap has a
    # non-normalizer step to skip over.
    robot_processor = DataProcessorPipeline(steps=[normalizer, unnormalizer, identity])
    # Hotswap stats
    new_processor = hotswap_stats(robot_processor, new_stats)
    # Check that normalizer and unnormalizer have new stats
    assert new_processor.steps[0].stats == new_stats
    assert new_processor.steps[1].stats == new_stats
    # Check that tensor stats (the torch-side mirror of `stats`) are updated correctly
    expected_tensor_stats = to_tensor(new_stats)
    for key in expected_tensor_stats:
        for stat_name in expected_tensor_stats[key]:
            torch.testing.assert_close(
                new_processor.steps[0]._tensor_stats[key][stat_name], expected_tensor_stats[key][stat_name]
            )
            torch.testing.assert_close(
                new_processor.steps[1]._tensor_stats[key][stat_name], expected_tensor_stats[key][stat_name]
            )
def test_hotswap_stats_deep_copy():
    """hotswap_stats must deep-copy the pipeline, leaving the original untouched."""
    old_stats = {
        OBS_IMAGE: {"mean": np.array([0.5, 0.5, 0.5]), "std": np.array([0.2, 0.2, 0.2])},
    }
    swapped_stats = {
        OBS_IMAGE: {"mean": np.array([0.3, 0.3, 0.3]), "std": np.array([0.1, 0.1, 0.1])},
    }
    feats = {OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 128, 128))}
    step = NormalizerProcessorStep(
        features=feats,
        norm_map={FeatureType.VISUAL: NormalizationMode.MEAN_STD},
        stats=old_stats,
    )
    source_pipeline = DataProcessorPipeline(steps=[step])
    # Capture object identities so in-place mutation can be detected afterwards.
    stats_ref = source_pipeline.steps[0].stats
    tensor_stats_ref = source_pipeline.steps[0]._tensor_stats
    swapped_pipeline = hotswap_stats(source_pipeline, swapped_stats)
    # The source pipeline must still hold the exact same, unchanged objects.
    assert source_pipeline.steps[0].stats is stats_ref
    assert source_pipeline.steps[0]._tensor_stats is tensor_stats_ref
    assert source_pipeline.steps[0].stats == old_stats
    # The swapped pipeline carries the new stats in freshly-copied objects.
    assert swapped_pipeline.steps[0].stats == swapped_stats
    assert swapped_pipeline.steps[0].stats is not stats_ref
    # Pipeline and step objects themselves are distinct copies.
    assert swapped_pipeline is not source_pipeline
    assert swapped_pipeline.steps[0] is not source_pipeline.steps[0]
def test_hotswap_stats_only_affects_normalizer_steps():
    """Only (Un)NormalizerProcessorStep instances receive the swapped stats."""
    old = {
        OBS_IMAGE: {"mean": np.array([0.5]), "std": np.array([0.2])},
    }
    swapped = {
        OBS_IMAGE: {"mean": np.array([0.3]), "std": np.array([0.1])},
    }
    feats = {OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 128, 128))}
    modes = {FeatureType.VISUAL: NormalizationMode.MEAN_STD}
    # Sandwich an identity step between a normalizer and an unnormalizer.
    pipeline = DataProcessorPipeline(
        steps=[
            NormalizerProcessorStep(features=feats, norm_map=modes, stats=old),
            IdentityProcessorStep(),
            UnnormalizerProcessorStep(features=feats, norm_map=modes, stats=old),
        ]
    )
    swapped_pipeline = hotswap_stats(pipeline, swapped)
    # Normalizer (index 0) and unnormalizer (index 2) picked up the new stats.
    assert swapped_pipeline.steps[0].stats == swapped
    assert swapped_pipeline.steps[2].stats == swapped
    # The identity step in the middle has no stats attribute at all.
    assert not hasattr(swapped_pipeline.steps[1], "stats")
def test_hotswap_stats_empty_stats():
    """Hotswapping with an empty dict clears the stats on normalizer steps."""
    starting_stats = {
        OBS_IMAGE: {"mean": np.array([0.5]), "std": np.array([0.2])},
    }
    feats = {OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 128, 128))}
    pipeline = DataProcessorPipeline(
        steps=[
            NormalizerProcessorStep(
                features=feats,
                norm_map={FeatureType.VISUAL: NormalizationMode.MEAN_STD},
                stats=starting_stats,
            )
        ]
    )
    # Swapping in an empty stats dict must be accepted.
    result = hotswap_stats(pipeline, {})
    # Both the raw stats dict and its tensor mirror end up empty.
    assert result.steps[0].stats == {}
    assert result.steps[0]._tensor_stats == {}
def test_hotswap_stats_no_normalizer_steps():
    """hotswap_stats on a pipeline without normalizer steps still deep-copies it."""
    swap_in = {
        OBS_IMAGE: {"mean": np.array([0.5]), "std": np.array([0.2])},
    }
    pipeline = DataProcessorPipeline(steps=[IdentityProcessorStep(), IdentityProcessorStep()])
    # No normalizer/unnormalizer present: the call must succeed anyway.
    copied = hotswap_stats(pipeline, swap_in)
    # A fresh pipeline object is returned.
    assert copied is not pipeline
    assert len(copied.steps) == len(pipeline.steps)
    # Every step is a distinct object of the same type, otherwise untouched.
    for original_step, copied_step in zip(pipeline.steps, copied.steps):
        assert copied_step is not original_step
        assert isinstance(copied_step, type(original_step))
def test_hotswap_stats_preserves_other_attributes():
    """Test that hotswap_stats preserves other processor attributes like features and norm_map."""
    initial_stats = {
        OBS_IMAGE: {"mean": np.array([0.5]), "std": np.array([0.2])},
    }
    new_stats = {
        OBS_IMAGE: {"mean": np.array([0.3]), "std": np.array([0.1])},
    }
    features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 128, 128)),
    }
    norm_map = {FeatureType.VISUAL: NormalizationMode.MEAN_STD}
    normalize_observation_keys = {OBS_IMAGE}
    eps = 1e-6
    # Build a normalizer with every configurable attribute set explicitly, so any
    # loss during the swap would be visible below.
    normalizer = NormalizerProcessorStep(
        features=features,
        norm_map=norm_map,
        stats=initial_stats,
        normalize_observation_keys=normalize_observation_keys,
        eps=eps,
    )
    robot_processor = DataProcessorPipeline(steps=[normalizer])
    # Hotswap stats
    new_processor = hotswap_stats(robot_processor, new_stats)
    # Check that other attributes are preserved
    new_normalizer = new_processor.steps[0]
    assert new_normalizer.features == features
    assert new_normalizer.norm_map == norm_map
    assert new_normalizer.normalize_observation_keys == normalize_observation_keys
    assert new_normalizer.eps == eps
    # But stats should be updated
    assert new_normalizer.stats == new_stats
def test_hotswap_stats_multiple_normalizer_types():
    """Test hotswap_stats with multiple normalizer and unnormalizer steps."""
    initial_stats = {
        OBS_IMAGE: {"mean": np.array([0.5]), "std": np.array([0.2])},
        ACTION: {"min": np.array([-1.0]), "max": np.array([1.0])},
    }
    new_stats = {
        OBS_IMAGE: {"mean": np.array([0.3]), "std": np.array([0.1])},
        ACTION: {"min": np.array([-2.0]), "max": np.array([2.0])},
    }
    features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 128, 128)),
        ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(1,)),
    }
    # Mixed modes: MEAN_STD for the image, MIN_MAX for the action.
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.MEAN_STD,
        FeatureType.ACTION: NormalizationMode.MIN_MAX,
    }
    # Create multiple normalizers and unnormalizers
    normalizer1 = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=initial_stats)
    normalizer2 = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=initial_stats)
    unnormalizer1 = UnnormalizerProcessorStep(features=features, norm_map=norm_map, stats=initial_stats)
    unnormalizer2 = UnnormalizerProcessorStep(features=features, norm_map=norm_map, stats=initial_stats)
    robot_processor = DataProcessorPipeline(steps=[normalizer1, unnormalizer1, normalizer2, unnormalizer2])
    # Hotswap stats
    new_processor = hotswap_stats(robot_processor, new_stats)
    # All normalizer/unnormalizer steps should be updated
    for step in new_processor.steps:
        assert step.stats == new_stats
        # Check tensor stats conversion on each step as well
        expected_tensor_stats = to_tensor(new_stats)
        for key in expected_tensor_stats:
            for stat_name in expected_tensor_stats[key]:
                torch.testing.assert_close(
                    step._tensor_stats[key][stat_name], expected_tensor_stats[key][stat_name]
                )
def test_hotswap_stats_with_different_data_types():
    """Test hotswap_stats with various data types in stats."""
    initial_stats = {
        OBS_IMAGE: {"mean": np.array([0.5]), "std": np.array([0.2])},
    }
    # New stats with different data types (int, float, list, tuple)
    new_stats = {
        OBS_IMAGE: {
            "mean": [0.3, 0.4, 0.5],  # list
            "std": (0.1, 0.2, 0.3),  # tuple
            "min": 0,  # int
            "max": 1.0,  # float
        },
        ACTION: {
            "mean": np.array([0.1, 0.2]),  # numpy array
            "std": torch.tensor([0.5, 0.6]),  # torch tensor
        },
    }
    features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 128, 128)),
        ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.MEAN_STD,
        FeatureType.ACTION: NormalizationMode.MEAN_STD,
    }
    normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=initial_stats)
    robot_processor = DataProcessorPipeline(steps=[normalizer])
    # Hotswap stats
    new_processor = hotswap_stats(robot_processor, new_stats)
    # Check that stats are updated
    assert new_processor.steps[0].stats == new_stats
    # Check that tensor conversion worked correctly: every stat value, whatever its
    # Python type, must have been converted to a torch.Tensor.
    tensor_stats = new_processor.steps[0]._tensor_stats
    assert isinstance(tensor_stats[OBS_IMAGE]["mean"], torch.Tensor)
    assert isinstance(tensor_stats[OBS_IMAGE]["std"], torch.Tensor)
    assert isinstance(tensor_stats[OBS_IMAGE]["min"], torch.Tensor)
    assert isinstance(tensor_stats[OBS_IMAGE]["max"], torch.Tensor)
    assert isinstance(tensor_stats[ACTION]["mean"], torch.Tensor)
    assert isinstance(tensor_stats[ACTION]["std"], torch.Tensor)
    # Check values (scalars become 0-dim tensors)
    torch.testing.assert_close(tensor_stats[OBS_IMAGE]["mean"], torch.tensor([0.3, 0.4, 0.5]))
    torch.testing.assert_close(tensor_stats[OBS_IMAGE]["std"], torch.tensor([0.1, 0.2, 0.3]))
    torch.testing.assert_close(tensor_stats[OBS_IMAGE]["min"], torch.tensor(0.0))
    torch.testing.assert_close(tensor_stats[OBS_IMAGE]["max"], torch.tensor(1.0))
def test_hotswap_stats_functional_test():
    """Test that hotswapped processor actually works functionally."""
    # Create test data
    observation = {
        OBS_IMAGE: torch.tensor([[[0.6, 0.7], [0.8, 0.9]], [[0.5, 0.6], [0.7, 0.8]]]),
    }
    action = torch.tensor([0.5, -0.5])
    transition = create_transition(observation=observation, action=action)
    # Initial stats
    initial_stats = {
        OBS_IMAGE: {"mean": np.array([0.5, 0.4]), "std": np.array([0.2, 0.3])},
        ACTION: {"mean": np.array([0.0, 0.0]), "std": np.array([1.0, 1.0])},
    }
    # New stats
    new_stats = {
        OBS_IMAGE: {"mean": np.array([0.3, 0.2]), "std": np.array([0.1, 0.2])},
        ACTION: {"mean": np.array([0.1, -0.1]), "std": np.array([0.5, 0.5])},
    }
    features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(2, 2, 2)),
        ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.MEAN_STD,
        FeatureType.ACTION: NormalizationMode.MEAN_STD,
    }
    # Create original processor; identity converters keep raw transition dicts
    # going in and out of the pipeline.
    normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=initial_stats)
    original_processor = DataProcessorPipeline(
        steps=[normalizer], to_transition=identity_transition, to_output=identity_transition
    )
    # Process with original stats
    original_result = original_processor(transition)
    # Hotswap stats
    new_processor = hotswap_stats(original_processor, new_stats)
    # Process with new stats
    new_result = new_processor(transition)
    # Results should be different since normalization changed
    assert not torch.allclose(
        original_result[OBS_STR][OBS_IMAGE],
        new_result[OBS_STR][OBS_IMAGE],
        rtol=1e-3,
        atol=1e-3,
    )
    assert not torch.allclose(original_result[ACTION], new_result[ACTION], rtol=1e-3, atol=1e-3)
    # Verify that the new processor is actually using the new stats by checking internal state
    assert new_processor.steps[0].stats == new_stats
    assert torch.allclose(new_processor.steps[0]._tensor_stats[OBS_IMAGE]["mean"], torch.tensor([0.3, 0.2]))
    assert torch.allclose(new_processor.steps[0]._tensor_stats[OBS_IMAGE]["std"], torch.tensor([0.1, 0.2]))
    assert torch.allclose(new_processor.steps[0]._tensor_stats[ACTION]["mean"], torch.tensor([0.1, -0.1]))
    assert torch.allclose(new_processor.steps[0]._tensor_stats[ACTION]["std"], torch.tensor([0.5, 0.5]))
    # Test that normalization actually happens (output should not equal input)
    assert not torch.allclose(new_result[OBS_STR][OBS_IMAGE], observation[OBS_IMAGE])
    assert not torch.allclose(new_result[ACTION], action)
def test_zero_std_uses_eps():
    """A zero std must not divide by zero: x == mean normalizes to exactly 0."""
    step = NormalizerProcessorStep(
        features={OBS_STATE: PolicyFeature(FeatureType.STATE, (1,))},
        norm_map={FeatureType.STATE: NormalizationMode.MEAN_STD},
        stats={OBS_STATE: {"mean": np.array([0.5]), "std": np.array([0.0])}},
        eps=1e-6,
    )
    # Input equals the mean, so (x - mean) / (std + eps) is 0 regardless of eps.
    result = step(create_transition(observation={OBS_STATE: torch.tensor([0.5])}))
    assert torch.allclose(result[TransitionKey.OBSERVATION][OBS_STATE], torch.tensor([0.0]))
def test_min_equals_max_maps_to_minus_one():
    """Degenerate MIN_MAX range (min == max): x == min lands at -1 after [-1, 1] scaling."""
    step = NormalizerProcessorStep(
        features={OBS_STATE: PolicyFeature(FeatureType.STATE, (1,))},
        norm_map={FeatureType.STATE: NormalizationMode.MIN_MAX},
        stats={OBS_STATE: {"min": np.array([2.0]), "max": np.array([2.0])}},
        eps=1e-6,
    )
    # (x - min) / (max - min + eps) ≈ 0, then 2*0 - 1 = -1 after rescaling.
    result = step(create_transition(observation={OBS_STATE: torch.tensor([2.0])}))
    assert torch.allclose(result[TransitionKey.OBSERVATION][OBS_STATE], torch.tensor([-1.0]))
def test_action_normalized_despite_normalize_observation_keys():
    """normalize_observation_keys filters observations only; actions are always normalized."""
    feats = {
        OBS_STATE: PolicyFeature(FeatureType.STATE, (1,)),
        ACTION: PolicyFeature(FeatureType.ACTION, (2,)),
    }
    modes = {FeatureType.STATE: NormalizationMode.IDENTITY, FeatureType.ACTION: NormalizationMode.MEAN_STD}
    step = NormalizerProcessorStep(
        features=feats,
        norm_map=modes,
        stats={ACTION: {"mean": np.array([1.0, -1.0]), "std": np.array([2.0, 4.0])}},
        normalize_observation_keys={OBS_STATE},
    )
    tr = create_transition(
        observation={OBS_STATE: torch.tensor([3.0])}, action=torch.tensor([3.0, 3.0])
    )
    result = step(tr)
    # Per dimension: (3 - 1) / 2 = 1.0 and (3 - (-1)) / 4 = 1.0
    assert torch.allclose(result[TransitionKey.ACTION], torch.tensor([1.0, 1.0]))
def test_unnormalize_observations_mean_std_and_min_max():
    """Unnormalizing zeros recovers the mean (MEAN_STD) and the range midpoint (MIN_MAX)."""
    features = {
        "observation.ms": PolicyFeature(FeatureType.STATE, (2,)),
        "observation.mm": PolicyFeature(FeatureType.STATE, (2,)),
    }
    # Build two processors: one mean/std and one min/max
    unnorm_ms = UnnormalizerProcessorStep(
        features={"observation.ms": features["observation.ms"]},
        norm_map={FeatureType.STATE: NormalizationMode.MEAN_STD},
        stats={"observation.ms": {"mean": np.array([1.0, -1.0]), "std": np.array([2.0, 4.0])}},
    )
    unnorm_mm = UnnormalizerProcessorStep(
        features={"observation.mm": features["observation.mm"]},
        norm_map={FeatureType.STATE: NormalizationMode.MIN_MAX},
        stats={"observation.mm": {"min": np.array([0.0, -2.0]), "max": np.array([2.0, 2.0])}},
    )
    tr = create_transition(
        observation={
            "observation.ms": torch.tensor([0.0, 0.0]),  # → mean
            "observation.mm": torch.tensor([0.0, 0.0]),  # → mid-point
        }
    )
    out_ms = unnorm_ms(tr)[TransitionKey.OBSERVATION]["observation.ms"]
    out_mm = unnorm_mm(tr)[TransitionKey.OBSERVATION]["observation.mm"]
    assert torch.allclose(out_ms, torch.tensor([1.0, -1.0]))
    assert torch.allclose(out_mm, torch.tensor([1.0, 0.0]))  # mid of [0,2] and [-2,2]
def test_unknown_observation_keys_ignored():
    """Observation keys absent from `features` pass through the normalizer untouched."""
    step = NormalizerProcessorStep(
        features={OBS_STATE: PolicyFeature(FeatureType.STATE, (1,))},
        norm_map={FeatureType.STATE: NormalizationMode.MEAN_STD},
        stats={OBS_STATE: {"mean": np.array([0.0]), "std": np.array([1.0])}},
    )
    extra_value = torch.tensor([5.0])
    inputs = {OBS_STATE: torch.tensor([1.0]), "observation.unknown": extra_value}
    result = step(create_transition(observation=inputs))
    # The unknown key survives with its original value, untouched by normalization.
    assert torch.allclose(result[TransitionKey.OBSERVATION]["observation.unknown"], extra_value)
def test_batched_action_normalization():
    """Mean/std normalization broadcasts across a batch dimension of actions."""
    step = NormalizerProcessorStep(
        features={ACTION: PolicyFeature(FeatureType.ACTION, (2,))},
        norm_map={FeatureType.ACTION: NormalizationMode.MEAN_STD},
        stats={ACTION: {"mean": np.array([1.0, -1.0]), "std": np.array([2.0, 4.0])}},
    )
    # Row 0 equals the mean (→ zeros); row 1 sits one std above it (→ ones).
    batch = torch.tensor([[1.0, -1.0], [3.0, 3.0]])
    normalized = step(create_transition(action=batch))[TransitionKey.ACTION]
    assert torch.allclose(normalized, torch.tensor([[0.0, 0.0], [1.0, 1.0]]))
def test_complementary_data_preservation():
    """Normalization must carry complementary_data through unchanged."""
    step = NormalizerProcessorStep(
        features={OBS_STATE: PolicyFeature(FeatureType.STATE, (1,))},
        norm_map={FeatureType.STATE: NormalizationMode.MEAN_STD},
        stats={OBS_STATE: {"mean": np.array([0.0]), "std": np.array([1.0])}},
    )
    tr = create_transition(
        observation={OBS_STATE: torch.tensor([1.0])}, complementary_data={"existing": 123}
    )
    result = step(tr)
    # The pre-existing entry is still present after processing.
    assert result[TransitionKey.COMPLEMENTARY_DATA]["existing"] == 123
def test_roundtrip_normalize_unnormalize_non_identity():
    """Normalize → unnormalize recovers the originals for MEAN_STD state and MIN_MAX action."""
    features = {
        OBS_STATE: PolicyFeature(FeatureType.STATE, (2,)),
        ACTION: PolicyFeature(FeatureType.ACTION, (2,)),
    }
    norm_map = {FeatureType.STATE: NormalizationMode.MEAN_STD, FeatureType.ACTION: NormalizationMode.MIN_MAX}
    stats = {
        OBS_STATE: {"mean": np.array([1.0, -1.0]), "std": np.array([2.0, 4.0])},
        ACTION: {"min": np.array([-2.0, 0.0]), "max": np.array([2.0, 4.0])},
    }
    normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    unnormalizer = UnnormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)
    # Add a time dimension in action for broadcasting check (B,T,D)
    obs = {OBS_STATE: torch.tensor([[3.0, 3.0], [1.0, -1.0]])}
    act = torch.tensor([[[0.0, -1.0], [1.0, 1.0]]])  # shape (1,2,2) already in [-1,1]
    tr = create_transition(observation=obs, action=act)
    out = unnormalizer(normalizer(tr))
    # Tolerance absorbs the eps used inside the normalization formulas.
    assert torch.allclose(out[TransitionKey.OBSERVATION][OBS_STATE], obs[OBS_STATE], atol=1e-5)
    assert torch.allclose(out[TransitionKey.ACTION], act, atol=1e-5)
def test_dtype_adaptation_bfloat16_input_float32_normalizer():
    """Test automatic dtype adaptation: NormalizerProcessor(float32) adapts to bfloat16 input → bfloat16 output"""
    features = {OBS_STATE: PolicyFeature(FeatureType.STATE, (5,))}
    norm_map = {FeatureType.STATE: NormalizationMode.MEAN_STD}
    stats = {
        OBS_STATE: {
            "mean": np.array([0.0, 0.0, 0.0, 0.0, 0.0]),
            "std": np.array([1.0, 1.0, 1.0, 1.0, 1.0]),
        }
    }
    # Create normalizer configured with float32 dtype
    normalizer = NormalizerProcessorStep(
        features=features, norm_map=norm_map, stats=stats, dtype=torch.float32
    )
    # Verify initial configuration
    assert normalizer.dtype == torch.float32
    for stat_tensor in normalizer._tensor_stats[OBS_STATE].values():
        assert stat_tensor.dtype == torch.float32
    # Create bfloat16 input tensor
    observation = {OBS_STATE: torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], dtype=torch.bfloat16)}
    transition = create_transition(observation=observation)
    # Process the transition
    result = normalizer(transition)
    # Verify that:
    # 1. Stats were automatically adapted to bfloat16
    assert normalizer.dtype == torch.bfloat16
    for stat_tensor in normalizer._tensor_stats[OBS_STATE].values():
        assert stat_tensor.dtype == torch.bfloat16
    # 2. Output is in bfloat16
    output_tensor = result[TransitionKey.OBSERVATION][OBS_STATE]
    assert output_tensor.dtype == torch.bfloat16
    # 3. Normalization was applied correctly: output == (input - mean) / std
    expected = (
        torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], dtype=torch.bfloat16)
        - torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0], dtype=torch.bfloat16)
    ) / torch.tensor([1.0, 1.0, 1.0, 1.0, 1.0], dtype=torch.bfloat16)
    assert torch.allclose(output_tensor, expected, atol=1e-2)  # bfloat16 has lower precision
def test_stats_override_preservation_in_load_state_dict():
    """
    Test that explicitly provided stats are preserved during load_state_dict.
    This tests the fix for the bug where stats provided via overrides were
    being overwritten when load_state_dict was called.
    """
    # Create original stats
    original_stats = {
        OBS_IMAGE: {"mean": np.array([0.5, 0.5, 0.5]), "std": np.array([0.2, 0.2, 0.2])},
        ACTION: {"mean": np.array([0.0, 0.0]), "std": np.array([1.0, 1.0])},
    }
    # Create override stats (what user wants to use)
    override_stats = {
        OBS_IMAGE: {"mean": np.array([0.3, 0.3, 0.3]), "std": np.array([0.1, 0.1, 0.1])},
        ACTION: {"mean": np.array([0.1, 0.1]), "std": np.array([0.5, 0.5])},
    }
    features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 128, 128)),
        ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.MEAN_STD,
        FeatureType.ACTION: NormalizationMode.MEAN_STD,
    }
    # Create a normalizer with original stats and save its state
    original_normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=original_stats)
    saved_state_dict = original_normalizer.state_dict()
    # Create a new normalizer with override stats (simulating from_pretrained with overrides)
    override_normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=override_stats)
    # Verify that the override stats are initially set correctly
    assert set(override_normalizer.stats.keys()) == set(override_stats.keys())
    for key in override_stats:
        assert set(override_normalizer.stats[key].keys()) == set(override_stats[key].keys())
        for stat_name in override_stats[key]:
            np.testing.assert_array_equal(
                override_normalizer.stats[key][stat_name], override_stats[key][stat_name]
            )
    assert override_normalizer._stats_explicitly_provided is True
    # This is the critical test: load_state_dict should NOT overwrite the override stats
    override_normalizer.load_state_dict(saved_state_dict)
    # After loading state_dict, stats should still be the override stats, not the original stats
    # Check that loaded stats match override stats
    assert set(override_normalizer.stats.keys()) == set(override_stats.keys())
    for key in override_stats:
        assert set(override_normalizer.stats[key].keys()) == set(override_stats[key].keys())
        for stat_name in override_stats[key]:
            np.testing.assert_array_equal(
                override_normalizer.stats[key][stat_name], override_stats[key][stat_name]
            )
    # Compare individual arrays to avoid numpy array comparison ambiguity
    for key in override_stats:
        for stat_name in override_stats[key]:
            assert not np.array_equal(
                override_normalizer.stats[key][stat_name], original_stats[key][stat_name]
            ), f"Stats for {key}.{stat_name} should not match original stats"
    # Verify that _tensor_stats are also correctly set to match the override stats
    expected_tensor_stats = to_tensor(override_stats)
    for key in expected_tensor_stats:
        for stat_name in expected_tensor_stats[key]:
            if isinstance(expected_tensor_stats[key][stat_name], torch.Tensor):
                torch.testing.assert_close(
                    override_normalizer._tensor_stats[key][stat_name], expected_tensor_stats[key][stat_name]
                )
def test_stats_without_override_loads_normally():
    """
    Test that when stats are not explicitly provided (normal case),
    load_state_dict works as before.
    """
    original_stats = {
        OBS_IMAGE: {"mean": np.array([0.5, 0.5, 0.5]), "std": np.array([0.2, 0.2, 0.2])},
        ACTION: {"mean": np.array([0.0, 0.0]), "std": np.array([1.0, 1.0])},
    }
    features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 128, 128)),
        ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.MEAN_STD,
        FeatureType.ACTION: NormalizationMode.MEAN_STD,
    }
    # Create a normalizer with original stats and save its state
    original_normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=original_stats)
    saved_state_dict = original_normalizer.state_dict()
    # Create a new normalizer without stats (simulating normal from_pretrained)
    new_normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats={})
    # Verify that stats are not explicitly provided (empty dict leaves the flag unset)
    assert new_normalizer._stats_explicitly_provided is False
    # Load state dict - this should work normally and load the saved stats
    new_normalizer.load_state_dict(saved_state_dict)
    # Stats should now match the original stats (normal behavior)
    # Check that all keys and values match
    assert set(new_normalizer.stats.keys()) == set(original_stats.keys())
    for key in original_stats:
        assert set(new_normalizer.stats[key].keys()) == set(original_stats[key].keys())
        for stat_name in original_stats[key]:
            np.testing.assert_allclose(
                new_normalizer.stats[key][stat_name], original_stats[key][stat_name], rtol=1e-6, atol=1e-6
            )
def test_stats_explicit_provided_flag_detection():
    """_stats_explicitly_provided is True only when a non-empty stats dict is passed."""
    feats = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 128, 128)),
    }
    modes = {FeatureType.VISUAL: NormalizationMode.MEAN_STD}
    # Case 1: non-empty stats dict → flag is set.
    with_stats = NormalizerProcessorStep(
        features=feats, norm_map=modes, stats={OBS_IMAGE: {"mean": [0.5], "std": [0.2]}}
    )
    assert with_stats._stats_explicitly_provided is True
    # Cases 2 and 3: an empty dict and an explicit None both leave the flag unset.
    for absent in ({}, None):
        step = NormalizerProcessorStep(features=feats, norm_map=modes, stats=absent)
        assert step._stats_explicitly_provided is False
    # Case 4: omitting the argument entirely (defaults to None) also leaves it unset.
    defaulted = NormalizerProcessorStep(features=feats, norm_map=modes)
    assert defaulted._stats_explicitly_provided is False
def test_pipeline_from_pretrained_with_stats_overrides():
    """
    Test the actual use case: DataProcessorPipeline.from_pretrained with stat overrides.
    This is an integration test that verifies the fix works in the real scenario
    where users provide stat overrides when loading a pipeline.
    """
    import tempfile
    # Create test data
    features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 32, 32)),
        ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.MEAN_STD,
        FeatureType.ACTION: NormalizationMode.MEAN_STD,
    }
    original_stats = {
        OBS_IMAGE: {"mean": np.array([0.5, 0.5, 0.5]), "std": np.array([0.2, 0.2, 0.2])},
        ACTION: {"mean": np.array([0.0, 0.0]), "std": np.array([1.0, 1.0])},
    }
    override_stats = {
        OBS_IMAGE: {"mean": np.array([0.3, 0.3, 0.3]), "std": np.array([0.1, 0.1, 0.1])},
        ACTION: {"mean": np.array([0.1, 0.1]), "std": np.array([0.5, 0.5])},
    }
    # Create and save a pipeline with the original stats
    normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=original_stats)
    identity = IdentityProcessorStep()
    original_pipeline = DataProcessorPipeline(steps=[normalizer, identity], name="test_pipeline")
    with tempfile.TemporaryDirectory() as temp_dir:
        # Save the pipeline
        original_pipeline.save_pretrained(temp_dir)
        # Load the pipeline with stat overrides, keyed by the step's registered name
        overrides = {"normalizer_processor": {"stats": override_stats}}
        loaded_pipeline = DataProcessorPipeline.from_pretrained(
            temp_dir, config_filename="test_pipeline.json", overrides=overrides
        )
        # The critical test: the loaded pipeline should use override stats, not original stats
        loaded_normalizer = loaded_pipeline.steps[0]
        assert isinstance(loaded_normalizer, NormalizerProcessorStep)
        # Check that loaded stats match override stats
        assert set(loaded_normalizer.stats.keys()) == set(override_stats.keys())
        for key in override_stats:
            assert set(loaded_normalizer.stats[key].keys()) == set(override_stats[key].keys())
            for stat_name in override_stats[key]:
                np.testing.assert_array_equal(
                    loaded_normalizer.stats[key][stat_name], override_stats[key][stat_name]
                )
        # Verify stats don't match original stats
        for key in override_stats:
            for stat_name in override_stats[key]:
                assert not np.array_equal(
                    loaded_normalizer.stats[key][stat_name], original_stats[key][stat_name]
                ), f"Stats for {key}.{stat_name} should not match original stats"
        # Test that the override stats are actually used in processing
        observation = {
            OBS_IMAGE: torch.tensor([0.7, 0.5, 0.3]),
        }
        action = torch.tensor([1.0, -0.5])
        transition = create_transition(observation=observation, action=action)
        # Process with override pipeline
        override_result = loaded_pipeline(transition)
        # Create a reference pipeline with override stats for comparison
        reference_normalizer = NormalizerProcessorStep(
            features=features, norm_map=norm_map, stats=override_stats
        )
        reference_pipeline = DataProcessorPipeline(
            steps=[reference_normalizer, identity],
            to_transition=identity_transition,
            to_output=identity_transition,
        )
        # Run the reference pipeline only to confirm it processes without error;
        # its output is intentionally unused (stats equality was asserted above).
        _ = reference_pipeline(transition)
        # The critical part was verified above: loaded_normalizer.stats == override_stats
        # This confirms that override stats are preserved during load_state_dict.
        # Let's just verify the pipeline processes data successfully.
        assert ACTION in override_result
        assert isinstance(override_result[ACTION], torch.Tensor)
def test_dtype_adaptation_device_processor_bfloat16_normalizer_float32():
    """Test policy pipeline scenario: DeviceProcessor(bfloat16) + NormalizerProcessor(float32) → bfloat16 output"""
    from lerobot.processor import DeviceProcessorStep

    expected_device = str(auto_select_torch_device())
    feats = {OBS_STATE: PolicyFeature(FeatureType.STATE, (3,))}
    modes = {FeatureType.STATE: NormalizationMode.MEAN_STD}
    state_stats = {OBS_STATE: {"mean": np.array([0.0, 0.0, 0.0]), "std": np.array([1.0, 1.0, 1.0])}}

    # Pipeline under test: DeviceProcessor(bfloat16) → NormalizerProcessor(float32).
    device_step = DeviceProcessorStep(device=expected_device, float_dtype="bfloat16")
    norm_step = NormalizerProcessorStep(features=feats, norm_map=modes, stats=state_stats, dtype=torch.float32)

    # The normalizer starts out configured for float32.
    assert norm_step.dtype == torch.float32

    tr = create_transition(observation={OBS_STATE: torch.tensor([1.0, 2.0, 3.0], dtype=torch.float32)})

    # Stage 1: device step casts to bfloat16 and moves to the accelerator.
    after_device = device_step(tr)
    mid = after_device[TransitionKey.OBSERVATION][OBS_STATE]
    assert mid.dtype == torch.bfloat16
    assert mid.device.type == expected_device

    # Stage 2: the normalizer receives bfloat16 input and adapts to it.
    after_norm = norm_step(after_device)
    out = after_norm[TransitionKey.OBSERVATION][OBS_STATE]
    assert out.dtype == torch.bfloat16
    assert out.device.type == expected_device

    # Its internal state (dtype and per-stat tensors) follows along.
    assert norm_step.dtype == torch.bfloat16
    for stat in norm_step._tensor_stats[OBS_STATE].values():
        assert stat.dtype == torch.bfloat16
        assert stat.device.type == expected_device
def test_stats_reconstruction_after_load_state_dict():
    """
    Test that stats dict is properly reconstructed from _tensor_stats after loading.

    Guards against a regression where:
    1. Only _tensor_stats were saved via state_dict()
    2. stats became empty {} after loading
    3. to() and hotswap_stats then failed, since both depend on self.stats
    """
    features = {
        OBS_IMAGE: PolicyFeature(FeatureType.VISUAL, (3, 96, 96)),
        OBS_STATE: PolicyFeature(FeatureType.STATE, (2,)),
        ACTION: PolicyFeature(FeatureType.ACTION, (2,)),
    }
    norm_map = {
        FeatureType.VISUAL: NormalizationMode.MEAN_STD,
        FeatureType.STATE: NormalizationMode.MIN_MAX,
        FeatureType.ACTION: NormalizationMode.MEAN_STD,
    }
    stats = {
        OBS_IMAGE: {"mean": np.array([0.5, 0.5, 0.5]), "std": np.array([0.2, 0.2, 0.2])},
        OBS_STATE: {"min": np.array([0.0, -1.0]), "max": np.array([1.0, 1.0])},
        ACTION: {"mean": np.array([0.0, 0.0]), "std": np.array([1.0, 2.0])},
    }
    source = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats)

    # Round-trip: save from the source, load into a normalizer created with empty stats.
    restored = NormalizerProcessorStep(features=features, norm_map=norm_map, stats={})
    restored.load_state_dict(source.state_dict())

    # stats must be rebuilt from _tensor_stats rather than left empty.
    assert restored.stats is not None
    assert restored.stats != {}
    for key in (OBS_IMAGE, OBS_STATE, ACTION):
        assert key in restored.stats

    # Values converted back from tensors must match the originals.
    expected = {
        (OBS_IMAGE, "mean"): [0.5, 0.5, 0.5],
        (OBS_IMAGE, "std"): [0.2, 0.2, 0.2],
        (OBS_STATE, "min"): [0.0, -1.0],
        (OBS_STATE, "max"): [1.0, 1.0],
        (ACTION, "mean"): [0.0, 0.0],
        (ACTION, "std"): [1.0, 2.0],
    }
    for (key, stat_name), values in expected.items():
        np.testing.assert_allclose(restored.stats[key][stat_name], values)

    # Test 1: to() depends on self.stats and used to crash after loading.
    try:
        restored.to(device="cpu", dtype=torch.float32)
    except (KeyError, AttributeError) as e:
        pytest.fail(f"to() method failed after loading state_dict: {e}")

    # Test 2: hotswap_stats also depends on self.stats.
    new_stats = {
        OBS_IMAGE: {"mean": [0.3, 0.3, 0.3], "std": [0.1, 0.1, 0.1]},
        OBS_STATE: {"min": [-1.0, -2.0], "max": [2.0, 2.0]},
        ACTION: {"mean": [0.1, 0.1], "std": [0.5, 0.5]},
    }
    pipeline = DataProcessorPipeline([restored])
    try:
        swapped = hotswap_stats(pipeline, new_stats)
        assert swapped.steps[0].stats == new_stats
    except (KeyError, AttributeError) as e:
        pytest.fail(f"hotswap_stats failed after loading state_dict: {e}")

    # Test 3: the restored normalizer behaves identically to the original.
    observation = {
        OBS_IMAGE: torch.tensor([0.7, 0.5, 0.3]),
        OBS_STATE: torch.tensor([0.5, 0.0]),
    }
    transition = create_transition(observation=observation, action=torch.tensor([1.0, -0.5]))
    reference = source(transition)
    candidate = restored(transition)
    for key in (OBS_IMAGE, OBS_STATE):
        torch.testing.assert_close(
            reference[TransitionKey.OBSERVATION][key], candidate[TransitionKey.OBSERVATION][key]
        )
    torch.testing.assert_close(reference[TransitionKey.ACTION], candidate[TransitionKey.ACTION])
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_normalize_processor.py",
"license": "Apache License 2.0",
"lines": 1696,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_observation_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import torch
from lerobot.configs.types import FeatureType, PipelineFeatureType
from lerobot.processor import TransitionKey, VanillaObservationProcessorStep
from lerobot.processor.converters import create_transition
from lerobot.utils.constants import OBS_ENV_STATE, OBS_IMAGE, OBS_IMAGES, OBS_STATE
from tests.conftest import assert_contract_is_typed
def test_process_single_image():
    """A single HWC uint8 image becomes a batched, normalized CHW float tensor."""
    step = VanillaObservationProcessorStep()
    frame = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)

    out = step(create_transition(observation={"pixels": frame}))[TransitionKey.OBSERVATION]

    assert OBS_IMAGE in out
    img = out[OBS_IMAGE]
    # (batch, channels, height, width), float in [0, 1]
    assert img.shape == (1, 3, 64, 64)
    assert img.dtype == torch.float32
    assert img.min() >= 0.0
    assert img.max() <= 1.0
def test_process_image_dict():
    """Each camera entry in a pixels dict becomes its own processed image tensor."""
    step = VanillaObservationProcessorStep()
    frames = {
        "camera1": np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8),
        "camera2": np.random.randint(0, 256, size=(48, 48, 3), dtype=np.uint8),
    }

    out = step(create_transition(observation={"pixels": frames}))[TransitionKey.OBSERVATION]

    # Each camera is remapped to its own batched CHW tensor.
    for cam, side in (("camera1", 32), ("camera2", 48)):
        key = f"{OBS_IMAGES}.{cam}"
        assert key in out
        assert out[key].shape == (1, 3, side, side)
def test_process_batched_image():
    """Images that already carry a batch dimension keep it after processing."""
    step = VanillaObservationProcessorStep()
    batch = np.random.randint(0, 256, size=(2, 64, 64, 3), dtype=np.uint8)

    out = step(create_transition(observation={"pixels": batch}))[TransitionKey.OBSERVATION]

    assert out[OBS_IMAGE].shape == (2, 3, 64, 64)
def test_invalid_image_format():
    """Channel-first input is rejected with a descriptive ValueError."""
    step = VanillaObservationProcessorStep()
    chw = np.random.randint(0, 256, size=(3, 64, 64), dtype=np.uint8)
    tr = create_transition(observation={"pixels": chw})

    with pytest.raises(ValueError, match="Expected channel-last images"):
        step(tr)
def test_invalid_image_dtype():
    """Float input images are rejected with a descriptive ValueError."""
    step = VanillaObservationProcessorStep()
    float_img = np.random.rand(64, 64, 3).astype(np.float32)
    tr = create_transition(observation={"pixels": float_img})

    with pytest.raises(ValueError, match="Expected torch.uint8 images"):
        step(tr)
def test_no_pixels_in_observation():
    """Observations without a pixels entry pass through untouched."""
    step = VanillaObservationProcessorStep()
    tr = create_transition(observation={"other_data": np.array([1, 2, 3])})

    out = step(tr)[TransitionKey.OBSERVATION]

    assert "other_data" in out
    np.testing.assert_array_equal(out["other_data"], np.array([1, 2, 3]))
def test_none_observation():
    """An observation with no content is returned unchanged."""
    step = VanillaObservationProcessorStep()
    tr = create_transition(observation={})
    assert step(tr) == tr
def test_serialization_methods():
    """get_config/state_dict/load_state_dict/reset all run and return the right types."""
    step = VanillaObservationProcessorStep()

    assert isinstance(step.get_config(), dict)

    state = step.state_dict()
    assert isinstance(state, dict)

    # Neither restoring state nor resetting should raise.
    step.load_state_dict(state)
    step.reset()
def test_process_environment_state():
    """environment_state is renamed to the canonical key, batched, and kept float32."""
    step = VanillaObservationProcessorStep()
    raw = np.array([1.0, 2.0, 3.0], dtype=np.float32)

    out = step(create_transition(observation={"environment_state": raw}))[TransitionKey.OBSERVATION]

    assert OBS_ENV_STATE in out
    assert "environment_state" not in out
    state = out[OBS_ENV_STATE]
    assert state.shape == (1, 3)  # batch dimension added
    assert state.dtype == torch.float32
    torch.testing.assert_close(state, torch.tensor([[1.0, 2.0, 3.0]]))
def test_process_agent_pos():
    """agent_pos is renamed to the canonical state key, batched, and kept float32."""
    step = VanillaObservationProcessorStep()
    raw = np.array([0.5, -0.5, 1.0], dtype=np.float32)

    out = step(create_transition(observation={"agent_pos": raw}))[TransitionKey.OBSERVATION]

    assert OBS_STATE in out
    assert "agent_pos" not in out
    state = out[OBS_STATE]
    assert state.shape == (1, 3)  # batch dimension added
    assert state.dtype == torch.float32
    torch.testing.assert_close(state, torch.tensor([[0.5, -0.5, 1.0]]))
def test_process_batched_states():
    """States that already carry a batch dimension keep their shape."""
    step = VanillaObservationProcessorStep()
    obs = {
        "environment_state": np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32),
        "agent_pos": np.array([[0.5, -0.5], [1.0, -1.0]], dtype=np.float32),
    }

    out = step(create_transition(observation=obs))[TransitionKey.OBSERVATION]

    assert out[OBS_ENV_STATE].shape == (2, 2)
    assert out[OBS_STATE].shape == (2, 2)
def test_process_both_states():
    """Both state entries are renamed; unrelated entries survive untouched."""
    step = VanillaObservationProcessorStep()
    obs = {
        "environment_state": np.array([1.0, 2.0], dtype=np.float32),
        "agent_pos": np.array([0.5, -0.5], dtype=np.float32),
        "other_data": "keep_me",
    }

    out = step(create_transition(observation=obs))[TransitionKey.OBSERVATION]

    # Canonical keys present, raw keys gone.
    assert OBS_ENV_STATE in out and OBS_STATE in out
    assert "environment_state" not in out and "agent_pos" not in out
    # Extra data is preserved.
    assert out["other_data"] == "keep_me"
def test_no_states_in_observation():
    """Test processor when no states are in observation.

    The observation must pass through the processor unchanged.
    """
    processor = VanillaObservationProcessorStep()
    observation = {"other_data": np.array([1, 2, 3])}
    transition = create_transition(observation=observation)
    result = processor(transition)
    processed_obs = result[TransitionKey.OBSERVATION]
    # Compare key set and values individually. The previous assertion compared
    # the two dicts directly via assert_array_equal, which only avoided an
    # ambiguous-truth ValueError (array == array inside dict.__eq__) because
    # the array object happened to pass through by identity; it would break if
    # the processor ever returned an equal copy of the array.
    assert set(processed_obs.keys()) == set(observation.keys())
    np.testing.assert_array_equal(processed_obs["other_data"], observation["other_data"])
def test_complete_observation_processing():
    """Images and states are processed together; extra entries pass through."""
    step = VanillaObservationProcessorStep()
    obs = {
        "pixels": np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8),
        "environment_state": np.array([1.0, 2.0, 3.0], dtype=np.float32),
        "agent_pos": np.array([0.5, -0.5, 1.0], dtype=np.float32),
        "other_data": "preserve_me",
    }

    out = step(create_transition(observation=obs))[TransitionKey.OBSERVATION]

    # Image remapped and reshaped.
    assert OBS_IMAGE in out
    assert out[OBS_IMAGE].shape == (1, 3, 32, 32)
    # States remapped.
    assert OBS_ENV_STATE in out
    assert OBS_STATE in out
    # Raw keys removed.
    for raw_key in ("pixels", "environment_state", "agent_pos"):
        assert raw_key not in out
    # Unrelated data preserved.
    assert out["other_data"] == "preserve_me"
def test_image_only_processing():
    """An observation holding only pixels yields exactly one processed key."""
    step = VanillaObservationProcessorStep()
    frame = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)

    out = step(create_transition(observation={"pixels": frame}))[TransitionKey.OBSERVATION]

    assert OBS_IMAGE in out
    assert len(out) == 1
def test_state_only_processing():
    """An observation holding only agent_pos is renamed to the state key."""
    step = VanillaObservationProcessorStep()
    pos = np.array([1.0, 2.0], dtype=np.float32)

    out = step(create_transition(observation={"agent_pos": pos}))[TransitionKey.OBSERVATION]

    assert OBS_STATE in out
    assert "agent_pos" not in out
def test_empty_observation():
    """An empty observation stays empty."""
    step = VanillaObservationProcessorStep()
    out = step(create_transition(observation={}))[TransitionKey.OBSERVATION]
    assert out == {}
def test_equivalent_to_original_function():
    """The processor step matches preprocess_observation on a full observation."""
    from lerobot.envs.utils import preprocess_observation

    step = VanillaObservationProcessorStep()
    obs = {
        "pixels": np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8),
        "environment_state": np.array([1.0, 2.0, 3.0], dtype=np.float32),
        "agent_pos": np.array([0.5, -0.5, 1.0], dtype=np.float32),
    }

    # Run both the legacy function and the processor step on the same input.
    reference = preprocess_observation(obs)
    candidate = step(create_transition(observation=obs))[TransitionKey.OBSERVATION]

    assert set(reference.keys()) == set(candidate.keys())
    for key, ref_val in reference.items():
        torch.testing.assert_close(ref_val, candidate[key])
def test_equivalent_with_image_dict():
    """The processor step matches preprocess_observation for multi-camera input."""
    from lerobot.envs.utils import preprocess_observation

    step = VanillaObservationProcessorStep()
    obs = {
        "pixels": {
            "cam1": np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8),
            "cam2": np.random.randint(0, 256, size=(48, 48, 3), dtype=np.uint8),
        },
        "agent_pos": np.array([1.0, 2.0], dtype=np.float32),
    }

    # Run both the legacy function and the processor step on the same input.
    reference = preprocess_observation(obs)
    candidate = step(create_transition(observation=obs))[TransitionKey.OBSERVATION]

    assert set(reference.keys()) == set(candidate.keys())
    for key, ref_val in reference.items():
        torch.testing.assert_close(ref_val, candidate[key])
def test_image_processor_features_pixels_to_image(policy_feature_factory):
    """The `pixels` feature is remapped to the canonical image key; others are untouched."""
    step = VanillaObservationProcessorStep()
    feats = {
        PipelineFeatureType.OBSERVATION: {
            "pixels": policy_feature_factory(FeatureType.VISUAL, (3, 64, 64)),
            "keep": policy_feature_factory(FeatureType.ENV, (1,)),
        },
    }

    out = step.transform_features(feats.copy())
    obs_out = out[PipelineFeatureType.OBSERVATION]
    obs_in = feats[PipelineFeatureType.OBSERVATION]

    assert OBS_IMAGE in obs_out and obs_out[OBS_IMAGE] == obs_in["pixels"]
    assert "pixels" not in obs_out
    assert obs_out["keep"] == obs_in["keep"]
    assert_contract_is_typed(out)
def test_image_processor_features_observation_pixels_to_image(policy_feature_factory):
    """The `observation.pixels` feature is remapped to the canonical image key."""
    step = VanillaObservationProcessorStep()
    feats = {
        PipelineFeatureType.OBSERVATION: {
            "observation.pixels": policy_feature_factory(FeatureType.VISUAL, (3, 64, 64)),
            "keep": policy_feature_factory(FeatureType.ENV, (1,)),
        },
    }

    out = step.transform_features(feats.copy())
    obs_out = out[PipelineFeatureType.OBSERVATION]
    obs_in = feats[PipelineFeatureType.OBSERVATION]

    assert OBS_IMAGE in obs_out and obs_out[OBS_IMAGE] == obs_in["observation.pixels"]
    assert "observation.pixels" not in obs_out
    assert obs_out["keep"] == obs_in["keep"]
    assert_contract_is_typed(out)
def test_image_processor_features_multi_camera_and_prefixed(policy_feature_factory):
    """Per-camera pixel keys (bare or observation-prefixed) map to `<OBS_IMAGES>.<cam>`."""
    step = VanillaObservationProcessorStep()
    feats = {
        PipelineFeatureType.OBSERVATION: {
            "pixels.front": policy_feature_factory(FeatureType.VISUAL, (3, 64, 64)),
            "pixels.wrist": policy_feature_factory(FeatureType.VISUAL, (3, 64, 64)),
            "observation.pixels.rear": policy_feature_factory(FeatureType.VISUAL, (3, 64, 64)),
            "keep": policy_feature_factory(FeatureType.ENV, (7,)),
        },
    }

    out = step.transform_features(feats.copy())
    obs_out = out[PipelineFeatureType.OBSERVATION]
    obs_in = feats[PipelineFeatureType.OBSERVATION]

    # Each raw camera key is replaced by its canonical counterpart.
    remapped = {
        "pixels.front": f"{OBS_IMAGES}.front",
        "pixels.wrist": f"{OBS_IMAGES}.wrist",
        "observation.pixels.rear": f"{OBS_IMAGES}.rear",
    }
    for old_key, new_key in remapped.items():
        assert new_key in obs_out and obs_out[new_key] == obs_in[old_key]
        assert old_key not in obs_out

    assert obs_out["keep"] == obs_in["keep"]
    assert_contract_is_typed(out)
def test_state_processor_features_environment_and_agent_pos(policy_feature_factory):
    """environment_state/agent_pos features map to the canonical state keys."""
    step = VanillaObservationProcessorStep()
    feats = {
        PipelineFeatureType.OBSERVATION: {
            "environment_state": policy_feature_factory(FeatureType.STATE, (3,)),
            "agent_pos": policy_feature_factory(FeatureType.STATE, (7,)),
            "keep": policy_feature_factory(FeatureType.ENV, (1,)),
        },
    }

    out = step.transform_features(feats.copy())
    obs_out = out[PipelineFeatureType.OBSERVATION]
    obs_in = feats[PipelineFeatureType.OBSERVATION]

    assert OBS_ENV_STATE in obs_out and obs_out[OBS_ENV_STATE] == obs_in["environment_state"]
    assert OBS_STATE in obs_out and obs_out[OBS_STATE] == obs_in["agent_pos"]
    assert "environment_state" not in obs_out and "agent_pos" not in obs_out
    assert obs_out["keep"] == obs_in["keep"]
    assert_contract_is_typed(out)
def test_state_processor_features_prefixed_inputs(policy_feature_factory):
    """Already-canonical and observation-prefixed state keys normalize correctly."""
    step = VanillaObservationProcessorStep()
    feats = {
        PipelineFeatureType.OBSERVATION: {
            OBS_ENV_STATE: policy_feature_factory(FeatureType.STATE, (2,)),
            "observation.agent_pos": policy_feature_factory(FeatureType.STATE, (4,)),
        },
    }

    out = step.transform_features(feats.copy())
    obs_out = out[PipelineFeatureType.OBSERVATION]
    obs_in = feats[PipelineFeatureType.OBSERVATION]

    # The canonical env-state key is kept as-is; the prefixed pos key is normalized.
    assert OBS_ENV_STATE in obs_out and obs_out[OBS_ENV_STATE] == obs_in[OBS_ENV_STATE]
    assert OBS_STATE in obs_out and obs_out[OBS_STATE] == obs_in["observation.agent_pos"]
    assert "environment_state" not in obs_out and "agent_pos" not in obs_out
    assert_contract_is_typed(out)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_observation_processor.py",
"license": "Apache License 2.0",
"lines": 392,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_pipeline.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import tempfile
from collections.abc import Callable
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any
import pytest
import torch
import torch.nn as nn
from lerobot.configs.types import FeatureType, PipelineFeatureType, PolicyFeature
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features
from lerobot.processor import (
DataProcessorPipeline,
EnvTransition,
ProcessorStep,
ProcessorStepRegistry,
TransitionKey,
)
from lerobot.processor.converters import create_transition, identity_transition
from lerobot.utils.constants import ACTION, DONE, OBS_IMAGE, OBS_IMAGES, OBS_STATE, REWARD, TRUNCATED
from tests.conftest import assert_contract_is_typed
@dataclass
class MockStep(ProcessorStep):
    """Mock pipeline step for testing - demonstrates best practices.

    Shows the intended split between persistence channels:
    - JSON-serializable attributes (name, counter) belong in get_config()
    - state_dict() carries torch tensors only (none here)

    Because the counter is part of the config, it is restored when the step is
    rebuilt from its config during loading.
    """

    name: str = "mock_step"
    counter: int = 0

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Record the current counter under `<name>_counter` in complementary_data."""
        existing = transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
        # Copy (or create) the dict so the incoming transition is not mutated.
        updated = dict(existing) if existing is not None else {}
        updated[f"{self.name}_counter"] = self.counter
        self.counter += 1

        out = transition.copy()
        out[TransitionKey.COMPLEMENTARY_DATA] = updated
        return out

    def get_config(self) -> dict[str, Any]:
        """All JSON-serializable attributes; passed to __init__ when loading."""
        return {"name": self.name, "counter": self.counter}

    def state_dict(self) -> dict[str, torch.Tensor]:
        """No tensor state to persist."""
        return {}

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """No tensor state to restore."""
        pass

    def reset(self) -> None:
        self.counter = 0

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # Feature transformation is not under test here.
        return features
@dataclass
class MockStepWithoutOptionalMethods(ProcessorStep):
    """Mock step that only implements the required __call__ method."""

    multiplier: float = 2.0

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Scale the reward by `multiplier`; pass reward-less transitions through."""
        reward = transition.get(TransitionKey.REWARD)
        if reward is None:
            return transition
        scaled = transition.copy()
        scaled[TransitionKey.REWARD] = reward * self.multiplier
        return scaled

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # Feature transformation is not under test here.
        return features
@dataclass
class MockStepWithTensorState(ProcessorStep):
    """Mock step demonstrating mixed JSON attributes and tensor state."""

    # NOTE: the explicit __init__ below overrides the dataclass-generated one;
    # @dataclass is kept for the generated __repr__/__eq__ over these three
    # annotated config fields.
    name: str = "tensor_step"
    learning_rate: float = 0.01
    window_size: int = 10

    def __init__(self, name: str = "tensor_step", learning_rate: float = 0.01, window_size: int = 10):
        """Store JSON-serializable config and initialize tensor buffers."""
        self.name = name
        self.learning_rate = learning_rate
        self.window_size = window_size
        # Tensor state: a circular buffer of recent rewards plus a write counter.
        self.running_mean = torch.zeros(window_size)
        self.running_count = torch.tensor(0)

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Update running statistics."""
        reward = transition.get(TransitionKey.REWARD)
        if reward is not None:
            # Write into the buffer slot (wrapping around) and advance the counter.
            idx = self.running_count % self.window_size
            self.running_mean[idx] = reward
            self.running_count += 1
        return transition

    def get_config(self) -> dict[str, Any]:
        """Return only the JSON-serializable configuration."""
        # Only JSON-serializable attributes
        return {
            "name": self.name,
            "learning_rate": self.learning_rate,
            "window_size": self.window_size,
        }

    def state_dict(self) -> dict[str, torch.Tensor]:
        """Return only the torch tensor state."""
        # Only tensor state
        return {
            "running_mean": self.running_mean,
            "running_count": self.running_count,
        }

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """Replace the tensor buffers with the loaded ones (no copy)."""
        self.running_mean = state["running_mean"]
        self.running_count = state["running_count"]

    def reset(self) -> None:
        """Zero the tensor state in place."""
        self.running_mean.zero_()
        self.running_count.zero_()

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # We do not test features here
        return features
def test_empty_pipeline():
    """A pipeline with no steps returns its input unchanged."""
    pipeline = DataProcessorPipeline([], to_transition=identity_transition, to_output=identity_transition)
    tr = create_transition()
    assert pipeline(tr) == tr
    assert len(pipeline) == 0
def test_single_step_pipeline():
    """A single step runs and its counter advances across calls."""
    pipeline = DataProcessorPipeline(
        [MockStep("test_step")], to_transition=identity_transition, to_output=identity_transition
    )
    tr = create_transition()

    assert len(pipeline) == 1
    first = pipeline(tr)
    assert first[TransitionKey.COMPLEMENTARY_DATA]["test_step_counter"] == 0
    # A second call sees the incremented counter.
    second = pipeline(tr)
    assert second[TransitionKey.COMPLEMENTARY_DATA]["test_step_counter"] == 1
def test_multiple_steps_pipeline():
    """All steps in the pipeline run on each call."""
    pipeline = DataProcessorPipeline(
        [MockStep("step1"), MockStep("step2")],
        to_transition=identity_transition,
        to_output=identity_transition,
    )

    out = pipeline(create_transition())

    assert len(pipeline) == 2
    for name in ("step1", "step2"):
        assert out[TransitionKey.COMPLEMENTARY_DATA][f"{name}_counter"] == 0
def test_invalid_transition_format():
    """Non-dict inputs are rejected with a clear ValueError."""
    pipeline = DataProcessorPipeline([MockStep()])

    # A tuple (legacy gym-style layout) is not accepted.
    with pytest.raises(ValueError, match="EnvTransition must be a dictionary"):
        pipeline((None, None, 0.0, False, False, {}, {}))
    # Neither is a plain string.
    with pytest.raises(ValueError, match="EnvTransition must be a dictionary"):
        pipeline("not a dict")
def test_step_through():
    """step_through on an EnvTransition yields the original plus one result per step."""
    pipeline = DataProcessorPipeline([MockStep("step1"), MockStep("step2")])
    tr = create_transition()

    stages = list(pipeline.step_through(tr))

    assert len(stages) == 3  # original + 2 steps
    assert stages[0] == tr
    assert "step1_counter" in stages[1][TransitionKey.COMPLEMENTARY_DATA]
    assert "step2_counter" in stages[2][TransitionKey.COMPLEMENTARY_DATA]
    # Every yielded stage keeps the EnvTransition dict format.
    for stage in stages:
        assert isinstance(stage, dict)
        assert all(isinstance(key, TransitionKey) for key in stage)
def test_step_through_with_dict():
    """step_through accepts a flat batch dict and yields EnvTransition dicts."""
    pipeline = DataProcessorPipeline([MockStep("step1"), MockStep("step2")])
    batch = {
        OBS_IMAGE: None,
        ACTION: None,
        REWARD: 0.0,
        DONE: False,
        TRUNCATED: False,
        "info": {},
    }

    stages = list(pipeline.step_through(batch))
    assert len(stages) == 3  # original + 2 steps

    # Regardless of input format, every stage is keyed by TransitionKey values.
    valid_keys = {
        TransitionKey.OBSERVATION,
        TransitionKey.ACTION,
        TransitionKey.REWARD,
        TransitionKey.DONE,
        TransitionKey.TRUNCATED,
        TransitionKey.INFO,
        TransitionKey.COMPLEMENTARY_DATA,
    }
    for stage in stages:
        assert isinstance(stage, dict)
        assert set(stage).issubset(valid_keys)

    # The steps actually ran: their counters appear in complementary_data.
    assert stages[1].get(TransitionKey.COMPLEMENTARY_DATA, {}).get("step1_counter") == 0
    assert stages[2].get(TransitionKey.COMPLEMENTARY_DATA, {}).get("step1_counter") == 0
    assert stages[2].get(TransitionKey.COMPLEMENTARY_DATA, {}).get("step2_counter") == 0
def test_step_through_no_hooks():
    """Verify step_through skips hooks while __call__ still runs them."""
    pipeline = DataProcessorPipeline([MockStep("test_step")])
    hook_calls = []

    def tracking_hook(idx: int, transition: EnvTransition):
        hook_calls.append(f"hook_called_step_{idx}")

    pipeline.register_before_step_hook(tracking_hook)
    pipeline.register_after_step_hook(tracking_hook)

    transition = create_transition()
    outputs = list(pipeline.step_through(transition))

    # The step itself still executed (initial transition + one step result).
    assert len(outputs) == 2
    assert outputs[1][TransitionKey.COMPLEMENTARY_DATA]["test_step_counter"] == 0
    # ...but no hook fired during step_through.
    assert len(hook_calls) == 0

    # __call__ runs before + after hooks for the single step => two calls.
    hook_calls.clear()
    pipeline(transition)
    assert len(hook_calls) == 2
    assert hook_calls == ["hook_called_step_0", "hook_called_step_0"]
def test_indexing():
    """Test pipeline indexing."""
    step1 = MockStep("step1")
    step2 = MockStep("step2")
    pipeline = DataProcessorPipeline([step1, step2])

    # Integer indexing returns the step objects themselves.
    assert pipeline[0] is step1
    assert pipeline[1] is step2

    # Slice indexing produces a new pipeline over the selected steps.
    head = pipeline[0:1]
    assert isinstance(head, DataProcessorPipeline)
    assert len(head) == 1
    assert head[0] is step1
def test_hooks():
    """Test before/after step hooks."""
    pipeline = DataProcessorPipeline([MockStep("test_step")])
    seen_before = []
    seen_after = []

    def on_before(idx: int, transition: EnvTransition):
        seen_before.append(idx)

    def on_after(idx: int, transition: EnvTransition):
        seen_after.append(idx)

    pipeline.register_before_step_hook(on_before)
    pipeline.register_after_step_hook(on_after)

    pipeline(create_transition())

    # One step => each hook fired exactly once, with step index 0.
    assert seen_before == [0]
    assert seen_after == [0]
def test_unregister_hooks():
    """Test unregistering hooks from the pipeline."""
    pipeline = DataProcessorPipeline([MockStep("test_step")])
    transition = create_transition()

    # --- before_step hook ---
    seen_before = []

    def on_before(idx: int, transition: EnvTransition):
        seen_before.append(idx)

    pipeline.register_before_step_hook(on_before)
    pipeline(transition)
    assert len(seen_before) == 1  # hook active

    pipeline.unregister_before_step_hook(on_before)
    seen_before.clear()
    pipeline(transition)
    assert len(seen_before) == 0  # hook no longer fires

    # --- after_step hook ---
    seen_after = []

    def on_after(idx: int, transition: EnvTransition):
        seen_after.append(idx)

    pipeline.register_after_step_hook(on_after)
    pipeline(transition)
    assert len(seen_after) == 1

    pipeline.unregister_after_step_hook(on_after)
    seen_after.clear()
    pipeline(transition)
    assert len(seen_after) == 0
def test_unregister_nonexistent_hook():
    """Test error handling when unregistering hooks that don't exist.

    Fix: removed the dead local ``reset_hook`` which was defined but never
    used anywhere in the test.
    """
    pipeline = DataProcessorPipeline([MockStep()])

    def some_hook(idx: int, transition: EnvTransition):
        pass

    # Unregistering a hook that was never registered must raise a ValueError
    # naming the hook list it was searched in.
    with pytest.raises(ValueError, match="not found in before_step_hooks"):
        pipeline.unregister_before_step_hook(some_hook)
    with pytest.raises(ValueError, match="not found in after_step_hooks"):
        pipeline.unregister_after_step_hook(some_hook)
def test_multiple_hooks_and_selective_unregister():
    """Test registering multiple hooks and selectively unregistering them."""
    pipeline = DataProcessorPipeline([MockStep("step1"), MockStep("step2")])
    calls_1, calls_2, calls_3 = [], [], []

    def hook1(idx: int, transition: EnvTransition):
        calls_1.append(f"hook1_step{idx}")

    def hook2(idx: int, transition: EnvTransition):
        calls_2.append(f"hook2_step{idx}")

    def hook3(idx: int, transition: EnvTransition):
        calls_3.append(f"hook3_step{idx}")

    for hook in (hook1, hook2, hook3):
        pipeline.register_before_step_hook(hook)

    # All three hooks fire for both steps.
    transition = create_transition()
    pipeline(transition)
    assert calls_1 == ["hook1_step0", "hook1_step1"]
    assert calls_2 == ["hook2_step0", "hook2_step1"]
    assert calls_3 == ["hook3_step0", "hook3_step1"]

    for record in (calls_1, calls_2, calls_3):
        record.clear()

    # Drop only the middle hook; the other two keep firing.
    pipeline.unregister_before_step_hook(hook2)
    pipeline(transition)
    assert calls_1 == ["hook1_step0", "hook1_step1"]
    assert calls_2 == []  # hook2 was unregistered
    assert calls_3 == ["hook3_step0", "hook3_step1"]
def test_hook_execution_order_documentation():
    """Test and document that hooks run sequentially in registration order."""
    pipeline = DataProcessorPipeline([MockStep("step")])
    order = []

    def hook_a(idx: int, transition: EnvTransition):
        order.append("A")

    def hook_b(idx: int, transition: EnvTransition):
        order.append("B")

    def hook_c(idx: int, transition: EnvTransition):
        order.append("C")

    # Register in the order A, B, C.
    for hook in (hook_a, hook_b, hook_c):
        pipeline.register_before_step_hook(hook)

    transition = create_transition()
    pipeline(transition)
    # Execution order matches registration order.
    assert order == ["A", "B", "C"]

    # Removing B leaves A and C in their original order...
    pipeline.unregister_before_step_hook(hook_b)
    order.clear()
    pipeline(transition)
    assert order == ["A", "C"]

    # ...and re-registering B appends it at the end.
    pipeline.register_before_step_hook(hook_b)
    order.clear()
    pipeline(transition)
    assert order == ["A", "C", "B"]
def test_save_and_load_pretrained():
    """Test saving and loading a pipeline.

    JSON-serializable attributes (like ``counter``) are stored in the step
    config and restored when the step is recreated.
    """
    step1 = MockStep("step1")
    step2 = MockStep("step2")
    # Give the steps some state worth round-tripping.
    step1.counter = 5
    step2.counter = 10
    pipeline = DataProcessorPipeline([step1, step2], name="TestPipeline")

    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)

        # The config filename is derived from name="TestPipeline".
        config_path = Path(tmp_dir) / "testpipeline.json"
        assert config_path.exists()

        config = json.loads(config_path.read_text())
        assert config["name"] == "TestPipeline"
        assert len(config["steps"]) == 2
        # Counters live inside the step configs, not in separate state files.
        assert config["steps"][0]["config"]["counter"] == 5
        assert config["steps"][1]["config"]["counter"] == 10

        restored = DataProcessorPipeline.from_pretrained(tmp_dir, config_filename="testpipeline.json")
        assert restored.name == "TestPipeline"
        assert len(restored) == 2
        # Counters came back from the config.
        assert restored.steps[0].counter == 5
        assert restored.steps[1].counter == 10
def test_step_without_optional_methods():
    """Test pipeline with steps that don't implement optional methods."""
    pipeline = DataProcessorPipeline(
        [MockStepWithoutOptionalMethods(multiplier=3.0)],
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    result = pipeline(create_transition(reward=2.0))
    # Reward is multiplied: 2.0 * 3.0.
    assert result[TransitionKey.REWARD] == 6.0

    # reset() must not fail even though the step doesn't implement it.
    pipeline.reset()

    # Save/load also works without the optional methods.
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        reloaded = DataProcessorPipeline.from_pretrained(
            tmp_dir, config_filename="dataprocessorpipeline.json"
        )
        assert len(reloaded) == 1
def test_mixed_json_and_tensor_state():
    """Test step with both JSON attributes and tensor state."""
    step = MockStepWithTensorState(name="stats", learning_rate=0.05, window_size=5)
    pipeline = DataProcessorPipeline([step])
    # Process some transitions with rewards
    for i in range(10):
        transition = create_transition(reward=float(i))
        pipeline(transition)
    # Check state
    assert step.running_count.item() == 10
    assert step.learning_rate == 0.05
    # Save and load
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        # Check that both config and state files were created
        config_path = Path(tmp_dir) / "dataprocessorpipeline.json"  # Derived from the default pipeline name
        state_path = Path(tmp_dir) / "dataprocessorpipeline_step_0.safetensors"
        assert config_path.exists()
        assert state_path.exists()
        # Load and verify
        loaded_pipeline = DataProcessorPipeline.from_pretrained(
            tmp_dir, config_filename="dataprocessorpipeline.json"
        )
        loaded_step = loaded_pipeline.steps[0]
        # Check JSON attributes were restored
        assert loaded_step.name == "stats"
        assert loaded_step.learning_rate == 0.05
        assert loaded_step.window_size == 5
        # Check tensor state was restored
        assert loaded_step.running_count.item() == 10
        assert torch.allclose(loaded_step.running_mean, step.running_mean)
class MockModuleStep(ProcessorStep, nn.Module):
    """Mock step that inherits from nn.Module to test state_dict handling of module parameters.

    Exercises the pipeline's serialization path for steps whose tensor state
    lives in nn.Module parameters rather than plain attributes.
    """
    def __init__(self, input_dim: int = 10, hidden_dim: int = 5):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.linear = nn.Linear(input_dim, hidden_dim)
        # Non-trainable parameter so it still shows up in nn.Module.state_dict().
        self.running_mean = nn.Parameter(torch.zeros(hidden_dim), requires_grad=False)
        self.counter = 0  # Non-tensor state
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.linear(x)
    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Process transition and update running mean.

        NOTE(review): overriding __call__ bypasses nn.Module's call machinery
        (forward hooks etc.) — intentional here since this step is invoked via
        the processor protocol, not as a module.
        """
        obs = transition.get(TransitionKey.OBSERVATION)
        if obs is not None and isinstance(obs, torch.Tensor):
            # Process observation through linear layer
            processed = self.forward(obs[:, : self.input_dim])
            # Update running mean in-place (don't reassign the parameter)
            with torch.no_grad():
                # Exponential moving average: mean = 0.9*mean + 0.1*batch_mean
                self.running_mean.mul_(0.9).add_(processed.mean(dim=0), alpha=0.1)
            self.counter += 1
        return transition
    def get_config(self) -> dict[str, Any]:
        # counter is JSON-serializable, so it rides along in the config.
        return {
            "input_dim": self.input_dim,
            "hidden_dim": self.hidden_dim,
            "counter": self.counter,
        }
    def state_dict(self) -> dict[str, torch.Tensor]:
        """Override to return all module parameters and buffers."""
        # Get the module's state dict (includes all parameters and buffers)
        return nn.Module.state_dict(self)
    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """Override to load all module parameters and buffers."""
        # Use the module's load_state_dict
        nn.Module.load_state_dict(self, state)
    def reset(self) -> None:
        # Clears statistics but keeps the learned linear weights.
        self.running_mean.zero_()
        self.counter = 0
    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # We do not test features here
        return features
class MockNonModuleStepWithState(ProcessorStep):
    """Mock step that explicitly does NOT inherit from nn.Module but has tensor state.

    This tests the state_dict/load_state_dict path for regular classes.
    """
    def __init__(self, name: str = "non_module_step", feature_dim: int = 10):
        self.name = name
        self.feature_dim = feature_dim
        # Initialize tensor state - these are regular tensors, not nn.Parameters
        self.weights = torch.randn(feature_dim, feature_dim)
        self.bias = torch.zeros(feature_dim)
        self.running_stats = torch.zeros(feature_dim)
        self.step_count = torch.tensor(0)
        # Non-tensor state
        self.config_value = 42
        # NOTE(review): nothing in this class appends to `history`; it is only
        # cleared in reset(). Presumably kept for interface symmetry — confirm.
        self.history = []
    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Process transition using tensor operations."""
        obs = transition.get(TransitionKey.OBSERVATION)
        comp_data = transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
        # Only act when the observation is a tensor large enough to slice.
        if obs is not None and isinstance(obs, torch.Tensor) and obs.numel() >= self.feature_dim:
            # Perform some tensor operations
            flat_obs = obs.flatten()[: self.feature_dim]
            # Simple linear transformation (ensure dimensions match for matmul)
            output = torch.matmul(self.weights.T, flat_obs) + self.bias
            # Update running stats (exponential moving average; reassigns the
            # attribute rather than mutating in place)
            self.running_stats = 0.9 * self.running_stats + 0.1 * output
            self.step_count += 1
            # Add to complementary data
            comp_data = {} if comp_data is None else dict(comp_data)
            comp_data[f"{self.name}_mean_output"] = output.mean().item()
            comp_data[f"{self.name}_steps"] = self.step_count.item()
            # Return updated transition
            new_transition = transition.copy()
            new_transition[TransitionKey.COMPLEMENTARY_DATA] = comp_data
            return new_transition
        # Pass-through when the observation is missing or too small.
        return transition
    def get_config(self) -> dict[str, Any]:
        # Only JSON-serializable attributes; tensors go through state_dict().
        return {
            "name": self.name,
            "feature_dim": self.feature_dim,
            "config_value": self.config_value,
        }
    def state_dict(self) -> dict[str, torch.Tensor]:
        """Return only tensor state."""
        return {
            "weights": self.weights,
            "bias": self.bias,
            "running_stats": self.running_stats,
            "step_count": self.step_count,
        }
    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """Load tensor state.

        Note: stores references to the passed tensors (no copy), so the
        caller's tensors are aliased after loading.
        """
        self.weights = state["weights"]
        self.bias = state["bias"]
        self.running_stats = state["running_stats"]
        self.step_count = state["step_count"]
    def reset(self) -> None:
        """Reset statistics but keep learned parameters."""
        self.running_stats.zero_()
        self.step_count.zero_()
        self.history.clear()
    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # We do not test features here
        return features
# Tests for overrides functionality
class MockStepWithNonSerializableParam(ProcessorStep):
    """Mock step that requires a non-serializable parameter.

    Fix: dropped the field-less ``@dataclass`` decorator. The class defines
    its own ``__init__`` (which dataclass would not replace), so the decorator
    only injected a zero-field ``__eq__`` making *all* instances compare
    equal — misleading for tests. Also replaced ``(int | float)`` (a 1-tuple
    wrapping a union) with the plain tuple ``(int, float)``.
    """

    def __init__(self, name: str = "mock_env_step", multiplier: float = 1.0, env: Any = None):
        self.name = name
        # Validate multiplier up front so that bad from_pretrained overrides
        # fail with a clear error (exercised by the override tests).
        if isinstance(multiplier, str):
            raise ValueError(f"multiplier must be a number, got string '{multiplier}'")
        if not isinstance(multiplier, (int, float)):
            raise TypeError(f"multiplier must be a number, got {type(multiplier).__name__}")
        self.multiplier = float(multiplier)
        self.env = env  # Non-serializable parameter (like gym.Env)

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Scale the reward and, when env is set, record its string form."""
        reward = transition.get(TransitionKey.REWARD)
        comp_data = transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
        # Use the env parameter if provided
        if self.env is not None:
            comp_data = {} if comp_data is None else dict(comp_data)
            comp_data[f"{self.name}_env_info"] = str(self.env)
        # Apply multiplier to reward
        new_transition = transition.copy()
        if reward is not None:
            new_transition[TransitionKey.REWARD] = reward * self.multiplier
        if comp_data:
            new_transition[TransitionKey.COMPLEMENTARY_DATA] = comp_data
        return new_transition

    def get_config(self) -> dict[str, Any]:
        # Note: env is intentionally NOT included here as it's not serializable;
        # it must be re-supplied via from_pretrained overrides.
        return {
            "name": self.name,
            "multiplier": self.multiplier,
        }

    def state_dict(self) -> dict[str, torch.Tensor]:
        return {}

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        pass

    def reset(self) -> None:
        pass

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # We do not test features here
        return features
@ProcessorStepRegistry.register("registered_mock_step")
@dataclass
class RegisteredMockStep(ProcessorStep):
    """Mock step registered in the registry under ``registered_mock_step``."""

    value: int = 42
    device: str = "cpu"

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Record value/device in the transition's complementary data."""
        existing = transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
        comp_data = dict(existing) if existing is not None else {}
        comp_data["registered_step_value"] = self.value
        comp_data["registered_step_device"] = self.device
        updated = transition.copy()
        updated[TransitionKey.COMPLEMENTARY_DATA] = comp_data
        return updated

    def get_config(self) -> dict[str, Any]:
        return {"value": self.value, "device": self.device}

    def state_dict(self) -> dict[str, torch.Tensor]:
        return {}

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        pass

    def reset(self) -> None:
        pass

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # Feature handling is not under test here.
        return features
class MockEnvironment:
    """Mock environment for testing non-serializable parameters."""

    def __init__(self, name: str):
        # Only the name is stored; __str__ embeds it for test assertions.
        self.name = name

    def __str__(self) -> str:
        return f"MockEnvironment({self.name})"
def test_from_pretrained_with_overrides():
    """Test loading processor with parameter overrides."""
    pipeline = DataProcessorPipeline(
        [
            MockStepWithNonSerializableParam(name="env_step", multiplier=2.0),
            RegisteredMockStep(value=100, device="cpu"),
        ],
        name="TestOverrides",
    )

    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)

        mock_env = MockEnvironment("test_env")
        loaded = DataProcessorPipeline.from_pretrained(
            tmp_dir,
            config_filename="testoverrides.json",
            overrides={
                "MockStepWithNonSerializableParam": {
                    "env": mock_env,  # re-inject the non-serializable dependency
                    "multiplier": 3.0,  # and override the multiplier
                },
                "registered_mock_step": {"device": "cuda", "value": 200},
            },
            to_transition=identity_transition,
            to_output=identity_transition,
        )

        assert len(loaded) == 2
        assert loaded.name == "TestOverrides"

        result = loaded(create_transition(reward=1.0))
        comp_data = result[TransitionKey.COMPLEMENTARY_DATA]
        # Override values show up in the processed transition.
        assert "env_step_env_info" in comp_data
        assert comp_data["env_step_env_info"] == "MockEnvironment(test_env)"
        assert comp_data["registered_step_value"] == 200
        assert comp_data["registered_step_device"] == "cuda"
        # Reward reflects the overridden multiplier: 1.0 * 3.0.
        assert result[TransitionKey.REWARD] == 3.0
def test_from_pretrained_with_partial_overrides():
    """Test loading processor with overrides for only some steps."""
    pipeline = DataProcessorPipeline(
        [
            MockStepWithNonSerializableParam(name="step1", multiplier=1.0),
            MockStepWithNonSerializableParam(name="step2", multiplier=2.0),
        ]
    )

    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)

        # Overrides are keyed by class name, so BOTH steps of this class
        # receive the new multiplier.
        loaded = DataProcessorPipeline.from_pretrained(
            tmp_dir,
            config_filename="dataprocessorpipeline.json",
            overrides={"MockStepWithNonSerializableParam": {"multiplier": 5.0}},
            to_transition=identity_transition,
            to_output=identity_transition,
        )

        result = loaded(create_transition(reward=1.0))
        # 1.0 * 5.0 (first step, overridden) * 5.0 (second step, also overridden).
        assert result[TransitionKey.REWARD] == 25.0
def test_from_pretrained_invalid_override_key():
    """Test that invalid override keys raise KeyError."""
    pipeline = DataProcessorPipeline([MockStepWithNonSerializableParam()])
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        # An override targeting a step that doesn't exist must be rejected.
        with pytest.raises(KeyError, match="Override keys.*do not match any step"):
            DataProcessorPipeline.from_pretrained(
                tmp_dir,
                config_filename="dataprocessorpipeline.json",
                overrides={"NonExistentStep": {"param": "value"}},
            )
def test_from_pretrained_multiple_invalid_override_keys():
    """Test that multiple invalid override keys are reported."""
    pipeline = DataProcessorPipeline([MockStepWithNonSerializableParam()])
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        bad_overrides = {
            "NonExistentStep1": {"param": "value1"},
            "NonExistentStep2": {"param": "value2"},
        }
        with pytest.raises(KeyError) as exc_info:
            DataProcessorPipeline.from_pretrained(
                tmp_dir, config_filename="dataprocessorpipeline.json", overrides=bad_overrides
            )
        # The error names every unknown key and lists the valid alternatives.
        message = str(exc_info.value)
        for fragment in ("NonExistentStep1", "NonExistentStep2", "Available step keys"):
            assert fragment in message
def test_from_pretrained_registered_step_override():
    """Test overriding registered steps using registry names."""
    pipeline = DataProcessorPipeline([RegisteredMockStep(value=50, device="cpu")])
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        # Registered steps are addressed by registry name, not class name.
        loaded = DataProcessorPipeline.from_pretrained(
            tmp_dir,
            config_filename="dataprocessorpipeline.json",
            overrides={"registered_mock_step": {"value": 999, "device": "cuda"}},
            to_transition=identity_transition,
            to_output=identity_transition,
        )
        result = loaded(create_transition())
        comp_data = result[TransitionKey.COMPLEMENTARY_DATA]
        assert comp_data["registered_step_value"] == 999
        assert comp_data["registered_step_device"] == "cuda"
def test_from_pretrained_mixed_registered_and_unregistered():
    """Test overriding both registered and unregistered steps."""
    pipeline = DataProcessorPipeline(
        [
            MockStepWithNonSerializableParam(name="unregistered", multiplier=1.0),
            RegisteredMockStep(value=10, device="cpu"),
        ]
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        mock_env = MockEnvironment("mixed_test")
        loaded = DataProcessorPipeline.from_pretrained(
            tmp_dir,
            config_filename="dataprocessorpipeline.json",
            overrides={
                # Unregistered step: addressed by class name.
                "MockStepWithNonSerializableParam": {"env": mock_env, "multiplier": 4.0},
                # Registered step: addressed by registry name.
                "registered_mock_step": {"value": 777},
            },
            to_transition=identity_transition,
            to_output=identity_transition,
        )
        result = loaded(create_transition(reward=2.0))
        comp_data = result[TransitionKey.COMPLEMENTARY_DATA]
        assert comp_data["unregistered_env_info"] == "MockEnvironment(mixed_test)"
        assert comp_data["registered_step_value"] == 777
        assert result[TransitionKey.REWARD] == 8.0  # 2.0 * 4.0
def test_from_pretrained_no_overrides():
    """Test that from_pretrained works without overrides (backward compatibility)."""
    pipeline = DataProcessorPipeline(
        [MockStepWithNonSerializableParam(name="no_override", multiplier=3.0)]
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        loaded = DataProcessorPipeline.from_pretrained(
            tmp_dir,
            config_filename="dataprocessorpipeline.json",
            to_transition=identity_transition,
            to_output=identity_transition,
        )
        assert len(loaded) == 1
        # The step still works; its env simply stays None.
        result = loaded(create_transition(reward=1.0))
        assert result[TransitionKey.REWARD] == 3.0  # 1.0 * 3.0
def test_from_pretrained_empty_overrides():
    """Test that from_pretrained works with an empty overrides dict."""
    pipeline = DataProcessorPipeline([MockStepWithNonSerializableParam(multiplier=2.0)])
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        loaded = DataProcessorPipeline.from_pretrained(
            tmp_dir,
            config_filename="dataprocessorpipeline.json",
            overrides={},
            to_transition=identity_transition,
            to_output=identity_transition,
        )
        assert len(loaded) == 1
        # No overrides => the saved multiplier applies unchanged.
        result = loaded(create_transition(reward=1.0))
        assert result[TransitionKey.REWARD] == 2.0
def test_from_pretrained_override_instantiation_error():
    """Test that instantiation errors with overrides are properly reported."""
    pipeline = DataProcessorPipeline([MockStepWithNonSerializableParam(multiplier=1.0)])
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        # A string multiplier makes the step constructor raise; that failure
        # must surface as a clear "failed to instantiate" error.
        with pytest.raises(ValueError, match="Failed to instantiate processor step"):
            DataProcessorPipeline.from_pretrained(
                tmp_dir,
                config_filename="dataprocessorpipeline.json",
                overrides={"MockStepWithNonSerializableParam": {"multiplier": "invalid_type"}},
            )
def test_from_pretrained_with_state_and_overrides():
    """Test that overrides work correctly with steps that have tensor state."""
    step = MockStepWithTensorState(name="tensor_step", learning_rate=0.01, window_size=5)
    pipeline = DataProcessorPipeline([step])

    # Run a few transitions so there is tensor state worth saving.
    for reward in range(10):
        pipeline(create_transition(reward=float(reward)))

    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        loaded = DataProcessorPipeline.from_pretrained(
            tmp_dir,
            config_filename="dataprocessorpipeline.json",
            overrides={"MockStepWithTensorState": {"learning_rate": 0.05, "window_size": 3}},
        )
        loaded_step = loaded.steps[0]

        # Config overrides applied...
        assert loaded_step.learning_rate == 0.05
        assert loaded_step.window_size == 3
        # ...while tensor state is preserved from the checkpoint.
        assert loaded_step.running_count.item() == 10
        # running_mean keeps the shape it was saved with (window_size=5);
        # the overridden window_size only affects future operations.
        assert loaded_step.running_mean.shape[0] == 5
def test_from_pretrained_override_error_messages():
    """Test that error messages for override failures are helpful."""
    pipeline = DataProcessorPipeline(
        [MockStepWithNonSerializableParam(name="step1"), RegisteredMockStep()]
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        with pytest.raises(KeyError) as exc_info:
            DataProcessorPipeline.from_pretrained(
                tmp_dir,
                config_filename="dataprocessorpipeline.json",
                overrides={"WrongStepName": {"param": "value"}},
            )
        # The message names the bad key and lists every valid step key.
        message = str(exc_info.value)
        for fragment in (
            "WrongStepName",
            "Available step keys",
            "MockStepWithNonSerializableParam",
            "registered_mock_step",
        ):
            assert fragment in message
def test_repr_empty_processor():
    """Test __repr__ with empty processor."""
    # No steps: the step list renders empty.
    rendered = repr(DataProcessorPipeline())
    assert rendered == "DataProcessorPipeline(name='DataProcessorPipeline', steps=0: [])"
def test_repr_single_step():
    """Test __repr__ with single step."""
    pipeline = DataProcessorPipeline([MockStep("test_step")])
    # One step: its class name appears in the list.
    assert repr(pipeline) == "DataProcessorPipeline(name='DataProcessorPipeline', steps=1: [MockStep])"
def test_repr_multiple_steps_under_limit():
    """Test __repr__ with 2-3 steps (all shown)."""
    step1 = MockStep("step1")
    step2 = MockStepWithoutOptionalMethods()

    # Two steps: every class name is listed.
    two = DataProcessorPipeline([step1, step2])
    assert (
        repr(two)
        == "DataProcessorPipeline(name='DataProcessorPipeline', steps=2: [MockStep, MockStepWithoutOptionalMethods])"
    )

    # Three steps: boundary case, still no truncation.
    three = DataProcessorPipeline([step1, step2, MockStepWithTensorState()])
    assert (
        repr(three)
        == "DataProcessorPipeline(name='DataProcessorPipeline', steps=3: [MockStep, MockStepWithoutOptionalMethods, MockStepWithTensorState])"
    )
def test_repr_many_steps_truncated():
    """Test __repr__ with more than 3 steps (truncated with ellipsis)."""
    pipeline = DataProcessorPipeline(
        [
            MockStep("step1"),
            MockStepWithoutOptionalMethods(),
            MockStepWithTensorState(),
            MockModuleStep(),
            MockNonModuleStepWithState(),
        ]
    )
    # First two and the last class name are shown; the middle collapses to "...".
    assert (
        repr(pipeline)
        == "DataProcessorPipeline(name='DataProcessorPipeline', steps=5: [MockStep, MockStepWithoutOptionalMethods, ..., MockNonModuleStepWithState])"
    )
def test_repr_with_custom_name():
    """Test __repr__ with custom processor name."""
    pipeline = DataProcessorPipeline([MockStep("test_step")], name="CustomProcessor")
    # The custom name replaces the class-derived default in the repr.
    assert repr(pipeline) == "DataProcessorPipeline(name='CustomProcessor', steps=1: [MockStep])"
def test_repr_with_seed():
    """Test __repr__ for a default-named pipeline.

    NOTE(review): despite the test name, no seed is passed anywhere below —
    presumably left over from an older constructor signature; confirm and
    rename if so.
    """
    step = MockStep("test_step")
    pipeline = DataProcessorPipeline([step])
    repr_str = repr(pipeline)
    expected = "DataProcessorPipeline(name='DataProcessorPipeline', steps=1: [MockStep])"
    assert repr_str == expected
def test_repr_with_custom_name_and_seed():
    """Test __repr__ with both custom name and seed."""
    pipeline = DataProcessorPipeline(
        [MockStep("step1"), MockStepWithoutOptionalMethods()], name="MyProcessor"
    )
    assert (
        repr(pipeline)
        == "DataProcessorPipeline(name='MyProcessor', steps=2: [MockStep, MockStepWithoutOptionalMethods])"
    )
def test_repr_without_seed():
    """Test __repr__ when seed is explicitly None (should not show seed).

    NOTE(review): no seed argument actually appears below — likely a leftover
    from an older constructor signature; verify.
    """
    step = MockStep("test_step")
    pipeline = DataProcessorPipeline([step], name="TestProcessor")
    repr_str = repr(pipeline)
    expected = "DataProcessorPipeline(name='TestProcessor', steps=1: [MockStep])"
    assert repr_str == expected
def test_repr_various_step_types():
    """Test __repr__ with different types of steps to verify class name extraction."""
    pipeline = DataProcessorPipeline(
        [MockStep(), MockStepWithTensorState(), MockModuleStep(), MockNonModuleStepWithState()],
        name="MixedSteps",
    )
    # Four steps: first two + last are named, the middle one is elided.
    assert (
        repr(pipeline)
        == "DataProcessorPipeline(name='MixedSteps', steps=4: [MockStep, MockStepWithTensorState, ..., MockNonModuleStepWithState])"
    )
def test_repr_edge_case_long_names():
    """Test __repr__ handles steps with long class names properly."""
    pipeline = DataProcessorPipeline(
        [
            MockStepWithNonSerializableParam(),
            MockStepWithoutOptionalMethods(),
            MockStepWithTensorState(),
            MockNonModuleStepWithState(),
        ],
        name="LongNames",
    )
    # Long class names are rendered in full, with the usual middle elision.
    assert (
        repr(pipeline)
        == "DataProcessorPipeline(name='LongNames', steps=4: [MockStepWithNonSerializableParam, MockStepWithoutOptionalMethods, ..., MockNonModuleStepWithState])"
    )
# Tests for config filename features and multiple processors
def test_save_with_custom_config_filename():
    """Test saving processor with custom config filename."""
    pipeline = DataProcessorPipeline([MockStep("test")], name="TestProcessor")
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir, config_filename="my_custom_config.json")

        # The custom filename wins over the name-derived default.
        config_path = Path(tmp_dir) / "my_custom_config.json"
        assert config_path.exists()
        assert json.loads(config_path.read_text())["name"] == "TestProcessor"

        # Loading requires the same explicit filename.
        loaded = DataProcessorPipeline.from_pretrained(tmp_dir, config_filename="my_custom_config.json")
        assert loaded.name == "TestProcessor"
def test_multiple_processors_same_directory():
    """Test saving multiple processors to the same directory with different config files."""
    preprocessor = DataProcessorPipeline([MockStep("pre1"), MockStep("pre2")], name="preprocessor")
    postprocessor = DataProcessorPipeline(
        [MockStepWithoutOptionalMethods(multiplier=0.5)], name="postprocessor"
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        # Both land in one directory, each under its name-derived config file.
        preprocessor.save_pretrained(tmp_dir)
        postprocessor.save_pretrained(tmp_dir)
        assert (Path(tmp_dir) / "preprocessor.json").exists()
        assert (Path(tmp_dir) / "postprocessor.json").exists()

        loaded_pre = DataProcessorPipeline.from_pretrained(tmp_dir, config_filename="preprocessor.json")
        loaded_post = DataProcessorPipeline.from_pretrained(tmp_dir, config_filename="postprocessor.json")
        assert loaded_pre.name == "preprocessor"
        assert loaded_post.name == "postprocessor"
        assert len(loaded_pre) == 2
        assert len(loaded_post) == 1
def test_explicit_config_filename_loading():
    """from_pretrained always takes an explicit config filename (no auto-detection)."""
    processor = DataProcessorPipeline([MockStepWithTensorState()], name="SingleConfig")
    with tempfile.TemporaryDirectory() as workdir:
        processor.save_pretrained(workdir)
        # The saved filename is the lowercased pipeline name.
        restored = DataProcessorPipeline.from_pretrained(workdir, config_filename="singleconfig.json")
        assert restored.name == "SingleConfig"
def test_explicit_config_selection_with_multiple_configs():
    """When a directory holds several configs, each one can be selected explicitly."""
    first = DataProcessorPipeline([MockStep()], name="processor1")
    second = DataProcessorPipeline([MockStep()], name="processor2")
    with tempfile.TemporaryDirectory() as workdir:
        first.save_pretrained(workdir)
        second.save_pretrained(workdir)
        # Picking a config file by name loads exactly that pipeline.
        for cfg, expected_name in (("processor1.json", "processor1"), ("processor2.json", "processor2")):
            restored = DataProcessorPipeline.from_pretrained(workdir, config_filename=cfg)
            assert restored.name == expected_name
def test_state_file_naming_with_indices():
    """State files carry the pipeline-name prefix plus a per-step index so they never collide."""
    steps = [
        MockStepWithTensorState(name="norm1", window_size=5),
        MockStepWithTensorState(name="norm2", window_size=10),
        MockModuleStep(input_dim=5),
    ]
    processor = DataProcessorPipeline(steps)
    # Push a few transitions through so each step accumulates state worth saving.
    for step_num in range(5):
        processor(create_transition(observation=torch.randn(2, 5), reward=float(step_num)))
    with tempfile.TemporaryDirectory() as workdir:
        processor.save_pretrained(workdir)
        # One state file per step, named <pipeline>_step_<index>.safetensors.
        saved_names = sorted(p.name for p in Path(workdir).glob("*.safetensors"))
        assert saved_names == [f"dataprocessorpipeline_step_{i}.safetensors" for i in range(3)]
def test_state_file_naming_with_registry():
    """Test state file naming for registered steps includes pipeline name, index and registry name."""
    # Register a test step
    @ProcessorStepRegistry.register("test_stateful_step")
    @dataclass
    class TestStatefulStep(ProcessorStep):
        # Minimal stateful step: one config scalar plus a random tensor saved as state.
        value: int = 0
        def __init__(self, value: int = 0):
            self.value = value
            self.state_tensor = torch.randn(3, 3)
        def __call__(self, transition: EnvTransition) -> EnvTransition:
            # Identity on the data path; only persistence naming is under test.
            return transition
        def get_config(self):
            return {"value": self.value}
        def state_dict(self):
            return {"state_tensor": self.state_tensor}
        def load_state_dict(self, state):
            self.state_tensor = state["state_tensor"]
        def transform_features(
            self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
        ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
            # We do not test features here
            return features
    try:
        # Create pipeline with registered steps
        step1 = TestStatefulStep(1)
        step2 = TestStatefulStep(2)
        pipeline = DataProcessorPipeline([step1, step2])
        with tempfile.TemporaryDirectory() as tmp_dir:
            pipeline.save_pretrained(tmp_dir)
            # Check state files
            state_files = sorted(Path(tmp_dir).glob("*.safetensors"))
            assert len(state_files) == 2
            # Should include pipeline name, index and registry name
            expected_names = [
                "dataprocessorpipeline_step_0_test_stateful_step.safetensors",
                "dataprocessorpipeline_step_1_test_stateful_step.safetensors",
            ]
            actual_names = [f.name for f in state_files]
            assert actual_names == expected_names
    finally:
        # Cleanup registry so later tests see a clean registry state.
        ProcessorStepRegistry.unregister("test_stateful_step")
# More comprehensive override tests
def test_override_with_nested_config():
    """Test overrides with nested configuration dictionaries."""
    @ProcessorStepRegistry.register("complex_config_step")
    @dataclass
    class ComplexConfigStep(ProcessorStep):
        # A scalar plus a nested dict config to exercise deep overrides.
        name: str = "complex"
        simple_param: int = 42
        nested_config: dict = None
        def __post_init__(self):
            # Avoid a mutable dataclass default; fill in the nested default lazily.
            if self.nested_config is None:
                self.nested_config = {"level1": {"level2": "default"}}
        def __call__(self, transition: EnvTransition) -> EnvTransition:
            # Surface the nested value in complementary data so the test can observe it.
            comp_data = transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
            comp_data = dict(comp_data)
            comp_data["config_value"] = self.nested_config.get("level1", {}).get("level2", "missing")
            new_transition = transition.copy()
            new_transition[TransitionKey.COMPLEMENTARY_DATA] = comp_data
            return new_transition
        def get_config(self):
            return {"name": self.name, "simple_param": self.simple_param, "nested_config": self.nested_config}
        def transform_features(
            self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
        ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
            # We do not test features here
            return features
    try:
        step = ComplexConfigStep()
        pipeline = DataProcessorPipeline([step])
        with tempfile.TemporaryDirectory() as tmp_dir:
            pipeline.save_pretrained(tmp_dir)
            # Load with nested override: the whole nested_config dict is replaced.
            loaded = DataProcessorPipeline.from_pretrained(
                tmp_dir,
                config_filename="dataprocessorpipeline.json",
                overrides={"complex_config_step": {"nested_config": {"level1": {"level2": "overridden"}}}},
                to_transition=identity_transition,
                to_output=identity_transition,
            )
            # Test that override worked by reading the value the step publishes.
            transition = create_transition()
            result = loaded(transition)
            assert result[TransitionKey.COMPLEMENTARY_DATA]["config_value"] == "overridden"
    finally:
        # Always deregister so later tests see a clean registry.
        ProcessorStepRegistry.unregister("complex_config_step")
def test_override_preserves_defaults():
    """An override replaces only the keys it names; all other saved values survive."""
    processor = DataProcessorPipeline([MockStepWithNonSerializableParam(name="test", multiplier=2.0)])
    with tempfile.TemporaryDirectory() as workdir:
        processor.save_pretrained(workdir)
        restored = DataProcessorPipeline.from_pretrained(
            workdir,
            config_filename="dataprocessorpipeline.json",
            overrides={"MockStepWithNonSerializableParam": {"multiplier": 5.0}},
        )
        restored_step = restored.steps[0]
        # multiplier was overridden; name comes straight from the saved config.
        assert restored_step.multiplier == 5.0
        assert restored_step.name == "test"
def test_override_type_validation():
    """A badly typed override surfaces as a ValueError during step instantiation."""
    processor = DataProcessorPipeline([MockStepWithTensorState(learning_rate=0.01)])
    with tempfile.TemporaryDirectory() as workdir:
        processor.save_pretrained(workdir)
        # window_size expects an int; a string must make instantiation fail.
        bad_overrides = {"MockStepWithTensorState": {"window_size": "not_an_int"}}
        with pytest.raises(ValueError, match="Failed to instantiate"):
            DataProcessorPipeline.from_pretrained(
                workdir, config_filename="dataprocessorpipeline.json", overrides=bad_overrides
            )
def test_override_with_callables():
    """Test overriding with callable objects."""
    @ProcessorStepRegistry.register("callable_step")
    @dataclass
    class CallableStep(ProcessorStep):
        # transform_fn is intentionally not serialized (see get_config) so it must
        # be re-supplied via overrides at load time.
        name: str = "callable_step"
        transform_fn: Any = None
        def __call__(self, transition: EnvTransition) -> EnvTransition:
            # Apply transform_fn to every observation value when both are present.
            obs = transition.get(TransitionKey.OBSERVATION)
            if obs is not None and self.transform_fn is not None:
                processed_obs = {}
                for k, v in obs.items():
                    processed_obs[k] = self.transform_fn(v)
                new_transition = transition.copy()
                new_transition[TransitionKey.OBSERVATION] = processed_obs
                return new_transition
            return transition
        def get_config(self):
            return {"name": self.name}
        def transform_features(
            self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
        ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
            # We do not test features here
            return features
    try:
        step = CallableStep()
        pipeline = DataProcessorPipeline([step])
        with tempfile.TemporaryDirectory() as tmp_dir:
            pipeline.save_pretrained(tmp_dir)
            # Define a transform function to inject through overrides.
            def double_values(x):
                if isinstance(x, (int | float | torch.Tensor)):
                    return x * 2
                return x
            # Load with callable override
            loaded = DataProcessorPipeline.from_pretrained(
                tmp_dir,
                config_filename="dataprocessorpipeline.json",
                overrides={"callable_step": {"transform_fn": double_values}},
                to_transition=identity_transition,
                to_output=identity_transition,
            )
            # Test it works: 5.0 doubled by the injected callable.
            transition = create_transition(observation={"value": torch.tensor(5.0)})
            result = loaded(transition)
            assert result[TransitionKey.OBSERVATION]["value"].item() == 10.0
    finally:
        ProcessorStepRegistry.unregister("callable_step")
def test_override_multiple_same_class_warning():
    """A class-keyed override hits every step of that class, while per-step configs survive."""
    processor = DataProcessorPipeline(
        [
            MockStepWithNonSerializableParam(name="step1", multiplier=1.0),
            MockStepWithNonSerializableParam(name="step2", multiplier=2.0),
        ]
    )
    with tempfile.TemporaryDirectory() as workdir:
        processor.save_pretrained(workdir)
        restored = DataProcessorPipeline.from_pretrained(
            workdir,
            config_filename="dataprocessorpipeline.json",
            overrides={"MockStepWithNonSerializableParam": {"multiplier": 10.0}},
        )
        # Every instance of the class receives the overridden multiplier...
        assert [s.multiplier for s in restored.steps] == [10.0, 10.0]
        # ...but each keeps the name it was saved with.
        assert [s.name for s in restored.steps] == ["step1", "step2"]
def test_config_filename_special_characters():
    """Special characters in the pipeline name are sanitized in the derived config filename."""
    processor = DataProcessorPipeline([MockStep()], name="My/Processor\\With:Special*Chars")
    with tempfile.TemporaryDirectory() as workdir:
        processor.save_pretrained(workdir)
        # Exactly one config is written, with every special character collapsed to "_".
        json_names = [p.name for p in Path(workdir).glob("*.json")]
        assert json_names == ["my_processor_with_special_chars.json"]
def test_state_file_naming_with_multiple_processors():
    """Pipeline-name prefixes keep state files from two pipelines apart in one directory."""
    pre = DataProcessorPipeline([MockStepWithTensorState(name="norm", window_size=5)], name="PreProcessor")
    post = DataProcessorPipeline([MockStepWithTensorState(name="norm", window_size=10)], name="PostProcessor")
    # Accumulate some state in both pipelines before saving.
    for reward in (0.0, 1.0, 2.0):
        transition = create_transition(reward=reward)
        pre(transition)
        post(transition)
    with tempfile.TemporaryDirectory() as workdir:
        pre.save_pretrained(workdir)
        post.save_pretrained(workdir)
        # Configs and state files for both pipelines coexist without clobbering.
        for fname in (
            "preprocessor.json",
            "postprocessor.json",
            "preprocessor_step_0.safetensors",
            "postprocessor_step_0.safetensors",
        ):
            assert (Path(workdir) / fname).exists()
        # Each config restores its own pipeline with the right step config.
        restored_pre = DataProcessorPipeline.from_pretrained(workdir, config_filename="preprocessor.json")
        restored_post = DataProcessorPipeline.from_pretrained(workdir, config_filename="postprocessor.json")
        assert restored_pre.name == "PreProcessor"
        assert restored_post.name == "PostProcessor"
        assert restored_pre.steps[0].window_size == 5
        assert restored_post.steps[0].window_size == 10
def test_override_with_device_strings():
    """Test overriding device parameters with string values."""
    @ProcessorStepRegistry.register("device_aware_step")
    @dataclass
    class DeviceAwareStep(ProcessorStep):
        # Keeps a buffer on the configured device; device is serialized as a string.
        device: str = "cpu"
        def __init__(self, device: str = "cpu"):
            self.device = device
            self.buffer = torch.zeros(10, device=device)
        def __call__(self, transition: EnvTransition) -> EnvTransition:
            # Identity on data; only config/override handling is under test.
            return transition
        def get_config(self):
            return {"device": str(self.device)}
        def state_dict(self):
            return {"buffer": self.buffer}
        def load_state_dict(self, state):
            self.buffer = state["buffer"]
        def transform_features(
            self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
        ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
            # We do not test features here
            return features
    try:
        step = DeviceAwareStep(device="cpu")
        pipeline = DataProcessorPipeline([step])
        with tempfile.TemporaryDirectory() as tmp_dir:
            pipeline.save_pretrained(tmp_dir)
            # Override device — only exercised when a CUDA device is available.
            if torch.cuda.is_available():
                loaded = DataProcessorPipeline.from_pretrained(
                    tmp_dir,
                    config_filename="dataprocessorpipeline.json",
                    overrides={"device_aware_step": {"device": "cuda:0"}},
                )
                loaded_step = loaded.steps[0]
                assert loaded_step.device == "cuda:0"
                # Note: buffer will still be on CPU from saved state
                # until .to() is called on the processor
    finally:
        ProcessorStepRegistry.unregister("device_aware_step")
def test_from_pretrained_nonexistent_path():
    """Loading from missing local paths or Hub repos raises the appropriate errors."""
    from huggingface_hub.errors import HfHubHTTPError

    # A local path that does not exist at all.
    with pytest.raises(FileNotFoundError):
        DataProcessorPipeline.from_pretrained("/path/that/does/not/exist", config_filename="processor.json")
    # A repo-id-like string with extra path segments resolves to no directory either.
    with pytest.raises(FileNotFoundError):
        DataProcessorPipeline.from_pretrained("user/repo/extra/path", config_filename="processor.json")
    # A plausible but nonexistent Hub repo: either error type is acceptable.
    with pytest.raises((FileNotFoundError, HfHubHTTPError)):
        DataProcessorPipeline.from_pretrained(
            "nonexistent-user/nonexistent-repo", config_filename="processor.json"
        )
    # An existing but empty directory is missing the requested config file.
    with tempfile.TemporaryDirectory() as workdir, pytest.raises(FileNotFoundError):
        DataProcessorPipeline.from_pretrained(workdir, config_filename="processor.json")
def test_save_load_with_custom_converter_functions():
    """Custom to_transition/to_output callables are not serialized; loading falls back to defaults."""

    def custom_to_transition(batch):
        # Reads a non-standard batch layout ("obs"/"act"/"rew"/...).
        return {
            TransitionKey.OBSERVATION: batch.get("obs"),
            TransitionKey.ACTION: batch.get("act"),
            TransitionKey.REWARD: batch.get("rew", 0.0),
            TransitionKey.DONE: batch.get("done", False),
            TransitionKey.TRUNCATED: batch.get("truncated", False),
            TransitionKey.INFO: {},
            TransitionKey.COMPLEMENTARY_DATA: {},
        }

    def custom_to_output(transition):
        # Emits the same non-standard layout.
        return {
            "obs": transition.get(TransitionKey.OBSERVATION),
            "act": transition.get(TransitionKey.ACTION),
            "rew": transition.get(TransitionKey.REWARD),
            "done": transition.get(TransitionKey.DONE),
            "truncated": transition.get(TransitionKey.TRUNCATED),
        }

    processor = DataProcessorPipeline(
        [MockStep()], to_transition=custom_to_transition, to_output=custom_to_output
    )
    with tempfile.TemporaryDirectory() as workdir:
        processor.save_pretrained(workdir)
        restored = DataProcessorPipeline.from_pretrained(
            workdir, config_filename="dataprocessorpipeline.json"
        )
        # Feed a *standard* batch: it only works if defaults replaced the custom converters.
        standard_batch = {
            OBS_IMAGE: torch.randn(1, 3, 32, 32),
            ACTION: torch.randn(1, 7),
            REWARD: torch.tensor([1.0]),
            DONE: torch.tensor([False]),
            TRUNCATED: torch.tensor([False]),
            "info": {},
        }
        result = restored(standard_batch)
        # Default to_output returns a batch dict, so the image key reappears.
        assert OBS_IMAGE in result
class NonCompliantStep:
    """Intentionally non-compliant: missing features."""
    # Callable, but not a ProcessorStep subclass — pipeline construction must reject it.
    def __call__(self, transition: EnvTransition) -> EnvTransition:
        return transition
class NonCallableStep(ProcessorStep):
    """Intentionally non-compliant: missing __call__."""
    # Leaving __call__ abstract means instantiation itself raises TypeError.
    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        return features
def test_construction_rejects_step_without_call():
    """Pipeline construction refuses abstract steps and non-ProcessorStep objects."""
    # Abstract subclass: instantiation itself fails because __call__ is abstract.
    with pytest.raises(TypeError, match=r"Can't instantiate abstract class NonCallableStep"):
        DataProcessorPipeline([NonCallableStep()])
    # Plain object: the pipeline rejects anything not derived from ProcessorStep.
    with pytest.raises(TypeError, match=r"must inherit from ProcessorStep"):
        DataProcessorPipeline([NonCompliantStep()])
@dataclass
class FeatureContractAddStep(ProcessorStep):
    """Test fixture: inserts one observation PolicyFeature under ``key``."""
    # Key under which the feature is inserted, and the feature itself.
    key: str = "a"
    value: PolicyFeature = field(default_factory=lambda: PolicyFeature(type=FeatureType.STATE, shape=(1,)))
    def __call__(self, transition: EnvTransition) -> EnvTransition:
        # Identity on the data path; only the feature contract is exercised.
        return transition
    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        features[PipelineFeatureType.OBSERVATION][self.key] = self.value
        return features
@dataclass
class FeatureContractMutateStep(ProcessorStep):
    """Test fixture: replaces the observation feature at ``key`` with ``fn(current)``."""
    key: str = "a"
    # fn receives the current PolicyFeature (or None if absent) and returns the replacement.
    fn: Callable[[PolicyFeature | None], PolicyFeature] = identity_transition  # noqa: E731
    def __call__(self, transition: EnvTransition) -> EnvTransition:
        # Identity on the data path; only the feature contract is exercised.
        return transition
    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        features[PipelineFeatureType.OBSERVATION][self.key] = self.fn(
            features[PipelineFeatureType.OBSERVATION].get(self.key)
        )
        return features
@dataclass
class FeatureContractBadReturnStep(ProcessorStep):
    """Deliberately broken fixture: transform_features returns a list instead of a dict."""
    def __call__(self, transition: EnvTransition) -> EnvTransition:
        return transition
    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # Intentional contract violation for negative tests.
        return ["not-a-dict"]
@dataclass
class FeatureContractRemoveStep(ProcessorStep):
    """Test fixture: removes the observation feature at ``key`` (no-op if absent)."""
    key: str
    def __call__(self, transition: EnvTransition) -> EnvTransition:
        # Identity on the data path; only the feature contract is exercised.
        return transition
    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        features[PipelineFeatureType.OBSERVATION].pop(self.key, None)
        return features
def test_features_orders_and_merges(policy_feature_factory):
    """Steps run in order: later mutations see earlier additions, and independent keys merge."""
    pipeline = DataProcessorPipeline(
        [
            FeatureContractAddStep("a", policy_feature_factory(FeatureType.STATE, (1,))),
            FeatureContractMutateStep("a", lambda v: PolicyFeature(type=v.type, shape=(3,))),
            FeatureContractAddStep("b", policy_feature_factory(FeatureType.ENV, (2,))),
        ]
    )
    out = pipeline.transform_features({PipelineFeatureType.OBSERVATION: {}})
    obs = out[PipelineFeatureType.OBSERVATION]
    # "a" was added then mutated to shape (3,); "b" was merged in untouched.
    assert obs["a"].type == FeatureType.STATE
    assert obs["a"].shape == (3,)
    assert obs["b"].type == FeatureType.ENV
    assert obs["b"].shape == (2,)
    assert_contract_is_typed(out)
def test_features_respects_initial_without_mutation(policy_feature_factory):
    """transform_features works on a copy: outputs change, the caller's initial dict does not."""
    initial = {
        PipelineFeatureType.OBSERVATION: {
            "seed": policy_feature_factory(FeatureType.STATE, (7,)),
            "nested": policy_feature_factory(FeatureType.ENV, (0,)),
        }
    }
    pipeline = DataProcessorPipeline(
        [
            FeatureContractMutateStep("seed", lambda v: PolicyFeature(type=v.type, shape=(v.shape[0] + 1,))),
            FeatureContractMutateStep(
                "nested", lambda v: PolicyFeature(type=v.type, shape=(v.shape[0] + 5,))
            ),
        ]
    )
    out = pipeline.transform_features(initial_features=initial)
    obs = out[PipelineFeatureType.OBSERVATION]
    # Output shapes reflect the mutations (+1 and +5 respectively).
    assert obs["seed"].shape == (8,)
    assert obs["nested"].shape == (5,)
    # The caller-supplied dict keeps its original shapes.
    assert initial[PipelineFeatureType.OBSERVATION]["seed"].shape == (7,)
    assert initial[PipelineFeatureType.OBSERVATION]["nested"].shape == (0,)
    assert_contract_is_typed(out)
def test_features_execution_order_tracking():
    """Each step appends its code to a shape tuple, proving steps run in list order."""

    class Track(ProcessorStep):
        def __init__(self, label):
            self.label = label

        def __call__(self, transition: EnvTransition) -> EnvTransition:
            return transition

        def transform_features(
            self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
        ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
            # Append this step's numeric code to the running "order" shape tuple.
            code = {"A": 1, "B": 2, "C": 3}[self.label]
            previous = features[PipelineFeatureType.OBSERVATION].get(
                "order", PolicyFeature(type=FeatureType.ENV, shape=())
            )
            features[PipelineFeatureType.OBSERVATION]["order"] = PolicyFeature(
                type=previous.type, shape=previous.shape + (code,)
            )
            return features

    pipeline = DataProcessorPipeline([Track("A"), Track("B"), Track("C")])
    out = pipeline.transform_features(initial_features={PipelineFeatureType.OBSERVATION: {}})
    assert out[PipelineFeatureType.OBSERVATION]["order"].shape == (1, 2, 3)
def test_features_remove_key(policy_feature_factory):
    """A feature added by one step can be removed by a later step."""
    pipeline = DataProcessorPipeline(
        [
            FeatureContractAddStep("a", policy_feature_factory(FeatureType.STATE, (1,))),
            FeatureContractRemoveStep("a"),
        ]
    )
    out = pipeline.transform_features({PipelineFeatureType.OBSERVATION: {}})
    assert "a" not in out[PipelineFeatureType.OBSERVATION]
def test_features_remove_from_initial(policy_feature_factory):
    """A step may drop a key supplied in the initial features, leaving the rest intact."""
    initial = {
        PipelineFeatureType.OBSERVATION: {
            "keep": policy_feature_factory(FeatureType.STATE, (1,)),
            "drop": policy_feature_factory(FeatureType.STATE, (1,)),
        },
    }
    out = DataProcessorPipeline([FeatureContractRemoveStep("drop")]).transform_features(
        initial_features=initial
    )
    obs = out[PipelineFeatureType.OBSERVATION]
    assert "drop" not in obs
    assert obs["keep"] == initial[PipelineFeatureType.OBSERVATION]["keep"]
@dataclass
class AddActionEEAndJointFeatures(ProcessorStep):
    """Adds both EE and JOINT action features."""
    def __call__(self, tr):
        # Identity on the data path; only feature aggregation is exercised.
        return tr
    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # NOTE(review): values are the bare `float` type rather than PolicyFeature —
        # presumably aggregate_pipeline_dataset_features only keys on names; confirm.
        # EE features
        features[PipelineFeatureType.ACTION]["action.ee.x"] = float
        features[PipelineFeatureType.ACTION]["action.ee.y"] = float
        # JOINT features
        features[PipelineFeatureType.ACTION]["action.j1.pos"] = float
        features[PipelineFeatureType.ACTION]["action.j2.pos"] = float
        return features
@dataclass
class AddObservationStateFeatures(ProcessorStep):
    """Adds state features (and optionally an image spec to test precedence)."""
    # When add_front_image is set, a "front" camera spec with front_image_shape is emitted.
    add_front_image: bool = False
    front_image_shape: tuple = (240, 320, 3)
    def __call__(self, tr):
        # Identity on the data path; only feature aggregation is exercised.
        return tr
    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # State features (mix EE and a joint state)
        # NOTE(review): values are the bare `float` type / raw shape tuple rather than
        # PolicyFeature — presumably sufficient for aggregation; confirm.
        features[PipelineFeatureType.OBSERVATION][f"{OBS_STATE}.ee.x"] = float
        features[PipelineFeatureType.OBSERVATION][f"{OBS_STATE}.j1.pos"] = float
        if self.add_front_image:
            features[PipelineFeatureType.OBSERVATION][f"{OBS_IMAGES}.front"] = self.front_image_shape
        return features
def test_aggregate_joint_action_only():
    """Joint-only patterns select just the joint action names into one packed ACTION feature."""
    pipeline = DataProcessorPipeline([AddActionEEAndJointFeatures()])
    initial = {PipelineFeatureType.OBSERVATION: {"front": (480, 640, 3)}, PipelineFeatureType.ACTION: {}}
    out = aggregate_pipeline_dataset_features(
        pipeline=pipeline,
        initial_features=initial,
        use_videos=True,
        patterns=["action.j1.pos", "action.j2.pos"],
    )
    # Only the joint action vector is produced; no state feature appears.
    assert ACTION in out
    assert OBS_STATE not in out
    action_spec = out[ACTION]
    assert action_spec["dtype"] == "float32"
    assert set(action_spec["names"]) == {"j1.pos", "j2.pos"}
    assert action_spec["shape"] == (len(action_spec["names"]),)
def test_aggregate_ee_action_and_observation_with_videos():
    """EE patterns pack only EE action names; state and cameras aggregate alongside."""
    pipeline = DataProcessorPipeline([AddActionEEAndJointFeatures(), AddObservationStateFeatures()])
    cameras = {"front": (480, 640, 3), "side": (720, 1280, 3)}
    out = aggregate_pipeline_dataset_features(
        pipeline=pipeline,
        initial_features={PipelineFeatureType.OBSERVATION: cameras, PipelineFeatureType.ACTION: {}},
        use_videos=True,
        patterns=["action.ee", OBS_STATE],
    )
    # Only the EE action names survive the "action.ee" pattern.
    assert ACTION in out
    assert set(out[ACTION]["names"]) == {"ee.x", "ee.y"}
    assert out[ACTION]["dtype"] == "float32"
    # The state vector packs both the EE coordinate and the joint position.
    assert OBS_STATE in out
    assert set(out[OBS_STATE]["names"]) == {"ee.x", "j1.pos"}
    assert out[OBS_STATE]["dtype"] == "float32"
    # Every camera from initial_features becomes a video feature with standard HWC naming.
    for cam, cam_shape in cameras.items():
        spec = out[f"{OBS_IMAGES}.{cam}"]
        assert spec["dtype"] == "video"
        assert spec["shape"] == cam_shape
        assert spec["names"] == ["height", "width", "channels"]
def test_aggregate_both_action_types():
    """Mixing EE and joint patterns packs all matching action names into one vector."""
    out = aggregate_pipeline_dataset_features(
        pipeline=DataProcessorPipeline([AddActionEEAndJointFeatures()]),
        initial_features={PipelineFeatureType.ACTION: {}, PipelineFeatureType.OBSERVATION: {}},
        use_videos=True,
        patterns=["action.ee", "action.j1", "action.j2.pos"],
    )
    assert ACTION in out
    expected_names = {"ee.x", "ee.y", "j1.pos", "j2.pos"}
    assert set(out[ACTION]["names"]) == expected_names
    assert out[ACTION]["shape"] == (len(expected_names),)
def test_aggregate_images_when_use_videos_false():
    """With use_videos=False, neither initial nor step-added cameras are emitted."""
    pipeline = DataProcessorPipeline([AddObservationStateFeatures(add_front_image=True)])
    out = aggregate_pipeline_dataset_features(
        pipeline=pipeline,
        initial_features={
            PipelineFeatureType.ACTION: {},
            PipelineFeatureType.OBSERVATION: {"back": (480, 640, 3)},
        },
        use_videos=False,
        patterns=None,
    )
    # Neither the initial camera nor the step-provided one shows up.
    assert f"{OBS_IMAGES}.back" not in out
    assert f"{OBS_IMAGES}.front" not in out
def test_aggregate_images_when_use_videos_true():
    """With use_videos=True, both step-added and initial cameras come out as video features."""
    pipeline = DataProcessorPipeline([AddObservationStateFeatures(add_front_image=True)])
    initial_cams = {"back": (480, 640, 3)}
    out = aggregate_pipeline_dataset_features(
        pipeline=pipeline,
        initial_features={PipelineFeatureType.OBSERVATION: initial_cams, PipelineFeatureType.ACTION: {}},
        use_videos=True,
        patterns=None,
    )
    front_key = f"{OBS_IMAGES}.front"
    back_key = f"{OBS_IMAGES}.back"
    assert front_key in out
    assert back_key in out
    # Both cameras use the "video" dtype; the initial camera keeps its shape.
    assert out[front_key]["dtype"] == "video"
    assert out[back_key]["dtype"] == "video"
    assert out[back_key]["shape"] == initial_cams["back"]
def test_initial_camera_not_overridden_by_step_image():
    """When a step supplies a camera spec, the initial-features spec must not override it."""
    pipeline = DataProcessorPipeline(
        [AddObservationStateFeatures(add_front_image=True, front_image_shape=(240, 320, 3))]
    )
    # Initial features advertise a different resolution for the same camera.
    out = aggregate_pipeline_dataset_features(
        pipeline=pipeline,
        initial_features={
            PipelineFeatureType.ACTION: {},
            PipelineFeatureType.OBSERVATION: {"front": (480, 640, 3)},
        },
        use_videos=True,
        patterns=[f"{OBS_IMAGES}.front"],
    )
    front_key = f"{OBS_IMAGES}.front"
    assert front_key in out
    # setdefault semantics on initial cameras: the step-provided shape wins.
    assert out[front_key]["shape"] == (240, 320, 3)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_pipeline.py",
"license": "Apache License 2.0",
"lines": 1662,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_rename_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from pathlib import Path
import numpy as np
import torch
from lerobot.configs.types import FeatureType, PipelineFeatureType
from lerobot.processor import (
DataProcessorPipeline,
ProcessorStepRegistry,
RenameObservationsProcessorStep,
TransitionKey,
)
from lerobot.processor.converters import create_transition, identity_transition
from lerobot.processor.rename_processor import rename_stats
from lerobot.utils.constants import ACTION, OBS_IMAGE, OBS_IMAGES, OBS_STATE
from tests.conftest import assert_contract_is_typed
def test_basic_renaming():
    """Mapped keys are renamed; values and unmapped keys pass through untouched."""
    step = RenameObservationsProcessorStep(
        rename_map={"old_key1": "new_key1", "old_key2": "new_key2"}
    )
    observation = {
        "old_key1": torch.tensor([1.0, 2.0]),
        "old_key2": np.array([3.0, 4.0]),
        "unchanged_key": "keep_me",
    }
    out = step(create_transition(observation=observation))[TransitionKey.OBSERVATION]
    # Old names are gone, new names present.
    assert "new_key1" in out
    assert "new_key2" in out
    assert "old_key1" not in out
    assert "old_key2" not in out
    # Values ride along unchanged.
    torch.testing.assert_close(out["new_key1"], torch.tensor([1.0, 2.0]))
    np.testing.assert_array_equal(out["new_key2"], np.array([3.0, 4.0]))
    # Keys absent from the map are untouched.
    assert out["unchanged_key"] == "keep_me"
def test_empty_rename_map():
    """An empty rename map leaves the observation exactly as it was."""
    step = RenameObservationsProcessorStep(rename_map={})
    observation = {"key1": torch.tensor([1.0]), "key2": "value2"}
    out = step(create_transition(observation=observation))[TransitionKey.OBSERVATION]
    # Same key set, same values.
    assert out.keys() == observation.keys()
    torch.testing.assert_close(out["key1"], observation["key1"])
    assert out["key2"] == observation["key2"]
def test_none_observation():
    """An empty observation dict passes through the processor unchanged."""
    step = RenameObservationsProcessorStep(rename_map={"old": "new"})
    transition = create_transition(observation={})
    # Nothing to rename, so the transition is returned as-is.
    assert step(transition) == transition
def test_overlapping_rename():
    """Renames are computed from the original keys, so chained mappings don't cascade."""
    # "a" -> "b" while the original "b" -> "c": potential collision on "b".
    step = RenameObservationsProcessorStep(rename_map={"a": "b", "b": "c"})
    out = step(create_transition(observation={"a": 1, "b": 2, "x": 3}))[TransitionKey.OBSERVATION]
    assert "a" not in out
    # Original "a" landed on "b"; original "b" moved on to "c".
    assert out["b"] == 1
    assert out["c"] == 2
    # Unmapped key is untouched.
    assert out["x"] == 3
def test_partial_rename():
    """Only mapped keys are renamed; everything else in the observation survives as-is."""
    step = RenameObservationsProcessorStep(
        rename_map={OBS_STATE: "observation.proprio_state", "pixels": OBS_IMAGE}
    )
    observation = {
        OBS_STATE: torch.randn(10),
        "pixels": np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8),
        "reward": 1.0,
        "info": {"episode": 1},
    }
    out = step(create_transition(observation=observation))[TransitionKey.OBSERVATION]
    # Mapped keys moved to their new names.
    assert "observation.proprio_state" in out
    assert OBS_IMAGE in out
    assert OBS_STATE not in out
    assert "pixels" not in out
    # Unmapped entries keep both key and value.
    assert out["reward"] == 1.0
    assert out["info"] == {"episode": 1}
def test_get_config():
    """get_config round-trips the rename map verbatim."""
    mapping = {"old1": "new1", "old2": "new2"}
    step = RenameObservationsProcessorStep(rename_map=mapping)
    assert step.get_config() == {"rename_map": mapping}
def test_state_dict():
    """The rename step is stateless: empty state dict, and loading an empty one is a no-op."""
    step = RenameObservationsProcessorStep(rename_map={"old": "new"})
    assert step.state_dict() == {}
    # Loading an empty state dict must not raise.
    step.load_state_dict({})
def test_integration_with_robot_processor():
    """Renaming works when the step runs inside a DataProcessorPipeline."""
    step = RenameObservationsProcessorStep(rename_map={"agent_pos": OBS_STATE, "pixels": OBS_IMAGE})
    pipeline = DataProcessorPipeline(
        [step], to_transition=identity_transition, to_output=identity_transition
    )
    obs_in = {
        "agent_pos": np.array([1.0, 2.0, 3.0]),
        "pixels": np.zeros((32, 32, 3), dtype=np.uint8),
        "other_data": "preserve_me",
    }
    transition = create_transition(
        observation=obs_in, reward=0.5, done=False, truncated=False, info={}, complementary_data={}
    )
    result = pipeline(transition)
    out = result[TransitionKey.OBSERVATION]
    # Keys were renamed through the pipeline; extras survive untouched.
    assert OBS_STATE in out
    assert OBS_IMAGE in out
    assert "agent_pos" not in out
    assert "pixels" not in out
    assert out["other_data"] == "preserve_me"
    # Non-observation transition fields pass through unchanged.
    assert result[TransitionKey.REWARD] == 0.5
    assert result[TransitionKey.DONE] is False
def test_save_and_load_pretrained():
    """save_pretrained/from_pretrained round-trips a pipeline containing the rename step."""
    mapping = {"old_state": OBS_STATE, "old_image": OBS_IMAGE}
    pipeline = DataProcessorPipeline(
        [RenameObservationsProcessorStep(rename_map=mapping)], name="TestRenameProcessorStep"
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        # The config file name is derived from the lowercased pipeline name.
        assert (Path(tmp_dir) / "testrenameprocessorstep.json").exists()
        # A stateless step writes no tensor state.
        assert len(list(Path(tmp_dir).glob("*.safetensors"))) == 0
        restored = DataProcessorPipeline.from_pretrained(
            tmp_dir,
            config_filename="testrenameprocessorstep.json",
            to_transition=identity_transition,
            to_output=identity_transition,
        )
        assert restored.name == "TestRenameProcessorStep"
        assert len(restored) == 1
        restored_step = restored.steps[0]
        assert isinstance(restored_step, RenameObservationsProcessorStep)
        assert restored_step.rename_map == mapping
        # The restored pipeline still renames correctly.
        transition = create_transition(observation={"old_state": [1, 2, 3], "old_image": "image_data"})
        out = restored(transition)[TransitionKey.OBSERVATION]
        assert OBS_STATE in out
        assert OBS_IMAGE in out
        assert out[OBS_STATE] == [1, 2, 3]
        assert out[OBS_IMAGE] == "image_data"
def test_registry_functionality():
    """The step is discoverable and constructible through the ProcessorStepRegistry."""
    assert "rename_observations_processor" in ProcessorStepRegistry.list()
    registered_cls = ProcessorStepRegistry.get("rename_observations_processor")
    assert registered_cls is RenameObservationsProcessorStep
    # The class obtained from the registry builds working instances.
    instance = registered_cls(rename_map={"old": "new"})
    assert isinstance(instance, RenameObservationsProcessorStep)
    assert instance.rename_map == {"old": "new"}
def test_registry_based_save_load():
    """Saved configs reference the step by registry name rather than module path."""
    import json

    pipeline = DataProcessorPipeline(
        [RenameObservationsProcessorStep(rename_map={"key1": "renamed_key1"})],
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        # The default pipeline name yields "dataprocessorpipeline.json".
        with open(Path(tmp_dir) / "dataprocessorpipeline.json") as f:
            config = json.load(f)
        step_cfg = config["steps"][0]
        assert "registry_name" in step_cfg
        assert step_cfg["registry_name"] == "rename_observations_processor"
        # The registry name replaces the module path.
        assert "class" not in step_cfg
        restored = DataProcessorPipeline.from_pretrained(
            tmp_dir, config_filename="dataprocessorpipeline.json"
        )
        restored_step = restored.steps[0]
        assert isinstance(restored_step, RenameObservationsProcessorStep)
        assert restored_step.rename_map == {"key1": "renamed_key1"}
def test_chained_rename_processors():
    """Two rename steps compose: raw keys -> intermediate names -> canonical names."""
    to_intermediate = RenameObservationsProcessorStep(
        rename_map={"pos": "agent_position", "img": "camera_image"}
    )
    to_canonical = RenameObservationsProcessorStep(
        rename_map={"agent_position": OBS_STATE, "camera_image": OBS_IMAGE}
    )
    pipeline = DataProcessorPipeline(
        [to_intermediate, to_canonical], to_transition=identity_transition, to_output=identity_transition
    )
    transition = create_transition(
        observation={"pos": np.array([1.0, 2.0]), "img": "image_data", "extra": "keep_me"}
    )
    # step_through yields the transition after each stage.
    stages = list(pipeline.step_through(transition))
    mid_obs = stages[1][TransitionKey.OBSERVATION]
    assert "agent_position" in mid_obs
    assert "camera_image" in mid_obs
    final_obs = stages[2][TransitionKey.OBSERVATION]
    assert OBS_STATE in final_obs
    assert OBS_IMAGE in final_obs
    assert final_obs["extra"] == "keep_me"
    # Neither the raw nor the intermediate names survive the full chain.
    for stale_key in ("pos", "img", "agent_position", "camera_image"):
        assert stale_key not in final_obs
def test_nested_observation_rename():
    """Dotted (nested-style) observation keys are renamed like any other key."""
    mapping = {
        f"{OBS_IMAGES}.left": "observation.camera.left_view",
        f"{OBS_IMAGES}.right": "observation.camera.right_view",
        "observation.proprio": "observation.proprioception",
    }
    step = RenameObservationsProcessorStep(rename_map=mapping)
    obs_in = {
        f"{OBS_IMAGES}.left": torch.randn(3, 64, 64),
        f"{OBS_IMAGES}.right": torch.randn(3, 64, 64),
        "observation.proprio": torch.randn(7),
        "observation.gripper": torch.tensor([0.0]),  # deliberately absent from the map
    }
    out = step(create_transition(observation=obs_in))[TransitionKey.OBSERVATION]
    # Every mapped key shows up under its new name and the old name is gone.
    for old_key, new_key in mapping.items():
        assert new_key in out
        assert old_key not in out
    # The unmapped key is preserved.
    assert "observation.gripper" in out
def test_value_types_preserved():
    """Renaming moves values as-is; contents and types are untouched."""
    step = RenameObservationsProcessorStep(
        rename_map={"old_tensor": "new_tensor", "old_array": "new_array", "old_scalar": "new_scalar"}
    )
    tensor_value = torch.randn(3, 3)
    array_value = np.random.rand(2, 2)
    obs_in = {
        "old_tensor": tensor_value,
        "old_array": array_value,
        "old_scalar": 42,
        "old_string": "hello",
        "old_dict": {"nested": "value"},
        "old_list": [1, 2, 3],
    }
    out = step(create_transition(observation=obs_in))[TransitionKey.OBSERVATION]
    # Renamed entries keep their exact values.
    assert torch.equal(out["new_tensor"], tensor_value)
    assert np.array_equal(out["new_array"], array_value)
    assert out["new_scalar"] == 42
    # Keys outside the map are left entirely alone.
    assert out["old_string"] == "hello"
    assert out["old_dict"] == {"nested": "value"}
    assert out["old_list"] == [1, 2, 3]
def test_features_basic_renaming(policy_feature_factory):
    """transform_features renames feature keys without mutating the input contract."""
    step = RenameObservationsProcessorStep(rename_map={"a": "x", "b": "y"})
    spec = {
        PipelineFeatureType.OBSERVATION: {
            "a": policy_feature_factory(FeatureType.VISUAL, (2,)),
            "b": policy_feature_factory(FeatureType.VISUAL, (3,)),
            "c": policy_feature_factory(FeatureType.VISUAL, (1,)),
        },
    }
    out = step.transform_features(spec.copy())
    obs_out = out[PipelineFeatureType.OBSERVATION]
    obs_in = spec[PipelineFeatureType.OBSERVATION]
    # Renamed entries carry the original feature objects; 'c' is untouched.
    assert obs_out["x"] == obs_in["a"]
    assert obs_out["y"] == obs_in["b"]
    assert obs_out["c"] == obs_in["c"]
    assert_contract_is_typed(out)
    # The caller's spec keeps its original keys (no in-place mutation).
    assert set(obs_in) == {"a", "b", "c"}
def test_features_overlapping_keys(policy_feature_factory):
    """Overlapping feature renames ('a'->'b' while 'b'->'c') resolve without clobbering."""
    step = RenameObservationsProcessorStep(rename_map={"a": "b", "b": "c"})
    spec = {
        PipelineFeatureType.OBSERVATION: {
            "a": policy_feature_factory(FeatureType.VISUAL, (1,)),
            "b": policy_feature_factory(FeatureType.VISUAL, (2,)),
        },
    }
    out = step.transform_features(spec)
    obs_out = out[PipelineFeatureType.OBSERVATION]
    assert set(obs_out) == {"b", "c"}
    # 'a' now lives under 'b'; the original 'b' lives under 'c'.
    assert obs_out["b"] == spec[PipelineFeatureType.OBSERVATION]["a"]
    assert obs_out["c"] == spec[PipelineFeatureType.OBSERVATION]["b"]
    assert_contract_is_typed(out)
def test_features_chained_processors(policy_feature_factory):
    """Feature contracts propagate correctly through two chained rename steps."""
    first = RenameObservationsProcessorStep(rename_map={"pos": "agent_position", "img": "camera_image"})
    second = RenameObservationsProcessorStep(
        rename_map={"agent_position": OBS_STATE, "camera_image": OBS_IMAGE}
    )
    pipeline = DataProcessorPipeline([first, second])
    spec = {
        PipelineFeatureType.OBSERVATION: {
            "pos": policy_feature_factory(FeatureType.VISUAL, (7,)),
            "img": policy_feature_factory(FeatureType.VISUAL, (3, 64, 64)),
            "extra": policy_feature_factory(FeatureType.VISUAL, (1,)),
        },
    }
    out = pipeline.transform_features(initial_features=spec)
    obs_out = out[PipelineFeatureType.OBSERVATION]
    obs_in = spec[PipelineFeatureType.OBSERVATION]
    assert set(obs_out) == {OBS_STATE, OBS_IMAGE, "extra"}
    # Each feature object survives both renames unchanged.
    assert obs_out[OBS_STATE] == obs_in["pos"]
    assert obs_out[OBS_IMAGE] == obs_in["img"]
    assert obs_out["extra"] == obs_in["extra"]
    assert_contract_is_typed(out)
def test_rename_stats_basic():
    """rename_stats renames top-level stat keys and deep-copies the values."""
    stats = {
        OBS_STATE: {"mean": np.array([0.0]), "std": np.array([1.0])},
        ACTION: {"mean": np.array([0.0])},
    }
    renamed = rename_stats(stats, {OBS_STATE: "observation.robot_state"})
    assert "observation.robot_state" in renamed
    assert OBS_STATE not in renamed
    # Mutating the source must not leak into the renamed copy (deep copy).
    stats[OBS_STATE]["mean"][0] = 42.0
    assert renamed["observation.robot_state"]["mean"][0] != 42.0
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_rename_processor.py",
"license": "Apache License 2.0",
"lines": 396,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/async_inference/test_e2e.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""End-to-end test of the asynchronous inference stack (client ↔ server).
This test spins up a lightweight gRPC `PolicyServer` instance with a stubbed
policy network and launches a `RobotClient` that uses a `MockRobot`. The goal
is to exercise the full communication loop:
1. Client sends policy specification → Server
2. Client streams observations → Server
3. Server streams action chunks → Client
4. Client executes received actions
The test succeeds if at least one action is executed and the server records at
least one predicted timestep - demonstrating that the gRPC round-trip works
end-to-end using real (but lightweight) protocol messages.
"""
from __future__ import annotations
import threading
from concurrent import futures
import pytest
import torch
# Skip entire module if grpc is not available
pytest.importorskip("grpc")
# -----------------------------------------------------------------------------
# End-to-end test
# -----------------------------------------------------------------------------
def test_async_inference_e2e(monkeypatch):
    """Tests the full asynchronous inference pipeline.

    Spins up a gRPC PolicyServer with a stubbed policy and a RobotClient
    wrapping a MockRobot, then verifies that observations flow client->server
    and action chunks flow server->client end-to-end.
    """
    # Import grpc-dependent modules inside the test function
    import grpc

    from lerobot.async_inference.configs import PolicyServerConfig, RobotClientConfig
    from lerobot.async_inference.helpers import map_robot_keys_to_lerobot_features
    from lerobot.async_inference.policy_server import PolicyServer
    from lerobot.async_inference.robot_client import RobotClient
    from lerobot.robots.utils import make_robot_from_config
    from lerobot.transport import (
        services_pb2,  # type: ignore
        services_pb2_grpc,  # type: ignore
    )
    from tests.mocks.mock_robot import MockRobotConfig

    # Create a stub policy similar to test_policy_server.py
    class MockPolicy:
        """A minimal mock for an actual policy, returning zeros."""

        class _Config:
            robot_type = "dummy_robot"

            @property
            def image_features(self):
                """Empty image features since this test doesn't use images."""
                return {}

        def __init__(self):
            self.config = self._Config()

        def to(self, *args, **kwargs):
            return self

        def model(self, batch):
            # Return a chunk of 20 dummy actions.
            batch_size = len(batch["robot_type"])
            return torch.zeros(batch_size, 20, 6)

    # ------------------------------------------------------------------
    # 1. Create PolicyServer instance with mock policy
    # ------------------------------------------------------------------
    policy_server_config = PolicyServerConfig(host="localhost", port=9999)
    policy_server = PolicyServer(policy_server_config)

    # Replace the real policy with our fast, deterministic stub.
    policy_server.policy = MockPolicy()
    policy_server.actions_per_chunk = 20
    policy_server.device = "cpu"
    # NOTE(Steven): Smelly tests as the Server is a state machine being partially mocked. Adding these processors as a quick fix.
    policy_server.preprocessor = lambda obs: obs
    policy_server.postprocessor = lambda tensor: tensor

    # Set up robot config and features
    robot_config = MockRobotConfig()
    mock_robot = make_robot_from_config(robot_config)
    lerobot_features = map_robot_keys_to_lerobot_features(mock_robot)
    policy_server.lerobot_features = lerobot_features

    # Force server to produce deterministic action chunks in test mode
    policy_server.policy_type = "act"

    def _fake_get_action_chunk(_self, _obs, _type="test"):
        action_dim = 6
        batch_size = 1
        actions_per_chunk = policy_server.actions_per_chunk
        return torch.zeros(batch_size, actions_per_chunk, action_dim)

    monkeypatch.setattr(PolicyServer, "_get_action_chunk", _fake_get_action_chunk, raising=True)

    # Bypass potentially heavy model loading inside SendPolicyInstructions
    def _fake_send_policy_instructions(self, request, context):  # noqa: N802
        return services_pb2.Empty()

    monkeypatch.setattr(PolicyServer, "SendPolicyInstructions", _fake_send_policy_instructions, raising=True)

    # Build gRPC server running a PolicyServer
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1, thread_name_prefix="policy_server"))
    services_pb2_grpc.add_AsyncInferenceServicer_to_server(policy_server, server)

    # Use the host/port specified in the fixture's config
    server_address = f"{policy_server.config.host}:{policy_server.config.port}"
    server.add_insecure_port(server_address)
    server.start()

    # ------------------------------------------------------------------
    # 2. Create a RobotClient around the MockRobot
    # ------------------------------------------------------------------
    client_config = RobotClientConfig(
        server_address=server_address,
        robot=robot_config,
        chunk_size_threshold=0.0,
        policy_type="test",
        pretrained_name_or_path="test",
        actions_per_chunk=20,
    )
    client = RobotClient(client_config)

    assert client.start(), "Client failed initial handshake with the server"

    # Track action chunks received and verify device type
    action_chunks_received = {"count": 0, "actions_on_cpu": True}
    original_aggregate = client._aggregate_action_queues

    def counting_aggregate(*args, **kwargs):
        action_chunks_received["count"] += 1
        # Check that all received actions are on CPU
        if args:
            for timed_action in args[0]:  # args[0] is the list of TimedAction
                action_tensor = timed_action.get_action()
                if action_tensor.device.type != "cpu":
                    action_chunks_received["actions_on_cpu"] = False
        return original_aggregate(*args, **kwargs)

    monkeypatch.setattr(client, "_aggregate_action_queues", counting_aggregate)

    # Start client threads
    action_thread = threading.Thread(target=client.receive_actions, daemon=True)
    # FIX: `args` must be a 1-tuple. Without the trailing comma the dict itself was
    # used as the args iterable, so `control_loop` received the string "task" (the
    # dict's only key) instead of the intended {"task": ""} dict.
    control_thread = threading.Thread(target=client.control_loop, args=({"task": ""},), daemon=True)
    action_thread.start()
    control_thread.start()

    # ------------------------------------------------------------------
    # 3. System exchanges a few messages
    # ------------------------------------------------------------------
    # Wait for 5 seconds
    server.wait_for_termination(timeout=5)

    assert action_chunks_received["count"] > 0, "Client did not receive any action chunks"
    assert len(policy_server._predicted_timesteps) > 0, "Server did not record any predicted timesteps"

    # ------------------------------------------------------------------
    # 4. Stop the system
    # ------------------------------------------------------------------
    client.stop()
    action_thread.join()
    control_thread.join()
    policy_server.stop()
    server.stop(grace=None)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/async_inference/test_e2e.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/async_inference/test_helpers.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pickle
import time
import numpy as np
import torch
from lerobot.async_inference.helpers import (
FPSTracker,
TimedAction,
TimedObservation,
observations_similar,
prepare_image,
prepare_raw_observation,
raw_observation_to_observation,
resize_robot_observation_image,
)
from lerobot.configs.types import FeatureType, PolicyFeature
from lerobot.utils.constants import OBS_IMAGES, OBS_STATE
# ---------------------------------------------------------------------
# FPSTracker
# ---------------------------------------------------------------------
def test_fps_tracker_first_observation():
    """The very first sample seeds the tracker and reports 0 FPS."""
    tracker = FPSTracker(target_fps=30.0)
    metrics = tracker.calculate_fps_metrics(1000.0)
    assert tracker.first_timestamp == 1000.0
    assert tracker.total_obs_count == 1
    assert metrics["avg_fps"] == 0.0
    assert metrics["target_fps"] == 30.0
def test_fps_tracker_single_interval():
    """Two samples one second apart average out to exactly 1 FPS."""
    tracker = FPSTracker(target_fps=30.0)
    # Seeding sample reports no rate yet.
    assert tracker.calculate_fps_metrics(0.0)["avg_fps"] == 0.0
    # (2 - 1) observations over 1.0 s -> 1 FPS.
    assert math.isclose(tracker.calculate_fps_metrics(1.0)["avg_fps"], 1.0, rel_tol=1e-6)
def test_fps_tracker_multiple_intervals():
    """Evenly spaced samples yield the expected average FPS."""
    tracker = FPSTracker(target_fps=30.0)
    # 5 samples spanning 2 s: (5 - 1) / 2.0 = 2 FPS once all are seen.
    timestamps = [0.0, 0.5, 1.0, 1.5, 2.0]
    for idx, ts in enumerate(timestamps):
        metrics = tracker.calculate_fps_metrics(ts)
        if idx == 0:
            assert metrics["avg_fps"] == 0.0
        elif idx == len(timestamps) - 1:
            assert math.isclose(metrics["avg_fps"], 2.0, rel_tol=1e-6)
def test_fps_tracker_irregular_intervals():
    """Averaging is robust to irregular sample spacing."""
    tracker = FPSTracker(target_fps=30.0)
    for ts in (0.0, 0.1, 0.5, 2.0, 3.0):
        metrics = tracker.calculate_fps_metrics(ts)
    # 5 samples spanning 3 s: (5 - 1) / 3.0 FPS after the last one.
    assert math.isclose(metrics["avg_fps"], 4.0 / 3.0, rel_tol=1e-6)
# ---------------------------------------------------------------------
# TimedData helpers
# ---------------------------------------------------------------------
def test_timed_action_getters():
    """TimedAction round-trips its timestamp, action tensor and timestep."""
    now = time.time()
    payload = torch.arange(10)
    timed = TimedAction(timestamp=now, action=payload, timestep=0)
    assert math.isclose(timed.get_timestamp(), now, rel_tol=0, abs_tol=1e-6)
    torch.testing.assert_close(timed.get_action(), payload)
    assert timed.get_timestep() == 0
def test_timed_observation_getters():
    """TimedObservation round-trips its timestamp, observation dict and timestep."""
    now = time.time()
    payload = {OBS_STATE: torch.ones(6)}
    timed = TimedObservation(timestamp=now, observation=payload, timestep=0)
    assert math.isclose(timed.get_timestamp(), now, rel_tol=0, abs_tol=1e-6)
    # The getter hands back the very same dict object, not a copy.
    assert timed.get_observation() is payload
    assert timed.get_timestep() == 0
def test_timed_data_deserialization_data_getters():
    """TimedAction / TimedObservation survive a round-trip through ``pickle``.

    The async-inference stack uses ``pickle.dumps`` to move these objects across
    the gRPC boundary (see RobotClient.send_observation and
    PolicyServer.StreamActions), so the payload must come back intact after
    the (de)serialization round-trip.
    """
    now = time.time()

    # TimedAction: timestamp, timestep and tensor content all survive.
    action_tensor = torch.randn(6)
    action_in = TimedAction(timestamp=now, action=action_tensor, timestep=13)
    action_out: TimedAction = pickle.loads(pickle.dumps(action_in))  # nosec B301
    assert math.isclose(action_out.get_timestamp(), now, rel_tol=0, abs_tol=1e-6)
    assert action_out.get_timestep() == 13
    torch.testing.assert_close(action_out.get_action(), action_tensor)

    # TimedObservation: dict content and the must_go flag survive.
    obs_dict = {OBS_STATE: torch.arange(4).float()}
    obs_in = TimedObservation(timestamp=now, observation=obs_dict, timestep=7, must_go=True)
    obs_out: TimedObservation = pickle.loads(pickle.dumps(obs_in))  # nosec B301
    assert math.isclose(obs_out.get_timestamp(), now, rel_tol=0, abs_tol=1e-6)
    assert obs_out.get_timestep() == 7
    assert obs_out.must_go is True
    assert obs_out.get_observation().keys() == obs_dict.keys()
    torch.testing.assert_close(obs_out.get_observation()[OBS_STATE], obs_dict[OBS_STATE])
# ---------------------------------------------------------------------
# observations_similar()
# ---------------------------------------------------------------------
def _make_obs(state: torch.Tensor) -> TimedObservation:
    """Wrap up to four joint values from ``state`` in a raw-format TimedObservation.

    Missing joints (when ``state`` is shorter than four) default to 0.0.
    """
    joint_names = ("shoulder", "elbow", "wrist", "gripper")
    raw = {
        name: state[i].item() if len(state) > i else 0.0 for i, name in enumerate(joint_names)
    }
    return TimedObservation(timestamp=time.time(), observation=raw, timestep=0)
def test_observations_similar_true():
    """Observations are similar iff their state distance falls below ``atol``."""
    lerobot_features = {
        OBS_STATE: {
            "dtype": "float32",
            "shape": [4],
            "names": ["shoulder", "elbow", "wrist", "gripper"],
        }
    }
    reference = _make_obs(torch.zeros(4))
    nearby = _make_obs(0.5 * torch.ones(4))
    faraway = _make_obs(2.0 * torch.ones(4))
    assert observations_similar(reference, nearby, lerobot_features, atol=2.0)
    assert not observations_similar(reference, faraway, lerobot_features, atol=2.0)
# ---------------------------------------------------------------------
# raw_observation_to_observation and helpers
# ---------------------------------------------------------------------
def _create_mock_robot_observation():
"""Create a mock robot observation with motor positions and camera images."""
return {
"shoulder": 1.0,
"elbow": 2.0,
"wrist": 3.0,
"gripper": 0.5,
"laptop": np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8),
"phone": np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8),
}
def _create_mock_lerobot_features():
    """Mimic the features mapping that ``hw_to_dataset_features`` would return."""
    image_feature = {
        "dtype": "image",
        "shape": [480, 640, 3],
        "names": ["height", "width", "channels"],
    }
    return {
        OBS_STATE: {
            "dtype": "float32",
            "shape": [4],
            "names": ["shoulder", "elbow", "wrist", "gripper"],
        },
        # Each camera gets its own (identical) image feature spec.
        f"{OBS_IMAGES}.laptop": dict(image_feature),
        f"{OBS_IMAGES}.phone": dict(image_feature),
    }
def _create_mock_policy_image_features():
    """Policy-side visual features, deliberately at two different target resolutions."""
    resolutions = {
        "laptop": (3, 224, 224),  # policy expects a smaller frame here
        "phone": (3, 160, 160),  # and a different size for the second camera
    }
    return {
        f"{OBS_IMAGES}.{camera}": PolicyFeature(type=FeatureType.VISUAL, shape=shape)
        for camera, shape in resolutions.items()
    }
def test_prepare_image():
    """prepare_image converts uint8 data to contiguous float32 in [0, 1]."""
    raw = torch.randint(0, 256, size=(3, 224, 224), dtype=torch.uint8)
    out = prepare_image(raw)
    # dtype conversion and normalization range.
    assert out.dtype == torch.float32
    assert out.min() >= 0.0
    assert out.max() <= 1.0
    # Extremes map exactly when present in the input: 255 -> 1.0, 0 -> 0.0.
    if raw.max() == 255:
        assert torch.isclose(out.max(), torch.tensor(1.0), atol=1e-6)
    if raw.min() == 0:
        assert torch.isclose(out.min(), torch.tensor(0.0), atol=1e-6)
    # Result is laid out contiguously in memory.
    assert out.is_contiguous()
def test_resize_robot_observation_image():
    """Resizing converts a (H, W, C) robot frame to the policy's (C, H, W) shape."""
    source = torch.randint(0, 256, size=(480, 640, 3), dtype=torch.uint8)
    target_shape = (3, 224, 224)
    resized = resize_robot_observation_image(source, target_shape)
    assert resized.shape == target_shape
    # The resize actually changed the dimensions.
    assert source.shape != resized.shape
    # Interpolation must not push values outside the original range.
    assert resized.min() >= 0
    assert resized.max() <= 255
def test_prepare_raw_observation():
    """prepare_raw_observation batches the state and resizes each camera image."""
    policy_image_features = _create_mock_policy_image_features()
    prepared = prepare_raw_observation(
        _create_mock_robot_observation(), _create_mock_lerobot_features(), policy_image_features
    )
    # State vector is stacked into a (1, 4) batch tensor.
    assert OBS_STATE in prepared
    state = prepared[OBS_STATE]
    assert isinstance(state, torch.Tensor)
    assert state.shape == (1, 4)
    # Each camera image is a tensor resized to its policy feature shape.
    for camera in ("laptop", "phone"):
        key = f"{OBS_IMAGES}.{camera}"
        assert key in prepared
        image = prepared[key]
        assert isinstance(image, torch.Tensor)
        assert image.shape == policy_image_features[key].shape
def test_raw_observation_to_observation_basic():
    """End-to-end conversion: batched state plus batched, normalized float images."""
    observation = raw_observation_to_observation(
        _create_mock_robot_observation(),
        _create_mock_lerobot_features(),
        _create_mock_policy_image_features(),
    )
    # State is a batched tensor.
    assert OBS_STATE in observation
    state = observation[OBS_STATE]
    assert isinstance(state, torch.Tensor)
    assert state.shape == (1, 4)
    # Images gain a batch dimension and match their per-camera policy resolution.
    expected_shapes = {
        f"{OBS_IMAGES}.laptop": (1, 3, 224, 224),
        f"{OBS_IMAGES}.phone": (1, 3, 160, 160),
    }
    for key, shape in expected_shapes.items():
        assert key in observation
        image = observation[key]
        assert image.shape == shape
        # Pixels are float32 normalized into [0, 1].
        assert image.dtype == torch.float32
        assert image.min() >= 0.0
        assert image.max() <= 1.0
def test_raw_observation_to_observation_with_non_tensor_data():
    """Non-tensor entries such as the task string pass through unchanged."""
    robot_obs = _create_mock_robot_observation()
    robot_obs["task"] = "pick up the red cube"
    observation = raw_observation_to_observation(
        robot_obs, _create_mock_lerobot_features(), _create_mock_policy_image_features()
    )
    assert "task" in observation
    assert isinstance(observation["task"], str)
    assert observation["task"] == "pick up the red cube"
@torch.no_grad()
def test_raw_observation_to_observation_device_handling():
    """Output tensors live on a known device; actual placement is the preprocessor's job."""
    observation = raw_observation_to_observation(
        _create_mock_robot_observation(),
        _create_mock_lerobot_features(),
        _create_mock_policy_image_features(),
    )
    for key, value in observation.items():
        if isinstance(value, torch.Tensor):
            assert value.device.type in ("cpu", "cuda", "mps", "xpu"), f"Tensor {key} on unexpected device"
def test_raw_observation_to_observation_deterministic():
    """The same raw input always maps to the same observation."""
    robot_obs = _create_mock_robot_observation()
    lerobot_features = _create_mock_lerobot_features()
    policy_image_features = _create_mock_policy_image_features()
    first = raw_observation_to_observation(robot_obs, lerobot_features, policy_image_features)
    second = raw_observation_to_observation(robot_obs, lerobot_features, policy_image_features)
    assert set(first.keys()) == set(second.keys())
    for key, value in first.items():
        if isinstance(value, torch.Tensor):
            torch.testing.assert_close(value, second[key])
        else:
            assert value == second[key]
def test_image_processing_pipeline_preserves_content():
    """Resizing must keep a recognizable bright-center pattern in the image."""
    # White 50x50 square centered in a black 100x100 frame.
    source_img = np.zeros((100, 100, 3), dtype=np.uint8)
    source_img[25:75, 25:75, :] = 255

    raw_obs = {"shoulder": 1.0, "elbow": 1.0, "wrist": 1.0, "gripper": 1.0, "laptop": source_img}
    features = {
        OBS_STATE: {
            "dtype": "float32",
            "shape": [4],
            "names": ["shoulder", "elbow", "wrist", "gripper"],
        },
        f"{OBS_IMAGES}.laptop": {
            "dtype": "image",
            "shape": [100, 100, 3],
            "names": ["height", "width", "channels"],
        },
    }
    # Policy expects a 3x50x50 image, forcing a downsample from 100x100.
    image_features = {
        f"{OBS_IMAGES}.laptop": PolicyFeature(type=FeatureType.VISUAL, shape=(3, 50, 50)),
    }

    result = raw_observation_to_observation(raw_obs, features, image_features)
    resized = result[f"{OBS_IMAGES}.laptop"].squeeze(0)  # drop batch dim

    # Bilinear interpolation shifts exact values, but the bright center must
    # still dominate the dark corner.
    center_val = resized[:, 25, 25].mean()
    corner_val = resized[:, 5, 5].mean()
    assert center_val > corner_val, "Image processing should preserve recognizable patterns"
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/async_inference/test_helpers.py",
"license": "Apache License 2.0",
"lines": 345,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/async_inference/test_policy_server.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit-tests for the `PolicyServer` core logic.
Monkey-patch the `policy` attribute with a stub so that no real model inference is performed.
"""
from __future__ import annotations
import time
import pytest
import torch
from lerobot.configs.types import PolicyFeature
from lerobot.utils.constants import OBS_STATE
from tests.utils import require_package
# -----------------------------------------------------------------------------
# Test fixtures
# -----------------------------------------------------------------------------
class MockPolicy:
    """A minimal mock for an actual policy, returning zeros.

    Refer to tests/policies for tests of the individual policies supported.
    """

    class _Config:
        # Minimal config surface the server reads from a real policy.
        robot_type = "dummy_robot"

        @property
        def image_features(self) -> dict[str, PolicyFeature]:
            """Empty image features since this test doesn't use images."""
            return {}

    def __init__(self):
        self.config = self._Config()

    def predict_action_chunk(self, observation: dict[str, torch.Tensor]) -> torch.Tensor:
        """Return a chunk of 20 dummy actions."""
        n_envs = len(observation[OBS_STATE])
        return torch.zeros(n_envs, 20, 6)

    def to(self, *args, **kwargs):
        # The server calls `policy.to(device)`; ignore it and stay chainable.
        return self

    def model(self, batch: dict) -> torch.Tensor:
        # Same zero-filled chunk shape as `predict_action_chunk`.
        n_envs = len(batch["robot_type"])
        return torch.zeros(n_envs, 20, 6)
@pytest.fixture
@require_package("grpcio", "grpc")
def policy_server():
    """Fresh `PolicyServer` instance with a stubbed-out policy model."""
    # Deferred import so the decorator's grpc availability check runs first.
    from lerobot.async_inference.configs import PolicyServerConfig
    from lerobot.async_inference.policy_server import PolicyServer

    server = PolicyServer(PolicyServerConfig(host="localhost", port=9999))

    # Swap in the fast, deterministic stub and a CPU-only setup.
    server.policy = MockPolicy()
    server.actions_per_chunk = 20
    server.device = "cpu"

    # Minimal feature spec consumed by the observation-similarity helpers.
    server.lerobot_features = {
        OBS_STATE: {
            "dtype": "float32",
            "shape": [6],
            "names": [f"joint{i}" for i in range(1, 7)],
        }
    }
    return server
# -----------------------------------------------------------------------------
# Helper utilities for tests
# -----------------------------------------------------------------------------
def _make_obs(state: torch.Tensor, timestep: int = 0, must_go: bool = False):
    """Create a TimedObservation with a given state vector."""
    # Import only when needed
    from lerobot.async_inference.helpers import TimedObservation

    # Map state entries onto joint1..joint6, padding missing entries with 0.0.
    joints = {f"joint{i + 1}": state[i].item() if len(state) > i else 0.0 for i in range(6)}
    return TimedObservation(
        observation=joints,
        timestamp=time.time(),
        timestep=timestep,
        must_go=must_go,
    )
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_time_action_chunk(policy_server):
    """Verify that `_time_action_chunk` assigns correct timestamps and timesteps."""
    start_ts = time.time()
    first_timestep = 10
    chunk = [torch.randn(6) for _ in range(3)]

    timed = policy_server._time_action_chunk(start_ts, chunk, first_timestep)

    assert len(timed) == 3
    # Timesteps increase by one from the starting timestep.
    assert [ta.get_timestep() for ta in timed] == [10, 11, 12]
    # Timestamps advance by one environment step per action.
    dt = policy_server.config.environment_dt
    for i, ta in enumerate(timed):
        assert abs(ta.get_timestamp() - (start_ts + i * dt)) < 1e-6
def test_maybe_enqueue_observation_must_go(policy_server):
    """An observation with `must_go=True` is always enqueued."""
    must_go_obs = _make_obs(torch.zeros(6), must_go=True)

    assert policy_server._enqueue_observation(must_go_obs) is True
    assert policy_server.observation_queue.qsize() == 1
    assert policy_server.observation_queue.get_nowait() is must_go_obs
def test_maybe_enqueue_observation_dissimilar(policy_server):
    """A dissimilar observation (not `must_go`) is enqueued."""
    policy_server.last_processed_obs = _make_obs(torch.zeros(6))

    # Far from the last processed state -> should pass the similarity gate.
    far_obs = _make_obs(torch.ones(6) * 5)
    assert policy_server._enqueue_observation(far_obs) is True
    assert policy_server.observation_queue.qsize() == 1
def test_maybe_enqueue_observation_is_skipped(policy_server):
    """A similar observation (not `must_go`) is skipped."""
    policy_server.last_processed_obs = _make_obs(torch.zeros(6))

    # Barely different from the last processed state -> rejected.
    near_obs = _make_obs(torch.zeros(6) + 1e-4)
    assert policy_server._enqueue_observation(near_obs) is False
    assert policy_server.observation_queue.empty() is True
def test_obs_sanity_checks(policy_server):
    """Unit-test the private `_obs_sanity_checks` helper."""
    prev = _make_obs(torch.zeros(6), timestep=0)

    # Case 1 - a timestep that was already predicted is rejected.
    policy_server._predicted_timesteps.add(1)
    assert policy_server._obs_sanity_checks(_make_obs(torch.ones(6), timestep=1), prev) is False

    # Case 2 - an observation too similar to the previous one is rejected.
    policy_server._predicted_timesteps.clear()
    assert policy_server._obs_sanity_checks(_make_obs(torch.zeros(6) + 1e-4, timestep=2), prev) is False

    # Case 3 - a genuinely new & dissimilar observation passes.
    assert policy_server._obs_sanity_checks(_make_obs(torch.ones(6) * 5, timestep=3), prev) is True
def test_predict_action_chunk(monkeypatch, policy_server):
    """End-to-end test of `_predict_action_chunk` with a stubbed _get_action_chunk."""
    # Import only when needed
    from lerobot.async_inference.policy_server import PolicyServer

    # Force server to act-style policy; patch method to return deterministic tensor
    policy_server.policy_type = "act"
    # NOTE(Steven): Smelly tests as the Server is a state machine being partially mocked. Adding these processors as a quick fix.
    policy_server.preprocessor = lambda obs: obs
    policy_server.postprocessor = lambda tensor: tensor
    action_dim = 6
    batch_size = 1
    actions_per_chunk = policy_server.actions_per_chunk

    # Deterministic stand-in for real inference; patched on the class so the
    # bound-method lookup on the server instance picks it up.
    def _fake_get_action_chunk(_self, _obs, _type="act"):
        return torch.zeros(batch_size, actions_per_chunk, action_dim)

    monkeypatch.setattr(PolicyServer, "_get_action_chunk", _fake_get_action_chunk, raising=True)
    obs = _make_obs(torch.zeros(6), timestep=5)
    timed_actions = policy_server._predict_action_chunk(obs)

    # One timed action per chunk element, timesteps continuing from the observation's.
    assert len(timed_actions) == actions_per_chunk
    assert [ta.get_timestep() for ta in timed_actions] == list(range(5, 5 + actions_per_chunk))
    # Timestamps advance by one environment step per action.
    for i, ta in enumerate(timed_actions):
        expected_ts = obs.get_timestamp() + i * policy_server.config.environment_dt
        assert abs(ta.get_timestamp() - expected_ts) < 1e-6
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/async_inference/test_policy_server.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/async_inference/test_robot_client.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit-tests for the `RobotClient` action-queue logic (pure Python, no gRPC).
We monkey-patch `lerobot.robots.utils.make_robot_from_config` so that
no real hardware is accessed. Only the queue-update mechanism is verified.
"""
from __future__ import annotations
import time
from queue import Queue
import pytest
import torch
# Skip entire module if grpc is not available
pytest.importorskip("grpc")
# -----------------------------------------------------------------------------
# Test fixtures
# -----------------------------------------------------------------------------
@pytest.fixture()
def robot_client():
    """Fresh `RobotClient` instance for each test case (no threads started).

    Uses DummyRobot."""
    # Import only when the test actually runs (after decorator check)
    from lerobot.async_inference.configs import RobotClientConfig
    from lerobot.async_inference.robot_client import RobotClient
    from tests.mocks.mock_robot import MockRobotConfig

    # The gRPC channel is never opened in these tests, so a dummy address suffices.
    client_config = RobotClientConfig(
        robot=MockRobotConfig(),
        server_address="localhost:9999",
        policy_type="test",
        pretrained_name_or_path="test",
        actions_per_chunk=20,
    )
    client = RobotClient(client_config)

    # Attributes normally initialized by the start() method.
    client.chunks_received = 0
    client.available_actions_size = []

    yield client

    if client.robot.is_connected:
        client.stop()
# -----------------------------------------------------------------------------
# Helper utilities for tests
# -----------------------------------------------------------------------------
def _make_actions(start_ts: float, start_t: int, count: int):
    """Generate `count` consecutive TimedAction objects starting at timestep `start_t`."""
    from lerobot.async_inference.helpers import TimedAction

    dt = 1 / 30  # emulates the most common frame-rate (30 FPS)
    # Each action tensor is filled with its own timestep value for easy tracing.
    return [
        TimedAction(
            action=torch.full((6,), start_t + i, dtype=torch.float32),
            timestep=start_t + i,
            timestamp=start_ts + i * dt,
        )
        for i in range(count)
    ]
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_update_action_queue_discards_stale(robot_client):
    """`_aggregate_action_queues` must drop actions with `timestep` <= `latest_action`."""
    # Pretend we already executed up to action #4.
    robot_client.latest_action = 4

    # Incoming timesteps 3..7 -> only 5, 6, 7 should survive.
    incoming = _make_actions(start_ts=time.time(), start_t=3, count=5)
    robot_client._aggregate_action_queues(incoming)

    kept_timesteps = [a.get_timestep() for a in robot_client.action_queue.queue]
    assert kept_timesteps == [5, 6, 7]
@pytest.mark.parametrize(
    "weight_old, weight_new",
    [
        (1.0, 0.0),
        (0.0, 1.0),
        (0.5, 0.5),
        (0.2, 0.8),
        (0.8, 0.2),
        (0.1, 0.9),
        (0.9, 0.1),
    ],
)
def test_aggregate_action_queues_combines_actions_in_overlap(
    robot_client, weight_old: float, weight_new: float
):
    """`_aggregate_action_queues` must combine actions on overlapping timesteps according
    to the provided aggregate_fn, here tested with multiple coefficients."""
    from lerobot.async_inference.helpers import TimedAction

    robot_client.chunks_received = 0
    # Pretend we already executed up to action #4, and queue contains actions for timesteps 5..6
    robot_client.latest_action = 4
    current_actions = _make_actions(
        start_ts=time.time(), start_t=5, count=2
    )  # each action tensor is filled with its own timestep value
    # Scale the queued actions by 10 so the blend is distinguishable from the incoming ones.
    current_actions = [
        TimedAction(action=10 * a.get_action(), timestep=a.get_timestep(), timestamp=a.get_timestamp())
        for a in current_actions
    ]
    for a in current_actions:
        robot_client.action_queue.put(a)
    # Incoming chunk contains timesteps 3..7 -> expect 5,6,7 kept.
    incoming = _make_actions(start_ts=time.time(), start_t=3, count=5)  # 3,4,5,6,7
    overlap_timesteps = [5, 6]  # properly tested in test_aggregate_action_queues_discards_stale
    nonoverlap_timesteps = [7]
    robot_client._aggregate_action_queues(
        incoming, aggregate_fn=lambda x1, x2: weight_old * x1 + weight_new * x2
    )
    queue_overlap_actions = []
    queue_non_overlap_actions = []
    for a in robot_client.action_queue.queue:
        if a.get_timestep() in overlap_timesteps:
            queue_overlap_actions.append(a)
        elif a.get_timestep() in nonoverlap_timesteps:
            queue_non_overlap_actions.append(a)
    queue_overlap_actions = sorted(queue_overlap_actions, key=lambda x: x.get_timestep())
    queue_non_overlap_actions = sorted(queue_non_overlap_actions, key=lambda x: x.get_timestep())
    # Overlapping timesteps must be the weighted blend of old (queued) and new (incoming).
    assert torch.allclose(
        queue_overlap_actions[0].get_action(),
        weight_old * current_actions[0].get_action() + weight_new * incoming[-3].get_action(),
    )
    assert torch.allclose(
        queue_overlap_actions[1].get_action(),
        weight_old * current_actions[1].get_action() + weight_new * incoming[-2].get_action(),
    )
    # The non-overlapping timestep (7) is taken verbatim from the incoming chunk.
    assert torch.allclose(queue_non_overlap_actions[0].get_action(), incoming[-1].get_action())
@pytest.mark.parametrize(
    "chunk_size, queue_len, expected",
    [
        (20, 12, False),  # 12 / 20 = 0.6 > g=0.5 threshold, not ready to send
        (20, 8, True),  # 8 / 20 = 0.4 <= g=0.5, ready to send
        (10, 5, True),
        (10, 6, False),
    ],
)
def test_ready_to_send_observation(robot_client, chunk_size: int, queue_len: int, expected: bool):
    """Validate `_ready_to_send_observation` ratio logic for various sizes."""
    robot_client.action_chunk_size = chunk_size

    # Start from an empty queue, then add exactly `queue_len` dummy actions.
    fresh_queue = Queue()
    for action in _make_actions(start_ts=time.time(), start_t=0, count=queue_len):
        fresh_queue.put(action)
    robot_client.action_queue = fresh_queue

    assert robot_client._ready_to_send_observation() is expected
@pytest.mark.parametrize(
    "g_threshold, expected",
    [
        # The condition is `queue_size / chunk_size <= g`, with ratio fixed at 6 / 10 = 0.6.
        (0.0, False),
        (0.1, False),
        (0.2, False),
        (0.3, False),
        (0.4, False),
        (0.5, False),
        (0.6, True),  # 0.6 <= 0.6 is True
        (0.7, True),
        (0.8, True),
        (0.9, True),
        (1.0, True),
    ],
)
def test_ready_to_send_observation_with_varying_threshold(robot_client, g_threshold: float, expected: bool):
    """Validate `_ready_to_send_observation` with fixed sizes and varying `g`."""
    # Fixed sizes: 6 queued actions over a chunk size of 10 -> ratio 0.6.
    robot_client.action_chunk_size = 10
    robot_client._chunk_size_threshold = g_threshold  # the parameter under test

    robot_client.action_queue = Queue()
    for action in _make_actions(start_ts=time.time(), start_t=0, count=6):
        robot_client.action_queue.put(action)

    assert robot_client._ready_to_send_observation() is expected
# -----------------------------------------------------------------------------
# Regression test: robot type registry populated by robot_client imports
# -----------------------------------------------------------------------------
def test_robot_client_registers_builtin_robot_types():
    """Importing robot_client must populate RobotConfig's ChoiceRegistry.

    This is a regression test for a bug introduced in #2425, where removing
    robot module imports from robot_client.py caused RobotConfig's registry to
    be empty, breaking CLI argument parsing with:
    error: argument --robot.type: invalid choice: 'so101_follower' (choose from )
    Robot types are registered via @RobotConfig.register_subclass() decorators
    at import time, so all supported modules must be explicitly imported.
    """
    import lerobot.async_inference.robot_client  # noqa: F401
    from lerobot.robots.config import RobotConfig

    registered = RobotConfig.get_known_choices()
    expected = ("so100_follower", "so101_follower", "koch_follower", "omx_follower", "bi_so_follower")
    for robot_type in expected:
        assert robot_type in registered, (
            f"Robot type '{robot_type}' is not registered in RobotConfig's ChoiceRegistry. "
            f"Ensure the corresponding module is imported in robot_client.py. "
            f"Known choices: {sorted(registered)}"
        )
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/async_inference/test_robot_client.py",
"license": "Apache License 2.0",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:src/lerobot/motors/calibration_gui.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
from dataclasses import dataclass
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "1"
from .motors_bus import MotorCalibration, MotorsBus
# ── Layout geometry (pixels) ─────────────────────────────────────────────
BAR_LEN, BAR_THICKNESS = 450, 8  # slider bar length / height
HANDLE_R = 10  # grab radius around the min/max handles
BRACKET_W, BRACKET_H = 6, 14  # min/max bracket glyph size
TRI_W, TRI_H = 12, 14  # position triangle glyph size
BTN_W, BTN_H = 60, 22  # "set min" / "set max" button size
SAVE_W, SAVE_H = 80, 28  # SAVE button size
LOAD_W = 80  # LOAD button width (shares SAVE_H)
DD_W, DD_H = 160, 28  # group dropdown box / item size
TOP_GAP = 50  # gap between top controls and first slider row
PADDING_Y, TOP_OFFSET = 70, 60  # vertical spacing between slider rows
FONT_SIZE, FPS = 20, 60  # UI font size and redraw rate
# ── Colors (RGB) ─────────────────────────────────────────────────────────
BG_COLOR = (30, 30, 30)
BAR_RED, BAR_GREEN = (200, 60, 60), (60, 200, 60)  # out-of-range / in-range bar
HANDLE_COLOR, TEXT_COLOR = (240, 240, 240), (250, 250, 250)
TICK_COLOR = (250, 220, 40)  # live present-position tick
BTN_COLOR, BTN_COLOR_HL = (80, 80, 80), (110, 110, 110)  # button / hovered
DD_COLOR, DD_COLOR_HL = (70, 70, 70), (100, 100, 100)  # dropdown / hovered
def dist(a, b):
    """Euclidean distance between 2-D points `a` and `b`."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return math.hypot(dx, dy)
@dataclass
class RangeValues:
    """Snapshot of one slider row: calibrated min, current position, calibrated max (raw motor ticks)."""

    # Raw (unnormalized) motor positions.
    min_v: int
    pos_v: int
    max_v: int
class RangeSlider:
    """One motor = one slider row"""

    def __init__(self, motor, idx, res, calibration, present, label_pad, base_y):
        import pygame

        self.motor = motor
        self.res = res  # max raw position value (resolution - 1)
        # Horizontal extent of the bar; the left edge leaves room for motor labels.
        self.x0 = 40 + label_pad
        self.x1 = self.x0 + BAR_LEN
        self.y = base_y + idx * PADDING_Y
        # Calibrated range and present position, clamped into that range (raw ticks).
        self.min_v = calibration.range_min
        self.max_v = calibration.range_max
        self.pos_v = max(self.min_v, min(present, self.max_v))
        # Pixel positions mirroring the value fields above.
        self.min_x = self._pos_from_val(self.min_v)
        self.max_x = self._pos_from_val(self.max_v)
        self.pos_x = self._pos_from_val(self.pos_v)
        # "set min" / "set max" buttons flanking the bar.
        self.min_btn = pygame.Rect(self.x0 - BTN_W - 6, self.y - BTN_H // 2, BTN_W, BTN_H)
        self.max_btn = pygame.Rect(self.x1 + 6, self.y - BTN_H // 2, BTN_W, BTN_H)
        self.drag_min = self.drag_max = self.drag_pos = False
        # Yellow tick showing the live Present_Position reading.
        self.tick_val = present
        self.font = pygame.font.Font(None, FONT_SIZE)

    def _val_from_pos(self, x):
        # Pixel x -> raw motor value.
        return round((x - self.x0) / BAR_LEN * self.res)

    def _pos_from_val(self, v):
        # Raw motor value -> pixel x.
        return self.x0 + (v / self.res) * BAR_LEN

    def set_tick(self, v):
        # Clamp the live reading into [0, res] before displaying it.
        self.tick_val = max(0, min(v, self.res))

    def _triangle_hit(self, pos):
        import pygame

        # Hit-test the triangle position handle sitting above the bar.
        tri_top = self.y - BAR_THICKNESS // 2 - 2
        return pygame.Rect(self.pos_x - TRI_W // 2, tri_top - TRI_H, TRI_W, TRI_H).collidepoint(pos)

    def handle_event(self, e):
        import pygame

        if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1:
            # Buttons snap min/max to the current position handle.
            if self.min_btn.collidepoint(e.pos):
                self.min_x, self.min_v = self.pos_x, self.pos_v
                return
            if self.max_btn.collidepoint(e.pos):
                self.max_x, self.max_v = self.pos_x, self.pos_v
                return
            # Otherwise start dragging whichever handle was grabbed.
            if dist(e.pos, (self.min_x, self.y)) <= HANDLE_R:
                self.drag_min = True
            elif dist(e.pos, (self.max_x, self.y)) <= HANDLE_R:
                self.drag_max = True
            elif self._triangle_hit(e.pos):
                self.drag_pos = True
        elif e.type == pygame.MOUSEBUTTONUP and e.button == 1:
            self.drag_min = self.drag_max = self.drag_pos = False
        elif e.type == pygame.MOUSEMOTION:
            x = e.pos[0]
            # Constrain each handle so min <= pos <= max always holds.
            if self.drag_min:
                self.min_x = max(self.x0, min(x, self.pos_x))
            elif self.drag_max:
                self.max_x = min(self.x1, max(x, self.pos_x))
            elif self.drag_pos:
                self.pos_x = max(self.min_x, min(x, self.max_x))
            # Re-derive raw values from the (possibly moved) pixel positions.
            self.min_v = self._val_from_pos(self.min_x)
            self.max_v = self._val_from_pos(self.max_x)
            self.pos_v = self._val_from_pos(self.pos_x)

    def _draw_button(self, surf, rect, text):
        import pygame

        # Highlight the button while hovered.
        clr = BTN_COLOR_HL if rect.collidepoint(pygame.mouse.get_pos()) else BTN_COLOR
        pygame.draw.rect(surf, clr, rect, border_radius=4)
        t = self.font.render(text, True, TEXT_COLOR)
        surf.blit(t, (rect.centerx - t.get_width() // 2, rect.centery - t.get_height() // 2))

    def draw(self, surf):
        import pygame

        # motor name above set-min button (right-aligned)
        name_surf = self.font.render(self.motor, True, TEXT_COLOR)
        surf.blit(
            name_surf,
            (self.min_btn.right - name_surf.get_width(), self.min_btn.y - name_surf.get_height() - 4),
        )
        # bar + active section
        pygame.draw.rect(surf, BAR_RED, (self.x0, self.y - BAR_THICKNESS // 2, BAR_LEN, BAR_THICKNESS))
        pygame.draw.rect(
            surf, BAR_GREEN, (self.min_x, self.y - BAR_THICKNESS // 2, self.max_x - self.min_x, BAR_THICKNESS)
        )
        # tick marking the live present position
        tick_x = self._pos_from_val(self.tick_val)
        pygame.draw.line(
            surf,
            TICK_COLOR,
            (tick_x, self.y - BAR_THICKNESS // 2 - 4),
            (tick_x, self.y + BAR_THICKNESS // 2 + 4),
            2,
        )
        # brackets marking the min/max handles (mirrored via `sign`)
        for x, sign in ((self.min_x, +1), (self.max_x, -1)):
            pygame.draw.line(
                surf, HANDLE_COLOR, (x, self.y - BRACKET_H // 2), (x, self.y + BRACKET_H // 2), 2
            )
            pygame.draw.line(
                surf,
                HANDLE_COLOR,
                (x, self.y - BRACKET_H // 2),
                (x + sign * BRACKET_W, self.y - BRACKET_H // 2),
                2,
            )
            pygame.draw.line(
                surf,
                HANDLE_COLOR,
                (x, self.y + BRACKET_H // 2),
                (x + sign * BRACKET_W, self.y + BRACKET_H // 2),
                2,
            )
        # triangle ▼ position handle
        tri_top = self.y - BAR_THICKNESS // 2 - 2
        pygame.draw.polygon(
            surf,
            HANDLE_COLOR,
            [
                (self.pos_x, tri_top),
                (self.pos_x - TRI_W // 2, tri_top - TRI_H),
                (self.pos_x + TRI_W // 2, tri_top - TRI_H),
            ],
        )
        # numeric labels above each handle
        fh = self.font.get_height()
        pos_y = tri_top - TRI_H - 4 - fh
        txts = [
            (self.min_v, self.min_x, self.y - BRACKET_H // 2 - 4 - fh),
            (self.max_v, self.max_x, self.y - BRACKET_H // 2 - 4 - fh),
            (self.pos_v, self.pos_x, pos_y),
        ]
        for v, x, y in txts:
            s = self.font.render(str(v), True, TEXT_COLOR)
            surf.blit(s, (x - s.get_width() // 2, y))
        # buttons
        self._draw_button(surf, self.min_btn, "set min")
        self._draw_button(surf, self.max_btn, "set max")

    # external
    def values(self) -> RangeValues:
        """Return the current (min, pos, max) raw values for this motor."""
        return RangeValues(self.min_v, self.pos_v, self.max_v)
class RangeFinderGUI:
    """Pygame GUI to interactively find and save motor range calibrations on a MotorsBus."""

    def __init__(self, bus: MotorsBus, groups: dict[str, list[str]] | None = None):
        import pygame

        self.bus = bus
        # Default: a single "all" group containing every motor on the bus.
        self.groups = groups if groups is not None else {"all": list(bus.motors)}
        self.group_names = list(self.groups)
        self.current_group = self.group_names[0]
        if not bus.is_connected:
            bus.connect()
        self.calibration = bus.read_calibration()
        self.res_table = bus.model_resolution_table
        # Cache the raw Present_Position of every motor so sliders start at the real pose.
        self.present_cache = {
            m: bus.read("Present_Position", m, normalize=False)
            for motors in self.groups.values()
            for m in motors
        }
        pygame.init()
        self.font = pygame.font.Font(None, FONT_SIZE)
        # The widest motor label determines the left padding of every slider row.
        label_pad = max(self.font.size(m)[0] for ms in self.groups.values() for m in ms)
        self.label_pad = label_pad
        width = 40 + label_pad + BAR_LEN + 6 + BTN_W + 10 + SAVE_W + 10
        self.controls_bottom = 10 + SAVE_H
        self.base_y = self.controls_bottom + TOP_GAP
        height = self.base_y + PADDING_Y * len(self.groups[self.current_group]) + 40
        self.screen = pygame.display.set_mode((width, height))
        pygame.display.set_caption("Motors range finder")
        # ui rects
        self.save_btn = pygame.Rect(width - SAVE_W - 10, 10, SAVE_W, SAVE_H)
        self.load_btn = pygame.Rect(self.save_btn.left - LOAD_W - 10, 10, LOAD_W, SAVE_H)
        self.dd_btn = pygame.Rect(width // 2 - DD_W // 2, 10, DD_W, DD_H)
        self.dd_open = False  # dropdown expanded?
        self.clock = pygame.time.Clock()
        self._build_sliders()
        self._adjust_height()

    def _adjust_height(self):
        """Resize the window to fit the currently selected motor group."""
        import pygame

        motors = self.groups[self.current_group]
        new_h = self.base_y + PADDING_Y * len(motors) + 40
        if new_h != self.screen.get_height():
            w = self.screen.get_width()
            self.screen = pygame.display.set_mode((w, new_h))

    def _build_sliders(self):
        """Create one RangeSlider row per motor of the current group."""
        self.sliders: list[RangeSlider] = []
        motors = self.groups[self.current_group]
        for i, m in enumerate(motors):
            self.sliders.append(
                RangeSlider(
                    motor=m,
                    idx=i,
                    # Highest addressable raw position is resolution - 1.
                    res=self.res_table[self.bus.motors[m].model] - 1,
                    calibration=self.calibration[m],
                    present=self.present_cache[m],
                    label_pad=self.label_pad,
                    base_y=self.base_y,
                )
            )

    def _draw_dropdown(self):
        """Draw the group-selection dropdown (collapsed box, plus list when open)."""
        import pygame

        # collapsed box
        hover = self.dd_btn.collidepoint(pygame.mouse.get_pos())
        pygame.draw.rect(self.screen, DD_COLOR_HL if hover else DD_COLOR, self.dd_btn, border_radius=6)
        txt = self.font.render(self.current_group, True, TEXT_COLOR)
        self.screen.blit(
            txt, (self.dd_btn.centerx - txt.get_width() // 2, self.dd_btn.centery - txt.get_height() // 2)
        )
        # small ▼ arrow on the right edge of the box
        tri_w, tri_h = 12, 6
        cx = self.dd_btn.right - 14
        cy = self.dd_btn.centery + 1
        pygame.draw.polygon(
            self.screen,
            TEXT_COLOR,
            [(cx - tri_w // 2, cy - tri_h // 2), (cx + tri_w // 2, cy - tri_h // 2), (cx, cy + tri_h // 2)],
        )
        if not self.dd_open:
            return
        # expanded list
        for i, name in enumerate(self.group_names):
            item_rect = pygame.Rect(self.dd_btn.left, self.dd_btn.bottom + i * DD_H, DD_W, DD_H)
            clr = DD_COLOR_HL if item_rect.collidepoint(pygame.mouse.get_pos()) else DD_COLOR
            pygame.draw.rect(self.screen, clr, item_rect)
            t = self.font.render(name, True, TEXT_COLOR)
            self.screen.blit(
                t, (item_rect.centerx - t.get_width() // 2, item_rect.centery - t.get_height() // 2)
            )

    def _handle_dropdown_event(self, e):
        """Handle clicks on the dropdown; return True when the event was consumed."""
        import pygame

        if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1:
            if self.dd_btn.collidepoint(e.pos):
                self.dd_open = not self.dd_open
                return True
            if self.dd_open:
                for i, name in enumerate(self.group_names):
                    item_rect = pygame.Rect(self.dd_btn.left, self.dd_btn.bottom + i * DD_H, DD_W, DD_H)
                    if item_rect.collidepoint(e.pos):
                        # Switch group and rebuild the slider rows for it.
                        if name != self.current_group:
                            self.current_group = name
                            self._build_sliders()
                            self._adjust_height()
                        self.dd_open = False
                        return True
                # A click elsewhere closes the dropdown without consuming the event.
                self.dd_open = False
        return False

    def _save_current(self):
        """Write the on-screen min/max ranges back to the bus (torque disabled)."""
        for s in self.sliders:
            self.calibration[s.motor].range_min = s.min_v
            self.calibration[s.motor].range_max = s.max_v
        with self.bus.torque_disabled():
            self.bus.write_calibration(self.calibration)

    def _load_current(self):
        """Re-read calibration from the bus and reset the slider handles to it."""
        self.calibration = self.bus.read_calibration()
        for s in self.sliders:
            s.min_v = self.calibration[s.motor].range_min
            s.max_v = self.calibration[s.motor].range_max
            s.min_x = s._pos_from_val(s.min_v)
            s.max_x = s._pos_from_val(s.max_v)

    def run(self) -> dict[str, MotorCalibration]:
        """Main loop; blocks until the window is closed, then returns the calibration."""
        import pygame

        while True:
            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    pygame.quit()
                    return self.calibration
                if self._handle_dropdown_event(e):
                    continue
                if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1:
                    if self.save_btn.collidepoint(e.pos):
                        self._save_current()
                    elif self.load_btn.collidepoint(e.pos):
                        self._load_current()
                for s in self.sliders:
                    s.handle_event(e)
            # live goal write while dragging
            for s in self.sliders:
                if s.drag_pos:
                    self.bus.write("Goal_Position", s.motor, s.pos_v, normalize=False)
            # tick update
            for s in self.sliders:
                pos = self.bus.read("Present_Position", s.motor, normalize=False)
                s.set_tick(pos)
                self.present_cache[s.motor] = pos
            # ─ drawing
            self.screen.fill(BG_COLOR)
            for s in self.sliders:
                s.draw(self.screen)
            self._draw_dropdown()
            # load / save buttons
            for rect, text in ((self.load_btn, "LOAD"), (self.save_btn, "SAVE")):
                clr = BTN_COLOR_HL if rect.collidepoint(pygame.mouse.get_pos()) else BTN_COLOR
                pygame.draw.rect(self.screen, clr, rect, border_radius=6)
                t = self.font.render(text, True, TEXT_COLOR)
                self.screen.blit(t, (rect.centerx - t.get_width() // 2, rect.centery - t.get_height() // 2))
            pygame.display.flip()
            self.clock.tick(FPS)
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/motors/calibration_gui.py",
"license": "Apache License 2.0",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/robots/hope_jr/config_hope_jr.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from lerobot.cameras import CameraConfig
from ..config import RobotConfig
@RobotConfig.register_subclass("hope_jr_hand")
@dataclass
class HopeJrHandConfig(RobotConfig):
    """Configuration for a HopeJr hand.

    Attributes:
        port: Serial port used to communicate with the hand.
        side: Which hand this is; must be "left" or "right".
        disable_torque_on_disconnect: Release motor torque when disconnecting.
        cameras: Optional camera configurations keyed by camera name.

    Raises:
        ValueError: If `side` is not "left" or "right".
    """

    port: str  # Port to connect to the hand
    side: str  # "left" / "right"
    disable_torque_on_disconnect: bool = True
    cameras: dict[str, CameraConfig] = field(default_factory=dict)

    def __post_init__(self):
        super().__post_init__()
        # Fail fast with an actionable message instead of raising the bare value.
        if self.side not in ("right", "left"):
            raise ValueError(f"Invalid side {self.side!r}, expected 'left' or 'right'.")
@RobotConfig.register_subclass("hope_jr_arm")
@dataclass
class HopeJrArmConfig(RobotConfig):
    """Configuration for a HopeJr arm."""

    port: str  # Port to connect to the arm
    disable_torque_on_disconnect: bool = True
    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a dictionary that maps motor
    # names to the max_relative_target value for that motor.
    max_relative_target: float | dict[str, float] | None = None
    cameras: dict[str, CameraConfig] = field(default_factory=dict)
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/robots/hope_jr/config_hope_jr.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/robots/hope_jr/hope_jr_arm.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from functools import cached_property
from lerobot.cameras.utils import make_cameras_from_configs
from lerobot.motors import Motor, MotorNormMode
from lerobot.motors.calibration_gui import RangeFinderGUI
from lerobot.motors.feetech import (
FeetechMotorsBus,
)
from lerobot.processor import RobotAction, RobotObservation
from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
from ..robot import Robot
from ..utils import ensure_safe_goal_position
from .config_hope_jr import HopeJrArmConfig
logger = logging.getLogger(__name__)
class HopeJrArm(Robot):
    """HopeJr 7-DoF arm driven by Feetech serial-bus servos.

    Observations are per-joint positions (``"<motor>.pos"``) plus one image
    per configured camera; actions are per-joint goal positions.
    """

    config_class = HopeJrArmConfig
    name = "hope_jr_arm"

    def __init__(self, config: HopeJrArmConfig):
        super().__init__(config)
        self.config = config
        # Motor IDs 1-7. The shoulder pitch uses a different servo model
        # (sm8512bl) than the other joints (sts3250).
        self.bus = FeetechMotorsBus(
            port=self.config.port,
            motors={
                "shoulder_pitch": Motor(1, "sm8512bl", MotorNormMode.RANGE_M100_100),
                "shoulder_yaw": Motor(2, "sts3250", MotorNormMode.RANGE_M100_100),
                "shoulder_roll": Motor(3, "sts3250", MotorNormMode.RANGE_M100_100),
                "elbow_flex": Motor(4, "sts3250", MotorNormMode.RANGE_M100_100),
                "wrist_roll": Motor(5, "sts3250", MotorNormMode.RANGE_M100_100),
                "wrist_yaw": Motor(6, "sts3250", MotorNormMode.RANGE_M100_100),
                "wrist_pitch": Motor(7, "sts3250", MotorNormMode.RANGE_M100_100),
            },
            calibration=self.calibration,
        )
        self.cameras = make_cameras_from_configs(config.cameras)
        # HACK: shoulder_pitch is read individually in get_observation rather
        # than in the sync read with the other motors — presumably because its
        # servo model differs; TODO confirm.
        self.shoulder_pitch = "shoulder_pitch"
        self.other_motors = [m for m in self.bus.motors if m != "shoulder_pitch"]

    @property
    def _motors_ft(self) -> dict[str, type]:
        # One float feature per motor, keyed "<motor>.pos".
        return {f"{motor}.pos": float for motor in self.bus.motors}

    @property
    def _cameras_ft(self) -> dict[str, tuple]:
        # One (height, width, 3) image feature per configured camera.
        return {
            cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
        }

    @cached_property
    def observation_features(self) -> dict[str, type | tuple]:
        """Motor position features plus camera image features."""
        return {**self._motors_ft, **self._cameras_ft}

    @cached_property
    def action_features(self) -> dict[str, type]:
        """Per-motor goal position features."""
        return self._motors_ft

    @property
    def is_connected(self) -> bool:
        # Connected only when the bus AND every camera are connected.
        return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())

    @check_if_already_connected
    def connect(self, calibrate: bool = True) -> None:
        """
        We assume that at connection time, arm is in a rest position,
        and torque can be safely disabled to run calibration.
        """
        self.bus.connect(handshake=False)
        if not self.is_calibrated and calibrate:
            self.calibrate()

        # Connect the cameras
        for cam in self.cameras.values():
            cam.connect()
        self.configure()
        logger.info(f"{self} connected.")

    @property
    def is_calibrated(self) -> bool:
        return self.bus.is_calibrated

    def calibrate(self) -> None:
        """Run the interactive range-finder GUI and persist the result."""
        # Joint groups selectable in the GUI.
        groups = {
            "all": list(self.bus.motors.keys()),
            "shoulder": ["shoulder_pitch", "shoulder_yaw", "shoulder_roll"],
            "elbow": ["elbow_flex"],
            "wrist": ["wrist_roll", "wrist_yaw", "wrist_pitch"],
        }
        self.calibration = RangeFinderGUI(self.bus, groups).run()
        self._save_calibration()
        print("Calibration saved to", self.calibration_fpath)

    def configure(self) -> None:
        # Torque must be disabled while writing configuration registers.
        with self.bus.torque_disabled():
            self.bus.configure_motors(maximum_acceleration=30, acceleration=30)

    def setup_motors(self) -> None:
        # TODO: add docstring
        # Assign motor IDs one by one; each motor must be the only device on
        # the bus while its ID is written.
        for motor in reversed(self.bus.motors):
            input(f"Connect the controller board to the '{motor}' motor only and press enter.")
            self.bus.setup_motor(motor)
            print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")

    @check_if_not_connected
    def get_observation(self) -> RobotObservation:
        """Read joint positions and camera frames into one flat dict."""
        # Read arm position
        start = time.perf_counter()
        obs_dict = self.bus.sync_read("Present_Position", self.other_motors)
        # shoulder_pitch is read on its own (see the HACK note in __init__).
        obs_dict[self.shoulder_pitch] = self.bus.read("Present_Position", self.shoulder_pitch)
        obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
        dt_ms = (time.perf_counter() - start) * 1e3
        logger.debug(f"{self} read state: {dt_ms:.1f}ms")

        # Capture images from cameras
        for cam_key, cam in self.cameras.items():
            start = time.perf_counter()
            obs_dict[cam_key] = cam.read_latest()
            dt_ms = (time.perf_counter() - start) * 1e3
            logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
        return obs_dict

    @check_if_not_connected
    def send_action(self, action: RobotAction) -> RobotAction:
        """Write goal positions to the bus.

        Returns the action actually sent (possibly clamped by
        ``max_relative_target``), keyed "<motor>.pos".
        """
        goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}

        # Cap goal position when too far away from present position.
        # /!\ Slower fps expected due to reading from the follower.
        if self.config.max_relative_target is not None:
            present_pos = self.bus.sync_read("Present_Position")
            goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()}
            goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target)

        self.bus.sync_write("Goal_Position", goal_pos)
        return {f"{motor}.pos": val for motor, val in goal_pos.items()}

    @check_if_not_connected
    def disconnect(self):
        """Disconnect the bus (optionally releasing torque) and all cameras."""
        self.bus.disconnect(self.config.disable_torque_on_disconnect)
        for cam in self.cameras.values():
            cam.disconnect()
        logger.info(f"{self} disconnected.")
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/robots/hope_jr/hope_jr_arm.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/robots/hope_jr/hope_jr_hand.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from functools import cached_property
from lerobot.cameras.utils import make_cameras_from_configs
from lerobot.motors import Motor, MotorNormMode
from lerobot.motors.calibration_gui import RangeFinderGUI
from lerobot.motors.feetech import (
FeetechMotorsBus,
)
from lerobot.processor import RobotAction, RobotObservation
from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
from ..robot import Robot
from .config_hope_jr import HopeJrHandConfig
logger = logging.getLogger(__name__)
# Motors whose direction is inverted (drive_mode=1) at calibration time,
# depending on which hand (left/right) is assembled — see HopeJrHand.calibrate.
RIGHT_HAND_INVERSIONS = [
    "thumb_mcp",
    "thumb_dip",
    "index_ulnar_flexor",
    "middle_ulnar_flexor",
    "ring_ulnar_flexor",
    "ring_pip_dip",
    "pinky_ulnar_flexor",
    "pinky_pip_dip",
]

LEFT_HAND_INVERSIONS = [
    "thumb_cmc",
    "thumb_mcp",
    "thumb_dip",
    "index_radial_flexor",
    "index_pip_dip",
    "middle_radial_flexor",
    "middle_pip_dip",
    "ring_radial_flexor",
    "ring_pip_dip",
    "pinky_radial_flexor",
    # "pinky_pip_dip",
]
class HopeJrHand(Robot):
    """HopeJr 16-DoF hand driven by Feetech scs0009 serial-bus servos.

    Observations are per-joint positions (``"<motor>.pos"``) plus one image
    per configured camera; actions are per-joint goal positions.
    """

    config_class = HopeJrHandConfig
    name = "hope_jr_hand"

    def __init__(self, config: HopeJrHandConfig):
        super().__init__(config)
        self.config = config
        # Motor IDs 1-16, four per finger group (thumb) / three per finger.
        self.bus = FeetechMotorsBus(
            port=self.config.port,
            motors={
                # Thumb
                "thumb_cmc": Motor(1, "scs0009", MotorNormMode.RANGE_0_100),
                "thumb_mcp": Motor(2, "scs0009", MotorNormMode.RANGE_0_100),
                "thumb_pip": Motor(3, "scs0009", MotorNormMode.RANGE_0_100),
                "thumb_dip": Motor(4, "scs0009", MotorNormMode.RANGE_0_100),
                # Index
                "index_radial_flexor": Motor(5, "scs0009", MotorNormMode.RANGE_0_100),
                "index_ulnar_flexor": Motor(6, "scs0009", MotorNormMode.RANGE_0_100),
                "index_pip_dip": Motor(7, "scs0009", MotorNormMode.RANGE_0_100),
                # Middle
                "middle_radial_flexor": Motor(8, "scs0009", MotorNormMode.RANGE_0_100),
                "middle_ulnar_flexor": Motor(9, "scs0009", MotorNormMode.RANGE_0_100),
                "middle_pip_dip": Motor(10, "scs0009", MotorNormMode.RANGE_0_100),
                # Ring
                "ring_radial_flexor": Motor(11, "scs0009", MotorNormMode.RANGE_0_100),
                "ring_ulnar_flexor": Motor(12, "scs0009", MotorNormMode.RANGE_0_100),
                "ring_pip_dip": Motor(13, "scs0009", MotorNormMode.RANGE_0_100),
                # Pinky
                "pinky_radial_flexor": Motor(14, "scs0009", MotorNormMode.RANGE_0_100),
                "pinky_ulnar_flexor": Motor(15, "scs0009", MotorNormMode.RANGE_0_100),
                "pinky_pip_dip": Motor(16, "scs0009", MotorNormMode.RANGE_0_100),
            },
            calibration=self.calibration,
            # scs0009 servos speak Feetech protocol 1 (the arm's bus uses the default).
            protocol_version=1,
        )
        self.cameras = make_cameras_from_configs(config.cameras)
        # Direction inversions depend on which hand is assembled.
        self.inverted_motors = RIGHT_HAND_INVERSIONS if config.side == "right" else LEFT_HAND_INVERSIONS

    @property
    def _motors_ft(self) -> dict[str, type]:
        # One float feature per motor, keyed "<motor>.pos".
        return {f"{motor}.pos": float for motor in self.bus.motors}

    @property
    def _cameras_ft(self) -> dict[str, tuple]:
        # One (height, width, 3) image feature per configured camera.
        return {
            cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
        }

    @cached_property
    def observation_features(self) -> dict[str, type | tuple]:
        """Motor position features plus camera image features."""
        return {**self._motors_ft, **self._cameras_ft}

    @cached_property
    def action_features(self) -> dict[str, type]:
        """Per-motor goal position features."""
        return self._motors_ft

    @property
    def is_connected(self) -> bool:
        # Connected only when the bus AND every camera are connected.
        return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())

    @check_if_already_connected
    def connect(self, calibrate: bool = True) -> None:
        """Connect to the bus and cameras, calibrating first if needed."""
        self.bus.connect()
        if not self.is_calibrated and calibrate:
            self.calibrate()

        # Connect the cameras
        for cam in self.cameras.values():
            cam.connect()
        self.configure()
        logger.info(f"{self} connected.")

    @property
    def is_calibrated(self) -> bool:
        return self.bus.is_calibrated

    def calibrate(self) -> None:
        """Run the range-finder GUI per finger, apply inversions, and save."""
        fingers = {}
        for finger in ["thumb", "index", "middle", "ring", "pinky"]:
            fingers[finger] = [motor for motor in self.bus.motors if motor.startswith(finger)]

        self.calibration = RangeFinderGUI(self.bus, fingers).run()
        # Mark side-dependent motors as direction-inverted.
        for motor in self.inverted_motors:
            self.calibration[motor].drive_mode = 1

        self._save_calibration()
        print("Calibration saved to", self.calibration_fpath)

    def configure(self) -> None:
        # Torque must be disabled while writing configuration registers.
        with self.bus.torque_disabled():
            self.bus.configure_motors()

    def setup_motors(self) -> None:
        # TODO: add docstring
        # Assign motor IDs one by one; each motor must be the only device on
        # the bus while its ID is written.
        for motor in self.bus.motors:
            input(f"Connect the controller board to the '{motor}' motor only and press enter.")
            self.bus.setup_motor(motor)
            print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")

    @check_if_not_connected
    def get_observation(self) -> RobotObservation:
        """Read joint positions and camera frames into one flat dict."""
        obs_dict = {}

        # Read hand position
        # NOTE(review): motors are read one at a time rather than via
        # sync_read — presumably a protocol-1 limitation; TODO confirm.
        start = time.perf_counter()
        for motor in self.bus.motors:
            obs_dict[f"{motor}.pos"] = self.bus.read("Present_Position", motor)
        dt_ms = (time.perf_counter() - start) * 1e3
        logger.debug(f"{self} read state: {dt_ms:.1f}ms")

        # Capture images from cameras
        for cam_key, cam in self.cameras.items():
            start = time.perf_counter()
            obs_dict[cam_key] = cam.read_latest()
            dt_ms = (time.perf_counter() - start) * 1e3
            logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")
        return obs_dict

    @check_if_not_connected
    def send_action(self, action: RobotAction) -> RobotAction:
        """Write goal positions to the bus; returns the action unchanged."""
        goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}
        self.bus.sync_write("Goal_Position", goal_pos)
        return action

    @check_if_not_connected
    def disconnect(self):
        """Disconnect the bus (optionally releasing torque) and all cameras."""
        self.bus.disconnect(self.config.disable_torque_on_disconnect)
        for cam in self.cameras.values():
            cam.disconnect()
        logger.info(f"{self} disconnected.")
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/robots/hope_jr/hope_jr_hand.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/teleoperators/homunculus/config_homunculus.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from ..config import TeleoperatorConfig
@TeleoperatorConfig.register_subclass("homunculus_glove")
@dataclass
class HomunculusGloveConfig(TeleoperatorConfig):
    """Configuration for the Homunculus exoskeleton glove teleoperator.

    Attributes:
        port: Serial port used to communicate with the glove.
        side: Which hand the glove is worn on; must be "left" or "right".
        baud_rate: Serial baud rate.
    """

    port: str  # Port to connect to the glove
    side: str  # "left" / "right"
    baud_rate: int = 115_200

    def __post_init__(self):
        # Fail fast with an actionable message instead of the bare value,
        # so a typo like side="Left" is immediately diagnosable.
        if self.side not in ["right", "left"]:
            raise ValueError(f"side must be 'left' or 'right', got {self.side!r}")
@TeleoperatorConfig.register_subclass("homunculus_arm")
@dataclass
class HomunculusArmConfig(TeleoperatorConfig):
    """Configuration for the Homunculus exoskeleton arm teleoperator."""

    port: str  # Port to connect to the arm
    baud_rate: int = 115_200
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/teleoperators/homunculus/config_homunculus.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/teleoperators/homunculus/homunculus_arm.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from collections import deque
from pprint import pformat
import serial
from lerobot.motors.motors_bus import MotorCalibration, MotorNormMode
from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
from lerobot.utils.utils import enter_pressed, move_cursor_up
from ..teleoperator import Teleoperator
from .config_homunculus import HomunculusArmConfig
logger = logging.getLogger(__name__)
class HomunculusArm(Teleoperator):
    """
    Homunculus Arm designed by Hugging Face.

    A background thread continuously parses joint angles from the serial
    port; `get_action` returns the latest smoothed, normalized positions.
    """

    config_class = HomunculusArmConfig
    name = "homunculus_arm"

    def __init__(self, config: HomunculusArmConfig):
        super().__init__(config)
        self.config = config
        self.serial = serial.Serial(config.port, config.baud_rate, timeout=1)
        # Guards all access to the serial port (reader thread vs. is_connected).
        self.serial_lock = threading.Lock()

        # Joint name -> normalization mode applied by _normalize().
        self.joints = {
            "shoulder_pitch": MotorNormMode.RANGE_M100_100,
            "shoulder_yaw": MotorNormMode.RANGE_M100_100,
            "shoulder_roll": MotorNormMode.RANGE_M100_100,
            "elbow_flex": MotorNormMode.RANGE_M100_100,
            "wrist_roll": MotorNormMode.RANGE_M100_100,
            "wrist_yaw": MotorNormMode.RANGE_M100_100,
            "wrist_pitch": MotorNormMode.RANGE_M100_100,
        }

        n = 50
        # EMA parameters ---------------------------------------------------
        self.n: int = n
        self.alpha: float = 2 / (n + 1)

        # one deque *per joint* so we can inspect raw history if needed
        self._buffers: dict[str, deque[int]] = {
            joint: deque(maxlen=n)
            for joint in (
                "shoulder_pitch",
                "shoulder_yaw",
                "shoulder_roll",
                "elbow_flex",
                "wrist_roll",
                "wrist_yaw",
                "wrist_pitch",
            )
        }
        # running EMA value per joint – lazily initialised on first read
        self._ema: dict[str, float | None] = dict.fromkeys(self._buffers)

        # Latest frame produced by the reader thread; guarded by state_lock.
        self._state: dict[str, float] | None = None
        self.new_state_event = threading.Event()
        self.stop_event = threading.Event()
        self.thread = threading.Thread(target=self._read_loop, daemon=True, name=f"{self} _read_loop")
        self.state_lock = threading.Lock()

    @property
    def action_features(self) -> dict:
        # One float feature per joint, keyed "<joint>.pos".
        return {f"{joint}.pos": float for joint in self.joints}

    @property
    def feedback_features(self) -> dict:
        # This device has no feedback channel.
        return {}

    @property
    def is_connected(self) -> bool:
        with self.serial_lock:
            return self.serial.is_open and self.thread.is_alive()

    @check_if_already_connected
    def connect(self, calibrate: bool = True) -> None:
        """Open the serial port, start the reader thread, calibrate if needed."""
        if not self.serial.is_open:
            self.serial.open()
        self.thread.start()

        # wait for the thread to ramp up & 1st state to be ready
        if not self.new_state_event.wait(timeout=2):
            raise TimeoutError(f"{self}: Timed out waiting for state after 2s.")

        if not self.is_calibrated and calibrate:
            self.calibrate()
        logger.info(f"{self} connected.")

    @property
    def is_calibrated(self) -> bool:
        # Calibration is considered present when the file exists on disk.
        return self.calibration_fpath.is_file()

    def calibrate(self) -> None:
        """Interactively record each joint's range of motion and save it."""
        print(
            "\nMove all joints through their entire range of motion."
            "\nRecording positions. Press ENTER to stop..."
        )
        range_mins, range_maxes = self._record_ranges_of_motion()

        self.calibration = {}
        for id_, joint in enumerate(self.joints):
            self.calibration[joint] = MotorCalibration(
                id=id_,
                drive_mode=0,
                homing_offset=0,
                range_min=range_mins[joint],
                range_max=range_maxes[joint],
            )

        self._save_calibration()
        print("Calibration saved to", self.calibration_fpath)

    # TODO(Steven): This function is copy/paste from the `HomunculusGlove` class. Consider moving it to an utility to reduce duplicated code.
    def _record_ranges_of_motion(
        self, joints: list[str] | None = None, display_values: bool = True
    ) -> tuple[dict[str, int], dict[str, int]]:
        """Interactively record the min/max encoder values of each joint.

        Move the joints while the method streams live positions. Press :kbd:`Enter` to finish.

        Args:
            joints (list[str] | None, optional): Joints to record. Defaults to every joint (`None`).
            display_values (bool, optional): When `True` (default) a live table is printed to the console.

        Raises:
            TypeError: `joints` is not `None` or a list.
            ValueError: any joint's recorded min and max are the same.

        Returns:
            tuple[dict[str, int], dict[str, int]]: Two dictionaries *mins* and *maxes* with the extreme values
            observed for each joint.
        """
        if joints is None:
            joints = list(self.joints)
        elif not isinstance(joints, list):
            raise TypeError(joints)

        display_len = max(len(key) for key in joints)

        start_positions = self._read(joints, normalize=False)
        mins = start_positions.copy()
        maxes = start_positions.copy()

        user_pressed_enter = False
        while not user_pressed_enter:
            positions = self._read(joints, normalize=False)
            mins = {joint: int(min(positions[joint], min_)) for joint, min_ in mins.items()}
            maxes = {joint: int(max(positions[joint], max_)) for joint, max_ in maxes.items()}

            if display_values:
                print("\n-------------------------------------------")
                print(f"{'NAME':<{display_len}} | {'MIN':>6} | {'POS':>6} | {'MAX':>6}")
                for joint in joints:
                    print(
                        f"{joint:<{display_len}} | {mins[joint]:>6} | {positions[joint]:>6} | {maxes[joint]:>6}"
                    )

            if enter_pressed():
                user_pressed_enter = True

            if display_values and not user_pressed_enter:
                # Move cursor up to overwrite the previous output
                move_cursor_up(len(joints) + 3)

        same_min_max = [joint for joint in joints if mins[joint] == maxes[joint]]
        if same_min_max:
            raise ValueError(f"Some joints have the same min and max values:\n{pformat(same_min_max)}")

        return mins, maxes

    def configure(self) -> None:
        # No device-side configuration is required.
        pass

    # TODO(Steven): This function is copy/paste from the `HomunculusGlove` class. Consider moving it to an utility to reduce duplicated code.
    def _normalize(self, values: dict[str, int]) -> dict[str, float]:
        """Map raw encoder values to [-100, 100] or [0, 100] per joint,
        clamping to the calibrated range and honoring drive_mode inversion."""
        if not self.calibration:
            raise RuntimeError(f"{self} has no calibration registered.")

        normalized_values = {}
        for joint, val in values.items():
            min_ = self.calibration[joint].range_min
            max_ = self.calibration[joint].range_max
            drive_mode = self.calibration[joint].drive_mode
            bounded_val = min(max_, max(min_, val))

            if self.joints[joint] is MotorNormMode.RANGE_M100_100:
                norm = (((bounded_val - min_) / (max_ - min_)) * 200) - 100
                normalized_values[joint] = -norm if drive_mode else norm
            elif self.joints[joint] is MotorNormMode.RANGE_0_100:
                norm = ((bounded_val - min_) / (max_ - min_)) * 100
                normalized_values[joint] = 100 - norm if drive_mode else norm

        return normalized_values

    def _apply_ema(self, raw: dict[str, int]) -> dict[str, float]:
        """Update buffers & running EMA values; return smoothed dict."""
        smoothed: dict[str, float] = {}
        for joint, value in raw.items():
            # maintain raw history
            self._buffers[joint].append(value)
            # initialise on first run
            if self._ema[joint] is None:
                self._ema[joint] = float(value)
            else:
                self._ema[joint] = self.alpha * value + (1 - self.alpha) * self._ema[joint]
            smoothed[joint] = self._ema[joint]
        return smoothed

    def _read(
        self, joints: list[str] | None = None, normalize: bool = True, timeout: float = 1
    ) -> dict[str, int | float]:
        """
        Return the most recent (single) values from self.last_d,
        optionally applying calibration.
        """
        if not self.new_state_event.wait(timeout=timeout):
            raise TimeoutError(f"{self}: Timed out waiting for state after {timeout}s.")

        with self.state_lock:
            state = self._state
            self.new_state_event.clear()

        if state is None:
            raise RuntimeError(f"{self} Internal error: Event set but no state available.")

        if joints is not None:
            state = {k: v for k, v in state.items() if k in joints}

        # NOTE(review): here EMA smoothing is applied AFTER normalization and
        # only when normalize=True, while HomunculusGlove smooths raw values
        # before normalizing — confirm which ordering is intended.
        if normalize:
            state = self._normalize(state)
            state = self._apply_ema(state)

        return state

    def _read_loop(self):
        """
        Continuously read from the serial buffer in its own thread and sends values to the main thread through
        a queue.
        """
        while not self.stop_event.is_set():
            try:
                raw_values = None
                with self.serial_lock:
                    if self.serial.in_waiting > 0:
                        # Drain the buffer and keep only the most recent frame.
                        lines = []
                        while self.serial.in_waiting > 0:
                            line = self.serial.read_until().decode("utf-8").strip()
                            if line:
                                lines.append(line.split(" "))
                        if lines:
                            raw_values = lines[-1]

                if raw_values is None or len(raw_values) != 21:  # 16 raw + 5 angle values
                    continue

                # Index -> joint mapping is fixed by the firmware's frame
                # layout — TODO confirm against the firmware source.
                joint_angles = {
                    "shoulder_pitch": int(raw_values[19]),
                    "shoulder_yaw": int(raw_values[18]),
                    "shoulder_roll": int(raw_values[20]),
                    "elbow_flex": int(raw_values[17]),
                    "wrist_roll": int(raw_values[16]),
                    "wrist_yaw": int(raw_values[1]),
                    "wrist_pitch": int(raw_values[0]),
                }

                with self.state_lock:
                    self._state = joint_angles
                self.new_state_event.set()
            except Exception as e:
                logger.debug(f"Error reading frame in background thread for {self}: {e}")

    @check_if_not_connected
    def get_action(self) -> dict[str, float]:
        """Return the latest normalized joint positions, keyed "<joint>.pos"."""
        joint_positions = self._read()
        return {f"{joint}.pos": pos for joint, pos in joint_positions.items()}

    def send_feedback(self, feedback: dict[str, float]) -> None:
        raise NotImplementedError

    @check_if_not_connected
    def disconnect(self) -> None:
        """Stop the reader thread and close the serial port."""
        self.stop_event.set()
        self.thread.join(timeout=1)
        self.serial.close()
        logger.info(f"{self} disconnected.")
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/teleoperators/homunculus/homunculus_arm.py",
"license": "Apache License 2.0",
"lines": 252,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/teleoperators/homunculus/homunculus_glove.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from collections import deque
from pprint import pformat
import serial
from lerobot.motors import MotorCalibration
from lerobot.motors.motors_bus import MotorNormMode
from lerobot.teleoperators.homunculus.joints_translation import homunculus_glove_to_hope_jr_hand
from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
from lerobot.utils.utils import enter_pressed, move_cursor_up
from ..teleoperator import Teleoperator
from .config_homunculus import HomunculusGloveConfig
logger = logging.getLogger(__name__)
# Glove joints whose direction is inverted (drive_mode=1) at calibration time,
# depending on which hand the glove is worn on — see HomunculusGlove.calibrate.
LEFT_HAND_INVERSIONS = [
    "thumb_cmc",
    "index_dip",
    "middle_mcp_abduction",
    "middle_dip",
    "pinky_mcp_abduction",
    "pinky_dip",
]

RIGHT_HAND_INVERSIONS = [
    "thumb_mcp",
    "thumb_cmc",
    "thumb_pip",
    "thumb_dip",
    "index_mcp_abduction",
    # "index_dip",
    "middle_mcp_abduction",
    # "middle_dip",
    "ring_mcp_abduction",
    "ring_mcp_flexion",
    # "ring_dip",
    "pinky_mcp_abduction",
]
class HomunculusGlove(Teleoperator):
"""
Homunculus Glove designed by NepYope & Hugging Face.
"""
config_class = HomunculusGloveConfig
name = "homunculus_glove"
def __init__(self, config: HomunculusGloveConfig):
super().__init__(config)
self.config = config
self.serial = serial.Serial(config.port, config.baud_rate, timeout=1)
self.serial_lock = threading.Lock()
self.joints = {
"thumb_cmc": MotorNormMode.RANGE_0_100,
"thumb_mcp": MotorNormMode.RANGE_0_100,
"thumb_pip": MotorNormMode.RANGE_0_100,
"thumb_dip": MotorNormMode.RANGE_0_100,
"index_mcp_abduction": MotorNormMode.RANGE_M100_100,
"index_mcp_flexion": MotorNormMode.RANGE_0_100,
"index_dip": MotorNormMode.RANGE_0_100,
"middle_mcp_abduction": MotorNormMode.RANGE_M100_100,
"middle_mcp_flexion": MotorNormMode.RANGE_0_100,
"middle_dip": MotorNormMode.RANGE_0_100,
"ring_mcp_abduction": MotorNormMode.RANGE_M100_100,
"ring_mcp_flexion": MotorNormMode.RANGE_0_100,
"ring_dip": MotorNormMode.RANGE_0_100,
"pinky_mcp_abduction": MotorNormMode.RANGE_M100_100,
"pinky_mcp_flexion": MotorNormMode.RANGE_0_100,
"pinky_dip": MotorNormMode.RANGE_0_100,
}
self.inverted_joints = RIGHT_HAND_INVERSIONS if config.side == "right" else LEFT_HAND_INVERSIONS
n = 10
# EMA parameters ---------------------------------------------------
self.n: int = n
self.alpha: float = 2 / (n + 1)
# one deque *per joint* so we can inspect raw history if needed
self._buffers: dict[str, deque[int]] = {joint: deque(maxlen=n) for joint in self.joints}
# running EMA value per joint – lazily initialised on first read
self._ema: dict[str, float | None] = dict.fromkeys(self._buffers)
self._state: dict[str, float] | None = None
self.new_state_event = threading.Event()
self.stop_event = threading.Event()
self.thread = threading.Thread(target=self._read_loop, daemon=True, name=f"{self} _read_loop")
self.state_lock = threading.Lock()
@property
def action_features(self) -> dict:
return {f"{joint}.pos": float for joint in self.joints}
@property
def feedback_features(self) -> dict:
return {}
@property
def is_connected(self) -> bool:
with self.serial_lock:
return self.serial.is_open and self.thread.is_alive()
@check_if_already_connected
def connect(self, calibrate: bool = True) -> None:
if not self.serial.is_open:
self.serial.open()
self.thread.start()
# wait for the thread to ramp up & 1st state to be ready
if not self.new_state_event.wait(timeout=2):
raise TimeoutError(f"{self}: Timed out waiting for state after 2s.")
if not self.is_calibrated and calibrate:
self.calibrate()
logger.info(f"{self} connected.")
@property
def is_calibrated(self) -> bool:
return self.calibration_fpath.is_file()
def calibrate(self) -> None:
range_mins, range_maxes = {}, {}
for finger in ["thumb", "index", "middle", "ring", "pinky"]:
print(
f"\nMove {finger} through its entire range of motion."
"\nRecording positions. Press ENTER to stop..."
)
finger_joints = [joint for joint in self.joints if joint.startswith(finger)]
finger_mins, finger_maxes = self._record_ranges_of_motion(finger_joints)
range_mins.update(finger_mins)
range_maxes.update(finger_maxes)
self.calibration = {}
for id_, joint in enumerate(self.joints):
self.calibration[joint] = MotorCalibration(
id=id_,
drive_mode=1 if joint in self.inverted_joints else 0,
homing_offset=0,
range_min=range_mins[joint],
range_max=range_maxes[joint],
)
self._save_calibration()
print("Calibration saved to", self.calibration_fpath)
# TODO(Steven): This function is copy/paste from the `HomunculusArm` class. Consider moving it to an utility to reduce duplicated code.
def _record_ranges_of_motion(
self, joints: list[str] | None = None, display_values: bool = True
) -> tuple[dict[str, int], dict[str, int]]:
"""Interactively record the min/max encoder values of each joint.
Move the joints while the method streams live positions. Press :kbd:`Enter` to finish.
Args:
joints (list[str] | None, optional): Joints to record. Defaults to every joint (`None`).
display_values (bool, optional): When `True` (default) a live table is printed to the console.
Raises:
TypeError: `joints` is not `None` or a list.
ValueError: any joint's recorded min and max are the same.
Returns:
tuple[dict[str, int], dict[str, int]]: Two dictionaries *mins* and *maxes* with the extreme values
observed for each joint.
"""
if joints is None:
joints = list(self.joints)
elif not isinstance(joints, list):
raise TypeError(joints)
display_len = max(len(key) for key in joints)
start_positions = self._read(joints, normalize=False)
mins = start_positions.copy()
maxes = start_positions.copy()
user_pressed_enter = False
while not user_pressed_enter:
positions = self._read(joints, normalize=False)
mins = {joint: int(min(positions[joint], min_)) for joint, min_ in mins.items()}
maxes = {joint: int(max(positions[joint], max_)) for joint, max_ in maxes.items()}
if display_values:
print("\n-------------------------------------------")
print(f"{'NAME':<{display_len}} | {'MIN':>6} | {'POS':>6} | {'MAX':>6}")
for joint in joints:
print(
f"{joint:<{display_len}} | {mins[joint]:>6} | {positions[joint]:>6} | {maxes[joint]:>6}"
)
if enter_pressed():
user_pressed_enter = True
if display_values and not user_pressed_enter:
# Move cursor up to overwrite the previous output
move_cursor_up(len(joints) + 3)
same_min_max = [joint for joint in joints if mins[joint] == maxes[joint]]
if same_min_max:
raise ValueError(f"Some joints have the same min and max values:\n{pformat(same_min_max)}")
return mins, maxes
def configure(self) -> None:
pass
# TODO(Steven): This function is copy/paste from the `HomunculusArm` class. Consider moving it to an utility to reduce duplicated code.
def _normalize(self, values: dict[str, int]) -> dict[str, float]:
if not self.calibration:
raise RuntimeError(f"{self} has no calibration registered.")
normalized_values = {}
for joint, val in values.items():
min_ = self.calibration[joint].range_min
max_ = self.calibration[joint].range_max
drive_mode = self.calibration[joint].drive_mode
bounded_val = min(max_, max(min_, val))
if self.joints[joint] is MotorNormMode.RANGE_M100_100:
norm = (((bounded_val - min_) / (max_ - min_)) * 200) - 100
normalized_values[joint] = -norm if drive_mode else norm
elif self.joints[joint] is MotorNormMode.RANGE_0_100:
norm = ((bounded_val - min_) / (max_ - min_)) * 100
normalized_values[joint] = 100 - norm if drive_mode else norm
return normalized_values
def _apply_ema(self, raw: dict[str, int]) -> dict[str, int]:
"""Update buffers & running EMA values; return smoothed dict as integers."""
smoothed: dict[str, int] = {}
for joint, value in raw.items():
# maintain raw history
self._buffers[joint].append(value)
# initialise on first run
if self._ema[joint] is None:
self._ema[joint] = float(value)
else:
self._ema[joint] = self.alpha * value + (1 - self.alpha) * self._ema[joint]
# Convert back to int for compatibility with normalization
smoothed[joint] = int(round(self._ema[joint]))
return smoothed
def _read(
self, joints: list[str] | None = None, normalize: bool = True, timeout: float = 1
) -> dict[str, int | float]:
"""
Return the most recent (single) values from self.last_d,
optionally applying calibration.
"""
if not self.new_state_event.wait(timeout=timeout):
raise TimeoutError(f"{self}: Timed out waiting for state after {timeout}s.")
with self.state_lock:
state = self._state
self.new_state_event.clear()
if state is None:
raise RuntimeError(f"{self} Internal error: Event set but no state available.")
if joints is not None:
state = {k: v for k, v in state.items() if k in joints}
# Apply EMA smoothing to raw values first
state = self._apply_ema(state)
# Then normalize if requested
if normalize:
state = self._normalize(state)
return state
def _read_loop(self):
    """
    Continuously read from the serial buffer in its own thread and sends values to the main thread through
    a queue.

    Drains everything currently buffered on the serial port, keeps only the
    most recent complete line, parses it into integer joint positions, and
    publishes it under ``state_lock`` while signalling ``new_state_event``.
    Runs until ``stop_event`` is set.
    """
    while not self.stop_event.is_set():
        try:
            positions = None
            with self.serial_lock:
                if self.serial.in_waiting > 0:
                    lines = []
                    # Drain the whole buffer; stale frames are discarded below.
                    while self.serial.in_waiting > 0:
                        line = self.serial.read_until().decode("utf-8").strip()
                        if line:
                            lines.append(line.split(" "))
                    if lines:
                        # Keep only the freshest frame.
                        positions = lines[-1]
            # Skip empty reads and malformed frames (wrong token count).
            if positions is None or len(positions) != len(self.joints):
                continue
            joint_positions = {joint: int(pos) for joint, pos in zip(self.joints, positions, strict=True)}
            with self.state_lock:
                self._state = joint_positions
                self.new_state_event.set()
        except Exception as e:
            # Best-effort loop: log at debug level and keep reading.
            logger.debug(f"Error reading frame in background thread for {self}: {e}")
@check_if_not_connected
def get_action(self) -> dict[str, float]:
    """Read the latest glove state and translate it into a HopeJR hand action."""
    positions = self._read()
    glove_action = {f"{name}.pos": value for name, value in positions.items()}
    return homunculus_glove_to_hope_jr_hand(glove_action)
def send_feedback(self, feedback: dict[str, float]) -> None:
    """Feedback is not implemented for this device."""
    raise NotImplementedError
@check_if_not_connected
def disconnect(self) -> None:
    """Stop the background reader thread and close the serial port."""
    self.stop_event.set()
    # Give the reader thread a bounded time to exit before closing the port.
    self.thread.join(timeout=1)
    self.serial.close()
    logger.info(f"{self} disconnected.")
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/teleoperators/homunculus/homunculus_glove.py",
"license": "Apache License 2.0",
"lines": 277,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/teleoperators/homunculus/joints_translation.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Per-finger splay blend factors in [0, 1]: the weight given to MCP abduction
# (vs. flexion) when mixing the two into radial/ulnar flexor commands below.
# NOTE(review): values appear hand-tuned — confirm before changing.
INDEX_SPLAY = 0.3
MIDDLE_SPLAY = 0.3
RING_SPLAY = 0.3
PINKY_SPLAY = 0.5
def get_ulnar_flexion(flexion: float, abduction: float, splay: float):
    """Mix flexion with negated abduction, weighting abduction by *splay*."""
    abduction_term = -abduction * splay
    flexion_term = flexion * (1 - splay)
    return abduction_term + flexion_term
def get_radial_flexion(flexion: float, abduction: float, splay: float):
    """Mix flexion with abduction, weighting abduction by *splay*."""
    abduction_term = abduction * splay
    flexion_term = flexion * (1 - splay)
    return abduction_term + flexion_term
def homunculus_glove_to_hope_jr_hand(glove_action: dict[str, float]) -> dict[str, float]:
    """Translate Homunculus glove joint positions into HopeJR hand joint commands.

    Thumb joints map one-to-one; for each remaining finger the MCP
    flexion/abduction pair is blended into radial/ulnar flexor commands using
    that finger's splay factor, and the DIP reading drives the pip_dip joint.
    """
    splay_per_finger = {
        "index": INDEX_SPLAY,
        "middle": MIDDLE_SPLAY,
        "ring": RING_SPLAY,
        "pinky": PINKY_SPLAY,
    }
    hand_action: dict[str, float] = {
        "thumb_cmc.pos": glove_action["thumb_cmc.pos"],
        "thumb_mcp.pos": glove_action["thumb_mcp.pos"],
        "thumb_pip.pos": glove_action["thumb_pip.pos"],
        "thumb_dip.pos": glove_action["thumb_dip.pos"],
    }
    for finger, splay in splay_per_finger.items():
        flexion = glove_action[f"{finger}_mcp_flexion.pos"]
        abduction = glove_action[f"{finger}_mcp_abduction.pos"]
        hand_action[f"{finger}_radial_flexor.pos"] = get_radial_flexion(flexion, abduction, splay)
        hand_action[f"{finger}_ulnar_flexor.pos"] = get_ulnar_flexion(flexion, abduction, splay)
        hand_action[f"{finger}_pip_dip.pos"] = glove_action[f"{finger}_dip.pos"]
    return hand_action
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/teleoperators/homunculus/joints_translation.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:tests/policies/hilserl/test_modeling_classifier.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig
from lerobot.policies.sac.reward_model.modeling_classifier import ClassifierOutput
from lerobot.utils.constants import OBS_IMAGE, REWARD
from tests.utils import require_package
def test_classifier_output():
    """The ClassifierOutput repr should list logits, probabilities and hidden states."""
    logits = torch.tensor([1, 2, 3])
    probabilities = torch.tensor([0.1, 0.2, 0.3])
    output = ClassifierOutput(logits=logits, probabilities=probabilities, hidden_states=None)
    expected = (
        "ClassifierOutput(logits=tensor([1, 2, 3]), "
        "probabilities=tensor([0.1000, 0.2000, 0.3000]), hidden_states=None)"
    )
    assert f"{output}" == expected
@require_package("transformers")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_binary_classifier_with_default_params():
    """Smoke-test the binary reward classifier: output shapes and NaN-freeness."""
    from lerobot.policies.sac.reward_model.modeling_classifier import Classifier

    config = RewardClassifierConfig()
    config.input_features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    config.output_features = {
        REWARD: PolicyFeature(type=FeatureType.REWARD, shape=(1,)),
    }
    config.normalization_mapping = {
        "VISUAL": NormalizationMode.IDENTITY,
        "REWARD": NormalizationMode.IDENTITY,
    }
    config.num_cameras = 1
    classifier = Classifier(config)
    batch_size = 10
    # NOTE(review): inputs are 128x128 although the declared feature shape is
    # 224x224 — presumably the model is resolution-agnostic; confirm.
    input = {
        OBS_IMAGE: torch.rand((batch_size, 3, 128, 128)),
        REWARD: torch.randint(low=0, high=2, size=(batch_size,)).float(),
    }
    images, labels = classifier.extract_images_and_labels(input)
    assert len(images) == 1
    assert images[0].shape == torch.Size([batch_size, 3, 128, 128])
    assert labels.shape == torch.Size([batch_size])
    output = classifier.predict(images)
    assert output is not None
    # Binary head: one logit/probability per sample.
    assert output.logits.size() == torch.Size([batch_size])
    assert not torch.isnan(output.logits).any(), "Tensor contains NaN values"
    assert output.probabilities.shape == torch.Size([batch_size])
    assert not torch.isnan(output.probabilities).any(), "Tensor contains NaN values"
    assert output.hidden_states.shape == torch.Size([batch_size, 256])
    assert not torch.isnan(output.hidden_states).any(), "Tensor contains NaN values"
@require_package("transformers")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_multiclass_classifier():
    """Smoke-test the multi-class reward classifier: shapes track num_classes."""
    from lerobot.policies.sac.reward_model.modeling_classifier import Classifier

    num_classes = 5
    config = RewardClassifierConfig()
    config.input_features = {
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    config.output_features = {
        REWARD: PolicyFeature(type=FeatureType.REWARD, shape=(num_classes,)),
    }
    config.num_cameras = 1
    config.num_classes = num_classes
    classifier = Classifier(config)
    batch_size = 10
    input = {
        OBS_IMAGE: torch.rand((batch_size, 3, 128, 128)),
        REWARD: torch.rand((batch_size, num_classes)),
    }
    images, labels = classifier.extract_images_and_labels(input)
    assert len(images) == 1
    assert images[0].shape == torch.Size([batch_size, 3, 128, 128])
    assert labels.shape == torch.Size([batch_size, num_classes])
    output = classifier.predict(images)
    assert output is not None
    # Multi-class head: one logit/probability per class per sample.
    assert output.logits.shape == torch.Size([batch_size, num_classes])
    assert not torch.isnan(output.logits).any(), "Tensor contains NaN values"
    assert output.probabilities.shape == torch.Size([batch_size, num_classes])
    assert not torch.isnan(output.probabilities).any(), "Tensor contains NaN values"
    assert output.hidden_states.shape == torch.Size([batch_size, 256])
    assert not torch.isnan(output.hidden_states).any(), "Tensor contains NaN values"
@require_package("transformers")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_default_device():
    """A freshly built classifier should place all parameters on the CPU by default."""
    from lerobot.policies.sac.reward_model.modeling_classifier import Classifier

    config = RewardClassifierConfig()
    assert config.device == "cpu"
    classifier = Classifier(config)
    cpu = torch.device("cpu")
    for param in classifier.parameters():
        assert param.device == cpu
@require_package("transformers")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_explicit_device_setup():
    """Passing device='cpu' explicitly should also keep all parameters on the CPU."""
    from lerobot.policies.sac.reward_model.modeling_classifier import Classifier

    config = RewardClassifierConfig(device="cpu")
    assert config.device == "cpu"
    classifier = Classifier(config)
    cpu = torch.device("cpu")
    for param in classifier.parameters():
        assert param.device == cpu
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/policies/hilserl/test_modeling_classifier.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/policies/test_sac_config.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.policies.sac.configuration_sac import (
ActorLearnerConfig,
ActorNetworkConfig,
ConcurrencyConfig,
CriticNetworkConfig,
PolicyConfig,
SACConfig,
)
from lerobot.utils.constants import ACTION, OBS_IMAGE, OBS_STATE
def test_sac_config_default_initialization():
    """Check every documented default on a freshly constructed SACConfig.

    Fix: ``dataset_stats`` was previously asserted twice with an identical
    literal; the duplicate check has been removed.
    """
    config = SACConfig()

    assert config.normalization_mapping == {
        "VISUAL": NormalizationMode.MEAN_STD,
        "STATE": NormalizationMode.MIN_MAX,
        "ENV": NormalizationMode.MIN_MAX,
        "ACTION": NormalizationMode.MIN_MAX,
    }

    # Basic parameters
    assert config.device == "cpu"
    assert config.storage_device == "cpu"
    assert config.discount == 0.99
    assert config.temperature_init == 1.0
    assert config.num_critics == 2

    # Architecture specifics
    assert config.vision_encoder_name is None
    assert config.freeze_vision_encoder is True
    assert config.image_encoder_hidden_dim == 32
    assert config.shared_encoder is True
    assert config.num_discrete_actions is None
    assert config.image_embedding_pooling_dim == 8

    # Training parameters
    assert config.online_steps == 1000000
    assert config.online_buffer_capacity == 100000
    assert config.offline_buffer_capacity == 100000
    assert config.async_prefetch is False
    assert config.online_step_before_learning == 100
    assert config.policy_update_freq == 1

    # SAC algorithm parameters
    assert config.num_subsample_critics is None
    assert config.critic_lr == 3e-4
    assert config.actor_lr == 3e-4
    assert config.temperature_lr == 3e-4
    assert config.critic_target_update_weight == 0.005
    assert config.utd_ratio == 1
    assert config.state_encoder_hidden_dim == 256
    assert config.latent_dim == 256
    assert config.target_entropy is None
    assert config.use_backup_entropy is True
    assert config.grad_clip_norm == 40.0

    # Dataset stats defaults
    expected_dataset_stats = {
        OBS_IMAGE: {
            "mean": [0.485, 0.456, 0.406],
            "std": [0.229, 0.224, 0.225],
        },
        OBS_STATE: {
            "min": [0.0, 0.0],
            "max": [1.0, 1.0],
        },
        ACTION: {
            "min": [0.0, 0.0, 0.0],
            "max": [1.0, 1.0, 1.0],
        },
    }
    assert config.dataset_stats == expected_dataset_stats

    # Critic network configuration
    assert config.critic_network_kwargs.hidden_dims == [256, 256]
    assert config.critic_network_kwargs.activate_final is True
    assert config.critic_network_kwargs.final_activation is None

    # Actor network configuration
    assert config.actor_network_kwargs.hidden_dims == [256, 256]
    assert config.actor_network_kwargs.activate_final is True

    # Policy configuration
    assert config.policy_kwargs.use_tanh_squash is True
    assert config.policy_kwargs.std_min == 1e-5
    assert config.policy_kwargs.std_max == 10.0
    assert config.policy_kwargs.init_final == 0.05

    # Discrete critic network configuration
    assert config.discrete_critic_network_kwargs.hidden_dims == [256, 256]
    assert config.discrete_critic_network_kwargs.activate_final is True
    assert config.discrete_critic_network_kwargs.final_activation is None

    # Actor learner configuration
    assert config.actor_learner_config.learner_host == "127.0.0.1"
    assert config.actor_learner_config.learner_port == 50051
    assert config.actor_learner_config.policy_parameters_push_frequency == 4

    # Concurrency configuration
    assert config.concurrency.actor == "threads"
    assert config.concurrency.learner == "threads"

    # Sub-configs must be real config instances, not plain dicts.
    assert isinstance(config.actor_network_kwargs, ActorNetworkConfig)
    assert isinstance(config.critic_network_kwargs, CriticNetworkConfig)
    assert isinstance(config.policy_kwargs, PolicyConfig)
    assert isinstance(config.actor_learner_config, ActorLearnerConfig)
    assert isinstance(config.concurrency, ConcurrencyConfig)
def test_critic_network_kwargs():
    """CriticNetworkConfig defaults: two 256-wide layers, final activation flag on."""
    cfg = CriticNetworkConfig()
    assert cfg.hidden_dims == [256, 256]
    assert cfg.activate_final is True
    assert cfg.final_activation is None
def test_actor_network_kwargs():
    """ActorNetworkConfig defaults: two 256-wide layers, final activation flag on."""
    cfg = ActorNetworkConfig()
    assert cfg.hidden_dims == [256, 256]
    assert cfg.activate_final is True
def test_policy_kwargs():
    """PolicyConfig defaults: tanh squash with documented std bounds and init scale."""
    cfg = PolicyConfig()
    assert cfg.use_tanh_squash is True
    assert cfg.std_min == 1e-5
    assert cfg.std_max == 10.0
    assert cfg.init_final == 0.05
def test_actor_learner_config():
    """ActorLearnerConfig defaults: localhost learner with push frequency 4."""
    cfg = ActorLearnerConfig()
    assert cfg.learner_host == "127.0.0.1"
    assert cfg.learner_port == 50051
    assert cfg.policy_parameters_push_frequency == 4
def test_concurrency_config():
    """ConcurrencyConfig defaults to thread-based actor and learner."""
    cfg = ConcurrencyConfig()
    assert cfg.actor == "threads"
    assert cfg.learner == "threads"
def test_sac_config_custom_initialization():
    """Constructor keyword arguments must override the corresponding defaults."""
    overrides = {
        "device": "cpu",
        "discount": 0.95,
        "temperature_init": 0.5,
        "num_critics": 3,
    }
    cfg = SACConfig(**overrides)
    for field, expected in overrides.items():
        assert getattr(cfg, field) == expected
def test_validate_features():
    """A config with a state input and an action output must validate cleanly."""
    config = SACConfig(
        input_features={OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(10,))},
        output_features={ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(3,))},
    )
    # Should not raise.
    config.validate_features()
def test_validate_features_missing_observation():
    """Validation must reject a config without a state or image observation input."""
    config = SACConfig(
        input_features={"wrong_key": PolicyFeature(type=FeatureType.STATE, shape=(10,))},
        output_features={ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(3,))},
    )
    with pytest.raises(
        ValueError, match="You must provide either 'observation.state' or an image observation"
    ):
        config.validate_features()
def test_validate_features_missing_action():
    """Validation must reject a config whose output features lack 'action'."""
    config = SACConfig(
        input_features={OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(10,))},
        output_features={"wrong_key": PolicyFeature(type=FeatureType.ACTION, shape=(3,))},
    )
    with pytest.raises(ValueError, match="You must provide 'action' in the output features"):
        config.validate_features()
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/policies/test_sac_config.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/policies/test_sac_policy.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pytest
import torch
from torch import Tensor, nn
from lerobot.configs.types import FeatureType, PolicyFeature
from lerobot.policies.sac.configuration_sac import SACConfig
from lerobot.policies.sac.modeling_sac import MLP, SACPolicy
from lerobot.utils.constants import ACTION, OBS_IMAGE, OBS_STATE
from lerobot.utils.random_utils import seeded_context, set_seed
# Optional dependency probe: some tests below are skipped when `transformers`
# is not installed.
try:
    import transformers  # noqa: F401

    TRANSFORMERS_AVAILABLE = True
except ImportError:
    TRANSFORMERS_AVAILABLE = False
@pytest.fixture(autouse=True)
def set_random_seed():
    """Seed all RNGs before every test so results are reproducible."""
    set_seed(42)
def test_mlp_with_default_args():
    """An unbatched input should come out with the last hidden width."""
    net = MLP(input_dim=10, hidden_dims=[256, 256])
    out = net(torch.randn(10))
    assert out.shape == (256,)
def test_mlp_with_batch_dim():
    """A leading batch dimension must be preserved through the MLP."""
    net = MLP(input_dim=10, hidden_dims=[256, 256])
    out = net(torch.randn(2, 10))
    assert out.shape == (2, 256)
def test_forward_with_empty_hidden_dims():
    """With no hidden layers the output shape must equal the input shape."""
    net = MLP(input_dim=10, hidden_dims=[])
    out = net(torch.randn(1, 10))
    assert out.shape == (1, 10)
def test_mlp_with_dropout():
    """dropout_rate must insert exactly two Dropout layers for three hidden dims."""
    net = MLP(input_dim=10, hidden_dims=[256, 256, 11], dropout_rate=0.1)
    out = net(torch.randn(1, 10))
    assert out.shape == (1, 11)
    dropout_count = sum(isinstance(layer, nn.Dropout) for layer in net.net)
    assert dropout_count == 2
def test_mlp_with_custom_final_activation():
    """A Tanh final activation must bound all outputs to [-1, 1]."""
    net = MLP(input_dim=10, hidden_dims=[256, 256], final_activation=torch.nn.Tanh())
    out = net(torch.randn(1, 10))
    assert out.shape == (1, 256)
    assert ((out >= -1) & (out <= 1)).all()
def test_sac_policy_with_default_args():
    """SACPolicy requires a config object; constructing without one must fail."""
    with pytest.raises(ValueError, match="should be an instance of class `PreTrainedConfig`"):
        SACPolicy()
def create_dummy_state(batch_size: int, state_dim: int = 10) -> dict[str, Tensor]:
    """Build a random state-only observation dict.

    Fix: the return annotation previously said ``Tensor`` although a dict of
    tensors is returned.
    """
    return {
        OBS_STATE: torch.randn(batch_size, state_dim),
    }
def create_dummy_with_visual_input(batch_size: int, state_dim: int = 10) -> dict[str, Tensor]:
    """Build a random observation dict with both an image and a state vector.

    Fix: the return annotation previously said ``Tensor`` although a dict of
    tensors is returned.
    """
    return {
        OBS_IMAGE: torch.randn(batch_size, 3, 84, 84),
        OBS_STATE: torch.randn(batch_size, state_dim),
    }
def create_dummy_action(batch_size: int, action_dim: int = 10) -> Tensor:
    """Sample a random (batch_size, action_dim) action tensor."""
    shape = (batch_size, action_dim)
    return torch.randn(*shape)
def create_default_train_batch(
    batch_size: int = 8, state_dim: int = 10, action_dim: int = 10
) -> dict[str, Tensor]:
    """Assemble a minimal SAC transition batch (state-only observations).

    NOTE: values are drawn inline so the RNG consumption order stays stable
    for seeded tests. "reward" and "done" are plain Gaussian noise here —
    only shapes matter to these tests.
    """
    return {
        ACTION: create_dummy_action(batch_size, action_dim),
        "reward": torch.randn(batch_size),
        "state": create_dummy_state(batch_size, state_dim),
        "next_state": create_dummy_state(batch_size, state_dim),
        "done": torch.randn(batch_size),
    }
def create_train_batch_with_visual_input(
    batch_size: int = 8, state_dim: int = 10, action_dim: int = 10
) -> dict[str, Tensor]:
    """Assemble a minimal SAC transition batch whose observations include an image."""
    return {
        ACTION: create_dummy_action(batch_size, action_dim),
        "reward": torch.randn(batch_size),
        "state": create_dummy_with_visual_input(batch_size, state_dim),
        "next_state": create_dummy_with_visual_input(batch_size, state_dim),
        "done": torch.randn(batch_size),
    }
def create_observation_batch(batch_size: int = 8, state_dim: int = 10) -> dict[str, Tensor]:
    """Build a random state-only observation batch for select_action tests."""
    return {
        OBS_STATE: torch.randn(batch_size, state_dim),
    }
def create_observation_batch_with_visual_input(batch_size: int = 8, state_dim: int = 10) -> dict[str, Tensor]:
    """Build a random observation batch with both state and image entries."""
    return {
        OBS_STATE: torch.randn(batch_size, state_dim),
        OBS_IMAGE: torch.randn(batch_size, 3, 84, 84),
    }
def make_optimizers(policy: SACPolicy, has_discrete_action: bool = False) -> dict[str, torch.optim.Optimizer]:
    """Create optimizers for the SAC policy.

    Args:
        policy: Policy whose sub-networks each get one Adam optimizer.
        has_discrete_action: When True, also build an optimizer for the
            discrete critic head.

    Returns:
        Mapping with keys "actor", "critic", "temperature" and, optionally,
        "discrete_critic".
    """
    optimizer_actor = torch.optim.Adam(
        # Handle the case of shared encoder where the encoder weights are not optimized with the actor gradient
        params=[
            p
            for n, p in policy.actor.named_parameters()
            if not policy.config.shared_encoder or not n.startswith("encoder")
        ],
        lr=policy.config.actor_lr,
    )
    optimizer_critic = torch.optim.Adam(
        params=policy.critic_ensemble.parameters(),
        lr=policy.config.critic_lr,
    )
    # NOTE(review): the temperature optimizer reuses critic_lr rather than
    # config.temperature_lr — confirm this is intentional.
    optimizer_temperature = torch.optim.Adam(
        params=[policy.log_alpha],
        lr=policy.config.critic_lr,
    )
    optimizers = {
        "actor": optimizer_actor,
        "critic": optimizer_critic,
        "temperature": optimizer_temperature,
    }
    if has_discrete_action:
        optimizers["discrete_critic"] = torch.optim.Adam(
            params=policy.discrete_critic.parameters(),
            lr=policy.config.critic_lr,
        )
    return optimizers
def create_default_config(
    state_dim: int, continuous_action_dim: int, has_discrete_action: bool = False
) -> SACConfig:
    """Build a validated SACConfig with state-only inputs and matching dataset stats.

    Args:
        state_dim: Dimension of the proprioceptive state feature.
        continuous_action_dim: Dimension of the continuous action feature.
        has_discrete_action: Reserves one extra action slot (local bookkeeping
            only; the output feature shape stays continuous_action_dim).
    """
    action_dim = continuous_action_dim
    if has_discrete_action:
        action_dim += 1
    config = SACConfig(
        input_features={OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(state_dim,))},
        output_features={ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(continuous_action_dim,))},
        dataset_stats={
            OBS_STATE: {
                "min": [0.0] * state_dim,
                "max": [1.0] * state_dim,
            },
            ACTION: {
                "min": [0.0] * continuous_action_dim,
                "max": [1.0] * continuous_action_dim,
            },
        },
    )
    config.validate_features()
    return config
def create_config_with_visual_input(
    state_dim: int, continuous_action_dim: int, has_discrete_action: bool = False
) -> SACConfig:
    """Extend the default test config with an image input feature and its stats."""
    config = create_default_config(
        state_dim=state_dim,
        continuous_action_dim=continuous_action_dim,
        has_discrete_action=has_discrete_action,
    )
    config.input_features[OBS_IMAGE] = PolicyFeature(type=FeatureType.VISUAL, shape=(3, 84, 84))
    config.dataset_stats[OBS_IMAGE] = {
        "mean": torch.randn(3, 1, 1),
        "std": torch.randn(3, 1, 1),
    }
    # Let make tests a little bit faster
    config.state_encoder_hidden_dim = 32
    config.latent_dim = 32
    config.validate_features()
    return config
@pytest.mark.parametrize("batch_size,state_dim,action_dim", [(2, 6, 6), (1, 10, 10)])
def test_sac_policy_with_default_config(batch_size: int, state_dim: int, action_dim: int):
    """End-to-end SAC update (critic, actor, temperature) then action selection.

    Fix: local variable was misspelled "cirtic_loss".
    """
    batch = create_default_train_batch(batch_size=batch_size, action_dim=action_dim, state_dim=state_dim)
    config = create_default_config(state_dim=state_dim, continuous_action_dim=action_dim)
    policy = SACPolicy(config=config)
    policy.train()
    optimizers = make_optimizers(policy)

    critic_loss = policy.forward(batch, model="critic")["loss_critic"]
    assert critic_loss.item() is not None
    assert critic_loss.shape == ()
    critic_loss.backward()
    optimizers["critic"].step()

    actor_loss = policy.forward(batch, model="actor")["loss_actor"]
    assert actor_loss.item() is not None
    assert actor_loss.shape == ()
    actor_loss.backward()
    optimizers["actor"].step()

    temperature_loss = policy.forward(batch, model="temperature")["loss_temperature"]
    assert temperature_loss.item() is not None
    assert temperature_loss.shape == ()
    temperature_loss.backward()
    optimizers["temperature"].step()

    policy.eval()
    with torch.no_grad():
        observation_batch = create_observation_batch(batch_size=batch_size, state_dim=state_dim)
        selected_action = policy.select_action(observation_batch)
        assert selected_action.shape == (batch_size, action_dim)
@pytest.mark.parametrize("batch_size,state_dim,action_dim", [(2, 6, 6), (1, 10, 10)])
def test_sac_policy_with_visual_input(batch_size: int, state_dim: int, action_dim: int):
    """Same end-to-end update as the default test, with image observations.

    Fix: local variable was misspelled "cirtic_loss".
    """
    config = create_config_with_visual_input(state_dim=state_dim, continuous_action_dim=action_dim)
    policy = SACPolicy(config=config)
    batch = create_train_batch_with_visual_input(
        batch_size=batch_size, state_dim=state_dim, action_dim=action_dim
    )
    policy.train()
    optimizers = make_optimizers(policy)

    critic_loss = policy.forward(batch, model="critic")["loss_critic"]
    assert critic_loss.item() is not None
    assert critic_loss.shape == ()
    critic_loss.backward()
    optimizers["critic"].step()

    actor_loss = policy.forward(batch, model="actor")["loss_actor"]
    assert actor_loss.item() is not None
    assert actor_loss.shape == ()
    actor_loss.backward()
    optimizers["actor"].step()

    temperature_loss = policy.forward(batch, model="temperature")["loss_temperature"]
    assert temperature_loss.item() is not None
    assert temperature_loss.shape == ()
    temperature_loss.backward()
    optimizers["temperature"].step()

    policy.eval()
    with torch.no_grad():
        observation_batch = create_observation_batch_with_visual_input(
            batch_size=batch_size, state_dim=state_dim
        )
        selected_action = policy.select_action(observation_batch)
        assert selected_action.shape == (batch_size, action_dim)
# Let's check best candidates for pretrained encoders
@pytest.mark.parametrize(
    "batch_size,state_dim,action_dim,vision_encoder_name",
    [(1, 6, 6, "helper2424/resnet10"), (1, 6, 6, "facebook/convnext-base-224")],
)
@pytest.mark.skipif(not TRANSFORMERS_AVAILABLE, reason="Transformers are not installed")
@pytest.mark.skip(
    reason="helper2424/resnet10 needs to be updated to work with the latest version of transformers"
)
def test_sac_policy_with_pretrained_encoder(
    batch_size: int, state_dim: int, action_dim: int, vision_encoder_name: str
):
    """Critic and actor updates must run with a pretrained vision encoder.

    Fix: local variable was misspelled "cirtic_loss".
    """
    config = create_config_with_visual_input(state_dim=state_dim, continuous_action_dim=action_dim)
    config.vision_encoder_name = vision_encoder_name
    policy = SACPolicy(config=config)
    policy.train()
    batch = create_train_batch_with_visual_input(
        batch_size=batch_size, state_dim=state_dim, action_dim=action_dim
    )
    optimizers = make_optimizers(policy)

    critic_loss = policy.forward(batch, model="critic")["loss_critic"]
    assert critic_loss.item() is not None
    assert critic_loss.shape == ()
    critic_loss.backward()
    optimizers["critic"].step()

    actor_loss = policy.forward(batch, model="actor")["loss_actor"]
    assert actor_loss.item() is not None
    assert actor_loss.shape == ()
def test_sac_policy_with_shared_encoder():
    """Critic and actor updates must run with the encoder shared between them.

    Fixes: local variable was misspelled "cirtic_loss"; policy.train() was
    called twice.
    """
    batch_size = 2
    action_dim = 10
    state_dim = 10
    config = create_config_with_visual_input(state_dim=state_dim, continuous_action_dim=action_dim)
    config.shared_encoder = True
    policy = SACPolicy(config=config)
    policy.train()
    batch = create_train_batch_with_visual_input(
        batch_size=batch_size, state_dim=state_dim, action_dim=action_dim
    )
    optimizers = make_optimizers(policy)

    critic_loss = policy.forward(batch, model="critic")["loss_critic"]
    assert critic_loss.item() is not None
    assert critic_loss.shape == ()
    critic_loss.backward()
    optimizers["critic"].step()

    actor_loss = policy.forward(batch, model="actor")["loss_actor"]
    assert actor_loss.item() is not None
    assert actor_loss.shape == ()
    actor_loss.backward()
    optimizers["actor"].step()
def test_sac_policy_with_discrete_critic():
    """Train critic, discrete critic and actor; selected actions must end with a valid discrete index.

    Fixes: local variable was misspelled "cirtic_loss"; policy.train() was
    called twice.
    """
    batch_size = 2
    continuous_action_dim = 9
    full_action_dim = continuous_action_dim + 1  # the last action is discrete
    state_dim = 10
    config = create_config_with_visual_input(
        state_dim=state_dim, continuous_action_dim=continuous_action_dim, has_discrete_action=True
    )
    num_discrete_actions = 5
    config.num_discrete_actions = num_discrete_actions
    policy = SACPolicy(config=config)
    policy.train()
    batch = create_train_batch_with_visual_input(
        batch_size=batch_size, state_dim=state_dim, action_dim=full_action_dim
    )
    optimizers = make_optimizers(policy, has_discrete_action=True)

    critic_loss = policy.forward(batch, model="critic")["loss_critic"]
    assert critic_loss.item() is not None
    assert critic_loss.shape == ()
    critic_loss.backward()
    optimizers["critic"].step()

    discrete_critic_loss = policy.forward(batch, model="discrete_critic")["loss_discrete_critic"]
    assert discrete_critic_loss.item() is not None
    assert discrete_critic_loss.shape == ()
    discrete_critic_loss.backward()
    optimizers["discrete_critic"].step()

    actor_loss = policy.forward(batch, model="actor")["loss_actor"]
    assert actor_loss.item() is not None
    assert actor_loss.shape == ()
    actor_loss.backward()
    optimizers["actor"].step()

    policy.eval()
    with torch.no_grad():
        observation_batch = create_observation_batch_with_visual_input(
            batch_size=batch_size, state_dim=state_dim
        )
        selected_action = policy.select_action(observation_batch)
        assert selected_action.shape == (batch_size, full_action_dim)
        discrete_actions = selected_action[:, -1].long()
        discrete_action_values = set(discrete_actions.tolist())
        assert all(action in range(num_discrete_actions) for action in discrete_action_values), (
            f"Discrete action {discrete_action_values} is not in range({num_discrete_actions})"
        )
def test_sac_policy_with_default_entropy():
    """For this 10-D continuous action config the default target entropy is -5.0."""
    cfg = create_default_config(continuous_action_dim=10, state_dim=10)
    assert SACPolicy(config=cfg).target_entropy == -5.0
def test_sac_policy_default_target_entropy_with_discrete_action():
    """With 6 continuous dims plus a discrete action, the default target entropy is -3.0."""
    cfg = create_config_with_visual_input(state_dim=10, continuous_action_dim=6, has_discrete_action=True)
    assert SACPolicy(config=cfg).target_entropy == -3.0
def test_sac_policy_with_predefined_entropy():
    """An explicitly configured target_entropy overrides the computed default."""
    cfg = create_default_config(state_dim=10, continuous_action_dim=6)
    cfg.target_entropy = -3.5
    assert SACPolicy(config=cfg).target_entropy == pytest.approx(-3.5)
def test_sac_policy_update_temperature():
    """Test that temperature property is always in sync with log_alpha."""
    config = create_default_config(continuous_action_dim=10, state_dim=10)
    policy = SACPolicy(config=config)
    # Initial log_alpha of 0 corresponds to a temperature of 1.
    assert policy.temperature == pytest.approx(1.0)
    policy.log_alpha.data = torch.tensor([math.log(0.1)])
    # Temperature property automatically reflects log_alpha changes
    assert policy.temperature == pytest.approx(0.1)
def test_sac_policy_update_target_network():
    """With update weight 1.0 the target network must copy the critic exactly."""
    cfg = create_default_config(state_dim=10, continuous_action_dim=6)
    cfg.critic_target_update_weight = 1.0
    policy = SACPolicy(config=cfg)
    policy.train()
    # Overwrite the live critic with all-ones weights...
    for param in policy.critic_ensemble.parameters():
        param.data = torch.ones_like(param.data)
    policy.update_target_networks()
    # ...and expect the target to be an exact copy after one update.
    for p in policy.critic_target.parameters():
        assert torch.allclose(p.data, torch.ones_like(p.data)), (
            f"Target network {p.data} is not equal to {torch.ones_like(p.data)}"
        )
@pytest.mark.parametrize("num_critics", [1, 3])
def test_sac_policy_with_critics_number_of_heads(num_critics: int):
    """The critic ensemble size must follow config.num_critics and still train.

    Fixes: local variable was misspelled "cirtic_loss"; policy.train() was
    called twice.
    """
    batch_size = 2
    action_dim = 10
    state_dim = 10
    config = create_config_with_visual_input(state_dim=state_dim, continuous_action_dim=action_dim)
    config.num_critics = num_critics
    policy = SACPolicy(config=config)
    policy.train()
    assert len(policy.critic_ensemble.critics) == num_critics
    batch = create_train_batch_with_visual_input(
        batch_size=batch_size, state_dim=state_dim, action_dim=action_dim
    )
    optimizers = make_optimizers(policy)

    critic_loss = policy.forward(batch, model="critic")["loss_critic"]
    assert critic_loss.item() is not None
    assert critic_loss.shape == ()
    critic_loss.backward()
    optimizers["critic"].step()
def test_sac_policy_save_and_load(tmp_path):
    """A reloaded policy must match the original in weights, losses, and actions."""
    root = tmp_path / "test_sac_save_and_load"
    state_dim = 10
    action_dim = 10
    batch_size = 2
    config = create_default_config(state_dim=state_dim, continuous_action_dim=action_dim)
    policy = SACPolicy(config=config)
    policy.eval()
    policy.save_pretrained(root)
    loaded_policy = SACPolicy.from_pretrained(root, config=config)
    loaded_policy.eval()
    batch = create_default_train_batch(batch_size=1, state_dim=10, action_dim=10)
    with torch.no_grad():
        with seeded_context(12):
            # Collect policy values before saving
            # (fix: variables were misspelled "cirtic_loss")
            critic_loss = policy.forward(batch, model="critic")["loss_critic"]
            actor_loss = policy.forward(batch, model="actor")["loss_actor"]
            temperature_loss = policy.forward(batch, model="temperature")["loss_temperature"]
            observation_batch = create_observation_batch(batch_size=batch_size, state_dim=state_dim)
            actions = policy.select_action(observation_batch)
        with seeded_context(12):
            # Collect policy values after loading
            loaded_critic_loss = loaded_policy.forward(batch, model="critic")["loss_critic"]
            loaded_actor_loss = loaded_policy.forward(batch, model="actor")["loss_actor"]
            loaded_temperature_loss = loaded_policy.forward(batch, model="temperature")["loss_temperature"]
            loaded_observation_batch = create_observation_batch(batch_size=batch_size, state_dim=state_dim)
            loaded_actions = loaded_policy.select_action(loaded_observation_batch)
    assert policy.state_dict().keys() == loaded_policy.state_dict().keys()
    for k in policy.state_dict():
        assert torch.allclose(policy.state_dict()[k], loaded_policy.state_dict()[k], atol=1e-6)
    # Compare values before and after saving and loading
    # They should be the same
    assert torch.allclose(critic_loss, loaded_critic_loss)
    assert torch.allclose(actor_loss, loaded_actor_loss)
    assert torch.allclose(temperature_loss, loaded_temperature_loss)
    assert torch.allclose(actions, loaded_actions)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/policies/test_sac_policy.py",
"license": "Apache License 2.0",
"lines": 414,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/rl/test_actor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent import futures
from unittest.mock import patch
import pytest
import torch
from torch.multiprocessing import Event, Queue
from lerobot.utils.constants import OBS_STR
from lerobot.utils.transition import Transition
from tests.utils import require_package
def create_learner_service_stub():
    """Start a mock LearnerService gRPC server and return (stub, servicer, channel, server).

    The caller is responsible for tearing it down via `close_service_stub`.
    """
    import grpc

    from lerobot.transport import services_pb2, services_pb2_grpc

    class MockLearnerService(services_pb2_grpc.LearnerServiceServicer):
        def __init__(self):
            self.ready_call_count = 0
            self.should_fail = False  # when True, every Ready RPC fails with UNAVAILABLE

        def Ready(self, request, context):  # noqa: N802
            self.ready_call_count += 1
            if self.should_fail:
                context.set_code(grpc.StatusCode.UNAVAILABLE)
                context.set_details("Service unavailable")
                raise grpc.RpcError("Service unavailable")
            return services_pb2.Empty()

    servicer = MockLearnerService()
    # Create a gRPC server and add our servicer to it.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    services_pb2_grpc.add_LearnerServiceServicer_to_server(servicer, server)
    port = server.add_insecure_port("[::]:0")  # bind to a free port chosen by OS
    server.start()  # non-blocking call
    # Create a client channel and stub connected to the server's port.
    channel = grpc.insecure_channel(f"localhost:{port}")
    return services_pb2_grpc.LearnerServiceStub(channel), servicer, channel, server
def close_service_stub(channel, server):
    """Close the client channel and stop the in-process gRPC server."""
    channel.close()
    server.stop(None)
@require_package("grpcio", "grpc")
def test_establish_learner_connection_success():
    """A healthy learner service is reachable within the allotted attempts."""
    from lerobot.rl.actor import establish_learner_connection

    stub, _servicer, channel, server = create_learner_service_stub()
    shutdown_event = Event()
    # Test successful connection
    result = establish_learner_connection(stub, shutdown_event, attempts=5)
    assert result is True
    close_service_stub(channel, server)
@require_package("grpcio", "grpc")
def test_establish_learner_connection_failure():
    """Connection attempts against a failing service must report failure."""
    from lerobot.rl.actor import establish_learner_connection

    stub, servicer, channel, server = create_learner_service_stub()
    servicer.should_fail = True  # make every Ready RPC raise UNAVAILABLE
    shutdown_event = Event()
    # Test failed connection
    with patch("time.sleep"):  # Speed up the test
        result = establish_learner_connection(stub, shutdown_event, attempts=2)
        assert result is False
    close_service_stub(channel, server)
@require_package("grpcio", "grpc")
def test_push_transitions_to_transport_queue():
    """Transitions pushed to the transport queue round-trip through serialization."""
    from lerobot.rl.actor import push_transitions_to_transport_queue
    from lerobot.transport.utils import bytes_to_transitions

    from tests.transport.test_transport_utils import assert_transitions_equal

    # Create mock transitions
    transitions = []
    for i in range(3):
        transition = Transition(
            state={OBS_STR: torch.randn(3, 64, 64), "state": torch.randn(10)},
            action=torch.randn(5),
            reward=torch.tensor(1.0 + i),
            done=torch.tensor(False),
            truncated=torch.tensor(False),
            next_state={OBS_STR: torch.randn(3, 64, 64), "state": torch.randn(10)},
            complementary_info={"step": torch.tensor(i)},
        )
        transitions.append(transition)
    transitions_queue = Queue()
    # Test pushing transitions
    push_transitions_to_transport_queue(transitions, transitions_queue)
    # The queue holds one serialized blob covering all transitions.
    serialized_data = transitions_queue.get()
    assert isinstance(serialized_data, bytes)
    deserialized_transitions = bytes_to_transitions(serialized_data)
    assert len(deserialized_transitions) == len(transitions)
    for i, deserialized_transition in enumerate(deserialized_transitions):
        assert_transitions_equal(deserialized_transition, transitions[i])
@require_package("grpcio", "grpc")
@pytest.mark.timeout(3)  # force cross-platform watchdog
def test_transitions_stream():
    """transitions_stream yields queued byte payloads in FIFO order."""
    from lerobot.rl.actor import transitions_stream

    shutdown_event = Event()
    transitions_queue = Queue()
    # Add test data to queue
    test_data = [b"transition_data_1", b"transition_data_2", b"transition_data_3"]
    for data in test_data:
        transitions_queue.put(data)
    # Collect streamed data
    streamed_data = []
    stream_generator = transitions_stream(shutdown_event, transitions_queue, 0.1)
    # Consume exactly as many messages as were queued, then stop the stream.
    for i, message in enumerate(stream_generator):
        streamed_data.append(message)
        if i >= len(test_data) - 1:
            shutdown_event.set()
            break
    # Verify we got messages
    assert len(streamed_data) == len(test_data)
    assert streamed_data[0].data == b"transition_data_1"
    assert streamed_data[1].data == b"transition_data_2"
    assert streamed_data[2].data == b"transition_data_3"
@require_package("grpcio", "grpc")
@pytest.mark.timeout(3)  # force cross-platform watchdog
def test_interactions_stream():
    """interactions_stream yields queued payloads that deserialize back to the originals."""
    from lerobot.rl.actor import interactions_stream
    from lerobot.transport.utils import bytes_to_python_object, python_object_to_bytes

    shutdown_event = Event()
    interactions_queue = Queue()
    # Create test interaction data (similar structure to what would be sent)
    test_interactions = [
        {"episode_reward": 10.5, "step": 1, "policy_fps": 30.2},
        {"episode_reward": 15.2, "step": 2, "policy_fps": 28.7},
        {"episode_reward": 8.7, "step": 3, "policy_fps": 29.1},
    ]
    # Serialize the interaction data as it would be in practice.
    # Fix: use a plain loop — the original used a list comprehension purely for
    # its side effect, producing a throwaway list of None.
    for interaction in test_interactions:
        interactions_queue.put(python_object_to_bytes(interaction))
    # Collect streamed data
    streamed_data = []
    stream_generator = interactions_stream(shutdown_event, interactions_queue, 0.1)
    # Process the items
    for i, message in enumerate(stream_generator):
        streamed_data.append(message)
        if i >= len(test_interactions) - 1:
            shutdown_event.set()
            break
    # Verify we got messages
    assert len(streamed_data) == len(test_interactions)
    # Verify the messages can be deserialized back to original data
    for i, message in enumerate(streamed_data):
        deserialized_interaction = bytes_to_python_object(message.data)
        assert deserialized_interaction == test_interactions[i]
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/rl/test_actor.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/rl/test_actor_learner.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import threading
import time
import pytest
import torch
from torch.multiprocessing import Event, Queue
from lerobot.configs.train import TrainRLServerPipelineConfig
from lerobot.policies.sac.configuration_sac import SACConfig
from lerobot.utils.constants import OBS_STR
from lerobot.utils.transition import Transition
from tests.utils import require_package
def create_test_transitions(count: int = 3) -> list[Transition]:
    """Create test transitions for integration testing."""

    def _make(idx: int) -> Transition:
        return Transition(
            state={OBS_STR: torch.randn(3, 64, 64), "state": torch.randn(10)},
            action=torch.randn(5),
            reward=torch.tensor(1.0 + idx),
            done=torch.tensor(idx == count - 1),  # last transition terminates the episode
            truncated=torch.tensor(False),
            next_state={OBS_STR: torch.randn(3, 64, 64), "state": torch.randn(10)},
            complementary_info={"step": torch.tensor(idx), "episode_id": idx // 2},
        )

    return [_make(idx) for idx in range(count)]
def create_test_interactions(count: int = 3) -> list[dict]:
    """Create test interactions for integration testing."""
    return [
        {
            "episode_reward": 10.0 + i * 5,
            "step": i * 100,
            "policy_fps": 30.0 + i,
            "intervention_rate": 0.1 * i,
            "episode_length": 200 + i * 50,
        }
        for i in range(count)
    ]
def find_free_port():
    """Finds a free port on the local machine."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("", 0))  # port 0 lets the OS pick an unused port
        sock.listen(1)
        return sock.getsockname()[1]
@pytest.fixture
def cfg():
    """Build a TrainRLServerPipelineConfig wired for single-process actor/learner tests.

    Uses a free OS-assigned port and thread-based concurrency so actor and
    learner can run side by side inside one test process.
    """
    cfg = TrainRLServerPipelineConfig()
    port = find_free_port()
    policy_cfg = SACConfig()
    policy_cfg.actor_learner_config.learner_host = "127.0.0.1"
    policy_cfg.actor_learner_config.learner_port = port
    # Threads (not processes) keep the tests lightweight and debuggable.
    policy_cfg.concurrency.actor = "threads"
    policy_cfg.concurrency.learner = "threads"
    # Short queue timeout so shutdown completes quickly.
    policy_cfg.actor_learner_config.queue_get_timeout = 0.1
    cfg.policy = policy_cfg
    return cfg
@require_package("grpcio", "grpc")
@pytest.mark.timeout(10)  # force cross-platform watchdog
def test_end_to_end_transitions_flow(cfg):
    """Test complete transitions flow from actor to learner."""
    from lerobot.rl.actor import (
        establish_learner_connection,
        learner_service_client,
        push_transitions_to_transport_queue,
        send_transitions,
    )
    from lerobot.rl.learner import start_learner
    from lerobot.transport.utils import bytes_to_transitions

    from tests.transport.test_transport_utils import assert_transitions_equal

    transitions_actor_queue = Queue()
    transitions_learner_queue = Queue()
    interactions_queue = Queue()
    parameters_queue = Queue()
    shutdown_event = Event()
    # Run the learner in-process so the actor-side helpers can reach it over gRPC.
    learner_thread = threading.Thread(
        target=start_learner,
        args=(parameters_queue, transitions_learner_queue, interactions_queue, shutdown_event, cfg),
    )
    learner_thread.start()
    policy_cfg = cfg.policy
    learner_client, channel = learner_service_client(
        host=policy_cfg.actor_learner_config.learner_host, port=policy_cfg.actor_learner_config.learner_port
    )
    assert establish_learner_connection(learner_client, shutdown_event, attempts=5)
    send_transitions_thread = threading.Thread(
        target=send_transitions, args=(cfg, transitions_actor_queue, shutdown_event, learner_client, channel)
    )
    send_transitions_thread.start()
    input_transitions = create_test_transitions(count=5)
    push_transitions_to_transport_queue(input_transitions, transitions_actor_queue)
    # Wait for learner to start
    time.sleep(0.1)
    shutdown_event.set()
    # Wait for learner to receive transitions
    learner_thread.join()
    send_transitions_thread.join()
    channel.close()
    received_transitions = []
    while not transitions_learner_queue.empty():
        received_transitions.extend(bytes_to_transitions(transitions_learner_queue.get()))
    assert len(received_transitions) == len(input_transitions)
    for i, transition in enumerate(received_transitions):
        assert_transitions_equal(transition, input_transitions[i])
@require_package("grpcio", "grpc")
@pytest.mark.timeout(10)
def test_end_to_end_interactions_flow(cfg):
    """Test complete interactions flow from actor to learner."""
    from lerobot.rl.actor import (
        establish_learner_connection,
        learner_service_client,
        send_interactions,
    )
    from lerobot.rl.learner import start_learner
    from lerobot.transport.utils import bytes_to_python_object, python_object_to_bytes

    # Queues for actor-learner communication
    interactions_actor_queue = Queue()
    interactions_learner_queue = Queue()
    # Other queues required by the learner
    parameters_queue = Queue()
    transitions_learner_queue = Queue()
    shutdown_event = Event()
    # Start the learner in a separate thread
    learner_thread = threading.Thread(
        target=start_learner,
        args=(parameters_queue, transitions_learner_queue, interactions_learner_queue, shutdown_event, cfg),
    )
    learner_thread.start()
    # Establish connection from actor to learner
    policy_cfg = cfg.policy
    learner_client, channel = learner_service_client(
        host=policy_cfg.actor_learner_config.learner_host, port=policy_cfg.actor_learner_config.learner_port
    )
    assert establish_learner_connection(learner_client, shutdown_event, attempts=5)
    # Start the actor's interaction sending process in a separate thread
    send_interactions_thread = threading.Thread(
        target=send_interactions,
        args=(cfg, interactions_actor_queue, shutdown_event, learner_client, channel),
    )
    send_interactions_thread.start()
    # Create and push test interactions to the actor's queue
    input_interactions = create_test_interactions(count=5)
    for interaction in input_interactions:
        interactions_actor_queue.put(python_object_to_bytes(interaction))
    # Wait for the communication to happen
    time.sleep(0.1)
    # Signal shutdown and wait for threads to complete
    shutdown_event.set()
    learner_thread.join()
    send_interactions_thread.join()
    channel.close()
    # Verify that the learner received the interactions
    received_interactions = []
    while not interactions_learner_queue.empty():
        received_interactions.append(bytes_to_python_object(interactions_learner_queue.get()))
    assert len(received_interactions) == len(input_interactions)
    # Sort by a unique key to handle potential reordering in queues
    received_interactions.sort(key=lambda x: x["step"])
    input_interactions.sort(key=lambda x: x["step"])
    for received, expected in zip(received_interactions, input_interactions, strict=False):
        assert received == expected
@require_package("grpcio", "grpc")
@pytest.mark.parametrize("data_size", ["small", "large"])
@pytest.mark.timeout(10)
def test_end_to_end_parameters_flow(cfg, data_size):
    """Test complete parameter flow from learner to actor, with small and large data."""
    from lerobot.rl.actor import establish_learner_connection, learner_service_client, receive_policy
    from lerobot.rl.learner import start_learner
    from lerobot.transport.utils import bytes_to_state_dict, state_to_bytes

    # Actor's local queue to receive params
    parameters_actor_queue = Queue()
    # Learner's queue to send params from
    parameters_learner_queue = Queue()
    # Other queues required by the learner
    transitions_learner_queue = Queue()
    interactions_learner_queue = Queue()
    shutdown_event = Event()
    # Start the learner in a separate thread
    learner_thread = threading.Thread(
        target=start_learner,
        args=(
            parameters_learner_queue,
            transitions_learner_queue,
            interactions_learner_queue,
            shutdown_event,
            cfg,
        ),
    )
    learner_thread.start()
    # Establish connection from actor to learner
    policy_cfg = cfg.policy
    learner_client, channel = learner_service_client(
        host=policy_cfg.actor_learner_config.learner_host, port=policy_cfg.actor_learner_config.learner_port
    )
    assert establish_learner_connection(learner_client, shutdown_event, attempts=5)
    # Start the actor's parameter receiving process in a separate thread
    receive_params_thread = threading.Thread(
        target=receive_policy,
        args=(cfg, parameters_actor_queue, shutdown_event, learner_client, channel),
    )
    receive_params_thread.start()
    # Create test parameters based on parametrization
    if data_size == "small":
        input_params = {"layer.weight": torch.randn(128, 64)}
    else:  # "large"
        # CHUNK_SIZE is 2MB, so this tensor (4MB) will force chunking
        input_params = {"large_layer.weight": torch.randn(1024, 1024)}
    # Simulate learner having new parameters to send
    parameters_learner_queue.put(state_to_bytes(input_params))
    # Wait for the actor to receive the parameters
    time.sleep(0.1)
    # Signal shutdown and wait for threads to complete
    shutdown_event.set()
    learner_thread.join()
    receive_params_thread.join()
    channel.close()
    # Verify that the actor received the parameters correctly
    received_params = bytes_to_state_dict(parameters_actor_queue.get())
    assert received_params.keys() == input_params.keys()
    for key in input_params:
        assert torch.allclose(received_params[key], input_params[key])
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/rl/test_actor_learner.py",
"license": "Apache License 2.0",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/rl/test_learner_service.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from concurrent import futures
from multiprocessing import Event, Queue
import pytest
from tests.utils import require_package # our gRPC servicer class
@pytest.fixture(scope="function")
def learner_service_stub():
    """Yield a connected LearnerService stub backed by a live in-process server."""
    shutdown_event = Event()
    parameters_queue = Queue()
    transitions_queue = Queue()
    interactions_queue = Queue()
    seconds_between_pushes = 1
    client, channel, server = create_learner_service_stub(
        shutdown_event, parameters_queue, transitions_queue, interactions_queue, seconds_between_pushes
    )
    yield client  # provide the stub to the test function
    close_learner_service_stub(channel, server)  # teardown after the test finishes
@require_package("grpcio", "grpc")
def create_learner_service_stub(
    shutdown_event: Event,
    parameters_queue: Queue,
    transitions_queue: Queue,
    interactions_queue: Queue,
    seconds_between_pushes: int,
    queue_get_timeout: float = 0.1,
):
    """Start a real LearnerService gRPC server and return (stub, channel, server).

    The caller tears the trio down via `close_learner_service_stub`.
    """
    import grpc

    from lerobot.rl.learner_service import LearnerService
    from lerobot.transport import services_pb2_grpc  # generated from .proto

    servicer = LearnerService(
        shutdown_event=shutdown_event,
        parameters_queue=parameters_queue,
        seconds_between_pushes=seconds_between_pushes,
        transition_queue=transitions_queue,
        interaction_message_queue=interactions_queue,
        queue_get_timeout=queue_get_timeout,
    )
    # Create a gRPC server and add our servicer to it.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    services_pb2_grpc.add_LearnerServiceServicer_to_server(servicer, server)
    port = server.add_insecure_port("[::]:0")  # bind to a free port chosen by OS
    server.start()  # non-blocking call
    # Create a client channel and stub connected to the server's port.
    channel = grpc.insecure_channel(f"localhost:{port}")
    return services_pb2_grpc.LearnerServiceStub(channel), channel, server
@require_package("grpcio", "grpc")
def close_learner_service_stub(channel, server):
    """Close the client channel and stop the gRPC server."""
    channel.close()
    server.stop(None)  # no grace period — active RPCs are aborted
@pytest.mark.timeout(3)  # force cross-platform watchdog
def test_ready_method(learner_service_stub):
    """Test the Ready method of the LearnerService."""
    # Fix: docstring wrongly referred to a "UserService" and sat after the import,
    # where it was a dead string expression rather than a docstring.
    from lerobot.transport import services_pb2

    request = services_pb2.Empty()
    response = learner_service_stub.Ready(request)
    assert response == services_pb2.Empty()
@require_package("grpcio", "grpc")
@pytest.mark.timeout(3)  # force cross-platform watchdog
def test_send_interactions():
    from lerobot.transport import services_pb2
    shutdown_event = Event()
    parameters_queue = Queue()
    transitions_queue = Queue()
    interactions_queue = Queue()
    seconds_between_pushes = 1
    client, channel, server = create_learner_service_stub(
        shutdown_event, parameters_queue, transitions_queue, interactions_queue, seconds_between_pushes
    )
    # Four transfers chunked as 1-2-3, 4, 5 and 6-7-8: BEGIN/MIDDLE chunks
    # accumulate, and each TRANSFER_END flushes one assembled message.
    list_of_interaction_messages = [
        services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_BEGIN, data=b"1"),
        services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE, data=b"2"),
        services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_END, data=b"3"),
        services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_END, data=b"4"),
        services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_END, data=b"5"),
        services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_BEGIN, data=b"6"),
        services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE, data=b"7"),
        services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_END, data=b"8"),
    ]
    def mock_interactions_stream():
        yield from list_of_interaction_messages
        return services_pb2.Empty()
    response = client.SendInteractions(mock_interactions_stream())
    assert response == services_pb2.Empty()
    close_learner_service_stub(channel, server)
    # Extract the data from the interactions queue
    interactions = []
    while not interactions_queue.empty():
        interactions.append(interactions_queue.get())
    # One assembled payload per TRANSFER_END boundary.
    assert interactions == [b"123", b"4", b"5", b"678"]
@require_package("grpcio", "grpc")
@pytest.mark.timeout(3)  # force cross-platform watchdog
def test_send_transitions():
    """Test the SendTransitions method with various transition data."""
    from lerobot.transport import services_pb2

    shutdown_event = Event()
    parameters_queue = Queue()
    transitions_queue = Queue()
    interactions_queue = Queue()
    seconds_between_pushes = 1
    client, channel, server = create_learner_service_stub(
        shutdown_event, parameters_queue, transitions_queue, interactions_queue, seconds_between_pushes
    )
    # Two chunked payloads (1-2-3 and batch_1-batch_2); TRANSFER_END closes each.
    list_of_transition_messages = [
        services_pb2.Transition(
            transfer_state=services_pb2.TransferState.TRANSFER_BEGIN, data=b"transition_1"
        ),
        services_pb2.Transition(
            transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE, data=b"transition_2"
        ),
        services_pb2.Transition(transfer_state=services_pb2.TransferState.TRANSFER_END, data=b"transition_3"),
        services_pb2.Transition(transfer_state=services_pb2.TransferState.TRANSFER_BEGIN, data=b"batch_1"),
        services_pb2.Transition(transfer_state=services_pb2.TransferState.TRANSFER_END, data=b"batch_2"),
    ]

    def mock_transitions_stream():
        yield from list_of_transition_messages

    response = client.SendTransitions(mock_transitions_stream())
    assert response == services_pb2.Empty()
    close_learner_service_stub(channel, server)
    # Extract the data from the transitions queue
    transitions = []
    while not transitions_queue.empty():
        transitions.append(transitions_queue.get())
    # Should have assembled the chunked data
    assert transitions == [b"transition_1transition_2transition_3", b"batch_1batch_2"]
@require_package("grpcio", "grpc")
@pytest.mark.timeout(3)  # force cross-platform watchdog
def test_send_transitions_empty_stream():
    """SendTransitions with an empty stream succeeds and enqueues nothing."""
    from lerobot.transport import services_pb2

    shutdown_event = Event()
    parameters_queue = Queue()
    transitions_queue = Queue()
    interactions_queue = Queue()
    seconds_between_pushes = 1
    client, channel, server = create_learner_service_stub(
        shutdown_event, parameters_queue, transitions_queue, interactions_queue, seconds_between_pushes
    )

    def empty_stream():
        return iter([])

    response = client.SendTransitions(empty_stream())
    assert response == services_pb2.Empty()
    close_learner_service_stub(channel, server)
    # Queue should remain empty
    assert transitions_queue.empty()
@require_package("grpcio", "grpc")
@pytest.mark.timeout(10)  # force cross-platform watchdog
def test_stream_parameters():
    """Parameters are streamed from the queue at the configured push frequency."""
    # Fix: dropped the redundant function-level `import time` (already imported
    # at module level) and promoted the stray string expression to a docstring.
    from lerobot.transport import services_pb2

    shutdown_event = Event()
    parameters_queue = Queue()
    transitions_queue = Queue()
    interactions_queue = Queue()
    seconds_between_pushes = 0.2  # Short delay for testing
    client, channel, server = create_learner_service_stub(
        shutdown_event, parameters_queue, transitions_queue, interactions_queue, seconds_between_pushes
    )
    # Add test parameters to the queue
    test_params = [b"param_batch_1", b"param_batch_2"]
    for param in test_params:
        parameters_queue.put(param)
    # Start streaming parameters
    request = services_pb2.Empty()
    stream = client.StreamParameters(request)
    # Collect streamed parameters and timestamps
    received_params = []
    timestamps = []
    for response in stream:
        received_params.append(response.data)
        timestamps.append(time.time())
        # We should receive one last item
        break
    parameters_queue.put(b"param_batch_3")
    for response in stream:
        received_params.append(response.data)
        timestamps.append(time.time())
        # We should receive only one item
        break
    shutdown_event.set()
    close_learner_service_stub(channel, server)
    assert received_params == [b"param_batch_2", b"param_batch_3"]
    # Check the time difference between the two sends
    time_diff = timestamps[1] - timestamps[0]
    # Check if the time difference is close to the expected push frequency
    assert time_diff == pytest.approx(seconds_between_pushes, abs=0.1)
@require_package("grpcio", "grpc")
@pytest.mark.timeout(3)  # force cross-platform watchdog
def test_stream_parameters_with_shutdown():
    """StreamParameters handles shutdown gracefully mid-stream."""
    from lerobot.transport import services_pb2

    shutdown_event = Event()
    parameters_queue = Queue()
    transitions_queue = Queue()
    interactions_queue = Queue()
    seconds_between_pushes = 0.1
    queue_get_timeout = 0.001
    client, channel, server = create_learner_service_stub(
        shutdown_event,
        parameters_queue,
        transitions_queue,
        interactions_queue,
        seconds_between_pushes,
        queue_get_timeout=queue_get_timeout,
    )
    test_params = [b"param_batch_1", b"stop", b"param_batch_3", b"param_batch_4"]

    # create a thread that will put the parameters in the queue
    def producer():
        for param in test_params:
            parameters_queue.put(param)
            time.sleep(0.1)

    producer_thread = threading.Thread(target=producer)
    producer_thread.start()
    # Start streaming
    request = services_pb2.Empty()
    stream = client.StreamParameters(request)
    # Collect streamed parameters
    received_params = []
    for response in stream:
        received_params.append(response.data)
        if response.data == b"stop":
            shutdown_event.set()
    producer_thread.join()
    close_learner_service_stub(channel, server)
    # Nothing queued after the shutdown trigger should have been delivered.
    assert received_params == [b"param_batch_1", b"stop"]
@require_package("grpcio", "grpc")
@pytest.mark.timeout(3)  # force cross-platform watchdog
def test_stream_parameters_waits_and_retries_on_empty_queue():
    """StreamParameters waits and retries when the queue is empty."""
    # Fix: dropped redundant function-level `import threading` / `import time`
    # (both already imported at module level) and promoted the stray string
    # expression to a real docstring.
    from lerobot.transport import services_pb2

    shutdown_event = Event()
    parameters_queue = Queue()
    transitions_queue = Queue()
    interactions_queue = Queue()
    seconds_between_pushes = 0.05
    queue_get_timeout = 0.01
    client, channel, server = create_learner_service_stub(
        shutdown_event,
        parameters_queue,
        transitions_queue,
        interactions_queue,
        seconds_between_pushes,
        queue_get_timeout=queue_get_timeout,
    )
    request = services_pb2.Empty()
    stream = client.StreamParameters(request)
    received_params = []

    def producer():
        # Let the consumer start and find an empty queue.
        # It will wait `seconds_between_pushes` (0.05s), then `get` will timeout after `queue_get_timeout` (0.01s).
        # Total time for the first empty loop is > 0.06s. We wait a bit longer to be safe.
        time.sleep(0.06)
        parameters_queue.put(b"param_after_wait")
        time.sleep(0.05)
        parameters_queue.put(b"param_after_wait_2")

    producer_thread = threading.Thread(target=producer)
    producer_thread.start()
    # The consumer will block here until the producer sends an item.
    for response in stream:
        received_params.append(response.data)
        if response.data == b"param_after_wait_2":
            break  # We only need one item for this test.
    shutdown_event.set()
    producer_thread.join()
    close_learner_service_stub(channel, server)
    assert received_params == [b"param_after_wait", b"param_after_wait_2"]
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/rl/test_learner_service.py",
"license": "Apache License 2.0",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/transport/test_transport_utils.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from multiprocessing import Event, Queue
from pickle import UnpicklingError
import pytest
import torch
from lerobot.utils.constants import ACTION
from lerobot.utils.transition import Transition
from tests.utils import require_cuda, require_package
@require_package("grpcio", "grpc")
def test_bytes_buffer_size_empty_buffer():
    """bytes_buffer_size reports 0 for an empty buffer and leaves it rewound."""
    from lerobot.transport.utils import bytes_buffer_size

    buffer = io.BytesIO()
    assert bytes_buffer_size(buffer) == 0
    # Ensure position is reset to beginning
    assert buffer.tell() == 0
@require_package("grpcio", "grpc")
def test_bytes_buffer_size_small_buffer():
    """bytes_buffer_size reports the byte length of a small buffer."""
    from lerobot.transport.utils import bytes_buffer_size

    buffer = io.BytesIO(b"Hello, World!")
    assert bytes_buffer_size(buffer) == 13
    assert buffer.tell() == 0  # position must be rewound, not left at EOF
@require_package("grpcio", "grpc")
def test_bytes_buffer_size_large_buffer():
    """bytes_buffer_size handles buffers larger than the transport chunk size."""
    from lerobot.transport.utils import CHUNK_SIZE, bytes_buffer_size

    data = b"x" * (CHUNK_SIZE * 2 + 1000)
    buffer = io.BytesIO(data)
    assert bytes_buffer_size(buffer) == len(data)
    assert buffer.tell() == 0
@require_package("grpcio", "grpc")
def test_send_bytes_in_chunks_empty_data():
    """Test sending empty data."""
    from lerobot.transport.utils import send_bytes_in_chunks, services_pb2

    message_class = services_pb2.InteractionMessage
    # Empty payload should produce no messages at all.
    chunks = list(send_bytes_in_chunks(b"", message_class))
    assert len(chunks) == 0
@require_package("grpcio", "grpc")
def test_single_chunk_small_data():
    """Test data that fits in a single chunk."""
    from lerobot.transport.utils import send_bytes_in_chunks, services_pb2

    data = b"Some data"
    message_class = services_pb2.InteractionMessage
    chunks = list(send_bytes_in_chunks(data, message_class))
    assert len(chunks) == 1
    assert chunks[0].data == b"Some data"
    # A single-chunk transfer is marked END directly (no BEGIN/MIDDLE).
    assert chunks[0].transfer_state == services_pb2.TransferState.TRANSFER_END
@require_package("grpcio", "grpc")
def test_not_silent_mode():
    """Test that silent=False (logging enabled) still yields the same chunks."""
    from lerobot.transport.utils import send_bytes_in_chunks, services_pb2

    data = b"Some data"
    message_class = services_pb2.InteractionMessage
    chunks = list(send_bytes_in_chunks(data, message_class, silent=False))
    assert len(chunks) == 1
    assert chunks[0].data == b"Some data"
@require_package("grpcio", "grpc")
def test_send_bytes_in_chunks_large_data():
    """Test sending large data."""
    from lerobot.transport.utils import CHUNK_SIZE, send_bytes_in_chunks, services_pb2

    # Two full chunks plus a 1000-byte remainder -> exactly three messages.
    data = b"x" * (CHUNK_SIZE * 2 + 1000)
    message_class = services_pb2.InteractionMessage
    chunks = list(send_bytes_in_chunks(data, message_class))
    assert len(chunks) == 3
    assert chunks[0].data == b"x" * CHUNK_SIZE
    assert chunks[0].transfer_state == services_pb2.TransferState.TRANSFER_BEGIN
    assert chunks[1].data == b"x" * CHUNK_SIZE
    assert chunks[1].transfer_state == services_pb2.TransferState.TRANSFER_MIDDLE
    assert chunks[2].data == b"x" * 1000
    assert chunks[2].transfer_state == services_pb2.TransferState.TRANSFER_END
@require_package("grpcio", "grpc")
def test_send_bytes_in_chunks_large_data_with_exact_chunk_size():
    """Test sending large data with exact chunk size."""
    from lerobot.transport.utils import CHUNK_SIZE, send_bytes_in_chunks, services_pb2

    # Exactly one chunk: must NOT produce a trailing empty END message.
    data = b"x" * CHUNK_SIZE
    message_class = services_pb2.InteractionMessage
    chunks = list(send_bytes_in_chunks(data, message_class))
    assert len(chunks) == 1
    assert chunks[0].data == data
    assert chunks[0].transfer_state == services_pb2.TransferState.TRANSFER_END
@require_package("grpcio", "grpc")
def test_receive_bytes_in_chunks_empty_data():
    """Test receiving empty data."""
    from lerobot.transport.utils import receive_bytes_in_chunks

    queue = Queue()
    shutdown_event = Event()
    # Empty iterator
    receive_bytes_in_chunks(iter([]), queue, shutdown_event)
    assert queue.empty()
@require_package("grpcio", "grpc")
def test_receive_bytes_in_chunks_single_chunk():
    """Test receiving a single chunk message."""
    from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2

    queue = Queue()
    shutdown_event = Event()
    data = b"Single chunk data"
    chunks = [
        services_pb2.InteractionMessage(data=data, transfer_state=services_pb2.TransferState.TRANSFER_END)
    ]
    receive_bytes_in_chunks(iter(chunks), queue, shutdown_event)
    # One END chunk is a complete message and must be delivered as-is.
    assert queue.get(timeout=0.01) == data
    assert queue.empty()
@require_package("grpcio", "grpc")
def test_receive_bytes_in_chunks_single_not_end_chunk():
    """Test that a lone non-END chunk never yields a completed message."""
    from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2

    queue = Queue()
    shutdown_event = Event()
    data = b"Single chunk data"
    chunks = [
        services_pb2.InteractionMessage(data=data, transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE)
    ]
    receive_bytes_in_chunks(iter(chunks), queue, shutdown_event)
    # No END chunk arrived, so nothing should be enqueued.
    assert queue.empty()
@require_package("grpcio", "grpc")
def test_receive_bytes_in_chunks_multiple_chunks():
    """Test receiving a multi-chunk message."""
    from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2

    queue = Queue()
    shutdown_event = Event()
    chunks = [
        services_pb2.InteractionMessage(
            data=b"First ", transfer_state=services_pb2.TransferState.TRANSFER_BEGIN
        ),
        services_pb2.InteractionMessage(
            data=b"Middle ", transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE
        ),
        services_pb2.InteractionMessage(data=b"Last", transfer_state=services_pb2.TransferState.TRANSFER_END),
    ]
    receive_bytes_in_chunks(iter(chunks), queue, shutdown_event)
    # BEGIN + MIDDLE + END are reassembled into one contiguous payload.
    assert queue.get(timeout=0.01) == b"First Middle Last"
    assert queue.empty()
@require_package("grpcio", "grpc")
def test_receive_bytes_in_chunks_multiple_messages():
    """Test receiving multiple complete messages in sequence."""
    from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2

    queue = Queue()
    shutdown_event = Event()
    chunks = [
        # First message - single chunk
        services_pb2.InteractionMessage(
            data=b"Message1", transfer_state=services_pb2.TransferState.TRANSFER_END
        ),
        # Second message - multi chunk
        services_pb2.InteractionMessage(
            data=b"Start2 ", transfer_state=services_pb2.TransferState.TRANSFER_BEGIN
        ),
        services_pb2.InteractionMessage(
            data=b"Middle2 ", transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE
        ),
        services_pb2.InteractionMessage(data=b"End2", transfer_state=services_pb2.TransferState.TRANSFER_END),
        # Third message - single chunk
        services_pb2.InteractionMessage(
            data=b"Message3", transfer_state=services_pb2.TransferState.TRANSFER_END
        ),
    ]
    receive_bytes_in_chunks(iter(chunks), queue, shutdown_event)
    # Should have three messages in queue, in arrival order.
    assert queue.get(timeout=0.01) == b"Message1"
    assert queue.get(timeout=0.01) == b"Start2 Middle2 End2"
    assert queue.get(timeout=0.01) == b"Message3"
    assert queue.empty()
@require_package("grpcio", "grpc")
def test_receive_bytes_in_chunks_shutdown_during_receive():
    """Test that a pre-set shutdown event prevents any chunks from being consumed."""
    from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2

    queue = Queue()
    shutdown_event = Event()
    # Set BEFORE the call: the receiver should bail out immediately.
    shutdown_event.set()
    chunks = [
        services_pb2.InteractionMessage(
            data=b"First ", transfer_state=services_pb2.TransferState.TRANSFER_BEGIN
        ),
        services_pb2.InteractionMessage(
            data=b"Middle ", transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE
        ),
        services_pb2.InteractionMessage(data=b"Last", transfer_state=services_pb2.TransferState.TRANSFER_END),
    ]
    receive_bytes_in_chunks(iter(chunks), queue, shutdown_event)
    assert queue.empty()
@require_package("grpcio", "grpc")
def test_receive_bytes_in_chunks_only_begin_chunk():
    """Test receiving only a BEGIN chunk without END."""
    from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2

    queue = Queue()
    shutdown_event = Event()
    chunks = [
        services_pb2.InteractionMessage(
            data=b"Start", transfer_state=services_pb2.TransferState.TRANSFER_BEGIN
        ),
        # No END chunk
    ]
    receive_bytes_in_chunks(iter(chunks), queue, shutdown_event)
    # The partial message must be discarded, not delivered.
    assert queue.empty()
@require_package("grpcio", "grpc")
def test_receive_bytes_in_chunks_missing_begin():
    """Test receiving chunks starting with MIDDLE instead of BEGIN."""
    from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2

    queue = Queue()
    shutdown_event = Event()
    chunks = [
        # Missing BEGIN
        services_pb2.InteractionMessage(
            data=b"Middle", transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE
        ),
        services_pb2.InteractionMessage(data=b"End", transfer_state=services_pb2.TransferState.TRANSFER_END),
    ]
    receive_bytes_in_chunks(iter(chunks), queue, shutdown_event)
    # The implementation continues from where it is, so we should get partial data
    assert queue.get(timeout=0.01) == b"MiddleEnd"
    assert queue.empty()
# Tests for state_to_bytes and bytes_to_state_dict
@require_package("grpcio", "grpc")
def test_state_to_bytes_empty_dict():
    """Test converting empty state dict to bytes."""
    from lerobot.transport.utils import bytes_to_state_dict, state_to_bytes

    state_dict = {}
    data = state_to_bytes(state_dict)
    # Round-trip of an empty dict must come back empty.
    reconstructed = bytes_to_state_dict(data)
    assert reconstructed == state_dict
@require_package("grpcio", "grpc")
def test_bytes_to_state_dict_empty_data():
    """Test converting empty data to state dict."""
    from lerobot.transport.utils import bytes_to_state_dict

    # torch.load on an empty stream raises EOFError.
    with pytest.raises(EOFError):
        bytes_to_state_dict(b"")
@require_package("grpcio", "grpc")
def test_state_to_bytes_simple_dict():
    """Test converting simple state dict to bytes."""
    from lerobot.transport.utils import bytes_to_state_dict, state_to_bytes

    state_dict = {
        "layer1.weight": torch.randn(10, 5),
        "layer1.bias": torch.randn(10),
        "layer2.weight": torch.randn(1, 10),
        "layer2.bias": torch.randn(1),
    }
    data = state_to_bytes(state_dict)
    assert isinstance(data, bytes)
    assert len(data) > 0
    reconstructed = bytes_to_state_dict(data)
    # Keys and values must survive the round trip unchanged.
    assert len(reconstructed) == len(state_dict)
    for key in state_dict:
        assert key in reconstructed
        assert torch.allclose(state_dict[key], reconstructed[key])
@require_package("grpcio", "grpc")
def test_state_to_bytes_various_dtypes():
    """Test converting state dict with various tensor dtypes."""
    from lerobot.transport.utils import bytes_to_state_dict, state_to_bytes

    state_dict = {
        "float32": torch.randn(5, 5),
        "float64": torch.randn(3, 3).double(),
        "int32": torch.randint(0, 100, (4, 4), dtype=torch.int32),
        "int64": torch.randint(0, 100, (2, 2), dtype=torch.int64),
        "bool": torch.tensor([True, False, True]),
        "uint8": torch.randint(0, 255, (3, 3), dtype=torch.uint8),
    }
    data = state_to_bytes(state_dict)
    reconstructed = bytes_to_state_dict(data)
    for key in state_dict:
        # Dtypes must be preserved exactly, not promoted.
        assert reconstructed[key].dtype == state_dict[key].dtype
        if state_dict[key].dtype == torch.bool:
            # allclose is undefined for bool tensors; compare exactly.
            assert torch.equal(state_dict[key], reconstructed[key])
        else:
            assert torch.allclose(state_dict[key], reconstructed[key])
@require_package("grpcio", "grpc")
def test_bytes_to_state_dict_invalid_data():
    """Test bytes_to_state_dict with invalid data."""
    from lerobot.transport.utils import bytes_to_state_dict

    # Arbitrary non-torch bytes must fail loudly rather than return garbage.
    with pytest.raises(UnpicklingError):
        bytes_to_state_dict(b"This is not a valid torch save file")
@require_cuda
@require_package("grpcio", "grpc")
def test_state_to_bytes_various_dtypes_cuda():
    """Test round-tripping a state dict that mixes CUDA and CPU tensors."""
    from lerobot.transport.utils import bytes_to_state_dict, state_to_bytes

    state_dict = {
        "float32": torch.randn(5, 5).cuda(),
        "float64": torch.randn(3, 3).double().cuda(),
        "int32": torch.randint(0, 100, (4, 4), dtype=torch.int32).cuda(),
        "int64": torch.randint(0, 100, (2, 2), dtype=torch.int64).cuda(),
        "bool": torch.tensor([True, False, True]),
        "uint8": torch.randint(0, 255, (3, 3), dtype=torch.uint8),
    }
    data = state_to_bytes(state_dict)
    reconstructed = bytes_to_state_dict(data)
    for key in state_dict:
        assert reconstructed[key].dtype == state_dict[key].dtype
        if state_dict[key].dtype == torch.bool:
            assert torch.equal(state_dict[key], reconstructed[key])
        else:
            # allclose handles cross-device comparison after reconstruction.
            assert torch.allclose(state_dict[key], reconstructed[key])
@require_package("grpcio", "grpc")
def test_python_object_to_bytes_none():
    """Test converting None to bytes."""
    from lerobot.transport.utils import bytes_to_python_object, python_object_to_bytes

    obj = None
    data = python_object_to_bytes(obj)
    reconstructed = bytes_to_python_object(data)
    assert reconstructed is None
@pytest.mark.parametrize(
    "obj",
    [
        42,
        -123,
        3.14159,
        -2.71828,
        "Hello, World!",
        "Unicode: 你好世界 🌍",
        True,
        False,
        b"byte string",
        [],
        [1, 2, 3],
        [1, "two", 3.0, True, None],
        {},
        {"key": "value", "number": 123, "nested": {"a": 1}},
        (),
        (1, 2, 3),
    ],
)
@require_package("grpcio", "grpc")
def test_python_object_to_bytes_simple_types(obj):
    """Test converting simple Python types."""
    from lerobot.transport.utils import bytes_to_python_object, python_object_to_bytes

    data = python_object_to_bytes(obj)
    reconstructed = bytes_to_python_object(data)
    assert reconstructed == obj
    # `==` alone would pass for e.g. tuple vs list; pin the exact type too.
    assert type(reconstructed) is type(obj)
@require_package("grpcio", "grpc")
def test_python_object_to_bytes_with_tensors():
    """Test converting objects containing PyTorch tensors."""
    from lerobot.transport.utils import bytes_to_python_object, python_object_to_bytes

    obj = {
        "tensor": torch.randn(5, 5),
        "list_with_tensor": [1, 2, torch.randn(3, 3), "string"],
        "nested": {
            "tensor1": torch.randn(2, 2),
            "tensor2": torch.tensor([1, 2, 3]),
        },
    }
    data = python_object_to_bytes(obj)
    reconstructed = bytes_to_python_object(data)
    # Tensors embedded at any nesting depth must survive the round trip.
    assert torch.allclose(obj["tensor"], reconstructed["tensor"])
    assert reconstructed["list_with_tensor"][0] == 1
    assert reconstructed["list_with_tensor"][3] == "string"
    assert torch.allclose(obj["list_with_tensor"][2], reconstructed["list_with_tensor"][2])
    assert torch.allclose(obj["nested"]["tensor1"], reconstructed["nested"]["tensor1"])
    assert torch.equal(obj["nested"]["tensor2"], reconstructed["nested"]["tensor2"])
@require_package("grpcio", "grpc")
def test_transitions_to_bytes_empty_list():
    """Test converting empty transitions list."""
    from lerobot.transport.utils import bytes_to_transitions, transitions_to_bytes

    transitions = []
    data = transitions_to_bytes(transitions)
    reconstructed = bytes_to_transitions(data)
    assert reconstructed == transitions
    assert isinstance(reconstructed, list)
@require_package("grpcio", "grpc")
def test_transitions_to_bytes_single_transition():
    """Test converting a single transition."""
    from lerobot.transport.utils import bytes_to_transitions, transitions_to_bytes

    transition = Transition(
        state={"image": torch.randn(3, 64, 64), "state": torch.randn(10)},
        action=torch.randn(5),
        reward=torch.tensor(1.5),
        done=torch.tensor(False),
        next_state={"image": torch.randn(3, 64, 64), "state": torch.randn(10)},
    )
    transitions = [transition]
    data = transitions_to_bytes(transitions)
    reconstructed = bytes_to_transitions(data)
    assert len(reconstructed) == 1
    assert_transitions_equal(transitions[0], reconstructed[0])
@require_package("grpcio", "grpc")
def assert_transitions_equal(t1: Transition, t2: Transition):
    """Helper to assert two transitions are equal."""
    # NOTE(review): this is a plain helper, not a collected test; the
    # require_package decorator here is presumably inert when the function is
    # called directly — confirm it does not wrap or skip the call.
    assert_observation_equal(t1["state"], t2["state"])
    assert torch.allclose(t1[ACTION], t2[ACTION])
    assert torch.allclose(t1["reward"], t2["reward"])
    # done is a bool tensor, so compare exactly rather than with allclose.
    assert torch.equal(t1["done"], t2["done"])
    assert_observation_equal(t1["next_state"], t2["next_state"])
@require_package("grpcio", "grpc")
def assert_observation_equal(o1: dict, o2: dict):
    """Helper to assert two observations are equal."""
    # Same key set, then element-wise closeness per tensor value.
    assert set(o1.keys()) == set(o2.keys())
    for key in o1:
        assert torch.allclose(o1[key], o2[key])
@require_package("grpcio", "grpc")
def test_transitions_to_bytes_multiple_transitions():
    """Test converting multiple transitions."""
    from lerobot.transport.utils import bytes_to_transitions, transitions_to_bytes

    transitions = []
    for i in range(5):
        transition = Transition(
            state={"data": torch.randn(10)},
            action=torch.randn(3),
            reward=torch.tensor(float(i)),
            # Only the last transition terminates the episode.
            done=torch.tensor(i == 4),
            next_state={"data": torch.randn(10)},
        )
        transitions.append(transition)
    data = transitions_to_bytes(transitions)
    reconstructed = bytes_to_transitions(data)
    assert len(reconstructed) == len(transitions)
    for original, reconstructed_item in zip(transitions, reconstructed, strict=False):
        assert_transitions_equal(original, reconstructed_item)
@require_package("grpcio", "grpc")
def test_receive_bytes_in_chunks_unknown_state():
    """Test receive_bytes_in_chunks with an unknown transfer state."""
    from lerobot.transport.utils import receive_bytes_in_chunks

    # Mock the gRPC message object, which has `transfer_state` and `data` attributes.
    class MockMessage:
        def __init__(self, transfer_state, data):
            self.transfer_state = transfer_state
            self.data = data

    # 10 is not a valid TransferState enum value
    bad_iterator = [MockMessage(transfer_state=10, data=b"bad_data")]
    output_queue = Queue()
    shutdown_event = Event()
    with pytest.raises(ValueError, match="Received unknown transfer state"):
        receive_bytes_in_chunks(bad_iterator, output_queue, shutdown_event)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/transport/test_transport_utils.py",
"license": "Apache License 2.0",
"lines": 436,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/utils/test_process.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
import signal
import threading
from unittest.mock import patch
import pytest
from lerobot.rl.process import ProcessSignalHandler
# Fixture to reset shutdown_event_counter and original signal handlers before and after each test
@pytest.fixture(autouse=True)
def reset_globals_and_handlers():
    """Snapshot the current signal handlers and restore them after each test."""
    # Look the signals up by name: referencing signal.SIGHUP / signal.SIGQUIT
    # directly in a list would raise AttributeError on platforms where they do
    # not exist (e.g. Windows) BEFORE any hasattr() guard could run.
    original_handlers = {
        sig: signal.getsignal(sig)
        for name in ("SIGINT", "SIGTERM", "SIGHUP", "SIGQUIT")
        if (sig := getattr(signal, name, None)) is not None
    }
    yield
    # Restore original signal handlers
    for sig, handler in original_handlers.items():
        signal.signal(sig, handler)
def test_setup_process_handlers_event_with_threads():
    """A thread-based handler exposes a threading.Event that starts unset."""
    shutdown_event = ProcessSignalHandler(use_threads=True).shutdown_event
    assert isinstance(shutdown_event, threading.Event), "Should be a threading.Event"
    assert not shutdown_event.is_set(), "Event should initially be unset"
def test_setup_process_handlers_event_with_processes():
    """A process-based handler exposes a multiprocessing Event that starts unset."""
    shutdown_event = ProcessSignalHandler(use_threads=False).shutdown_event
    # multiprocessing.Event() returns a synchronize.Event instance; compare via type().
    assert isinstance(shutdown_event, type(multiprocessing.Event())), "Should be a multiprocessing.Event"
    assert not shutdown_event.is_set(), "Event should initially be unset"
@pytest.mark.parametrize("use_threads", [True, False])
@pytest.mark.parametrize(
    "sig",
    [
        signal.SIGINT,
        signal.SIGTERM,
        # SIGHUP and SIGQUIT are not reliably available on all platforms (e.g. Windows)
        pytest.param(
            signal.SIGHUP,
            marks=pytest.mark.skipif(not hasattr(signal, "SIGHUP"), reason="SIGHUP not available"),
        ),
        pytest.param(
            signal.SIGQUIT,
            marks=pytest.mark.skipif(not hasattr(signal, "SIGQUIT"), reason="SIGQUIT not available"),
        ),
    ],
)
def test_signal_handler_sets_event(use_threads, sig):
    """Test that the signal handler sets the event on receiving a signal."""
    # NOTE(review): the parametrize list references signal.SIGHUP/SIGQUIT
    # eagerly, so collection itself would fail where they don't exist — the
    # skipif marks only guard execution. Consider getattr-based lookup.
    handler = ProcessSignalHandler(use_threads=use_threads)
    shutdown_event = handler.shutdown_event
    assert handler.counter == 0
    # Deliver the signal to our own process; the installed handler should fire.
    os.kill(os.getpid(), sig)
    # In some environments, the signal might take a moment to be handled.
    shutdown_event.wait(timeout=1.0)
    assert shutdown_event.is_set(), f"Event should be set after receiving signal {sig}"
    # Ensure the internal counter was incremented
    assert handler.counter == 1
@pytest.mark.parametrize("use_threads", [True, False])
@patch("sys.exit")
def test_force_shutdown_on_second_signal(mock_sys_exit, use_threads):
    """Test that a second signal triggers a force shutdown (sys.exit(1))."""
    import time

    handler = ProcessSignalHandler(use_threads=use_threads)
    os.kill(os.getpid(), signal.SIGINT)
    # Give the first signal a moment to be processed before sending the second.
    time.sleep(0.1)
    os.kill(os.getpid(), signal.SIGINT)
    time.sleep(0.1)
    # Second delivery escalates: counter reaches 2 and a forced exit is requested.
    assert handler.counter == 2
    mock_sys_exit.assert_called_once_with(1)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/utils/test_process.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/utils/test_replay_buffer.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from collections.abc import Callable
import pytest
import torch
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.rl.buffer import BatchTransition, ReplayBuffer, random_crop_vectorized
from lerobot.utils.constants import ACTION, DONE, OBS_IMAGE, OBS_STATE, OBS_STR, REWARD
from tests.fixtures.constants import DUMMY_REPO_ID
def state_dims() -> list[str]:
    """Observation keys used by every dummy state in these tests."""
    return [OBS_IMAGE, OBS_STATE]
@pytest.fixture
def replay_buffer() -> ReplayBuffer:
    # Fresh 10-slot CPU buffer with default options for each test.
    return create_empty_replay_buffer()
def clone_state(state: dict) -> dict:
    """Return a copy of *state* with every tensor value cloned (no shared storage)."""
    cloned = {}
    for key, tensor in state.items():
        cloned[key] = tensor.clone()
    return cloned
def create_empty_replay_buffer(
    optimize_memory: bool = False,
    use_drq: bool = False,
    image_augmentation_function: Callable | None = None,
) -> ReplayBuffer:
    """Build a small (capacity 10) CPU ReplayBuffer over the test observation keys."""
    buffer_capacity = 10
    device = "cpu"
    return ReplayBuffer(
        buffer_capacity,
        device,
        state_dims(),
        optimize_memory=optimize_memory,
        use_drq=use_drq,
        image_augmentation_function=image_augmentation_function,
    )
def create_random_image() -> torch.Tensor:
    """Return a uniformly random CHW image tensor (3x84x84 floats in [0, 1))."""
    return torch.rand((3, 84, 84))
def create_dummy_transition() -> dict:
    """Build one fully-populated transition dict with random observation/action."""
    # NOTE(review): mixes constant keys (OBS_IMAGE, ACTION, OBS_STATE) with
    # string literals ("reward", "done", ...) — presumably the constants for
    # those literals match; verify against lerobot.utils.constants.
    return {
        OBS_IMAGE: create_random_image(),
        ACTION: torch.randn(4),
        "reward": torch.tensor(1.0),
        OBS_STATE: torch.randn(
            10,
        ),
        "done": torch.tensor(False),
        "truncated": torch.tensor(False),
        "complementary_info": {},
    }
def create_dataset_from_replay_buffer(tmp_path) -> tuple[LeRobotDataset, ReplayBuffer]:
    """Fill a fresh buffer with 4 transitions (2 episodes) and convert to a dataset.

    Returns the converted dataset together with the source buffer so tests can
    compare them.
    """
    replay_buffer = create_empty_replay_buffer()
    # First two transitions are mid-episode; the last two each terminate an
    # episode (done=truncated=True), yielding 2 episodes over 4 frames.
    for done in (False, False, True, True):
        state = create_dummy_state()
        action = create_dummy_action()
        replay_buffer.add(state, action, 1.0, state, done, done)
    root = tmp_path / "test"
    return (replay_buffer.to_lerobot_dataset(DUMMY_REPO_ID, root=root), replay_buffer)
def create_dummy_state() -> dict:
    """Random observation dict matching the keys returned by state_dims()."""
    return {
        OBS_IMAGE: create_random_image(),
        OBS_STATE: torch.randn(
            10,
        ),
    }
def get_tensor_memory_consumption(tensor):
    """Bytes occupied by the tensor's data: element count times bytes per element."""
    return tensor.element_size() * tensor.nelement()
def get_tensors_memory_consumption(obj, visited_addresses):
    """Recursively sum the data bytes of every tensor reachable from *obj*.

    *visited_addresses* tracks already-seen object ids so aliased objects are
    counted only once and reference cycles terminate.
    """
    address = id(obj)
    if address in visited_addresses:
        return 0
    visited_addresses.add(address)
    if isinstance(obj, torch.Tensor):
        return get_tensor_memory_consumption(obj)
    total_size = 0
    # Fixed: original used isinstance(obj, (list | tuple)) — a union type
    # inside a 1-tuple, which only works on Python 3.10+ and reads wrongly.
    if isinstance(obj, (list, tuple)):
        for item in obj:
            total_size += get_tensors_memory_consumption(item, visited_addresses)
    elif isinstance(obj, dict):
        for value in obj.values():
            total_size += get_tensors_memory_consumption(value, visited_addresses)
    elif hasattr(obj, "__dict__"):
        # It's an object, we need to get the size of the attributes
        for attr in vars(obj).values():
            total_size += get_tensors_memory_consumption(attr, visited_addresses)
    return total_size
def get_object_memory(obj):
    """Approximate total bytes for *obj*: its shallow size plus all reachable tensor data."""
    # Track visited addresses to avoid infinite loops and to count objects
    # that are referenced from several places only once.
    seen_addresses = set()
    shallow = sys.getsizeof(obj)
    return shallow + get_tensors_memory_consumption(obj, seen_addresses)
def create_dummy_action() -> torch.Tensor:
    """Random 4-dimensional action vector drawn from a standard normal."""
    return torch.randn((4,))
def dict_properties() -> list:
    """Keys of a BatchTransition whose values are dicts of tensors."""
    return ["state", "next_state"]
@pytest.fixture
def dummy_state() -> dict:
    # Fresh random observation per test.
    return create_dummy_state()
@pytest.fixture
def next_dummy_state() -> dict:
    # Independent random observation, distinct from dummy_state.
    return create_dummy_state()
@pytest.fixture
def dummy_action() -> torch.Tensor:
    # Random 4-dim action per test.
    return torch.randn(4)
def test_empty_buffer_sample_raises_error(replay_buffer):
    """A freshly created buffer reports its capacity, is empty, and refuses to sample."""
    assert replay_buffer.capacity == 10, "Replay buffer capacity should be 10."
    assert len(replay_buffer) == 0, "Replay buffer should be empty."
    with pytest.raises(RuntimeError, match="Cannot sample from an empty buffer"):
        replay_buffer.sample(1)
def test_zero_capacity_buffer_raises_error():
    # Constructing with capacity 0 must be rejected outright.
    with pytest.raises(ValueError, match="Capacity must be greater than 0."):
        ReplayBuffer(0, "cpu", [OBS_STR, "next_observation"])
def test_add_transition(replay_buffer, dummy_state, dummy_action):
    """Adding one transition stores every field in slot 0 of the buffer arrays."""
    replay_buffer.add(dummy_state, dummy_action, 1.0, dummy_state, False, False)
    assert len(replay_buffer) == 1, "Replay buffer should have one transition after adding."
    assert torch.equal(replay_buffer.actions[0], dummy_action), (
        "Action should be equal to the first transition."
    )
    assert replay_buffer.rewards[0] == 1.0, "Reward should be equal to the first transition."
    assert not replay_buffer.dones[0], "Done should be False for the first transition."
    assert not replay_buffer.truncateds[0], "Truncated should be False for the first transition."
    # state and next_state were both dummy_state, so slot 0 of each per-key
    # array should hold the same values.
    for dim in state_dims():
        assert torch.equal(replay_buffer.states[dim][0], dummy_state[dim]), (
            "Observation should be equal to the first transition."
        )
        assert torch.equal(replay_buffer.next_states[dim][0], dummy_state[dim]), (
            "Next observation should be equal to the first transition."
        )
def test_add_over_capacity():
    """Adding a 3rd transition to a capacity-2 buffer wraps around and overwrites slot 0."""
    replay_buffer = ReplayBuffer(2, "cpu", [OBS_STR, "next_observation"])
    dummy_state_1 = create_dummy_state()
    dummy_action_1 = create_dummy_action()
    dummy_state_2 = create_dummy_state()
    dummy_action_2 = create_dummy_action()
    dummy_state_3 = create_dummy_state()
    dummy_action_3 = create_dummy_action()
    replay_buffer.add(dummy_state_1, dummy_action_1, 1.0, dummy_state_1, False, False)
    replay_buffer.add(dummy_state_2, dummy_action_2, 1.0, dummy_state_2, False, False)
    replay_buffer.add(dummy_state_3, dummy_action_3, 1.0, dummy_state_3, True, True)
    assert len(replay_buffer) == 2, "Replay buffer should have 2 transitions after adding 3."
    # Slot 0 now holds the THIRD transition (ring-buffer wrap-around); the
    # assertion messages saying "first transition" refer to the slot index.
    for dim in state_dims():
        assert torch.equal(replay_buffer.states[dim][0], dummy_state_3[dim]), (
            "Observation should be equal to the first transition."
        )
        assert torch.equal(replay_buffer.next_states[dim][0], dummy_state_3[dim]), (
            "Next observation should be equal to the first transition."
        )
    assert torch.equal(replay_buffer.actions[0], dummy_action_3), (
        "Action should be equal to the last transition."
    )
    assert replay_buffer.rewards[0] == 1.0, "Reward should be equal to the last transition."
    assert replay_buffer.dones[0], "Done should be True for the first transition."
    assert replay_buffer.truncateds[0], "Truncated should be True for the first transition."
def test_sample_from_empty_buffer(replay_buffer):
    # Duplicates the final check of test_empty_buffer_sample_raises_error.
    with pytest.raises(RuntimeError, match="Cannot sample from an empty buffer"):
        replay_buffer.sample(1)
def test_sample_with_1_transition(replay_buffer, dummy_state, next_dummy_state, dummy_action):
    """Sampling a batch of 1 from a 1-element buffer returns that transition, batched."""
    replay_buffer.add(dummy_state, dummy_action, 1.0, next_dummy_state, False, False)
    got_batch_transition = replay_buffer.sample(1)
    expected_batch_transition = BatchTransition(
        state=clone_state(dummy_state),
        action=dummy_action.clone(),
        reward=1.0,
        next_state=clone_state(next_dummy_state),
        done=False,
        truncated=False,
    )
    # Dict-valued fields (state/next_state): each key is a (1, ...) batch on CPU.
    for buffer_property in dict_properties():
        for k, v in expected_batch_transition[buffer_property].items():
            got_state = got_batch_transition[buffer_property][k]
            assert got_state.shape[0] == 1, f"{k} should have 1 transition."
            assert got_state.device.type == "cpu", f"{k} should be on cpu."
            assert torch.equal(got_state[0], v), f"{k} should be equal to the expected batch transition."
    # Scalar/tensor fields: compare after promoting expected scalars to tensors.
    for key, _value in expected_batch_transition.items():
        if key in dict_properties():
            continue
        got_value = got_batch_transition[key]
        v_tensor = expected_batch_transition[key]
        if not isinstance(v_tensor, torch.Tensor):
            v_tensor = torch.tensor(v_tensor)
        assert got_value.shape[0] == 1, f"{key} should have 1 transition."
        assert got_value.device.type == "cpu", f"{key} should be on cpu."
        assert torch.equal(got_value[0], v_tensor), f"{key} should be equal to the expected batch transition."
def test_sample_with_batch_bigger_than_buffer_size(
    replay_buffer, dummy_state, next_dummy_state, dummy_action
):
    """Requesting more samples than stored clamps the batch to the buffer length (1)."""
    replay_buffer.add(dummy_state, dummy_action, 1.0, next_dummy_state, False, False)
    got_batch_transition = replay_buffer.sample(10)
    expected_batch_transition = BatchTransition(
        state=dummy_state,
        action=dummy_action,
        reward=1.0,
        next_state=next_dummy_state,
        done=False,
        truncated=False,
    )
    # Only shapes are checked here: batch dim must be 1, not 10.
    for buffer_property in dict_properties():
        for k in expected_batch_transition[buffer_property]:
            got_state = got_batch_transition[buffer_property][k]
            assert got_state.shape[0] == 1, f"{k} should have 1 transition."
    for key in expected_batch_transition:
        if key in dict_properties():
            continue
        got_value = got_batch_transition[key]
        assert got_value.shape[0] == 1, f"{key} should have 1 transition."
def test_sample_batch(replay_buffer):
    """Sampling 3 of 4 stored transitions yields batched values drawn from the stored set."""
    dummy_state_1 = create_dummy_state()
    dummy_action_1 = create_dummy_action()
    dummy_state_2 = create_dummy_state()
    dummy_action_2 = create_dummy_action()
    dummy_state_3 = create_dummy_state()
    dummy_action_3 = create_dummy_action()
    dummy_state_4 = create_dummy_state()
    dummy_action_4 = create_dummy_action()
    replay_buffer.add(dummy_state_1, dummy_action_1, 1.0, dummy_state_1, False, False)
    replay_buffer.add(dummy_state_2, dummy_action_2, 2.0, dummy_state_2, False, False)
    replay_buffer.add(dummy_state_3, dummy_action_3, 3.0, dummy_state_3, True, True)
    replay_buffer.add(dummy_state_4, dummy_action_4, 4.0, dummy_state_4, True, True)
    dummy_states = [dummy_state_1, dummy_state_2, dummy_state_3, dummy_state_4]
    dummy_actions = [dummy_action_1, dummy_action_2, dummy_action_3, dummy_action_4]
    got_batch_transition = replay_buffer.sample(3)
    # Sampling is random, so each returned item must merely MATCH one of the
    # stored transitions rather than a fixed index.
    for buffer_property in dict_properties():
        for k in got_batch_transition[buffer_property]:
            got_state = got_batch_transition[buffer_property][k]
            assert got_state.shape[0] == 3, f"{k} should have 3 transition."
            for got_state_item in got_state:
                assert any(torch.equal(got_state_item, dummy_state[k]) for dummy_state in dummy_states), (
                    f"{k} should be equal to one of the dummy states."
                )
    for got_action_item in got_batch_transition[ACTION]:
        assert any(torch.equal(got_action_item, dummy_action) for dummy_action in dummy_actions), (
            "Actions should be equal to the dummy actions."
        )
    # All remaining tensor fields share the batch dimension of 3.
    for k in got_batch_transition:
        if k in dict_properties() or k == "complementary_info":
            continue
        got_value = got_batch_transition[k]
        assert got_value.shape[0] == 3, f"{k} should have 3 transition."
def test_to_lerobot_dataset_with_empty_buffer(replay_buffer):
    # Converting an empty buffer must fail explicitly rather than emit an empty dataset.
    with pytest.raises(ValueError, match="The replay buffer is empty. Cannot convert to a dataset."):
        replay_buffer.to_lerobot_dataset("dummy_repo")
def test_to_lerobot_dataset(tmp_path):
    """Converting a filled buffer to a LeRobotDataset preserves size, features and values."""
    # (Removed a leftover debug loop that printed image-equality results.)
    ds, buffer = create_dataset_from_replay_buffer(tmp_path)
    assert len(ds) == len(buffer), "Dataset should have the same size as the Replay Buffer"
    assert ds.fps == 1, "FPS should be 1"
    assert ds.repo_id == "dummy/repo", "The dataset should have `dummy/repo` repo id"
    for dim in state_dims():
        assert dim in ds.features
        assert ds.features[dim]["shape"] == buffer.states[dim][0].shape
    assert ds.num_episodes == 2
    assert ds.num_frames == 4
    for i in range(len(ds)):
        for feature, value in ds[i].items():
            if feature == ACTION:
                assert torch.equal(value, buffer.actions[i])
            elif feature == REWARD:
                assert torch.equal(value, buffer.rewards[i])
            elif feature == DONE:
                assert torch.equal(value, buffer.dones[i])
            elif feature == OBS_IMAGE:
                # Tensor -> numpy is not precise, so we have some diff there
                # TODO: Check and fix it
                torch.testing.assert_close(value, buffer.states[OBS_IMAGE][i], rtol=0.3, atol=0.003)
            elif feature == OBS_STATE:
                assert torch.equal(value, buffer.states[OBS_STATE][i])
def test_from_lerobot_dataset(tmp_path):
dummy_state_1 = create_dummy_state()
dummy_action_1 = create_dummy_action()
dummy_state_2 = create_dummy_state()
dummy_action_2 = create_dummy_action()
dummy_state_3 = create_dummy_state()
dummy_action_3 = create_dummy_action()
dummy_state_4 = create_dummy_state()
dummy_action_4 = create_dummy_action()
replay_buffer = create_empty_replay_buffer()
replay_buffer.add(dummy_state_1, dummy_action_1, 1.0, dummy_state_1, False, False)
replay_buffer.add(dummy_state_2, dummy_action_2, 1.0, dummy_state_2, False, False)
replay_buffer.add(dummy_state_3, dummy_action_3, 1.0, dummy_state_3, True, True)
replay_buffer.add(dummy_state_4, dummy_action_4, 1.0, dummy_state_4, True, True)
root = tmp_path / "test"
ds = replay_buffer.to_lerobot_dataset(DUMMY_REPO_ID, root=root)
reconverted_buffer = ReplayBuffer.from_lerobot_dataset(
ds, state_keys=list(state_dims()), device="cpu", capacity=replay_buffer.capacity, use_drq=False
)
# Check only the part of the buffer that's actually filled with data
assert torch.equal(
reconverted_buffer.actions[: len(replay_buffer)],
replay_buffer.actions[: len(replay_buffer)],
), "Actions from converted buffer should be equal to the original replay buffer."
assert torch.equal(
reconverted_buffer.rewards[: len(replay_buffer)], replay_buffer.rewards[: len(replay_buffer)]
), "Rewards from converted buffer should be equal to the original replay buffer."
assert torch.equal(
reconverted_buffer.dones[: len(replay_buffer)], replay_buffer.dones[: len(replay_buffer)]
), "Dones from converted buffer should be equal to the original replay buffer."
# Lerobot DS haven't supported truncateds yet
expected_truncateds = torch.zeros(len(replay_buffer)).bool()
assert torch.equal(reconverted_buffer.truncateds[: len(replay_buffer)], expected_truncateds), (
"Truncateds from converted buffer should be equal False"
)
assert torch.equal(
replay_buffer.states[OBS_STATE][: len(replay_buffer)],
reconverted_buffer.states[OBS_STATE][: len(replay_buffer)],
), "State should be the same after converting to dataset and return back"
for i in range(4):
torch.testing.assert_close(
replay_buffer.states[OBS_IMAGE][i],
reconverted_buffer.states[OBS_IMAGE][i],
rtol=0.4,
atol=0.004,
)
# The 2, 3 frames have done flag, so their values will be equal to the current state
for i in range(2):
# In the current implementation we take the next state from the `states` and ignore `next_states`
next_index = (i + 1) % 4
torch.testing.assert_close(
replay_buffer.states[OBS_IMAGE][next_index],
reconverted_buffer.next_states[OBS_IMAGE][i],
rtol=0.4,
atol=0.004,
)
for i in range(2, 4):
assert torch.equal(
replay_buffer.states[OBS_STATE][i],
reconverted_buffer.next_states[OBS_STATE][i],
)
def test_buffer_sample_alignment():
# Initialize buffer
buffer = ReplayBuffer(capacity=100, device="cpu", state_keys=["state_value"], storage_device="cpu")
# Fill buffer with patterned data
for i in range(100):
signature = float(i) / 100.0
state = {"state_value": torch.tensor([[signature]]).float()}
action = torch.tensor([[2.0 * signature]]).float()
reward = 3.0 * signature
is_end = (i + 1) % 10 == 0
if is_end:
next_state = {"state_value": torch.tensor([[signature]]).float()}
done = True
else:
next_signature = float(i + 1) / 100.0
next_state = {"state_value": torch.tensor([[next_signature]]).float()}
done = False
buffer.add(state, action, reward, next_state, done, False)
# Sample and verify
batch = buffer.sample(50)
for i in range(50):
state_sig = batch["state"]["state_value"][i].item()
action_val = batch[ACTION][i].item()
reward_val = batch["reward"][i].item()
next_state_sig = batch["next_state"]["state_value"][i].item()
is_done = batch["done"][i].item() > 0.5
# Verify relationships
assert abs(action_val - 2.0 * state_sig) < 1e-4, (
f"Action {action_val} should be 2x state signature {state_sig}"
)
assert abs(reward_val - 3.0 * state_sig) < 1e-4, (
f"Reward {reward_val} should be 3x state signature {state_sig}"
)
if is_done:
assert abs(next_state_sig - state_sig) < 1e-4, (
f"For done states, next_state {next_state_sig} should equal state {state_sig}"
)
else:
# Either it's the next sequential state (+0.01) or same state (for episode boundaries)
valid_next = (
abs(next_state_sig - state_sig - 0.01) < 1e-4 or abs(next_state_sig - state_sig) < 1e-4
)
assert valid_next, (
f"Next state {next_state_sig} should be either state+0.01 or same as state {state_sig}"
)
def test_memory_optimization():
dummy_state_1 = create_dummy_state()
dummy_action_1 = create_dummy_action()
dummy_state_2 = create_dummy_state()
dummy_action_2 = create_dummy_action()
dummy_state_3 = create_dummy_state()
dummy_action_3 = create_dummy_action()
dummy_state_4 = create_dummy_state()
dummy_action_4 = create_dummy_action()
replay_buffer = create_empty_replay_buffer()
replay_buffer.add(dummy_state_1, dummy_action_1, 1.0, dummy_state_2, False, False)
replay_buffer.add(dummy_state_2, dummy_action_2, 1.0, dummy_state_3, False, False)
replay_buffer.add(dummy_state_3, dummy_action_3, 1.0, dummy_state_4, False, False)
replay_buffer.add(dummy_state_4, dummy_action_4, 1.0, dummy_state_4, True, True)
optimized_replay_buffer = create_empty_replay_buffer(True)
optimized_replay_buffer.add(dummy_state_1, dummy_action_1, 1.0, dummy_state_2, False, False)
optimized_replay_buffer.add(dummy_state_2, dummy_action_2, 1.0, dummy_state_3, False, False)
optimized_replay_buffer.add(dummy_state_3, dummy_action_3, 1.0, dummy_state_4, False, False)
optimized_replay_buffer.add(dummy_state_4, dummy_action_4, 1.0, None, True, True)
assert get_object_memory(optimized_replay_buffer) < get_object_memory(replay_buffer), (
"Optimized replay buffer should be smaller than the original replay buffer"
)
def test_check_image_augmentations_with_drq_and_dummy_image_augmentation_function(dummy_state, dummy_action):
def dummy_image_augmentation_function(x):
return torch.ones_like(x) * 10
replay_buffer = create_empty_replay_buffer(
use_drq=True, image_augmentation_function=dummy_image_augmentation_function
)
replay_buffer.add(dummy_state, dummy_action, 1.0, dummy_state, False, False)
sampled_transitions = replay_buffer.sample(1)
assert torch.all(sampled_transitions["state"][OBS_IMAGE] == 10), "Image augmentations should be applied"
assert torch.all(sampled_transitions["next_state"][OBS_IMAGE] == 10), (
"Image augmentations should be applied"
)
def test_check_image_augmentations_with_drq_and_default_image_augmentation_function(
dummy_state, dummy_action
):
replay_buffer = create_empty_replay_buffer(use_drq=True)
replay_buffer.add(dummy_state, dummy_action, 1.0, dummy_state, False, False)
# Let's check that it doesn't fail and shapes are correct
sampled_transitions = replay_buffer.sample(1)
assert sampled_transitions["state"][OBS_IMAGE].shape == (1, 3, 84, 84)
assert sampled_transitions["next_state"][OBS_IMAGE].shape == (1, 3, 84, 84)
def test_random_crop_vectorized_basic():
# Create a batch of 2 images with known patterns
batch_size, channels, height, width = 2, 3, 10, 8
images = torch.zeros((batch_size, channels, height, width))
# Fill with unique values for testing
for b in range(batch_size):
images[b] = b + 1
crop_size = (6, 4) # Smaller than original
cropped = random_crop_vectorized(images, crop_size)
# Check output shape
assert cropped.shape == (batch_size, channels, *crop_size)
# Check that values are preserved (should be either 1s or 2s for respective batches)
assert torch.all(cropped[0] == 1)
assert torch.all(cropped[1] == 2)
def test_random_crop_vectorized_invalid_size():
images = torch.zeros((2, 3, 10, 8))
# Test crop size larger than image
with pytest.raises(ValueError, match="Requested crop size .* is bigger than the image size"):
random_crop_vectorized(images, (12, 8))
with pytest.raises(ValueError, match="Requested crop size .* is bigger than the image size"):
random_crop_vectorized(images, (10, 10))
def _populate_buffer_for_async_test(capacity: int = 10) -> ReplayBuffer:
"""Create a small buffer with deterministic 3×128×128 images and 11-D state."""
buffer = ReplayBuffer(
capacity=capacity,
device="cpu",
state_keys=[OBS_IMAGE, OBS_STATE],
storage_device="cpu",
)
for i in range(capacity):
img = torch.ones(3, 128, 128) * i
state_vec = torch.arange(11).float() + i
state = {
OBS_IMAGE: img,
OBS_STATE: state_vec,
}
buffer.add(
state=state,
action=torch.tensor([0.0]),
reward=0.0,
next_state=state,
done=False,
truncated=False,
)
return buffer
def test_async_iterator_shapes_basic():
buffer = _populate_buffer_for_async_test()
batch_size = 2
iterator = buffer.get_iterator(batch_size=batch_size, async_prefetch=True, queue_size=1)
batch = next(iterator)
images = batch["state"][OBS_IMAGE]
states = batch["state"][OBS_STATE]
assert images.shape == (batch_size, 3, 128, 128)
assert states.shape == (batch_size, 11)
next_images = batch["next_state"][OBS_IMAGE]
next_states = batch["next_state"][OBS_STATE]
assert next_images.shape == (batch_size, 3, 128, 128)
assert next_states.shape == (batch_size, 11)
def test_async_iterator_multiple_iterations():
buffer = _populate_buffer_for_async_test()
batch_size = 2
iterator = buffer.get_iterator(batch_size=batch_size, async_prefetch=True, queue_size=2)
for _ in range(5):
batch = next(iterator)
images = batch["state"][OBS_IMAGE]
states = batch["state"][OBS_STATE]
assert images.shape == (batch_size, 3, 128, 128)
assert states.shape == (batch_size, 11)
next_images = batch["next_state"][OBS_IMAGE]
next_states = batch["next_state"][OBS_STATE]
assert next_images.shape == (batch_size, 3, 128, 128)
assert next_states.shape == (batch_size, 11)
# Ensure iterator can be disposed without blocking
del iterator
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/utils/test_replay_buffer.py",
"license": "Apache License 2.0",
"lines": 510,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:examples/backward_compatibility/replay.py | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Replays the actions of an episode from a dataset on a robot.
Example:
```shell
lerobot-replay \
--robot.type=so100_follower \
--robot.port=/dev/tty.usbmodem58760431541 \
--robot.id=black \
--dataset.repo_id=<USER>/record-test \
--dataset.episode=2
```
"""
import logging
import time
from dataclasses import asdict, dataclass
from pathlib import Path
from pprint import pformat
import draccus
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.robots import ( # noqa: F401
Robot,
RobotConfig,
koch_follower,
make_robot_from_config,
so_follower,
)
from lerobot.utils.constants import ACTION
from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.utils import (
init_logging,
log_say,
)
@dataclass
class DatasetReplayConfig:
# Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).
repo_id: str
# Episode to replay.
episode: int
# Root directory where the dataset will be stored (e.g. 'dataset/path'). If None, defaults to $HF_LEROBOT_HOME/repo_id.
root: str | Path | None = None
# Limit the frames per second. By default, uses the policy fps.
fps: int = 30
@dataclass
class ReplayConfig:
robot: RobotConfig
dataset: DatasetReplayConfig
# Use vocal synthesis to read events.
play_sounds: bool = True
@draccus.wrap()
def replay(cfg: ReplayConfig):
init_logging()
logging.info(pformat(asdict(cfg)))
robot = make_robot_from_config(cfg.robot)
dataset = LeRobotDataset(cfg.dataset.repo_id, root=cfg.dataset.root, episodes=[cfg.dataset.episode])
actions = dataset.hf_dataset.select_columns(ACTION)
robot.connect()
try:
log_say("Replaying episode", cfg.play_sounds, blocking=True)
for idx in range(dataset.num_frames):
start_episode_t = time.perf_counter()
action_array = actions[idx][ACTION]
action = {}
for i, name in enumerate(dataset.features[ACTION]["names"]):
key = f"{name.removeprefix('main_')}.pos"
action[key] = action_array[i].item()
action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90)
action["elbow_flex.pos"] -= 90
robot.send_action(action)
dt_s = time.perf_counter() - start_episode_t
precise_sleep(max(1 / dataset.fps - dt_s, 0.0))
finally:
robot.disconnect()
if __name__ == "__main__":
replay()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/backward_compatibility/replay.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/lekiwi/evaluate.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.utils import hw_to_dataset_features
from lerobot.policies.act.modeling_act import ACTPolicy
from lerobot.policies.factory import make_pre_post_processors
from lerobot.processor import make_default_processors
from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig
from lerobot.scripts.lerobot_record import record_loop
from lerobot.utils.constants import ACTION, OBS_STR
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun
NUM_EPISODES = 2
FPS = 30
EPISODE_TIME_SEC = 60
TASK_DESCRIPTION = "My task description"
HF_MODEL_ID = "<hf_username>/<model_repo_id>"
HF_DATASET_ID = "<hf_username>/<eval_dataset_repo_id>"
def main():
# Create the robot configuration & robot
robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi")
robot = LeKiwiClient(robot_config)
# Create policy
policy = ACTPolicy.from_pretrained(HF_MODEL_ID)
# Configure the dataset features
action_features = hw_to_dataset_features(robot.action_features, ACTION)
obs_features = hw_to_dataset_features(robot.observation_features, OBS_STR)
dataset_features = {**action_features, **obs_features}
# Create the dataset
dataset = LeRobotDataset.create(
repo_id=HF_DATASET_ID,
fps=FPS,
features=dataset_features,
robot_type=robot.name,
use_videos=True,
image_writer_threads=4,
)
# Build Policy Processors
preprocessor, postprocessor = make_pre_post_processors(
policy_cfg=policy,
pretrained_path=HF_MODEL_ID,
dataset_stats=dataset.meta.stats,
# The inference device is automatically set to match the detected hardware, overriding any previous device settings from training to ensure compatibility.
preprocessor_overrides={"device_processor": {"device": str(policy.config.device)}},
)
# Connect the robot
# To connect you already should have this script running on LeKiwi: `python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi`
robot.connect()
# TODO(Steven): Update this example to use pipelines
teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors()
# Initialize the keyboard listener and rerun visualization
listener, events = init_keyboard_listener()
init_rerun(session_name="lekiwi_evaluate")
try:
if not robot.is_connected:
raise ValueError("Robot is not connected!")
print("Starting evaluate loop...")
recorded_episodes = 0
while recorded_episodes < NUM_EPISODES and not events["stop_recording"]:
log_say(f"Running inference, recording eval episode {recorded_episodes} of {NUM_EPISODES}")
# Main record loop
record_loop(
robot=robot,
events=events,
fps=FPS,
policy=policy,
preprocessor=preprocessor, # Pass the pre and post policy processors
postprocessor=postprocessor,
dataset=dataset,
control_time_s=EPISODE_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
teleop_action_processor=teleop_action_processor,
robot_action_processor=robot_action_processor,
robot_observation_processor=robot_observation_processor,
)
# Reset the environment if not stopping or re-recording
if not events["stop_recording"] and (
(recorded_episodes < NUM_EPISODES - 1) or events["rerecord_episode"]
):
log_say("Reset the environment")
record_loop(
robot=robot,
events=events,
fps=FPS,
control_time_s=EPISODE_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
teleop_action_processor=teleop_action_processor,
robot_action_processor=robot_action_processor,
robot_observation_processor=robot_observation_processor,
)
if events["rerecord_episode"]:
log_say("Re-record episode")
events["rerecord_episode"] = False
events["exit_early"] = False
dataset.clear_episode_buffer()
continue
# Save episode
dataset.save_episode()
recorded_episodes += 1
finally:
# Clean up
log_say("Stop recording")
robot.disconnect()
listener.stop()
dataset.finalize()
dataset.push_to_hub()
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/lekiwi/evaluate.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/lekiwi/record.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.utils import hw_to_dataset_features
from lerobot.processor import make_default_processors
from lerobot.robots.lekiwi.config_lekiwi import LeKiwiClientConfig
from lerobot.robots.lekiwi.lekiwi_client import LeKiwiClient
from lerobot.scripts.lerobot_record import record_loop
from lerobot.teleoperators.keyboard import KeyboardTeleop, KeyboardTeleopConfig
from lerobot.teleoperators.so_leader import SO100Leader, SO100LeaderConfig
from lerobot.utils.constants import ACTION, OBS_STR
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun
NUM_EPISODES = 2
FPS = 30
EPISODE_TIME_SEC = 30
RESET_TIME_SEC = 10
TASK_DESCRIPTION = "My task description"
HF_REPO_ID = "<hf_username>/<dataset_repo_id>"
def main():
# Create the robot and teleoperator configurations
robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi")
leader_arm_config = SO100LeaderConfig(port="/dev/tty.usbmodem585A0077581", id="my_awesome_leader_arm")
keyboard_config = KeyboardTeleopConfig()
# Initialize the robot and teleoperator
robot = LeKiwiClient(robot_config)
leader_arm = SO100Leader(leader_arm_config)
keyboard = KeyboardTeleop(keyboard_config)
# TODO(Steven): Update this example to use pipelines
teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors()
# Configure the dataset features
action_features = hw_to_dataset_features(robot.action_features, ACTION)
obs_features = hw_to_dataset_features(robot.observation_features, OBS_STR)
dataset_features = {**action_features, **obs_features}
# Create the dataset
dataset = LeRobotDataset.create(
repo_id=HF_REPO_ID,
fps=FPS,
features=dataset_features,
robot_type=robot.name,
use_videos=True,
image_writer_threads=4,
)
# Connect the robot and teleoperator
# To connect you already should have this script running on LeKiwi: `python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi`
robot.connect()
leader_arm.connect()
keyboard.connect()
# Initialize the keyboard listener and rerun visualization
listener, events = init_keyboard_listener()
init_rerun(session_name="lekiwi_record")
try:
if not robot.is_connected or not leader_arm.is_connected or not keyboard.is_connected:
raise ValueError("Robot or teleop is not connected!")
print("Starting record loop...")
recorded_episodes = 0
while recorded_episodes < NUM_EPISODES and not events["stop_recording"]:
log_say(f"Recording episode {recorded_episodes}")
# Main record loop
record_loop(
robot=robot,
events=events,
fps=FPS,
dataset=dataset,
teleop=[leader_arm, keyboard],
control_time_s=EPISODE_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
teleop_action_processor=teleop_action_processor,
robot_action_processor=robot_action_processor,
robot_observation_processor=robot_observation_processor,
)
# Reset the environment if not stopping or re-recording
if not events["stop_recording"] and (
(recorded_episodes < NUM_EPISODES - 1) or events["rerecord_episode"]
):
log_say("Reset the environment")
record_loop(
robot=robot,
events=events,
fps=FPS,
teleop=[leader_arm, keyboard],
control_time_s=RESET_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
teleop_action_processor=teleop_action_processor,
robot_action_processor=robot_action_processor,
robot_observation_processor=robot_observation_processor,
)
if events["rerecord_episode"]:
log_say("Re-record episode")
events["rerecord_episode"] = False
events["exit_early"] = False
dataset.clear_episode_buffer()
continue
# Save episode
dataset.save_episode()
recorded_episodes += 1
finally:
# Clean up
log_say("Stop recording")
robot.disconnect()
leader_arm.disconnect()
keyboard.disconnect()
listener.stop()
dataset.finalize()
dataset.push_to_hub()
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/lekiwi/record.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/lekiwi/replay.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.robots.lekiwi.config_lekiwi import LeKiwiClientConfig
from lerobot.robots.lekiwi.lekiwi_client import LeKiwiClient
from lerobot.utils.constants import ACTION
from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.utils import log_say
EPISODE_IDX = 0
def main():
# Initialize the robot config
robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi")
# Initialize the robot
robot = LeKiwiClient(robot_config)
# Fetch the dataset to replay
dataset = LeRobotDataset("<hf_username>/<dataset_repo_id>", episodes=[EPISODE_IDX])
# Filter dataset to only include frames from the specified episode since episodes are chunked in dataset V3.0
episode_frames = dataset.hf_dataset.filter(lambda x: x["episode_index"] == EPISODE_IDX)
actions = episode_frames.select_columns(ACTION)
# Connect to the robot
robot.connect()
try:
if not robot.is_connected:
raise ValueError("Robot is not connected!")
print("Starting replay loop...")
log_say(f"Replaying episode {EPISODE_IDX}")
for idx in range(len(episode_frames)):
t0 = time.perf_counter()
# Get recorded action from dataset
action = {
name: float(actions[idx][ACTION][i])
for i, name in enumerate(dataset.features[ACTION]["names"])
}
# Send action to robot
_ = robot.send_action(action)
precise_sleep(max(1.0 / dataset.fps - (time.perf_counter() - t0), 0.0))
finally:
robot.disconnect()
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/lekiwi/replay.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/lekiwi/teleoperate.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig
from lerobot.teleoperators.keyboard.teleop_keyboard import KeyboardTeleop, KeyboardTeleopConfig
from lerobot.teleoperators.so_leader import SO100Leader, SO100LeaderConfig
from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.visualization_utils import init_rerun, log_rerun_data
FPS = 30
def main():
# Create the robot and teleoperator configurations
robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="my_lekiwi")
teleop_arm_config = SO100LeaderConfig(port="/dev/tty.usbmodem585A0077581", id="my_awesome_leader_arm")
keyboard_config = KeyboardTeleopConfig(id="my_laptop_keyboard")
# Initialize the robot and teleoperator
robot = LeKiwiClient(robot_config)
leader_arm = SO100Leader(teleop_arm_config)
keyboard = KeyboardTeleop(keyboard_config)
# Connect to the robot and teleoperator
# To connect you already should have this script running on LeKiwi: `python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi`
robot.connect()
leader_arm.connect()
keyboard.connect()
# Init rerun viewer
init_rerun(session_name="lekiwi_teleop")
if not robot.is_connected or not leader_arm.is_connected or not keyboard.is_connected:
raise ValueError("Robot or teleop is not connected!")
print("Starting teleop loop...")
while True:
t0 = time.perf_counter()
# Get robot observation
observation = robot.get_observation()
# Get teleop action
# Arm
arm_action = leader_arm.get_action()
arm_action = {f"arm_{k}": v for k, v in arm_action.items()}
# Keyboard
keyboard_keys = keyboard.get_action()
base_action = robot._from_keyboard_to_base_action(keyboard_keys)
action = {**arm_action, **base_action} if len(base_action) > 0 else arm_action
# Send action to robot
_ = robot.send_action(action)
# Visualize
log_rerun_data(observation=observation, action=action)
precise_sleep(max(1.0 / FPS - (time.perf_counter() - t0), 0.0))
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/lekiwi/teleoperate.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:tests/cameras/test_opencv.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Example of running a specific test:
# ```bash
# pytest tests/cameras/test_opencv.py::test_connect
# ```
from pathlib import Path
from unittest.mock import patch
import cv2
import numpy as np
import pytest
from lerobot.cameras.configs import Cv2Rotation
from lerobot.cameras.opencv import OpenCVCamera, OpenCVCameraConfig
from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
RealVideoCapture = cv2.VideoCapture
class MockLoopingVideoCapture:
"""
Wraps the real OpenCV VideoCapture.
Motivation: cv2.VideoCapture(file.png) is only valid for one read.
Strategy: Read the file once & return the cached frame for subsequent reads.
Consequence: No recurrent I/O operations, but we keep the test artifacts simple.
"""
def __init__(self, *args, **kwargs):
args_clean = [str(a) if isinstance(a, Path) else a for a in args]
self._real_vc = RealVideoCapture(*args_clean, **kwargs)
self._cached_frame = None
def read(self):
ret, frame = self._real_vc.read()
if ret:
self._cached_frame = frame
return ret, frame
if not ret and self._cached_frame is not None:
return True, self._cached_frame.copy()
return ret, frame
def __getattr__(self, name):
return getattr(self._real_vc, name)
@pytest.fixture(autouse=True)
def patch_opencv_videocapture():
"""
Automatically patches cv2.VideoCapture for all tests.
"""
module_path = OpenCVCamera.__module__
target = f"{module_path}.cv2.VideoCapture"
with patch(target, new=MockLoopingVideoCapture):
yield
# NOTE(Steven): more tests + assertions?
TEST_ARTIFACTS_DIR = Path(__file__).parent.parent / "artifacts" / "cameras"
DEFAULT_PNG_FILE_PATH = TEST_ARTIFACTS_DIR / "image_160x120.png"
TEST_IMAGE_SIZES = ["128x128", "160x120", "320x180", "480x270"]
TEST_IMAGE_PATHS = [TEST_ARTIFACTS_DIR / f"image_{size}.png" for size in TEST_IMAGE_SIZES]
def test_abc_implementation():
"""Instantiation should raise an error if the class doesn't implement abstract methods/properties."""
config = OpenCVCameraConfig(index_or_path=0)
_ = OpenCVCamera(config)
def test_connect():
config = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH, warmup_s=0)
with OpenCVCamera(config) as camera:
assert camera.is_connected
def test_connect_already_connected():
config = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH, warmup_s=0)
with OpenCVCamera(config) as camera, pytest.raises(DeviceAlreadyConnectedError):
camera.connect()
def test_connect_invalid_camera_path():
config = OpenCVCameraConfig(index_or_path="nonexistent/camera.png")
camera = OpenCVCamera(config)
with pytest.raises(ConnectionError):
camera.connect(warmup=False)
def test_invalid_width_connect():
config = OpenCVCameraConfig(
index_or_path=DEFAULT_PNG_FILE_PATH,
width=99999, # Invalid width to trigger error
height=480,
)
camera = OpenCVCamera(config)
with pytest.raises(RuntimeError):
camera.connect(warmup=False)
@pytest.mark.parametrize("index_or_path", TEST_IMAGE_PATHS, ids=TEST_IMAGE_SIZES)
def test_read(index_or_path):
config = OpenCVCameraConfig(index_or_path=index_or_path, warmup_s=0)
with OpenCVCamera(config) as camera:
img = camera.read()
assert isinstance(img, np.ndarray)
def test_read_before_connect():
config = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH)
camera = OpenCVCamera(config)
with pytest.raises(DeviceNotConnectedError):
_ = camera.read()
def test_disconnect():
    """disconnect() after connect() leaves the camera in a disconnected state."""
    cam = OpenCVCamera(OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH))
    cam.connect(warmup=False)
    cam.disconnect()
    assert not cam.is_connected
def test_disconnect_before_connect():
    """disconnect() without a prior connect() must raise."""
    cam = OpenCVCamera(OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH))
    with pytest.raises(DeviceNotConnectedError):
        cam.disconnect()
@pytest.mark.parametrize("index_or_path", TEST_IMAGE_PATHS, ids=TEST_IMAGE_SIZES)
def test_async_read(index_or_path):
    """async_read() spawns a live background thread and yields a frame."""
    cfg = OpenCVCameraConfig(index_or_path=index_or_path, warmup_s=0)
    with OpenCVCamera(cfg) as cam:
        frame = cam.async_read()
        assert cam.thread is not None
        assert cam.thread.is_alive()
        assert isinstance(frame, np.ndarray)
def test_async_read_timeout():
    """A second, immediate zero-timeout async_read() must time out."""
    cfg = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH, warmup_s=0)
    with OpenCVCamera(cfg) as cam:
        with pytest.raises(TimeoutError):
            cam.async_read(timeout_ms=0)  # drains whatever frame is already buffered
            cam.async_read(timeout_ms=0)  # no fresh frame can arrive within 0 ms
def test_async_read_before_connect():
    """async_read() without a prior connect() must raise."""
    cam = OpenCVCamera(OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH))
    with pytest.raises(DeviceNotConnectedError):
        cam.async_read()
def test_read_latest():
    """read_latest() returns an array with the same shape as a blocking read()."""
    cfg = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH, warmup_s=0)
    with OpenCVCamera(cfg) as cam:
        reference = cam.read()  # guarantees at least one frame has been captured
        newest = cam.read_latest()
        assert isinstance(newest, np.ndarray)
        assert newest.shape == reference.shape
def test_read_latest_before_connect():
    """read_latest() without a prior connect() must raise."""
    cam = OpenCVCamera(OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH))
    with pytest.raises(DeviceNotConnectedError):
        cam.read_latest()
def test_read_latest_high_frequency():
    """Rapid back-to-back read_latest() calls all yield well-formed frames."""
    cfg = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH, warmup_s=0)
    with OpenCVCamera(cfg) as cam:
        reference = cam.read()  # make sure frames are flowing first
        for _ in range(20):
            newest = cam.read_latest()
            assert isinstance(newest, np.ndarray)
            assert newest.shape == reference.shape
def test_read_latest_too_old():
    """With max_age_ms=0 every buffered frame is already stale, so read_latest() must time out."""
    cfg = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH, warmup_s=0)
    with OpenCVCamera(cfg) as cam:
        cam.read()  # make sure at least one frame exists
        with pytest.raises(TimeoutError):
            cam.read_latest(max_age_ms=0)
def test_fourcc_configuration():
    """Valid FourCC codes are accepted by the config; malformed ones are rejected."""
    # MJPG is the main use case.
    cam = OpenCVCamera(OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH, fourcc="MJPG"))
    assert cam.config.fourcc == "MJPG"
    # A handful of other common pixel formats.
    for code in ("YUYV", "YUY2", "RGB3"):
        cam = OpenCVCamera(OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH, fourcc=code))
        assert cam.config.fourcc == code
    # Anything that is not exactly four characters must be rejected.
    for code in ("ABC", "ABCDE", ""):
        with pytest.raises(ValueError):
            OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH, fourcc=code)
def test_fourcc_with_camera():
    """A camera configured with MJPG connects and reads normally."""
    cfg = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH, fourcc="MJPG", warmup_s=0)
    with OpenCVCamera(cfg) as cam:
        assert cam.is_connected
        assert isinstance(cam.read(), np.ndarray)
@pytest.mark.parametrize("index_or_path", TEST_IMAGE_PATHS, ids=TEST_IMAGE_SIZES)
@pytest.mark.parametrize(
    "rotation",
    [
        Cv2Rotation.NO_ROTATION,
        Cv2Rotation.ROTATE_90,
        Cv2Rotation.ROTATE_180,
        Cv2Rotation.ROTATE_270,
    ],
    ids=["no_rot", "rot90", "rot180", "rot270"],
)
def test_rotation(rotation, index_or_path):
    """Camera dimensions and frame shape must follow the configured rotation."""
    # Test images are named image_<width>x<height>.png; recover the size from the name.
    size_token = Path(index_or_path).name.split("_")[-1].split(".")[0]
    src_w, src_h = (int(v) for v in size_token.split("x"))
    cfg = OpenCVCameraConfig(index_or_path=index_or_path, rotation=rotation, warmup_s=0)
    with OpenCVCamera(cfg) as cam:
        frame = cam.read()
        assert isinstance(frame, np.ndarray)
        if rotation in (Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_270):
            # Quarter turns swap the reported width/height and the frame axes.
            assert cam.width == src_h
            assert cam.height == src_w
            assert frame.shape[:2] == (src_w, src_h)
        else:
            assert cam.width == src_w
            assert cam.height == src_h
            assert frame.shape[:2] == (src_h, src_w)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/cameras/test_opencv.py",
"license": "Apache License 2.0",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/cameras/test_realsense.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Example of running a specific test:
# ```bash
# pytest tests/cameras/test_realsense.py::test_connect
# ```
from pathlib import Path
from unittest.mock import patch
import numpy as np
import pytest
from lerobot.cameras.configs import Cv2Rotation
from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
pytest.importorskip("pyrealsense2")
from lerobot.cameras.realsense import RealSenseCamera, RealSenseCameraConfig
# Pre-recorded RealSense stream (.bag) replayed in place of real hardware.
TEST_ARTIFACTS_DIR = Path(__file__).parent.parent / "artifacts" / "cameras"
BAG_FILE_PATH = TEST_ARTIFACTS_DIR / "test_rs.bag"
# NOTE(Steven): For some reason these tests take ~20sec in macOS but only ~2sec in Linux.
def mock_rs_config_enable_device_from_file(rs_config_instance, _sn):
    """Side effect for pyrealsense2.config.enable_device: replay the recorded .bag file on loop instead of opening hardware (the serial number is ignored)."""
    return rs_config_instance.enable_device_from_file(str(BAG_FILE_PATH), repeat_playback=True)
def mock_rs_config_enable_device_bad_file(rs_config_instance, _sn):
    """Side effect that points at a missing .bag file, used to exercise connection failures."""
    return rs_config_instance.enable_device_from_file("non_existent_file.bag", repeat_playback=True)
@pytest.fixture(name="patch_realsense", autouse=True)
def fixture_patch_realsense():
    """Route every pyrealsense2 device open to the recorded .bag file, for all tests."""
    target = "pyrealsense2.config.enable_device"
    with patch(target, side_effect=mock_rs_config_enable_device_from_file) as mock:
        yield mock
def test_abc_implementation():
    """RealSenseCamera must be instantiable, i.e. it implements every abstract member of the base Camera."""
    _ = RealSenseCamera(RealSenseCameraConfig(serial_number_or_name="042"))
def test_connect():
    """Entering the context manager should leave the camera connected."""
    cfg = RealSenseCameraConfig(serial_number_or_name="042", warmup_s=0)
    with RealSenseCamera(cfg) as cam:
        assert cam.is_connected
def test_connect_already_connected():
    """Calling connect() on an already-connected camera must raise."""
    cfg = RealSenseCameraConfig(serial_number_or_name="042", warmup_s=0)
    with RealSenseCamera(cfg) as cam:
        with pytest.raises(DeviceAlreadyConnectedError):
            cam.connect(warmup=False)
def test_connect_invalid_camera_path(patch_realsense):
    """connect() must fail with ConnectionError when the backing .bag file is missing."""
    patch_realsense.side_effect = mock_rs_config_enable_device_bad_file
    cam = RealSenseCamera(RealSenseCameraConfig(serial_number_or_name="042"))
    with pytest.raises(ConnectionError):
        cam.connect(warmup=False)
def test_invalid_width_connect():
    """An unsupported capture width should make connect() raise ConnectionError."""
    cfg = RealSenseCameraConfig(serial_number_or_name="042", width=99999, height=480, fps=30)
    with pytest.raises(ConnectionError):
        RealSenseCamera(cfg).connect(warmup=False)
def test_read():
    """read() returns a numpy array."""
    cfg = RealSenseCameraConfig(serial_number_or_name="042", width=640, height=480, fps=30, warmup_s=0)
    with RealSenseCamera(cfg) as cam:
        assert isinstance(cam.read(), np.ndarray)
# TODO(Steven): Fix this test for the latest version of pyrealsense2.
@pytest.mark.skip("Skipping test: pyrealsense2 version > 2.55.1.6486")
def test_read_depth():
    """read_depth() should return a numpy depth map when use_depth is enabled."""
    config = RealSenseCameraConfig(serial_number_or_name="042", width=640, height=480, fps=30, use_depth=True)
    camera = RealSenseCamera(config)
    camera.connect(warmup=False)
    img = camera.read_depth(timeout_ms=2000)  # NOTE(Steven): Reading depth takes longer in CI environments.
    assert isinstance(img, np.ndarray)
def test_read_before_connect():
    """read() without a prior connect() must raise DeviceNotConnectedError."""
    cam = RealSenseCamera(RealSenseCameraConfig(serial_number_or_name="042"))
    with pytest.raises(DeviceNotConnectedError):
        cam.read()
def test_disconnect():
    """disconnect() after connect() leaves the camera in a disconnected state."""
    cam = RealSenseCamera(RealSenseCameraConfig(serial_number_or_name="042"))
    cam.connect(warmup=False)
    cam.disconnect()
    assert not cam.is_connected
def test_disconnect_before_connect():
    """disconnect() without a prior connect() must raise."""
    cam = RealSenseCamera(RealSenseCameraConfig(serial_number_or_name="042"))
    with pytest.raises(DeviceNotConnectedError):
        cam.disconnect()
def test_async_read():
    """async_read() spawns a live background thread and yields a frame."""
    cfg = RealSenseCameraConfig(serial_number_or_name="042", width=640, height=480, fps=30, warmup_s=0)
    with RealSenseCamera(cfg) as cam:
        frame = cam.async_read()
        assert cam.thread is not None
        assert cam.thread.is_alive()
        assert isinstance(frame, np.ndarray)
def test_async_read_timeout():
    """A second, immediate zero-timeout async_read() must time out."""
    cfg = RealSenseCameraConfig(serial_number_or_name="042", width=640, height=480, fps=30, warmup_s=0)
    with RealSenseCamera(cfg) as cam:
        with pytest.raises(TimeoutError):
            cam.async_read(timeout_ms=0)  # drains whatever frame is already buffered
            cam.async_read(timeout_ms=0)  # no fresh frame can arrive within 0 ms
def test_async_read_before_connect():
    """async_read() without a prior connect() must raise."""
    cam = RealSenseCamera(RealSenseCameraConfig(serial_number_or_name="042"))
    with pytest.raises(DeviceNotConnectedError):
        cam.async_read()
def test_read_latest():
    """read_latest() returns an array with the same shape as a blocking read()."""
    cfg = RealSenseCameraConfig(serial_number_or_name="042", width=640, height=480, fps=30, warmup_s=0)
    with RealSenseCamera(cfg) as cam:
        reference = cam.read()  # guarantees at least one frame has been captured
        newest = cam.read_latest()
        assert isinstance(newest, np.ndarray)
        assert newest.shape == reference.shape
def test_read_latest_high_frequency():
    """Rapid back-to-back read_latest() calls all yield well-formed frames."""
    cfg = RealSenseCameraConfig(serial_number_or_name="042", width=640, height=480, fps=30, warmup_s=0)
    with RealSenseCamera(cfg) as cam:
        reference = cam.read()  # make sure frames are flowing first
        for _ in range(20):
            newest = cam.read_latest()
            assert isinstance(newest, np.ndarray)
            assert newest.shape == reference.shape
def test_read_latest_before_connect():
    """read_latest() without a prior connect() must raise."""
    cam = RealSenseCamera(RealSenseCameraConfig(serial_number_or_name="042"))
    with pytest.raises(DeviceNotConnectedError):
        cam.read_latest()
def test_read_latest_too_old():
    """With max_age_ms=0 every buffered frame is already stale, so read_latest() must time out."""
    # warmup_s=0 matches the other read_latest tests in this module and the OpenCV
    # counterpart, and avoids a pointless warmup delay in this failure-path test.
    config = RealSenseCameraConfig(serial_number_or_name="042", warmup_s=0)
    with RealSenseCamera(config) as camera:
        _ = camera.read()  # prime to ensure frames are available
        with pytest.raises(TimeoutError):
            _ = camera.read_latest(max_age_ms=0)  # immediately too old
@pytest.mark.parametrize(
    "rotation",
    [
        Cv2Rotation.NO_ROTATION,
        Cv2Rotation.ROTATE_90,
        Cv2Rotation.ROTATE_180,
        Cv2Rotation.ROTATE_270,
    ],
    ids=["no_rot", "rot90", "rot180", "rot270"],
)
def test_rotation(rotation):
    """Camera dimensions and frame shape must follow the configured rotation (source stream is 640x480)."""
    cfg = RealSenseCameraConfig(serial_number_or_name="042", rotation=rotation, warmup_s=0)
    with RealSenseCamera(cfg) as cam:
        frame = cam.read()
        assert isinstance(frame, np.ndarray)
        if rotation in (Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_270):
            # Quarter turns swap the reported width/height and the frame axes.
            assert cam.width == 480
            assert cam.height == 640
            assert frame.shape[:2] == (640, 480)
        else:
            assert cam.width == 640
            assert cam.height == 480
            assert frame.shape[:2] == (480, 640)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/cameras/test_realsense.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/mocks/mock_dynamixel.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from collections.abc import Callable
import dynamixel_sdk as dxl
import serial
from mock_serial.mock_serial import MockSerial
from lerobot.motors.dynamixel.dynamixel import _split_into_byte_chunks
from .mock_serial_patch import WaitableStub
# CRC-16 lookup table (polynomial 0x8005) used by Dynamixel Protocol 2.0 packets.
# Source: https://emanual.robotis.com/docs/en/dxl/crc/
DXL_CRC_TABLE = [
0x0000, 0x8005, 0x800F, 0x000A, 0x801B, 0x001E, 0x0014, 0x8011,
0x8033, 0x0036, 0x003C, 0x8039, 0x0028, 0x802D, 0x8027, 0x0022,
0x8063, 0x0066, 0x006C, 0x8069, 0x0078, 0x807D, 0x8077, 0x0072,
0x0050, 0x8055, 0x805F, 0x005A, 0x804B, 0x004E, 0x0044, 0x8041,
0x80C3, 0x00C6, 0x00CC, 0x80C9, 0x00D8, 0x80DD, 0x80D7, 0x00D2,
0x00F0, 0x80F5, 0x80FF, 0x00FA, 0x80EB, 0x00EE, 0x00E4, 0x80E1,
0x00A0, 0x80A5, 0x80AF, 0x00AA, 0x80BB, 0x00BE, 0x00B4, 0x80B1,
0x8093, 0x0096, 0x009C, 0x8099, 0x0088, 0x808D, 0x8087, 0x0082,
0x8183, 0x0186, 0x018C, 0x8189, 0x0198, 0x819D, 0x8197, 0x0192,
0x01B0, 0x81B5, 0x81BF, 0x01BA, 0x81AB, 0x01AE, 0x01A4, 0x81A1,
0x01E0, 0x81E5, 0x81EF, 0x01EA, 0x81FB, 0x01FE, 0x01F4, 0x81F1,
0x81D3, 0x01D6, 0x01DC, 0x81D9, 0x01C8, 0x81CD, 0x81C7, 0x01C2,
0x0140, 0x8145, 0x814F, 0x014A, 0x815B, 0x015E, 0x0154, 0x8151,
0x8173, 0x0176, 0x017C, 0x8179, 0x0168, 0x816D, 0x8167, 0x0162,
0x8123, 0x0126, 0x012C, 0x8129, 0x0138, 0x813D, 0x8137, 0x0132,
0x0110, 0x8115, 0x811F, 0x011A, 0x810B, 0x010E, 0x0104, 0x8101,
0x8303, 0x0306, 0x030C, 0x8309, 0x0318, 0x831D, 0x8317, 0x0312,
0x0330, 0x8335, 0x833F, 0x033A, 0x832B, 0x032E, 0x0324, 0x8321,
0x0360, 0x8365, 0x836F, 0x036A, 0x837B, 0x037E, 0x0374, 0x8371,
0x8353, 0x0356, 0x035C, 0x8359, 0x0348, 0x834D, 0x8347, 0x0342,
0x03C0, 0x83C5, 0x83CF, 0x03CA, 0x83DB, 0x03DE, 0x03D4, 0x83D1,
0x83F3, 0x03F6, 0x03FC, 0x83F9, 0x03E8, 0x83ED, 0x83E7, 0x03E2,
0x83A3, 0x03A6, 0x03AC, 0x83A9, 0x03B8, 0x83BD, 0x83B7, 0x03B2,
0x0390, 0x8395, 0x839F, 0x039A, 0x838B, 0x038E, 0x0384, 0x8381,
0x0280, 0x8285, 0x828F, 0x028A, 0x829B, 0x029E, 0x0294, 0x8291,
0x82B3, 0x02B6, 0x02BC, 0x82B9, 0x02A8, 0x82AD, 0x82A7, 0x02A2,
0x82E3, 0x02E6, 0x02EC, 0x82E9, 0x02F8, 0x82FD, 0x82F7, 0x02F2,
0x02D0, 0x82D5, 0x82DF, 0x02DA, 0x82CB, 0x02CE, 0x02C4, 0x82C1,
0x8243, 0x0246, 0x024C, 0x8249, 0x0258, 0x825D, 0x8257, 0x0252,
0x0270, 0x8275, 0x827F, 0x027A, 0x826B, 0x026E, 0x0264, 0x8261,
0x0220, 0x8225, 0x822F, 0x022A, 0x823B, 0x023E, 0x0234, 0x8231,
0x8213, 0x0216, 0x021C, 0x8219, 0x0208, 0x820D, 0x8207, 0x0202
] # fmt: skip
class MockDynamixelPacketv2(abc.ABC):
    """Base helper for building valid Dynamixel Protocol 2.0 packets (instruction or status).

    `build()` is the single entry point: it lays out the raw packet via the subclass's
    `_build()`, applies byte stuffing, then fills in the CRC.
    """

    @classmethod
    def build(cls, dxl_id: int, params: list[int], length: int, *args, **kwargs) -> bytes:
        """Assemble a complete, wire-ready packet for servo `dxl_id`."""
        packet = cls._build(dxl_id, params, length, *args, **kwargs)
        packet = cls._add_stuffing(packet)
        packet = cls._add_crc(packet)
        return bytes(packet)

    # `abc.abstractclassmethod` is deprecated since Python 3.3; the supported spelling
    # is stacking `@classmethod` on top of `@abc.abstractmethod`.
    @classmethod
    @abc.abstractmethod
    def _build(cls, dxl_id: int, params: list[int], length: int, *args, **kwargs) -> list[int]:
        """Return the raw packet layout (with placeholder CRC bytes), before stuffing and CRC."""
        ...

    @staticmethod
    def _add_stuffing(packet: list[int]) -> list[int]:
        """
        Byte stuffing is a method of adding additional data to generated instruction packets to ensure that
        the packets are processed successfully. When the byte pattern "0xFF 0xFF 0xFD" appears in a packet,
        byte stuffing adds 0xFD to the end of the pattern to convert it to “0xFF 0xFF 0xFD 0xFD” to ensure
        that it is not interpreted as the header at the start of another packet.

        Source: https://emanual.robotis.com/docs/en/dxl/protocol2/#transmission-process

        Args:
            packet (list[int]): The raw packet without stuffing.

        Returns:
            list[int]: The packet stuffed if it contained a "0xFF 0xFF 0xFD" byte sequence in its data bytes.
        """
        packet_length_in = dxl.DXL_MAKEWORD(packet[dxl.PKT_LENGTH_L], packet[dxl.PKT_LENGTH_H])
        packet_length_out = packet_length_in

        temp = [0] * dxl.TXPACKET_MAX_LEN

        # FF FF FD XX ID LEN_L LEN_H
        temp[dxl.PKT_HEADER0 : dxl.PKT_HEADER0 + dxl.PKT_LENGTH_H + 1] = packet[
            dxl.PKT_HEADER0 : dxl.PKT_HEADER0 + dxl.PKT_LENGTH_H + 1
        ]

        index = dxl.PKT_INSTRUCTION

        # Copy the payload (instruction + params), inserting an extra 0xFD after any "FF FF FD" run.
        for i in range(0, packet_length_in - 2):  # except CRC
            temp[index] = packet[i + dxl.PKT_INSTRUCTION]
            index = index + 1
            if (
                packet[i + dxl.PKT_INSTRUCTION] == 0xFD
                and packet[i + dxl.PKT_INSTRUCTION - 1] == 0xFF
                and packet[i + dxl.PKT_INSTRUCTION - 2] == 0xFF
            ):
                # FF FF FD
                temp[index] = 0xFD
                index = index + 1
                packet_length_out = packet_length_out + 1

        # Carry over the two CRC placeholder bytes.
        temp[index] = packet[dxl.PKT_INSTRUCTION + packet_length_in - 2]
        temp[index + 1] = packet[dxl.PKT_INSTRUCTION + packet_length_in - 1]
        index = index + 2

        if packet_length_in != packet_length_out:
            packet = [0] * index
            packet[0:index] = temp[0:index]

        # Update the length field to account for any inserted stuffing bytes.
        packet[dxl.PKT_LENGTH_L] = dxl.DXL_LOBYTE(packet_length_out)
        packet[dxl.PKT_LENGTH_H] = dxl.DXL_HIBYTE(packet_length_out)

        return packet

    @staticmethod
    def _add_crc(packet: list[int]) -> list[int]:
        """Computes and add CRC to the packet.

        https://emanual.robotis.com/docs/en/dxl/crc/
        https://en.wikipedia.org/wiki/Cyclic_redundancy_check

        Args:
            packet (list[int]): The raw packet without CRC (but with placeholders for it).

        Returns:
            list[int]: The raw packet with a valid CRC.
        """
        crc = 0
        for j in range(len(packet) - 2):
            i = ((crc >> 8) ^ packet[j]) & 0xFF
            crc = ((crc << 8) ^ DXL_CRC_TABLE[i]) & 0xFFFF

        packet[-2] = dxl.DXL_LOBYTE(crc)
        packet[-1] = dxl.DXL_HIBYTE(crc)

        return packet
class MockInstructionPacket(MockDynamixelPacketv2):
    """
    Helper class to build valid Dynamixel Protocol 2.0 Instruction Packets.

    Protocol 2.0 Instruction Packet structure
    https://emanual.robotis.com/docs/en/dxl/protocol2/#instruction-packet
    | Header              | Packet ID | Length      | Instruction | Params            | CRC         |
    | ------------------- | --------- | ----------- | ----------- | ----------------- | ----------- |
    | 0xFF 0xFF 0xFD 0x00 | ID        | Len_L Len_H | Instr       | Param 1 … Param N | CRC_L CRC_H |
    """
    @classmethod
    def _build(cls, dxl_id: int, params: list[int], length: int, instruction: int) -> list[int]:
        # NOTE(review): the `length` argument is ignored and recomputed here; every caller
        # in this file passes a value equal to len(params) + 3 (+1 instruction, +2 CRC),
        # so behavior is unchanged, but the parameter is effectively dead.
        length = len(params) + 3
        return [
            0xFF, 0xFF, 0xFD, 0x00, # header
            dxl_id, # servo id
            dxl.DXL_LOBYTE(length), # length_l
            dxl.DXL_HIBYTE(length), # length_h
            instruction, # instruction type
            *params, # data bytes
            0x00, 0x00 # placeholder for CRC
        ] # fmt: skip
    @classmethod
    def ping(
        cls,
        dxl_id: int,
    ) -> bytes:
        """
        Builds a "Ping" instruction.
        https://emanual.robotis.com/docs/en/dxl/protocol2/#ping-0x01

        No parameters required; 'length' = 3 (+1 instruction byte, +2 CRC bytes).
        """
        return cls.build(dxl_id=dxl_id, params=[], length=3, instruction=dxl.INST_PING)
    @classmethod
    def read(
        cls,
        dxl_id: int,
        start_address: int,
        data_length: int,
    ) -> bytes:
        """
        Builds a "Read" instruction.
        https://emanual.robotis.com/docs/en/dxl/protocol2/#read-0x02

        The parameters for Read (Protocol 2.0) are:
            param[0] = start_address L
            param[1] = start_address H
            param[2] = data_length L
            param[3] = data_length H
        The instruction packet's 'length' field is len(params) + 3 = 7 here
        (+1 instruction byte, +2 CRC bytes); `data_length` only sizes the reply.
        """
        params = [
            dxl.DXL_LOBYTE(start_address),
            dxl.DXL_HIBYTE(start_address),
            dxl.DXL_LOBYTE(data_length),
            dxl.DXL_HIBYTE(data_length),
        ]
        length = len(params) + 3
        return cls.build(dxl_id=dxl_id, params=params, length=length, instruction=dxl.INST_READ)
    @classmethod
    def write(
        cls,
        dxl_id: int,
        value: int,
        start_address: int,
        data_length: int,
    ) -> bytes:
        """
        Builds a "Write" instruction.
        https://emanual.robotis.com/docs/en/dxl/protocol2/#write-0x03

        The parameters for Write (Protocol 2.0) are:
            param[0]   = start_address L
            param[1]   = start_address H
            param[2]   = 1st Byte
            param[3]   = 2nd Byte
            ...
            param[1+X] = X-th Byte
        And 'length' = data_length + 5, where:
            +1 is for instruction byte,
            +2 is for the address bytes,
            +2 is for the CRC at the end.
        """
        data = _split_into_byte_chunks(value, data_length)
        params = [
            dxl.DXL_LOBYTE(start_address),
            dxl.DXL_HIBYTE(start_address),
            *data,
        ]
        length = data_length + 5
        return cls.build(dxl_id=dxl_id, params=params, length=length, instruction=dxl.INST_WRITE)
    @classmethod
    def sync_read(
        cls,
        dxl_ids: list[int],
        start_address: int,
        data_length: int,
    ) -> bytes:
        """
        Builds a "Sync_Read" broadcast instruction.
        https://emanual.robotis.com/docs/en/dxl/protocol2/#sync-read-0x82

        The parameters for Sync_Read (Protocol 2.0) are:
            param[0]  = start_address L
            param[1]  = start_address H
            param[2]  = data_length L
            param[3]  = data_length H
            param[4+] = motor IDs to read from
        And 'length' = (number_of_ids + 7), where:
            +1 is for instruction byte,
            +2 is for the address bytes,
            +2 is for the length bytes,
            +2 is for the CRC at the end.
        """
        params = [
            dxl.DXL_LOBYTE(start_address),
            dxl.DXL_HIBYTE(start_address),
            dxl.DXL_LOBYTE(data_length),
            dxl.DXL_HIBYTE(data_length),
            *dxl_ids,
        ]
        length = len(dxl_ids) + 7
        return cls.build(
            dxl_id=dxl.BROADCAST_ID, params=params, length=length, instruction=dxl.INST_SYNC_READ
        )
    @classmethod
    def sync_write(
        cls,
        ids_values: dict[int, int],
        start_address: int,
        data_length: int,
    ) -> bytes:
        """
        Builds a "Sync_Write" broadcast instruction.
        https://emanual.robotis.com/docs/en/dxl/protocol2/#sync-write-0x83

        The parameters for Sync_Write (Protocol 2.0) are:
            param[0] = start_address L
            param[1] = start_address H
            param[2] = data_length L
            param[3] = data_length H
        then, for each motor, its ID followed by its `data_length` value bytes.
        And 'length' = number_of_motors * (1 + data_length) + 7, where:
            +1 is for instruction byte,
            +2 is for the address bytes,
            +2 is for the length bytes,
            +2 is for the CRC at the end.
        """
        data = []
        for id_, value in ids_values.items():
            split_value = _split_into_byte_chunks(value, data_length)
            data += [id_, *split_value]
        params = [
            dxl.DXL_LOBYTE(start_address),
            dxl.DXL_HIBYTE(start_address),
            dxl.DXL_LOBYTE(data_length),
            dxl.DXL_HIBYTE(data_length),
            *data,
        ]
        length = len(ids_values) * (1 + data_length) + 7
        return cls.build(
            dxl_id=dxl.BROADCAST_ID, params=params, length=length, instruction=dxl.INST_SYNC_WRITE
        )
class MockStatusPacket(MockDynamixelPacketv2):
    """
    Helper class to build valid Dynamixel Protocol 2.0 Status Packets.

    Protocol 2.0 Status Packet structure
    https://emanual.robotis.com/docs/en/dxl/protocol2/#status-packet
    | Header              | Packet ID | Length      | Instruction | Error | Params            | CRC         |
    | ------------------- | --------- | ----------- | ----------- | ----- | ----------------- | ----------- |
    | 0xFF 0xFF 0xFD 0x00 | ID        | Len_L Len_H | 0x55        | Err   | Param 1 … Param N | CRC_L CRC_H |
    """
    @classmethod
    def _build(cls, dxl_id: int, params: list[int], length: int, error: int = 0) -> list[int]:
        return [
            0xFF, 0xFF, 0xFD, 0x00, # header
            dxl_id, # servo id
            dxl.DXL_LOBYTE(length), # length_l
            dxl.DXL_HIBYTE(length), # length_h
            0x55, # instruction = 'status'
            error, # error
            *params, # data bytes
            0x00, 0x00 # placeholder for CRC
        ] # fmt: skip
    @classmethod
    def ping(cls, dxl_id: int, model_nb: int = 1190, firm_ver: int = 50, error: int = 0) -> bytes:
        """
        Builds a 'Ping' status packet.
        https://emanual.robotis.com/docs/en/dxl/protocol2/#ping-0x01

        Args:
            dxl_id (int): ID of the servo responding.
            model_nb (int, optional): Desired 'model number' to be returned in the packet. Defaults to 1190
                which corresponds to a XL330-M077-T.
            firm_ver (int, optional): Desired 'firmware version' to be returned in the packet.
                Defaults to 50.
            error (int, optional): Error code placed in the packet's error field. Defaults to 0 (no error).

        Returns:
            bytes: The raw 'Ping' status packet ready to be sent through serial.
        """
        params = [dxl.DXL_LOBYTE(model_nb), dxl.DXL_HIBYTE(model_nb), firm_ver]
        length = 7
        return cls.build(dxl_id, params=params, length=length, error=error)
    @classmethod
    def read(cls, dxl_id: int, value: int, param_length: int, error: int = 0) -> bytes:
        """
        Builds a 'Read' status packet (also works for 'Sync Read')
        https://emanual.robotis.com/docs/en/dxl/protocol2/#read-0x02
        https://emanual.robotis.com/docs/en/dxl/protocol2/#sync-read-0x82

        Args:
            dxl_id (int): ID of the servo responding.
            value (int): Desired value to be returned in the packet.
            param_length (int): The address length as reported in the control table.
            error (int, optional): Error code placed in the packet's error field. Defaults to 0 (no error).

        Returns:
            bytes: The raw 'Read' status packet ready to be sent through serial.
        """
        params = _split_into_byte_chunks(value, param_length)
        length = param_length + 4
        return cls.build(dxl_id, params=params, length=length, error=error)
class MockPortHandler(dxl.PortHandler):
    """
    This class overwrite the 'setupPort' method of the Dynamixel PortHandler because it can specify
    baudrates that are not supported with a serial port on MacOS.
    """
    def setupPort(self, cflag_baud):  # noqa: N802
        """Open the serial port without forcing a baudrate (setting one fails for pseudo-terminals on MacOS)."""
        if self.is_open:
            self.closePort()
        self.ser = serial.Serial(
            port=self.port_name,
            # baudrate=self.baudrate, <- This will fail on MacOS
            # parity = serial.PARITY_ODD,
            # stopbits = serial.STOPBITS_TWO,
            bytesize=serial.EIGHTBITS,
            timeout=0,  # non-blocking reads
        )
        self.is_open = True
        self.ser.reset_input_buffer()
        # ms per byte at the configured baudrate (10 bits per byte on the wire).
        self.tx_time_per_byte = (1000.0 / self.baudrate) * 10.0
        return True
class MockMotors(MockSerial):
    """
    This class will simulate physical motors by responding with valid status packets upon receiving some
    instruction packets. It is meant to test MotorsBus classes.
    """
    def __init__(self):
        # No extra state; kept only to make the constructor explicit.
        super().__init__()
    @property
    def stubs(self) -> dict[str, WaitableStub]:
        # Re-declared only to narrow the return type annotation to WaitableStub.
        return super().stubs
    def stub(self, *, name=None, **kwargs):
        # Override MockSerial.stub() so that created stubs are WaitableStub instances
        # (they can block until the stub has been called). Reaches into MockSerial's
        # name-mangled private dict since there is no public registration hook.
        new_stub = WaitableStub(**kwargs)
        self._MockSerial__stubs[name or new_stub.receive_bytes] = new_stub
        return new_stub
    def build_broadcast_ping_stub(
        self, ids_models: dict[int, list[int]] | None = None, num_invalid_try: int = 0
    ) -> str:
        """Register a stub answering a broadcast ping with one ping status per motor; returns the stub name."""
        ping_request = MockInstructionPacket.ping(dxl.BROADCAST_ID)
        return_packets = b"".join(MockStatusPacket.ping(id_, model) for id_, model in ids_models.items())
        ping_response = self._build_send_fn(return_packets, num_invalid_try)
        stub_name = "Ping_" + "_".join([str(id_) for id_ in ids_models])
        self.stub(
            name=stub_name,
            receive_bytes=ping_request,
            send_fn=ping_response,
        )
        return stub_name
    def build_ping_stub(
        self, dxl_id: int, model_nb: int, firm_ver: int = 50, num_invalid_try: int = 0, error: int = 0
    ) -> str:
        """Register a stub answering a single-motor ping; returns the stub name."""
        ping_request = MockInstructionPacket.ping(dxl_id)
        return_packet = MockStatusPacket.ping(dxl_id, model_nb, firm_ver, error)
        ping_response = self._build_send_fn(return_packet, num_invalid_try)
        stub_name = f"Ping_{dxl_id}"
        self.stub(
            name=stub_name,
            receive_bytes=ping_request,
            send_fn=ping_response,
        )
        return stub_name
    def build_read_stub(
        self,
        address: int,
        length: int,
        dxl_id: int,
        value: int,
        reply: bool = True,
        error: int = 0,
        num_invalid_try: int = 0,
    ) -> str:
        """Register a stub answering a Read instruction with `value` (or nothing if reply=False); returns the stub name."""
        read_request = MockInstructionPacket.read(dxl_id, address, length)
        return_packet = MockStatusPacket.read(dxl_id, value, length, error) if reply else b""
        read_response = self._build_send_fn(return_packet, num_invalid_try)
        stub_name = f"Read_{address}_{length}_{dxl_id}_{value}_{error}"
        self.stub(
            name=stub_name,
            receive_bytes=read_request,
            send_fn=read_response,
        )
        return stub_name
    def build_write_stub(
        self,
        address: int,
        length: int,
        dxl_id: int,
        value: int,
        reply: bool = True,
        error: int = 0,
        num_invalid_try: int = 0,
    ) -> str:
        """Register a stub acknowledging a Write instruction with an empty status packet; returns the stub name."""
        sync_read_request = MockInstructionPacket.write(dxl_id, value, address, length)
        return_packet = MockStatusPacket.build(dxl_id, params=[], length=4, error=error) if reply else b""
        stub_name = f"Write_{address}_{length}_{dxl_id}"
        self.stub(
            name=stub_name,
            receive_bytes=sync_read_request,
            send_fn=self._build_send_fn(return_packet, num_invalid_try),
        )
        return stub_name
    def build_sync_read_stub(
        self,
        address: int,
        length: int,
        ids_values: dict[int, int],
        reply: bool = True,
        num_invalid_try: int = 0,
    ) -> str:
        """Register a stub answering a Sync_Read with one status packet per motor; returns the stub name."""
        sync_read_request = MockInstructionPacket.sync_read(list(ids_values), address, length)
        return_packets = (
            b"".join(MockStatusPacket.read(id_, pos, length) for id_, pos in ids_values.items())
            if reply
            else b""
        )
        sync_read_response = self._build_send_fn(return_packets, num_invalid_try)
        stub_name = f"Sync_Read_{address}_{length}_" + "_".join([str(id_) for id_ in ids_values])
        self.stub(
            name=stub_name,
            receive_bytes=sync_read_request,
            send_fn=sync_read_response,
        )
        return stub_name
    def build_sequential_sync_read_stub(
        self, address: int, length: int, ids_values: dict[int, list[int]] | None = None
    ) -> str:
        """Register a Sync_Read stub whose reply advances through per-motor value sequences on each call; returns the stub name."""
        sequence_length = len(next(iter(ids_values.values())))
        # All motors must provide the same number of sequential values.
        assert all(len(positions) == sequence_length for positions in ids_values.values())
        sync_read_request = MockInstructionPacket.sync_read(list(ids_values), address, length)
        sequential_packets = []
        for count in range(sequence_length):
            return_packets = b"".join(
                MockStatusPacket.read(id_, positions[count], length) for id_, positions in ids_values.items()
            )
            sequential_packets.append(return_packets)
        sync_read_response = self._build_sequential_send_fn(sequential_packets)
        stub_name = f"Seq_Sync_Read_{address}_{length}_" + "_".join([str(id_) for id_ in ids_values])
        self.stub(
            name=stub_name,
            receive_bytes=sync_read_request,
            send_fn=sync_read_response,
        )
        return stub_name
    def build_sync_write_stub(
        self, address: int, length: int, ids_values: dict[int, int], num_invalid_try: int = 0
    ) -> str:
        """Register a stub accepting a Sync_Write (broadcast writes get no status reply); returns the stub name."""
        sync_read_request = MockInstructionPacket.sync_write(ids_values, address, length)
        stub_name = f"Sync_Write_{address}_{length}_" + "_".join([str(id_) for id_ in ids_values])
        self.stub(
            name=stub_name,
            receive_bytes=sync_read_request,
            send_fn=self._build_send_fn(b"", num_invalid_try),
        )
        return stub_name
    @staticmethod
    def _build_send_fn(packet: bytes, num_invalid_try: int = 0) -> Callable[[int], bytes]:
        """Return a send_fn replying with `packet`, but with an empty reply for the first `num_invalid_try` calls."""
        def send_fn(_call_count: int) -> bytes:
            if num_invalid_try >= _call_count:
                return b""
            return packet
        return send_fn
    @staticmethod
    def _build_sequential_send_fn(packets: list[bytes]) -> Callable[[int], bytes]:
        """Return a send_fn replying with packets[i] on the (i+1)-th call (call counts start at 1)."""
        def send_fn(_call_count: int) -> bytes:
            return packets[_call_count - 1]
        return send_fn
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/mocks/mock_dynamixel.py",
"license": "Apache License 2.0",
"lines": 514,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/mocks/mock_feetech.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from collections.abc import Callable
import scservo_sdk as scs
import serial
from mock_serial import MockSerial
from lerobot.motors.feetech.feetech import _split_into_byte_chunks, patch_setPacketTimeout
from .mock_serial_patch import WaitableStub
class MockFeetechPacket(abc.ABC):
    """Base helper for building valid Feetech serial-bus packets (instruction or status).

    `build()` is the single entry point: it lays out the raw packet via the subclass's
    `_build()`, then fills in the trailing checksum byte.
    """

    @classmethod
    def build(cls, scs_id: int, params: list[int], length: int, *args, **kwargs) -> bytes:
        """Assemble a complete, wire-ready packet for servo `scs_id`."""
        packet = cls._build(scs_id, params, length, *args, **kwargs)
        packet = cls._add_checksum(packet)
        return bytes(packet)

    # `abc.abstractclassmethod` is deprecated since Python 3.3; the supported spelling
    # is stacking `@classmethod` on top of `@abc.abstractmethod`.
    @classmethod
    @abc.abstractmethod
    def _build(cls, scs_id: int, params: list[int], length: int, *args, **kwargs) -> list[int]:
        """Return the raw packet layout (with a checksum placeholder) for this packet type."""
        ...

    @staticmethod
    def _add_checksum(packet: list[int]) -> list[int]:
        """Fill the last byte with ~(sum of bytes between header and checksum) & 0xFF."""
        checksum = 0
        for id_ in range(2, len(packet) - 1):  # except header & checksum
            checksum += packet[id_]
        packet[-1] = ~checksum & 0xFF
        return packet
class MockInstructionPacket(MockFeetechPacket):
    """
    Helper class to build valid Feetech Instruction Packets.
    Instruction Packet structure
    (from https://files.waveshare.com/upload/2/27/Communication_Protocol_User_Manual-EN%28191218-0923%29.pdf)
    | Header | Packet ID | Length | Instruction | Params | Checksum |
    | --------- | --------- | ------ | ----------- | ----------------- | -------- |
    | 0xFF 0xFF | ID | Len | Instr | Param 1 … Param N | Sum |
    """
    @classmethod
    def _build(cls, scs_id: int, params: list[int], length: int, instruction: int) -> list[int]:
        return [
            0xFF, 0xFF,  # header
            scs_id,  # servo id
            length,  # length
            instruction,  # instruction type
            *params,  # data bytes
            0x00,  # placeholder for checksum
        ]  # fmt: skip
    @classmethod
    def ping(
        cls,
        scs_id: int,
    ) -> bytes:
        """
        Builds a "Ping" broadcast instruction.
        No parameters required.
        """
        return cls.build(scs_id=scs_id, params=[], length=2, instruction=scs.INST_PING)
    @classmethod
    def read(
        cls,
        scs_id: int,
        start_address: int,
        data_length: int,
    ) -> bytes:
        """
        Builds a "Read" instruction.
        The parameters for Read are:
            param[0] = start_address
            param[1] = data_length
        And 'length' = 4, where:
            +1 is for instruction byte,
            +1 is for the address byte,
            +1 is for the length bytes,
            +1 is for the checksum at the end.
        """
        params = [start_address, data_length]
        length = 4
        return cls.build(scs_id=scs_id, params=params, length=length, instruction=scs.INST_READ)
    @classmethod
    def write(
        cls,
        scs_id: int,
        value: int,
        start_address: int,
        data_length: int,
    ) -> bytes:
        """
        Builds a "Write" instruction.
        The parameters for Write are (Feetech uses a single address byte,
        unlike the 2-byte Dynamixel addresses):
            param[0] = start_address
            param[1] = 1st Byte
            param[2] = 2nd Byte
            ...
            param[X] = X-th Byte
        And 'length' = data_length + 3, where:
            +1 is for instruction byte,
            +1 is for the address byte,
            +1 is for the checksum at the end.
        """
        data = _split_into_byte_chunks(value, data_length)
        params = [start_address, *data]
        length = data_length + 3
        return cls.build(scs_id=scs_id, params=params, length=length, instruction=scs.INST_WRITE)
    @classmethod
    def sync_read(
        cls,
        scs_ids: list[int],
        start_address: int,
        data_length: int,
    ) -> bytes:
        """
        Builds a "Sync_Read" broadcast instruction.
        The parameters for Sync Read are:
            param[0] = start_address
            param[1] = data_length
            param[2+] = motor IDs to read from
        And 'length' = (number_of_params + 4), where:
            +1 is for instruction byte,
            +1 is for the address byte,
            +1 is for the length bytes,
            +1 is for the checksum at the end.
        """
        params = [start_address, data_length, *scs_ids]
        length = len(scs_ids) + 4
        return cls.build(
            scs_id=scs.BROADCAST_ID, params=params, length=length, instruction=scs.INST_SYNC_READ
        )
    @classmethod
    def sync_write(
        cls,
        ids_values: dict[int, int],
        start_address: int,
        data_length: int,
    ) -> bytes:
        """
        Builds a "Sync_Write" broadcast instruction.
        The parameters for Sync_Write are:
            param[0] = start_address
            param[1] = data_length
            param[2] = [1st motor] ID
            param[2+1] = [1st motor] 1st Byte
            param[2+2] = [1st motor] 2nd Byte
            ...
            param[5+X] = [1st motor] X-th Byte
            param[6] = [2nd motor] ID
            param[6+1] = [2nd motor] 1st Byte
            param[6+2] = [2nd motor] 2nd Byte
            ...
            param[6+X] = [2nd motor] X-th Byte
        And 'length' = len(ids_values) * (1 + data_length) + 4, where:
            +1 is for instruction byte,
            +1 is for the address byte,
            +1 is for the length bytes,
            +1 is for the checksum at the end.
        """
        data = []
        for id_, value in ids_values.items():
            split_value = _split_into_byte_chunks(value, data_length)
            # Each motor contributes its id followed by its value bytes.
            data += [id_, *split_value]
        params = [start_address, data_length, *data]
        length = len(ids_values) * (1 + data_length) + 4
        return cls.build(
            scs_id=scs.BROADCAST_ID, params=params, length=length, instruction=scs.INST_SYNC_WRITE
        )
class MockStatusPacket(MockFeetechPacket):
    """Builds valid Feetech Status Packets for the mock serial bus.

    Layout, per the Waveshare communication protocol manual
    (https://files.waveshare.com/upload/2/27/Communication_Protocol_User_Manual-EN%28191218-0923%29.pdf):
    [0xFF, 0xFF, packet_id, length, error, param_1..param_N, checksum]
    """
    @classmethod
    def _build(cls, scs_id: int, params: list[int], length: int, error: int = 0) -> list[int]:
        header = [0xFF, 0xFF]
        body = [scs_id, length, error, *params]
        # Trailing 0x00 is the checksum slot, filled by _add_checksum().
        return header + body + [0x00]
    @classmethod
    def ping(cls, scs_id: int, error: int = 0) -> bytes:
        """Status reply to a Ping instruction: no params, fixed length of 2.

        Args:
            scs_id (int): ID of the servo responding.
            error (int, optional): Error to be returned. Defaults to 0 (success).
        Returns:
            bytes: The raw 'Ping' status packet ready to be sent through serial.
        """
        return cls.build(scs_id, params=[], length=2, error=error)
    @classmethod
    def read(cls, scs_id: int, value: int, param_length: int, error: int = 0) -> bytes:
        """Status reply to a Read instruction carrying `value`.

        Args:
            scs_id (int): ID of the servo responding.
            value (int): Desired value to be returned in the packet.
            param_length (int): The address length as reported in the control table.
        Returns:
            bytes: The raw 'Read' status packet ready to be sent through serial.
        """
        payload = _split_into_byte_chunks(value, param_length)
        # length counts the error byte, the value bytes and the checksum slot.
        return cls.build(scs_id, params=payload, length=param_length + 2, error=error)
class MockPortHandler(scs.PortHandler):
    """
    Overrides the 'setupPort' method of the Feetech PortHandler because the parent
    implementation can specify baudrates that are not supported with a serial port on MacOS.
    """
    def setupPort(self, cflag_baud):  # noqa: N802
        """Open the underlying serial port without forcing a baudrate (see class docstring)."""
        if self.is_open:
            self.closePort()
        self.ser = serial.Serial(
            port=self.port_name,
            # baudrate=self.baudrate, <- This will fail on MacOS
            # parity = serial.PARITY_ODD,
            # stopbits = serial.STOPBITS_TWO,
            bytesize=serial.EIGHTBITS,
            timeout=0,
        )
        self.is_open = True
        self.ser.reset_input_buffer()
        # Milliseconds per byte, assuming 10 bits on the wire per byte
        # (presumably start + 8 data + stop — mirrors the upstream SDK; verify).
        self.tx_time_per_byte = (1000.0 / self.baudrate) * 10.0
        return True
    def setPacketTimeout(self, packet_length):  # noqa: N802
        """Delegate to the patched timeout helper from lerobot's feetech module."""
        return patch_setPacketTimeout(self, packet_length)
class MockMotors(MockSerial):
    """
    This class will simulate physical motors by responding with valid status packets upon receiving some
    instruction packets. It is meant to test MotorsBus classes.
    """
    def __init__(self):
        super().__init__()
    @property
    def stubs(self) -> dict[str, WaitableStub]:
        # Narrows the declared type: stub() below only ever registers WaitableStub instances.
        return super().stubs
    def stub(self, *, name=None, **kwargs):
        """Register a WaitableStub (instead of mock_serial's plain Stub) under `name`."""
        new_stub = WaitableStub(**kwargs)
        # `_MockSerial__stubs` is the name-mangled private stub dict of the MockSerial base class.
        self._MockSerial__stubs[name or new_stub.receive_bytes] = new_stub
        return new_stub
    def build_broadcast_ping_stub(self, ids: list[int] | None = None, num_invalid_try: int = 0) -> str:
        """Reply to a broadcast Ping with one status packet per id, in order.

        NOTE(review): the `ids=None` default would crash on iteration below;
        callers must always pass a list.
        """
        ping_request = MockInstructionPacket.ping(scs.BROADCAST_ID)
        return_packets = b"".join(MockStatusPacket.ping(id_) for id_ in ids)
        ping_response = self._build_send_fn(return_packets, num_invalid_try)
        stub_name = "Ping_" + "_".join([str(id_) for id_ in ids])
        self.stub(
            name=stub_name,
            receive_bytes=ping_request,
            send_fn=ping_response,
        )
        return stub_name
    def build_ping_stub(self, scs_id: int, num_invalid_try: int = 0, error: int = 0) -> str:
        """Reply to a Ping addressed to `scs_id` with a single status packet."""
        ping_request = MockInstructionPacket.ping(scs_id)
        return_packet = MockStatusPacket.ping(scs_id, error)
        ping_response = self._build_send_fn(return_packet, num_invalid_try)
        stub_name = f"Ping_{scs_id}_{error}"
        self.stub(
            name=stub_name,
            receive_bytes=ping_request,
            send_fn=ping_response,
        )
        return stub_name
    def build_read_stub(
        self,
        address: int,
        length: int,
        scs_id: int,
        value: int,
        reply: bool = True,
        error: int = 0,
        num_invalid_try: int = 0,
    ) -> str:
        """Reply to a Read of (`address`, `length`) on `scs_id` with `value`.

        `reply=False` simulates a silent motor (no status packet at all).
        """
        read_request = MockInstructionPacket.read(scs_id, address, length)
        return_packet = MockStatusPacket.read(scs_id, value, length, error) if reply else b""
        read_response = self._build_send_fn(return_packet, num_invalid_try)
        stub_name = f"Read_{address}_{length}_{scs_id}_{value}_{error}"
        self.stub(
            name=stub_name,
            receive_bytes=read_request,
            send_fn=read_response,
        )
        return stub_name
    def build_write_stub(
        self,
        address: int,
        length: int,
        scs_id: int,
        value: int,
        reply: bool = True,
        error: int = 0,
        num_invalid_try: int = 0,
    ) -> str:
        """Acknowledge a Write of `value` to (`address`, `length`) on `scs_id`."""
        sync_read_request = MockInstructionPacket.write(scs_id, value, address, length)
        # The acknowledgement is an empty status packet (params=[], length=2).
        return_packet = MockStatusPacket.build(scs_id, params=[], length=2, error=error) if reply else b""
        stub_name = f"Write_{address}_{length}_{scs_id}"
        self.stub(
            name=stub_name,
            receive_bytes=sync_read_request,
            send_fn=self._build_send_fn(return_packet, num_invalid_try),
        )
        return stub_name
    def build_sync_read_stub(
        self,
        address: int,
        length: int,
        ids_values: dict[int, int],
        reply: bool = True,
        num_invalid_try: int = 0,
    ) -> str:
        """Reply to a Sync_Read with one Read status packet per motor, in dict order."""
        sync_read_request = MockInstructionPacket.sync_read(list(ids_values), address, length)
        return_packets = (
            b"".join(MockStatusPacket.read(id_, pos, length) for id_, pos in ids_values.items())
            if reply
            else b""
        )
        sync_read_response = self._build_send_fn(return_packets, num_invalid_try)
        stub_name = f"Sync_Read_{address}_{length}_" + "_".join([str(id_) for id_ in ids_values])
        self.stub(
            name=stub_name,
            receive_bytes=sync_read_request,
            send_fn=sync_read_response,
        )
        return stub_name
    def build_sequential_sync_read_stub(
        self, address: int, length: int, ids_values: dict[int, list[int]] | None = None
    ) -> str:
        """Reply to repeated identical Sync_Reads with a different value per call.

        Each motor maps to a list of values; the i-th call returns the i-th value
        of every motor. All lists must have the same length.
        NOTE(review): the `ids_values=None` default would crash below; callers
        must always pass a dict.
        """
        sequence_length = len(next(iter(ids_values.values())))
        assert all(len(positions) == sequence_length for positions in ids_values.values())
        sync_read_request = MockInstructionPacket.sync_read(list(ids_values), address, length)
        sequential_packets = []
        for count in range(sequence_length):
            return_packets = b"".join(
                MockStatusPacket.read(id_, positions[count], length) for id_, positions in ids_values.items()
            )
            sequential_packets.append(return_packets)
        sync_read_response = self._build_sequential_send_fn(sequential_packets)
        stub_name = f"Seq_Sync_Read_{address}_{length}_" + "_".join([str(id_) for id_ in ids_values])
        self.stub(
            name=stub_name,
            receive_bytes=sync_read_request,
            send_fn=sync_read_response,
        )
        return stub_name
    def build_sync_write_stub(
        self, address: int, length: int, ids_values: dict[int, int], num_invalid_try: int = 0
    ) -> str:
        """Accept a Sync_Write; broadcast writes get no status reply, hence the empty send."""
        sync_read_request = MockInstructionPacket.sync_write(ids_values, address, length)
        stub_name = f"Sync_Write_{address}_{length}_" + "_".join([str(id_) for id_ in ids_values])
        self.stub(
            name=stub_name,
            receive_bytes=sync_read_request,
            send_fn=self._build_send_fn(b"", num_invalid_try),
        )
        return stub_name
    @staticmethod
    def _build_send_fn(packet: bytes, num_invalid_try: int = 0) -> Callable[[int], bytes]:
        """Return a send_fn that replies with nothing for the first `num_invalid_try`
        calls (simulating comm failures), then with `packet`."""
        def send_fn(_call_count: int) -> bytes:
            if num_invalid_try >= _call_count:
                return b""
            return packet
        return send_fn
    @staticmethod
    def _build_sequential_send_fn(packets: list[bytes]) -> Callable[[int], bytes]:
        """Return a send_fn that replays `packets` one at a time (1-based call counter)."""
        def send_fn(_call_count: int) -> bytes:
            return packets[_call_count - 1]
        return send_fn
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/mocks/mock_feetech.py",
"license": "Apache License 2.0",
"lines": 378,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/mocks/mock_motors_bus.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ruff: noqa: N802
from lerobot.motors.motors_bus import (
Motor,
MotorsBus,
)
# Dummy register maps for MockMotorsBus.
# NOTE(review): entries appear to be {register_name: (address, n_bytes)} — confirm against MotorsBus.
DUMMY_CTRL_TABLE_1 = {
    "Firmware_Version": (0, 1),
    "Model_Number": (1, 2),
    "Present_Position": (3, 4),
    "Goal_Position": (11, 2),
}
DUMMY_CTRL_TABLE_2 = {
    "Model_Number": (0, 2),
    "Firmware_Version": (2, 1),
    "Present_Position": (3, 4),
    "Present_Velocity": (7, 4),
    "Goal_Position": (11, 4),
    "Goal_Velocity": (15, 4),
    "Lock": (19, 1),
}
DUMMY_MODEL_CTRL_TABLE = {
    "model_1": DUMMY_CTRL_TABLE_1,
    "model_2": DUMMY_CTRL_TABLE_2,
    "model_3": DUMMY_CTRL_TABLE_2,
}
# Baudrate index -> baudrate value, shared by all three dummy models.
DUMMY_BAUDRATE_TABLE = {
    0: 1_000_000,
    1: 500_000,
    2: 250_000,
}
DUMMY_MODEL_BAUDRATE_TABLE = {
    "model_1": DUMMY_BAUDRATE_TABLE,
    "model_2": DUMMY_BAUDRATE_TABLE,
    "model_3": DUMMY_BAUDRATE_TABLE,
}
# NOTE(review): values presumably identify the sign-encoding (e.g. sign-bit index)
# used by _encode_sign/_decode_sign — verify against the real encoding tables.
DUMMY_ENCODING_TABLE = {
    "Present_Position": 8,
    "Goal_Position": 10,
}
DUMMY_MODEL_ENCODING_TABLE = {
    "model_1": DUMMY_ENCODING_TABLE,
    "model_2": DUMMY_ENCODING_TABLE,
    "model_3": DUMMY_ENCODING_TABLE,
}
# Model name -> model number reported on ping.
DUMMY_MODEL_NUMBER_TABLE = {
    "model_1": 1234,
    "model_2": 5678,
    "model_3": 5799,
}
# Model name -> encoder resolution (ticks per revolution).
DUMMY_MODEL_RESOLUTION_TABLE = {
    "model_1": 4096,
    "model_2": 1024,
    "model_3": 4096,
}
class MockPortHandler:
    """In-memory stand-in for a motors-SDK ``PortHandler``.

    Keeps the SDK's camelCase method names (file-level ``# ruff: noqa: N802``)
    and performs no real I/O: unused protocol methods are deliberate no-ops.
    """
    def __init__(self, port_name):
        self.is_open: bool = False
        self.baudrate: int
        self.packet_start_time: float
        self.packet_timeout: float
        self.tx_time_per_byte: float
        self.is_using: bool = False
        self.port_name: str = port_name
        self.ser = None
    def openPort(self):
        self.is_open = True
        return self.is_open
    def closePort(self):
        self.is_open = False
    def clearPort(self): ...
    def setPortName(self, port_name):
        self.port_name = port_name
    def getPortName(self):
        return self.port_name
    def setBaudRate(self, baudrate):
        # Bug fix: this was `self.baudrate: baudrate` — a bare annotation that
        # never assigned anything, so getBaudRate() raised AttributeError.
        self.baudrate = baudrate
    def getBaudRate(self):
        return self.baudrate
    def getBytesAvailable(self): ...
    def readPort(self, length): ...
    def writePort(self, packet): ...
    def setPacketTimeout(self, packet_length): ...
    def setPacketTimeoutMillis(self, msec): ...
    def isPacketTimeout(self): ...
    def getCurrentTime(self): ...
    def getTimeSinceStart(self): ...
    def setupPort(self, cflag_baud): ...
    def getCFlagBaud(self, baudrate): ...
class MockMotorsBus(MotorsBus):
    """Concrete MotorsBus for unit tests: dummy model tables, an in-memory
    MockPortHandler instead of real serial hardware, and no-op abstract hooks."""
    available_baudrates = [500_000, 1_000_000]
    default_timeout = 1000
    model_baudrate_table = DUMMY_MODEL_BAUDRATE_TABLE
    model_ctrl_table = DUMMY_MODEL_CTRL_TABLE
    model_encoding_table = DUMMY_MODEL_ENCODING_TABLE
    model_number_table = DUMMY_MODEL_NUMBER_TABLE
    model_resolution_table = DUMMY_MODEL_RESOLUTION_TABLE
    normalized_data = ["Present_Position", "Goal_Position"]
    def __init__(self, port: str, motors: dict[str, Motor]):
        super().__init__(port, motors)
        # Swap the real serial port handler for the in-memory mock.
        self.port_handler = MockPortHandler(port)
    # Every abstract hook below is an intentional no-op: tests only need the
    # bus to instantiate and hold its tables, not to talk a real protocol.
    def _assert_protocol_is_compatible(self, instruction_name): ...
    def _handshake(self): ...
    def _find_single_motor(self, motor, initial_baudrate): ...
    def configure_motors(self): ...
    def is_calibrated(self): ...
    def read_calibration(self): ...
    def write_calibration(self, calibration_dict): ...
    def disable_torque(self, motors, num_retry): ...
    def _disable_torque(self, motor, model, num_retry): ...
    def enable_torque(self, motors, num_retry): ...
    def _get_half_turn_homings(self, positions): ...
    def _encode_sign(self, data_name, ids_values): ...
    def _decode_sign(self, data_name, ids_values): ...
    def _split_into_byte_chunks(self, value, length): ...
    def broadcast_ping(self, num_retry, raise_on_error): ...
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/mocks/mock_motors_bus.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/mocks/mock_robot.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from dataclasses import dataclass, field
from functools import cached_property
from lerobot.cameras import CameraConfig, make_cameras_from_configs
from lerobot.motors.motors_bus import Motor, MotorNormMode
from lerobot.processor import RobotAction, RobotObservation
from lerobot.robots import Robot, RobotConfig
from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
from tests.mocks.mock_motors_bus import MockMotorsBus
@RobotConfig.register_subclass("mock_robot")
@dataclass
class MockRobotConfig(RobotConfig):
    """Configuration for MockRobot: either random motor values or a fixed static list."""
    n_motors: int = 3
    cameras: dict[str, CameraConfig] = field(default_factory=dict)
    random_values: bool = True
    static_values: list[float] | None = None
    calibrated: bool = True
    def __post_init__(self):
        """Validate option combinations; raises ValueError on inconsistent settings."""
        if self.n_motors < 1:
            # Fix: was `raise ValueError(self.n_motors)` — an error with no explanation.
            raise ValueError(f"n_motors must be >= 1, got {self.n_motors}")
        if self.random_values and self.static_values is not None:
            raise ValueError("Choose either random values or static values")
        if self.static_values is not None and len(self.static_values) != self.n_motors:
            raise ValueError("Specify the same number of static values as motors")
        if len(self.cameras) > 0:
            raise NotImplementedError  # TODO with the cameras refactor
class MockRobot(Robot):
    """Mock Robot to be used for testing.

    Produces per-motor float observations/actions without any hardware:
    values are either random or the static list from MockRobotConfig.
    """
    config_class = MockRobotConfig
    name = "mock_robot"
    def __init__(self, config: MockRobotConfig):
        super().__init__(config)
        self.config = config
        self._is_connected = False
        self._is_calibrated = config.calibrated
        self.cameras = make_cameras_from_configs(config.cameras)
        mock_motors = {}
        for i in range(config.n_motors):
            motor_name = f"motor_{i + 1}"
            mock_motors[motor_name] = Motor(
                id=i + 1,
                model="model_1",  # Use model_1 which exists in MockMotorsBus tables
                norm_mode=MotorNormMode.RANGE_M100_100,
            )
        self.bus = MockMotorsBus("/dev/dummy-port", mock_motors)
        # NOTE(fracapuano): The .motors attribute was used from the previous interface
        self.motors = [f"motor_{i + 1}" for i in range(config.n_motors)]
    @property
    def _motors_ft(self) -> dict[str, type]:
        # One float feature per motor, keyed "motor_i.pos".
        return {f"{motor}.pos": float for motor in self.motors}
    @property
    def _cameras_ft(self) -> dict[str, tuple]:
        # (height, width, channels); MockRobotConfig currently rejects any cameras.
        return {
            cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
        }
    @cached_property
    def observation_features(self) -> dict[str, type | tuple]:
        return {**self._motors_ft, **self._cameras_ft}
    @cached_property
    def action_features(self) -> dict[str, type]:
        return self._motors_ft
    @property
    def is_connected(self) -> bool:
        return self._is_connected
    @check_if_already_connected
    def connect(self, calibrate: bool = True) -> None:
        self._is_connected = True
        if calibrate:
            self.calibrate()
    @property
    def is_calibrated(self) -> bool:
        return self._is_calibrated
    @check_if_not_connected
    def calibrate(self) -> None:
        self._is_calibrated = True
    def configure(self) -> None:
        pass
    @check_if_not_connected
    def get_observation(self) -> RobotObservation:
        """Return one position per motor: random in [-100, 100] or the configured static values."""
        if self.config.random_values:
            return {f"{motor}.pos": random.uniform(-100, 100) for motor in self.motors}
        else:
            return {
                f"{motor}.pos": val for motor, val in zip(self.motors, self.config.static_values, strict=True)
            }
    @check_if_not_connected
    def send_action(self, action: RobotAction) -> RobotAction:
        # Echo the action back unchanged (no hardware to drive).
        return action
    @check_if_not_connected
    def disconnect(self) -> None:
        self._is_connected = False
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/mocks/mock_robot.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/mocks/mock_serial_patch.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from mock_serial.mock_serial import Stub
class WaitableStub(Stub):
    """A Stub that records when it is triggered, so tests can block on it.

    `MockSerial` matches and calls stubs from a background thread, so a test
    may assert on a stub before that thread has had time to read, match, and
    call it — making the test fail randomly. `wait_called()` / `wait_calls()`
    block until the stub actually fires, avoiding that race.
    Proposed fix:
    https://github.com/benthorner/mock_serial/pull/3
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._event = threading.Event()
    def call(self):
        # Set the event before delegating, then forward the base-class result.
        self._event.set()
        return super().call()
    def wait_called(self, timeout: float = 1.0):
        """Block until the stub is called at least once; False if `timeout` elapses."""
        return self._event.wait(timeout)
    def wait_calls(self, min_calls: int = 1, timeout: float = 1.0):
        """Poll until the stub has been called `min_calls` times; raise TimeoutError otherwise."""
        deadline = time.perf_counter() + timeout
        while time.perf_counter() < deadline:
            n_calls = self.calls
            if n_calls >= min_calls:
                return n_calls
            time.sleep(0.005)
        raise TimeoutError(f"Stub not called {min_calls} times within {timeout} seconds.")
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/mocks/mock_serial_patch.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/mocks/mock_teleop.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from dataclasses import dataclass
from functools import cached_property
from typing import Any
from lerobot.processor import RobotAction
from lerobot.teleoperators import Teleoperator, TeleoperatorConfig
from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
@TeleoperatorConfig.register_subclass("mock_teleop")
@dataclass
class MockTeleopConfig(TeleoperatorConfig):
    """Configuration for MockTeleop: either random action values or a fixed static list."""
    n_motors: int = 3
    random_values: bool = True
    static_values: list[float] | None = None
    calibrated: bool = True
    def __post_init__(self):
        """Validate option combinations; raises ValueError on inconsistent settings."""
        if self.n_motors < 1:
            # Fix: was `raise ValueError(self.n_motors)` — an error with no explanation.
            raise ValueError(f"n_motors must be >= 1, got {self.n_motors}")
        if self.random_values and self.static_values is not None:
            raise ValueError("Choose either random values or static values")
        if self.static_values is not None and len(self.static_values) != self.n_motors:
            raise ValueError("Specify the same number of static values as motors")
class MockTeleop(Teleoperator):
    """Mock Teleoperator to be used for testing.

    Produces per-motor float actions without hardware: random values or the
    static list from MockTeleopConfig.
    """
    config_class = MockTeleopConfig
    name = "mock_teleop"
    def __init__(self, config: MockTeleopConfig):
        super().__init__(config)
        self.config = config
        self._is_connected = False
        self._is_calibrated = config.calibrated
        self.motors = [f"motor_{i + 1}" for i in range(config.n_motors)]
    @cached_property
    def action_features(self) -> dict[str, type]:
        # One float feature per motor, keyed "motor_i.pos".
        return {f"{motor}.pos": float for motor in self.motors}
    @cached_property
    def feedback_features(self) -> dict[str, type]:
        return {f"{motor}.pos": float for motor in self.motors}
    @property
    def is_connected(self) -> bool:
        return self._is_connected
    @check_if_already_connected
    def connect(self, calibrate: bool = True) -> None:
        self._is_connected = True
        if calibrate:
            self.calibrate()
    @property
    def is_calibrated(self) -> bool:
        return self._is_calibrated
    @check_if_not_connected
    def calibrate(self) -> None:
        self._is_calibrated = True
    def configure(self) -> None:
        pass
    @check_if_not_connected
    def get_action(self) -> RobotAction:
        """Return one position per motor: random in [-100, 100] or the configured static values."""
        if self.config.random_values:
            return {f"{motor}.pos": random.uniform(-100, 100) for motor in self.motors}
        else:
            return {
                f"{motor}.pos": val for motor, val in zip(self.motors, self.config.static_values, strict=True)
            }
    @check_if_not_connected
    def send_feedback(self, feedback: dict[str, Any]) -> None: ...
    @check_if_not_connected
    def disconnect(self) -> None:
        self._is_connected = False
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/mocks/mock_teleop.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/motors/test_dynamixel.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from collections.abc import Generator
from unittest.mock import MagicMock, patch
import pytest
from lerobot.motors import Motor, MotorCalibration, MotorNormMode
from lerobot.motors.dynamixel import MODEL_NUMBER_TABLE, DynamixelMotorsBus
from lerobot.motors.dynamixel.tables import X_SERIES_CONTROL_TABLE
from lerobot.motors.encoding_utils import encode_twos_complement
try:
import dynamixel_sdk as dxl
from tests.mocks.mock_dynamixel import MockMotors, MockPortHandler
except (ImportError, ModuleNotFoundError):
pytest.skip("dynamixel_sdk not available", allow_module_level=True)
@pytest.fixture(autouse=True)
def patch_port_handler():
    """On macOS, transparently replace dxl.PortHandler with MockPortHandler for every test.

    NOTE(review): presumably because the real handler cannot open pseudo-serial
    ports / unsupported baudrates on macOS — confirm against tests/mocks.
    """
    if sys.platform == "darwin":
        with patch.object(dxl, "PortHandler", MockPortHandler):
            yield
    else:
        yield
@pytest.fixture
def mock_motors() -> Generator[MockMotors, None, None]:
    """Open a MockMotors serial endpoint for the duration of the test, then close it."""
    motors = MockMotors()
    motors.open()
    yield motors
    motors.close()
@pytest.fixture
def dummy_motors() -> dict[str, Motor]:
    """Three X-series motors with ids 1..3, using the RANGE_M100_100 norm mode."""
    return {
        "dummy_1": Motor(1, "xl430-w250", MotorNormMode.RANGE_M100_100),
        "dummy_2": Motor(2, "xm540-w270", MotorNormMode.RANGE_M100_100),
        "dummy_3": Motor(3, "xl330-m077", MotorNormMode.RANGE_M100_100),
    }
@pytest.fixture
def dummy_calibration(dummy_motors) -> dict[str, MotorCalibration]:
    """Hand-picked calibration values for the three dummy motors, keyed by motor name."""
    # One row per motor; row index = motor id - 1.
    specs = [
        # (drive_mode, homing_offset, range_min, range_max)
        (0, -709, 43, 1335),
        (1, -2006, 27, 3608),
        (0, 1624, 145, 3999),
    ]
    calibration = {}
    for name, motor in dummy_motors.items():
        drive_mode, homing_offset, range_min, range_max = specs[motor.id - 1]
        calibration[name] = MotorCalibration(
            id=motor.id,
            drive_mode=drive_mode,
            homing_offset=homing_offset,
            range_min=range_min,
            range_max=range_max,
        )
    return calibration
@pytest.mark.skipif(sys.platform != "darwin", reason=f"No patching needed on {sys.platform=}")
def test_autouse_patch():
    """Ensures that the autouse fixture correctly patches dxl.PortHandler with MockPortHandler."""
    # Identity (not equality) check: the module attribute must BE the mock class.
    assert dxl.PortHandler is MockPortHandler
@pytest.mark.parametrize(
    "value, length, expected",
    [
        (0x12, 1, [0x12]),
        (0x1234, 2, [0x34, 0x12]),
        (0x12345678, 4, [0x78, 0x56, 0x34, 0x12]),
    ],
    ids=[
        "1 byte",
        "2 bytes",
        "4 bytes",
    ],
)  # fmt: skip
def test__split_into_byte_chunks(value, length, expected):
    """Values are split little-endian (least-significant byte first) into `length` bytes."""
    bus = DynamixelMotorsBus("", {})
    assert bus._split_into_byte_chunks(value, length) == expected
def test_abc_implementation(dummy_motors):
    """Instantiation should raise an error if the class doesn't implement abstract methods/properties."""
    # Python raises TypeError at instantiation time if any abstractmethod is unimplemented.
    DynamixelMotorsBus(port="/dev/dummy-port", motors=dummy_motors)
@pytest.mark.parametrize("id_", [1, 2, 3])
def test_ping(id_, mock_motors, dummy_motors):
    """ping() returns the model number reported by the stubbed motor."""
    expected_model_nb = MODEL_NUMBER_TABLE[dummy_motors[f"dummy_{id_}"].model]
    stub = mock_motors.build_ping_stub(id_, expected_model_nb)
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    ping_model_nb = bus.ping(id_)
    assert ping_model_nb == expected_model_nb
    assert mock_motors.stubs[stub].called
def test_broadcast_ping(mock_motors, dummy_motors):
    """broadcast_ping() maps every responding id to its model number."""
    models = {m.id: m.model for m in dummy_motors.values()}
    expected_model_nbs = {id_: MODEL_NUMBER_TABLE[model] for id_, model in models.items()}
    stub = mock_motors.build_broadcast_ping_stub(expected_model_nbs)
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    ping_model_nbs = bus.broadcast_ping()
    assert ping_model_nbs == expected_model_nbs
    assert mock_motors.stubs[stub].called
@pytest.mark.parametrize(
    "addr, length, id_, value",
    [
        (0, 1, 1, 2),
        (10, 2, 2, 999),
        (42, 4, 3, 1337),
    ],
)
def test__read(addr, length, id_, value, mock_motors, dummy_motors):
    """_read() returns the value the stubbed motor replies with at (addr, length)."""
    stub = mock_motors.build_read_stub(addr, length, id_, value)
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    read_value, _, _ = bus._read(addr, length, id_)
    assert mock_motors.stubs[stub].called
    assert read_value == value
@pytest.mark.parametrize("raise_on_error", (True, False))
def test__read_error(raise_on_error, mock_motors, dummy_motors):
    """A motor-reported error either raises RuntimeError or is returned as the raw error code."""
    addr, length, id_, value, error = (10, 4, 1, 1337, dxl.ERRNUM_DATA_LIMIT)
    stub = mock_motors.build_read_stub(addr, length, id_, value, error=error)
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    if raise_on_error:
        with pytest.raises(
            RuntimeError, match=re.escape("[RxPacketError] The data value exceeds the limit value!")
        ):
            bus._read(addr, length, id_, raise_on_error=raise_on_error)
    else:
        _, _, read_error = bus._read(addr, length, id_, raise_on_error=raise_on_error)
        assert read_error == error
    assert mock_motors.stubs[stub].called
@pytest.mark.parametrize("raise_on_error", (True, False))
def test__read_comm(raise_on_error, mock_motors, dummy_motors):
    """A missing status packet either raises ConnectionError or is returned as a comm RX timeout."""
    addr, length, id_, value = (10, 4, 1, 1337)
    stub = mock_motors.build_read_stub(addr, length, id_, value, reply=False)
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    if raise_on_error:
        with pytest.raises(ConnectionError, match=re.escape("[TxRxResult] There is no status packet!")):
            bus._read(addr, length, id_, raise_on_error=raise_on_error)
    else:
        _, read_comm, _ = bus._read(addr, length, id_, raise_on_error=raise_on_error)
        assert read_comm == dxl.COMM_RX_TIMEOUT
    assert mock_motors.stubs[stub].called
@pytest.mark.parametrize(
    "addr, length, id_, value",
    [
        (0, 1, 1, 2),
        (10, 2, 2, 999),
        (42, 4, 3, 1337),
    ],
)
def test__write(addr, length, id_, value, mock_motors, dummy_motors):
    """_write() reports COMM_SUCCESS and no error when the stub acknowledges the write."""
    stub = mock_motors.build_write_stub(addr, length, id_, value)
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    comm, error = bus._write(addr, length, id_, value)
    assert mock_motors.stubs[stub].called
    assert comm == dxl.COMM_SUCCESS
    assert error == 0
@pytest.mark.parametrize("raise_on_error", (True, False))
def test__write_error(raise_on_error, mock_motors, dummy_motors):
    """A motor-reported write error either raises RuntimeError or is returned as the raw error code."""
    addr, length, id_, value, error = (10, 4, 1, 1337, dxl.ERRNUM_DATA_LIMIT)
    stub = mock_motors.build_write_stub(addr, length, id_, value, error=error)
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    if raise_on_error:
        with pytest.raises(
            RuntimeError, match=re.escape("[RxPacketError] The data value exceeds the limit value!")
        ):
            bus._write(addr, length, id_, value, raise_on_error=raise_on_error)
    else:
        _, write_error = bus._write(addr, length, id_, value, raise_on_error=raise_on_error)
        assert write_error == error
    assert mock_motors.stubs[stub].called
@pytest.mark.parametrize("raise_on_error", (True, False))
def test__write_comm(raise_on_error, mock_motors, dummy_motors):
    """A missing write acknowledgement either raises ConnectionError or returns a comm RX timeout."""
    addr, length, id_, value = (10, 4, 1, 1337)
    stub = mock_motors.build_write_stub(addr, length, id_, value, reply=False)
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    if raise_on_error:
        with pytest.raises(ConnectionError, match=re.escape("[TxRxResult] There is no status packet!")):
            bus._write(addr, length, id_, value, raise_on_error=raise_on_error)
    else:
        write_comm, _ = bus._write(addr, length, id_, value, raise_on_error=raise_on_error)
        assert write_comm == dxl.COMM_RX_TIMEOUT
    assert mock_motors.stubs[stub].called
@pytest.mark.parametrize(
    "addr, length, ids_values",
    [
        (0, 1, {1: 4}),
        (10, 2, {1: 1337, 2: 42}),
        (42, 4, {1: 1337, 2: 42, 3: 4016}),
    ],
)
def test__sync_read(addr, length, ids_values, mock_motors, dummy_motors):
    """_sync_read returns a {motor id: value} dict mirroring the stubbed replies."""
    stub = mock_motors.build_sync_read_stub(addr, length, ids_values)
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    read_values, _ = bus._sync_read(addr, length, list(ids_values))
    assert mock_motors.stubs[stub].called
    assert read_values == ids_values
@pytest.mark.parametrize("raise_on_error", (True, False))
def test__sync_read_comm(raise_on_error, mock_motors, dummy_motors):
    """A missing status packet on sync read either raises ConnectionError or yields COMM_RX_TIMEOUT."""
    addr, length, ids_values = (10, 4, {1: 1337})
    stub = mock_motors.build_sync_read_stub(addr, length, ids_values, reply=False)  # motor stays silent
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    if raise_on_error:
        with pytest.raises(ConnectionError, match=re.escape("[TxRxResult] There is no status packet!")):
            bus._sync_read(addr, length, list(ids_values), raise_on_error=raise_on_error)
    else:
        _, read_comm = bus._sync_read(addr, length, list(ids_values), raise_on_error=raise_on_error)
        assert read_comm == dxl.COMM_RX_TIMEOUT
    assert mock_motors.stubs[stub].called
@pytest.mark.parametrize(
    "addr, length, ids_values",
    [
        (0, 1, {1: 4}),
        (10, 2, {1: 1337, 2: 42}),
        (42, 4, {1: 1337, 2: 42, 3: 4016}),
    ],
)
def test__sync_write(addr, length, ids_values, mock_motors, dummy_motors):
    """_sync_write broadcasts values to several motors and reports only a comm code (no status packet)."""
    stub = mock_motors.build_sync_write_stub(addr, length, ids_values)
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    comm = bus._sync_write(addr, length, ids_values)
    # wait_called(): presumably the stub is serviced asynchronously by the mock port — TODO confirm
    assert mock_motors.stubs[stub].wait_called()
    assert comm == dxl.COMM_SUCCESS
def test_is_calibrated(mock_motors, dummy_motors, dummy_calibration):
    """is_calibrated is True when the stored calibration matches what is read back from the motors."""
    drive_modes = {m.id: m.drive_mode for m in dummy_calibration.values()}
    # Dynamixel stores Homing_Offset as a 4-byte two's-complement value.
    encoded_homings = {m.id: encode_twos_complement(m.homing_offset, 4) for m in dummy_calibration.values()}
    mins = {m.id: m.range_min for m in dummy_calibration.values()}
    maxes = {m.id: m.range_max for m in dummy_calibration.values()}
    drive_modes_stub = mock_motors.build_sync_read_stub(*X_SERIES_CONTROL_TABLE["Drive_Mode"], drive_modes)
    offsets_stub = mock_motors.build_sync_read_stub(*X_SERIES_CONTROL_TABLE["Homing_Offset"], encoded_homings)
    mins_stub = mock_motors.build_sync_read_stub(*X_SERIES_CONTROL_TABLE["Min_Position_Limit"], mins)
    maxes_stub = mock_motors.build_sync_read_stub(*X_SERIES_CONTROL_TABLE["Max_Position_Limit"], maxes)
    bus = DynamixelMotorsBus(
        port=mock_motors.port,
        motors=dummy_motors,
        calibration=dummy_calibration,
    )
    bus.connect(handshake=False)
    is_calibrated = bus.is_calibrated
    assert is_calibrated
    # The property must have consulted all four calibration registers.
    assert mock_motors.stubs[drive_modes_stub].called
    assert mock_motors.stubs[offsets_stub].called
    assert mock_motors.stubs[mins_stub].called
    assert mock_motors.stubs[maxes_stub].called
def test_reset_calibration(mock_motors, dummy_motors):
    """reset_calibration writes factory defaults: homing 0 and position limits [0, 4095] per motor."""
    write_homing_stubs = []
    write_mins_stubs = []
    write_maxes_stubs = []
    for motor in dummy_motors.values():
        write_homing_stubs.append(
            mock_motors.build_write_stub(*X_SERIES_CONTROL_TABLE["Homing_Offset"], motor.id, 0)
        )
        write_mins_stubs.append(
            mock_motors.build_write_stub(*X_SERIES_CONTROL_TABLE["Min_Position_Limit"], motor.id, 0)
        )
        write_maxes_stubs.append(
            mock_motors.build_write_stub(*X_SERIES_CONTROL_TABLE["Max_Position_Limit"], motor.id, 4095)
        )
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    bus.reset_calibration()
    assert all(mock_motors.stubs[stub].called for stub in write_homing_stubs)
    assert all(mock_motors.stubs[stub].called for stub in write_mins_stubs)
    assert all(mock_motors.stubs[stub].called for stub in write_maxes_stubs)
def test_set_half_turn_homings(mock_motors, dummy_motors):
    """
    For this test, we assume that the homing offsets are already 0 such that
    Present_Position == Actual_Position.

    set_half_turn_homings should then write homing = 2047 - position so each
    motor reads the half-turn value (2047) at its current pose.
    """
    current_positions = {
        1: 1337,
        2: 42,
        3: 3672,
    }
    expected_homings = {
        1: 710,  # 2047 - 1337
        2: 2005,  # 2047 - 42
        3: -1625,  # 2047 - 3672
    }
    read_pos_stub = mock_motors.build_sync_read_stub(
        *X_SERIES_CONTROL_TABLE["Present_Position"], current_positions
    )
    write_homing_stubs = []
    for id_, homing in expected_homings.items():
        # Dynamixel stores Homing_Offset as a 4-byte two's-complement value.
        encoded_homing = encode_twos_complement(homing, 4)
        stub = mock_motors.build_write_stub(*X_SERIES_CONTROL_TABLE["Homing_Offset"], id_, encoded_homing)
        write_homing_stubs.append(stub)
    bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    bus.reset_calibration = MagicMock()  # isolate: homings must start from a clean slate
    bus.set_half_turn_homings()
    bus.reset_calibration.assert_called_once()
    assert mock_motors.stubs[read_pos_stub].called
    assert all(mock_motors.stubs[stub].called for stub in write_homing_stubs)
def test_record_ranges_of_motion(mock_motors, dummy_motors):
    """record_ranges_of_motion tracks per-motor min/max over successive position reads."""
    # Each motor serves these positions in sequence, one per read.
    positions = {
        1: [351, 42, 1337],
        2: [28, 3600, 2444],
        3: [4002, 2999, 146],
    }
    expected_mins = {
        "dummy_1": 42,
        "dummy_2": 28,
        "dummy_3": 146,
    }
    expected_maxes = {
        "dummy_1": 1337,
        "dummy_2": 3600,
        "dummy_3": 4002,
    }
    read_pos_stub = mock_motors.build_sequential_sync_read_stub(
        *X_SERIES_CONTROL_TABLE["Present_Position"], positions
    )
    # enter_pressed: False once, then True -> the recording loop stops after its second pass.
    with patch("lerobot.motors.motors_bus.enter_pressed", side_effect=[False, True]):
        bus = DynamixelMotorsBus(port=mock_motors.port, motors=dummy_motors)
        bus.connect(handshake=False)
        mins, maxes = bus.record_ranges_of_motion(display_values=False)
    assert mock_motors.stubs[read_pos_stub].calls == 3
    assert mins == expected_mins
    assert maxes == expected_maxes
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/motors/test_dynamixel.py",
"license": "Apache License 2.0",
"lines": 334,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/motors/test_feetech.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from collections.abc import Generator
from unittest.mock import MagicMock, patch
import pytest
from lerobot.motors import Motor, MotorCalibration, MotorNormMode
from lerobot.motors.encoding_utils import encode_sign_magnitude
from lerobot.motors.feetech import MODEL_NUMBER, MODEL_NUMBER_TABLE, FeetechMotorsBus
from lerobot.motors.feetech.tables import STS_SMS_SERIES_CONTROL_TABLE
# Skip this whole test module when the Feetech SDK (and thus its mocks) is unavailable.
try:
    import scservo_sdk as scs
    from tests.mocks.mock_feetech import MockMotors, MockPortHandler
except (ImportError, ModuleNotFoundError):
    pytest.skip("scservo_sdk not available", allow_module_level=True)
@pytest.fixture(autouse=True)
def patch_port_handler():
    """On macOS, substitute scs.PortHandler with MockPortHandler for the duration of each test."""
    if sys.platform != "darwin":
        yield
        return
    with patch.object(scs, "PortHandler", MockPortHandler):
        yield
@pytest.fixture
def mock_motors() -> Generator[MockMotors, None, None]:
    """Provide an opened MockMotors serial stub, closed again on teardown."""
    mock = MockMotors()
    mock.open()
    yield mock
    mock.close()
@pytest.fixture
def dummy_motors() -> dict[str, Motor]:
    """Three STS3215 motors with ids 1-3, all normalized to [-100, 100]."""
    return {
        f"dummy_{idx}": Motor(idx, "sts3215", MotorNormMode.RANGE_M100_100)
        for idx in (1, 2, 3)
    }
@pytest.fixture
def dummy_calibration(dummy_motors) -> dict[str, MotorCalibration]:
    """Hand-picked calibration values for each dummy motor, keyed by the motor's id."""
    offsets = {1: -709, 2: -2006, 3: 1624}
    lowers = {1: 43, 2: 27, 3: 145}
    uppers = {1: 1335, 2: 3608, 3: 3999}
    return {
        name: MotorCalibration(
            id=motor.id,
            drive_mode=0,
            homing_offset=offsets[motor.id],
            range_min=lowers[motor.id],
            range_max=uppers[motor.id],
        )
        for name, motor in dummy_motors.items()
    }
@pytest.mark.skipif(sys.platform != "darwin", reason=f"No patching needed on {sys.platform=}")
def test_autouse_patch():
    """Ensures that the autouse fixture correctly patches scs.PortHandler with MockPortHandler."""
    # Only meaningful on macOS: elsewhere the fixture deliberately leaves scs untouched.
    assert scs.PortHandler is MockPortHandler
@pytest.mark.parametrize(
    "protocol, value, length, expected",
    [
        (0, 0x12, 1, [0x12]),
        (1, 0x12, 1, [0x12]),
        (0, 0x1234, 2, [0x34, 0x12]),
        (1, 0x1234, 2, [0x12, 0x34]),
        (0, 0x12345678, 4, [0x78, 0x56, 0x34, 0x12]),
        (1, 0x12345678, 4, [0x56, 0x78, 0x12, 0x34]),
    ],
    ids=[
        "P0: 1 byte",
        "P1: 1 byte",
        "P0: 2 bytes",
        "P1: 2 bytes",
        "P0: 4 bytes",
        "P1: 4 bytes",
    ],
)  # fmt: skip
def test__split_into_byte_chunks(protocol, value, length, expected):
    """Protocol 0 splits values little-endian; protocol 1 orders bytes differently per 16-bit word."""
    motors_bus = FeetechMotorsBus("", {}, protocol_version=protocol)
    chunks = motors_bus._split_into_byte_chunks(value, length)
    assert chunks == expected
def test_abc_implementation(dummy_motors):
    """FeetechMotorsBus implements every abstract method/property, so instantiation must not raise."""
    FeetechMotorsBus(port="/dev/dummy-port", motors=dummy_motors)
@pytest.mark.parametrize("id_", [1, 2, 3])
def test_ping(id_, mock_motors, dummy_motors):
    """Pinging a motor returns its model number via a ping plus a Model_Number register read."""
    expected_model_nb = MODEL_NUMBER_TABLE[dummy_motors[f"dummy_{id_}"].model]
    addr, length = MODEL_NUMBER
    ping_stub = mock_motors.build_ping_stub(id_)
    model_nb_stub = mock_motors.build_read_stub(addr, length, id_, expected_model_nb)
    bus = FeetechMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    assert bus.ping(id_) == expected_model_nb
    assert mock_motors.stubs[ping_stub].called
    assert mock_motors.stubs[model_nb_stub].called
def test_broadcast_ping(mock_motors, dummy_motors):
    """Broadcast ping returns a {id: model number} mapping covering every motor on the bus."""
    models = {m.id: m.model for m in dummy_motors.values()}
    addr, length = MODEL_NUMBER
    ping_stub = mock_motors.build_broadcast_ping_stub(list(models))
    expected_model_nbs = {id_: MODEL_NUMBER_TABLE[model] for id_, model in models.items()}
    model_nb_stubs = [
        mock_motors.build_read_stub(addr, length, id_, nb) for id_, nb in expected_model_nbs.items()
    ]
    bus = FeetechMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    assert bus.broadcast_ping() == expected_model_nbs
    assert mock_motors.stubs[ping_stub].called
    assert all(mock_motors.stubs[stub].called for stub in model_nb_stubs)
@pytest.mark.parametrize(
    "addr, length, id_, value",
    [
        (0, 1, 1, 2),
        (10, 2, 2, 999),
        (42, 4, 3, 1337),
    ],
)
def test__read(addr, length, id_, value, mock_motors, dummy_motors):
    """A successful _read returns the value served by the stubbed motor."""
    stub = mock_motors.build_read_stub(addr, length, id_, value)
    bus = FeetechMotorsBus(
        port=mock_motors.port,
        motors=dummy_motors,
    )
    bus.connect(handshake=False)  # skip handshake: the mock port only knows the stubbed requests
    read_value, _, _ = bus._read(addr, length, id_)
    assert mock_motors.stubs[stub].called
    assert read_value == value
@pytest.mark.parametrize("raise_on_error", (True, False))
def test__read_error(raise_on_error, mock_motors, dummy_motors):
    """A motor error status on read either raises a RuntimeError or is returned, per raise_on_error."""
    addr, length, id_, value, error = (10, 4, 1, 1337, scs.ERRBIT_VOLTAGE)
    stub = mock_motors.build_read_stub(addr, length, id_, value, error=error)
    bus = FeetechMotorsBus(
        port=mock_motors.port,
        motors=dummy_motors,
    )
    bus.connect(handshake=False)
    if raise_on_error:
        with pytest.raises(RuntimeError, match=re.escape("[RxPacketError] Input voltage error!")):
            bus._read(addr, length, id_, raise_on_error=raise_on_error)
    else:
        _, _, read_error = bus._read(addr, length, id_, raise_on_error=raise_on_error)
        assert read_error == error
    assert mock_motors.stubs[stub].called
@pytest.mark.parametrize("raise_on_error", (True, False))
def test__read_comm(raise_on_error, mock_motors, dummy_motors):
    """A missing status packet on read either raises ConnectionError or yields COMM_RX_TIMEOUT."""
    addr, length, id_, value = (10, 4, 1, 1337)
    stub = mock_motors.build_read_stub(addr, length, id_, value, reply=False)  # motor stays silent
    bus = FeetechMotorsBus(
        port=mock_motors.port,
        motors=dummy_motors,
    )
    bus.connect(handshake=False)
    if raise_on_error:
        with pytest.raises(ConnectionError, match=re.escape("[TxRxResult] There is no status packet!")):
            bus._read(addr, length, id_, raise_on_error=raise_on_error)
    else:
        _, read_comm, _ = bus._read(addr, length, id_, raise_on_error=raise_on_error)
        assert read_comm == scs.COMM_RX_TIMEOUT
    assert mock_motors.stubs[stub].called
@pytest.mark.parametrize(
    "addr, length, id_, value",
    [
        (0, 1, 1, 2),
        (10, 2, 2, 999),
        (42, 4, 3, 1337),
    ],
)
def test__write(addr, length, id_, value, mock_motors, dummy_motors):
    """A successful _write reaches the stubbed motor and returns (COMM_SUCCESS, 0)."""
    stub = mock_motors.build_write_stub(addr, length, id_, value)
    bus = FeetechMotorsBus(
        port=mock_motors.port,
        motors=dummy_motors,
    )
    bus.connect(handshake=False)
    comm, error = bus._write(addr, length, id_, value)
    # wait_called(): presumably the stub is serviced asynchronously by the mock port — TODO confirm
    assert mock_motors.stubs[stub].wait_called()
    assert comm == scs.COMM_SUCCESS
    assert error == 0
@pytest.mark.parametrize("raise_on_error", (True, False))
def test__write_error(raise_on_error, mock_motors, dummy_motors):
    """A motor error status on write either raises a RuntimeError or is returned, per raise_on_error."""
    addr, length, id_, value, error = (10, 4, 1, 1337, scs.ERRBIT_VOLTAGE)
    stub = mock_motors.build_write_stub(addr, length, id_, value, error=error)
    bus = FeetechMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    if raise_on_error:
        with pytest.raises(RuntimeError, match=re.escape("[RxPacketError] Input voltage error!")):
            bus._write(addr, length, id_, value, raise_on_error=raise_on_error)
    else:
        _, write_error = bus._write(addr, length, id_, value, raise_on_error=raise_on_error)
        assert write_error == error
    assert mock_motors.stubs[stub].called
@pytest.mark.parametrize("raise_on_error", (True, False))
def test__write_comm(raise_on_error, mock_motors, dummy_motors):
    """A missing status packet on write either raises ConnectionError or yields COMM_RX_TIMEOUT."""
    addr, length, id_, value = (10, 4, 1, 1337)
    stub = mock_motors.build_write_stub(addr, length, id_, value, reply=False)  # motor stays silent
    bus = FeetechMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    if raise_on_error:
        with pytest.raises(ConnectionError, match=re.escape("[TxRxResult] There is no status packet!")):
            bus._write(addr, length, id_, value, raise_on_error=raise_on_error)
    else:
        write_comm, _ = bus._write(addr, length, id_, value, raise_on_error=raise_on_error)
        assert write_comm == scs.COMM_RX_TIMEOUT
    assert mock_motors.stubs[stub].called
@pytest.mark.parametrize(
    "addr, length, ids_values",
    [
        (0, 1, {1: 4}),
        (10, 2, {1: 1337, 2: 42}),
        (42, 4, {1: 1337, 2: 42, 3: 4016}),
    ],
    ids=["1 motor", "2 motors", "3 motors"],
)
def test__sync_read(addr, length, ids_values, mock_motors, dummy_motors):
    """_sync_read returns a {motor id: value} dict mirroring the stubbed replies."""
    stub = mock_motors.build_sync_read_stub(addr, length, ids_values)
    bus = FeetechMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    read_values, _ = bus._sync_read(addr, length, list(ids_values))
    assert mock_motors.stubs[stub].called
    assert read_values == ids_values
@pytest.mark.parametrize("raise_on_error", (True, False))
def test__sync_read_comm(raise_on_error, mock_motors, dummy_motors):
    """A missing status packet on sync read either raises ConnectionError or yields COMM_RX_TIMEOUT."""
    addr, length, ids_values = (10, 4, {1: 1337})
    stub = mock_motors.build_sync_read_stub(addr, length, ids_values, reply=False)  # motor stays silent
    bus = FeetechMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    if raise_on_error:
        with pytest.raises(ConnectionError, match=re.escape("[TxRxResult] There is no status packet!")):
            bus._sync_read(addr, length, list(ids_values), raise_on_error=raise_on_error)
    else:
        _, read_comm = bus._sync_read(addr, length, list(ids_values), raise_on_error=raise_on_error)
        assert read_comm == scs.COMM_RX_TIMEOUT
    assert mock_motors.stubs[stub].called
@pytest.mark.parametrize(
    "addr, length, ids_values",
    [
        (0, 1, {1: 4}),
        (10, 2, {1: 1337, 2: 42}),
        (42, 4, {1: 1337, 2: 42, 3: 4016}),
    ],
    ids=["1 motor", "2 motors", "3 motors"],
)
def test__sync_write(addr, length, ids_values, mock_motors, dummy_motors):
    """_sync_write broadcasts values to several motors and reports only a comm code (no status packet)."""
    stub = mock_motors.build_sync_write_stub(addr, length, ids_values)
    bus = FeetechMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    comm = bus._sync_write(addr, length, ids_values)
    # wait_called(): presumably the stub is serviced asynchronously by the mock port — TODO confirm
    assert mock_motors.stubs[stub].wait_called()
    assert comm == scs.COMM_SUCCESS
def test_is_calibrated(mock_motors, dummy_motors, dummy_calibration):
    """is_calibrated is True when the stored calibration matches what is read back from each motor."""
    mins_stubs, maxes_stubs, homings_stubs = [], [], []
    for cal in dummy_calibration.values():
        mins_stubs.append(
            mock_motors.build_read_stub(
                *STS_SMS_SERIES_CONTROL_TABLE["Min_Position_Limit"], cal.id, cal.range_min
            )
        )
        maxes_stubs.append(
            mock_motors.build_read_stub(
                *STS_SMS_SERIES_CONTROL_TABLE["Max_Position_Limit"], cal.id, cal.range_max
            )
        )
        homings_stubs.append(
            mock_motors.build_read_stub(
                *STS_SMS_SERIES_CONTROL_TABLE["Homing_Offset"],
                cal.id,
                # Feetech stores Homing_Offset as an 11-bit sign-magnitude value.
                encode_sign_magnitude(cal.homing_offset, 11),
            )
        )
    bus = FeetechMotorsBus(
        port=mock_motors.port,
        motors=dummy_motors,
        calibration=dummy_calibration,
    )
    bus.connect(handshake=False)
    is_calibrated = bus.is_calibrated
    assert is_calibrated
    # The property must have consulted all three calibration registers for every motor.
    assert all(mock_motors.stubs[stub].called for stub in mins_stubs)
    assert all(mock_motors.stubs[stub].called for stub in maxes_stubs)
    assert all(mock_motors.stubs[stub].called for stub in homings_stubs)
def test_reset_calibration(mock_motors, dummy_motors):
    """reset_calibration writes factory defaults: homing 0 and position limits [0, 4095] per motor."""
    write_homing_stubs = []
    write_mins_stubs = []
    write_maxes_stubs = []
    for motor in dummy_motors.values():
        write_homing_stubs.append(
            mock_motors.build_write_stub(*STS_SMS_SERIES_CONTROL_TABLE["Homing_Offset"], motor.id, 0)
        )
        write_mins_stubs.append(
            mock_motors.build_write_stub(*STS_SMS_SERIES_CONTROL_TABLE["Min_Position_Limit"], motor.id, 0)
        )
        write_maxes_stubs.append(
            mock_motors.build_write_stub(*STS_SMS_SERIES_CONTROL_TABLE["Max_Position_Limit"], motor.id, 4095)
        )
    bus = FeetechMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    bus.reset_calibration()
    # wait_called(): presumably the stubs are serviced asynchronously by the mock port — TODO confirm
    assert all(mock_motors.stubs[stub].wait_called() for stub in write_homing_stubs)
    assert all(mock_motors.stubs[stub].wait_called() for stub in write_mins_stubs)
    assert all(mock_motors.stubs[stub].wait_called() for stub in write_maxes_stubs)
def test_set_half_turn_homings(mock_motors, dummy_motors):
    """
    For this test, we assume that the homing offsets are already 0 such that
    Present_Position == Actual_Position.

    set_half_turn_homings should then write homing = position - 2047 so each
    motor reads the half-turn value (2047) at its current pose.
    """
    current_positions = {
        1: 1337,
        2: 42,
        3: 3672,
    }
    expected_homings = {
        1: -710,  # 1337 - 2047
        2: -2005,  # 42 - 2047
        3: 1625,  # 3672 - 2047
    }
    read_pos_stub = mock_motors.build_sync_read_stub(
        *STS_SMS_SERIES_CONTROL_TABLE["Present_Position"], current_positions
    )
    write_homing_stubs = []
    for id_, homing in expected_homings.items():
        # Feetech stores Homing_Offset as an 11-bit sign-magnitude value.
        encoded_homing = encode_sign_magnitude(homing, 11)
        stub = mock_motors.build_write_stub(
            *STS_SMS_SERIES_CONTROL_TABLE["Homing_Offset"], id_, encoded_homing
        )
        write_homing_stubs.append(stub)
    bus = FeetechMotorsBus(port=mock_motors.port, motors=dummy_motors)
    bus.connect(handshake=False)
    bus.reset_calibration = MagicMock()  # isolate: homings must start from a clean slate
    bus.set_half_turn_homings()
    bus.reset_calibration.assert_called_once()
    assert mock_motors.stubs[read_pos_stub].called
    assert all(mock_motors.stubs[stub].wait_called() for stub in write_homing_stubs)
def test_record_ranges_of_motion(mock_motors, dummy_motors):
    """record_ranges_of_motion tracks per-motor min/max over successive position reads."""
    # Each motor serves these positions in sequence, one per read.
    positions = {
        1: [351, 42, 1337],
        2: [28, 3600, 2444],
        3: [4002, 2999, 146],
    }
    expected_mins = {
        "dummy_1": 42,
        "dummy_2": 28,
        "dummy_3": 146,
    }
    expected_maxes = {
        "dummy_1": 1337,
        "dummy_2": 3600,
        "dummy_3": 4002,
    }
    stub = mock_motors.build_sequential_sync_read_stub(
        *STS_SMS_SERIES_CONTROL_TABLE["Present_Position"], positions
    )
    # enter_pressed: False once, then True -> the recording loop stops after its second pass.
    with patch("lerobot.motors.motors_bus.enter_pressed", side_effect=[False, True]):
        bus = FeetechMotorsBus(port=mock_motors.port, motors=dummy_motors)
        bus.connect(handshake=False)
        mins, maxes = bus.record_ranges_of_motion(display_values=False)
    assert mock_motors.stubs[stub].calls == 3
    assert mins == expected_mins
    assert maxes == expected_maxes
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/motors/test_feetech.py",
"license": "Apache License 2.0",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/motors/test_motors_bus.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from unittest.mock import patch
import pytest
from lerobot.motors.motors_bus import (
Motor,
MotorNormMode,
assert_same_address,
get_address,
get_ctrl_table,
)
from tests.mocks.mock_motors_bus import (
DUMMY_CTRL_TABLE_1,
DUMMY_CTRL_TABLE_2,
DUMMY_MODEL_CTRL_TABLE,
MockMotorsBus,
)
@pytest.fixture
def dummy_motors() -> dict[str, Motor]:
    """Three motors over two mock models; dummy_3 uses a different norm mode on purpose."""
    motors = {}
    motors["dummy_1"] = Motor(1, "model_2", MotorNormMode.RANGE_M100_100)
    motors["dummy_2"] = Motor(2, "model_3", MotorNormMode.RANGE_M100_100)
    motors["dummy_3"] = Motor(3, "model_2", MotorNormMode.RANGE_0_100)
    return motors
def test_get_ctrl_table():
    """A known model resolves to its control table."""
    assert get_ctrl_table(DUMMY_MODEL_CTRL_TABLE, "model_1") == DUMMY_CTRL_TABLE_1
def test_get_ctrl_table_error():
    """An unknown model raises a KeyError naming the model."""
    model = "model_99"
    with pytest.raises(KeyError, match=f"Control table for {model=} not found."):
        get_ctrl_table(DUMMY_MODEL_CTRL_TABLE, model)
def test_get_address():
    """Firmware_Version of model_1 lives at address 0 and spans a single byte."""
    address, size = get_address(DUMMY_MODEL_CTRL_TABLE, "model_1", "Firmware_Version")
    assert (address, size) == (0, 1)
def test_get_address_error():
    """Requesting a data name absent from the control table raises a KeyError."""
    model = "model_1"
    data_name = "Lock"
    with pytest.raises(KeyError, match=f"Address for '{data_name}' not found in {model} control table."):
        get_address(DUMMY_MODEL_CTRL_TABLE, "model_1", data_name)
def test_assert_same_address():
    """No error when both models store Present_Position at the same address and length."""
    assert_same_address(DUMMY_MODEL_CTRL_TABLE, ["model_1", "model_2"], "Present_Position")
def test_assert_same_length_different_addresses():
    """Models disagreeing on the address of Model_Number are rejected."""
    with pytest.raises(
        NotImplementedError,
        match=re.escape("At least two motor models use a different address"),
    ):
        assert_same_address(DUMMY_MODEL_CTRL_TABLE, ["model_1", "model_2"], "Model_Number")
def test_assert_same_address_different_length():
    """Models disagreeing on the byte length of Goal_Position are rejected."""
    with pytest.raises(
        NotImplementedError,
        match=re.escape("At least two motor models use a different bytes representation"),
    ):
        assert_same_address(DUMMY_MODEL_CTRL_TABLE, ["model_1", "model_2"], "Goal_Position")
def test__serialize_data_invalid_length():
    """Only 1-, 2- and 4-byte values are supported."""
    with pytest.raises(NotImplementedError):
        MockMotorsBus("", {})._serialize_data(100, 3)
def test__serialize_data_negative_numbers():
    """Negative values cannot be serialized directly."""
    with pytest.raises(ValueError):
        MockMotorsBus("", {})._serialize_data(-1, 1)
def test__serialize_data_large_number():
    """Values beyond the 4-byte maximum (0xFFFFFFFF) are rejected."""
    with pytest.raises(ValueError):
        MockMotorsBus("", {})._serialize_data(2**32, 4)
@pytest.mark.parametrize(
    "data_name, id_, value",
    [
        ("Firmware_Version", 1, 14),
        ("Model_Number", 1, 5678),
        ("Present_Position", 2, 1337),
        ("Present_Velocity", 3, 42),
    ],
)
def test_read(data_name, id_, value, dummy_motors):
    """read() resolves the motor name to id/address, then pipes the raw value through
    _decode_sign and — for normalized data names only — _normalize."""
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    with (
        patch.object(MockMotorsBus, "_read", return_value=(value, 0, 0)) as mock__read,
        patch.object(MockMotorsBus, "_decode_sign", return_value={id_: value}) as mock__decode_sign,
        patch.object(MockMotorsBus, "_normalize", return_value={id_: value}) as mock__normalize,
    ):
        returned_value = bus.read(data_name, f"dummy_{id_}")
    assert returned_value == value
    mock__read.assert_called_once_with(
        addr,
        length,
        id_,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to read '{data_name}' on {id_=} after 1 tries.",
    )
    mock__decode_sign.assert_called_once_with(data_name, {id_: value})
    if data_name in bus.normalized_data:
        mock__normalize.assert_called_once_with({id_: value})
@pytest.mark.parametrize(
    "data_name, id_, value",
    [
        ("Goal_Position", 1, 1337),
        ("Goal_Velocity", 2, 3682),
        ("Lock", 3, 1),
    ],
)
def test_write(data_name, id_, value, dummy_motors):
    """write() resolves the motor name to id/address, then pipes the value through
    _unnormalize (normalized data names only) and _encode_sign before _write."""
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    with (
        patch.object(MockMotorsBus, "_write", return_value=(0, 0)) as mock__write,
        patch.object(MockMotorsBus, "_encode_sign", return_value={id_: value}) as mock__encode_sign,
        patch.object(MockMotorsBus, "_unnormalize", return_value={id_: value}) as mock__unnormalize,
    ):
        bus.write(data_name, f"dummy_{id_}", value)
    mock__write.assert_called_once_with(
        addr,
        length,
        id_,
        value,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to write '{data_name}' on {id_=} with '{value}' after 1 tries.",
    )
    mock__encode_sign.assert_called_once_with(data_name, {id_: value})
    if data_name in bus.normalized_data:
        mock__unnormalize.assert_called_once_with({id_: value})
@pytest.mark.parametrize(
    "data_name, id_, value",
    [
        ("Firmware_Version", 1, 14),
        ("Model_Number", 1, 5678),
        ("Present_Position", 2, 1337),
        ("Present_Velocity", 3, 42),
    ],
)
def test_sync_read_by_str(data_name, id_, value, dummy_motors):
    """sync_read() with a single motor name returns a one-entry {name: value} dict."""
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    ids = [id_]
    expected_value = {f"dummy_{id_}": value}
    with (
        patch.object(MockMotorsBus, "_sync_read", return_value=({id_: value}, 0)) as mock__sync_read,
        patch.object(MockMotorsBus, "_decode_sign", return_value={id_: value}) as mock__decode_sign,
        patch.object(MockMotorsBus, "_normalize", return_value={id_: value}) as mock__normalize,
    ):
        returned_dict = bus.sync_read(data_name, f"dummy_{id_}")
    assert returned_dict == expected_value
    mock__sync_read.assert_called_once_with(
        addr,
        length,
        ids,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to sync read '{data_name}' on {ids=} after 1 tries.",
    )
    mock__decode_sign.assert_called_once_with(data_name, {id_: value})
    if data_name in bus.normalized_data:
        mock__normalize.assert_called_once_with({id_: value})
@pytest.mark.parametrize(
    "data_name, ids_values",
    [
        ("Model_Number", {1: 5678}),
        ("Present_Position", {1: 1337, 2: 42}),
        ("Present_Velocity", {1: 1337, 2: 42, 3: 4016}),
    ],
    ids=["1 motor", "2 motors", "3 motors"],
)
def test_sync_read_by_list(data_name, ids_values, dummy_motors):
    """sync_read() with a list of motor names returns a {name: value} dict for those motors."""
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    ids = list(ids_values)
    expected_values = {f"dummy_{id_}": val for id_, val in ids_values.items()}
    with (
        patch.object(MockMotorsBus, "_sync_read", return_value=(ids_values, 0)) as mock__sync_read,
        patch.object(MockMotorsBus, "_decode_sign", return_value=ids_values) as mock__decode_sign,
        patch.object(MockMotorsBus, "_normalize", return_value=ids_values) as mock__normalize,
    ):
        returned_dict = bus.sync_read(data_name, [f"dummy_{id_}" for id_ in ids])
    assert returned_dict == expected_values
    mock__sync_read.assert_called_once_with(
        addr,
        length,
        ids,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to sync read '{data_name}' on {ids=} after 1 tries.",
    )
    mock__decode_sign.assert_called_once_with(data_name, ids_values)
    if data_name in bus.normalized_data:
        mock__normalize.assert_called_once_with(ids_values)
@pytest.mark.parametrize(
    "data_name, ids_values",
    [
        ("Model_Number", {1: 5678, 2: 5799, 3: 5678}),
        ("Present_Position", {1: 1337, 2: 42, 3: 4016}),
        ("Goal_Position", {1: 4008, 2: 199, 3: 3446}),
    ],
    ids=["Model_Number", "Present_Position", "Goal_Position"],
)
def test_sync_read_by_none(data_name, ids_values, dummy_motors):
    """sync_read() with no motor argument defaults to reading every motor on the bus."""
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    ids = list(ids_values)
    expected_values = {f"dummy_{id_}": val for id_, val in ids_values.items()}
    with (
        patch.object(MockMotorsBus, "_sync_read", return_value=(ids_values, 0)) as mock__sync_read,
        patch.object(MockMotorsBus, "_decode_sign", return_value=ids_values) as mock__decode_sign,
        patch.object(MockMotorsBus, "_normalize", return_value=ids_values) as mock__normalize,
    ):
        returned_dict = bus.sync_read(data_name)
    assert returned_dict == expected_values
    mock__sync_read.assert_called_once_with(
        addr,
        length,
        ids,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to sync read '{data_name}' on {ids=} after 1 tries.",
    )
    mock__decode_sign.assert_called_once_with(data_name, ids_values)
    if data_name in bus.normalized_data:
        mock__normalize.assert_called_once_with(ids_values)
@pytest.mark.parametrize(
    "data_name, value",
    [
        ("Goal_Position", 500),
        ("Goal_Velocity", 4010),
        ("Lock", 0),
    ],
)
def test_sync_write_by_single_value(data_name, value, dummy_motors):
    """sync_write() with a scalar broadcasts that value to every motor on the bus."""
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    ids_values = {m.id: value for m in dummy_motors.values()}
    with (
        patch.object(MockMotorsBus, "_sync_write", return_value=(ids_values, 0)) as mock__sync_write,
        patch.object(MockMotorsBus, "_encode_sign", return_value=ids_values) as mock__encode_sign,
        patch.object(MockMotorsBus, "_unnormalize", return_value=ids_values) as mock__unnormalize,
    ):
        bus.sync_write(data_name, value)
    mock__sync_write.assert_called_once_with(
        addr,
        length,
        ids_values,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to sync write '{data_name}' with {ids_values=} after 1 tries.",
    )
    mock__encode_sign.assert_called_once_with(data_name, ids_values)
    if data_name in bus.normalized_data:
        mock__unnormalize.assert_called_once_with(ids_values)
@pytest.mark.parametrize(
    "data_name, ids_values",
    [
        ("Goal_Position", {1: 1337, 2: 42, 3: 4016}),
        ("Goal_Velocity", {1: 50, 2: 83, 3: 2777}),
        ("Lock", {1: 0, 2: 0, 3: 1}),
    ],
    ids=["Goal_Position", "Goal_Velocity", "Lock"],
)
def test_sync_write_by_value_dict(data_name, ids_values, dummy_motors):
    """sync_write() with a {name: value} dict writes each motor its own value."""
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    values = {f"dummy_{id_}": val for id_, val in ids_values.items()}
    with (
        patch.object(MockMotorsBus, "_sync_write", return_value=(ids_values, 0)) as mock__sync_write,
        patch.object(MockMotorsBus, "_encode_sign", return_value=ids_values) as mock__encode_sign,
        patch.object(MockMotorsBus, "_unnormalize", return_value=ids_values) as mock__unnormalize,
    ):
        bus.sync_write(data_name, values)
    mock__sync_write.assert_called_once_with(
        addr,
        length,
        ids_values,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to sync write '{data_name}' with {ids_values=} after 1 tries.",
    )
    mock__encode_sign.assert_called_once_with(data_name, ids_values)
    if data_name in bus.normalized_data:
        mock__unnormalize.assert_called_once_with(ids_values)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/motors/test_motors_bus.py",
"license": "Apache License 2.0",
"lines": 304,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/robots/test_so100_follower.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from unittest.mock import MagicMock, patch
import pytest
from lerobot.robots.so_follower import (
SO100Follower,
SO100FollowerConfig,
)
def _make_bus_mock() -> MagicMock:
    """Return a bus mock with just the attributes used by the robot."""
    bus = MagicMock(name="FeetechBusMock")
    bus.is_connected = False
    # connect()/disconnect() flip the flag so the robot's own is_connected tracks the mock.
    def _connect():
        bus.is_connected = True
    def _disconnect(_disable=True):
        bus.is_connected = False
    bus.connect.side_effect = _connect
    bus.disconnect.side_effect = _disconnect
    # torque_disabled() is used as a context manager by the robot; hand back a no-op one.
    @contextmanager
    def _dummy_cm():
        yield
    bus.torque_disabled.side_effect = _dummy_cm
    return bus
@pytest.fixture
def follower():
bus_mock = _make_bus_mock()
def _bus_side_effect(*_args, **kwargs):
bus_mock.motors = kwargs["motors"]
motors_order: list[str] = list(bus_mock.motors)
bus_mock.sync_read.return_value = {motor: idx for idx, motor in enumerate(motors_order, 1)}
bus_mock.sync_write.return_value = None
bus_mock.write.return_value = None
bus_mock.disable_torque.return_value = None
bus_mock.enable_torque.return_value = None
bus_mock.is_calibrated = True
return bus_mock
with (
patch(
"lerobot.robots.so_follower.so_follower.FeetechMotorsBus",
side_effect=_bus_side_effect,
),
patch.object(SO100Follower, "configure", lambda self: None),
):
cfg = SO100FollowerConfig(port="/dev/null")
robot = SO100Follower(cfg)
yield robot
if robot.is_connected:
robot.disconnect()
def test_connect_disconnect(follower):
assert not follower.is_connected
follower.connect()
assert follower.is_connected
follower.disconnect()
assert not follower.is_connected
def test_get_observation(follower):
follower.connect()
obs = follower.get_observation()
expected_keys = {f"{m}.pos" for m in follower.bus.motors}
assert set(obs.keys()) == expected_keys
for idx, motor in enumerate(follower.bus.motors, 1):
assert obs[f"{motor}.pos"] == idx
def test_send_action(follower):
follower.connect()
action = {f"{m}.pos": i * 10 for i, m in enumerate(follower.bus.motors, 1)}
returned = follower.send_action(action)
assert returned == action
goal_pos = {m: (i + 1) * 10 for i, m in enumerate(follower.bus.motors)}
follower.bus.sync_write.assert_called_once_with("Goal_Position", goal_pos)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/robots/test_so100_follower.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/utils/test_encoding_utils.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from lerobot.motors.encoding_utils import (
decode_sign_magnitude,
decode_twos_complement,
encode_sign_magnitude,
encode_twos_complement,
)
@pytest.mark.parametrize(
"value, sign_bit_index, expected",
[
(5, 4, 5),
(0, 4, 0),
(7, 3, 7),
(-1, 4, 17),
(-8, 4, 24),
(-3, 3, 11),
],
)
def test_encode_sign_magnitude(value, sign_bit_index, expected):
assert encode_sign_magnitude(value, sign_bit_index) == expected
@pytest.mark.parametrize(
"encoded, sign_bit_index, expected",
[
(5, 4, 5),
(0, 4, 0),
(7, 3, 7),
(17, 4, -1),
(24, 4, -8),
(11, 3, -3),
],
)
def test_decode_sign_magnitude(encoded, sign_bit_index, expected):
assert decode_sign_magnitude(encoded, sign_bit_index) == expected
@pytest.mark.parametrize(
"encoded, sign_bit_index",
[
(16, 4),
(-9, 3),
],
)
def test_encode_raises_on_overflow(encoded, sign_bit_index):
with pytest.raises(ValueError):
encode_sign_magnitude(encoded, sign_bit_index)
def test_encode_decode_sign_magnitude():
for sign_bit_index in range(2, 6):
max_val = (1 << sign_bit_index) - 1
for value in range(-max_val, max_val + 1):
encoded = encode_sign_magnitude(value, sign_bit_index)
decoded = decode_sign_magnitude(encoded, sign_bit_index)
assert decoded == value, f"Failed at value={value}, index={sign_bit_index}"
@pytest.mark.parametrize(
"value, n_bytes, expected",
[
(0, 1, 0),
(5, 1, 5),
(-1, 1, 255),
(-128, 1, 128),
(-2, 1, 254),
(127, 1, 127),
(0, 2, 0),
(5, 2, 5),
(-1, 2, 65_535),
(-32_768, 2, 32_768),
(-2, 2, 65_534),
(32_767, 2, 32_767),
(0, 4, 0),
(5, 4, 5),
(-1, 4, 4_294_967_295),
(-2_147_483_648, 4, 2_147_483_648),
(-2, 4, 4_294_967_294),
(2_147_483_647, 4, 2_147_483_647),
],
)
def test_encode_twos_complement(value, n_bytes, expected):
assert encode_twos_complement(value, n_bytes) == expected
@pytest.mark.parametrize(
"value, n_bytes, expected",
[
(0, 1, 0),
(5, 1, 5),
(255, 1, -1),
(128, 1, -128),
(254, 1, -2),
(127, 1, 127),
(0, 2, 0),
(5, 2, 5),
(65_535, 2, -1),
(32_768, 2, -32_768),
(65_534, 2, -2),
(32_767, 2, 32_767),
(0, 4, 0),
(5, 4, 5),
(4_294_967_295, 4, -1),
(2_147_483_648, 4, -2_147_483_648),
(4_294_967_294, 4, -2),
(2_147_483_647, 4, 2_147_483_647),
],
)
def test_decode_twos_complement(value, n_bytes, expected):
assert decode_twos_complement(value, n_bytes) == expected
@pytest.mark.parametrize(
"value, n_bytes",
[
(-129, 1),
(128, 1),
(-32_769, 2),
(32_768, 2),
(-2_147_483_649, 4),
(2_147_483_648, 4),
],
)
def test_encode_twos_complement_out_of_range(value, n_bytes):
with pytest.raises(ValueError):
encode_twos_complement(value, n_bytes)
@pytest.mark.parametrize(
"value, n_bytes",
[
(-128, 1),
(-1, 1),
(0, 1),
(1, 1),
(127, 1),
(-32_768, 2),
(-1, 2),
(0, 2),
(1, 2),
(32_767, 2),
(-2_147_483_648, 4),
(-1, 4),
(0, 4),
(1, 4),
(2_147_483_647, 4),
],
)
def test_encode_decode_twos_complement(value, n_bytes):
encoded = encode_twos_complement(value, n_bytes)
decoded = decode_twos_complement(encoded, n_bytes)
assert decoded == value, f"Failed at value={value}, n_bytes={n_bytes}"
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/utils/test_encoding_utils.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/open-r1:src/open_r1/utils/competitive_programming/cf_scoring.py | import asyncio
import os
from io import BytesIO
from typing import Literal
from async_lru import alru_cache
from .piston_client import PistonClient
from .utils import batched
async def score_single_test_case(
client: PistonClient,
problem_data: dict,
test_input: str,
test_output: str,
submission: str,
submission_language: str = "cpp",
) -> tuple[str, str]:
if submission_language not in ["python", "cpp"]:
raise ValueError(f"Invalid submission language: {submission_language}")
try:
result = await client.send_execute(
{
"files": [
{"name": f"main.{submission_language}", "content": submission},
*(
[{"name": "checker.py", "content": problem_data["generated_checker"]}]
if problem_data["generated_checker"]
else []
),
{"name": "input.txt", "content": test_input},
{"name": "correct_output.txt", "content": test_output},
{
"name": "grader_config",
"content": "\n".join(
f"{key}={value}"
for key, value in {
"TIME_LIMIT": problem_data["time_limit"],
"MEMORY_LIMIT": problem_data["memory_limit"],
"INPUT_MODE": problem_data["input_mode"],
}.items()
),
},
],
"run_timeout": (problem_data["time_limit"] + 10) * 1000,
# +10 seconds hard limit. time limits are handled by the codeforces script
},
language="cf_python3" if submission_language == "python" else "c++17",
)
except Exception as e:
print(f"Error scoring submission: {e}")
return False
return result
@alru_cache(maxsize=32) # TODO make this configurable
async def get_generated_contest_tests(contest_id: str) -> list[dict]:
import pandas as pd
import aiofiles
import aiofiles.os
tests_folder = os.environ.get("CF_TESTS_FOLDER", None)
if not tests_folder:
raise ValueError(
"CF_TESTS_FOLDER environment variable not set! Please download the codeforces generated tests and set CF_TESTS_FOLDER to the folder path. See https://huggingface.co/datasets/open-r1/codeforces for more information."
)
if not await aiofiles.os.path.exists(tests_folder):
raise ValueError(
f"CF_TESTS_FOLDER path '{tests_folder}' does not exist! Please download the codeforces generated tests and set CF_TESTS_FOLDER to the folder path. See https://huggingface.co/datasets/open-r1/codeforces for more information."
)
parquet_path = os.path.join(tests_folder, f"test_cases_{int(contest_id):04d}.parquet")
if not await aiofiles.os.path.exists(parquet_path):
return {}
# Read parquet file asynchronously
async with aiofiles.open(parquet_path, "rb") as f:
content = await f.read()
df = pd.read_parquet(BytesIO(content))
# Group by problem_id and convert to dictionary of lists
grouped_tests = df.groupby("problem_id").apply(lambda x: x[["input", "output"]].to_dict("records")).to_dict()
return grouped_tests
async def get_generated_tests(problem_id: str) -> list[dict]:
contest_id = problem_id.split("/")[0]
return (await get_generated_contest_tests(contest_id)).get(problem_id, [])
async def score_submission(
client: PistonClient,
problem_data: dict,
submission: str,
test_batch_size: int = 1,
scoring_mode: Literal["pass_fail", "partial", "weighted_sum"] = "weighted_sum",
no_compile_reward: float = -0.1,
no_submission_reward: float = -1.0,
submission_language: str = "cpp",
) -> float:
if submission_language not in ["python", "cpp"]:
raise ValueError(f"Invalid submission language: {submission_language}")
test_cases = problem_data["official_tests"] + (await get_generated_tests(problem_data["id"]))
# invalid/not a coding problem
if test_cases is None or len(test_cases) == 0:
return None
# no code extracted
if not submission:
return no_submission_reward
passed_test_cases = 0
# run one batch, check if any of them failed (0 score): if so stop evaluating (assuming non partial score); otherwise continue with the next batch of test cases.
for test_batch_to_run in batched(test_cases, test_batch_size) if test_batch_size >= 1 else [test_cases]:
results = await asyncio.gather(
*[
asyncio.create_task(
score_single_test_case(
client, problem_data, test_case["input"], test_case["output"], submission, submission_language
)
)
for test_case in test_batch_to_run
]
)
if any(result and result["compile"]["code"] != 0 for result in results):
return no_compile_reward
tests_passed_results = [
result and result["run"]["code"] == 0 and result["run"]["stdout"].strip() == "1" for result in results
]
if scoring_mode == "pass_fail" and any(not test_passed for test_passed in tests_passed_results):
break
passed_test_cases += sum(1 for test_passed in tests_passed_results if test_passed)
pass_fail_score = 1.0 if passed_test_cases == len(test_cases) else 0.0
if scoring_mode == "pass_fail":
return pass_fail_score
elif scoring_mode == "partial":
return passed_test_cases / len(test_cases)
elif scoring_mode == "weighted_sum":
return pass_fail_score + 0.1 * (passed_test_cases / len(test_cases))
else:
raise ValueError(f"Invalid scoring mode: {scoring_mode}")
| {
"repo_id": "huggingface/open-r1",
"file_path": "src/open_r1/utils/competitive_programming/cf_scoring.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/open-r1:src/open_r1/utils/competitive_programming/code_patcher.py | import re
def fix_python3_imports(source_code):
"""
Fix common import and function changes between Python 3 versions
Args:
source_code (str): The Python source code to update
Returns:
str: The updated source code
"""
# Dictionary of patterns to replacements
replacements = [
# Fix collections.abc imports (changed in Python 3.3+)
(
r"from collections import (Mapping|Sequence|Set|Container|MutableMapping|MutableSet|MutableSequence)",
r"from collections.abc import \1",
),
# Fix imp module deprecation (deprecated in 3.4)
(r"import imp", r"import importlib"),
# Fix asyncio.async() to asyncio.ensure_future() (renamed in 3.4.4)
(r"asyncio\.async\(", r"asyncio.ensure_future("),
# Fix inspect.getargspec to inspect.getfullargspec (deprecated in 3.5)
(r"inspect\.getargspec", r"inspect.getfullargspec"),
# Fix array.array 'c' type code to 'b' (removed in 3.9)
(r"array\.array\('c'", r"array.array('b'"),
# Fix backslash line continuation with multiple newlines (Python-specific issue)
(r"\\(\r\n|\r|\n)+", "\\\n"),
# some solutions use getlogin() to check if they are debugging or on an actual submission
(r"(?:os\s*\.\s*)?getlogin\s*\(\s*\)", "False"),
# Fix usage of fractions.gcd (moved to math in 3.5)
# 1. Fix direct usage: fractions.gcd -> math.gcd
(r"\bfractions\.gcd\b", r"math.gcd"),
# 2. Fix 'from fractions import gcd, X' -> 'from fractions import X' (start/middle)
(r"(from\s+fractions\s+import\s+(?:\([^)]*)?)\bgcd\s*,\s*", r"\1"),
# 3. Fix 'from fractions import X, gcd' -> 'from fractions import X' (end)
(r"(from\s+fractions\s+import\s+.*?\S)\s*,\s*\bgcd(\s*\)?\s*(?:#.*)?)", r"\1\2"),
# 4. Fix standalone 'from fractions import gcd' -> 'from math import gcd'
(r"from\s+fractions\s+import\s+\(?\s*gcd\s*\)?", r""),
# --- End: Replacement for the faulty line ---
]
lines = source_code.splitlines()
last_import = max(
[
i
for i, line in enumerate(lines)
if line.strip().startswith("import") or (line.strip().startswith("from") and "import" in line)
],
default=0,
)
import_section = "\n".join(lines[: last_import + 1])
main_source = "\n".join(lines[last_import:])
if "fractions.gcd" in source_code and "import math" not in source_code:
import_section += "\nimport math"
elif "gcd" in source_code and "from math import gcd" not in source_code:
import_section += "\nfrom math import gcd"
if "set_int_max_str_digits" not in source_code:
import_section += "\nimport sys\nsys.set_int_max_str_digits(0)"
source_code = import_section + "\n" + main_source
# Apply each replacement
for pattern, replacement in replacements:
source_code = re.sub(pattern, replacement, source_code)
source_code = source_code.rstrip("\\")
return source_code
def fix_cpp_includes(source_code):
# has most of the useful functions
code_header = "#include <bits/stdc++.h>\n"
# use namespace std since models forget std:: often
if "using namespace std;" not in source_code and "std::" not in source_code:
code_header += "\nusing namespace std;\n\n"
return code_header + source_code
def is_patchable(lang):
return lang in ("python", "python3", "Python 3", "PyPy 3", "PyPy 3-64", "cpp") or "C++" in lang
def patch_code(text, lang):
if not text:
return text
if lang in ("python", "python3", "Python 3", "PyPy 3", "PyPy 3-64"):
return fix_python3_imports(text)
elif "cpp" in lang or "C++" in lang:
return fix_cpp_includes(text)
return text
tests = [
"""read = lambda: map(int, input().split())
n, m, z = read()
from fractions import gcd
ans = z // (n * m // gcd(n, m))
print(ans)""",
"""from fractions import Fraction,gcd
a,b,c,d = [int(x) for x in input().split()]
if a*d > b*c:
num = a*d-b*c
denom = a*d
else:
num = b*c-a*d
denom = b*c
div = gcd(num,denom)
print('%d/%d'%(num//div,denom//div))""",
]
if __name__ == "__main__":
for test in tests:
print("ORIGINAL:", test, sep="\n\n")
print("PATCHED:", patch_code(test, "Python 3"), sep="\n\n")
print("=" * 50)
| {
"repo_id": "huggingface/open-r1",
"file_path": "src/open_r1/utils/competitive_programming/code_patcher.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/open-r1:src/open_r1/utils/competitive_programming/utils.py | from itertools import islice
def batched(iterable, n):
"Batch data into lists of length n. The last batch may be shorter."
# batched('ABCDEFG', 3) --> ABC DEF G
if n < 1:
return iterable
it = iter(iterable)
while batch := list(islice(it, n)):
yield batch
| {
"repo_id": "huggingface/open-r1",
"file_path": "src/open_r1/utils/competitive_programming/utils.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/open-r1:src/open_r1/utils/data.py | import logging
import datasets
from datasets import DatasetDict, concatenate_datasets
from ..configs import ScriptArguments
logger = logging.getLogger(__name__)
def get_dataset(args: ScriptArguments) -> DatasetDict:
"""Load a dataset or a mixture of datasets based on the configuration.
Args:
args (ScriptArguments): Script arguments containing dataset configuration.
Returns:
DatasetDict: The loaded datasets.
"""
if args.dataset_name and not args.dataset_mixture:
logger.info(f"Loading dataset: {args.dataset_name}")
return datasets.load_dataset(args.dataset_name, args.dataset_config)
elif args.dataset_mixture:
logger.info(f"Creating dataset mixture with {len(args.dataset_mixture.datasets)} datasets")
seed = args.dataset_mixture.seed
datasets_list = []
for dataset_config in args.dataset_mixture.datasets:
logger.info(f"Loading dataset for mixture: {dataset_config.id} (config: {dataset_config.config})")
ds = datasets.load_dataset(
dataset_config.id,
dataset_config.config,
split=dataset_config.split,
)
if dataset_config.columns is not None:
ds = ds.select_columns(dataset_config.columns)
if dataset_config.weight is not None:
ds = ds.shuffle(seed=seed).select(range(int(len(ds) * dataset_config.weight)))
logger.info(
f"Subsampled dataset '{dataset_config.id}' (config: {dataset_config.config}) with weight={dataset_config.weight} to {len(ds)} examples"
)
datasets_list.append(ds)
if datasets_list:
combined_dataset = concatenate_datasets(datasets_list)
combined_dataset = combined_dataset.shuffle(seed=seed)
logger.info(f"Created dataset mixture with {len(combined_dataset)} examples")
if args.dataset_mixture.test_split_size is not None:
combined_dataset = combined_dataset.train_test_split(
test_size=args.dataset_mixture.test_split_size, seed=seed
)
logger.info(
f"Split dataset into train and test sets with test size: {args.dataset_mixture.test_split_size}"
)
return combined_dataset
else:
return DatasetDict({"train": combined_dataset})
else:
raise ValueError("No datasets were loaded from the mixture configuration")
else:
raise ValueError("Either `dataset_name` or `dataset_mixture` must be provided")
| {
"repo_id": "huggingface/open-r1",
"file_path": "src/open_r1/utils/data.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/open-r1:tests/utils/test_data.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from dataclasses import asdict
from datasets import DatasetDict, load_dataset
from open_r1.configs import DatasetConfig, DatasetMixtureConfig, ScriptArguments
from open_r1.utils.data import get_dataset
class TestGetDataset(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dataset_name = "trl-internal-testing/zen"
cls.dataset_config = "conversational_preference"
cls.ref_dataset = load_dataset(cls.dataset_name, cls.dataset_config)
def test_dataset_and_config_name(self):
args = ScriptArguments(dataset_name=self.dataset_name, dataset_config=self.dataset_config)
dataset = get_dataset(args)
self.assertIsInstance(dataset, DatasetDict)
self.assertIn("train", dataset)
self.assertEqual(len(dataset["train"]), len(self.ref_dataset["train"]))
def test_unweighted_mixture(self):
"""Mix train and test splits of the same dataset."""
dataset_configs = [
DatasetConfig(id=self.dataset_name, config=self.dataset_config, split="train", columns=None, weight=None),
DatasetConfig(id=self.dataset_name, config=self.dataset_config, split="test", columns=None, weight=None),
]
dataset_mixture = DatasetMixtureConfig(
datasets=dataset_configs,
)
args = ScriptArguments(dataset_mixture=asdict(dataset_mixture))
dataset = get_dataset(args)
self.assertIsInstance(dataset, DatasetDict)
self.assertIn("train", dataset)
self.assertEqual(len(dataset["train"]), len(self.ref_dataset["train"]) + len(self.ref_dataset["test"]))
def test_weighted_mixture(self):
"""Test loading a dataset mixture with weights."""
dataset_configs = [
DatasetConfig(id=self.dataset_name, config=self.dataset_config, split="train", columns=None, weight=0.25),
DatasetConfig(id=self.dataset_name, config=self.dataset_config, split="test", columns=None, weight=0.5),
]
dataset_mixture = DatasetMixtureConfig(
datasets=dataset_configs,
)
args = ScriptArguments(dataset_mixture=asdict(dataset_mixture))
dataset = get_dataset(args)
self.assertIsInstance(dataset, DatasetDict)
self.assertIn("train", dataset)
self.assertEqual(
len(dataset["train"]), len(self.ref_dataset["train"]) // 4 + len(self.ref_dataset["test"]) // 2
)
def test_mixture_and_test_split(self):
"""Test loading a dataset mixture with test split."""
dataset_configs = [
DatasetConfig(
id=self.dataset_name, config=self.dataset_config, split="train[:10]", columns=None, weight=None
),
]
dataset_mixture = DatasetMixtureConfig(datasets=dataset_configs, test_split_size=0.2)
args = ScriptArguments(dataset_name=None, dataset_mixture=asdict(dataset_mixture))
dataset = get_dataset(args)
self.assertIsInstance(dataset, DatasetDict)
self.assertIn("train", dataset)
self.assertIn("test", dataset)
self.assertEqual(len(dataset["train"]), 8)
self.assertEqual(len(dataset["test"]), 2)
def test_mixture_column_selection(self):
"""Test loading a dataset mixture with column selection."""
dataset_configs = [
DatasetConfig(
id=self.dataset_name,
config=self.dataset_config,
split="train",
columns=["prompt", "chosen"],
weight=None,
),
]
dataset_mixture = DatasetMixtureConfig(
datasets=dataset_configs,
)
args = ScriptArguments(dataset_mixture=asdict(dataset_mixture))
dataset = get_dataset(args)
self.assertIsInstance(dataset, DatasetDict)
self.assertIn("train", dataset)
self.assertIn("prompt", dataset["train"].column_names)
self.assertIn("chosen", dataset["train"].column_names)
def test_mixture_with_mismatched_columns(self):
dataset_configs = [
DatasetConfig(
id=self.dataset_name, config=self.dataset_config, split="train", columns=["prompt"], weight=None
),
DatasetConfig(
id=self.dataset_name, config=self.dataset_config, split="train", columns=["chosen"], weight=None
),
]
dataset_mixture = DatasetMixtureConfig(
datasets=dataset_configs,
)
with self.assertRaises(ValueError) as context:
_ = ScriptArguments(dataset_mixture=asdict(dataset_mixture))
self.assertIn("Column names must be consistent", str(context.exception))
def test_no_dataset_name_or_mixture(self):
with self.assertRaises(ValueError) as context:
_ = ScriptArguments(dataset_name=None, dataset_mixture=None)
self.assertIn("Either `dataset_name` or `dataset_mixture` must be provided", str(context.exception))
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "huggingface/open-r1",
"file_path": "tests/utils/test_data.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/open-r1:scripts/pass_rate_filtering/compute_pass_rate.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# example usage python scripts/filter_dataset.py --config recipes/dataset_filtering/config_demo.yaml
import logging
from dataclasses import dataclass
from git import Optional
import torch
import sys
import datasets
import transformers
from datasets import load_dataset
from transformers import set_seed
from open_r1.configs import GRPOConfig, GRPOScriptArguments
from open_r1.rewards import get_reward_funcs
from open_r1.utils import get_tokenizer
from trl import ModelConfig, TrlParser
from trl.data_utils import apply_chat_template
from vllm import LLM, SamplingParams
logger = logging.getLogger(__name__)
@dataclass
class PassRateScriptArguments(GRPOScriptArguments):
# we can be lazy and just use the same script args as GRPO
output_dataset_name: Optional[str] = None
pass_rate_min: float = 0.1
pass_rate_max: float = 0.9
dataset_start_index: Optional[int] = None
dataset_end_index: Optional[int] = None
dataset_split: str = "train"
def main(script_args, training_args, model_args):
# Set seed for reproducibility
set_seed(training_args.seed)
###############
# Setup logging
###############
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Model parameters {model_args}")
logger.info(f"Script parameters {script_args}")
logger.info(f"Training parameters {training_args}")
# Load the dataset
dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config, split=script_args.dataset_split)
if script_args.dataset_start_index is not None and script_args.dataset_end_index is not None:
dataset = dataset.select(range(script_args.dataset_start_index, script_args.dataset_end_index))
# Get reward functions from the registry
reward_funcs = get_reward_funcs(script_args)
# Format into conversation
def make_conversation(example, prompt_column: str = script_args.dataset_prompt_column):
example["prompt_backup"] = example[prompt_column]
prompt = []
if training_args.system_prompt is not None:
prompt.append({"role": "system", "content": training_args.system_prompt})
if prompt_column not in example:
raise ValueError(f"Dataset Question Field Error: {prompt_column} is not supported.")
prompt.append({"role": "user", "content": example[prompt_column]})
return {"prompt": prompt}
dataset = dataset.map(make_conversation)
tokenizer = get_tokenizer(model_args, training_args)
if "messages" in dataset.column_names:
dataset = dataset.remove_columns("messages")
dataset = dataset.map(apply_chat_template, fn_kwargs={"tokenizer": tokenizer})
llm = LLM(
model=model_args.model_name_or_path,
revision=model_args.model_revision,
trust_remote_code=model_args.trust_remote_code,
)
sampling_params=SamplingParams(
temperature=training_args.temperature,
top_p=training_args.top_p,
top_k=training_args.top_k,
n=training_args.num_generations,
max_tokens=training_args.max_completion_length,
)
def batch_score(examples):
prompts = examples["prompt"]
outputs = llm.generate(
prompts,
sampling_params=sampling_params,
use_tqdm=False,
)
repeated_prompts = []
reward_completions = []
grouped_completions = []
for output in outputs:
prompt = output.prompt
group = []
for completion in output.outputs:
text = completion.text
group.append(text)
message = [{"role": "assistant", "content": text}]
repeated_prompts.append(prompt)
reward_completions.append(message)
grouped_completions.append(group)
def repeat_each_element_k_times(list_to_repeat: list, k: int) -> list:
return [element for item in list_to_repeat for element in [item] * k]
rewards_per_func = torch.zeros(len(repeated_prompts), len(reward_funcs))
for i, reward_func in enumerate(reward_funcs):
keys = [key for key in examples.data.keys() if key not in ["prompt", "completion"]]
reward_kwargs = {key: repeat_each_element_k_times(examples[key], training_args.num_generations) for key in keys}
output_reward_func = reward_func(prompts=repeated_prompts, completions=reward_completions, **reward_kwargs)
# Convert None values to NaN
output_reward_func = [reward if reward is not None else torch.nan for reward in output_reward_func]
rewards_per_func[:, i] = torch.tensor(output_reward_func, dtype=torch.float32)
reshaped_rewards = rewards_per_func.view(-1, training_args.num_generations)
examples["pass_rate_generations"] = grouped_completions
examples["pass_rate_rewards"] = reshaped_rewards.tolist()
return examples
dataset = dataset.map(batch_score, batched=True, batch_size=64)
# we need to restore the prompt for the final dataset
def restore_prompt(example):
example["prompt"] = example["prompt_backup"]
return example
dataset = dataset.map(restore_prompt)
dataset = dataset.remove_columns("prompt_backup")
if script_args.output_dataset_name is not None:
output_dataset_name = script_args.output_dataset_name
else:
model_name = model_args.model_name_or_path
if "/" in model_name:
model_name = model_name.split("/")[-1]
model_revision = model_args.model_revision
output_dataset_name = f"{script_args.dataset_name}-{model_name}-{model_revision}-gen"
config_name="default"
filtered_config_name = f"filt-{script_args.pass_rate_min}-{script_args.pass_rate_max}"
if script_args.dataset_start_index is not None and script_args.dataset_end_index is not None:
config_name = f"gen-{script_args.dataset_start_index}-{script_args.dataset_end_index}"
filtered_config_name = f"{filtered_config_name}-{script_args.dataset_start_index}-{script_args.dataset_end_index}"
dataset.push_to_hub(output_dataset_name, config_name=config_name, revision="gen")
def filter_func(example):
rewards = example["pass_rate_rewards"]
# get the mean of the rewards that are not None
mean_reward = torch.nanmean(torch.tensor(rewards, dtype=torch.float32))
return script_args.pass_rate_min < mean_reward < script_args.pass_rate_max
logger.info(f"Filtering dataset with low reward threshold {script_args.pass_rate_min} and high reward threshold {script_args.pass_rate_max}")
logger.info(f"Dataset size before filtering: {dataset}")
dataset = dataset.filter(filter_func)
logger.info(f"Dataset size after filtering: {dataset}")
dataset.push_to_hub(output_dataset_name, config_name=filtered_config_name, revision="pass_rate")
if __name__ == "__main__":
parser = TrlParser((PassRateScriptArguments, GRPOConfig, ModelConfig))
script_args, training_args, model_args = parser.parse_args_and_config()
main(script_args, training_args, model_args)
| {
"repo_id": "huggingface/open-r1",
"file_path": "scripts/pass_rate_filtering/compute_pass_rate.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/open-r1:scripts/morph_router.py | # coding=utf-8
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import asyncio
from fastapi import FastAPI
from pydantic import BaseModel, ConfigDict
from typing import Optional, List
from fastapi import FastAPI, Request
import uvicorn
from dotenv import load_dotenv
import os
load_dotenv()
class BatchRequest(BaseModel):
    """
    Request payload for executing a batch of scripts.

    Attributes:
        scripts (List[str]): Source code of each script to execute.
        languages (List[str]): Programming language of each script, parallel to ``scripts``.
        timeout (int): Maximum allowed execution time for each script, in seconds.
        request_timeout (int): Maximum allowed time for the entire batch request, in seconds.
    """

    scripts: List[str]
    languages: List[str]
    timeout: int
    request_timeout: int
class ScriptResult(BaseModel):
    """
    Result of a single script execution.

    Attributes:
        text (Optional[str]): The output text from the script execution, or None on failure.
        exception_str (Optional[str]): Exception message or details if an error
            occurred during the script's execution.
    """

    text: Optional[str]
    exception_str: Optional[str]
    # Allow arbitrary (non-pydantic) types to appear in field values.
    model_config = ConfigDict(arbitrary_types_allowed=True)
def create_app(args):
    """
    Create and configure the FastAPI application for the MorphCloud router.

    Args:
        args: Parsed CLI arguments providing:
            - max_num_sandboxes (int): The maximum number of concurrent sandboxes allowed.
            - api_key (str): The MorphCloud API key to use.

    Returns:
        FastAPI: A configured application exposing ``/health`` and ``/execute_batch``.
    """
    app = FastAPI()

    # Imported here so the module can be imported without morphcloud installed.
    from morphcloud.api import MorphCloudClient
    from morphcloud.sandbox import Sandbox

    app.state.client = MorphCloudClient(api_key=args.api_key)
    app.state.Sandbox = Sandbox
    # Bounds the number of sandboxes alive at once across all in-flight requests.
    app.state.sandbox_semaphore = asyncio.Semaphore(args.max_num_sandboxes)

    @app.get("/health")
    async def health():
        return {"status": "ok"}

    @app.post("/execute_batch")
    async def execute_batch(batch: BatchRequest, request: Request):
        semaphore = request.app.state.sandbox_semaphore
        client = request.app.state.client
        Sandbox = request.app.state.Sandbox

        timeout = batch.timeout
        # Small margin over the sandbox timeout so asyncio.wait_for fires last.
        asyncio_timeout = batch.timeout + 1

        async def run_script(script: str, language: str) -> ScriptResult:
            sandbox = None
            async with semaphore:
                try:
                    sandbox = await asyncio.to_thread(
                        Sandbox.new,
                        client=client,
                        ttl_seconds=timeout,
                    )
                    execution = await asyncio.wait_for(
                        asyncio.to_thread(
                            sandbox.run_code,
                            script,
                            language=language,
                            timeout=timeout * 1000,  # scaled by 1000 — presumably milliseconds; TODO confirm run_code units
                        ),
                        timeout=asyncio_timeout,
                    )
                    if hasattr(execution, "text") and execution.text:
                        return ScriptResult(text=execution.text, exception_str=None)
                    elif hasattr(execution, "stdout") and execution.stdout:
                        return ScriptResult(text=execution.stdout, exception_str=None)
                    else:
                        return ScriptResult(text="", exception_str="No output from execution")
                except Exception as e:
                    return ScriptResult(text=None, exception_str=str(e))
                finally:
                    # Best-effort cleanup; a failed create leaves sandbox as None.
                    if sandbox:
                        try:
                            await asyncio.to_thread(sandbox.close)
                            await asyncio.to_thread(sandbox.shutdown)
                        except Exception:
                            pass

        tasks = [run_script(script, lang) for script, lang in zip(batch.scripts, batch.languages)]
        return await asyncio.gather(*tasks)

    return app
def parse_args():
    """
    Build and parse the CLI arguments for the MorphCloud router.

    Arguments:
        --host (str): Hostname or IP to bind the server to. Defaults to "0.0.0.0".
        --port (int): Port the server listens on. Defaults to 8001.
        --max_num_sandboxes (int): Maximum number of simultaneous sandboxes. Defaults to 20.
        --api_key (str): MorphCloud API key; falls back to the MORPH_API_KEY environment variable.

    Returns:
        argparse.Namespace: The parsed arguments.

    Raises:
        ValueError: If no API key was supplied via flag or environment.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", default="0.0.0.0")
    parser.add_argument("--port", type=int, default=8001)
    parser.add_argument("--max_num_sandboxes", type=int, default=20)
    parser.add_argument("--api_key", default=os.getenv("MORPH_API_KEY"))
    parsed = parser.parse_args()
    # Guard clause: an API key is mandatory in every configuration.
    if parsed.api_key:
        return parsed
    raise ValueError("MorphCloud API key not provided. Please set MORPH_API_KEY environment variable or use --api_key.")
if __name__ == "__main__":
args = parse_args()
app = create_app(args)
print(f"Starting MorphCloud Router on {args.host}:{args.port}")
uvicorn.run(app, host=args.host, port=args.port) | {
"repo_id": "huggingface/open-r1",
"file_path": "scripts/morph_router.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/open-r1:src/open_r1/utils/code_providers.py | # coding=utf-8
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code execution providers for executing and evaluating code snippets."""
import abc
import asyncio
from typing import List, Optional
from ..utils import is_e2b_available, is_morph_available
if is_e2b_available():
from e2b_code_interpreter import AsyncSandbox
from e2b_code_interpreter.models import Execution
from .routed_sandbox import RoutedSandbox
else:
AsyncSandbox = None
Execution = None
RoutedSandbox = None
if is_morph_available():
from morphcloud.api import MorphCloudClient
from morphcloud.sandbox import Sandbox
from .routed_morph import RoutedMorphSandbox
else:
MorphCloudClient = None
Sandbox = None
RoutedMorphSandbox = None
class CodeExecutionProvider(abc.ABC):
    """Abstract base class for code execution providers."""

    @abc.abstractmethod
    def execute_scripts(self, scripts: List[str], languages: List[str]) -> List[float]:
        """Execute multiple scripts and return their reward values.

        Args:
            scripts: List of code scripts to execute
            languages: Programming language of each script, parallel to ``scripts``

        Returns:
            List of float rewards (one per script)
        """
        pass
class E2BProvider(CodeExecutionProvider):
    """Provider that executes code using E2B sandboxes."""

    def __init__(self, num_parallel: int = 2, e2b_router_url: Optional[str] = None):
        """Initialize the E2B provider.

        Args:
            num_parallel: Number of parallel sandboxes to use
            e2b_router_url: URL for the E2B router (if using router mode)

        Raises:
            ImportError: If the `e2b-code-interpreter` package is not installed.
        """
        if not is_e2b_available():
            raise ImportError(
                "E2B is not available and required for this provider. Please install E2B with "
                "`pip install e2b-code-interpreter` and add an API key to a `.env` file."
            )
        self.num_parallel = num_parallel
        self.e2b_router_url = e2b_router_url

    def execute_scripts(self, scripts: List[str], languages: List[str]) -> List[float]:
        """Execute scripts using E2B sandboxes.

        If e2b_router_url is provided, uses the RoutedSandbox for batch processing.
        Otherwise, uses direct AsyncSandbox with parallelization.

        Args:
            scripts: List of code scripts to execute
            languages: Programming language of each script, parallel to ``scripts``

        Returns:
            List of rewards (one per script). In router mode, unparseable
            outputs yield None; in direct mode they yield 0.0.
        """
        if self.e2b_router_url is not None:
            routed_sandbox = RoutedSandbox(router_url=self.e2b_router_url)
            executions = routed_sandbox.run_code(
                scripts=scripts,
                languages=languages,
                timeout=30,
                request_timeout=28,
            )
            rewards = []
            for execution in executions:
                try:
                    rewards.append(float(execution.text))
                except Exception:
                    rewards.append(None)
            return rewards

        try:
            rewards = self._run_async_from_sync(scripts, languages, self.num_parallel)
        except Exception as e:
            print(f"Error from E2B executor: {e}")
            rewards = [0.0] * len(scripts)
        return rewards

    def _run_async_from_sync(self, scripts: List[str], languages: List[str], num_parallel: int) -> List[float]:
        """Synchronous wrapper around the `_run_async` coroutine."""
        try:
            rewards = asyncio.run(self._run_async(scripts, languages, num_parallel))
        except Exception as e:
            print(f"Error from E2B executor async: {e}")
            raise e
        return rewards

    async def _run_async(self, scripts: List[str], languages: List[str], num_parallel: int) -> List[float]:
        """Run all scripts concurrently, bounded by `num_parallel` sandboxes."""
        semaphore = asyncio.Semaphore(num_parallel)
        # Fix: pair each script with its own language; previously every script
        # received the entire `languages` list.
        tasks = [self._run_script(script, language, semaphore) for script, language in zip(scripts, languages)]
        results = await asyncio.gather(*tasks)
        return list(results)

    async def _run_script(self, script: str, language: str, semaphore: asyncio.Semaphore) -> float:
        """Execute one script in a fresh sandbox and parse its output as a float reward."""
        # We set a timeout margin, as the AsyncSandbox timeout does not seem to work
        # These values are based on running 256 examples with the gold solution
        # from open-r1/verifiable-coding-problems-python_decontaminated
        # see scripts/benchmark_e2b.py
        SANDBOX_TIMEOUT = 30
        MARGIN = 2
        REQUEST_TIMEOUT = SANDBOX_TIMEOUT - MARGIN
        ASYNCIO_TIMEOUT = SANDBOX_TIMEOUT + MARGIN

        sandbox = None  # Fix: keep the name bound so except/finally can't NameError.
        async with semaphore:
            try:
                sandbox = await AsyncSandbox.create(timeout=SANDBOX_TIMEOUT, request_timeout=REQUEST_TIMEOUT)
                execution = await asyncio.wait_for(
                    # Fix: the e2b API takes one `language` per call, not a list.
                    sandbox.run_code(script, language=language),
                    timeout=ASYNCIO_TIMEOUT,
                )
                return float(execution.text)
            except (TypeError, ValueError):
                return 0.0
            except asyncio.TimeoutError:
                print("Operation timed out")
                return 0.0
            except Exception as e:
                sandbox_id = sandbox.sandbox_id if sandbox is not None else "unknown"
                print(f"Error in `_run_script` from E2B sandbox ID {sandbox_id} : {e}")
                return 0.0
            finally:
                if sandbox is not None:
                    try:
                        await sandbox.kill()
                    except Exception as e:
                        print(f"Error from E2B executor kill with sandbox ID {sandbox.sandbox_id} : {e}")
class MorphProvider(CodeExecutionProvider):
    """Provider that executes code using MorphCloud's Sandbox API."""

    def __init__(self, num_parallel: int = 2, morph_router_url: Optional[str] = None):
        """Initialize the Morph provider.

        Args:
            num_parallel: Number of parallel executions to use
            morph_router_url: URL for the MorphCloud router (if using router mode)

        Raises:
            ImportError: If the `morphcloud` package is not installed.
            ValueError: If no API key is available in direct (non-router) mode.
        """
        if not is_morph_available():
            raise ImportError(
                "MorphCloud is not available and required for this provider. Please install MorphCloud with "
                "`pip install morphcloud` and add an API key to a `.env` file."
            )
        try:
            from dotenv import load_dotenv

            load_dotenv()
        except ImportError:
            print("Warning: python-dotenv not installed. Environment variables must be set directly.")

        self.num_parallel = num_parallel
        self.morph_router_url = morph_router_url
        if self.morph_router_url is not None:
            # Router mode: sandbox lifecycle is delegated to the router service.
            self.routed_sandbox = RoutedMorphSandbox(router_url=self.morph_router_url)
            return

        import os

        self.api_key = os.getenv("MORPH_API_KEY")
        if not self.api_key:
            raise ValueError("MorphCloud API key not found. Please set the MORPH_API_KEY environment variable.")
        try:
            self.client = MorphCloudClient(api_key=self.api_key)
            self.Sandbox = Sandbox
        except ImportError as e:
            raise ImportError(f"Required MorphCloud dependencies not installed: {e}")

    def execute_scripts(self, scripts: List[str], languages: List[str]) -> List[float]:
        """Execute scripts using MorphCloud Sandbox API.

        Args:
            scripts: List of scripts to execute
            languages: Programming language of each script, parallel to ``scripts``

        Returns:
            List of float rewards (one per script)
        """
        if hasattr(self, "routed_sandbox"):
            try:
                results = self.routed_sandbox.run_code(
                    scripts=scripts,
                    languages=languages,
                    timeout=90,
                    request_timeout=96,
                )
                rewards = []
                for result in results:
                    try:
                        rewards.append(float(result.text))
                    except (ValueError, AttributeError):
                        rewards.append(0.0)
                return rewards
            except Exception as e:
                print(f"Error from MorphCloud router: {e}")
                return [0.0] * len(scripts)

        import asyncio

        try:
            rewards = asyncio.run(self._run_async(scripts, languages, self.num_parallel))
        except Exception as e:
            print(f"Error from MorphCloud executor: {e}")
            rewards = [0.0] * len(scripts)
        return rewards

    async def _run_async(self, scripts: List[str], languages: List[str], num_parallel: int) -> List[float]:
        """Run multiple scripts concurrently with limited parallelism.

        Args:
            scripts: List of scripts to execute
            languages: Programming language of each script, parallel to ``scripts``
            num_parallel: Maximum number of concurrent executions

        Returns:
            List of rewards
        """
        semaphore = asyncio.Semaphore(num_parallel)
        # Fix: pair each script with its own language; previously every script
        # received the entire `languages` list.
        tasks = [self._run_script(script, language, semaphore) for script, language in zip(scripts, languages)]
        results = await asyncio.gather(*tasks)
        return list(results)

    async def _run_script(self, script: str, language: str, semaphore: asyncio.Semaphore) -> float:
        """Execute a single script in a MorphCloud Sandbox.

        Args:
            script: The script to execute
            language: Programming language of the script
            semaphore: Semaphore to limit concurrency

        Returns:
            Float reward parsed from the execution output (0.0 on any failure)
        """
        SANDBOX_TIMEOUT = 90
        MARGIN = 6
        ASYNCIO_TIMEOUT = SANDBOX_TIMEOUT + MARGIN
        sandbox = None
        async with semaphore:
            try:
                sandbox = await asyncio.to_thread(self.Sandbox.new, client=self.client, ttl_seconds=SANDBOX_TIMEOUT)
                result = await asyncio.wait_for(
                    asyncio.to_thread(
                        sandbox.run_code,
                        script,
                        # Fix: Sandbox.run_code takes a single `language` (see
                        # scripts/morph_router.py usage), not a list.
                        language=language,
                        timeout=SANDBOX_TIMEOUT,
                    ),
                    timeout=ASYNCIO_TIMEOUT,
                )
                # Prefer the last line of output as the reward; fall back to the
                # whole text, then to stdout.
                reward = 0.0
                try:
                    if hasattr(result, "text") and result.text:
                        lines = result.text.strip().split("\n")
                        if lines:
                            try:
                                reward = float(lines[-1])
                            except ValueError:
                                try:
                                    reward = float(result.text.strip())
                                except ValueError:
                                    pass
                    elif hasattr(result, "stdout") and result.stdout:
                        lines = result.stdout.strip().split("\n")
                        if lines:
                            try:
                                reward = float(lines[-1])
                            except ValueError:
                                pass
                except (ValueError, AttributeError):
                    pass
                return reward
            except asyncio.TimeoutError:
                return 0.0
            except Exception:
                return 0.0
            finally:
                # Best-effort cleanup; a failed create leaves sandbox as None.
                if sandbox:
                    try:
                        await asyncio.to_thread(sandbox.close)
                        await asyncio.to_thread(sandbox.shutdown)
                    except Exception:
                        pass
def get_provider(provider_type: str = "e2b", **kwargs) -> CodeExecutionProvider:
    """Factory returning the code execution provider for ``provider_type``.

    Args:
        provider_type: Type of provider to use ("e2b", "morph")
        **kwargs: Additional arguments to pass to the provider

    Returns:
        An instance of CodeExecutionProvider

    Raises:
        ValueError: If ``provider_type`` is not a known provider.
    """
    num_parallel = kwargs.pop("num_parallel", 2)
    if provider_type == "e2b":
        # E2B-specific configuration.
        return E2BProvider(
            num_parallel=num_parallel,
            e2b_router_url=kwargs.pop("e2b_router_url", None),
        )
    if provider_type == "morph":
        # Morph-specific configuration.
        return MorphProvider(
            num_parallel=num_parallel,
            morph_router_url=kwargs.pop("morph_router_url", None),
        )
    raise ValueError(f"Unknown provider type: {provider_type}")
| {
"repo_id": "huggingface/open-r1",
"file_path": "src/open_r1/utils/code_providers.py",
"license": "Apache License 2.0",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/open-r1:src/open_r1/utils/routed_morph.py | # coding=utf-8
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import requests
class RoutedMorphSandbox:
    """
    Client for the MorphCloud router service that mimics the API of MorphCloud's Sandbox.

    This class provides a simple interface to execute code via a central MorphCloud router,
    which manages sandbox creation and cleanup. It allows batch processing of multiple scripts
    in a single request for improved efficiency.

    Attributes:
        router_url (str): The URL of the MorphCloud router service.
        timeout (int): Execution timeout in seconds.
        request_timeout (int): HTTP request timeout in seconds.
    """

    def __init__(self, router_url: str, timeout: int = 300, request_timeout: int = 60):
        """
        Initialize the routed MorphCloud sandbox client.

        Args:
            router_url: The URL of the MorphCloud router, including host and port.
            timeout: Default execution timeout in seconds.
            request_timeout: Default HTTP request timeout in seconds.
        """
        self.router_url = router_url
        self.timeout = timeout
        self.request_timeout = request_timeout

    def run_code(
        self,
        scripts: List[str],
        languages: Optional[List[str]] = None,
        timeout: Optional[int] = None,
        request_timeout: Optional[int] = None,
    ) -> List:
        """
        Execute multiple scripts using MorphCloud via the router.

        Args:
            scripts: List of code scripts to execute.
            languages: List of programming languages for each script. If None, defaults to Python for all scripts.
            timeout: Execution timeout in seconds. If None, uses the instance timeout.
            request_timeout: HTTP request timeout in seconds. If None, uses the instance request_timeout.

        Returns:
            List of result objects with ``text`` and ``exception_str`` attributes,
            one per script. On any transport error, every result carries the error
            in ``exception_str`` and ``text`` is None.
        """
        # Fix: return attribute-bearing *instances* instead of dynamically built
        # class objects (`type("obj", ...)`), which the original appended by mistake.
        from types import SimpleNamespace

        actual_timeout = timeout if timeout is not None else self.timeout
        actual_request_timeout = request_timeout if request_timeout is not None else self.request_timeout

        # Default to Python for all scripts if languages is not provided
        if languages is None:
            languages = ["python"] * len(scripts)

        payload = {
            "scripts": scripts,
            "languages": languages,
            "timeout": actual_timeout,
            "request_timeout": actual_request_timeout,
        }

        try:
            endpoint = f"http://{self.router_url}/execute_batch"
            response = requests.post(endpoint, json=payload, timeout=actual_request_timeout)
            if response.status_code != 200:
                error = f"Request to MorphCloud router failed with status code: {response.status_code}"
                print(error)
                return [SimpleNamespace(text=None, exception_str=error) for _ in scripts]
            return [
                SimpleNamespace(text=item.get("text"), exception_str=item.get("exception_str"))
                for item in response.json()
            ]
        except Exception as e:
            error = f"Error communicating with MorphCloud router: {str(e)}"
            print(error)
            return [SimpleNamespace(text=None, exception_str=error) for _ in scripts]
| {
"repo_id": "huggingface/open-r1",
"file_path": "src/open_r1/utils/routed_morph.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/lily_finetuning/lily_finetuning.py | # This script is based on examples/gralora_finetuning/gralora_finetuning.py
import os
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
)
from peft import LilyConfig, get_peft_model
def train_model(
    base_model: str,
    data_path: str,
    output_dir: str,
    batch_size: int,
    num_epochs: int,
    learning_rate: float,
    cutoff_len: int,
    val_set_size: int,
    eval_step: int,
    save_step: int,
    device: str,
    lily_r: int,
    lily_scaling: float,
    lily_stride_A: int,
    lily_num_B: int,
    lily_target_modules: str,
    hub_model_id: str,
    push_to_hub: bool,
):
    """Fine-tune a causal language model with the Lily PEFT adapter.

    Args:
        base_model: Hub id or local path of the base model and tokenizer.
        data_path: Hub id or local path of the dataset (assumes a "text" column
            and "train"/"test" splits — TODO confirm against the dataset used).
        output_dir: Directory for checkpoints and the final model/tokenizer.
        batch_size: Per-device train and eval batch size.
        num_epochs: Number of training epochs.
        learning_rate: Optimizer learning rate.
        cutoff_len: Max token length used for truncation/padding.
        val_set_size: Validation set size. NOTE(review): currently unused here.
        eval_step: Interval used for ``logging_steps``.
        save_step: Checkpoint save interval.
        device: "auto" to pick an accelerator, or an explicit device string.
        lily_r: Lily rank.
        lily_scaling: Lily scaling factor applied to the adapter output.
        lily_stride_A: Number of consecutive layers sharing one A adapter.
        lily_num_B: Number of shared B adapters.
        lily_target_modules: Comma-separated target module names; falsy selects
            the default attention/MLP projection list below.
        hub_model_id: Hub repository to push to (when ``push_to_hub``).
        push_to_hub: Whether to push the trained model to the Hugging Face Hub.
    """
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    hf_token = os.getenv("HF_TOKEN")
    # Setup device
    if device == "auto":
        # NOTE(review): this branch yields a plain string, while the else branch
        # yields a torch.device — both are accepted by model.to(), but the types differ.
        device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
    else:
        device = torch.device(device)
    print(f"Using device: {device}")
    # load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(base_model, token=hf_token)
    model = AutoModelForCausalLM.from_pretrained(base_model, token=hf_token)
    # Lily config for the PEFT model
    lily_config = LilyConfig(
        r=lily_r,
        scaling=lily_scaling,
        stride_A=lily_stride_A,
        num_B=lily_num_B,
        target_modules=(
            lily_target_modules.split(",")
            if lily_target_modules
            else ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
        ),
    )
    # get the peft model with Lily config
    model = get_peft_model(model, lily_config)
    model.print_trainable_parameters()
    model.to(device)
    tokenizer.pad_token = tokenizer.eos_token
    # Load the dataset
    dataset = load_dataset(data_path)

    def tokenize_function(examples):
        # Pad/truncate to a fixed length so examples batch cleanly.
        inputs = tokenizer(examples["text"], padding="max_length", truncation=True, max_length=cutoff_len)
        inputs["labels"] = inputs["input_ids"].copy()  # setting labels for a language modeling task
        return inputs

    # Tokenize the dataset and prepare for training
    tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=dataset["train"].column_names)
    # Data collator to dynamically pad the batched examples
    data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
    # Define training arguments
    training_args = TrainingArguments(
        output_dir=output_dir,
        num_train_epochs=num_epochs,
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        warmup_steps=100,
        weight_decay=0.01,
        logging_steps=eval_step,
        save_steps=save_step,
        save_total_limit=2,
        push_to_hub=push_to_hub,
        hub_model_id=hub_model_id,
        gradient_accumulation_steps=16,
        fp16=True,
        learning_rate=learning_rate,
        hub_token=hf_token,
    )
    # Clear device cache to free memory
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    elif torch.xpu.is_available():
        torch.xpu.empty_cache()
    # Initialize the Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["test"],
        data_collator=data_collator,
    )
    # Start model training
    trainer.train()
    # Save and push the trained model and tokenizer
    if push_to_hub:
        trainer.push_to_hub(commit_message="Fine-tuned model with Lily")
    # Save the model and tokenizer locally
    model.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)
# CLI entry point: collect all hyperparameters and paths, then delegate to train_model.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Fine-tune LLaMA with Lily and PEFT")
    # Model / data / output locations
    parser.add_argument("--base_model", type=str, default="meta-llama/Llama-3.2-3B", help="Base model path or name")
    parser.add_argument(
        "--data_path", type=str, default="timdettmers/openassistant-guanaco", help="Dataset path or name"
    )
    parser.add_argument(
        "--output_dir", type=str, default="path/to/output", help="Output directory for the fine-tuned model"
    )
    # Training hyperparameters
    parser.add_argument("--batch_size", type=int, default=1, help="Batch size")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate")
    parser.add_argument("--cutoff_len", type=int, default=512, help="Cutoff length for tokenization")
    parser.add_argument("--val_set_size", type=int, default=500, help="Validation set size")
    parser.add_argument("--eval_step", type=int, default=10, help="Evaluation step interval")
    parser.add_argument("--save_step", type=int, default=100, help="Save step interval")
    parser.add_argument("--device", type=str, default="auto", help="Device to use for training")
    # Lily adapter hyperparameters
    parser.add_argument("--lily_r", type=int, default=32, help="Lily rank")
    parser.add_argument(
        "--lily_scaling", type=float, default=2.0, help="Lily scaling factor applied to adapter output"
    )
    parser.add_argument(
        "--lily_stride_A", type=int, default=4, help="Number of consecutive layers sharing one A adapter"
    )
    parser.add_argument("--lily_num_B", type=int, default=7, help="Number of shared B adapters (must be >= 2)")
    parser.add_argument(
        "--lily_target_modules", type=str, default=None, help="Comma-separated list of target modules for Lily"
    )
    # Hugging Face Hub options
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default="path/to/repo",
        help="Repository name to push the model on the Hugging Face Hub",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to Hugging Face Hub")
    args = parser.parse_args()

    train_model(
        base_model=args.base_model,
        data_path=args.data_path,
        output_dir=args.output_dir,
        batch_size=args.batch_size,
        num_epochs=args.num_epochs,
        learning_rate=args.learning_rate,
        cutoff_len=args.cutoff_len,
        val_set_size=args.val_set_size,
        eval_step=args.eval_step,
        save_step=args.save_step,
        device=args.device,
        lily_r=args.lily_r,
        lily_scaling=args.lily_scaling,
        lily_stride_A=args.lily_stride_A,
        lily_num_B=args.lily_num_B,
        lily_target_modules=args.lily_target_modules,
        hub_model_id=args.hub_model_id,
        push_to_hub=args.push_to_hub,
    )
| {
"repo_id": "huggingface/peft",
"file_path": "examples/lily_finetuning/lily_finetuning.py",
"license": "Apache License 2.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/peft:src/peft/tuners/lily/config.py | # Copyright 2026-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class LilyConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`LilyModel`].
Args:
r (`int`):
Lily's rank. Determines the inner hidden dimension of each adapter and the rank of the weight update `A @
B`. In Lily, since the number of adapters is typically smaller than in LoRA, each adapter needs to carry
more capacity, so it is recommended to use a larger `r` than in LoRA — typically `2x`, `3x`, or `4x` the
LoRA rank you would normally use. The total number of trainable parameters scales with `r * (total_layers /
stride_A + num_B)`, so increasing `r` while keeping `stride_A` large and `num_B` small is the recommended
trade-off.
stride_A (`int`):
The number of consecutive layers that share one A adapter. For example, if `stride_A=4`, every 4 adjacent
layers share the same A adapter, resulting in `total_layers / stride_A` distinct A adapters in total. The A
adapter compresses the input into a low-rank representation of size `r`. `stride_A` should be no less than
1. Suggested values: `2`, `3`, or `4` (i.e. sharing every 2, 3, or 4 layers). Keeping `stride_A` large
(fewer distinct A adapters) and increasing `r` instead leads to better performance than the opposite
trade-off (small `stride_A`, small `r`). Setting `stride_A=1` means every layer has its own A adapter.
NOTE: the A sharing happens within each target (layers with the same target suffix). For example, if your
target_modules are `['q_proj', 'v_proj']` and you set `stride_A=2`, then every 2 adjacent q_proj layers
will share an A adapter, and every 2 adjacent v_proj layers will share another A adapter, but the q_proj
and v_proj layers will not share A adapters with each other since they have different suffixes.
num_B (`int`):
The number of shared B adapters. Unlike A adapters (which are grouped by layer), all B adapters are shared
globally across every layer. For each forward pass, a router computes a weighted combination of all `num_B`
B adapters (using softmax-normalized weights) to produce a single combined B adapter, which then projects
the low-rank representation back to the original dimension. It is recommended to set `num_B` to a similar
order as `total_layers / stride_A`. Suggested values: `total_layers / 2`, `total_layers / 3`, or
`total_layers / 4`. Similar to `stride_A`, prefer smaller `num_B` with larger `r` over larger `num_B` with
smaller `r`. NOTE: to train the router, you need at least 2 B adapters (i.e. `num_B >= 2`), since the
router learns to compute a weighted combination of the B adapters. NOTE: the B sharing happens within each
target (layers with the same target suffix). For example, if your target_modules are `['q_proj', 'v_proj']`
and you set `num_B=4`, then there will be 4 B adapters shared across all q_proj layers, and another 4 B
adapters shared across all v_proj layers, but the q_proj and v_proj layers will not share B adapters with
each other since they have different suffixes.
target_modules (`Union[List[str], str]`, *optional*):
The names of the modules to apply Lily to. Can be a list of module name strings (e.g. `['q_proj',
'v_proj']`) or a regex pattern (e.g. `'.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'`). If not
specified, Lily will be applied to all supported linear layers.
scaling (`float`):
A scalar multiplier applied to the combined adapter output (`scaling * A @ combined_B`) before adding it to
the frozen weight's forward pass. Unlike LoRA, Lily does not use an `alpha / r` formulation; instead,
`scaling` is a direct multiplier. This design makes it straightforward to sweep over values on a log scale
(e.g. `0.01`, `0.1`, `1.0`, `10.0`). The optimal value is task-dependent and should be treated as a
hyperparameter. We recommend starting with `1.0`.
modules_to_save (`List[str]`, *optional*):
List of modules apart from Lily layers to be set as trainable and saved in the final checkpoint. For
example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are
randomly initialized and as such need to be trainable and saved.
exclude_modules (`Union[List[str], str]`, *optional*):
The names of the modules to not apply the adapter. When passing a string, a regex match will be performed.
When passing a list of strings, either an exact match will be performed or it is checked if the name of the
module ends with any of the passed strings.
layers_to_transform (`Union[list[int], int]`, *optional*):
The layer indexes to transform, if this argument is specified, PEFT will transform only the layers indexes
that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at
this index.
layers_pattern (`Optional[Union[List[str], str]]`, *optional*):
The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is
not in the common layers pattern. This should target the `nn.ModuleList` of the model, which is often
called `'layers'` or `'h'`.
init_weights (`bool`):
Whether to initialize Lily adapter weights using the default initialization scheme: A matrices are
initialized with Kaiming uniform, and B matrices are initialized to zero, ensuring that the adapter output
is zero at the start of training and does not disturb the pretrained model. It is strongly recommended to
keep this as `True` unless you have a specific reason to change it.
"""
r: int = field(
default=32,
metadata={
"help": (
"Lily's rank. Determines the inner hidden dimension of each adapter and the rank of the "
"weight update `A @ B`. In Lily, since the number of adapters is typically smaller than in LoRA, "
"each adapter needs to carry more capacity, so it is recommended to use a larger `r` than "
"in LoRA — typically `2x`, `3x`, or `4x` the LoRA rank you would normally use. "
"The total number of trainable parameters scales with `r * (total_layers / stride_A + num_B)`, "
"so increasing `r` while keeping `stride_A` large and `num_B` small is the recommended trade-off."
)
},
)
stride_A: int = field(
default=1,
metadata={
"help": (
"The number of consecutive layers that share one A adapter. For example, if `stride_A=4`, "
"every 4 adjacent layers share the same A adapter, resulting in `total_layers / stride_A` "
"distinct A adapters in total."
"The A adapter compresses the input into a low-rank representation of size `r`. "
"`stride_A` should be no less than 1."
"Suggested values: `2`, `3`, or `4` (i.e. sharing every 2, 3, or 4 layers). "
"Keeping `stride_A` large (fewer distinct A adapters) and increasing `r` instead leads to "
"better performance than the opposite trade-off (small `stride_A`, small `r`). "
"Setting `stride_A=1` means every layer has its own A adapter."
"Note: A sharing happens within each target module type independently. For example, if "
"`target_modules=['q_proj', 'v_proj']` and `stride_A=2`, then every 2 adjacent `q_proj` "
"layers share one A adapter and every 2 adjacent `v_proj` layers share another A adapter, "
"but `q_proj` and `v_proj` layers never share A adapters with each other."
)
},
)
num_B: int = field(
default=2,
metadata={
"help": (
"The number of shared B adapters. Unlike A adapters (which are grouped by layer), "
"all B adapters are shared globally across every layer. For each forward pass, a router "
"computes a weighted combination of all `num_B` B adapters (using softmax-normalized "
"weights) to produce a single combined B adapter, which then projects the low-rank "
"representation back to the original dimension. It is recommended to set `num_B` to a "
"similar order as `total_layers / stride_A`. Suggested values: `total_layers / 2`, "
"`total_layers / 3`, or `total_layers / 4`. Similar to `stride_A`, prefer smaller `num_B` "
"with larger `r` over larger `num_B` with smaller `r`. "
"NOTE: to train the router, you need at least 2 B adapters (i.e. `num_B >= 2`), since the "
"router learns to compute a weighted combination of the B adapters."
"Note: B sharing happens within each target module type independently. For example, if "
"`target_modules=['q_proj', 'v_proj']` and `num_B=4`, then there will be 4 B adapters "
"shared across all `q_proj` layers and another 4 B adapters shared across all `v_proj` "
"layers, but `q_proj` and `v_proj` layers never share B adapters with each other."
)
},
)
scaling: float = field(
default=1.0,
metadata={
"help": (
"A scalar multiplier applied to the combined adapter output (`scaling * A @ combined_B`) "
"before adding it to the frozen weight's forward pass. Unlike LoRA, Lily does not use an "
"`alpha / r` formulation; instead, `scaling` is a direct multiplier. This design "
"makes it straightforward to sweep over values on a log scale (e.g. `0.01`, `0.1`, "
"`1.0`, `10.0`). The optimal value is task-dependent and should be treated as a "
"hyperparameter. We recommend starting with `1.0`."
)
},
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"List of module names or regex expression of the module names to replace with Lily. "
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. "
"If not specified, Lily will be applied to all supported linear layers."
)
},
)
exclude_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"List of module names or regex expression of the module names to exclude from Lily. "
"When passing a string, a regex match will be performed. When passing a list of strings, "
"either an exact match will be performed or it is checked if the name of the module ends "
"with any of the passed strings."
)
},
)
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": (
"List of modules apart from Lily layers to be set as trainable and saved in the final checkpoint. "
"For example, in Sequence Classification or Token Classification tasks, "
"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
)
},
)
layers_to_transform: Optional[Union[list[int], int]] = field(
default=None,
metadata={
"help": (
"The layer indexes to transform, if this argument is specified, PEFT will transform only the layers "
"indexes that are specified inside this list. If a single integer is passed, PEFT will transform only "
"the layer at this index."
)
},
)
layers_pattern: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"The layer pattern name, used only if `layers_to_transform` is different to None and if the layer "
"pattern is not in the common layers pattern. This should target the `nn.ModuleList` of the model, "
"which is often called `'layers'` or `'h'`."
)
},
)
init_weights: bool = field(
default=True,
metadata={
"help": (
"Whether to initialize Lily adapter weights using the default initialization scheme: A matrices are "
"initialized with Kaiming uniform, and B matrices are initialized to zero, ensuring that the adapter "
"output is zero at the start of training and does not disturb the pretrained model. It is strongly "
"recommended to keep this as `True` unless you have a specific reason to change it."
)
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.LILY
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
self.exclude_modules = (
set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules
)
if self.layers_pattern and not self.layers_to_transform:
raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified.")
if self.stride_A < 1:
raise ValueError("`stride_A` must be at least 1.")
if self.num_B < 2:
raise ValueError("`num_B` must be at least 2 for the router to be trained.")
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/lily/config.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/lily/layer.py | # Copyright 2026-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import warnings
from typing import Any, Optional
import torch
import torch.nn.functional as F
from torch import nn
from peft.tuners.tuners_utils import BaseTunerLayer
class LilyLayer(BaseTunerLayer):
    """Base mixin holding per-adapter Lily state (A, B, router) for a wrapped layer."""

    # All names of layers that may contain (trainable) adapter weights
    adapter_layer_names: tuple[str, ...] = ("lily_A", "lily_B", "lily_router")
    # All names of other parameters that may contain adapter-related parameters
    other_param_names: tuple[str, ...] = ("r", "scaling", "stride_A", "num_B")

    def __init__(self, base_layer: nn.Module, **kwargs) -> None:
        """Initialize empty per-adapter containers and infer in/out features from `base_layer`."""
        self.base_layer = base_layer
        # Per-adapter hyperparameters, keyed by adapter name.
        self.r = {}
        self.scaling = {}
        self.stride_A = {}
        self.num_B = {}
        # Per-adapter trainable modules, keyed by adapter name.
        self.lily_A = nn.ModuleDict({})
        self.lily_B = nn.ModuleDict({})
        self.lily_router = nn.ModuleDict({})
        self.kwargs = kwargs
        self._disable_adapters = False
        self.merged_adapters = []
        base_layer = self.get_base_layer()
        if isinstance(base_layer, nn.Linear):
            in_features, out_features = base_layer.in_features, base_layer.out_features
        else:
            # possibly support user provided custom layer types using dynamic dispatch
            if hasattr(base_layer, "in_features") and hasattr(base_layer, "out_features"):
                in_features, out_features = base_layer.in_features, base_layer.out_features
            else:
                # Unknown layer type: features stay None; downstream code may fail later.
                in_features, out_features = None, None
                warnings.warn(
                    f"Unsupported layer type '{type(base_layer)}' encountered, proceed at your own risk.", UserWarning
                )
        self.in_features = in_features
        self.out_features = out_features

    def update_layer(
        self,
        adapter_name,
        r,
        scaling,
        stride_A,
        num_B,
        lily_A: Optional[nn.Linear] = None,
        lily_B: Optional[nn.Linear] = None,
        init_weights: bool = True,
        inference_mode: bool = False,
    ):
        """Create (or reuse, via `lily_A`/`lily_B`) the adapter modules for `adapter_name`.

        `lily_B` packs all `num_B` expert matrices into one Linear of out_features -> num_B * r;
        the router maps the rank-r hidden state to `num_B` expert logits.
        """
        # collect the kwargs
        kwargs = locals().copy()
        del kwargs["self"]
        if r <= 0:
            raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
        self.r[adapter_name] = r
        self.scaling[adapter_name] = scaling
        # Actual trainable parameters
        self.lily_A[adapter_name] = lily_A if lily_A is not None else nn.Linear(self.in_features, r, bias=False)
        self.lily_B[adapter_name] = (
            lily_B if lily_B is not None else nn.Linear(self.out_features, num_B * r, bias=False)
        )
        self.lily_router[adapter_name] = nn.Linear(r, num_B, bias=False)
        self.stride_A[adapter_name] = stride_A
        self.num_B[adapter_name] = num_B
        self.reset_lily_parameters(adapter_name, init_weights=init_weights)  # initialize the parameters
        self._move_adapter_to_device_of_base_layer(adapter_name)
        self.set_adapter(self.active_adapters, inference_mode=inference_mode)

    def reset_lily_parameters(self, adapter_name, init_weights: bool = True):
        """Initialize A and the router with Kaiming uniform; B is zeroed by default so the
        adapter output is zero at the start of training (set `init_weights=False` to randomize B)."""
        if adapter_name in self.lily_A:
            nn.init.kaiming_uniform_(self.lily_A[adapter_name].weight, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.lily_router[adapter_name].weight, a=math.sqrt(5))
            if not init_weights:
                nn.init.kaiming_uniform_(self.lily_B[adapter_name].weight, a=math.sqrt(5))
            else:
                nn.init.zeros_(self.lily_B[adapter_name].weight)
class Linear(nn.Module, LilyLayer):
    # Lily implemented in a dense layer
    def __init__(
        self,
        base_layer,
        adapter_name: str,
        r: int = 32,
        scaling: float = 1.0,
        stride_A: int = 1,
        num_B: int = 2,
        lily_A: nn.Linear = None,
        lily_B: nn.Linear = None,
        init_weights: bool = True,
        **kwargs,
    ) -> None:
        """Wrap `base_layer` and register the first Lily adapter `adapter_name`."""
        super().__init__()
        LilyLayer.__init__(self, base_layer, **kwargs)
        self._active_adapter = adapter_name
        self.update_layer(
            adapter_name,
            r,
            scaling=scaling,
            lily_A=lily_A,
            lily_B=lily_B,
            stride_A=stride_A,
            num_B=num_B,
            init_weights=init_weights,
        )

    def get_delta_weight(self, adapter) -> torch.Tensor:
        """
        Compute the delta weight for the given adapter.

        Args:
            adapter (str):
                The name of the adapter for which the delta weight should be computed.

        Raises:
            NotImplementedError: Lily's weight update is input-dependent (routed), so a
                static delta weight cannot be materialized.
        """
        raise NotImplementedError("This method is not supported for Lily.")

    def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
        # Merging is impossible for the same reason as get_delta_weight: the routed update
        # depends on the input, so it cannot be folded into the frozen weight.
        raise NotImplementedError("This method is not supported for Lily.")

    def unmerge(self) -> None:
        # See merge(): Lily adapters are never merged, so there is nothing to unmerge.
        raise NotImplementedError("This method is not supported for Lily.")

    def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
        """Base layer output plus, per active adapter, `scaling * A(x) @ combined_B`."""
        result = self.base_layer(x, *args, **kwargs)
        if self.disable_adapters or not self.active_adapters:
            return result
        torch_result_dtype = result.dtype
        lily_A_keys = self.lily_A.keys()
        for active_adapter in self.active_adapters:
            if active_adapter not in lily_A_keys:
                continue
            lily_A = self.lily_A[active_adapter]
            lily_B = self.lily_B[active_adapter]
            router = self.lily_router[active_adapter]
            num_B = self.num_B[active_adapter]
            # lily_B.weight is (num_B * r, out_features); view it as num_B expert matrices.
            B = lily_B.weight.reshape(num_B, -1, lily_B.weight.shape[1])
            scaling = self.scaling[active_adapter]
            x = self._cast_input_dtype(x, lily_A.weight.dtype)
            hidden = lily_A(x)
            router_logits = router(hidden)  # [B, N, num_of_experts]
            router_probability = F.softmax(router_logits, dim=-1)  # [B, N, num_of_experts]
            # Average the routing weights over all positions in the batch: one global
            # expert mixture per forward pass, not a per-token mixture.
            expert_probabilities = router_probability.reshape(-1, num_B).mean(dim=0)
            # Weighted sum of expert matrices -> a single (r, out_features) combined B.
            combined_B = torch.einsum("e,eio->io", expert_probabilities, B)
            delta = torch.matmul(hidden, combined_B)
            result = result + (delta * scaling).to(torch_result_dtype)
        return result

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "lily." + rep
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/lily/layer.py",
"license": "Apache License 2.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/lily/model.py | # Copyright 2026-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import torch
from torch import nn
from peft.tuners.tuners_utils import (
BaseTuner,
BaseTunerLayer,
)
from peft.utils import TRANSFORMERS_MODELS_TO_LILY_TARGET_MODULES_MAPPING
from .config import LilyConfig
from .layer import LilyLayer, Linear
class LilyModel(BaseTuner):
    """
    Creates a Low-Rank Interconnected Adaptation Across Layers (Lily) model from a pretrained transformers model.

    The method is described in detail in https://arxiv.org/abs/2407.09946.

    Args:
        model ([`torch.nn.Module`]): The model to be adapted.
        config ([`LilyConfig`]): The configuration of the Lily model.
        adapter_name (`str`): The name of the adapter, defaults to `"default"`.

    Returns:
        `torch.nn.Module`: The Lily PEFT model.

    **Attributes**:
        - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
        - **peft_config** ([`LilyConfig`]): The configuration of the Lily model.
    """

    prefix: str = "lily_"
    tuner_layer_cls = LilyLayer
    target_module_mapping = TRANSFORMERS_MODELS_TO_LILY_TARGET_MODULES_MAPPING

    def _create_and_replace(
        self,
        lily_config,
        adapter_name,
        target,
        target_name,
        parent,
        current_key,
        **optional_kwargs,
    ):
        """
        Create a new Lily layer with independent A and B for each layer. Sharing of A/B across layers is deferred to
        _post_injection_hook.
        """
        if current_key is None:
            raise ValueError("Current Key shouldn't be `None`")
        if isinstance(target, LilyLayer):
            # Layer already wrapped: just register an additional adapter on it.
            target.update_layer(
                adapter_name,
                lily_config.r,
                scaling=lily_config.scaling,
                stride_A=lily_config.stride_A,
                num_B=lily_config.num_B,
                init_weights=lily_config.init_weights,
            )
        else:
            new_module = self._create_new_module(lily_config, adapter_name, target)
            if adapter_name not in self.active_adapters:
                # Freeze adapters that are created but not currently active.
                new_module.requires_grad_(False)
            self._replace_module(parent, target_name, new_module, target)

    @staticmethod
    def _create_new_module(lily_config, adapter_name, target):
        """Instantiate the Lily wrapper for `target`; only nn.Linear bases are supported."""
        if isinstance(target, BaseTunerLayer):
            target_base_layer = target.get_base_layer()
        else:
            target_base_layer = target
        if isinstance(target_base_layer, torch.nn.Linear):
            return Linear(
                target,
                adapter_name,
                r=lily_config.r,
                scaling=lily_config.scaling,
                stride_A=lily_config.stride_A,
                num_B=lily_config.num_B,
                init_weights=lily_config.init_weights,
            )
        raise NotImplementedError(f"Lily does not support target modules of type {type(target_base_layer)} yet.")

    def _post_injection_hook(self, model: nn.Module, config: LilyConfig, adapter_name: str) -> None:
        """
        After all layers have been independently initialized, apply A/B sharing across layers.

        A sharing: for each (target_module_suffix, weight_shape) group, consecutive blocks of `stride_A` layers share
        the same A. The first layer in each block keeps its own A; subsequent layers in the block have their lily_A
        replaced by the group leader's.

        B sharing: all layers in the same (target_module_suffix, weight_shape) group share the B from the very first
        layer in that group.

        Both lily_A and lily_B are nn.Linear modules, so they move correctly with model.to(device) via standard
        nn.Module parameter propagation.

        Note: (target_module_suffix, weight_shape) is used as the group key rather than target_module_suffix alone, to
        correctly handle architectures like UNet where the same target key can appear with different shapes across
        layers.
        """
        stride_A = config.stride_A
        # Collect all adapted LilyLayer modules in traversal order, grouped by
        # (target_module_suffix, weight_shape).
        # Maps (target_suffix, weight_shape) -> list of LilyLayer in traversal order.
        target_to_layers: dict[tuple[str, torch.Size], list[LilyLayer]] = {}
        for key, module in model.named_modules():
            if not isinstance(module, LilyLayer):
                continue
            if adapter_name not in module.lily_A:
                continue
            base = module.get_base_layer()
            shape = base.weight.shape  # (out_features, in_features)
            # Find the longest matching target suffix for this key.
            matched_suffix = None
            if isinstance(config.target_modules, str):
                matched_suffix = config.target_modules
            else:
                # Bug fix: the previous implementation kept the LAST iterated match. Since
                # `target_modules` is normalized to a set (arbitrary iteration order), groups
                # were nondeterministic when suffixes overlap (e.g. "proj" vs "q_proj").
                # Selecting the longest match makes grouping deterministic and most specific.
                matches = [suffix for suffix in config.target_modules if key.endswith(suffix)]
                if matches:
                    matched_suffix = max(matches, key=len)
            if matched_suffix is None:
                # Should not happen since inject_adapter already matched this layer
                continue
            group_key = (matched_suffix, shape)
            if group_key not in target_to_layers:
                target_to_layers[group_key] = []
            target_to_layers[group_key].append(module)
        # Apply A and B sharing for each (target_suffix, shape) group.
        for (target_suffix, shape), layers in target_to_layers.items():
            # B sharing: all layers share the first layer's B
            shared_B = layers[0].lily_B[adapter_name]
            for i, layer in enumerate(layers):
                if i != 0:
                    layer.lily_B[adapter_name] = shared_B
                # A sharing: layers within the same stride_A block share the group leader's A.
                # Group leader is the first layer in each block of stride_A consecutive layers.
                group_idx = (i // stride_A) * stride_A
                if i != group_idx:
                    layer.lily_A[adapter_name] = layers[group_idx].lily_A[adapter_name]
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/lily/model.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/lora_finetuning_transformer_engine/lora_finetuning_te.py | #!/usr/bin/env python3
"""
Transformer Engine ESM2 LoRA fine-tuning example using Hugging Face Trainer.
This script demonstrates:
1. Loading a Transformer Engine-based ESM2 token classification model
2. Applying LoRA adapters with PEFT
3. Building a synthetic protein token-classification dataset in code
4. Training with the Hugging Face Trainer (no DDP setup required)
"""
import os
# TE-backed models are incompatible with Trainer's DataParallel wrapping.
# Pin to a single GPU before torch is imported so torch.cuda.device_count() == 1.
os.environ.setdefault("CUDA_VISIBLE_DEVICES", "0")
import argparse
import random
import numpy as np
import pandas as pd
import torch
from datasets import Dataset
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorForTokenClassification,
Trainer,
TrainingArguments,
)
from peft import LoraConfig, TaskType, get_peft_model
# Three-state secondary-structure label space: Helix / strand (E) / Coil.
SS3_ID2LABEL = {0: "H", 1: "E", 2: "C"}
SS3_LABEL2ID = {label: idx for idx, label in SS3_ID2LABEL.items()}
# The 20 canonical amino acids used to synthesize random sequences.
AMINO_ACIDS = "ACDEFGHIKLMNPQRSTVWY"
# Residue groups used to derive synthetic labels (helix-prone / beta-prone residues).
HELIX_AA = set("AELMQKRH")
BETA_AA = set("VIFYWT")
def parse_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which case
            arguments are read from ``sys.argv`` (backward compatible). Passing an
            explicit list makes the parser testable without mutating global state.

    Returns:
        argparse.Namespace with all training/LoRA/dataset options.
    """
    parser = argparse.ArgumentParser(description="Transformer Engine ESM2 LoRA fine-tuning with Hugging Face Trainer")
    parser.add_argument(
        "--base_model",
        type=str,
        default="nvidia/esm2_t6_8M_UR50D",
        help="Transformer Engine ESM2 model name or path",
    )
    parser.add_argument("--output_dir", type=str, default="./esm2_lora_output", help="Output directory")
    parser.add_argument("--max_length", type=int, default=128, help="Maximum sequence length")
    parser.add_argument("--num_train_samples", type=int, default=256, help="Number of synthetic training samples")
    parser.add_argument("--num_eval_samples", type=int, default=64, help="Number of synthetic evaluation samples")
    parser.add_argument("--min_seq_len", type=int, default=32, help="Minimum sequence length")
    parser.add_argument("--max_seq_len", type=int, default=96, help="Maximum sequence length")
    parser.add_argument("--batch_size", type=int, default=8, help="Per-device batch size")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs")
    parser.add_argument("--learning_rate", type=float, default=5e-4, help="Learning rate")
    parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay")
    parser.add_argument("--logging_steps", type=int, default=10, help="Logging frequency (steps)")
    parser.add_argument("--eval_steps", type=int, default=25, help="Evaluation frequency (steps)")
    parser.add_argument("--save_steps", type=int, default=25, help="Save frequency (steps)")
    parser.add_argument("--seed", type=int, default=42, help="Random seed")
    parser.add_argument("--lora_r", type=int, default=8, help="LoRA rank")
    parser.add_argument("--lora_alpha", type=int, default=16, help="LoRA alpha")
    parser.add_argument("--lora_dropout", type=float, default=0.05, help="LoRA dropout")
    parser.add_argument(
        "--trust_remote_code",
        action="store_true",
        default=False,
        help="Allow loading model code from the Hub. Required for models like nvidia/esm2_*.",
    )
    parser.add_argument(
        "--train_parquet",
        type=str,
        default=None,
        help="Path to a training parquet file with Sequence and Secondary_structure columns. "
        "When provided, the synthetic dataset is not generated.",
    )
    parser.add_argument(
        "--val_parquet",
        type=str,
        default=None,
        help="Path to a validation parquet file with the same schema as --train_parquet.",
    )
    args = parser.parse_args(argv)
    # The parquet paths are only meaningful as a pair: fail fast on a half-specified pair.
    if bool(args.train_parquet) != bool(args.val_parquet):
        parser.error("--train_parquet and --val_parquet must both be provided or both omitted.")
    return args
def set_seed(seed: int):
    """Seed every RNG in use (python, numpy, torch, and CUDA if present) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def build_synthetic_sequences(num_samples: int, min_len: int, max_len: int):
    """Generate `num_samples` random amino-acid strings with lengths in [min_len, max_len]."""
    return [
        "".join(random.choices(AMINO_ACIDS, k=random.randint(min_len, max_len)))
        for _ in range(num_samples)
    ]
def ss_char_to_label(char: str) -> int:
    """Map a secondary-structure character to its class id; unknown characters fall back to coil ('C')."""
    fallback = SS3_LABEL2ID["C"]
    return SS3_LABEL2ID.get(char, fallback)
def tokenize_and_align_labels(sequences, label_strings, tokenizer, max_length: int):
    """Tokenize protein sequences and align per-residue label strings to token positions.

    Residue i maps to token position i + 1 (position 0 and the final position are special
    tokens and keep the ignore index -100).
    """
    out_ids, out_masks, out_labels = [], [], []
    for seq, label_str in zip(sequences, label_strings):
        enc = tokenizer(
            seq,
            truncation=True,
            max_length=max_length,
            add_special_tokens=True,
        )
        ids = enc["input_ids"]
        aligned = [-100] * len(ids)
        # Clip to whichever is shortest: sequence, label string, or tokenized room
        # between the two special tokens.
        usable = min(len(seq), len(label_str), len(ids) - 2)
        for pos in range(usable):
            aligned[pos + 1] = ss_char_to_label(label_str[pos])
        out_ids.append(ids)
        out_masks.append(enc["attention_mask"])
        out_labels.append(aligned)
    return {
        "input_ids": out_ids,
        "attention_mask": out_masks,
        "labels": out_labels,
    }
def load_parquet_dataset(train_path: str, val_path: str, tokenizer, max_length: int):
    """Load train/val parquet files and return tokenized Datasets."""

    def _prepare(frame):
        # Tokenize and drop the raw columns so only model inputs remain.
        return Dataset.from_pandas(frame).map(
            lambda ex: tokenize_and_align_labels(ex["Sequence"], ex["Secondary_structure"], tokenizer, max_length),
            batched=True,
            remove_columns=frame.columns.tolist(),
        )

    return _prepare(pd.read_parquet(train_path)), _prepare(pd.read_parquet(val_path))
def compute_metrics(eval_pred):
    """Compute token-level accuracy over positions whose label is not the ignore index (-100)."""
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    valid = labels != -100
    n_valid = valid.sum()
    if n_valid <= 0:
        return {"token_accuracy": 0.0}
    n_correct = ((preds == labels) & valid).sum()
    return {"token_accuracy": float(n_correct / n_valid)}
def residue_to_ss_char(aa: str) -> str:
    """Assign a synthetic H/E/C secondary-structure character based on fixed residue groups."""
    if aa in HELIX_AA:
        return "H"
    return "E" if aa in BETA_AA else "C"
def sequence_to_synthetic_labels(sequence: str) -> str:
    """Derive a synthetic per-residue label string from an amino acid sequence."""
    chars = [residue_to_ss_char(residue) for residue in sequence]
    return "".join(chars)
def make_synthetic_dataset(
    tokenizer,
    num_train_samples: int,
    num_eval_samples: int,
    min_seq_len: int,
    max_seq_len: int,
    max_length: int,
):
    """Create a synthetic train/eval dataset for token classification."""
    # Generate both raw splits first so the RNG consumption order is train-then-eval.
    train_seqs = build_synthetic_sequences(num_train_samples, min_seq_len, max_seq_len)
    eval_seqs = build_synthetic_sequences(num_eval_samples, min_seq_len, max_seq_len)

    def _tokenize(seqs):
        labels = [sequence_to_synthetic_labels(s) for s in seqs]
        return Dataset.from_dict({"sequence": seqs, "labels_str": labels}).map(
            lambda ex: tokenize_and_align_labels(ex["sequence"], ex["labels_str"], tokenizer, max_length),
            batched=True,
            remove_columns=["sequence", "labels_str"],
        )

    return _tokenize(train_seqs), _tokenize(eval_seqs)
def main():
    """End-to-end LoRA fine-tuning of a TE-based ESM2 token classifier with HF Trainer."""
    args = parse_args()
    set_seed(args.seed)
    # Explicit opt-in guard: nvidia/esm2_* models ship custom modeling code from the Hub.
    if not args.trust_remote_code and "esm2" in args.base_model.lower():
        raise ValueError(
            f"Model '{args.base_model}' requires remote code execution. "
            "Re-run with --trust_remote_code to confirm you trust this model's code."
        )
    os.makedirs(args.output_dir, exist_ok=True)
    # Prefer bf16 when the GPU supports it; otherwise fall back to fp32.
    use_bf16 = torch.cuda.is_available() and torch.cuda.is_bf16_supported()
    model_dtype = torch.bfloat16 if use_bf16 else torch.float32
    print(f"Loading tokenizer and model from: {args.base_model}")
    tokenizer = AutoTokenizer.from_pretrained(args.base_model, trust_remote_code=args.trust_remote_code)
    config = AutoConfig.from_pretrained(args.base_model, trust_remote_code=args.trust_remote_code)
    # Three-class secondary-structure head (H/E/C).
    config.num_labels = 3
    config.id2label = SS3_ID2LABEL
    config.label2id = SS3_LABEL2ID
    model = AutoModelForTokenClassification.from_pretrained(
        args.base_model,
        config=config,
        trust_remote_code=args.trust_remote_code,
        dtype=model_dtype,
    )
    lora_config = LoraConfig(
        task_type=TaskType.TOKEN_CLS,
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        # TE fuses layernorm + QKV projection into a single module; target that fused layer.
        target_modules=["layernorm_qkv"],
        bias="none",
        inference_mode=False,
    )
    model = get_peft_model(model, lora_config)
    model.print_trainable_parameters()
    # Either real parquet data (both paths validated as a pair in parse_args) or a
    # generated synthetic dataset.
    if args.train_parquet and args.val_parquet:
        print(f"Loading parquet datasets: train={args.train_parquet}, val={args.val_parquet}")
        train_dataset, eval_dataset = load_parquet_dataset(
            train_path=args.train_parquet,
            val_path=args.val_parquet,
            tokenizer=tokenizer,
            max_length=args.max_length,
        )
    else:
        print("Building synthetic dataset...")
        train_dataset, eval_dataset = make_synthetic_dataset(
            tokenizer=tokenizer,
            num_train_samples=args.num_train_samples,
            num_eval_samples=args.num_eval_samples,
            min_seq_len=args.min_seq_len,
            max_seq_len=args.max_seq_len,
            max_length=args.max_length,
        )
    data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        num_train_epochs=args.num_epochs,
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        learning_rate=args.learning_rate,
        weight_decay=args.weight_decay,
        logging_steps=args.logging_steps,
        eval_steps=args.eval_steps,
        save_steps=args.save_steps,
        eval_strategy="steps",
        save_strategy="steps",
        save_total_limit=2,
        load_best_model_at_end=True,
        metric_for_best_model="token_accuracy",
        greater_is_better=True,
        report_to="none",
        # The collator-produced columns don't match the model signature inspection;
        # keep them all.
        remove_unused_columns=False,
        bf16=use_bf16,
        seed=args.seed,
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    trainer.train()
    final_metrics = trainer.evaluate()
    print(f"Final evaluation metrics: {final_metrics}")
    # Saves only the LoRA adapter weights (plus tokenizer), not the full base model.
    model.save_pretrained(args.output_dir)
    tokenizer.save_pretrained(args.output_dir)
    print(f"Saved LoRA adapter and tokenizer to: {args.output_dir}")
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/peft",
"file_path": "examples/lora_finetuning_transformer_engine/lora_finetuning_te.py",
"license": "Apache License 2.0",
"lines": 273,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/peft:src/peft/tuners/lora/te.py | # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-Apache2
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
import torch
from peft.import_utils import is_te_available
from peft.tuners.lora.layer import LoraLayer
from peft.tuners.tuners_utils import BaseTunerLayer
from .config import LoraConfig
if is_te_available():
import transformer_engine as te
class TeLinear(torch.nn.Module, LoraLayer):
    """LoRA layer for TransformerEngine linear modules.

    Supports ``te.pytorch.Linear``, ``te.pytorch.LayerNormLinear``, and ``te.pytorch.LayerNormMLP`` as base layers.

    Note:
        Adapter weight merging (``merge`` / ``unmerge``) is **not supported** yet.
    """

    def __init__(
        self,
        base_layer,
        adapter_name: str,
        config: LoraConfig,
        r: int = 0,
        lora_alpha: int = 1,
        **kwargs,
    ):
        # Fail fast before any module construction: DoRA decomposition is not implemented
        # for TE layers.
        if config.use_dora:
            raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")
        super().__init__()
        LoraLayer.__init__(self, base_layer=base_layer, **kwargs)
        self._active_adapter = adapter_name
        self.update_layer(
            adapter_name,
            r,
            lora_alpha=lora_alpha,
            config=config,
        )

    def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
        """Not supported yet for TransformerEngine layers.

        Raises:
            NotImplementedError: Always.
        """
        raise NotImplementedError(f"{self.__class__.__name__} does not support merge yet.")

    def unmerge(self) -> None:
        """Not supported yet for TransformerEngine layers.

        Raises:
            NotImplementedError: Always.
        """
        raise NotImplementedError(f"{self.__class__.__name__} does not support unmerge yet.")

    def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
        """Base layer output plus `lora_B(lora_A(dropout(x))) * scaling` per active adapter."""
        self._check_forward_args(x, *args, **kwargs)
        adapter_names = kwargs.pop("adapter_names", None)
        if self.disable_adapters:
            # Adapters turned off: pass through the base layer only.
            result = self.base_layer(x, *args, **kwargs)
        elif adapter_names is not None:
            raise ValueError(f"{self.__class__.__name__} does not support mixed_batch_forward yet.")
        else:
            result = self.base_layer(x, *args, **kwargs)
            torch_result_dtype = result.dtype
            lora_A_keys = self.lora_A.keys()
            for active_adapter in self.active_adapters:
                if active_adapter not in lora_A_keys:
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]
                dropout = self.lora_dropout[active_adapter]
                scaling = self.scaling[active_adapter]
                # Match the adapter weights' dtype before applying the low-rank update.
                x = self._cast_input_dtype(x, lora_A.weight.dtype)
                result = result + lora_B(lora_A(dropout(x))) * scaling
            result = result.to(torch_result_dtype)
        return result

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "lora." + rep
def dispatch_transformer_engine(
    target: torch.nn.Module,
    adapter_name: str,
    config: LoraConfig,
    **kwargs: Any,
) -> Optional[torch.nn.Module]:
    """Return a ``TeLinear`` wrapping ``target`` when its base layer is a
    TransformerEngine linear module, otherwise ``None``.

    Used by the LoRA tuner's dispatch chain to pick the right layer class.
    """
    # An already-adapted layer must be unwrapped to inspect the underlying module.
    if isinstance(target, BaseTunerLayer):
        base = target.get_base_layer()
    else:
        base = target

    if not is_te_available():
        return None

    te_linear_types = (te.pytorch.LayerNormLinear, te.pytorch.LayerNormMLP, te.pytorch.Linear)
    if isinstance(base, te_linear_types):
        return TeLinear(target, adapter_name, config=config, **kwargs)
    return None
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/lora/te.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/psoft_finetuning/psoft_finetuning.py | # Copyright 2026-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
from trl import SFTConfig, SFTTrainer
from peft import PsoftConfig, get_peft_model
@dataclass
class ScriptArguments(SFTConfig):
    """CLI arguments for the PSOFT fine-tuning example.

    Extends TRL's ``SFTConfig`` with model-loading, PSOFT-adapter, and dataset
    options so that a single ``HfArgumentParser`` invocation parses both trainer
    settings and adapter hyperparameters.
    """

    # --- model ---
    base_model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "The name or path of the fp32/16 base model."}
    )
    bits: str = field(
        default="fp32",
        metadata={"help": "Precision to load the base model. Choices: ['bf16', 'fp16', 'fp32']."},
    )
    # --- PSOFT ---
    r: int = field(default=32, metadata={"help": "Rank (r): dimension of trainable R."})
    psoft_alpha: int = field(default=32, metadata={"help": "Scaling factor (typically set to r)."})
    target_modules: list[str] = field(
        default_factory=lambda: ["q_proj", "v_proj"],
        metadata={"help": "Target module names, e.g. ['q_proj','k_proj','v_proj','o_proj', ...]."},
    )
    # SVD / init
    ab_svd_init: str = field(
        default="psoft_init",
        metadata={"help": "Principal-subspace init identifier (e.g. 'psoft_init')."},
    )
    psoft_svd: str = field(
        default="full",
        metadata={"help": "SVD method. Typical choices: ['full', 'lowrank']."},
    )
    psoft_svd_lowrank_niter: Optional[int] = field(
        default=None,
        metadata={"help": "If psoft_svd='lowrank', number of iterations for lowrank SVD (optional)."},
    )
    # Orth / relaxation
    psoft_orth: bool = field(default=True, metadata={"help": "Use orthogonal R (Cayley parameterization)."})
    psoft_mag_a: bool = field(default=True, metadata={"help": "Enable tunable vector alpha (relaxed mode)."})
    psoft_mag_b: bool = field(default=True, metadata={"help": "Enable tunable vector beta (relaxed mode)."})
    # Cayley–Neumann approximation
    use_cayley_neumann: bool = field(default=False, metadata={"help": "Enable Cayley-Neumann approximation."})
    num_cayley_neumann_terms: int = field(default=5, metadata={"help": "Number of Neumann series terms."})
    cayley_neumann_eps: Optional[float] = field(
        default=None, metadata={"help": "Optional eps for numerical stability."}
    )
    # --- data ---
    data_path: str = field(default="imdb", metadata={"help": "Dataset name/path for training."})
    dataset_split: str = field(default="train[:1%]", metadata={"help": "Dataset split, e.g. 'train[:1%]'."})
    dataset_field: Optional[list[str]] = field(
        default=None,
        metadata={
            "help": (
                "Fields used to build SFT text. "
                "If provided, will build: '### USER: <field0>\\n### ASSISTANT: <field1>'. "
                "If None, must already have a 'text' column."
            )
        },
    )
def _dtype_from_bits(bits: str) -> torch.dtype:
bits = bits.lower()
if bits == "bf16":
return torch.bfloat16
if bits == "fp16":
return torch.float16
if bits == "fp32":
return torch.float32
raise ValueError(f"Unknown bits={bits}. Use one of: bf16, fp16, fp32.")
def main():
    """Fine-tune a causal LM with a PSOFT adapter via TRL's SFTTrainer.

    Parses ``ScriptArguments``, loads the base model in the requested precision,
    wraps it with a PSOFT adapter, trains on the configured dataset, and saves
    the adapter + tokenizer under ``<output_dir>/psoft_ft``.
    """
    parser = HfArgumentParser(ScriptArguments)
    script_args = parser.parse_args_into_dataclasses()[0]
    print(script_args)
    if script_args.base_model_name_or_path is None:
        raise ValueError("--base_model_name_or_path is required.")
    # PSOFT does NOT support quantized layers (nf4/int8/etc.).
    # We only allow fp16/bf16/fp32 here to avoid accidental quantized loading.
    if script_args.bits.lower() not in {"bf16", "fp16", "fp32"}:
        raise ValueError("PSOFT example only supports bits in ['bf16','fp16','fp32'] (no quantization).")
    torch_dtype = _dtype_from_bits(script_args.bits)
    # Load base model
    model = AutoModelForCausalLM.from_pretrained(
        script_args.base_model_name_or_path,
        dtype=torch_dtype,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name_or_path)
    # Some base models (e.g. Llama) ship without a pad token; reuse EOS for padding.
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = tokenizer.eos_token_id
    # Build PSOFT config
    psoft_kwargs = {
        "r": script_args.r,
        "psoft_alpha": script_args.psoft_alpha,
        "target_modules": script_args.target_modules,
        "ab_svd_init": script_args.ab_svd_init,
        "psoft_svd": script_args.psoft_svd,
        "psoft_orth": script_args.psoft_orth,
        "psoft_mag_a": script_args.psoft_mag_a,
        "psoft_mag_b": script_args.psoft_mag_b,
        "use_cayley_neumann": script_args.use_cayley_neumann,
        "num_cayley_neumann_terms": script_args.num_cayley_neumann_terms,
        "cayley_neumann_eps": script_args.cayley_neumann_eps,
        "task_type": "CAUSAL_LM",
    }
    # Only pass lowrank_niter when user sets it (and typically when psoft_svd='lowrank')
    if script_args.psoft_svd_lowrank_niter is not None:
        psoft_kwargs["psoft_svd_lowrank_niter"] = script_args.psoft_svd_lowrank_niter
    peft_config = PsoftConfig(**psoft_kwargs)
    model = get_peft_model(model, peft_config)
    model.print_trainable_parameters()
    # Load dataset
    dataset = load_dataset(script_args.data_path, split=script_args.dataset_split)
    # Ensure a "text" field for SFTTrainer
    if script_args.dataset_field is not None:
        if len(script_args.dataset_field) != 2:
            raise ValueError("dataset_field must be a list of exactly 2 field names: [input_field, output_field].")
        in_f, out_f = script_args.dataset_field[0], script_args.dataset_field[1]

        def to_sft_text(example):
            # Build the single-turn prompt/response format expected by SFTTrainer.
            return {"text": f"### USER: {example[in_f]}\n### ASSISTANT: {example[out_f]}"}

        dataset = dataset.map(to_sft_text)
    else:
        if "text" not in dataset.column_names:
            raise ValueError("dataset_field is None but dataset has no 'text' column. Provide dataset_field.")
    # Train
    trainer = SFTTrainer(
        model=model,
        args=script_args,
        train_dataset=dataset,
        processing_class=tokenizer,
    )
    trainer.train()
    trainer.save_state()
    # Save adapter (PSOFT)
    os.makedirs(script_args.output_dir, exist_ok=True)
    model.save_pretrained(os.path.join(script_args.output_dir, "psoft_ft"))
    tokenizer.save_pretrained(os.path.join(script_args.output_dir, "psoft_ft"))
# Script entry point: run the example when executed directly.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/peft",
"file_path": "examples/psoft_finetuning/psoft_finetuning.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/psoft/config.py | # Copyright 2026-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from dataclasses import dataclass, field
from typing import Literal, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class PsoftConfig(PeftConfig):
    """
    Configuration for PSOFT (Efficient Orthogonal Fine-Tuning with Principal Subspace Adaptation).

    PSOFT inserts an r*r orthogonal transformation R between low-rank matrices A and B, so the low-rank update is ΔW =
    B @ (R-I) @ A. Only R (and optional tunable vectors) are trained; A and B are initialized with psoft_init
    (SVD-based, row-orthogonal A) and frozen.

    Args:
        r (`int`):
            Defaults to 32. PSOFT rank (r) controls the adapter capacity through an r*r transformation R. Smaller ranks
            32-128 are typically sufficient for simple tasks, More complex tasks may benefit from 64-256, increasing
            expressiveness at the cost of additional parameters and computation. See the paper for empirically
            validated settings: https://openreview.net/forum?id=FSHrinMArK.
        target_modules (`Optional[Union[List[str], str]]`):
            The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
            names will be replaced. When passing a string, a regex match will be performed. When passing a list of
            strings, either an exact match will be performed or it is checked if the name of the module ends with any
            of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen (if
            the model is a PreTrainedModel, the output layer excluded). If this is not specified, modules will be
            chosen according to the model architecture. If the architecture is not known, an error will be raised -- in
            this case, you should specify the target modules manually.
        exclude_modules (`Optional[Union[List[str], str]]`):
            The names of the modules to not apply the adapter. When passing a string, a regex match will be performed.
            When passing a list of strings, either an exact match will be performed or it is checked if the name of the
            module ends with any of the passed strings.
        psoft_alpha (`int`): Defaults to 32. It controls PSOFT scaling factor. Same semantics as LoRA alpha.
        psoft_dropout (`float`): Defaults to 0.0. Dropout for PSOFT path. Same semantics as LoRA dropout.
        fan_in_fan_out (`bool`):
            Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses
            `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
        ab_svd_init (`Literal["psoft_init", "pissa_init"]`):
            Defaults to 'psoft_init'. Initialization strategy for A and B used to construct the principal subspace in
            PSOFT. 'psoft_init': SVD-based initialization with row-orthogonal A, ensuring strict orthogonality (PSOFT).
            'pissa_init': SVD-based initialization with symmetric A and B (standard PiSSA).
        psoft_svd (`Literal["full", "lowrank"]`):
            Defaults to 'full'. SVD backend for initialization: 'full' uses torch.linalg.svd; 'lowrank' uses
            torch.svd_lowrank.
        psoft_svd_lowrank_niter (`int`):
            Only used when psoft_svd='lowrank'. Defaults to 10. Number of power iterations used by torch.svd_lowrank
            when psoft_svd='lowrank'.
        psoft_orth (`bool`):
            Defaults to 'True'. If True, constrains R to be orthogonal via Cayley parameterization, preserving the
            geometric relationships among columns of the pre-trained weight vectors. If False, R is a free matrix
            without orthogonality constraints.
        psoft_mag_b (`bool`):
            Defaults to 'True'. If True, learns a diagonal scaling vector on the 'output' side of R. Commonly paired
            with psoft_mag_a to increase task adaptability, with slight distortion to the pre-trained geometry.
        psoft_mag_a (`bool`):
            Defaults to 'True'. If True, learns a diagonal scaling vector on the 'input' side of R. Commonly paired
            with psoft_mag_b to increase task adaptability, with slight distortion to the pre-trained geometry.
        use_cayley_neumann (`bool`):
            Defaults to 'False'. Whether to use the Cayley-Neumann formulation of PSOFT or not. Set to True to improve
            computational efficiency but comes at costs of bigger approximation error for orthogonality.
        num_cayley_neumann_terms (`int`):
            Defaults to 5. Only used when use_cayley_neumann=True. Number of Cayley-Neumann terms to use. Higher number
            results in less approximation error for orthogonality.
        cayley_neumann_eps (`optional[float]`):
            Defaults to 'None'. Only used when use_cayley_neumann=True. Optional Frobenius-norm bound for the generator
            matrix Q in the Cayley-Neumann approximation. If None (default), no rescaling is applied. If set to a value
            in (0, 1) (e.g., 0.9), Q is rescaled whenever ||Q||_F exceeds the threshold to improve numerical stability.
            See https://spherelab.ai/oftv2/ for details.
        init_weights (`bool`):
            Defaults to 'True'. Whether to initialize the weights of the PSOFT layers with their default
            initialization. Don't change this setting, except if you know exactly what you're doing.
        modules_to_save (`List[str]`):
            List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
        layers_to_transform (`Union[List[int], int]`):
            The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
            that are specified in this list. If a single integer is passed, it will apply the transformations on the
            layer at this index.
        layers_pattern (`Optional[Union[List[str], str]]`):
            The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target the
            `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`.
    """

    r: int = field(
        default=32,
        metadata={
            "help": (
                "PSOFT rank (r) controls the adapter capacity through an r*r transformation R. "
                "Smaller ranks 32-128 are typically sufficient for simple tasks, More complex tasks may benefit from 64-256, "
                "increasing expressiveness at the cost of additional parameters and computation. "
                "See the paper for empirically validated settings: https://openreview.net/forum?id=FSHrinMArK. "
            )
        },
    )
    target_modules: Optional[Union[list[str], str]] = field(
        default=None,
        metadata={
            "help": (
                "List of module names or regex expression of the module names to replace with PSOFT. "
                "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. "
                "This can also be a wildcard 'all-linear' which matches all linear/Conv1D "
                "(if the model is a PreTrainedModel, the output layer excluded). "
                "If not specified, modules will be chosen according to the model architecture, If the architecture is "
                "not known, an error will be raised -- in this case, you should specify the target modules manually. "
            ),
        },
    )
    exclude_modules: Optional[Union[list[str], str]] = field(
        default=None,
        metadata={"help": "List of module names or regex expression of the module names to exclude from PSOFT. "},
    )
    psoft_alpha: int = field(
        default=32, metadata={"help": "It controls PSOFT scaling factor. Same semantics as LoRA alpha. "}
    )
    psoft_dropout: float = field(
        default=0.0, metadata={"help": "Dropout for PSOFT path. Same semantics as LoRA dropout. "}
    )
    fan_in_fan_out: bool = field(
        default=False,
        metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out). "},
    )
    ab_svd_init: Literal["psoft_init", "pissa_init"] = field(
        default="psoft_init",
        metadata={
            "help": (
                "Initialization strategy for A and B used to construct the principal subspace in PSOFT. "
                "- 'psoft_init': SVD-based initialization with row-orthogonal A (asymmetric A and B), ensuring strict orthogonality (PSOFT). "
                "- 'pissa_init': SVD-based initialization with symmetric A and B, without strict orthogonality constraint (standard PiSSA). "
            )
        },
    )
    psoft_svd: Literal["full", "lowrank"] = field(
        default="full",
        metadata={
            "help": "SVD backend for initialization: 'full' uses torch.linalg.svd; 'lowrank' uses torch.svd_lowrank. "
        },
    )
    psoft_svd_lowrank_niter: int = field(
        default=10,
        metadata={
            "help": "Number of power iterations used by torch.svd_lowrank when psoft_svd='lowrank'. Only used when psoft_svd='lowrank'. "
        },
    )
    psoft_orth: bool = field(
        default=True,
        metadata={
            "help": (
                "If True, constrains R to be orthogonal via Cayley parameterization, preserving the geometric relationships among column of the pre-trained weight vectors. "
                "If False, R is a free matrix without orthogonality constraints. "
            )
        },
    )
    psoft_mag_b: bool = field(
        default=True,
        metadata={
            "help": (
                "If True, learns a diagonal scaling vector on the 'output' side of R. "
                "Commonly paired with psoft_mag_a to increase task adaptability, with slight distortion to the pre-trained geometry. "
            )
        },
    )
    psoft_mag_a: bool = field(
        default=True,
        metadata={
            "help": (
                "If True, learns a diagonal scaling vector on the 'input' side of R. "
                "Commonly paired with psoft_mag_b to increase task adaptability, with slight distortion to the pre-trained geometry. "
            )
        },
    )
    use_cayley_neumann: bool = field(
        default=False,
        metadata={
            "help": "Whether to use the Cayley-Neumann Formulation of PSOFT or not. Set to True to improve computational efficiency but comes at costs of bigger approximation error for orthogonality. "
        },
    )
    num_cayley_neumann_terms: int = field(
        default=5,
        metadata={
            "help": "Number of Cayley-Neumann terms to use. Higher number results in less approximation error for orthogonality. Only used when use_cayley_neumann=True."
        },
    )
    cayley_neumann_eps: Optional[float] = field(
        default=None,
        metadata={
            "help": (
                "Optional Frobenius-norm bound for the generator matrix Q in the Cayley-Neumann approximation. Only used when use_cayley_neumann=True. "
                "If None (default), no rescaling is applied. "
                "If set to a value in (0, 1) (e.g., 0.9), Q is rescaled whenever ||Q||_F exceeds the threshold to improve numerical stability. "
                "See https://spherelab.ai/oftv2/ for details. "
            )
        },
    )
    modules_to_save: Optional[list[str]] = field(
        default=None,
        metadata={
            "help": (
                "List of modules apart from PSOFT layers to be set as trainable and saved in the final checkpoint. "
                "For example, in Sequence Classification or Token Classification tasks, "
                "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved. "
            )
        },
    )
    init_weights: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to initialize the weights of the PSOFT layers with their default initialization. "
                "Don't change this setting, except if you know exactly what you're doing. "
            )
        },
    )
    layers_to_transform: Optional[Union[list[int], int]] = field(
        default=None,
        metadata={
            "help": (
                "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. "
                "This only works when target_modules is a list of str."
            )
        },
    )
    layers_pattern: Optional[Union[list[str], str]] = field(
        default=None,
        metadata={
            "help": (
                "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern. "
                "This only works when target_modules is a list of str. This should target the `nn.ModuleList` of the "
                "model, which is often called `'layers'` or `'h'`. "
            )
        },
    )

    def __post_init__(self):
        """Validate field combinations and normalize container fields after parsing."""
        super().__post_init__()
        self.peft_type = PeftType.PSOFT
        # Convert lists to sets for O(1) membership checks during module matching.
        self.target_modules = (
            set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
        )
        self.exclude_modules = (
            set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules
        )
        # if target_modules is a regex expression, then layers_to_transform should be None
        if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
            raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.")
        # if target_modules is a regex expression, then layers_pattern should be None
        if isinstance(self.target_modules, str) and self.layers_pattern is not None:
            raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.")
        # check for layers_to_transform and layers_pattern
        if self.layers_pattern and not self.layers_to_transform:
            raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ")
        if self.r <= 0:
            raise ValueError(f"`r` must be a positive integer; got {self.r}.")
        allowed_inits = {"psoft_init", "pissa_init"}
        if self.ab_svd_init not in allowed_inits:
            raise ValueError(f"`ab_svd_init` must be one of {sorted(allowed_inits)}; got {self.ab_svd_init!r}.")
        allowed_svd_backends = {"full", "lowrank"}
        if self.psoft_svd not in allowed_svd_backends:
            raise ValueError(f"`psoft_svd` must be one of {sorted(allowed_svd_backends)}; got {self.psoft_svd!r}.")
        # Compare against the dataclass default to detect a user-supplied value that
        # would silently be ignored.
        DEFAULT_LOW_RANK_NITER = self.__dataclass_fields__["psoft_svd_lowrank_niter"].default
        if self.psoft_svd != "lowrank" and self.psoft_svd_lowrank_niter != DEFAULT_LOW_RANK_NITER:
            warnings.warn(
                "`psoft_svd_lowrank_niter` is only used when `psoft_svd='lowrank'`. "
                f"Got psoft_svd={self.psoft_svd!r}, so psoft_svd_lowrank_niter="
                f"{self.psoft_svd_lowrank_niter} will be ignored.",
                UserWarning,
            )
        DEFAULT_NUM_CAYLEY_NEUMANN_TERMS = self.__dataclass_fields__["num_cayley_neumann_terms"].default
        if self.use_cayley_neumann:
            if self.num_cayley_neumann_terms <= 0:
                raise ValueError(
                    f"`num_cayley_neumann_terms` must be a positive integer; got {self.num_cayley_neumann_terms}."
                )
            if self.cayley_neumann_eps is not None and not (0.0 < self.cayley_neumann_eps < 1.0):
                raise ValueError(f"`cayley_neumann_eps` must be in (0, 1) when set; got {self.cayley_neumann_eps}.")
        else:
            # Warn (don't fail) when Cayley-Neumann knobs are set but the feature is off.
            if self.num_cayley_neumann_terms != DEFAULT_NUM_CAYLEY_NEUMANN_TERMS:
                warnings.warn(
                    "`num_cayley_neumann_terms` is only used when `use_cayley_neumann=True`. "
                    f"Since `use_cayley_neumann=False`, `num_cayley_neumann_terms={self.num_cayley_neumann_terms}` will be ignored.",
                    UserWarning,
                )
            if self.cayley_neumann_eps is not None:
                warnings.warn(
                    "`cayley_neumann_eps` is only used when `use_cayley_neumann=True`. "
                    f"Since `use_cayley_neumann=False`, `cayley_neumann_eps={self.cayley_neumann_eps}` will be ignored.",
                    UserWarning,
                )
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/psoft/config.py",
"license": "Apache License 2.0",
"lines": 296,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/psoft/layer.py | # Copyright 2026-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from typing import Any, Optional
import torch
from torch import nn, svd_lowrank
from peft.tuners._buffer_dict import BufferDict
from peft.tuners.tuners_utils import BaseTunerLayer, _get_in_out_features, check_adapters_to_merge
from peft.utils.integrations import gather_params_ctx
from peft.utils.other import transpose
from .config import PsoftConfig
class OrthLayer(nn.Module):
    """
    r*r orthogonal transformation R used in PSOFT between A and B. Forward: output = input @ R.T

    When ``orth=True``, R is parameterized via the Cayley transform of a skew-symmetric
    generator Q (only the strict upper triangle of Q is stored). Optional diagonal
    scaling vectors (``mag_b`` on rows, ``mag_a`` on columns) relax strict orthogonality.
    """

    def __init__(
        self,
        size: int,
        orth: bool = True,
        mag_b: bool = True,
        mag_a: bool = True,
        use_cayley_neumann: bool = False,
        num_cayley_neumann_terms: int = 5,
        cayley_neumann_eps: Optional[float] = None,
    ):
        super().__init__()
        self.size = size
        self.orth = orth
        self.mag_b = mag_b
        self.mag_a = mag_a
        self.use_cayley_neumann = use_cayley_neumann
        self.num_cayley_neumann_terms = num_cayley_neumann_terms
        self.cayley_neumann_eps = cayley_neumann_eps
        if orth:
            # Orthogonal mode: store only the strict upper triangle of the
            # skew-symmetric generator Q -> size*(size-1)/2 parameters.
            self.weight = nn.Parameter(torch.empty((size * (size - 1)) // 2))
            rows, cols = torch.triu_indices(size, size, 1)
            self.register_buffer("rows", rows, persistent=False)
            self.register_buffer("cols", cols, persistent=False)
        else:
            # Free mode: R is an unconstrained dense matrix.
            self.weight = nn.Parameter(torch.empty(size, size))
        self.vector_b = nn.Parameter(torch.empty(size)) if mag_b else None
        self.vector_a = nn.Parameter(torch.empty(size)) if mag_a else None

    def reset_parameters(self, init_weights: bool = True) -> None:
        """Initialize parameters; with ``init_weights=True`` R starts as the identity (zero generator)."""
        params = [self.weight]
        if self.vector_b is not None:
            params.append(self.vector_b)
        if self.vector_a is not None:
            params.append(self.vector_a)
        # Skip initialization for meta-device parameters (deferred materialization).
        if any(p.is_meta for p in params):
            return
        with torch.no_grad():
            if init_weights:
                if self.orth:
                    # Zero generator => Cayley transform yields R = I.
                    self.weight.zero_()
                else:
                    nn.init.eye_(self.weight)
                if self.vector_b is not None:
                    self.vector_b.fill_(1.0)
                if self.vector_a is not None:
                    self.vector_a.fill_(1.0)
            else:
                # Non-default init: small random perturbation around identity.
                if self.orth:
                    nn.init.normal_(self.weight, mean=0.0, std=0.1)
                else:
                    nn.init.eye_(self.weight)
                    self.weight.add_(torch.randn_like(self.weight) * 0.1)
                if self.vector_b is not None:
                    self.vector_b.fill_(1.0)
                if self.vector_a is not None:
                    self.vector_a.fill_(1.0)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Apply R to the last dimension: output = input @ R.T (dtype of input preserved)."""
        R = self.get_matrix()
        # Half-precision matmul is poorly supported on CPU; compute in fp32 there.
        if input.device.type == "cpu" and input.dtype in (torch.float16, torch.bfloat16):
            compute_dtype = torch.float32
        else:
            compute_dtype = input.dtype
        return (input.to(compute_dtype) @ R.to(compute_dtype).t()).to(input.dtype)

    # Adapted from the Cayley/Neumann-based orthogonal parametrization used in OFT v2
    # (PEFT implementation: https://github.com/huggingface/peft/blob/main/src/peft/tuners/oft/layer.py) #L104
    def _skew_symmetric(self) -> torch.Tensor:
        # Scatter the stored upper-triangular entries, then antisymmetrize: Q = U - U^T.
        Q = torch.zeros((self.size, self.size), device=self.weight.device, dtype=self.weight.dtype)
        Q = Q.index_put((self.rows, self.cols), self.weight)
        return Q - Q.transpose(0, 1)

    # Adapted from the Cayley/Neumann-based orthogonal parametrization used in OFT v2
    # (PEFT implementation: https://github.com/huggingface/peft/blob/main/src/peft/tuners/oft/layer.py) #L160
    def _project_Q(self, Q: torch.Tensor, eps: float = 0.9) -> torch.Tensor:
        # Rescale Q so its Frobenius norm does not exceed eps (numerical stability of
        # the truncated Neumann series).
        norm = torch.linalg.norm(Q, ord="fro")
        if torch.isfinite(norm) and norm > eps:
            Q = Q * (eps / (norm + 1e-12))
        return Q

    # R = (I+Q)(I-Q)^(-1)
    def get_matrix(self) -> torch.Tensor:
        """Materialize the r*r matrix R (Cayley transform or free weight), with optional diagonal scaling."""
        cast_to_fp32 = False
        orig_dtype = None
        if not self.orth:
            R = self.weight
        else:
            Q = self._skew_symmetric()
            orig_dtype = Q.dtype
            # CPU half precision lacks solid linalg support; compute in fp32 and cast back.
            cast_to_fp32 = (Q.device.type == "cpu") and (orig_dtype in (torch.float16, torch.bfloat16))
            if cast_to_fp32:
                Q = Q.float()
            id_mat = torch.eye(self.size, device=Q.device, dtype=Q.dtype)
            if self.use_cayley_neumann:
                if self.cayley_neumann_eps is not None:
                    Q = self._project_Q(Q, eps=self.cayley_neumann_eps)
                t = int(self.num_cayley_neumann_terms)
                # Truncated Neumann expansion: R ≈ I + 2Q + 2Q^2 + ... + 2Q^(t-2) + Q^(t-1)
                # (final term un-doubled, matching the OFT v2 reference implementation).
                R = id_mat.clone()
                if t > 1:
                    R.add_(Q, alpha=2.0)
                if t > 2:
                    Q_squared = Q @ Q
                    R.add_(Q_squared, alpha=2.0)
                    Q_power = Q_squared
                    for _ in range(3, t - 1):
                        Q_power = Q_power @ Q
                        R.add_(Q_power, alpha=2.0)
                    Q_power = Q_power @ Q
                    R.add_(Q_power)
            else:
                # Exact Cayley transform: solve X (I - Q) = (I + Q) for X.
                R = torch.linalg.solve(id_mat - Q, id_mat + Q, left=False)
        # Apply scaling vectors to R
        if self.vector_b is not None:
            R = self.vector_b[:, None] * R
        if self.vector_a is not None:
            R = R * self.vector_a[None, :]
        if cast_to_fp32:
            R = R.to(orig_dtype)
        return R

    def __repr__(self) -> str:
        return (
            f"psoft.{self.__class__.__name__}("
            f"size={self.size}, orth={self.orth}, "
            f"use_cayley_neumann={self.use_cayley_neumann}, "
            f"num_cayley_neumann_terms={int(self.num_cayley_neumann_terms)}, "
            f"cayley_neumann_eps={self.cayley_neumann_eps}, "
            f"mag_a={self.mag_a}, mag_b={self.mag_b}"
            f")"
        )
class PsoftLayer(BaseTunerLayer):
    """Base mixin holding per-adapter PSOFT state (hyperparameters, trainable R, frozen A/B caches)."""

    # Names of trainable adapter submodules / per-adapter attributes, consumed by
    # the BaseTunerLayer bookkeeping machinery.
    adapter_layer_names: tuple[str, ...] = ("psoft_R",)
    other_param_names: tuple[str, ...] = (
        "r",
        "psoft_alpha",
        "scaling",
        "psoft_dropout",
        "psoft_svd",
        "psoft_svd_lowrank_niter",
        "ab_svd_init",
    )

    def __init__(self, base_layer: nn.Module, **kwargs) -> None:
        super().__init__()
        self.base_layer = base_layer
        # per-adapter hyperparams
        self.r: dict[str, int] = {}
        self.psoft_alpha: dict[str, float] = {}
        self.scaling: dict[str, float] = {}
        self.psoft_dropout = nn.ModuleDict({})
        self.psoft_svd: dict[str, str] = {}
        self.psoft_svd_lowrank_niter: dict[str, int] = {}
        self.ab_svd_init: dict[str, Optional[str]] = {}
        # per-adapter trainable module
        self.psoft_R = nn.ModuleDict({})
        # per-adapter cache state; A/B are frozen (non-persistent buffers), only R is trained
        self._psoft_A_cache = BufferDict(persistent=False)
        self._psoft_B_cache = BufferDict(persistent=False)
        self.merged_adapters: list[str] = []
        self._disable_adapters = False
        self.kwargs = kwargs
        self.fan_in_fan_out = False
        base_layer = self.get_base_layer()
        in_features, out_features = _get_in_out_features(base_layer)
        self.in_features = in_features
        self.out_features = out_features

    def _get_psoft_ab_cache_buffers(self, adapter_name: str):
        # Returns the frozen (A, B) pair for this adapter.
        return self._psoft_A_cache[adapter_name], self._psoft_B_cache[adapter_name]

    def _set_psoft_ab_cache_buffers(self, adapter_name: str, A: torch.Tensor, B: torch.Tensor) -> None:
        self._psoft_A_cache[adapter_name] = A
        self._psoft_B_cache[adapter_name] = B

    def update_layer(self, adapter_name: str, config: PsoftConfig, **kwargs: Any) -> None:
        """Register a new adapter: store hyperparameters, create its R module, and build the frozen A/B caches."""
        ab_svd_init = config.ab_svd_init
        init_weights = config.init_weights
        r = int(config.r)
        self.fan_in_fan_out = config.fan_in_fan_out
        self.r[adapter_name] = r
        self.psoft_alpha[adapter_name] = config.psoft_alpha
        self.scaling[adapter_name] = config.psoft_alpha / r
        self.psoft_dropout[adapter_name] = (
            nn.Dropout(p=config.psoft_dropout) if config.psoft_dropout > 0.0 else nn.Identity()
        )
        self.ab_svd_init[adapter_name] = config.ab_svd_init
        self.psoft_svd[adapter_name] = config.psoft_svd
        self.psoft_svd_lowrank_niter[adapter_name] = config.psoft_svd_lowrank_niter
        self.psoft_R[adapter_name] = OrthLayer(
            size=r,
            orth=config.psoft_orth,
            mag_b=config.psoft_mag_b,
            mag_a=config.psoft_mag_a,
            use_cayley_neumann=config.use_cayley_neumann,
            num_cayley_neumann_terms=config.num_cayley_neumann_terms,
            cayley_neumann_eps=config.cayley_neumann_eps,
        )
        self._move_adapter_to_device_of_base_layer(adapter_name)
        self.psoft_R[adapter_name].reset_parameters(init_weights=init_weights)
        # Only R is trainable; A/B live in non-persistent buffer caches.
        self.psoft_R[adapter_name].requires_grad_(True)
        # gather_params_ctx makes the full weight available under ZeRO-style sharding.
        with gather_params_ctx(self.get_base_layer().weight):
            self._build_psoft_ab_cache_buffers(adapter_name, ab_svd_init)
        self.set_adapter([adapter_name])

    # Adapted from the asymmetric SVD used in PiSSA
    # (PEFT implementation: https://github.com/huggingface/peft/blob/main/src/peft/tuners/lora/layer.py) #L316
    def _build_psoft_ab_cache_buffers(self, adapter_name: str, init_type: str) -> None:
        """Compute the rank-r SVD of the base weight and cache the frozen A (r, in) and B (out, r) factors."""
        with torch.no_grad():
            base = self.get_base_layer()
            weight = base.weight
            dtype = weight.dtype
            if dtype not in (torch.float32, torch.float16, torch.bfloat16):
                raise TypeError("PSOFT init requires float32/float16/bfloat16. Re-quantize after init if needed.")
            # W: (out, in) fp32
            W = transpose(weight.to(torch.float32), self.fan_in_fan_out)
            r = self.r[adapter_name]
            Vr, Sr, Uhr = self._compute_svd_factors(
                W,
                r,
                svd_mode=self.psoft_svd[adapter_name],
                niter=self.psoft_svd_lowrank_niter[adapter_name],
            )
            # Pre-divide by the adapter scaling so B @ A reproduces the principal subspace of W.
            Sr_scaled = Sr / self.scaling[adapter_name]
            if init_type == "psoft_init":
                # Row-orthogonal A; all singular values folded into B.
                A = Uhr  # (r, in)
                B = Vr @ torch.diag(Sr_scaled)  # (out, r)
            elif init_type == "pissa_init":
                # Symmetric split: sqrt of singular values on both sides (standard PiSSA).
                s_sqrt = torch.sqrt(Sr_scaled)
                A = torch.diag(s_sqrt) @ Uhr  # (r, in)
                B = Vr @ torch.diag(s_sqrt)  # (out, r)
            else:
                raise ValueError(f"Unknown ab_svd_init: {init_type}")
            A = A.contiguous().detach()
            B = B.contiguous().detach()
            self._set_psoft_ab_cache_buffers(adapter_name, A, B)

    def _compute_svd_factors(self, weight: torch.Tensor, r: int, *, svd_mode: str, niter: int):
        """Return the rank-r factors (Vr (out, r), Sr (r,), Uhr (r, in)) via full or randomized SVD."""
        # weight: (out, in) fp32
        if svd_mode == "full":
            U, S, Vh = torch.linalg.svd(weight.data, full_matrices=False)
            Vr = U[:, :r]  # (out, r)
            Sr = S[:r]  # (r,)
            Uhr = Vh[:r, :]  # (r, in)
        elif svd_mode == "lowrank":
            U, S, V = svd_lowrank(weight.data, q=r, niter=niter)  # V: (in, r)
            Vr = U[:, :r]
            Sr = S[:r]
            Uhr = V[:, :r].t()  # (r, in)
        else:
            raise ValueError(f"Unknown svd_mode: {svd_mode}")
        return Vr, Sr, Uhr
class Linear(nn.Module, PsoftLayer):
def __init__(
self,
base_layer: nn.Module,
adapter_name: str,
config: PsoftConfig,
**kwargs: Any,
) -> None:
super().__init__()
PsoftLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = config.fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, config=config, **kwargs)
def _get_R_matrix(self, adapter_name: str) -> torch.Tensor:
return self.psoft_R[adapter_name].get_matrix()
def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
"""
ΔW = scaling * B (R - id_mat) A Returns in base weight layout (respecting fan_in_fan_out).
"""
A, B = self._get_psoft_ab_cache_buffers(adapter_name)
base_w = self.get_base_layer().weight
device = base_w.device
out_dtype = base_w.dtype
R = self._get_R_matrix(adapter_name)
r = self.r[adapter_name]
compute_dtype = (
torch.float32 if (device.type == "cpu" and out_dtype in (torch.float16, torch.bfloat16)) else out_dtype
)
A_c = A.to(device=device, dtype=compute_dtype)
B_c = B.to(device=device, dtype=compute_dtype)
R_c = R.to(device=device, dtype=compute_dtype)
id_mat = torch.eye(r, device=device, dtype=compute_dtype)
delta = B_c @ (R_c - id_mat) @ A_c # (out, in)
delta = transpose(delta, self.fan_in_fan_out)
delta = delta * self.scaling[adapter_name]
return delta.to(dtype=out_dtype)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
base_layer = self.get_base_layer()
for active_adapter in adapter_names:
if active_adapter not in self.psoft_R:
continue
if safe_merge:
orig_weight = base_layer.weight.data.clone()
orig_dtype = orig_weight.dtype
delta_weight = self.get_delta_weight(active_adapter)
orig_weight += delta_weight.to(orig_dtype)
if not torch.isfinite(orig_weight).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weight
else:
delta_weight = self.get_delta_weight(active_adapter)
base_layer.weight.data += delta_weight.to(base_layer.weight.dtype)
self.merged_adapters.append(active_adapter)
def supports_lora_conversion(self, adapter_name: str = "default") -> bool:
return True
def unmerge(self) -> None:
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.", UserWarning)
return
weight = self.get_base_layer().weight
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.psoft_R:
continue
orig_dtype = weight.dtype
delta_weight = self.get_delta_weight(active_adapter)
weight.data -= delta_weight.to(orig_dtype)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
psoft_keys = self.psoft_R.keys()
for active_adapter in self.active_adapters:
if active_adapter not in psoft_keys:
continue
A, B = self._get_psoft_ab_cache_buffers(active_adapter)
dropout = self.psoft_dropout[active_adapter]
scaling = self.scaling[active_adapter]
R_layer = self.psoft_R[active_adapter]
x_cast = self._cast_input_dtype(x, A.dtype)
x_d = dropout(x_cast)
A_c = A.to(device=x_d.device, dtype=x_d.dtype)
B_c = B.to(device=x_d.device, dtype=x_d.dtype)
xa = x_d @ A_c.t()
xr = R_layer(xa)
delta_y = (xr - xa) @ B_c.t()
result = result + (delta_y * scaling)
result = result.to(torch_result_dtype)
return result
def __repr__(self) -> str:
return "psoft." + super().__repr__()
def dispatch_default(
target: nn.Module,
adapter_name: str,
config: PsoftConfig,
**kwargs,
) -> Optional[nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
if config.fan_in_fan_out:
warnings.warn(
"fan_in_fan_out=True is not compatible with `torch.nn.Linear`. Setting fan_in_fan_out=False."
)
config.fan_in_fan_out = False
new_module = Linear(target, adapter_name, config=config, **kwargs)
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/psoft/layer.py",
"license": "Apache License 2.0",
"lines": 387,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/psoft/model.py | # Copyright 2026-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Optional
from torch import nn
from peft.tuners.tuners_utils import BaseTuner, get_device_map
from peft.utils import TRANSFORMERS_MODELS_TO_PSOFT_TARGET_MODULES_MAPPING
from .config import PsoftConfig
from .layer import PsoftLayer, dispatch_default
class PsoftModel(BaseTuner):
"""
PSOFT (Efficient Orthogonal Fine-Tuning with Principal Subspace Adaptation) model.
Inserts an r*r orthogonal (or scaled) transformation R between low-rank A and B: ΔW = B @ (R-I) @ A. Use
ab_svd_init="psoft_init" to initialize A/B from SVD and freeze them, training only R (and optional magnitude
vectors).
Args:
model: The model to adapt.
config: PsoftConfig.
adapter_name: Adapter name, default "default".
low_cpu_mem_usage: Create empty adapter weights on meta device.
"""
prefix: str = "psoft_"
tuner_layer_cls = PsoftLayer
target_module_mapping = TRANSFORMERS_MODELS_TO_PSOFT_TARGET_MODULES_MAPPING
def _create_and_replace(
self,
peft_config: PsoftConfig,
adapter_name: str,
target: nn.Module,
target_name: str,
parent: nn.Module,
current_key: str,
*,
parameter_name: Optional[str] = None,
) -> None:
if current_key is None:
raise ValueError("Current key must not be None.")
kwargs = {
"target_name": current_key,
"parameter_name": parameter_name,
}
if isinstance(target, PsoftLayer):
target.update_layer(adapter_name, config=peft_config, **kwargs)
return
device_map = get_device_map(self.model)
new_module = self._create_new_module(peft_config, adapter_name, target, device_map=device_map, **kwargs)
if adapter_name not in self.active_adapters:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
@staticmethod
def _create_new_module(psoft_config: PsoftConfig, adapter_name: str, target: nn.Module, **kwargs) -> nn.Module:
new_module = dispatch_default(target, adapter_name, config=psoft_config, **kwargs)
if new_module is None:
raise ValueError(
f"Target module {target} is not supported by minimal PSOFT. Only torch.nn.Linear is supported."
)
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/psoft/model.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/pvera/confidence_interval_generation.py | import tempfile
import numpy as np
import scipy
import torch
from datasets import load_dataset
from torch.nn import LazyLinear, Sequential, Softmax
from torchvision.transforms import Compose, Normalize, Resize
from tqdm import tqdm
from transformers import AutoModel
from peft import PeftModel, PveraConfig, get_peft_model
# load the dataset
device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
dataset = load_dataset("beans", split="train").with_format("torch")
transform = Compose((Resize((224, 224)), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))))
num_classes = dataset.features["labels"].num_classes
# load the model with adapters and create the linear probe
base_model = AutoModel.from_pretrained("facebook/dinov2-base")
config = PveraConfig(r=128, sample_at_inference=False)
model = get_peft_model(base_model, config).to(device)
linear_probe = Sequential(LazyLinear(num_classes), Softmax(-1)).to(device)
# train the model
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(list(model.parameters()) + list(linear_probe.parameters()), lr=1e-4)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
for batch in tqdm(dataloader):
imgs, lbls = transform(batch["image"].float()), batch["labels"]
pred = linear_probe(model(imgs.to(device)).pooler_output)
loss = criterion(pred, lbls.to(device))
loss.backward()
optimizer.step()
# save the model and load it with sample_at_inference=True
model.eval()
linear_probe.eval()
with tempfile.TemporaryDirectory() as tmpdir:
# save the model and the linear probe
model.save_pretrained(tmpdir)
torch.save(linear_probe.state_dict(), tmpdir + "/linear_probe.bin")
# load the model with sample_at_inference=True
base_model = AutoModel.from_pretrained("facebook/dinov2-base")
config = PveraConfig.from_pretrained(tmpdir)
config.sample_at_inference = True
loaded_model = PeftModel.from_pretrained(base_model, tmpdir, config=config).to(device)
loaded_model.eval()
# load the linear probe
loaded_linear_probe = Sequential(LazyLinear(num_classes), Softmax(-1)).to(device)
loaded_linear_probe.load_state_dict(torch.load(tmpdir + "/linear_probe.bin"))
loaded_linear_probe.eval()
# make multiple predictions on an image
img = dataset[0]["image"].unsqueeze(0).to(device)
with torch.no_grad():
all_preds = [loaded_linear_probe(loaded_model(img).pooler_output) for _ in range(16)]
all_preds = torch.vstack(all_preds)
top_pred = all_preds.argmax(-1).mode(0).values
softmax_top_pred = all_preds[:, top_pred]
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2.0, n - 1)
return max(0, m - h), min(1, m + h)
print(mean_confidence_interval(softmax_top_pred.cpu()))
| {
"repo_id": "huggingface/peft",
"file_path": "examples/pvera/confidence_interval_generation.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/peft:src/peft/tuners/pvera/bnb.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from typing import Optional
import bitsandbytes as bnb
import torch
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.tuners_utils import check_adapters_to_merge
from peft.utils.integrations import dequantize_bnb_weight
from peft.utils.other import transpose
from .layer import PveraLayer
if is_bnb_available():
class Linear8bitLt(torch.nn.Module, PveraLayer):
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
pvera_A,
pvera_B,
r: int = 0,
pvera_dropout: float = 0.0,
fan_in_fan_out: bool = False,
init_weights: bool = True,
d_initial: float = 0.1,
**kwargs,
) -> None:
super().__init__()
PveraLayer.__init__(self, base_layer)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
pvera_A,
pvera_B,
r,
pvera_dropout=pvera_dropout,
init_weights=init_weights,
d_initial=d_initial,
)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
if self.merged:
warnings.warn(
f"Already following adapters were merged {','.join(self.merged_adapters)}. "
f"You are now additionally merging {','.join(self.active_adapters)}."
)
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter not in self.pvera_lambda_d.keys():
continue
warnings.warn(
"Merge pvera module to 8-bit linear may get different generations due to rounding errors."
)
pvera_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
state = self.get_base_layer().state
if state.SCB is None:
state.SCB = weight.SCB
output = dequantize_bnb_weight(weight, state)
w_data = output.to(pvera_data.dtype).to(pvera_data.device) + pvera_data
if safe_merge and not torch.isfinite(w_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.get_base_layer().weight = bnb.nn.Int8Params(
w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
).to(weight.device)
state.reset_grads()
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn("Already unmerged. Nothing to do")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.pvera_lambda_d.keys():
continue
warnings.warn(
"Unmerge pvera module to 8-bit linear may get different generations due to rounding errors."
)
pvera_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
state = self.get_base_layer().state
if state.SCB is None:
state.SCB = weight.SCB
output = dequantize_bnb_weight(weight, state=state)
w_data = output.to(pvera_data.dtype).to(pvera_data.device) - pvera_data
self.get_base_layer().weight = bnb.nn.Int8Params(
w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
).to(weight.device)
state.reset_grads()
def get_delta_weight(self, adapter) -> torch.Tensor:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str): The name of the adapter for which the delta weight should be computed.
Returns:
torch.Tensor: The computed delta weight for the PVeRA adapter.
Note:
This method implements the PVeRA-specific weight update. Unlike LoRA, PVeRA uses shared projection
matrices (pvera_A and pvera_B) across all layers, along with per-layer trainable parameters (lambda_d
and lambda_b).
"""
# Retrieve shared projection matrices
pvera_A = self.pvera_A[adapter]
pvera_B = self.pvera_B[adapter]
# Retrieve per-layer trainable parameters
device = pvera_B.device
dtype = pvera_B.dtype
# In case users wants to merge the adapter weights that are in
# (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
# (b)float16 because some CPUs have slow bf16/fp16 matmuls.
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
lambda_d = self.pvera_lambda_d[adapter]
lambda_b = self.pvera_lambda_b[adapter]
if cast_to_fp32:
pvera_A = pvera_A.float()
pvera_B = pvera_B.float()
lambda_d = lambda_d.float()
lambda_b = lambda_b.float()
sliced_A = pvera_A[:, : self.in_features].to(lambda_d.device)
sliced_B = pvera_B[: self.out_features, :].to(lambda_d.device)
lambda_b = lambda_b.unsqueeze(-1)
lambda_d = lambda_d.unsqueeze(-1)
# In PVeRA, the first half of the lambda_d and sliced_A vector corresponds to the mean (mu) and the second half to the log-variance (logvar). When merging, we can only do mean sampling, and therefore only need the first half
lambda_d = lambda_d[: lambda_d.size(0) // 2, :]
sliced_A = sliced_A[: sliced_A.size(0) // 2, :]
# PVeRA-specific computation:
# 1. Apply lambda_d to the input projection (pvera_A)
# 2. Apply lambda_b to the output projection (pvera_B)
# 3. Compute the outer product of the scaled projections
output_tensor = transpose((lambda_b * sliced_B) @ (lambda_d * sliced_A), self.fan_in_fan_out)
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
return output_tensor
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
"""
Perform the forward pass using the PVeRA adapter.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Output tensor after applying the PVeRA adaptation.
Note:
This method implements the PVeRA-specific forward pass. It applies the shared projections (pvera_A and
pvera_B) along with the per-layer trainable parameters (lambda_d and lambda_b) to compute the adapter
output.
"""
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.pvera_lambda_d.keys():
continue
lambda_d = self.pvera_lambda_d[active_adapter]
lambda_b = self.pvera_lambda_b[active_adapter]
pvera_A = self.pvera_A[active_adapter]
pvera_B = self.pvera_B[active_adapter]
dropout = self.pvera_dropout[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
compute_dtype = lambda_d.dtype
if x.dtype != compute_dtype:
x = x.to(compute_dtype)
sliced_A = pvera_A[:, : self.in_features].to(x.device)
sliced_B = pvera_B[: self.out_features, :].to(x.device)
x_temp = dropout(x.to(lambda_d.dtype))
adapter_output = lambda_b * torch.nn.functional.linear(
lambda_d * torch.nn.functional.linear(x_temp, sliced_A), sliced_B
)
if requires_conversion:
adapter_output = adapter_output.to(expected_dtype)
result = result + adapter_output
# Ensure the output tensor has the same dtype as the input tensor
return result.to(x.dtype)
def __repr__(self) -> str:
rep = super().__repr__()
return "pvera." + rep
if is_bnb_4bit_available():
class Linear4bit(torch.nn.Module, PveraLayer):
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
pvera_A,
pvera_B,
r: int = 0,
pvera_dropout: float = 0.0,
fan_in_fan_out: bool = False,
init_weights: bool = True,
d_initial: float = 0.1,
**kwargs,
) -> None:
super().__init__()
PveraLayer.__init__(self, base_layer)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
pvera_A,
pvera_B,
r,
pvera_dropout=pvera_dropout,
init_weights=init_weights,
d_initial=d_initial,
)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
if self.merged:
warnings.warn(
f"Already following adapters were merged {','.join(self.merged_adapters)}. "
f"You are now additionally merging {','.join(self.active_adapters)}."
)
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter not in self.pvera_lambda_d.keys():
continue
warnings.warn(
"Merge pvera module to 4-bit linear may get different generations due to rounding errors."
)
pvera_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
kwargs = weight.__dict__
# torch.compile can introduce attributes preceded by '_', remove them
kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
w_data = bnb.functional.dequantize_4bit(weight.data, weight.quant_state) + pvera_data
if safe_merge and not torch.isfinite(w_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to(
weight.device
)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn("Already unmerged. Nothing to do")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.pvera_lambda_d.keys():
continue
warnings.warn(
"Unmerge pvera module to 4-bit linear may get different generations due to rounding errors."
)
pvera_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
kwargs = weight.__dict__
w_data = bnb.functional.dequantize_4bit(weight.data, weight.quant_state) - pvera_data
self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to(
weight.device
)
def get_delta_weight(self, adapter) -> torch.Tensor:
pvera_A = self.pvera_A[adapter]
pvera_B = self.pvera_B[adapter]
device = pvera_B.device
dtype = pvera_B.dtype
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
lambda_d = self.pvera_lambda_d[adapter]
lambda_b = self.pvera_lambda_b[adapter]
if cast_to_fp32:
pvera_A = pvera_A.float()
pvera_B = pvera_B.float()
lambda_d = lambda_d.float()
lambda_b = lambda_b.float()
sliced_A = pvera_A[:, : self.in_features].to(lambda_d.device)
sliced_B = pvera_B[: self.out_features, :].to(lambda_d.device)
lambda_b = lambda_b.unsqueeze(-1)
lambda_d = lambda_d.unsqueeze(-1)
# In PVeRA, the first half of the lambda_d and sliced_A vector corresponds to the mean (mu) and the second half to the log-variance (logvar). When merging, we can only do mean sampling, and therefore only need the first half
lambda_d = lambda_d[: lambda_d.size(0) // 2, :]
sliced_A = sliced_A[: sliced_A.size(0) // 2, :]
output_tensor = transpose((lambda_b * sliced_B) @ (lambda_d * sliced_A), self.fan_in_fan_out)
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
return output_tensor
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
result = result.clone()
for active_adapter in self.active_adapters:
if active_adapter not in self.pvera_lambda_d.keys():
continue
lambda_d = self.pvera_lambda_d[active_adapter]
lambda_b = self.pvera_lambda_b[active_adapter]
pvera_A = self.pvera_A[active_adapter]
pvera_B = self.pvera_B[active_adapter]
dropout = self.pvera_dropout[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
compute_dtype = lambda_d.dtype
if x.dtype != compute_dtype:
x = x.to(compute_dtype)
sliced_A = pvera_A[:, : self.in_features].to(x.device)
sliced_B = pvera_B[: self.out_features, :].to(x.device)
x_temp = dropout(x.to(lambda_d.dtype))
adapter_output = lambda_b * torch.nn.functional.linear(
lambda_d * torch.nn.functional.linear(x_temp, sliced_A), sliced_B
)
if requires_conversion:
adapter_output = adapter_output.to(expected_dtype)
result = result + adapter_output
# Ensure the output tensor has the same dtype as the input tensor
return result.to(x.dtype)
def __repr__(self) -> str:
rep = super().__repr__()
return "pvera." + rep
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/pvera/bnb.py",
"license": "Apache License 2.0",
"lines": 333,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/pvera/config.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class PveraConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`PveraModel`].
Paper: https://www.arxiv.org/abs/2512.07703.
Args:
r (`int`, *optional*, defaults to `256`):
PVeRA parameter dimension ("rank"). Choose higher values than LoRA ranks here, since PVeRA shares
parameters across layers and therefore uses far fewer parameters than LoRA.
target_modules (`Union[List[str], str]`):
The names of the modules to apply PVeRA to. Only linear layers are supported. When passing a string, a
regex match will be performed. If this is specified as 'all-linear', then all linear/Conv1D modules are
chosen. If this is not specified, modules will bechosen according to the model architecture. If the
architecture is not known, an error will be raised.
projection_prng_key (`int`):
PVeRA PRNG init key. Used for initialising pvera_A and pvera_B for new models or when loading a checkpoint
that did not include these projections. Defaults to `0`.
save_projection (`bool`):
Whether to save the pvera_A / pvera_B projections in the state dict alongside per layer lambda_b / lambda_d
weights. This will increase the size of the checkpoint, but guarantee that we can reload the checkpoint on
all system configurations. Defaults to `True`.
pvera_dropout (`float`):
The dropout probability for PVeRA layers.
d_initial (`float`, *optional*, defaults to `0.1`):
Initial value for `pvera_lambda_d` vector used when initializing the PVeRA parameters. Small values (<=0.1)
are recommended.
fan_in_fan_out (`bool`):
Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses
`Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
bias (`str`):
Bias type for PVeRA. Can be 'none', 'all' or 'pvera_only'. If 'all' or 'pvera_only', the corresponding
biases will be updated during training. Be aware that this means that, even when disabling the adapters,
the model will not produce the same output as the base model would have without adaptation.
modules_to_save (`List[str]`):
List of modules apart from PVeRA layers to be set as trainable and saved in the final checkpoint.
init_weights (`bool`):
Whether to initialize the weights of the PVeRA layers with their default initialization. Don't change this
setting, except if you know exactly what you're doing.
layers_to_transform (`Union[List[int],int]`):
The layer indexes to transform, if this argument is specified, it will apply the PVeRA transformations on
the layer indexes that are specified in this list. If a single integer is passed, it will apply the PVeRA
transformations on the layer at this index.
layers_pattern (`Optional[Union[List[str], str]]`):
The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target the
`nn.ModuleList` of the model, which is often called `'layers'` or `'h'`.
sample_at_inference (`bool` | `dict`, defaults to `False`):
Whether to sample from the learned PVeRA distribution at inference. If false, the learned mean is used. The
default is False (indicating false for all adapters). If True is provided, then the value will be true for
all adapters. If a dict is provided, then a specific value can be specified per adapter (with False by
default for non-specified adapters). For example
`sample_at_inference={'encoder.layer.0.attention.attention.query': True}` will only sample at inference for
one specific adapter.
"""
r: int = field(
default=256,
metadata={
"help": (
"PVeRA parameter dimension ('rank'). Choose higher values than LoRA ranks here, since PVeRA shares "
"parameters across layers and therefore uses far fewer parameters than LoRA."
)
},
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"The names of the modules to apply PVeRA to. Only linear layers are supported. When passing a string, a "
"regex match will be performed. If this is specified as 'all-linear', then all linear/Conv1D modules are "
"chosen. If this is not specified, modules will bechosen according to the model architecture. If the "
"architecture is not known, an error will be raised."
)
},
)
projection_prng_key: int = field(
default=0,
metadata={
"help": (
"PVeRA PRNG init key. Used for initialising pvera_A and pvera_B for new models or when loading a checkpoint "
"that did not include these projections. Defaults to `0`."
)
},
)
save_projection: bool = field(
default=True,
metadata={
"help": (
"Whether to save the pvera_A / pvera_B projections in the state dict alongside per layer lambda_b / lambda_d "
"weights. This will increase the size of the checkpoint, but guarantee that we can reload the checkpoint on "
"all system configurations. Defaults to `True`."
)
},
)
pvera_dropout: float = field(default=0.0, metadata={"help": "The dropout probability for PVeRA layers."})
d_initial: float = field(default=0.1, metadata={"help": "Initial value for d vector. Default is 0.1."})
fan_in_fan_out: bool = field(
default=False,
metadata={
"help": (
"Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses "
"`Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`."
)
},
)
bias: str = field(
default="none",
metadata={
"help": (
"Bias type for PVeRA. Can be 'none', 'all' or 'pvera_only'. If 'all' or 'pvera_only', the corresponding "
"biases will be updated during training. Be aware that this means that, even when disabling the adapters, "
"the model will not produce the same output as the base model would have without adaptation."
)
},
)
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": (
"List of modules apart from PVeRA layers to be set as trainable and saved in the final checkpoint."
)
},
)
init_weights: bool = field(
default=True,
metadata={
"help": (
"Whether to initialize the weights of the PVeRA layers with their default initialization. Don't change this "
"setting, except if you know exactly what you're doing."
),
},
)
layers_to_transform: Optional[Union[list[int], int]] = field(
default=None,
metadata={
"help": (
"The layer indexes to transform, if this argument is specified, it will apply the PVeRA transformations on "
"the layer indexes that are specified in this list. If a single integer is passed, it will apply the PVeRA "
"transformations on the layer at this index."
)
},
)
layers_pattern: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target the "
"`nn.ModuleList` of the model, which is often called `'layers'` or `'h'`."
)
},
)
sample_at_inference: bool = field(
default=False,
metadata={
"help": (
"Whether to sample from the learned PVeRA distribution at inference. If false, the learned mean is used. The "
"default is False (indicating false for all adapters). If True is provided, then the value will be true for "
"all adapters. If a dict is provided, then a specific value can be specified per adapter (with False by "
"default for non-specified adapters). For example "
"`sample_at_inference={'encoder.layer.0.attention.attention.query': True}` will only sample at inference for "
"one specific adapter."
),
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.PVERA
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
# check for layers_to_transform and layers_pattern
if self.layers_pattern and not self.layers_to_transform:
raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ")
if not self.save_projection:
warnings.warn(
"Specified to not save pvera_A and pvera_B within the state dictionary, instead they will be restored "
"using the PRNG key store in `config.projection_prng_key`. Consider setting `config.save_projection` "
"to `True` to guarantee restoring the checkpoint correctly on all system configurations."
)
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/pvera/config.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/pvera/layer.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils.other import transpose
from .._buffer_dict import BufferDict
class PveraLayer(BaseTunerLayer):
    """Base tuner-layer mixin holding the PVeRA state for one adapted module.

    PVeRA shares a single pair of frozen random projection matrices
    (``pvera_A`` / ``pvera_B``) across all adapted layers and learns only small
    per-layer scaling vectors (``pvera_lambda_b`` / ``pvera_lambda_d``).
    """

    # List all names of layers that may contain adapter weights
    adapter_layer_names = ("pvera_lambda_b", "pvera_lambda_d")
    # Shared non-trainable projections, stored as buffers rather than parameters.
    other_param_names = ("pvera_A", "pvera_B")

    def __init__(self, base_layer: nn.Module, **kwargs):
        # The wrapped module (nn.Linear or transformers Conv1D) whose output is adapted.
        self.base_layer = base_layer
        # Per-adapter rank, keyed by adapter name.
        self.r = {}
        self.pvera_dropout = nn.ModuleDict({})
        # For storing vector scale
        self.pvera_lambda_b = nn.ParameterDict({})
        self.pvera_lambda_d = nn.ParameterDict({})
        # Stores a reference to the pvera_A/B BufferDict.
        # Set to `None` otherwise to avoid computation with random weights
        self.pvera_A: Optional[BufferDict] = None
        self.pvera_B: Optional[BufferDict] = None
        # Mark the weight as unmerged
        self._disable_adapters = False
        self.merged_adapters = []
        base_layer = self.get_base_layer()
        if isinstance(base_layer, nn.Linear):
            in_features, out_features = base_layer.in_features, base_layer.out_features
        elif isinstance(base_layer, Conv1D):
            # Conv1D stores its weight as (in, out); `ds_shape` is used when the weight is
            # wrapped (e.g. sharded) and `.shape` would not reflect the full size.
            in_features, out_features = (
                base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
            )
        self.in_features = in_features
        self.out_features = out_features
        self.kwargs = kwargs

    @property
    def merged(self) -> bool:
        # True when at least one adapter has been merged into the base weights.
        return bool(self.merged_adapters)

    def update_layer(
        self,
        adapter_name,
        pvera_A: BufferDict,
        pvera_B: BufferDict,
        r,
        pvera_dropout,
        init_weights,
        d_initial: float = 0.1,
        inference_mode: bool = False,
        **kwargs,
    ):
        """Register the adapter `adapter_name` on this layer.

        Args:
            adapter_name: Name under which the adapter's parameters are stored.
            pvera_A: Shared projection buffer dict (rows = 2*r: mean and log-variance halves).
            pvera_B: Shared projection buffer dict (columns = r).
            r: Adapter rank; must be a positive integer.
            pvera_dropout: Dropout probability applied to the layer input.
            init_weights: If True, reset lambda_b/lambda_d to their defaults.
            d_initial: Fill value used for lambda_d when resetting.
            inference_mode: Forwarded to `set_adapter`.
        """
        if r <= 0:
            raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
        self.r[adapter_name] = r
        if pvera_dropout > 0.0:
            pvera_dropout_layer = nn.Dropout(p=pvera_dropout)
        else:
            pvera_dropout_layer = nn.Identity()
        self.pvera_dropout.update(nn.ModuleDict({adapter_name: pvera_dropout_layer}))
        # Actual trainable parameters
        # lambda_d has 2*r entries: in `forward`, its product with the projection is chunked
        # into a mean half and a log-variance half.
        self.pvera_lambda_b[adapter_name] = nn.Parameter(torch.ones(self.out_features), requires_grad=True)
        self.pvera_lambda_d[adapter_name] = nn.Parameter(torch.randn(r * 2), requires_grad=True)
        # non trainable references to pvera_A/B buffers
        self.pvera_A = pvera_A
        self.pvera_B = pvera_B
        if adapter_name not in pvera_A:
            # This means that this is not the first PVeRA adapter. We have to add an entry in the dict for this adapter.
            if len(self.pvera_A) < 1:
                raise ValueError(
                    "The `pvera_A` and `pvera_B` buffers are empty. This should not happen. Please report this issue."
                )
            # we can take any of the existing adapter's parameters, as they should all be identical
            pvera_A_param = list(self.pvera_A.values())[0]
            pvera_B_param = list(self.pvera_B.values())[0]
            error_tmpl = (
                "{} has a size of {} but {} or greater is required; this probably happened because an additional PVeRA "
                "adapter was added after the first one with incompatible shapes."
            )
            # check input size
            if pvera_A_param.shape[1] < self.in_features:
                raise ValueError(error_tmpl.format("pvera_A", pvera_A_param.shape[1], self.in_features))
            # check output size
            if pvera_B_param.shape[0] < self.out_features:
                raise ValueError(error_tmpl.format("pvera_B", pvera_B_param.shape[0], self.out_features))
            # check r
            error_tmpl = (
                "{} has a size of {} but {} or greater is required; this probably happened because an additional PVeRA "
                "adapter with a lower rank was added after the first one; loading the adapters "
                "in reverse order may solve this."
            )
            if pvera_A_param.shape[0] < self.r[adapter_name]:
                raise ValueError(error_tmpl.format("pvera_A", pvera_A_param.shape[0], self.r[adapter_name]))
            if pvera_B_param.shape[1] < self.r[adapter_name]:
                raise ValueError(error_tmpl.format("pvera_B", pvera_B_param.shape[1], self.r[adapter_name]))
            # New adapter aliases the same underlying tensors as the existing one(s).
            self.pvera_A[adapter_name] = pvera_A_param
            self.pvera_B[adapter_name] = pvera_B_param
        if init_weights:
            self.reset_pvera_parameters(adapter_name, d_initial=d_initial)
        self._move_adapter_to_device_of_base_layer(adapter_name)
        self.set_adapter(self.active_adapters, inference_mode=inference_mode)

    def reset_pvera_parameters(self, adapter_name, d_initial: float = 0.1):
        """Reset lambda_d to the constant `d_initial` and lambda_b to zeros."""
        if adapter_name in self.pvera_lambda_d.keys():
            with torch.no_grad():
                nn.init.zeros_(self.pvera_lambda_d[adapter_name]).fill_(d_initial)
                nn.init.zeros_(self.pvera_lambda_b[adapter_name])
class Linear(nn.Linear, PveraLayer):
    # PVeRA implemented in a dense layer
    def __init__(
        self,
        base_layer,
        pvera_A: BufferDict,
        pvera_B: BufferDict,
        adapter_name: str,
        r: int = 0,
        pvera_dropout: float = 0.0,
        fan_in_fan_out: bool = False,  # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
        is_target_conv_1d_layer: bool = False,
        init_weights: bool = True,
        d_initial: float = 0.1,
        sample_at_inference: bool = False,
        **kwargs,
    ) -> None:
        # this gets the init from nn.Linear's super perspective, i.e. nn.Module.__init__, which should always be called
        super(nn.Linear, self).__init__()
        PveraLayer.__init__(self, base_layer, **kwargs)
        self.fan_in_fan_out = fan_in_fan_out
        # If True, sample from the learned distribution at inference time; otherwise the mean is used.
        self.sample_at_inference = sample_at_inference
        self._active_adapter = adapter_name
        self.update_layer(adapter_name, pvera_A, pvera_B, r, pvera_dropout, init_weights, d_initial=d_initial)
        self.is_target_conv_1d_layer = is_target_conv_1d_layer

    def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
        """
        Merge the active adapter weights into the base weights

        Args:
            safe_merge (`bool`, *optional*):
                If True, the merge operation will be performed in a copy of the original weights and check for NaNs
                before merging the weights. This is useful if you want to check if the merge operation will produce
                NaNs. Defaults to `False`.
            adapter_names (`List[str]`, *optional*):
                The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
                to `None`.
        """
        adapter_names = check_adapters_to_merge(self, adapter_names)
        if not adapter_names:
            # no adapter to merge
            return
        for active_adapter in adapter_names:
            if active_adapter in self.pvera_lambda_d.keys():
                base_layer = self.get_base_layer()
                if safe_merge:
                    # Note that safe_merge will be slower than the normal merge
                    # because of the copy operation.
                    orig_weights = base_layer.weight.data.clone()
                    orig_weights += self.get_delta_weight(active_adapter)
                    if not torch.isfinite(orig_weights).all():
                        raise ValueError(
                            f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
                        )
                    base_layer.weight.data = orig_weights
                else:
                    base_layer.weight.data += self.get_delta_weight(active_adapter)
                self.merged_adapters.append(active_adapter)

    def unmerge(self) -> None:
        """Subtract all merged adapter deltas from the base weights (reverse merge order)."""
        if not self.merged:
            warnings.warn("Already unmerged. Nothing to do.")
            return
        while len(self.merged_adapters) > 0:
            active_adapter = self.merged_adapters.pop()
            if active_adapter in self.pvera_lambda_d.keys():
                self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)

    def get_delta_weight(self, adapter) -> torch.Tensor:
        """
        Compute the delta weight for the given adapter.

        Args:
            adapter (str):
                The name of the adapter for which the delta weight should be computed.
        """
        pvera_A = self.pvera_A[adapter]
        pvera_B = self.pvera_B[adapter]
        device = pvera_B.device
        dtype = pvera_B.dtype
        # In case users wants to merge the adapter weights that are in
        # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
        # (b)float16 because some CPUs have slow bf16/fp16 matmuls.
        cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
        lambda_d = self.pvera_lambda_d[adapter]
        lambda_b = self.pvera_lambda_b[adapter]
        if cast_to_fp32:
            pvera_A = pvera_A.float()
            pvera_B = pvera_B.float()
            lambda_d = lambda_d.float()
            lambda_b = lambda_b.float()
        # The shared projections are over-sized; slice out this layer's portion.
        sliced_A = pvera_A[:, : self.in_features].to(lambda_d.device)
        sliced_B = pvera_B[: self.out_features, :].to(lambda_d.device)
        lambda_b = lambda_b.unsqueeze(-1)
        lambda_d = lambda_d.unsqueeze(-1)
        # In PVeRA, the first half of the lambda_d and sliced_A vector corresponds to the mean (mu) and the second half to the log-variance (logvar). When merging, we can only do mean sampling, and therefore only need the first half
        lambda_d = lambda_d[: lambda_d.size(0) // 2, :]
        sliced_A = sliced_A[: sliced_A.size(0) // 2, :]
        output_tensor = transpose((lambda_b * sliced_B) @ (lambda_d * sliced_A), self.fan_in_fan_out)
        if cast_to_fp32:
            # Restore the original (b)float16 dtype after merging in float32.
            output_tensor = output_tensor.to(dtype=dtype)
        return output_tensor

    def _reparametrize(self, mu, logvar, sample_at_inference):
        """Reparameterization trick: z = mu + eps * std, with eps ~ N(0, I).

        Samples during training, and at evaluation only when `sample_at_inference`
        is set; otherwise returns the mean.
        """
        if self.training or (not self.training and sample_at_inference):
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            z = mu + eps * std
        else:
            z = mu
        return z

    def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        previous_dtype = x.dtype
        if self.disable_adapters:
            # Adapters disabled: make sure nothing is merged, then run the plain base layer.
            if self.merged:
                self.unmerge()
            result = self.base_layer(x, *args, **kwargs)
        elif self.merged:
            # Deltas already folded into the base weights; nothing extra to add.
            result = self.base_layer(x, *args, **kwargs)
        else:
            result = self.base_layer(x, *args, **kwargs)
            for active_adapter in self.active_adapters:
                if active_adapter not in self.pvera_lambda_d.keys():
                    continue
                lambda_d = self.pvera_lambda_d[active_adapter]
                lambda_b = self.pvera_lambda_b[active_adapter]
                pvera_A = self.pvera_A[active_adapter]
                pvera_B = self.pvera_B[active_adapter]
                # As adapted layers may have different shapes and PVeRA contains a single shared pair of A and B matrices,
                # we initialize these matrices with the largest required size for each dimension.
                # During the forward pass, required submatrices are sliced out from the shared pvera_A and pvera_B.
                sliced_A = pvera_A[:, : self.in_features].to(x.device)
                sliced_B = pvera_B[: self.out_features, :].to(x.device)
                dropout = self.pvera_dropout[active_adapter]
                x = x.to(lambda_d.dtype)
                # Project the input with A, then split into mean / log-variance halves.
                mu, logvar = (lambda_d * F.linear(dropout(x), sliced_A)).chunk(2, dim=-1)
                result = result + lambda_b * F.linear(
                    self._reparametrize(mu, logvar, self.sample_at_inference), sliced_B
                )
        result = result.to(previous_dtype)
        return result

    def __repr__(self) -> str:
        # Prefix the standard repr so PVeRA layers are identifiable in printed models.
        rep = super().__repr__()
        return "pvera." + rep
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/pvera/layer.py",
"license": "Apache License 2.0",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/pvera/model.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
import torch
import torch.nn as nn
from transformers.pytorch_utils import Conv1D
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer
from peft.utils import (
TRANSFORMERS_MODELS_TO_PVERA_TARGET_MODULES_MAPPING,
)
from .._buffer_dict import BufferDict
from ..tuners_utils import _maybe_include_all_linear_layers
from .config import PveraConfig
from .layer import Linear, PveraLayer
class PveraModel(BaseTuner):
    """
    Creates Probabilistic Vector-based Random Matrix Adaptation (PVeRA) model from a pretrained transformers model.

    Args:
        model ([`~transformers.PreTrainedModel`]): The model to be adapted.
        config ([`PveraConfig`]): The configuration of the PVeRA model.
        adapter_name (`str`): The name of the adapter, defaults to `"default"`.
        low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
            Create empty adapter weights on meta device. Useful to speed up the loading process.

    Returns:
        `torch.nn.Module`: The PVeRA model.

    Example:
        ```py
        >>> from transformers import AutoModel
        >>> from peft import PveraConfig, get_peft_model

        >>> base_model = AutoModel.from_pretrained("facebook/dinov2-base")
        >>> config = PveraConfig(r=128, sample_at_inference=False)
        >>> model = get_peft_model(base_model, config)
        ```

    **Attributes**:
        - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
        - **peft_config** ([`PveraConfig`]): The configuration of the PVeRA model.
    """

    # Parameter-name prefix used by BaseTuner to identify trainable PVeRA parameters.
    prefix: str = "pvera_lambda_"
    tuner_layer_cls = PveraLayer
    target_module_mapping = TRANSFORMERS_MODELS_TO_PVERA_TARGET_MODULES_MAPPING

    def _find_dim(self, config) -> tuple[int, int]:
        """
        Finds the largest input and output dimensions across linear layers that have been wrapped with PVeRA.

        This will be used for determining the size of the shared pvera_A and pvera_B matrices.
        """
        model_config = self.get_model_config(self.model)
        peft_config = self._prepare_adapter_config(config, model_config)
        peft_config = _maybe_include_all_linear_layers(peft_config, self.model)
        largest_shape = None
        for key, module in self.model.named_modules():
            if not self._check_target_module_exists(peft_config, key):
                continue
            if isinstance(module, nn.Linear):
                module_shape = module.out_features, module.in_features
            elif isinstance(module, Conv1D):
                # Conv1D weight is stored (in, out); reverse to (out, in) to match nn.Linear.
                module_shape = module.weight.ds_shape if hasattr(module.weight, "ds_shape") else module.weight.shape
                module_shape = module_shape[::-1]
            else:
                continue
            if largest_shape is None:
                largest_shape = module_shape
                continue
            # Take the element-wise maximum so the shared buffers fit every target layer.
            if module_shape != largest_shape:
                largest_shape = tuple(max(a, b) for a, b in zip(largest_shape, module_shape))
        if largest_shape is None:
            msg = "No layers types compatible with PVeRA were found. Please check `peft_config.target_modules`."
            raise ValueError(msg)
        return largest_shape

    def _init_pvera_A_pvera_B(self, config: PveraConfig, adapter_name: str) -> None:
        """Create the shared pvera_A/pvera_B buffers, deterministically seeded.

        pvera_A has `r * 2` rows (mean and log-variance halves, split later in the
        layer's forward pass); pvera_B has `r` columns.
        """
        linear_out_dim, linear_in_dim = self._find_dim(config)
        # use of persistent to exclude pvera_A and pvera_B from the state dict if we choose not to save them.
        self.pvera_A = BufferDict({}, persistent=config.save_projection)
        self.pvera_B = BufferDict({}, persistent=config.save_projection)
        # deterministic init of pvera_A and pvera_B if we know the key
        generator = torch.Generator(device="cpu").manual_seed(config.projection_prng_key)
        pvera_A = torch.nn.init.kaiming_uniform_(torch.empty(config.r * 2, linear_in_dim), generator=generator)
        pvera_B = torch.nn.init.kaiming_uniform_(torch.empty(linear_out_dim, config.r), generator=generator)
        self.pvera_A[adapter_name] = pvera_A
        self.pvera_B[adapter_name] = pvera_B

    def _pre_injection_hook(self, model: nn.Module, config: PveraConfig, adapter_name: str) -> None:
        # The shared buffers must exist before any layer is wrapped.
        self._init_pvera_A_pvera_B(config, adapter_name)

    def _check_new_adapter_config(self, config: PveraConfig) -> None:
        """
        A helper method to check the config when a new adapter is being added.

        Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
        """
        super()._check_new_adapter_config(config)
        for existing_config in self.peft_config.values():
            if existing_config is config:
                # skip the current config
                continue
            # All adapters share one pair of projections, so their PRNG keys must agree.
            if existing_config.projection_prng_key != config.projection_prng_key:
                raise ValueError(
                    f"PVeRA PRNG initialisation key must be the same for all adapters. Got {config.projection_prng_key=} but "
                    f"previous config had {existing_config.projection_prng_key}."
                )
        save_project_unique_values = {config.save_projection for config in self.peft_config.values()}
        if len(save_project_unique_values) > 1:
            raise ValueError(
                "PVeRA projection weights must be saved for all adapters or none, but got multiple different values: "
                f"{save_project_unique_values}"
            )

    def _create_and_replace(
        self,
        pvera_config,
        adapter_name,
        target,
        target_name,
        parent,
        current_key,
        **optional_kwargs,
    ):
        """Wrap `target` with a PVeRA layer (or add the adapter to an existing wrapper)."""
        if current_key is None:
            raise ValueError("Current Key shouldn't be `None`")
        r = pvera_config.r
        bias = hasattr(target, "bias") and target.bias is not None
        kwargs = {
            "r": r,
            "pvera_dropout": pvera_config.pvera_dropout,
            "fan_in_fan_out": pvera_config.fan_in_fan_out,
            "init_weights": pvera_config.init_weights,
            "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
            "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
        }
        kwargs["bias"] = bias
        if isinstance(target, Linear):
            # Already a PVeRA layer: just register the new adapter on it.
            target.update_layer(
                adapter_name,
                self.pvera_A,
                self.pvera_B,
                r,
                pvera_config.pvera_dropout,
                pvera_config.init_weights,
                d_initial=pvera_config.d_initial,
            )
        else:
            new_module = self._create_new_module(
                pvera_config, self.pvera_A, self.pvera_B, adapter_name, target, current_key, **kwargs
            )
            if adapter_name not in self.active_adapter:
                # adding an additional adapter: it is not automatically trainable
                new_module.requires_grad_(False)
            self._replace_module(parent, target_name, new_module, target)

    @staticmethod
    def _create_new_module(pvera_config, pvera_A, pvera_B, adapter_name, target, current_key, **kwargs):
        """Build the appropriate PVeRA wrapper (plain, 8-bit or 4-bit) for `target`."""
        # avoid eager bnb import
        if is_bnb_available():
            import bitsandbytes as bnb

            from .bnb import Linear8bitLt
        if is_bnb_4bit_available():
            from .bnb import Linear4bit
        bias = kwargs.pop("bias", False)
        loaded_in_8bit = kwargs.get("loaded_in_8bit", False)
        loaded_in_4bit = kwargs.get("loaded_in_4bit", False)
        if isinstance(target, BaseTunerLayer):
            target_base_layer = target.get_base_layer()
        else:
            target_base_layer = target
        if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
            eightbit_kwargs = kwargs.copy()
            # Carry over the quantization state of the wrapped 8-bit layer.
            eightbit_kwargs.update(
                {
                    "has_fp16_weights": target_base_layer.state.has_fp16_weights,
                    "threshold": target_base_layer.state.threshold,
                    "index": target_base_layer.index,
                }
            )
            return Linear8bitLt(target, adapter_name, pvera_A, pvera_B, **eightbit_kwargs)
        elif loaded_in_4bit and isinstance(target_base_layer, bnb.nn.Linear4bit):
            fourbit_kwargs = kwargs.copy()
            fourbit_kwargs.update(
                {
                    "compute_dtype": target_base_layer.compute_dtype,
                    "compress_statistics": target_base_layer.weight.compress_statistics,
                    "quant_type": target_base_layer.weight.quant_type,
                }
            )
            return Linear4bit(target, adapter_name, pvera_A, pvera_B, **fourbit_kwargs)
        elif isinstance(target_base_layer, torch.nn.Linear):
            if kwargs["fan_in_fan_out"]:
                warnings.warn(
                    "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
                    "Setting fan_in_fan_out to False."
                )
                kwargs["fan_in_fan_out"] = pvera_config.fan_in_fan_out = False
        elif isinstance(target_base_layer, Conv1D):
            kwargs["is_target_conv_1d_layer"] = True
            if not kwargs["fan_in_fan_out"]:
                warnings.warn(
                    "fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True."
                )
                kwargs["fan_in_fan_out"] = pvera_config.fan_in_fan_out = True
        else:
            raise ValueError(
                f"Target module {target} is not supported. Currently, only the following modules are supported: "
                "`torch.nn.Linear`, `transformers.pytorch_utils.Conv1D`."
            )
        # A bool applies globally; a dict selects sampling per module key (default False).
        if isinstance(pvera_config.sample_at_inference, bool):
            module_sample_at_inference = pvera_config.sample_at_inference
        else:
            module_sample_at_inference = pvera_config.sample_at_inference.get(current_key, False)
        new_module = Linear(
            target,
            pvera_A,
            pvera_B,
            adapter_name,
            bias=bias,
            d_initial=pvera_config.d_initial,
            sample_at_inference=module_sample_at_inference,
            **kwargs,
        )
        return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/pvera/model.py",
"license": "Apache License 2.0",
"lines": 225,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:tests/test_pvera.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This test file is based on the VeRA test file, since both methods are very close.
import os
import pytest
import torch
from accelerate.utils.imports import is_bf16_available
from safetensors import safe_open
from torch import nn
from peft import PeftModel, PveraConfig, get_peft_model
class MLP(nn.Module):
    """Small feed-forward classifier used as the base model in the PVeRA tests."""

    def __init__(self, bias=True):
        super().__init__()
        self.relu = nn.ReLU()
        self.lin0 = nn.Linear(10, 20, bias=bias)
        self.lin1 = nn.Linear(20, 20, bias=bias)  # lin1 and lin2 have same shape
        self.lin2 = nn.Linear(20, 20, bias=bias)
        self.lin3 = nn.Linear(20, 2, bias=bias)
        self.sm = nn.LogSoftmax(dim=-1)

    def forward(self, X):
        # Fixed pipeline: three ReLU-separated linear layers, then log-softmax
        # over the two output classes.
        pipeline = (self.lin0, self.relu, self.lin1, self.relu, self.lin2, self.relu, self.lin3, self.sm)
        for stage in pipeline:
            X = stage(X)
        return X
class TestPVeRA:
    """Tests for PVeRA: shared projection buffers, save/load behavior, shapes and dtypes."""

    @pytest.fixture
    def mlp(self):
        # Seed before construction so every test starts from identical base weights.
        torch.manual_seed(0)
        model = MLP()
        return model

    @pytest.fixture
    def mlp_same_prng(self, mlp):
        torch.manual_seed(0)
        config = PveraConfig(target_modules=["lin1", "lin2"], init_weights=False)
        # creates a default PVeRA adapter
        peft_model = get_peft_model(mlp, config)
        config2 = PveraConfig(target_modules=["lin1", "lin2"], init_weights=False)
        peft_model.add_adapter("other", config2)
        return peft_model

    def test_multiple_adapters_same_prng_weights(self, mlp_same_prng):
        # we can have multiple adapters with the same prng key, in which case the weights should be shared
        assert (
            mlp_same_prng.base_model.model.lin1.pvera_A["default"]
            is mlp_same_prng.base_model.model.lin1.pvera_A["other"]
        )
        assert (
            mlp_same_prng.base_model.model.lin1.pvera_B["default"]
            is mlp_same_prng.base_model.model.lin1.pvera_B["other"]
        )
        assert (
            mlp_same_prng.base_model.model.lin2.pvera_A["default"]
            is mlp_same_prng.base_model.model.lin2.pvera_A["other"]
        )
        assert (
            mlp_same_prng.base_model.model.lin2.pvera_B["default"]
            is mlp_same_prng.base_model.model.lin2.pvera_B["other"]
        )
        # Shared projections but distinct lambda vectors: outputs must still differ.
        input = torch.randn(5, 10)
        mlp_same_prng.set_adapter("default")
        output_default = mlp_same_prng(input)
        mlp_same_prng.set_adapter("other")
        output_other = mlp_same_prng(input)
        assert not torch.allclose(output_default, output_other, atol=1e-3, rtol=1e-3)

    def test_multiple_adapters_different_prng_raises(self):
        # we cannot have multiple adapters with different prng keys
        model = MLP()
        config = PveraConfig(target_modules=["lin1", "lin2"], init_weights=False)
        # creates a default PVeRA adapter
        peft_model = get_peft_model(model, config)
        config2 = PveraConfig(target_modules=["lin1", "lin2"], init_weights=False, projection_prng_key=123)
        msg = (
            r"PVeRA PRNG initialisation key must be the same for all adapters. Got config.projection_prng_key=123 but "
            r"previous config had 0"
        )
        with pytest.raises(ValueError, match=msg):
            peft_model.add_adapter("other", config2)

    def test_multiple_adapters_save_load_save_projection_false(self, mlp, tmp_path):
        # check saving and loading works with multiple adapters without saved projection weights
        torch.manual_seed(1)
        config = PveraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False)
        # creates a default PVeRA adapter
        peft_model = get_peft_model(mlp, config, adapter_name="first")
        config2 = PveraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False)
        peft_model.add_adapter("second", config2)
        peft_model.eval()
        input = torch.randn(5, 10)
        peft_model.set_adapter("first")
        output_first = peft_model(input)
        peft_model.set_adapter("second")
        output_second = peft_model(input)
        # sanity check
        assert not torch.allclose(output_first, output_second, atol=1e-3, rtol=1e-3)
        save_path = tmp_path / "pvera"
        peft_model.save_pretrained(save_path)
        assert os.path.exists(save_path / "first" / "adapter_config.json")
        assert os.path.exists(save_path / "second" / "adapter_config.json")
        # Reload into a freshly seeded base model; projections are regenerated from the PRNG key.
        torch.manual_seed(0)
        mlp = MLP()
        peft_model = PeftModel.from_pretrained(mlp, save_path / "first", adapter_name="first")
        peft_model.load_adapter(save_path / "second", "second")
        peft_model.eval()
        peft_model.set_adapter("first")
        output_first_loaded = peft_model(input)
        peft_model.set_adapter("second")
        output_second_loaded = peft_model(input)
        assert torch.allclose(output_first, output_first_loaded, atol=1e-3, rtol=1e-3)
        assert torch.allclose(output_second, output_second_loaded, atol=1e-3, rtol=1e-3)

    def test_multiple_adapters_save_projection_false_contains_no_pvera_A_pvera_B(self, mlp, tmp_path):
        # With save_projection=False, the checkpoints must not contain the shared buffers.
        torch.manual_seed(1)
        config = PveraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False)
        # creates a default PVeRA adapter
        peft_model = get_peft_model(mlp, config, adapter_name="first")
        config2 = PveraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False)
        peft_model.add_adapter("second", config2)
        save_path = tmp_path / "pvera"
        peft_model.save_pretrained(save_path)
        sd_default = {}
        with safe_open(save_path / "first" / "adapter_model.safetensors", framework="pt", device="cpu") as f:
            for key in f.keys():
                sd_default[key] = f.get_tensor(key)
        assert not any("pvera_A" in key for key in sd_default)
        assert not any("pvera_B" in key for key in sd_default)
        sd_other = {}
        with safe_open(save_path / "second" / "adapter_model.safetensors", framework="pt", device="cpu") as f:
            for key in f.keys():
                sd_other[key] = f.get_tensor(key)
        assert not any("pvera_A" in key for key in sd_other)
        assert not any("pvera_B" in key for key in sd_other)

    def test_pvera_A_pvera_B_share_memory(self, mlp_same_prng):
        # The model-level buffers and every layer's buffers must alias the same storage.
        pvera_A = mlp_same_prng.pvera_A["default"]
        pvera_B = mlp_same_prng.pvera_B["default"]
        # these tensors should share the same data
        assert pvera_A.data_ptr() == mlp_same_prng.base_model.model.lin1.pvera_A["default"].data_ptr()
        assert pvera_B.data_ptr() == mlp_same_prng.base_model.model.lin1.pvera_B["default"].data_ptr()
        assert pvera_A.data_ptr() == mlp_same_prng.base_model.model.lin2.pvera_A["default"].data_ptr()
        assert pvera_B.data_ptr() == mlp_same_prng.base_model.model.lin2.pvera_B["default"].data_ptr()
        # sanity check: these tensors shouldn't share the same data
        assert pvera_A.data_ptr() != pvera_B.data_ptr()

    def test_pvera_lambda_dont_share_memory(self, mlp_same_prng):
        # sanity check: these tensors shouldn't share the same data
        assert (
            mlp_same_prng.base_model.model.lin1.pvera_lambda_b["default"].data_ptr()
            != mlp_same_prng.base_model.model.lin1.pvera_lambda_b["other"].data_ptr()
        )
        assert (
            mlp_same_prng.base_model.model.lin1.pvera_lambda_b["default"].data_ptr()
            != mlp_same_prng.base_model.model.lin2.pvera_lambda_b["default"].data_ptr()
        )
        assert (
            mlp_same_prng.base_model.model.lin1.pvera_lambda_b["other"].data_ptr()
            != mlp_same_prng.base_model.model.lin2.pvera_lambda_b["other"].data_ptr()
        )
        assert (
            mlp_same_prng.base_model.model.lin1.pvera_lambda_d["default"].data_ptr()
            != mlp_same_prng.base_model.model.lin1.pvera_lambda_d["other"].data_ptr()
        )
        assert (
            mlp_same_prng.base_model.model.lin1.pvera_lambda_d["default"].data_ptr()
            != mlp_same_prng.base_model.model.lin2.pvera_lambda_d["default"].data_ptr()
        )
        assert (
            mlp_same_prng.base_model.model.lin1.pvera_lambda_d["other"].data_ptr()
            != mlp_same_prng.base_model.model.lin2.pvera_lambda_d["other"].data_ptr()
        )

    def test_pvera_different_shapes(self, mlp):
        config = PveraConfig(target_modules=["lin0", "lin3"], init_weights=False)
        mlp_different_shapes = get_peft_model(mlp, config)
        pvera_A = mlp_different_shapes.pvera_A["default"]
        pvera_B = mlp_different_shapes.pvera_B["default"]
        # sanity check
        assert mlp.lin0.base_layer.weight.shape != mlp.lin3.base_layer.weight.shape
        # lin0 has the largest output dimension, lin3 has the largest input dimension
        # pvera_A should have the shape of (rank*2, largest_in), pvera_B should have the shape of (largest_out, rank)
        assert pvera_A.shape == (config.r * 2, mlp.lin3.in_features)
        assert pvera_B.shape == (mlp.lin0.out_features, config.r)
        # should not raise
        input = torch.randn(5, 10)
        mlp_different_shapes(input)

    @pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16])
    def test_pvera_dtypes(self, dtype):
        if dtype == torch.bfloat16:
            # skip if bf16 is not supported on hardware, see #1872
            if not is_bf16_available():
                pytest.skip("bfloat16 not supported on this system, skipping the test")
        model = MLP().to(dtype)
        config = PveraConfig(target_modules=["lin1", "lin2"], init_weights=False)
        peft_model = get_peft_model(model, config)
        inputs = torch.randn(5, 10).to(dtype)
        output = peft_model(inputs)  # should not raise
        assert output.dtype == dtype
| {
"repo_id": "huggingface/peft",
"file_path": "tests/test_pvera.py",
"license": "Apache License 2.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:tests/training/adapters.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to test FSDP adapter operations (disable_adapters, set_adapter, etc.) in a distributed environment.
This script is designed to be run with `accelerate launch` to properly test FSDP behavior while running one pass with
autograd and another with adapters being disabled.
Usage:
accelerate launch --config_file tests/training/fsdp_config.yaml tests/training/adapters.py
"""
import argparse
import tempfile
import torch
from accelerate import PartialState
from datasets import load_dataset
from torch import nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
)
from peft import LoraConfig, get_peft_model
def get_base_model_weights(peft_model):
    """Extract base model weights (non-LoRA weights)."""
    # Keep only parameters that belong to neither a LoRA adapter nor a
    # `modules_to_save` copy; detach+clone so later training can't mutate them.
    return {
        name: param.detach().clone()
        for name, param in peft_model.named_parameters()
        if "lora" not in name.lower() and "modules_to_save" not in name
    }
def get_adapter_weights(peft_model, adapter_name):
    """Snapshot every parameter whose qualified name mentions *adapter_name*.

    Returns a dict mapping parameter name -> detached clone of its tensor.
    """
    snapshot = {}
    for param_name, tensor in peft_model.named_parameters():
        if adapter_name not in param_name:
            continue
        snapshot[param_name] = tensor.detach().clone()
    return snapshot
def verify_weights_unchanged(initial_weights, final_weights, weight_type):
    """Verify that weights have not changed during training.

    Args:
        initial_weights: Mapping of parameter name -> tensor snapshot taken before training.
        final_weights: Mapping of parameter name -> tensor snapshot taken after training.
        weight_type: Human-readable label (e.g. "Base model") used in error messages.

    Raises:
        AssertionError: If a parameter is missing after training or its values changed.
    """
    for name in initial_weights:
        if name not in final_weights:
            raise AssertionError(f"{weight_type} weight missing after training: {name}")
        torch.testing.assert_close(
            # Compare on the final tensor's device/dtype: under FSDP the summoned
            # parameters may live on a different device than the snapshot.
            initial_weights[name].to(device=final_weights[name].device, dtype=final_weights[name].dtype),
            final_weights[name],
            # Improvement: include the parameter name and weight type so a failure
            # pinpoints exactly which tensor drifted (the default message does not).
            msg=lambda m, name=name: f"{weight_type} weight changed during training: {name}\n{m}",
        )
class Model(nn.Module):
    """Causal LM wrapped with two LoRA adapters for FSDP adapter-op testing.

    The "default" adapter (r=16) is the one being trained; "second_adapter"
    (r=8) is added but kept frozen so the surrounding test can verify its
    weights never move. The forward pass runs the model once with adapters
    enabled and once with them disabled, summing both losses so autograd
    exercises both code paths in a single step.
    """

    def __init__(self, model_id):
        super().__init__()
        self.tokenizer = AutoTokenizer.from_pretrained(model_id)
        base_model = AutoModelForCausalLM.from_pretrained(
            model_id,
            dtype=torch.bfloat16,
        )

        def make_config(rank, alpha):
            # Both adapters share every setting except rank/alpha.
            return LoraConfig(
                r=rank,
                lora_alpha=alpha,
                target_modules=["q_proj", "v_proj"],
                modules_to_save=["lm_head"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
            )

        self.peft_model = get_peft_model(base_model, make_config(16, 32))
        # Second adapter remains disabled/unused throughout training.
        self.peft_model.add_adapter("second_adapter", make_config(8, 16))
        self.peft_model.set_adapter("default")
        self.peft_model.to(torch.bfloat16)
        self.peft_model.set_requires_grad("default", requires_grad=True)
        self.peft_model.set_requires_grad("second_adapter", requires_grad=False)

    def forward(self, input_ids=None, attention_mask=None, labels=None):
        # Pass 1: adapters active.
        with_adapters = self.peft_model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        # Pass 2: same batch with every adapter disabled.
        with self.peft_model.disable_adapter():
            without_adapters = self.peft_model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        return (with_adapters.loss + without_adapters.loss,)
def test_training(model_id: str):
    """Train the wrapped model under FSDP and check weight isolation.

    After a short training run, asserts that (a) base-model weights and (b) the
    frozen "second_adapter" weights are numerically identical to their
    pre-training snapshots — i.e. only the active "default" adapter was updated.
    """
    state = PartialState()
    torch.manual_seed(42)
    model = Model(model_id)
    # Snapshot the weights that must NOT change during training.
    initial_base_weights = get_base_model_weights(model.peft_model)
    initial_second_adapter_weights = get_adapter_weights(model.peft_model, "second_adapter")
    if state.is_main_process:
        print(f"Number of base model weight tensors: {len(initial_base_weights)}")
        print(f"Number of second_adapter weight tensors: {len(initial_second_adapter_weights)}")
    data = load_dataset("ybelkada/english_quotes_copy")
    data = data.map(lambda samples: model.tokenizer(samples["quote"]), batched=True)
    with tempfile.TemporaryDirectory() as tmp_dir:
        trainer = Trainer(
            model=model,
            train_dataset=data["train"],
            optimizer_cls_and_kwargs=(torch.optim.SGD, {"lr": 2e-4}),
            args=TrainingArguments(
                per_device_train_batch_size=4,
                gradient_accumulation_steps=4,
                warmup_steps=2,
                max_steps=5,
                learning_rate=2e-4,
                bf16=True,
                logging_steps=1,
                output_dir=tmp_dir,
            ),
            data_collator=DataCollatorForLanguageModeling(model.tokenizer, mlm=False),
        )
        trainer.train()
        # Gather the full (unsharded) parameters so the post-training snapshots
        # are comparable with the pre-training ones taken before FSDP wrapping.
        with FSDP.summon_full_params(trainer.model):
            final_base_weights = get_base_model_weights(model.peft_model)
            final_second_adapter_weights = get_adapter_weights(model.peft_model, "second_adapter")
        # Test to make sure that through this FSDP setup the base weights remain unchanged
        # (i.e. adapter training doesn't somehow influence the base weights)
        verify_weights_unchanged(initial_base_weights, final_base_weights, "Base model")
        verify_weights_unchanged(initial_second_adapter_weights, final_second_adapter_weights, "second_adapter")
def main(model_id: str):
    """Entry point: run the FSDP adapter training check for *model_id*."""
    test_training(model_id)
if __name__ == "__main__":
    # CLI entry point; --model_id defaults to a small Qwen3 checkpoint.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--model_id", type=str, required=False, default="Qwen/Qwen3-0.6B")
    parsed = arg_parser.parse_args()
    main(model_id=parsed.model_id)
| {
"repo_id": "huggingface/peft",
"file_path": "tests/training/adapters.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:src/peft/tuners/lora/intruders.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .layer import LoraLayer
def reduce_intruder_dimension(
    peft_model,
    old_adapter_name="default",
    new_adapter_name="intruder_reduced",
    top_k=10,
    threshold_epsilon=0.5,
    mitigation_lambda=0.75,
    logging_sink=print,
):
    """
    Intruder dimension mitigation based on https://huggingface.co/papers/2410.21228 ("LoRA vs Full Fine-tuning: An
    Illusion of Equivalence").

    This method can recover previous knowledge (i.e. mitigate forgetting) by post-processing already trained low-rank
    adapters. This comes at a cost of task accuracy - tuning the `mitigation_lambda` value can be used to trade between
    these two factors.

    After mitigation is done there will be a new adapter with the name set in `new_adapter_name` which is also set to
    be the currently active adapter. Inference on the mitigated model will therefore use the modified adapter. To
    switch back to the original adapter you can use `peft_model.set_adapter(<old_adapter_name>)`.

    Currently only LoRA is supported as it is not clear whether this method generalizes to other delta-weight methods.

    Parameters:
        peft_model:
            The PEFT model with a loaded LoRA adapter with the name provided in `old_adapter_name`. Currently mixed
            models are not supported.
        old_adapter_name (default: "default")
            Name of the trained adapter to mitigate. This adapter is left unmodified.
        new_adapter_name (default: "intruder_reduced")
            Name of the new adapter that receives the mitigated weights and is activated afterwards.
        top_k (default: 10)
            Consider the top-k dimensions for intruder detection. The larger the value, the more dimensions will be
            considered for intruder detection analysis (and the more false-positives there can be). Operates on the
            cosine similarity between base weights and adapter weights roughly sorted by influence of dimension
            (determined by singular value decomposition), so a top-k of 10 will look at the 10 most 'important'
            dimensions.
        threshold_epsilon (default: 0.5)
            Threshold value when to consider a cosine similarity between base weight and adapter weight as intruder.
            According to the paper, intruder dimensions show near-zero absolute cosine similarity with pre-trained
            singular vectors. The lower this value, the less potential intruder dimensions are identified. The higher
            the value, the more potential false-positives are considered as intruders.
        mitigation_lambda (default: 0.75)
            The relative portion of the intruder dimensions that is subtracted from the adapter's delta weight. The
            higher the value the more of the intruder dimension is subtracted but the more information is lost. Refer
            to Figure 8 in the paper for a trade-off analysis.
        logging_sink (default: print)
            Function that prints information about the mitigation process. Set to None if you don't want any output.
    """
    # Note that this function currently doesn't support `compile_kwargs` similar to the LoRA conversion tooling
    # since there was no clear way how `torch.compile` can be used to improve performance at the time of
    # implementation. See discussion: https://github.com/huggingface/peft/pull/2999#discussion_r2717989613

    def no_logging_sink(*args, **kwargs):
        pass

    if logging_sink is None:
        logging_sink = no_logging_sink

    if peft_model.peft_type != "LORA":
        raise ValueError("The provided model is not using LoRA and is therefore not supported.")

    peft_model.add_adapter(new_adapter_name, peft_model.peft_config[old_adapter_name])

    # apply mitigation on the old adapter's weights and move them to the new adapter's weights
    for layer_name, layer in peft_model.named_modules():
        if not isinstance(layer, LoraLayer):
            continue

        W = layer.get_base_layer().weight.data
        dW = layer.get_delta_weight(old_adapter_name)

        is_embedding = old_adapter_name not in layer.lora_B

        # Fixed: torch.linalg.svd does not support half precision, so *all* SVD
        # inputs (W and the merged weight) must be computed in float32.
        # Previously only W was cast, leaving svd(W_merged) to fail for
        # fp16/bf16 base weights.
        cast_to_fp32 = W.dtype in (torch.float16, torch.bfloat16)
        if cast_to_fp32:
            W = W.float()
            dW = dW.float()
        W_merged = W + dW

        # compare base weights and adapter weights using cosine similarity.
        # based on this similarity we can find intruder dimensions using threshold_epsilon
        # on the top_k dimensions
        U_base, _S_base, _V_base = torch.linalg.svd(W, full_matrices=False)
        U_merged, S_merged, V_merged = torch.linalg.svd(W_merged, full_matrices=False)
        cos_sim = (U_merged.T @ U_base).abs().max(dim=1).values
        intruder_idcs = torch.where(cos_sim[:top_k] < threshold_epsilon)[0].tolist()

        if not intruder_idcs:
            logging_sink(f"{layer_name}: No intruders")
            # we're not modifying the weights since there are no intruders but we make sure to copy the
            # adapter weights unmodified to the new adapter, otherwise these weights will be
            # initialized randomly
            if is_embedding:
                layer.lora_embedding_B[new_adapter_name].data = layer.lora_embedding_B[old_adapter_name].data.clone()
                layer.lora_embedding_A[new_adapter_name].data = layer.lora_embedding_A[old_adapter_name].data.clone()
            else:
                layer.lora_B[new_adapter_name].weight.data = layer.lora_B[old_adapter_name].weight.data.clone()
                layer.lora_A[new_adapter_name].weight.data = layer.lora_A[old_adapter_name].weight.data.clone()
            continue
        else:
            logging_sink(f"{layer_name}: Intruders: {len(intruder_idcs)}")

        # the paper computes the intruder dimensions that are subtracted on (W + dW), so we do the same. experiments
        # showed that this achieves better knowledge recovery than on dW alone.
        B_intruder = U_merged[:, intruder_idcs] @ torch.diag(S_merged)[intruder_idcs, :].sqrt()
        A_intruder = (torch.diag(S_merged)[:, intruder_idcs]).sqrt() @ V_merged[intruder_idcs, :]

        # apply mitigation and recover dW = (B@A).
        # (W+dW+mitigation)-W = dW+mitigation, so we can convert dW back to A/B using SVD
        # since we know the effective rank from the adapter config.
        #
        # note that we also remove the scaling from dW which is applied in get_delta_weight() since
        # it impacts mitigation performance both in task accuracy and forgetting.
        W_mitigated = W_merged + (mitigation_lambda - 1) * (B_intruder @ A_intruder)
        dW_mitigated = W_mitigated - W
        dW_mitigated /= layer.scaling[old_adapter_name]
        U_dW, S_dW, V_dW = torch.linalg.svd(dW_mitigated, full_matrices=False)

        # Note: share scaling by S equally between B and A to avoid one matrix having a significantly
        # different norm and avoid possibly weird training dynamics.
        effective_rank = layer.r[old_adapter_name]
        B_new = U_dW[:, :effective_rank] @ torch.diag(S_dW[:effective_rank]).sqrt()
        A_new = torch.diag(S_dW[:effective_rank]).sqrt() @ V_dW[:effective_rank]

        # Fixed: cast the fp32 SVD results back to the dtype of the destination
        # adapter parameters (a no-op for fp32 adapters) so the new adapter's
        # dtype matches what add_adapter created.
        if is_embedding:
            layer.lora_embedding_B[new_adapter_name].data = B_new.to(layer.lora_embedding_B[new_adapter_name].dtype)
            layer.lora_embedding_A[new_adapter_name].data = A_new.to(layer.lora_embedding_A[new_adapter_name].dtype)
        else:
            layer.lora_B[new_adapter_name].weight.data = B_new.to(layer.lora_B[new_adapter_name].weight.dtype)
            layer.lora_A[new_adapter_name].weight.data = A_new.to(layer.lora_A[new_adapter_name].weight.dtype)

    logging_sink(f"Enabling new adapter {new_adapter_name}")
    peft_model.set_adapter(new_adapter_name)
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/lora/intruders.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:tests/test_lora_intruders.py | from functools import partial
from io import StringIO
import pytest
import torch
from transformers import AutoModelForCausalLM
from peft import LoraConfig, MissConfig, get_peft_model
from peft.tuners.lora.intruders import reduce_intruder_dimension
from .testing_utils import hub_online_once
class TestLoraIntruders:
    """Tests for `reduce_intruder_dimension` (intruder dimension mitigation for LoRA adapters)."""

    @pytest.fixture
    def model_lin(self):
        # Tiny LoRA model targeting a linear layer (q_proj).
        model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM"
        with hub_online_once(model_id):
            base_model = AutoModelForCausalLM.from_pretrained(model_id)
            cfg = LoraConfig(target_modules=["q_proj"])
            peft_model = get_peft_model(base_model, cfg)
            return peft_model

    @pytest.fixture
    def model_emb(self):
        # Tiny LoRA model targeting an embedding layer (embed_tokens).
        model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM"
        with hub_online_once(model_id):
            base_model = AutoModelForCausalLM.from_pretrained(model_id)
            cfg = LoraConfig(target_modules=["embed_tokens"])
            peft_model = get_peft_model(base_model, cfg)
            return peft_model

    @pytest.fixture
    def model_lin_non_lora(self):
        # Non-LoRA (MiSS) model used to verify the LoRA-only guard raises.
        model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM"
        with hub_online_once(model_id):
            base_model = AutoModelForCausalLM.from_pretrained(model_id)
            cfg = MissConfig(target_modules=["q_proj"])
            peft_model = get_peft_model(base_model, cfg)
            return peft_model

    def test_lora_intruders_linear(self, model_lin):
        original_weights = {}
        for name, module in model_lin.named_modules():
            if "q_proj" in name and hasattr(module, "lora_B"):
                original_weights[name] = module.lora_B["default"].weight.detach().clone()
        buffer = StringIO()
        # use a high epsilon to make sure that we get a match to see whether layers get modified
        reduce_intruder_dimension(model_lin, threshold_epsilon=999, logging_sink=partial(print, file=buffer))
        # the old adapter should not be active anymore, just the new one. but the old one should still exist.
        assert model_lin.active_adapters == ["intruder_reduced"]
        assert set(model_lin.peft_config.keys()) == {"default", "intruder_reduced"}
        buffer.seek(0)
        lines = buffer.readlines()
        assert len(lines) > 0
        assert any("q_proj" in line for line in lines)
        for name, module in model_lin.named_modules():
            if name in original_weights:
                # Make sure that the original adapter was not modified
                assert torch.equal(module.lora_B["default"].weight.detach(), original_weights[name])
                # Since the epsilon is really high, we should modify every layer so the weights should differ
                new_weight = module.lora_B["intruder_reduced"].weight.detach()
                assert not torch.equal(new_weight, original_weights[name])

    def test_lora_intruders_embedding(self, model_emb):
        original_weights = {}
        for name, module in model_emb.named_modules():
            if "embed_tokens" in name and hasattr(module, "lora_B"):
                original_weights[name] = module.lora_embedding_B["default"].detach().clone()
        buffer = StringIO()
        # use a high epsilon to make sure that we get a match to see whether layers get modified
        reduce_intruder_dimension(model_emb, threshold_epsilon=999, logging_sink=partial(print, file=buffer))
        # the old adapter should not be active anymore, just the new one. but the old one should still exist.
        assert model_emb.active_adapters == ["intruder_reduced"]
        assert set(model_emb.peft_config.keys()) == {"default", "intruder_reduced"}
        buffer.seek(0)
        lines = buffer.readlines()
        assert len(lines) > 0
        assert any("embed_tokens" in line for line in lines)
        for name, module in model_emb.named_modules():
            if name in original_weights:
                # Make sure that the original adapter was not modified
                assert torch.equal(module.lora_embedding_B["default"].detach(), original_weights[name])
                # Since the epsilon is really high, we should modify every layer so the weights should differ
                new_weight = module.lora_embedding_B["intruder_reduced"].detach()
                assert not torch.equal(new_weight, original_weights[name])

    def test_non_lora_intruders_linear_raises(self, model_lin_non_lora):
        with pytest.raises(ValueError) as e:
            reduce_intruder_dimension(model_lin_non_lora, threshold_epsilon=999)
        assert "The provided model is not using LoRA" in str(e)
| {
"repo_id": "huggingface/peft",
"file_path": "tests/test_lora_intruders.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:tests/training/training.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a simple example of training a model with QLoRA.
"""
import argparse
import os
import tempfile
from typing import Literal
import torch
from accelerate import PartialState
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
)
from peft import LoraConfig, get_peft_model
def print_if_process_zero(*args, **kwargs):
    """Print only once in distributed runs.

    Delegates to `PartialState().print`, which (per accelerate) emits output on
    the main process only, avoiding duplicated log lines across ranks.
    """
    PartialState().print(*args, **kwargs)
def main(model_id: str, quant: Literal["4bit", "8bit"] | None, target_modules: list[str] | None):
    """Run a short (Q)LoRA training job and sanity-check the result.

    Args:
        model_id: Hub id or local path of the base causal LM.
        quant: Optional bitsandbytes quantization mode ("4bit", "8bit", or None
            for no quantization).
        target_modules: Modules to adapt with LoRA. ["all-linear"] is collapsed
            to the special "all-linear" string understood by peft; None falls
            back to peft's per-architecture defaults.

    Raises:
        ValueError: For an unsupported `quant` value, or 8bit quantization
            combined with FSDP (not supported by bitsandbytes).
    """
    if target_modules == ["all-linear"]:
        # "all-linear" is a peft keyword, not a module name; pass it as a plain string.
        target_modules = "all-linear"
    print_if_process_zero("=" * 50)
    print_if_process_zero(f"{model_id=}, {quant=}, {target_modules=}")
    print_if_process_zero("=" * 50)
    data = load_dataset("ybelkada/english_quotes_copy")
    # NOTE(review): assumes the launcher exports FSDP_VERSION when FSDP is active — confirm against the accelerate config used.
    is_fsdp = "FSDP_VERSION" in os.environ
    if quant == "4bit":
        quant_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type="nf4",
            # Fixed: the parameter is `bnb_4bit_compute_dtype`; the previous
            # `bnb_4bit_compute_type` was not a BitsAndBytesConfig argument and
            # was silently ignored.
            bnb_4bit_compute_dtype="bfloat16",
            bnb_4bit_quant_storage="bfloat16",
            bnb_4bit_use_double_quant=True,
        )
    elif quant == "8bit":
        if is_fsdp:
            raise ValueError("QLoRA with 8bit bnb is not supported for FSDP.")
        quant_config = BitsAndBytesConfig(load_in_8bit=True)
    elif quant is None:
        quant_config = None
    else:
        raise ValueError(f"Unsupported quantization: {quant}, expected one of '4bit', '8bit', or None")
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    if not tokenizer.pad_token:
        tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(
        model_id, quantization_config=quant_config, dtype=torch.bfloat16, device_map={"": PartialState().process_index}
    )
    peft_config = LoraConfig(
        r=16,
        lora_alpha=32,
        target_modules=target_modules,
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM",
    )
    model = get_peft_model(model, peft_config)
    print_if_process_zero(model)
    if PartialState().is_local_main_process:
        model.print_trainable_parameters()
    data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
    with tempfile.TemporaryDirectory() as tmp_dir:
        trainer = Trainer(
            model=model,
            train_dataset=data["train"],
            optimizer_cls_and_kwargs=(torch.optim.SGD, {"lr": 2e-4}),
            # FSDP with AdamW:
            # > RuntimeError: output with shape [] doesn't match the broadcast shape [1]
            args=TrainingArguments(
                per_device_train_batch_size=4,
                gradient_accumulation_steps=4,
                warmup_steps=2,
                max_steps=15,
                learning_rate=2e-4,
                bf16=True,
                logging_steps=5,
                output_dir=tmp_dir,
            ),
            data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
        )
        trainer.train()
        if trainer.is_fsdp_enabled:
            trainer.accelerator.state.fsdp_plugin.set_state_dict_type("FULL_STATE_DICT")
        trainer.save_model(tmp_dir)
        # some checks
        if PartialState().is_local_main_process:
            files = os.listdir(tmp_dir)
            assert "adapter_model.safetensors" in files
            assert "adapter_config.json" in files
            final_log = trainer.state.log_history[-1]
            # Fixed: the message now reads the same key the assertion checks
            # ("train_loss"); it previously referenced a non-existent "loss" key.
            assert final_log["train_loss"] < 10.0, f"Final loss is too high: {final_log['train_loss']}"
if __name__ == "__main__":
    # CLI entry point for running the (Q)LoRA training smoke test directly.
    cli = argparse.ArgumentParser()
    cli.add_argument("--model_id", type=str, required=False, default="Qwen/Qwen3-0.6B")
    cli.add_argument("--quant", type=str, choices=["4bit", "8bit"], required=False, default=None)
    cli.add_argument(
        "--target_modules",
        type=str,
        nargs="+",
        required=False,
        default=None,
        help="List of target modules for LoRA adaptation",
    )
    parsed = cli.parse_args()
    main(model_id=parsed.model_id, quant=parsed.quant, target_modules=parsed.target_modules)
| {
"repo_id": "huggingface/peft",
"file_path": "tests/training/training.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:examples/lora_ga_finetuning/lora_ga_finetuning.py | #!/usr/bin/env python3
"""
Example script demonstrating LoRA-GA (Low-Rank Adaptation with Gradient Approximation) fine-tuning.
LoRA-GA improves upon standard LoRA by using gradient information during initialization,
achieving 2-4x faster convergence while maintaining the same final performance.
This example shows:
1. How to define a train_step callback for gradient estimation
2. How to use preprocess_loraga for LoRA-GA initialization
3. Training with standard Hugging Face Trainer
4. Saving the trained adapter
"""
import argparse
import os
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
default_data_collator,
)
from peft import LoraConfig, get_peft_model
from peft.tuners.lora import LoraGAConfig, preprocess_loraga
def parse_args(argv=None):
    """Parse command-line arguments for the LoRA-GA fine-tuning example.

    Args:
        argv: Optional list of argument strings. When None (the default),
            argparse reads ``sys.argv[1:]``, keeping CLI behavior unchanged;
            passing an explicit list makes the parser testable.

    Returns:
        argparse.Namespace with all parsed options.
    """
    parser = argparse.ArgumentParser(description="LoRA-GA fine-tuning example")

    # Model arguments
    parser.add_argument("--base_model", type=str, default="gpt2", help="Base model name or path")
    parser.add_argument("--output_dir", type=str, default="./lora_ga_output", help="Output directory")

    # Dataset arguments
    parser.add_argument("--dataset_name", type=str, default="wikitext", help="Dataset name")
    parser.add_argument("--dataset_config", type=str, default="wikitext-2-raw-v1", help="Dataset configuration")
    parser.add_argument("--max_length", type=int, default=512, help="Maximum sequence length")

    # LoRA-GA configuration
    parser.add_argument("--r", type=int, default=8, help="LoRA rank")
    parser.add_argument("--lora_alpha", type=int, default=16, help="LoRA alpha")
    parser.add_argument("--lora_dropout", type=float, default=0.1, help="LoRA dropout")
    parser.add_argument(
        "--target_modules",
        type=str,
        nargs="+",
        default=["c_attn"],
        help="Target modules for LoRA (e.g., c_attn for GPT-2)",
    )
    parser.add_argument(
        "--direction",
        type=str,
        default="ArB2r",
        choices=["ArBr", "A2rBr", "ArB2r", "random"],
        help="Direction strategy for LoRA-GA initialization",
    )
    parser.add_argument(
        "--scale",
        type=str,
        default="stable",
        choices=["stable", "weight_svd", "gd_scale", "unit"],
        help="Scaling strategy for LoRA-GA initialization",
    )
    parser.add_argument("--stable_gamma", type=int, default=16, help="Gamma for stable scaling")

    # Gradient estimation arguments
    parser.add_argument(
        "--grad_estimate_iters", type=int, default=64, help="Number of iterations for gradient estimation"
    )
    parser.add_argument("--grad_estimate_batch_size", type=int, default=2, help="Batch size for gradient estimation")

    # Training arguments
    parser.add_argument("--num_epochs", type=int, default=3, help="Number of training epochs")
    parser.add_argument("--batch_size", type=int, default=8, help="Training batch size")
    parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate")
    parser.add_argument("--warmup_steps", type=int, default=100, help="Warmup steps")
    parser.add_argument("--logging_steps", type=int, default=10, help="Logging steps")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint steps")
    parser.add_argument("--eval_steps", type=int, default=500, help="Evaluation steps")

    # Other arguments
    parser.add_argument("--seed", type=int, default=42, help="Random seed")

    return parser.parse_args(argv)
def prepare_dataset(dataset_name, dataset_config, tokenizer, max_length):
    """Download the dataset and tokenize it for causal language modeling.

    Every example's text is tokenized to a fixed ``max_length`` and ``labels``
    is a copy of ``input_ids`` (the standard next-token objective).
    """
    print(f"\nLoading dataset: {dataset_name}/{dataset_config}")
    raw = load_dataset(dataset_name, dataset_config)

    def encode(examples):
        # Fixed-length padding/truncation; labels mirror input_ids for causal LM.
        encoded = tokenizer(
            examples["text"], padding="max_length", truncation=True, max_length=max_length, return_tensors="pt"
        )
        encoded["labels"] = encoded["input_ids"].clone()
        return encoded

    print("Tokenizing dataset...")
    return raw.map(encode, batched=True, remove_columns=raw["train"].column_names, desc="Tokenizing")
def main():
    """End-to-end LoRA-GA example: estimate gradients, initialize, train, save.

    Phases:
        1. Gradient estimation — run forward/backward passes so LoRA-GA can
           align the adapter initialization with full fine-tuning gradients.
        2. Initialization — build the PEFT model with the LoRA-GA config.
        3. Training — standard Hugging Face Trainer loop.
        4. Saving — write the adapter weights and config to `--output_dir`.
    """
    args = parse_args()

    # Set random seed
    torch.manual_seed(args.seed)

    # Create output directory
    os.makedirs(args.output_dir, exist_ok=True)

    # Load tokenizer and model
    print(f"\nLoading model: {args.base_model}")
    tokenizer = AutoTokenizer.from_pretrained(args.base_model)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.base_model)

    # Prepare dataset
    tokenized_datasets = prepare_dataset(args.dataset_name, args.dataset_config, tokenizer, args.max_length)

    # Create LoRA-GA configuration
    print("\nCreating LoRA-GA configuration...")
    lora_ga_config = LoraGAConfig(
        direction=args.direction,
        scale=args.scale,
        stable_gamma=args.stable_gamma,
    )
    lora_config = LoraConfig(
        r=args.r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        target_modules=args.target_modules,
        bias="none",
        task_type="CAUSAL_LM",
        init_lora_weights="lora_ga",
        lora_ga_config=lora_ga_config,
    )
    print(f"  Direction: {args.direction}")
    print(f"  Scale: {args.scale}")
    print(f"  Rank: {args.r}, Alpha: {args.lora_alpha}")

    # ===== GRADIENT ESTIMATION PHASE =====
    print("\n" + "=" * 70)
    print("GRADIENT ESTIMATION PHASE")
    print("=" * 70)
    print(f"Estimating gradients over {args.grad_estimate_iters} iterations...")
    print("This allows LoRA-GA to initialize adapters aligned with full fine-tuning.")

    # Prepare gradient estimation dataloader
    train_dataset = tokenized_datasets["train"]

    # Create a simple DataLoader for gradient estimation
    grad_dataloader = DataLoader(
        train_dataset,
        batch_size=args.grad_estimate_batch_size,
        shuffle=True,
        collate_fn=default_data_collator,
    )

    # Move model to GPU if available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.train()

    # Define train_step callback
    def train_step():
        """Run forward and backward passes for gradient estimation."""
        grad_iter = iter(grad_dataloader)
        for _ in range(args.grad_estimate_iters):
            try:
                batch = next(grad_iter)
            except StopIteration:
                # Fixed: restart the iterator when the dataset has fewer batches
                # than grad_estimate_iters; previously this crashed with an
                # unhandled StopIteration.
                grad_iter = iter(grad_dataloader)
                batch = next(grad_iter)
            # Move batch to device
            batch = {k: v.to(device) for k, v in batch.items()}
            # Forward pass
            outputs = model(**batch)
            loss = outputs.loss
            # Backward pass
            loss.backward()

    # Preprocess with LoRA-GA
    print("Running gradient estimation...")
    preprocess_loraga(model, lora_config, train_step)
    print("✓ Gradient estimation complete!")

    # ===== MODEL INITIALIZATION PHASE =====
    print("\n" + "=" * 70)
    print("LORA-GA INITIALIZATION PHASE")
    print("=" * 70)
    print("Initializing LoRA adapters with gradient information...")

    # Create PEFT model with LoRA-GA initialization
    peft_model = get_peft_model(model, lora_config)

    # Print trainable parameters
    peft_model.print_trainable_parameters()

    # ===== TRAINING PHASE =====
    print("\n" + "=" * 70)
    print("TRAINING PHASE")
    print("=" * 70)
    print("Starting training with LoRA-GA initialized adapters...")
    print("LoRA-GA achieves 2-4x faster convergence compared to random initialization!\n")

    # Setup training arguments
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        num_train_epochs=args.num_epochs,
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        learning_rate=args.learning_rate,
        warmup_steps=args.warmup_steps,
        logging_steps=args.logging_steps,
        save_steps=args.save_steps,
        eval_steps=args.eval_steps,
        eval_strategy="steps",
        save_total_limit=2,
        load_best_model_at_end=True,
        report_to="none",
        seed=args.seed,
    )

    # Data collator
    data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)

    # Create Trainer
    trainer = Trainer(
        model=peft_model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["validation"],
        data_collator=data_collator,
    )

    # Train the model
    trainer.train()

    # ===== SAVING PHASE =====
    print("\n" + "=" * 70)
    print("SAVING PHASE")
    print("=" * 70)
    print("Saving trained adapter...")

    # Save the trained adapter
    peft_model.save_pretrained(args.output_dir)

    print(f"\n✓ Training complete! Model saved to: {args.output_dir}")
    print("\nSaved files:")
    print("  - adapter_model.safetensors: Trained adapter weights")
    print("  - adapter_config.json: Configuration file")

    print("\n" + "=" * 70)
    print("DONE!")
    print("=" * 70)
    print("\nYou can now use the trained adapter with:")
    print("  from peft import PeftModel")
    print("  from transformers import AutoModelForCausalLM")
    print(f"  model = AutoModelForCausalLM.from_pretrained('{args.base_model}')")
    print(f"  model = PeftModel.from_pretrained(model, '{args.output_dir}')")
# Allow running as a script: `python lora_ga_finetuning.py [options]`.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/peft",
"file_path": "examples/lora_ga_finetuning/lora_ga_finetuning.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/peft:src/peft/tuners/lora/loraga.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference code: https://github.com/Outsider565/LoRA-GA
# Reference paper: https://arxiv.org/abs/2407.05000
import os
from collections.abc import Callable
from typing import Any, Optional
import torch
import torch.nn as nn
from transformers.pytorch_utils import Conv1D
from peft.tuners.lora.config import LoraConfig
from peft.tuners.lora.model import LoraModel
def get_target_modules(model: nn.Module, config: LoraConfig):
    """
    Yield ``(name, module)`` pairs that LoRA-GA should operate on.

    A module qualifies when its name matches ``config.target_modules`` and it
    is an ``nn.Linear`` or ``Conv1D`` layer.
    """
    supported_types = (nn.Linear, Conv1D)
    for module_name, candidate in model.named_modules():
        if not isinstance(candidate, supported_types):
            continue
        if LoraModel._check_target_module_exists(config, module_name):
            yield module_name, candidate
def get_model_device(model: nn.Module) -> torch.device:
    """Return the device of the model's first parameter.

    Fixed: the return annotation previously said ``str``, but the function
    returns a ``torch.device`` (``param.device``).

    Raises:
        StopIteration: If the (unwrapped) model has no parameters.
    """
    if hasattr(model, "module"):  # Handle DeepSpeed/DataParallel
        model = model.module
    return next(iter(model.parameters())).device
@torch.no_grad()
def preprocess_loraga(
    model: nn.Module,
    lora_config: LoraConfig,
    train_step: Callable[[], None],
    cache_file: Optional[str] = None,
):
    """
    Build necessary LoRA-GA fields for a model by estimating gradients.

    For each linear layer, gradients will be estimated by running the provided train_step callback. These gradients are
    then attached to the modules and used during initialization.

    Args:
        model (`nn.Module`):
            Model to preprocess.
        lora_config (`LoraConfig`):
            Lora configuration of the model. `lora_config.lora_ga_config` should be set.
        train_step (`Callable[[], None]`):
            Callback to run gradient estimation. Typically you should run model forward and backward passes in this
            callback. The gradients will be accumulated across all calls within this callback.
        cache_file (`Optional[str]`):
            Optional path to cache file for saving/loading gradients. If provided and the file exists, gradients will
            be loaded from cache. Otherwise, gradients will be estimated and saved to this path.

    Upon completion, the following fields are set for each target module:
        _peft_loraga_grad (`torch.Tensor`):
            Accumulated gradient for the weight matrix.
    """
    if lora_config.lora_ga_config is None:
        raise ValueError(
            "If you want to use LoRA-GA, please initialize the LoraConfig with "
            "init_lora_weights='lora_ga' and lora_ga_config=LoraGAConfig(...)."
        )

    # Populate target_modules from defaults if empty
    # This logic mirrors BaseTuner._prepare_adapter_config which runs after get_peft_model.
    # Since preprocess_loraga is called before get_peft_model, we need to handle this ourselves.
    if lora_config.target_modules is None:
        model_config = LoraModel.get_model_config(model)
        target_modules = LoraModel.target_module_mapping.get(model_config["model_type"])
        if target_modules is None:
            raise ValueError("Please specify `target_modules` in `peft_config`")
        lora_config.target_modules = set(target_modules)

    # Check for quantized models - LoRA-GA requires full-precision gradients
    for name, module in get_target_modules(model, lora_config):
        if hasattr(module, "quant_state"):
            raise ValueError(
                f"LoRA-GA does not support quantized models. Found quantized module: '{name}'. "
                "LoRA-GA requires full-precision gradients during preprocessing."
            )

    # If cache exists, load from cache
    if cache_file is not None and os.path.exists(cache_file) and os.path.getsize(cache_file) > 0:
        cache = torch.load(cache_file, map_location=get_model_device(model))
        for name, module in get_target_modules(model, lora_config):
            module._peft_loraga_grad = cache[f"{name}._peft_loraga_grad"]
    else:
        # Estimate gradients by running train_step
        estimate_gradients(model, lora_config, train_step)

        # Save cache to disk if specified
        if cache_file is not None:
            cache: dict[str, Any] = {}
            for name, module in get_target_modules(model, lora_config):
                cache[f"{name}._peft_loraga_grad"] = module._peft_loraga_grad
            cache_dir = os.path.dirname(cache_file)
            # Fixed: os.makedirs("") raises FileNotFoundError, so only create
            # directories when the path actually has a directory component
            # (e.g. cache_file="grads.pt" previously crashed here).
            if cache_dir:
                os.makedirs(cache_dir, exist_ok=True)
            torch.save(cache, cache_file)
def estimate_gradients(
    model: nn.Module,
    lora_config: LoraConfig,
    train_step: Callable[[], None],
):
    """
    Estimate gradients for LoRA-GA initialization.

    This function enables gradient computation ONLY on target module weights and runs the train_step callback. This is
    more memory-efficient than enabling gradients globally.

    Upon success, every target module that received at least one backward pass gains a `_peft_loraga_grad` tensor:
    its accumulated weight gradient divided by the number of backward passes that reached the module.

    Args:
        model: The model whose target modules should receive estimated gradients.
        lora_config: The LoRA config; its `target_modules` selects which layers participate.
        train_step: Zero-argument callback expected to run one or more forward/backward passes. The caller controls
            batching and whether gradients are zeroed between passes.

    Raises:
        ValueError: If no supported (nn.Linear / Conv1D) target layers are found.
    """
    # Remember original training state so it can be restored afterwards
    was_training = model.training
    model.train()
    # Get target modules list once for efficiency
    target_module_list = list(get_target_modules(model, lora_config))
    # Check if any supported layers were found
    if not target_module_list:
        raise ValueError(
            "No supported layers found for LoRA-GA initialization. "
            "LoRA-GA only supports nn.Linear and Conv1D layers. "
            "Please ensure your model contains at least one of these layer types in target_modules."
        )
    # Initialize the per-module backward-pass counter
    for _, module in target_module_list:
        module._peft_loraga_grad_count = 0
    # Memory-efficient gradient computation: disable gradients for all parameters first,
    # then enable only for target module weights
    original_requires_grad = {}
    for name, param in model.named_parameters():
        original_requires_grad[name] = param.requires_grad
        param.requires_grad = False
    # Enable gradients ONLY for target module weights
    for _, module in target_module_list:
        module.weight.requires_grad = True

    def backward_hook(module, grad_input, grad_output):
        # Count backward passes per module so the accumulated .grad can be averaged
        module._peft_loraga_grad_count += 1

    hooks = [module.register_full_backward_hook(backward_hook) for _, module in target_module_list]
    try:
        # Enable gradient computation and run train_step
        with torch.enable_grad():
            train_step()
        # Average the accumulated gradient over the number of backward passes that reached
        # each module; modules never touched by backward (count == 0) get no _peft_loraga_grad
        for _, module in target_module_list:
            if module._peft_loraga_grad_count > 0:
                module._peft_loraga_grad = module.weight.grad.detach() / module._peft_loraga_grad_count
                module.weight.grad = None
    finally:
        # Always undo the temporary instrumentation — even if train_step raised — so the
        # model is left in a usable state.
        for hook in hooks:
            hook.remove()
        # Restore original requires_grad state for all parameters
        for name, param in model.named_parameters():
            if name in original_requires_grad:
                param.requires_grad = original_requires_grad[name]
        # Fix: previously the counter was only deleted for modules with count > 0, leaving a
        # stale `_peft_loraga_grad_count` attribute on modules that never saw a backward pass
        # (or on every module if train_step raised).
        for _, module in target_module_list:
            if hasattr(module, "_peft_loraga_grad_count"):
                del module._peft_loraga_grad_count
        # Restore original training state
        if not was_training:
            model.eval()
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/lora/loraga.py",
"license": "Apache License 2.0",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:tests/test_lora_ga.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from peft import LoraConfig, PeftModel, get_peft_model
from peft.tuners.lora import LoraGAConfig, preprocess_loraga
class TestLoraGAPreprocessing:
    """Tests for the `preprocess_loraga` entry point."""

    def test_preprocess_basic(self, simple_model, simple_train_step):
        """Preprocessing attaches an estimated gradient to each target module."""
        config = LoraConfig(
            r=4,
            lora_alpha=8,
            target_modules=["0"],
            init_lora_weights="lora_ga",
            lora_ga_config=LoraGAConfig(direction="ArB2r", scale="stable"),
        )
        preprocess_loraga(simple_model, config, simple_train_step)
        target_layer = simple_model[0]
        # The estimated gradient must exist and match the weight's shape.
        assert hasattr(target_layer, "_peft_loraga_grad")
        assert target_layer._peft_loraga_grad.shape == target_layer.weight.shape

    def test_preprocess_without_lora_ga_config_raises(self, simple_model):
        """A LoraConfig missing `lora_ga_config` is rejected up front."""
        config = LoraConfig(r=4, lora_alpha=8, target_modules=["0"])
        with pytest.raises(ValueError, match="If you want to use LoRA-GA"):
            preprocess_loraga(simple_model, config, lambda: None)

    def test_init_without_lora_ga_config_raises(self, simple_model, simple_train_step):
        """get_peft_model rejects init_lora_weights='lora_ga' without a GA config."""
        # First run a valid preprocessing pass so gradients are attached.
        good_config = LoraConfig(
            r=4,
            lora_alpha=8,
            target_modules=["0"],
            init_lora_weights="lora_ga",
            lora_ga_config=LoraGAConfig(direction="ArB2r", scale="stable"),
        )
        preprocess_loraga(simple_model, good_config, simple_train_step)
        # A config that asks for LoRA-GA init but omits lora_ga_config must fail
        # during get_peft_model.
        bad_config = LoraConfig(
            r=4,
            lora_alpha=8,
            target_modules=["0"],
            init_lora_weights="lora_ga",
            lora_ga_config=None,
        )
        with pytest.raises(ValueError, match="lora_ga_config must be provided"):
            get_peft_model(simple_model, bad_config)
@pytest.fixture
def simple_model():
    """Provide a fresh one-layer sequential model, in train mode, for each test."""
    net = torch.nn.Sequential(torch.nn.Linear(10, 10))
    net.train()
    return net
@pytest.fixture
def simple_train_step(simple_model):
    """Provide a callback that runs four forward/backward passes on the model."""

    def train_step():
        # No zero_grad here, so gradients accumulate across the four passes.
        for _ in range(4):
            batch = torch.randn(2, 10)
            loss = simple_model(batch).sum()
            loss.backward()

    return train_step
class TestLoraGAIntegration:
    """Integration tests for LoRA-GA: save/load round trips, gradient caching,
    dtype handling, and rejection of unsupported models."""

    @pytest.mark.parametrize("direction", ["ArBr", "A2rBr", "ArB2r", "random"])
    @pytest.mark.parametrize("scale", ["stable", "weight_svd", "gd_scale", "unit"])
    def test_save_load_inference(self, tmp_path, simple_model, simple_train_step, direction, scale):
        """Test that saved and loaded models produce the same output."""
        torch.manual_seed(42)
        lora_ga_config = LoraGAConfig(direction=direction, scale=scale)
        lora_config = LoraConfig(
            r=4,
            lora_alpha=8,
            target_modules=["0"],
            init_lora_weights="lora_ga",
            lora_ga_config=lora_ga_config,
        )
        preprocess_loraga(simple_model, lora_config, simple_train_step)
        peft_model = get_peft_model(simple_model, lora_config)
        # Generate output before saving
        test_input = torch.randn(2, 10)
        with torch.no_grad():
            output_before = peft_model(test_input)
        # Save model
        peft_model.save_pretrained(str(tmp_path))
        # Load model - need to use the same base model that was modified by LoRA-GA
        # Create a fresh model and load the saved state
        loaded_model = PeftModel.from_pretrained(simple_model, str(tmp_path))
        # Generate output after loading
        with torch.no_grad():
            output_after = loaded_model(test_input)
        # Outputs should be identical
        assert torch.allclose(output_before, output_after, atol=1e-5)

    @pytest.mark.parametrize("scale", ["stable", "weight_svd", "gd_scale", "unit"])
    @pytest.mark.parametrize("direction", ["ArBr", "A2rBr", "ArB2r", "random"])
    def test_save_load_with_weight_conversion(self, tmp_path, simple_model, simple_train_step, direction, scale):
        """Test save/load with path_initial_model_for_weight_conversion."""
        # Fix: the docstring above was previously placed after the skip below, making it a
        # no-op string expression instead of the method docstring.
        # Skip the random+weight_svd combination as it produces non-deterministic results
        if direction == "random" and scale == "weight_svd":
            pytest.skip("Skipping random+weight_svd combination due to non-deterministic behavior")
        torch.manual_seed(42)
        # Save RNG state for reproducing exact initialization later
        rng_state = torch.get_rng_state()
        # Save original base model weights (before LoRA-GA preprocessing)
        original_weights = {k: v.clone() for k, v in simple_model.state_dict().items()}
        lora_ga_config = LoraGAConfig(direction=direction, scale=scale)
        lora_config = LoraConfig(
            r=4,
            lora_alpha=8,
            target_modules=["0"],
            init_lora_weights="lora_ga",
            lora_ga_config=lora_ga_config,
        )
        preprocess_loraga(simple_model, lora_config, simple_train_step)
        peft_model = get_peft_model(simple_model, lora_config)
        # Save the initialized adapter (before training)
        init_adapter_path = tmp_path / "init_adapter"
        peft_model.peft_config["default"].init_lora_weights = True
        peft_model.save_pretrained(str(init_adapter_path))
        # Generate output before saving (simulating after training)
        test_input = torch.randn(2, 10)
        with torch.no_grad():
            output_before = peft_model(test_input)
        # Save with weight conversion
        adapter_path = tmp_path / "adapter"
        peft_model.save_pretrained(str(adapter_path), path_initial_model_for_weight_conversion=str(init_adapter_path))
        # Load with original base model - need fresh model instance with same original weights
        # Restore RNG state to ensure random operations (like randperm for direction="random") are reproducible
        torch.set_rng_state(rng_state)
        base_model = torch.nn.Sequential(torch.nn.Linear(10, 10))
        base_model.train()
        base_model.load_state_dict(original_weights)
        # Load converted adapter
        loaded_model = PeftModel.from_pretrained(base_model, str(adapter_path))
        # Generate output after loading
        with torch.no_grad():
            output_after = loaded_model(test_input)
        # Outputs should be identical
        assert torch.allclose(output_before, output_after, atol=1e-5)

    def test_cached_gradients(self, tmp_path):
        """Test that cached gradients produce the same results as fresh gradients."""
        torch.manual_seed(42)
        # First run: compute gradients and save to cache
        model1 = torch.nn.Sequential(torch.nn.Linear(10, 10))
        model1.train()

        def train_step1():
            for _ in range(4):
                inputs = torch.randn(2, 10)
                outputs = model1(inputs)
                loss = outputs.sum()
                model1.zero_grad()
                loss.backward()

        lora_ga_config = LoraGAConfig(direction="ArB2r", scale="stable")
        lora_config = LoraConfig(
            r=4,
            lora_alpha=8,
            target_modules=["0"],
            init_lora_weights="lora_ga",
            lora_ga_config=lora_ga_config,
        )
        cache_file = tmp_path / "gradient_cache.pt"
        preprocess_loraga(model1, lora_config, train_step1, cache_file=str(cache_file))
        peft_model1 = get_peft_model(model1, lora_config)
        # Check that cache file was created
        assert cache_file.exists()
        assert cache_file.stat().st_size > 0
        # Generate output from first model
        test_input = torch.randn(2, 10)
        with torch.no_grad():
            output1 = peft_model1(test_input)
        # Second run: load gradients from cache
        torch.manual_seed(42)  # Reset seed to get same initial weights
        model2 = torch.nn.Sequential(torch.nn.Linear(10, 10))
        model2.train()

        def train_step2():
            for _ in range(4):
                inputs = torch.randn(2, 10)
                outputs = model2(inputs)
                loss = outputs.sum()
                model2.zero_grad()
                loss.backward()

        # Use same config and cache file - should load from cache without running train_step
        preprocess_loraga(model2, lora_config, train_step2, cache_file=str(cache_file))
        peft_model2 = get_peft_model(model2, lora_config)
        # Generate output from second model
        with torch.no_grad():
            output2 = peft_model2(test_input)
        # Outputs should be identical since both used the same cached gradients
        assert torch.allclose(output1, output2, atol=1e-5)

    @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
    def test_lower_precision_dtype(self, tmp_path, dtype):
        """Test LoRA-GA works with lower precision dtypes (fp16/bf16)."""
        torch.manual_seed(42)
        # Create model in lower precision
        model = torch.nn.Sequential(torch.nn.Linear(10, 10))
        model = model.to(dtype=dtype)
        model.train()

        def train_step():
            for _ in range(4):
                inputs = torch.randn(2, 10, dtype=dtype)
                outputs = model(inputs)
                loss = outputs.sum()
                model.zero_grad()
                loss.backward()

        lora_ga_config = LoraGAConfig(direction="ArB2r", scale="stable")
        lora_config = LoraConfig(
            r=4,
            lora_alpha=8,
            target_modules=["0"],
            init_lora_weights="lora_ga",
            lora_ga_config=lora_ga_config,
        )
        # Preprocess and create PEFT model with autocast_adapter_dtype=False
        # to ensure LoRA adapters are also in lower precision
        preprocess_loraga(model, lora_config, train_step)
        peft_model = get_peft_model(model, lora_config, autocast_adapter_dtype=False)
        # Verify adapter dtype matches model dtype
        for name, module in peft_model.named_modules():
            if hasattr(module, "lora_A"):
                assert module.lora_A["default"].weight.dtype == dtype
                assert module.lora_B["default"].weight.dtype == dtype
        # Generate output before saving
        test_input = torch.randn(2, 10, dtype=dtype)
        with torch.no_grad():
            output_before = peft_model(test_input)
        # Save and load model
        peft_model.save_pretrained(str(tmp_path))
        loaded_model = PeftModel.from_pretrained(model, str(tmp_path))
        # Generate output after loading
        with torch.no_grad():
            output_after = loaded_model(test_input)
        # Outputs should be close - use looser tolerance for lower precision
        assert torch.allclose(output_before, output_after, atol=1e-2)

    def test_quantized_model_rejection(self):
        """Test that quantized models are properly rejected with clear error."""

        class MockQuantizedLinear(torch.nn.Linear):
            """Mock quantized layer that simulates bitsandbytes quantized layers."""

            def __init__(self, in_features, out_features):
                super().__init__(in_features, out_features)
                # Simulate quantized layer by adding quant_state attribute
                self.quant_state = "mock_quantized"

        # Create model with quantized layer
        model = torch.nn.Sequential(MockQuantizedLinear(10, 10))
        model.train()

        def train_step():
            for _ in range(4):
                inputs = torch.randn(2, 10)
                outputs = model(inputs)
                loss = outputs.sum()
                model.zero_grad()
                loss.backward()

        lora_ga_config = LoraGAConfig(direction="ArB2r", scale="stable")
        lora_config = LoraConfig(
            r=4,
            lora_alpha=8,
            target_modules=["0"],
            init_lora_weights="lora_ga",
            lora_ga_config=lora_ga_config,
        )
        # Should raise ValueError mentioning quantization
        with pytest.raises(ValueError, match="quantized"):
            preprocess_loraga(model, lora_config, train_step)

    def test_unsupported_layer_types_no_error(self):
        """Test that unsupported layer types don't cause errors."""

        class MixedModel(torch.nn.Module):
            """Model with both supported and unsupported layer types."""

            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(10, 10)  # Supported
                self.conv2d = torch.nn.Conv2d(3, 16, 3)  # Unsupported
                self.embedding = torch.nn.Embedding(100, 10)  # Unsupported

            def forward(self, x):
                return self.linear(x)

        model = MixedModel()
        model.train()

        def train_step():
            for _ in range(4):
                inputs = torch.randn(2, 10)
                outputs = model(inputs)
                loss = outputs.sum()
                model.zero_grad()
                loss.backward()

        lora_ga_config = LoraGAConfig(direction="ArB2r", scale="stable")
        lora_config = LoraConfig(
            r=4,
            lora_alpha=8,
            target_modules=["linear", "conv2d", "embedding"],  # Mix of supported and unsupported
            init_lora_weights="lora_ga",
            lora_ga_config=lora_ga_config,
        )
        # Should not raise error - unsupported layers are silently skipped
        preprocess_loraga(model, lora_config, train_step)
        # Verify that linear layer has LoRA-GA gradient attached during preprocessing
        assert hasattr(model.linear, "_peft_loraga_grad")
        # Unsupported layers won't have gradients attached
        assert not hasattr(model.conv2d, "_peft_loraga_grad")
        assert not hasattr(model.embedding, "_peft_loraga_grad")
        # Now create PEFT model - should work without errors
        peft_model = get_peft_model(model, lora_config)
        # Verify model still works
        test_input = torch.randn(2, 10)
        with torch.no_grad():
            output = peft_model(test_input)
        assert output.shape == (2, 10)

    def test_no_supported_layers_raises_error(self):
        """Test that having no supported layers raises clear error."""

        class UnsupportedModel(torch.nn.Module):
            """Model with only unsupported layer types."""

            def __init__(self):
                super().__init__()
                self.conv2d = torch.nn.Conv2d(3, 16, 3)
                self.embedding = torch.nn.Embedding(100, 10)

            def forward(self, x):
                return x

        model = UnsupportedModel()
        model.train()

        def train_step():
            model.zero_grad()

        lora_ga_config = LoraGAConfig(direction="ArB2r", scale="stable")
        lora_config = LoraConfig(
            r=4,
            lora_alpha=8,
            target_modules=["conv2d", "embedding"],  # Only unsupported layers
            init_lora_weights="lora_ga",
            lora_ga_config=lora_ga_config,
        )
        # Should raise ValueError about no supported layers
        with pytest.raises(ValueError, match="No supported layers found"):
            preprocess_loraga(model, lora_config, train_step)
| {
"repo_id": "huggingface/peft",
"file_path": "tests/test_lora_ga.py",
"license": "Apache License 2.0",
"lines": 344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:examples/orthogonal_subspace_learning/osf_continual_learning.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OSF Continual Learning Example
This script demonstrates OSF's ability to learn multiple tasks sequentially while preventing
catastrophic forgetting, compared to standard full fine-tuning.
Tasks:
1. ScienceQA - Science question answering
2. NumGLUE - Mathematical reasoning
3. FOMC - Financial sentiment classification
OSF Configuration:
- Task 1: effective_rank=0.3 (train 70%, freeze 30%)
- Task 2: effective_rank=0.5 (train 50%, freeze 50%)
- Task 3: effective_rank=0.7 (train 30%, freeze 70%)
"""
import argparse
import os
import re
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments
from utils import (
DataCollatorForCompletionOnly,
format_fomc_for_llama,
format_numglue_for_llama,
format_scienceqa_for_llama,
load_fomc,
load_numglue,
load_scienceqa,
)
from peft import OSFConfig, get_peft_model
def compute_accuracy_scienceqa(model, eval_dataset, tokenizer, data_collator):
    """Compute accuracy for ScienceQA (extract predicted letter)."""
    from torch.utils.data import DataLoader

    model.eval()
    n_correct = 0
    n_total = 0
    loader = DataLoader(eval_dataset, batch_size=8, collate_fn=data_collator)
    with torch.no_grad():
        for batch in loader:
            # Greedy decoding; 5 new tokens is enough for a single choice letter
            generated = model.generate(
                input_ids=batch["input_ids"].to(model.device),
                attention_mask=batch["attention_mask"].to(model.device),
                max_new_tokens=5,
                pad_token_id=tokenizer.pad_token_id,
                do_sample=False,
            )
            for sequence, label_row in zip(generated, batch["labels"]):
                text = tokenizer.decode(sequence, skip_special_tokens=True)
                # The prediction is the last standalone A-D letter in the decoded text
                letters = re.findall(r"\b([A-D])\b", text)
                prediction = letters[-1] if letters else "X"
                # Ground truth lives in the non-masked (-100) label positions
                gold_ids = label_row[label_row != -100]
                if len(gold_ids) > 0:
                    gold = tokenizer.decode(gold_ids, skip_special_tokens=True).strip()
                    if prediction == gold:
                        n_correct += 1
                    n_total += 1
    return n_correct / n_total if n_total > 0 else 0.0
def compute_accuracy_numglue(model, eval_dataset, tokenizer, data_collator):
    """Compute accuracy for NumGLUE (extract predicted number)."""
    from torch.utils.data import DataLoader

    model.eval()
    n_correct = 0
    n_total = 0
    loader = DataLoader(eval_dataset, batch_size=8, collate_fn=data_collator)
    with torch.no_grad():
        for batch in loader:
            generated = model.generate(
                input_ids=batch["input_ids"].to(model.device),
                attention_mask=batch["attention_mask"].to(model.device),
                max_new_tokens=20,
                pad_token_id=tokenizer.pad_token_id,
                do_sample=False,
            )
            for sequence, label_row in zip(generated, batch["labels"]):
                text = tokenizer.decode(sequence, skip_special_tokens=True)
                # Take the last number appearing in the decoded text as the answer
                numbers = re.findall(r"-?\d+\.?\d*", text)
                prediction = numbers[-1] if numbers else "-999"
                # Ground truth lives in the non-masked (-100) label positions
                gold_ids = label_row[label_row != -100]
                if len(gold_ids) > 0:
                    gold = tokenizer.decode(gold_ids, skip_special_tokens=True).strip()
                    if prediction == gold:
                        n_correct += 1
                    n_total += 1
    return n_correct / n_total if n_total > 0 else 0.0
def compute_accuracy_fomc(model, eval_dataset, tokenizer, data_collator):
    """Compute accuracy for FOMC (extract predicted sentiment)."""
    from torch.utils.data import DataLoader

    model.eval()
    sentiment_labels = ["Dovish", "Hawkish", "Neutral"]
    n_correct = 0
    n_total = 0
    loader = DataLoader(eval_dataset, batch_size=8, collate_fn=data_collator)
    with torch.no_grad():
        for batch in loader:
            generated = model.generate(
                input_ids=batch["input_ids"].to(model.device),
                attention_mask=batch["attention_mask"].to(model.device),
                max_new_tokens=10,
                pad_token_id=tokenizer.pad_token_id,
                do_sample=False,
            )
            for sequence, label_row in zip(generated, batch["labels"]):
                text = tokenizer.decode(sequence, skip_special_tokens=True)
                # First sentiment label found in the decoded text wins (checked in fixed order)
                prediction = next((label for label in sentiment_labels if label in text), None)
                # Ground truth lives in the non-masked (-100) label positions
                gold_ids = label_row[label_row != -100]
                if len(gold_ids) > 0:
                    gold = tokenizer.decode(gold_ids, skip_special_tokens=True).strip()
                    if prediction == gold:
                        n_correct += 1
                    n_total += 1
    return n_correct / n_total if n_total > 0 else 0.0
def evaluate_model(model, eval_dataset, data_collator, tokenizer, task_name, task_type):
    """Evaluate model on a dataset and return loss and accuracy.

    Args:
        model: Causal LM (plain or PEFT-wrapped) to evaluate.
        eval_dataset: Tokenized evaluation dataset.
        data_collator: Collator producing `input_ids` / `attention_mask` / `labels` batches.
        tokenizer: Tokenizer used by the accuracy helpers to decode generations.
        task_name: Human-readable name used only in the printed summary line.
        task_type: One of "scienceqa", "numglue", "fomc"; selects the accuracy
            function. Any other value yields accuracy 0.0.

    Returns:
        Tuple of (eval loss, accuracy in [0, 1]).
    """
    # Compute loss via a throwaway Trainer used purely for evaluation; label_names
    # tells Trainer which batch key holds the targets.
    trainer = Trainer(
        model=model,
        data_collator=data_collator,
        eval_dataset=eval_dataset,
        args=TrainingArguments(
            label_names=["labels"],
        ),
    )
    results = trainer.evaluate()
    loss = results["eval_loss"]
    # Compute accuracy based on task type
    if task_type == "scienceqa":
        accuracy = compute_accuracy_scienceqa(model, eval_dataset, tokenizer, data_collator)
    elif task_type == "numglue":
        accuracy = compute_accuracy_numglue(model, eval_dataset, tokenizer, data_collator)
    elif task_type == "fomc":
        accuracy = compute_accuracy_fomc(model, eval_dataset, tokenizer, data_collator)
    else:
        accuracy = 0.0
    print(f"  {task_name}: Loss = {loss:.4f}, Accuracy = {accuracy * 100:.2f}%")
    return loss, accuracy
def train_with_osf(
    model_name,
    num_train,
    num_eval,
    output_dir,
    num_epochs,
    learning_rate,
    batch_size,
    gradient_accumulation_steps,
    max_length,
    seed,
):
    """Train using OSF with progressive rank allocation.

    Trains sequentially on ScienceQA -> NumGLUE -> FOMC with a growing
    `effective_rank` (0.3 -> 0.5 -> 0.7) per task, evaluating on all tasks
    seen so far after each training phase.

    Args:
        model_name: HF hub id or local path of the base causal LM.
        num_train: Per-task train size.
            NOTE(review): currently unused — the loaders below hard-code
            1000/1000/350; confirm whether this should be threaded through.
        num_eval: Per-task eval size (also unused here — see note above).
        output_dir: Root directory for per-task outputs and the final model.
        num_epochs: Training epochs per task.
        learning_rate: Learning rate used for every task.
        batch_size: Per-device train batch size.
        gradient_accumulation_steps: Gradient accumulation steps.
        max_length: Max tokenized sequence length.
        seed: Seed forwarded to the dataset loaders for reproducible splits.

    Returns:
        Dict mapping task name -> list of (loss, accuracy) tuples, one entry
        appended each time that task is evaluated.
    """
    print("\n" + "=" * 80)
    print("TRAINING WITH OSF (Orthogonal Subspace Fine-tuning)")
    print("=" * 80)
    # Load tokenizer and base model
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    tokenizer.pad_token = tokenizer.eos_token
    base_model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
    # Load all datasets with task-specific sizes
    # FOMC only has 496 samples total, so we use 350 train + 146 eval for it
    print("\nLoading datasets...")
    scienceqa_train, scienceqa_eval = load_scienceqa(1000, 200, seed)
    numglue_train, numglue_eval = load_numglue(1000, 200, seed)
    fomc_train, fomc_eval = load_fomc(350, 146, seed)
    # Store original eval datasets for later
    # NOTE(review): the *_eval_original references are stored in `tasks` below but
    # never read in this function — confirm whether they can be dropped.
    scienceqa_eval_original = scienceqa_eval
    numglue_eval_original = numglue_eval
    fomc_eval_original = fomc_eval
    # Format datasets (tokenize into prompt/completion form; drop raw columns)
    scienceqa_train = scienceqa_train.map(
        lambda x: format_scienceqa_for_llama(x, tokenizer, max_length),
        batched=True,
        remove_columns=scienceqa_train.column_names,
    )
    scienceqa_eval = scienceqa_eval.map(
        lambda x: format_scienceqa_for_llama(x, tokenizer, max_length),
        batched=True,
        remove_columns=scienceqa_eval.column_names,
    )
    numglue_train = numglue_train.map(
        lambda x: format_numglue_for_llama(x, tokenizer, max_length),
        batched=True,
        remove_columns=numglue_train.column_names,
    )
    numglue_eval = numglue_eval.map(
        lambda x: format_numglue_for_llama(x, tokenizer, max_length),
        batched=True,
        remove_columns=numglue_eval.column_names,
    )
    fomc_train = fomc_train.map(
        lambda x: format_fomc_for_llama(x, tokenizer, max_length), batched=True, remove_columns=fomc_train.column_names
    )
    fomc_eval = fomc_eval.map(
        lambda x: format_fomc_for_llama(x, tokenizer, max_length), batched=True, remove_columns=fomc_eval.column_names
    )
    data_collator = DataCollatorForCompletionOnly(tokenizer, max_length)
    # Task configurations; effective_rank grows per task so that later tasks
    # freeze a larger share of the already-learned subspace.
    tasks = [
        {
            "name": "ScienceQA",
            "train": scienceqa_train,
            "eval": scienceqa_eval,
            "eval_original": scienceqa_eval_original,
            "effective_rank": 0.3,  # Freeze 30%, train 70%
            "type": "scienceqa",
        },
        {
            "name": "NumGLUE",
            "train": numglue_train,
            "eval": numglue_eval,
            "eval_original": numglue_eval_original,
            "effective_rank": 0.5,  # Freeze 50%, train 50%
            "type": "numglue",
        },
        {
            "name": "FOMC",
            "train": fomc_train,
            "eval": fomc_eval,
            "eval_original": fomc_eval_original,
            "effective_rank": 0.7,  # Freeze 70%, train 30%
            "type": "fomc",
        },
    ]
    # Store evaluation history: {task_name: [(loss, accuracy), ...]}
    eval_history = {
        "ScienceQA": [],
        "NumGLUE": [],
        "FOMC": [],
    }
    # Sequential task training
    model = base_model
    for task_idx, task in enumerate(tasks):
        print(f"\n{'=' * 80}")
        print(f"TASK {task_idx + 1}: {task['name']}")
        print(f"Effective Rank: {task['effective_rank']} (preserving {task['effective_rank'] * 100:.0f}%)")
        print(f"{'=' * 80}")
        # Configure OSF for this task
        config = OSFConfig(
            target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
            effective_rank=task["effective_rank"],
        )
        # Apply OSF to the model
        model = get_peft_model(model, config)
        # Training arguments
        training_args = TrainingArguments(
            output_dir=f"{output_dir}/osf_{task['name'].lower()}",
            num_train_epochs=num_epochs,
            per_device_train_batch_size=batch_size,
            gradient_accumulation_steps=gradient_accumulation_steps,
            learning_rate=learning_rate,
            logging_steps=10,
            save_strategy="no",
            bf16=True,
            remove_unused_columns=False,
        )
        # Train on current task
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=task["train"],
            data_collator=data_collator,
        )
        print(f"\nTraining on {task['name']}...")
        trainer.train()
        # Evaluate on all tasks seen so far
        print(f"\nEvaluating on all tasks after training on {task['name']}:")
        for eval_task_idx in range(task_idx + 1):
            eval_task = tasks[eval_task_idx]
            loss, accuracy = evaluate_model(
                model, eval_task["eval"], data_collator, tokenizer, eval_task["name"], eval_task["type"]
            )
            eval_history[eval_task["name"]].append((loss, accuracy))
        # Unload OSF to get the updated base model for next task (if not last task)
        if task_idx < len(tasks) - 1:
            print("\nUnloading OSF adapter to prepare for next task...")
            model = model.unload()
    # Save final model
    final_model_path = f"{output_dir}/osf_final"
    model.save_pretrained(final_model_path)
    print(f"\nFinal OSF model saved to {final_model_path}")
    return eval_history
def train_full_finetuning(
    model_name,
    num_train,
    num_eval,
    output_dir,
    num_epochs,
    learning_rate,
    batch_size,
    gradient_accumulation_steps,
    max_length,
    seed,
):
    """Train using standard full fine-tuning (baseline for comparison).

    Mirrors `train_with_osf` (same tasks, same order, same hyperparameters)
    but updates all model weights directly, with no OSF wrapping, to expose
    catastrophic forgetting.

    Args:
        model_name: HF hub id or local path of the base causal LM.
        num_train: Per-task train size.
            NOTE(review): currently unused — the loaders below hard-code
            1000/1000/350; confirm whether this should be threaded through.
        num_eval: Per-task eval size (also unused here — see note above).
        output_dir: Root directory for per-task outputs and the final model.
        num_epochs: Training epochs per task.
        learning_rate: Learning rate used for every task.
        batch_size: Per-device train batch size.
        gradient_accumulation_steps: Gradient accumulation steps.
        max_length: Max tokenized sequence length.
        seed: Seed forwarded to the dataset loaders for reproducible splits.

    Returns:
        Dict mapping task name -> list of (loss, accuracy) tuples, one entry
        appended each time that task is evaluated.
    """
    print("\n" + "=" * 80)
    print("TRAINING WITH FULL FINE-TUNING (Baseline)")
    print("=" * 80)
    # Load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    tokenizer.pad_token = tokenizer.eos_token
    # Load all datasets with task-specific sizes
    # FOMC only has 496 samples total, so we use 350 train + 146 eval for it
    print("\nLoading datasets...")
    scienceqa_train, scienceqa_eval = load_scienceqa(1000, 200, seed)
    numglue_train, numglue_eval = load_numglue(1000, 200, seed)
    fomc_train, fomc_eval = load_fomc(350, 146, seed)
    # Store original eval datasets
    # NOTE(review): these originals are never read below — candidates for removal.
    scienceqa_eval_original = scienceqa_eval
    numglue_eval_original = numglue_eval
    fomc_eval_original = fomc_eval
    # Format datasets (tokenize into prompt/completion form; drop raw columns)
    scienceqa_train = scienceqa_train.map(
        lambda x: format_scienceqa_for_llama(x, tokenizer, max_length),
        batched=True,
        remove_columns=scienceqa_train.column_names,
    )
    scienceqa_eval = scienceqa_eval.map(
        lambda x: format_scienceqa_for_llama(x, tokenizer, max_length),
        batched=True,
        remove_columns=scienceqa_eval.column_names,
    )
    numglue_train = numglue_train.map(
        lambda x: format_numglue_for_llama(x, tokenizer, max_length),
        batched=True,
        remove_columns=numglue_train.column_names,
    )
    numglue_eval = numglue_eval.map(
        lambda x: format_numglue_for_llama(x, tokenizer, max_length),
        batched=True,
        remove_columns=numglue_eval.column_names,
    )
    fomc_train = fomc_train.map(
        lambda x: format_fomc_for_llama(x, tokenizer, max_length), batched=True, remove_columns=fomc_train.column_names
    )
    fomc_eval = fomc_eval.map(
        lambda x: format_fomc_for_llama(x, tokenizer, max_length), batched=True, remove_columns=fomc_eval.column_names
    )
    data_collator = DataCollatorForCompletionOnly(tokenizer, max_length)
    tasks = [
        {"name": "ScienceQA", "train": scienceqa_train, "eval": scienceqa_eval, "type": "scienceqa"},
        {"name": "NumGLUE", "train": numglue_train, "eval": numglue_eval, "type": "numglue"},
        {"name": "FOMC", "train": fomc_train, "eval": fomc_eval, "type": "fomc"},
    ]
    # Store evaluation history
    eval_history = {
        "ScienceQA": [],
        "NumGLUE": [],
        "FOMC": [],
    }
    # Load base model once; unlike OSF there is no wrap/unload cycle per task
    model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
    # Sequential task training
    for task_idx, task in enumerate(tasks):
        print(f"\n{'=' * 80}")
        print(f"TASK {task_idx + 1}: {task['name']}")
        print(f"{'=' * 80}")
        # Training arguments
        training_args = TrainingArguments(
            output_dir=f"{output_dir}/full_{task['name'].lower()}",
            num_train_epochs=num_epochs,
            per_device_train_batch_size=batch_size,
            gradient_accumulation_steps=gradient_accumulation_steps,
            learning_rate=learning_rate,
            logging_steps=10,
            save_strategy="no",
            bf16=True,
            remove_unused_columns=False,
        )
        # Train on current task
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=task["train"],
            data_collator=data_collator,
        )
        print(f"\nTraining on {task['name']}...")
        trainer.train()
        # Evaluate on all tasks seen so far
        print(f"\nEvaluating on all tasks after training on {task['name']}:")
        for eval_task_idx in range(task_idx + 1):
            eval_task = tasks[eval_task_idx]
            loss, accuracy = evaluate_model(
                model, eval_task["eval"], data_collator, tokenizer, eval_task["name"], eval_task["type"]
            )
            eval_history[eval_task["name"]].append((loss, accuracy))
    # Save final model
    final_model_path = f"{output_dir}/full_final"
    model.save_pretrained(final_model_path)
    print(f"\nFinal full fine-tuning model saved to {final_model_path}")
    return eval_history
def print_results_comparison(osf_history, full_history):
    """Print comparison table of OSF vs Full Fine-tuning.

    Both arguments map task name -> list of (loss, accuracy) tuples, where
    entry 0 is the evaluation right after the task was learned and the last
    entry is the evaluation after the final task.
    """
    banner = "=" * 80
    divider = "-" * 80
    print("\n" + banner)
    print("RESULTS COMPARISON: OSF vs Full Fine-tuning")
    print(banner)
    task_names = ["ScienceQA", "NumGLUE", "FOMC"]
    # Per-(task, checkpoint) accuracy table
    print("\n" + divider)
    print("DETAILED RESULTS (Accuracy %)")
    print(divider)
    print(f"{'Task':<15} {'After Task':<15} {'OSF Acc %':<15} {'Full FT Acc %':<15} {'Difference':<15}")
    print(divider)
    for learned_idx, learned in enumerate(task_names):
        for after_idx in range(learned_idx, len(task_names)):
            eval_after = task_names[after_idx]
            # history[learned][0] is the evaluation right after learning `learned`
            record_idx = after_idx - learned_idx
            osf_acc = osf_history[learned][record_idx][1] * 100
            full_acc = full_history[learned][record_idx][1] * 100
            diff = osf_acc - full_acc
            print(
                f"{learned:<15} {eval_after:<15} {osf_acc:<15.2f} {full_acc:<15.2f} {diff:+15.2f}{' (OSF better)' if diff > 0 else ''}"
            )
    # Summary statistics
    print("\n" + banner)
    print("SUMMARY METRICS")
    print(banner)
    # Final average accuracy across all 3 tasks
    osf_final_accs = [osf_history[t][-1][1] * 100 for t in task_names]
    full_final_accs = [full_history[t][-1][1] * 100 for t in task_names]
    osf_avg_final = sum(osf_final_accs) / len(osf_final_accs)
    full_avg_final = sum(full_final_accs) / len(full_final_accs)
    print("\n1. Average Accuracy Across All 3 Tasks (After Final Task):")
    print(f" OSF: {osf_avg_final:.2f}%")
    print(f" Full FT: {full_avg_final:.2f}%")
    print(
        f" Difference: {osf_avg_final - full_avg_final:+.2f}% {'(OSF better)' if osf_avg_final > full_avg_final else '(Full FT better)'}"
    )
    # Average forgetting (for tasks 1 and 2 only, since task 3 is the final task)
    print("\n2. Average Forgetting (Task 1 & 2):")
    print(" Forgetting = Final Accuracy - Initial Accuracy (negative is worse)\n")
    osf_drops = []
    full_drops = []
    for t in task_names[:-1]:  # Exclude last task
        osf_first = osf_history[t][0][1] * 100  # Right after learning the task
        osf_last = osf_history[t][-1][1] * 100  # After learning all tasks
        osf_drop = osf_last - osf_first
        full_first = full_history[t][0][1] * 100
        full_last = full_history[t][-1][1] * 100
        full_drop = full_last - full_first
        osf_drops.append(osf_drop)
        full_drops.append(full_drop)
        print(f" {t}:")
        print(f" OSF: {osf_drop:+.2f}% (initial: {osf_first:.2f}% → final: {osf_last:.2f}%)")
        print(
            f" Full FT: {full_drop:+.2f}% (initial: {full_first:.2f}% → final: {full_last:.2f}%)"
        )
        print(
            f" Difference: {osf_drop - full_drop:+.2f}% {'(OSF better)' if osf_drop > full_drop else '(Full FT better)'}\n"
        )
    osf_avg_drop = sum(osf_drops) / len(osf_drops)
    full_avg_drop = sum(full_drops) / len(full_drops)
    print(" Average Forgetting:")
    print(f" OSF: {osf_avg_drop:+.2f}%")
    print(f" Full FT: {full_avg_drop:+.2f}%")
    print(
        f" Difference: {osf_avg_drop - full_avg_drop:+.2f}% {'(OSF better)' if osf_avg_drop > full_avg_drop else '(Full FT better)'}"
    )
    print("\n" + banner)
def main():
    """CLI entry point for the OSF continual-learning example.

    Parses command-line arguments, trains with OSF across the three tasks,
    optionally runs the full fine-tuning baseline, and prints summary metrics.
    """
    parser = argparse.ArgumentParser(description="OSF Continual Learning Example")
    parser.add_argument(
        "--model_name",
        type=str,
        default="meta-llama/Llama-3.1-8B-Instruct",
        help="Model name or path",
    )
    parser.add_argument("--num_train", type=int, default=1000, help="Number of training samples per task")
    parser.add_argument("--num_eval", type=int, default=200, help="Number of evaluation samples per task")
    parser.add_argument("--output_dir", type=str, default="./osf_continual_learning_outputs", help="Output directory")
    parser.add_argument("--num_epochs", type=int, default=2, help="Number of training epochs per task")
    parser.add_argument("--learning_rate", type=float, default=5e-6, help="Learning rate")
    parser.add_argument("--batch_size", type=int, default=32, help="Batch size per device")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1, help="Gradient accumulation steps")
    parser.add_argument("--max_length", type=int, default=512, help="Maximum sequence length")
    parser.add_argument("--seed", type=int, default=42, help="Random seed")
    parser.add_argument(
        "--run_baseline",
        action="store_true",
        help="Also run full fine-tuning baseline for comparison",
    )
    args = parser.parse_args()

    # Create output directory
    os.makedirs(args.output_dir, exist_ok=True)

    # Both training entry points take the same positional arguments, so share them.
    train_args = (
        args.model_name,
        args.num_train,
        args.num_eval,
        args.output_dir,
        args.num_epochs,
        args.learning_rate,
        args.batch_size,
        args.gradient_accumulation_steps,
        args.max_length,
        args.seed,
    )

    # Train with OSF
    osf_history = train_with_osf(*train_args)

    # Optionally train with the full fine-tuning baseline and compare.
    if args.run_baseline:
        full_history = train_full_finetuning(*train_args)
        print_results_comparison(osf_history, full_history)
        return

    print("\n" + "=" * 80)
    print("OSF TRAINING COMPLETE")
    print("=" * 80)
    print("\nTo compare with full fine-tuning baseline, run with --run_baseline flag")

    # Print OSF-only summary
    tasks = ["ScienceQA", "NumGLUE", "FOMC"]
    print("\n" + "=" * 80)
    print("OSF SUMMARY METRICS")
    print("=" * 80)

    # Final average accuracy across all tasks after the last task was learned.
    osf_final_accs = [osf_history[task][-1][1] * 100 for task in tasks]
    osf_avg_final = sum(osf_final_accs) / len(osf_final_accs)
    print(f"\n1. Average Accuracy Across All 3 Tasks (After Final Task): {osf_avg_final:.2f}%")
    for task, acc in zip(tasks, osf_final_accs):
        print(f"   {task}: {acc:.2f}%")

    # Average forgetting for all but the final task. NOTE: this now uses the
    # same sign convention as print_results_comparison (final - initial,
    # negative is worse); the previous version computed initial - final, which
    # was inconsistent with the comparison output.
    print("\n2. Average Forgetting (Task 1 & 2):")
    osf_forgetting_vals = []
    for task in tasks[:-1]:
        osf_initial_acc = osf_history[task][0][1] * 100  # right after learning the task
        osf_final_acc = osf_history[task][-1][1] * 100  # after learning all tasks
        osf_forgetting = osf_final_acc - osf_initial_acc
        osf_forgetting_vals.append(osf_forgetting)
        print(f"   {task}: {osf_forgetting:+.2f}% (initial: {osf_initial_acc:.2f}% → final: {osf_final_acc:.2f}%)")
    osf_avg_forgetting = sum(osf_forgetting_vals) / len(osf_forgetting_vals)
    print(f"   Average: {osf_avg_forgetting:+.2f}%")
    print("\n" + "=" * 80)


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/peft",
"file_path": "examples/orthogonal_subspace_learning/osf_continual_learning.py",
"license": "Apache License 2.0",
"lines": 576,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/orthogonal_subspace_learning/utils.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from datasets import load_dataset
def load_scienceqa(num_train=1000, num_eval=200, seed=42):
    """
    Load ScienceQA dataset for science question answering.

    Args:
        num_train: Number of training samples
        num_eval: Number of evaluation samples
        seed: Random seed for reproducibility

    Returns:
        train_dataset, eval_dataset
    """
    # Shuffle deterministically, then carve out disjoint train/eval slices.
    shuffled = load_dataset("derek-thomas/ScienceQA", split="train").shuffle(seed=seed)
    train_dataset = shuffled.select(range(num_train))
    eval_dataset = shuffled.select(range(num_train, num_train + num_eval))
    return train_dataset, eval_dataset
def load_numglue(num_train=1000, num_eval=200, seed=42):
    """
    Load NumGLUE dataset for mathematical reasoning.

    Args:
        num_train: Number of training samples
        num_eval: Number of evaluation samples
        seed: Random seed for reproducibility

    Returns:
        train_dataset, eval_dataset
    """
    import json

    from datasets import Dataset
    from huggingface_hub import hf_hub_download

    # Fetch the raw NumGLUE JSON-lines file directly from the Hub.
    json_path = hf_hub_download(repo_id="metaeval/num-glue", filename="NumGLUE_train.json", repo_type="dataset")

    # Parse the file line by line into question/answer records.
    records = []
    with open(json_path) as fh:
        for line in fh:
            if not line.strip():  # skip empty lines
                continue
            item = json.loads(line)
            answer = item.get("answer", "")
            # NumGLUE answers may be a JSON object with 'number' and 'date'
            # fields; keep only the number. Otherwise stringify as-is.
            answer_str = answer.get("number", "") if isinstance(answer, dict) else str(answer)
            records.append({"question": item.get("question", ""), "answer": answer_str})

    dataset = Dataset.from_list(records).shuffle(seed=seed)

    # Clamp the split boundaries in case fewer samples are available.
    train_end = min(num_train, len(dataset))
    eval_end = min(num_train + num_eval, len(dataset))
    train_dataset = dataset.select(range(train_end))
    eval_dataset = dataset.select(range(train_end, eval_end))
    return train_dataset, eval_dataset
def load_fomc(num_train=1000, num_eval=200, seed=42):
    """
    Load FOMC dataset for financial sentiment classification.

    Args:
        num_train: Number of training samples
        num_eval: Number of evaluation samples
        seed: Random seed for reproducibility

    Returns:
        train_dataset, eval_dataset
    """
    # NOTE(review): this intentionally(?) draws from the "test" split — confirm.
    shuffled = load_dataset("TheFinAI/finben-fomc", split="test").shuffle(seed=seed)
    # Clamp the split boundaries in case fewer samples are available.
    train_end = min(num_train, len(shuffled))
    eval_end = min(num_train + num_eval, len(shuffled))
    train_dataset = shuffled.select(range(train_end))
    eval_dataset = shuffled.select(range(train_end, eval_end))
    return train_dataset, eval_dataset
def format_scienceqa_for_llama(examples, tokenizer, max_length=512):
    """Format ScienceQA examples for Llama instruction following."""
    prompts = []
    labels_text = []
    for question, choices, answer_idx in zip(examples["question"], examples["choices"], examples["answer"]):
        # Render the answer options as "A. ...", "B. ...", etc.
        choices_text = "\n".join(f"{chr(65 + j)}. {choice}" for j, choice in enumerate(choices))
        prompt = f"""Answer the following science question by selecting the correct option.
Question: {question}
Choices:
{choices_text}
Answer (just the letter):"""
        prompts.append(prompt)
        labels_text.append(chr(65 + answer_idx))  # answer index -> letter

    tokenized_prompts = tokenizer(prompts, max_length=max_length, truncation=True, padding=False)
    tokenized_answers = tokenizer(labels_text, max_length=10, truncation=True, padding=False)

    combined_input_ids = []
    combined_attention_mask = []
    combined_labels = []
    for prompt_ids, answer_ids in zip(tokenized_prompts["input_ids"], tokenized_answers["input_ids"]):
        # Concatenate prompt + answer + EOS for teacher forcing.
        full_ids = prompt_ids + answer_ids + [tokenizer.eos_token_id]
        combined_input_ids.append(full_ids)
        combined_attention_mask.append([1] * len(full_ids))
        # Mask the prompt tokens so loss is computed only on the answer.
        combined_labels.append([-100] * len(prompt_ids) + answer_ids + [tokenizer.eos_token_id])
    return {
        "input_ids": combined_input_ids,
        "attention_mask": combined_attention_mask,
        "labels": combined_labels,
    }
def format_numglue_for_llama(examples, tokenizer, max_length=512):
    """Format NumGLUE examples for Llama instruction following."""
    prompts = []
    labels_text = []
    for question, answer in zip(examples["question"], examples["answer"]):
        prompt = f"""Solve the following math problem and provide just the numerical answer.
Question: {question}
Answer:"""
        prompts.append(prompt)
        labels_text.append(str(answer))

    tokenized_prompts = tokenizer(prompts, max_length=max_length, truncation=True, padding=False)
    tokenized_answers = tokenizer(labels_text, max_length=20, truncation=True, padding=False)

    combined_input_ids = []
    combined_attention_mask = []
    combined_labels = []
    for prompt_ids, answer_ids in zip(tokenized_prompts["input_ids"], tokenized_answers["input_ids"]):
        # Concatenate prompt + answer + EOS for teacher forcing.
        full_ids = prompt_ids + answer_ids + [tokenizer.eos_token_id]
        combined_input_ids.append(full_ids)
        combined_attention_mask.append([1] * len(full_ids))
        # Mask the prompt tokens so loss is computed only on the answer.
        combined_labels.append([-100] * len(prompt_ids) + answer_ids + [tokenizer.eos_token_id])
    return {
        "input_ids": combined_input_ids,
        "attention_mask": combined_attention_mask,
        "labels": combined_labels,
    }
def format_fomc_for_llama(examples, tokenizer, max_length=512):
    """Format FOMC examples for Llama instruction following."""
    prompts = []
    labels_text = []
    for text, raw_label in zip(examples["text"], examples["answer"]):
        # FOMC labels come in lowercase ('dovish', 'hawkish', 'neutral');
        # capitalize to match the option names in the prompt.
        prompt = f"""Classify the sentiment of the following Federal Reserve statement as Dovish, Hawkish, or Neutral.
Statement: {text}
Sentiment:"""
        prompts.append(prompt)
        labels_text.append(raw_label.capitalize())

    tokenized_prompts = tokenizer(prompts, max_length=max_length, truncation=True, padding=False)
    tokenized_answers = tokenizer(labels_text, max_length=10, truncation=True, padding=False)

    combined_input_ids = []
    combined_attention_mask = []
    combined_labels = []
    for prompt_ids, answer_ids in zip(tokenized_prompts["input_ids"], tokenized_answers["input_ids"]):
        # Concatenate prompt + answer + EOS for teacher forcing.
        full_ids = prompt_ids + answer_ids + [tokenizer.eos_token_id]
        combined_input_ids.append(full_ids)
        combined_attention_mask.append([1] * len(full_ids))
        # Mask the prompt tokens so loss is computed only on the answer.
        combined_labels.append([-100] * len(prompt_ids) + answer_ids + [tokenizer.eos_token_id])
    return {
        "input_ids": combined_input_ids,
        "attention_mask": combined_attention_mask,
        "labels": combined_labels,
    }
class DataCollatorForCompletionOnly:
    """Data collator that pads sequences for training."""

    def __init__(self, tokenizer, max_length=512):
        # Tokenizer supplies the pad token id; max_length caps batch width.
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __call__(self, features):
        # Pad every example to the longest sequence in the batch, capped at
        # max_length; longer examples are truncated to that width.
        batch_width = min(max(len(f["input_ids"]) for f in features), self.max_length)
        pad_id = self.tokenizer.pad_token_id

        input_ids = []
        attention_mask = []
        labels = []
        for f in features:
            ids = f["input_ids"][:batch_width]
            n_pad = batch_width - len(ids)
            input_ids.append(ids + [pad_id] * n_pad)
            attention_mask.append(f["attention_mask"][:batch_width] + [0] * n_pad)
            # Padding positions are excluded from the loss via -100.
            labels.append(f["labels"][:batch_width] + [-100] * n_pad)

        return {
            "input_ids": torch.tensor(input_ids, dtype=torch.long),
            "attention_mask": torch.tensor(attention_mask, dtype=torch.long),
            "labels": torch.tensor(labels, dtype=torch.long),
        }
| {
"repo_id": "huggingface/peft",
"file_path": "examples/orthogonal_subspace_learning/utils.py",
"license": "Apache License 2.0",
"lines": 225,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/dora_finetuning/dora-caching.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Small script to measure DoRA caching efficiency
"""
import argparse
import time
from contextlib import contextmanager
import torch
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model
from peft.helpers import DoraCaching
from peft.utils import infer_device
# Pick the available accelerator device string via PEFT's helper.
device = infer_device()
# check for CPU
if device == "cpu":
    raise ValueError("This benchmark requires a hardware accelerator, only found CPU")
# Resolve the matching torch backend module (e.g. torch.cuda for "cuda");
# fall back to torch.cuda if torch has no attribute named after the device.
torch_accelerator_module = getattr(torch, device, torch.cuda)
@contextmanager
def timeit(logs):
    """Context manager that appends the elapsed wall time of the `with` block
    to logs["time"]. Nothing is recorded if the block raises."""
    t0 = time.perf_counter()
    yield
    logs["time"].append(time.perf_counter() - t0)
def run_benchmark(model, num_runs):
    """Run `num_runs` timed forward-pass batches (plus one discarded warm-up)
    and return per-run times and the peak reserved-memory delta in MB."""
    logs = {"time": []}
    mem_before = torch_accelerator_module.max_memory_reserved()
    for _ in range(num_runs + 1):
        with timeit(logs):
            # Three forward passes per timed run on a fresh random batch.
            for _ in range(3):
                batch = torch.randint(10, 100, (1, 50)).to(device)
                model(batch)
    mem_after = torch_accelerator_module.max_memory_reserved()
    logs["memory"] = (mem_after - mem_before) / 1024**2
    # Discard the first (warm-up) run.
    logs["time"].pop(0)
    return logs
def main(model_id, num_runs):
    """Benchmark LoRA vs. DoRA (with and without caching) and print the
    timing and memory results, including overheads relative to LoRA."""
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map=device)
    base_memory = torch_accelerator_module.max_memory_reserved() / 1024**2

    # --- LoRA baseline ---
    model = get_peft_model(model, LoraConfig(init_lora_weights=False, use_dora=False))
    model.eval()
    torch_accelerator_module.reset_peak_memory_stats()
    lora_logs = run_benchmark(model, num_runs)
    avg_duration_lora = sum(lora_logs["time"]) / num_runs
    max_memory_lora = lora_logs["memory"] + base_memory

    # --- DoRA: reload the base model so memory accounting starts fresh ---
    del model
    torch_accelerator_module.empty_cache()
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map=device)
    base_memory = torch_accelerator_module.max_memory_reserved() / 1024**2
    model = get_peft_model(model, LoraConfig(init_lora_weights=False, use_dora=True))
    model.eval()

    # DoRA without caching
    torch_accelerator_module.reset_peak_memory_stats()
    no_cache_logs = run_benchmark(model, num_runs)
    avg_duration_no_caching = sum(no_cache_logs["time"]) / num_runs
    max_memory_no_caching = no_cache_logs["memory"] + base_memory

    # DoRA with caching enabled
    torch_accelerator_module.reset_peak_memory_stats()
    with DoraCaching():
        cache_logs = run_benchmark(model, num_runs)
    avg_duration_caching = sum(cache_logs["time"]) / num_runs
    max_memory_caching = cache_logs["memory"] + base_memory

    print(
        f"Benchmark results for model {model_id} with {num_runs} runs:\n\n"
        f"avg time LoRA: {avg_duration_lora:.4f} sec\n"
        f"avg time DoRA no caching: {avg_duration_no_caching:.4f} sec\n"
        f"avg time DoRA with caching: {avg_duration_caching:.4f} sec\n"
        f"\n"
        f"memory LoRA: {max_memory_lora:.2f} MB\n"
        f"memory DoRA no caching: {max_memory_no_caching:.2f} MB\n"
        f"memory DoRA with caching: {max_memory_caching:.2f} MB\n"
        f"\n"
        f"DoRA time overhead no caching: {(avg_duration_no_caching - avg_duration_lora) / avg_duration_lora * 100:.2f}%\n"
        f"DoRA time overhead with caching: {(avg_duration_caching - avg_duration_lora) / avg_duration_lora * 100:.2f}%\n"
        f"\n"
        f"DoRA memory overhead no caching: {(max_memory_no_caching - max_memory_lora) / max_memory_lora * 100:.2f}%\n"
        f"DoRA memory overhead with caching: {(max_memory_caching - max_memory_lora) / max_memory_lora * 100:.2f}%"
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Benchmark DoRA caching efficiency")
    parser.add_argument("--model_id", type=str, default="meta-llama/Llama-3.1-8B", help="Model ID to benchmark")
    parser.add_argument("--num_runs", type=int, default=10, help="Number of runs for the benchmark")
    cli_args = parser.parse_args()
    main(cli_args.model_id, cli_args.num_runs)
| {
"repo_id": "huggingface/peft",
"file_path": "examples/dora_finetuning/dora-caching.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:scripts/evaluate-lora-conversion.py | #!/usr/bin/env python3
# Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to evaluate a PEFT checkpoint converted into a LoRA on GSM8K
To run this script, first train a PEFT model on MetaMathQA as described here:
https://github.com/huggingface/peft/tree/main/method_comparison/MetaMathQA
Call the script with the `-v` (verbose) option. When that run finishes, it will save a checkpoint of that model and
print a message like this: "Saved PEFT checkpoint to ...". Use this path as the `--path` argument to this script.
Example usage:
```bash
# Convert to LoRA with rank 8 and evaluate it
python evaluate-lora-conversion.py --path /path/to/peft/checkpoint --rank 8
# Convert to LoRA with dynamic rank (50% singular value threshold) and evaluate it
python evaluate-lora-conversion.py --path /path/to/peft/checkpoint --rank 0.5
# Evaluate the original PEFT model without LoRA conversion
python evaluate-lora-conversion.py --path /path/to/peft/checkpoint
```
The script will report the evaluation accuracy, maximum CUDA memory reserved, and evaluation time for the converted LoRA
model.
"""
import argparse
import importlib.util
import os
import sys
import time
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel, convert_to_lora, get_peft_model, set_peft_model_state_dict
# Dynamically load the MetaMathQA benchmark helpers (data.py, utils.py, run.py)
# from method_comparison/, which is not an installable package.
# NOTE(review): the modules are also registered in sys.modules under the bare
# names "data"/"utils", presumably because they import each other that way —
# confirm before changing the registration order.
root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
spec = importlib.util.spec_from_file_location("data", os.path.join(root, "method_comparison", "MetaMathQA", "data.py"))
mm_data = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mm_data)
sys.modules["data"] = mm_data
spec = importlib.util.spec_from_file_location(
    "utils", os.path.join(root, "method_comparison", "MetaMathQA", "utils.py")
)
mm_utils = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mm_utils)
sys.modules["utils"] = mm_utils
spec = importlib.util.spec_from_file_location("run", os.path.join(root, "method_comparison", "MetaMathQA", "run.py"))
mm_run = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mm_run)
def noop(*args, **kwargs):
    """Ignore all arguments and return None; used as a silent print_fn."""
    return None
def evaluate_model(model, tokenizer, ds_test):
    """Evaluate `model` on the GSM8K test set and print accuracy, peak CUDA
    memory reserved, and wall-clock evaluation time.

    Args:
        model: The (PEFT) model to evaluate.
        tokenizer: Tokenizer matching the model.
        ds_test: Test dataset as produced by `mm_data.get_train_valid_test_datasets`.
    """
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    tic = time.perf_counter()
    predictions, responses = mm_run.evaluate(
        model=model,
        tokenizer=tokenizer,
        ds=ds_test,
        batch_size=50,
        generate_kwargs={"max_length": 800, "max_new_tokens": 300, "pad_token_id": tokenizer.eos_token_id},
        use_tqdm=True,
    )
    toc = time.perf_counter()
    accuracy_peft = mm_utils.get_accuracy(predictions=predictions, responses=responses)
    # Report the peak reserved memory since the reset above; the previous code
    # called memory_reserved(0), which returns the *current* amount, not the max.
    cuda_mem_reserved_max = torch.cuda.max_memory_reserved(0)
    print(f"Evaluation Accuracy: {100 * accuracy_peft:.2f}%")
    print(f"Max CUDA Memory Reserved: {cuda_mem_reserved_max / (1024**3):.2f} GB")
    # Previously this chained a redundant no-op .format() onto an f-string.
    print(f"Evaluation Time: {toc - tic:.0f} seconds")
def main(path_peft_model: str, rank: int | float | None) -> None:
    """Evaluate a PEFT checkpoint on GSM8K, optionally after LoRA conversion.

    Args:
        path_peft_model: Path to the trained PEFT checkpoint.
        rank: Rank for the LoRA conversion (int for a fixed rank, float for a
            singular-value energy threshold), or None to evaluate the original
            PEFT model without conversion.
    """
    model_id = "meta-llama/Llama-3.2-3B"
    tokenizer = mm_utils.get_tokenizer(model_id=model_id, max_seq_length=768)
    _, _, ds_test = mm_data.get_train_valid_test_datasets(
        tokenizer=tokenizer, query_template="Question: {query} Think step by step.\nAnswer:", print_fn=noop
    )
    model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16).to(0)
    model = PeftModel.from_pretrained(model, path_peft_model)

    if rank is None:
        print("Evaluating the original PEFT model without LoRA conversion...")
        model.set_adapter("default")
        model.print_trainable_parameters()
        model.eval()
        evaluate_model(model, tokenizer, ds_test)
        return

    print(f"Converting PEFT model to LoRA with rank={rank}...")
    tic = time.perf_counter()
    lora_config, lora_state_dict = convert_to_lora(model, rank=rank, progressbar=True)
    toc = time.perf_counter()
    # Previously this chained a redundant no-op .format() onto an f-string.
    print(f"Conversion completed in {toc - tic:.0f} seconds.")

    # Free the original model before loading a fresh base model for the LoRA.
    del model
    torch.cuda.empty_cache()
    model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16).to(0)
    model = get_peft_model(model, lora_config)
    model.print_trainable_parameters()
    load_result = set_peft_model_state_dict(model, lora_state_dict)
    # Explicit raise instead of assert so the check survives `python -O`.
    if load_result.unexpected_keys:
        raise RuntimeError(f"Unexpected keys when loading LoRA state dict: {load_result.unexpected_keys}")
    del lora_state_dict
    model.eval()
    evaluate_model(model, tokenizer, ds_test)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Evaluate a PEFT checkpoint converted into a LoRA on GSM8K")
    parser.add_argument(
        "--path",
        type=str,
        required=True,
        help="Path to the input PEFT checkpoint",
    )
    parser.add_argument(
        "--rank",
        required=False,
        default=None,
        help="Rank for the LoRA decomposition (int, float, or None for no conversion)",
    )
    args = parser.parse_args()
    # A value containing a dot is a threshold (float); otherwise a fixed rank (int).
    if args.rank is not None:
        rank_text = str(args.rank)
        args.rank = float(rank_text) if "." in rank_text else int(rank_text)
    main(args.path, args.rank)
| {
"repo_id": "huggingface/peft",
"file_path": "scripts/evaluate-lora-conversion.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/lora/conversion.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import pathlib
import warnings
import torch
from safetensors.torch import save_file
from tqdm import tqdm
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import SAFETENSORS_WEIGHTS_NAME
from peft.utils.other import ModulesToSaveWrapper
from .config import LoraConfig
def _find_cutoff_index(S: torch.Tensor, threshold: float) -> int:
# assumes that the singular values are sorted
if S.dim() != 1:
raise ValueError("Input vector must be 1d.")
energy = S**2
cs = torch.cumsum(energy, dim=0)
total = cs[-1]
cutoff = threshold * total
# smallest index i with cs[i] >= cutoff
k = torch.searchsorted(cs, cutoff).item()
# k is the index of the first item that surpasses the threshold; since we want to include it, add + 1
return k + 1
@torch.no_grad()
def _convert_module_to_lora(
    module: BaseTunerLayer, rank: int | float, adapter_name: str = "default"
) -> tuple[torch.Tensor, torch.Tensor, int]:
    """Convert a single BaseTunerLayer's adapter weight to a LoRA weight, return A, B, and the effective rank."""
    delta = module.get_delta_weight(adapter_name)
    orig_dtype = delta.dtype
    # SVD is not implemented for half-precision types, so work in float32.
    # Note: Explore different algorithms (truncated, randomized, ...) to see if they are more efficient
    U, S, V = torch.linalg.svd(delta.float(), full_matrices=False)

    # An int rank is used as-is; a float is interpreted as an energy threshold.
    effective_rank = rank if isinstance(rank, int) else _find_cutoff_index(S, threshold=rank)
    if effective_rank > U.shape[1]:
        raise ValueError(
            f"The chosen rank {effective_rank} is larger than the weight shape ({U.shape[1]}), please choose a lower "
            "rank."
        )

    # Fold the singular values into B so that B @ A reproduces the delta weight.
    lora_B = (U[:, :effective_rank] * S[:effective_rank]).to(orig_dtype)
    lora_A = V[:effective_rank].to(orig_dtype)

    if isinstance(module.get_base_layer(), Conv1D):
        # Conv1D => original weight is transposed compared to Linear
        return lora_B.T.contiguous(), lora_A.T.contiguous(), effective_rank
    return lora_A.contiguous(), lora_B.contiguous(), effective_rank
def convert_to_lora(
    model: torch.nn.Module,
    rank: int | float,
    adapter_name: str = "default",
    progressbar: bool = False,
    compile_kwargs=None,
) -> tuple[LoraConfig, dict[str, torch.Tensor]]:
    """
    Convert a non-LoRA model with PEFT layers to a LoRA checkpoint.

    This is only supported for some specific PEFT methods that allow an equivalent conversion. Essentially, this comes
    down to PEFT methods that work by updating the base weight with a delta weight. Also, right now, only linear layers
    are supported.

    The LoRA adapter will try to approximate the initial adapter as close as possible. The higher the rank, the better
    the approximation. It is expected that the approximation will never reach the full performance of the original
    adapter, and that the parameter efficiency of the LoRA adapter will be less than that of the original adapter (i.e.
    for a similar performance, it will require more parameters). The conversion can still be useful in many situations:

    - In PEFT, LoRA supports more features than most other methods, e.g. mixed adapter batches. Thus the converted
      adapter can be used with those features.
    - Some downstream packages support LoRA adapters, but not other PEFT methods, e.g. Diffusers. The conversion allows
      to use a non-LoRA adapter with those packages.

    The LoRA scaling factor is already baked into the LoRA weights, thus the scaling will always be one (i.e. rank and
    alpha are chosen to be identical).

    Note: This function does not support sharded models (yet).

    Args:
        model:
            The model to be converted. Should be a model that has PEFT layers that support conversion.
        rank (`int` or `float`):
            The desired rank for the returned LoRA adapter. A higher rank results in a LoRA adapter that more
            accurately mirrors the original adapter. It will, however, also require more memory, compute, and disk
            space. Therefore, choose a value that represents the best trade off for your use case and validate the
            final adapter. If a float is passed, it is interpreted as an explained variance / energy threshold: we pick
            the smallest rank k such that the top k singular values account for at least that fraction of the total
            squared singular values. This effectively results in lower ranks being assigned if a few singular values
            can capture the adaptation of this layer. A lower float means the rank is lower and vice versa. Be aware
            that dynamic ranks can lead to very unequal ranks per layer, which means that some layers may require a
            disproportionally high amount of memory for activations. Choosing a fixed (int) rank is better to achieve
            predictable memory requirement.
        adapter_name (`str`, *optional*):
            The name of the adapter to be converted. Can only convert a single adapter at a time. Defaults to
            `"default"`.
        progressbar (`bool`):
            whether to show a progressbar indicating the progress of the conversion (it can take a few minutes for big
            models).
        compile_kwargs (`dict`, *optional*):
            If provided, the compile the function to convert individual modules to LoRA with the given kwargs being
            passed to `torch.compile`. This can potentially speed up the conversion on large models.

    Returns:
        lora_config (`LoraConfig`)
            The `LoraConfig` that corresponds to the converted LoRA adapter.
        state_dict (`dict[str, torch.Tensor]`)
            The `state_dict` containing the LoRA weights.

    Raises
        TypeError:
            If the provided model does not have any layers that can be converted to LoRA, a `TypeError` is raised.
        ValueError:
            If an invalid rank was chosen (too high or too low).
    """
    from peft import PeftType  # local to avoid circular import

    ##########
    # CHECKS #
    ##########
    if isinstance(rank, float) and not (0 < rank <= 1):
        raise ValueError(
            f"If rank is a float, it is interpreted as a threshold. It must be between 0 and 1 but got {rank}."
        )
    elif rank == 0:
        raise ValueError("Passing a rank of 0 doesn't make sense, please pass a valid value.")

    # check if LoRA conversion is supported at all
    modules_not_supporting_lora = []
    num_modules_with_support = 0
    num_modules_total = 0
    for module in model.modules():
        num_modules_total += 1
        if not isinstance(module, BaseTunerLayer):
            continue
        if module.supports_lora_conversion(adapter_name):
            num_modules_with_support += 1
        else:
            modules_not_supporting_lora.append(module)
    # report each unsupported module *type* only once
    unsupported = {repr(type(module)) for module in modules_not_supporting_lora}
    if unsupported:
        raise TypeError(f"Some module types on this model do not support LoRA conversion: {', '.join(unsupported)}.")
    if num_modules_with_support == 0:
        raise TypeError("Could not detect any layer that supports LoRA conversion.")

    peft_config = getattr(model, "peft_config", {}).get(adapter_name)
    if (peft_config is not None) and (peft_config.peft_type == PeftType.LORA):
        warnings.warn(
            "Converting a PEFT adapter to LoRA that is already a LoRA adapter. There is typically no need for that."
        )
    config_bias = getattr(peft_config, "bias", getattr(peft_config, "lora_bias", "none"))
    if config_bias != "none":
        # TODO: remove if/when we remove support for bias completely
        raise ValueError(f"The adapter's config sets bias={config_bias}, this is not supported right now.")

    ###############
    # PREPARATION #
    ###############
    # keys in the model state dict carry this prefix, but LoraConfig entries must not
    peft_prefix = "base_model.model."
    config_kwargs = {
        "rank_pattern": {},
        "alpha_pattern": {},
        "exclude_modules": set(),
    }
    if peft_config is not None:
        # use the model's PEFT config, if it exists, to initialize the new LoraConfig
        peft_config = model.peft_config[adapter_name]
        config_kwargs["target_modules"] = copy.copy(peft_config.target_modules)
        config_kwargs["base_model_name_or_path"] = peft_config.base_model_name_or_path
        if hasattr(peft_config, "layers_pattern"):
            # those two go hand in hand
            config_kwargs["layers_pattern"] = peft_config.layers_pattern
            config_kwargs["layers_to_transform"] = peft_config.layers_to_transform
        if isinstance(rank, int):
            # hard-coded rank
            lora_config = LoraConfig(r=rank, lora_alpha=rank, **config_kwargs)
        else:
            # r and lora_alpha shouldn't matter, as the rank will be determined by rank/alpha pattern
            lora_config = LoraConfig(r=1, lora_alpha=1, **config_kwargs)
    else:
        # create a new LoraConfig from scratch, inferring the target modules from the model
        lora_config = LoraConfig(
            r=rank if isinstance(rank, int) else 1,  # 1 is a dummy value, actual values will come from rank_pattern
            target_modules=[],
            **config_kwargs,
        )

    if compile_kwargs is not None:
        convert_module_to_lora = torch.compile(_convert_module_to_lora, **compile_kwargs)
    else:
        convert_module_to_lora = _convert_module_to_lora

    ##############
    # CONVERSION #
    ##############
    state_dict = {}
    for name, module in tqdm(
        model.named_modules(), disable=not progressbar, desc="Converting to LoRA", total=num_modules_total
    ):
        if not isinstance(module, BaseTunerLayer):
            continue
        if not hasattr(module, "get_delta_weight"):
            # if we arrive here, it means that the layer actually does not support LoRA conversion, which should not
            # happen
            raise TypeError(
                f"Module of type {type(module)} does not have a get_delta_weight method, which is required for "
                "conversion. Please open an issue: https://github.com/huggingface/peft/issues"
            )

        lora_A, lora_B, effective_rank = convert_module_to_lora(module, rank=rank, adapter_name=adapter_name)
        if effective_rank == 0:
            # This shouldn't really happen, as we ensure that the rank is greater than 0 (int) or, for thresholds
            # (float), at least one SV is included. But better be safe than sorry, as, in principle, it is fine to
            # exclude some layers. Also makes this more future proof.
            lora_config.exclude_modules.add(name.removeprefix(peft_prefix))
            continue

        # the rank was dynamically adjusted, store it in rank and alpha pattern
        if (effective_rank != rank) or isinstance(lora_config.target_modules, str):
            # we need to add an entry to rank/alpha pattern iff:
            # 1) The effective rank differs from the general rank
            # 2) target modules is a string, as we cannot simply append the name to target_modules regex
            lora_config.rank_pattern[name.removeprefix(peft_prefix)] = effective_rank
            lora_config.alpha_pattern[name.removeprefix(peft_prefix)] = effective_rank
        else:
            # effective rank is the same and target_modules are a set, just add the name
            lora_config.target_modules.add(name.removeprefix(peft_prefix))

        # don't include adapter_name in key
        state_dict[f"{name}.lora_A.weight"] = lora_A
        state_dict[f"{name}.lora_B.weight"] = lora_B

    if not state_dict:
        # no layer was converted, which should not happen
        raise ValueError(
            "Did not convert a single layer, this means that something went wrong. Please open an issue: "
            "https://github.com/huggingface/peft/issues"
        )

    ##################
    # NON-LORA PARTS #
    ##################
    if (peft_config is not None) and getattr(peft_config, "modules_to_save", None):
        # logic to take care of modules_to_save; might not cover all edge cases, like sharded model
        lora_config.modules_to_save = copy.copy(peft_config.modules_to_save)
        for module_name, module in model.named_modules():
            if isinstance(module, ModulesToSaveWrapper):
                for param_name, param in module.modules_to_save.named_parameters():
                    # it is expected that '.modules_to_save.' is not part of the key
                    prefix, _, _ = module_name.partition(".modules_to_save.")
                    # remove the adapter name
                    _, _, suffix = param_name.rpartition(".")
                    state_dict[f"{prefix}.{suffix}"] = param.data

    return lora_config, state_dict
def save_as_lora(
    path: str | os.PathLike,
    model: torch.nn.Module,
    rank: int | float,
    adapter_name: str = "default",
    progressbar: bool = False,
    compile_kwargs: dict | None = None,
) -> None:
    """
    Convert a non-LoRA model with PEFT layers to a LoRA, then save the checkpoint file and PEFT config.

    This is only supported for some specific PEFT methods that allow an equivalent conversion. Essentially, this comes
    down to PEFT methods that work by updating the base weight with a delta weight. Also, right now, only linear layers
    are supported.

    The LoRA adapter will try to approximate the initial adapter as close as possible. The higher the rank, the better
    the approximation. It is expected that the approximation will never reach the full performance of the original
    adapter, and that the parameter efficiency of the LoRA adapter will be less than that of the original adapter (i.e.
    for a similar performance, it will require more parameters). The conversion can still be useful in many situations:

    - In PEFT, LoRA supports more features than most other methods, e.g. mixed adapter batches. Thus the converted
      adapter can be used with those features.
    - Some downstream packages support LoRA adapters, but not other PEFT methods, e.g. Diffusers. The conversion allows
      to use a non-LoRA adapter with those packages.

    The LoRA scaling factor is already baked into the LoRA weights, thus the scaling will always be one (i.e. rank and
    alpha are chosen to be identical).

    You can load the converted LoRA weight like this:

    ```py
    >>> lora_path = ...
    >>> save_as_lora(lora_path, model, rank=...)
    >>> base_model = AutoModel.from_pretrained(...)
    >>> lora_model = PeftModel.from_pretrained(base_model, lora_path)
    ```

    Note: This function does not support sharded models (yet).

    Args:
        path (`str` or `os.PathLike`):
            Directory in which the LoRA weights and config are saved. Created (including parents) if it does not
            exist.
        model:
            The model to be converted. Should be a model that has PEFT layers that support conversion.
        rank (`int` or `float`):
            The desired rank for the returned LoRA adapter. A higher rank results in a LoRA adapter that more
            accurately mirrors the original adapter. It will, however, also require more memory, compute, and disk
            space. Therefore, choose a value that represents the best trade off for your use case and validate the
            final adapter. If a float is passed, it is interpreted as an explained variance / energy threshold: we pick
            the smallest rank k such that the top k singular values account for at least that fraction of the total
            squared singular values. This effectively results in lower ranks being assigned if a few singular values
            can capture the adaptation of this layer. A lower float means the rank is lower and vice versa. Be aware
            that dynamic ranks can lead to very unequal ranks per layer, which means that some layers may require a
            disproportionally high amount of memory for activations. Choosing a fixed (int) rank is better to achieve
            predictable memory requirement.
        adapter_name (`str`, *optional*):
            The name of the adapter to be converted. Can only convert a single adapter at a time. Defaults to
            `"default"`.
        progressbar (`bool`):
            whether to show a progressbar indicating the progress of the conversion (it can take a few minutes for big
            models).
        compile_kwargs (`dict`, *optional*):
            If provided, compile the function to convert individual modules to LoRA with the given kwargs being
            passed to `torch.compile`. This can potentially speed up the conversion on large models.

    Raises:
        TypeError:
            If the provided model does not have any layers that can be converted to LoRA, a `TypeError` is raised.
        ValueError:
            If an invalid rank was chosen (too high or too low).
    """
    path = pathlib.Path(path)
    # Create the target directory (and missing parents) in one call. Using exist_ok=True avoids the
    # race between a separate exists() check and the actual creation.
    path.mkdir(parents=True, exist_ok=True)
    lora_config, state_dict = convert_to_lora(
        model, rank=rank, adapter_name=adapter_name, progressbar=progressbar, compile_kwargs=compile_kwargs
    )
    save_file(state_dict, path / SAFETENSORS_WEIGHTS_NAME)
    lora_config.save_pretrained(str(path))
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/lora/conversion.py",
"license": "Apache License 2.0",
"lines": 317,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:tests/test_lora_conversion.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import platform
import re
import pytest
import torch
from torch import nn
from transformers import AutoModelForCausalLM
from peft import (
C3AConfig,
IA3Config,
LoKrConfig,
LoraConfig,
PeftModel,
PrefixTuningConfig,
convert_to_lora,
get_peft_model,
get_peft_model_state_dict,
save_as_lora,
set_peft_model_state_dict,
)
from peft.utils import infer_device
from .testing_utils import hub_online_once
class TestLoraConversion:
    """Test functionality to convert non-LoRA adapters to LoRA adapters

    This is mainly testing with LoKr, as it would be wasteful to test with all compatible PEFT methods in detail. For a
    broad suite of tests across PEFT methods, check test_decoder_models.py::test_lora_conversion.

    We mainly use convert_to_lora and not save_as_lora here, as it is just a thin wrapper around convert_to_lora and
    involves disk IO, which we want to avoid as much as possible. For most users, save_as_lora will most likely be the
    main entry point.

    For comparing outputs, it's not ideal to check the logits, as most of them are close to zero and we cannot use
    torch.allclose, as a certain deviation is expected from conversion. A robust way would be to check the hidden
    states after subtracting the base model's hidden states (since the contribution of the adapter is what we want to
    compare).
    """

    model_id = "peft-internal-testing/tiny-random-OPTForCausalLM"
    torch_device = infer_device()
    # lazily populated cache of the base model; get_base_model always hands out a deep copy
    base_model = None

    def get_base_model(self):
        """Return a fresh deep copy of the cached base model, loading it from the Hub on first call."""
        if self.base_model is None:
            with hub_online_once(self.model_id):
                self.base_model = AutoModelForCausalLM.from_pretrained(self.model_id).to(self.torch_device)
        return copy.deepcopy(self.base_model)

    @pytest.fixture
    def lokr_model(self):
        """Base model wrapped with a randomly initialized LoKr adapter (seeded for reproducibility)."""
        torch.manual_seed(0)
        return get_peft_model(self.get_base_model(), LoKrConfig(init_weights=False))

    @staticmethod
    def get_mse(output1, output2):
        """MSE between the last hidden states of two model outputs (see class docstring for rationale)."""
        return nn.functional.mse_loss(output1.hidden_states[-1], output2.hidden_states[-1]).item()

    def test_no_peft_layer_raises(self):
        # Model without any PEFT layer should raise
        base_model = self.get_base_model()
        msg = "Could not detect any layer that supports LoRA conversion"
        with pytest.raises(TypeError, match=msg):
            convert_to_lora(base_model, rank=8)

    def test_prompt_learning_model_raises(self):
        # Prefix Tuning does not support LoRA conversion
        base_model = self.get_base_model()
        config = PrefixTuningConfig(num_virtual_tokens=10, task_type="CAUSAL_LM")
        prefix_model = get_peft_model(base_model, config).eval()
        assert not prefix_model.supports_lora_conversion()
        msg = "Could not detect any layer that supports LoRA conversion"
        with pytest.raises(TypeError, match=msg):
            convert_to_lora(prefix_model, rank=8)

    def test_peft_model_but_no_support_raises(self):
        # IA3 has BaseTunerLayers but does not support LoRA conversion
        base_model = self.get_base_model()
        ia3_model = get_peft_model(base_model, IA3Config()).eval()
        assert not ia3_model.supports_lora_conversion()
        msg = "Some module types on this model do not support LoRA conversion"
        with pytest.raises(TypeError, match=msg):
            convert_to_lora(ia3_model, rank=8)

    def test_model_with_unsupported_layers_raises(self):
        # conv layers do not support LoRA conversion (yet)
        # note: change this test if we add support for conv layer conversion
        class MyModule(nn.Module):
            def __init__(self):
                super().__init__()
                self.conv = nn.Conv2d(16, 16, 3)
                self.lin = nn.Linear(16, 16)

        lokr_model = get_peft_model(MyModule(), LoKrConfig(target_modules=["conv", "lin"])).eval()
        assert not lokr_model.supports_lora_conversion()
        msg = "Some module types on this model do not support LoRA conversion"
        with pytest.raises(TypeError, match=msg):
            convert_to_lora(lokr_model, rank=8)

    def test_targeted_modules_identical(self, lokr_model):
        """The converted LoRA config/state dict must target exactly the modules LoKr targeted."""
        lora_config, lora_state_dict = convert_to_lora(lokr_model, rank=8)
        lokr_state_dict = lokr_model.state_dict()

        # LoRA should have an entry for each layer targeted by LoKr
        # cut off parameter name and PEFT method specific part of the name to obtain module name
        modules_lokr = {k.rsplit(".", 2)[0] for k in lokr_state_dict.keys() if ".lokr" in k}
        modules_lora = {k.rsplit(".", 2)[0] for k in lora_state_dict.keys() if ".lora" in k}
        assert modules_lokr == modules_lora

        # creating a new LoRA model based on the returned config should give the same state dict keys
        base_model = self.get_base_model()
        new_lora_model = get_peft_model(base_model, lora_config).eval()
        new_lora_state_dict = get_peft_model_state_dict(new_lora_model)
        assert lora_state_dict.keys() == new_lora_state_dict.keys()

    def test_targeted_modules_identical_target_modules_str(self):
        """Same as test_targeted_modules_identical, but with target_modules given as a regex string."""
        base_model = self.get_base_model()
        lokr_config = LoKrConfig(target_modules=r".*\.q_proj", r=16, init_weights=False)
        lokr_model = get_peft_model(base_model, lokr_config).eval()
        lora_config, lora_state_dict = convert_to_lora(lokr_model, rank=8)
        lokr_state_dict = lokr_model.state_dict()

        # LoRA should have an entry for each layer targeted by LoKr
        # cut off parameter name and PEFT method specific part of the name to obtain module name
        modules_lokr = {k.rsplit(".", 2)[0] for k in lokr_state_dict.keys() if ".lokr" in k}
        modules_lora = {k.rsplit(".", 2)[0] for k in lora_state_dict.keys() if ".lora" in k}
        assert modules_lokr == modules_lora

        # creating a new LoRA model based on the returned config should give the same state dict keys
        base_model = self.get_base_model()
        new_lora_model = get_peft_model(base_model, lora_config).eval()
        new_lora_state_dict = get_peft_model_state_dict(new_lora_model)
        assert lora_state_dict.keys() == new_lora_state_dict.keys()

    def test_fixed_rank_lora_config(self, lokr_model):
        # with a fixed rank, we expect target_modules to be set on the LoRA config but not rank_pattern, alpha_pattern
        lora_config, _ = convert_to_lora(lokr_model, rank=8)
        assert isinstance(lora_config, LoraConfig)
        assert lora_config.r == 8
        assert lora_config.lora_alpha == 8
        assert lora_config.target_modules
        assert not lora_config.rank_pattern
        assert not lora_config.alpha_pattern

    def test_dynamic_rank_lora_config(self, lokr_model):
        # with a dynamic rank, we expect rank_pattern and alpha_pattern to be set
        lora_config, state_dict = convert_to_lora(lokr_model, rank=0.5)
        assert lora_config.r == 1  # dummy value
        assert lora_config.lora_alpha == 1  # dummy value
        assert lora_config.rank_pattern
        assert lora_config.alpha_pattern
        # rank and alpha are always the same, i.e. scaling is 1
        assert lora_config.rank_pattern == lora_config.alpha_pattern
        # for each module, two LoRA weights
        assert 2 * len(lora_config.rank_pattern) == len(state_dict)

    def test_dynamic_rank_1_lora_config(self, lokr_model):
        # with a dynamic rank, we expect rank_pattern and alpha_pattern to be set
        lora_config, state_dict = convert_to_lora(lokr_model, rank=1.0)
        assert lora_config.r == 1  # dummy value
        assert lora_config.lora_alpha == 1  # dummy value
        assert lora_config.rank_pattern
        assert lora_config.alpha_pattern
        # rank and alpha are always the same, i.e. scaling is 1
        assert lora_config.rank_pattern == lora_config.alpha_pattern
        # for each module, two LoRA weights
        assert 2 * len(lora_config.rank_pattern) == len(state_dict)

    def test_threshold_wrong_value_raises(self, lokr_model):
        # if a threshold is used, it must be between 0 and 1
        msg = "If rank is a float, it is interpreted as a threshold. It must be between 0 and 1 but got 123.0"
        with pytest.raises(ValueError, match=msg):
            convert_to_lora(lokr_model, rank=123.0)

        msg = "If rank is a float, it is interpreted as a threshold. It must be between 0 and 1 but got -0.5"
        with pytest.raises(ValueError, match=msg):
            convert_to_lora(lokr_model, rank=-0.5)

    def test_rank_higher_than_weight_dim_raises(self, lokr_model):
        # if the requested rank is higher than the weight dimension, we should raise
        msg = re.escape("The chosen rank 123 is larger than the weight shape (16), please choose a lower rank")
        with pytest.raises(ValueError, match=msg):
            convert_to_lora(lokr_model, rank=123)

    def test_fixed_rank_0_raises(self, lokr_model):
        msg = "Passing a rank of 0 doesn't make sense"
        with pytest.raises(ValueError, match=msg):
            convert_to_lora(lokr_model, rank=0)

    def test_converting_transformers_model_works(self, lokr_model, tmp_path):
        # test that we can convert a transformers model that has loaded LoKr directly
        assert lokr_model.supports_lora_conversion()
        inputs = torch.arange(10).view(1, -1).to(self.torch_device)
        with torch.inference_mode():
            output_lokr = lokr_model(inputs, output_hidden_states=True)
        lokr_model.save_pretrained(tmp_path)

        # load directly with transformers
        loaded_model = AutoModelForCausalLM.from_pretrained(tmp_path).to(self.torch_device)
        with torch.inference_mode():
            output_loaded = loaded_model(inputs, output_hidden_states=True)
        # sanity check
        atol, rtol = 1e-4, 1e-4
        assert torch.allclose(output_lokr.logits, output_loaded.logits, atol=atol, rtol=rtol)

        save_as_lora(tmp_path / "converted", lokr_model, rank=8)
        lora_model = AutoModelForCausalLM.from_pretrained(tmp_path / "converted").to(self.torch_device)
        # With from_pretrained, we don't get a load_result and thus cannot check for missing keys. As a proxy,
        # instead check that no LoRA weight is all zeros (which would indicate a missing weight)
        for name, param in lora_model.named_parameters():
            if (".lora_A" in name) or (".lora_B" in name):
                assert not torch.all(param == 0)
        with torch.inference_mode():
            output_converted = lora_model(inputs, output_hidden_states=True)
        assert 0.0 < self.get_mse(output_converted, output_lokr) < 0.1

    def test_converted_lora_approximates_original_adapter(self, lokr_model):
        """Core conversion test: the converted LoRA output is close to LoKr's, both fixed and dynamic rank."""
        inputs = torch.arange(10).view(1, -1).to(self.torch_device)
        with torch.inference_mode():
            with lokr_model.disable_adapter():
                output_base = lokr_model(inputs, output_hidden_states=True)
            output_lokr = lokr_model(inputs, output_hidden_states=True)
        # sanity check
        atol, rtol = 1e-4, 1e-4
        assert not torch.allclose(output_base.logits, output_lokr.logits, atol=atol, rtol=rtol)

        ##############
        # fixed rank #
        ##############

        lora_config, state_dict = convert_to_lora(lokr_model, rank=8)
        base_model = self.get_base_model()
        lora_model = get_peft_model(base_model, lora_config).eval()

        # by default, the LoRA model should be an identity transform
        with torch.inference_mode():
            output_lora = lora_model(inputs, output_hidden_states=True)
        assert torch.allclose(output_base.logits, output_lora.logits, atol=atol, rtol=rtol)

        # load the converted LoRA weights
        load_result = set_peft_model_state_dict(lora_model, state_dict)
        assert not load_result.unexpected_keys
        # sanity check the number of trainable parameters
        num_train_params, total_params = lora_model.get_nb_trainable_parameters()
        assert 100 < num_train_params < 0.1 * total_params

        with torch.inference_mode():
            output_converted = lora_model(inputs, output_hidden_states=True)
        mse_lora = self.get_mse(output_lora, output_lokr)
        mse_converted = self.get_mse(output_converted, output_lokr)
        assert mse_lora > 0.5
        assert 0.0 < mse_converted < 0.1

        ###############################
        # this time with dynamic rank #
        ###############################

        lora_config, state_dict = convert_to_lora(lokr_model, rank=0.9)
        base_model = self.get_base_model()
        lora_model = get_peft_model(base_model, lora_config).eval()
        load_result = set_peft_model_state_dict(lora_model, state_dict)
        assert not load_result.unexpected_keys
        # sanity check the number of trainable parameters
        num_train_params, total_params = lora_model.get_nb_trainable_parameters()
        assert 100 < num_train_params < 0.1 * total_params

        with torch.inference_mode():
            output_converted = lora_model(inputs, output_hidden_states=True)
        mse_converted = self.get_mse(output_converted, output_lokr)
        assert 0.0 < mse_converted < 0.1

    def test_with_tqdm_works(self, lokr_model, capsys):
        # pass progressbar=True to use tqdm
        convert_to_lora(lokr_model, rank=8, progressbar=True)
        captured = capsys.readouterr()
        # tqdm writes to stderr
        assert "Converting to LoRA" in captured.err

    def test_save_as_lora(self, lokr_model, tmp_path):
        # whether using save_as_lora gives the same result as convert_to_lora
        inputs = torch.arange(10).view(1, -1).to(self.torch_device)
        atol, rtol = 1e-4, 1e-4

        lora_config, state_dict = convert_to_lora(lokr_model, rank=8)
        base_model = self.get_base_model()
        lora_model = get_peft_model(base_model, lora_config).eval()
        load_result = set_peft_model_state_dict(lora_model, state_dict)
        assert not load_result.unexpected_keys
        with torch.inference_mode():
            output_before = lora_model(inputs).logits

        # test that save_as_lora works as expected
        save_as_lora(tmp_path, lokr_model, rank=8)
        base_model = self.get_base_model()
        loaded_model = PeftModel.from_pretrained(base_model, tmp_path).to(self.torch_device)
        with torch.inference_mode():
            output_after = loaded_model(inputs).logits
        assert torch.allclose(output_before, output_after, atol=atol, rtol=rtol)

    def test_model_without_peft_config(self, lokr_model):
        # Conversion also works with models that don't have a PeftConfig on them. This is a bit of a convoluted case,
        # but conversion doesn't strictly rely on an existing peft_config, so it should still work.
        def unwrap(peft_model):
            # strip the PeftModel wrapper and remove the peft_config attribute
            unwrapped = peft_model.get_base_model()
            del unwrapped.peft_config
            return unwrapped

        inputs = torch.arange(10).view(1, -1).to(self.torch_device)
        with torch.inference_mode():
            output_lokr = lokr_model(inputs, output_hidden_states=True)

        # remove the PeftModel wrapper and the peft_config attribute -- this should still work
        unwrapped_lokr_model = unwrap(lokr_model)
        lora_config, state_dict = convert_to_lora(unwrapped_lokr_model, rank=8)
        base_model = self.get_base_model()
        lora_model = get_peft_model(base_model, lora_config).eval()
        unwrapped_lora_model = unwrap(lora_model)

        # Note: On the unwrapped model, we cannot use set_peft_model_state_dict, as that requires a peft_config. Thus,
        # we need to manually inject the adapter name into state_dict keys, which is done automatically when using
        # set_peft_model_state_dict.
        new_state_dict = {}
        for k, v in state_dict.items():
            new_k = k.replace(".lora_A.weight", ".lora_A.default.weight")
            new_k = new_k.replace(".lora_B.weight", ".lora_B.default.weight")
            new_state_dict[new_k] = v
        load_result = unwrapped_lora_model.load_state_dict(new_state_dict, strict=False)
        assert not load_result.unexpected_keys

        with torch.inference_mode():
            output_converted = lora_model(inputs, output_hidden_states=True)
        mse = self.get_mse(output_converted, output_lokr)
        assert 0.0 < mse < 0.1

    def test_converted_lora_to_lora_works_and_warns(self):
        # In general, there is no need to convert LoRA to LoRA, but it should still work. One possible use case would be
        # to shrink the rank of an existing LoRA adapter. The resulting correlation in this test is surprisingly high,
        # probably because the initial LoRA was not trained but initialized with random weights.
        inputs = torch.arange(10).view(1, -1).to(self.torch_device)
        base_model = self.get_base_model()
        with torch.inference_mode():
            output_base = base_model(inputs, output_hidden_states=True)

        orig_lora_config = LoraConfig(r=16, init_lora_weights=False)
        orig_lora_model = get_peft_model(base_model, orig_lora_config).eval()
        with torch.inference_mode():
            output_orig_lora = orig_lora_model(inputs, output_hidden_states=True)
        # sanity check
        atol, rtol = 1e-4, 1e-4
        assert not torch.allclose(output_base.logits, output_orig_lora.logits)

        # convert from rank 16 to rank 8
        msg = "Converting a PEFT adapter to LoRA that is already a LoRA adapter"
        with pytest.warns(UserWarning, match=msg):
            # check that a warning was issued
            lora_config, state_dict = convert_to_lora(orig_lora_model, rank=8)
        base_model = self.get_base_model()
        lora_model = get_peft_model(base_model, lora_config).eval()

        # load the converted LoRA weights
        load_result = set_peft_model_state_dict(lora_model, state_dict)
        assert not load_result.unexpected_keys
        with torch.inference_mode():
            output_converted = lora_model(inputs, output_hidden_states=True)
        mse_converted = self.get_mse(output_converted, output_orig_lora)
        assert 0.0 < mse_converted < 0.1

    def test_converted_lora_with_multiple_adapters(self, lokr_model):
        # ensure that we can convert specific adapters when multiple are present
        lokr_config = LoKrConfig(r=16, init_weights=False)
        lokr_model.add_adapter("other", lokr_config)
        inputs = torch.arange(10).view(1, -1).to(self.torch_device)
        with torch.inference_mode():
            output_lokr_default = lokr_model(inputs, output_hidden_states=True)
            lokr_model.set_adapter("other")
            output_lokr_other = lokr_model(inputs, output_hidden_states=True)
        # sanity check
        atol, rtol = 1e-4, 1e-4
        assert not torch.allclose(output_lokr_default.logits, output_lokr_other.logits, atol=atol, rtol=rtol)

        # convert the default adapter
        lora_config_default, state_dict_default = convert_to_lora(lokr_model, rank=8)
        base_model = self.get_base_model()
        lora_model_default = get_peft_model(base_model, lora_config_default).eval()
        # load the converted LoRA weights for the default adapter
        load_result = set_peft_model_state_dict(lora_model_default, state_dict_default)
        assert not load_result.unexpected_keys
        with torch.inference_mode():
            output_converted_default = lora_model_default(inputs, output_hidden_states=True)

        # convert the other adapter
        lora_config_other, state_dict_other = convert_to_lora(lokr_model, rank=8, adapter_name="other")
        base_model = self.get_base_model()
        lora_model_other = get_peft_model(base_model, lora_config_other).eval()
        # load the converted LoRA weights for the other adapter
        load_result = set_peft_model_state_dict(lora_model_other, state_dict_other)
        assert not load_result.unexpected_keys
        with torch.inference_mode():
            output_converted_other = lora_model_other(inputs, output_hidden_states=True)

        # each converted adapter should approximate its own original, not the other one
        mse_default_default = self.get_mse(output_converted_default, output_lokr_default)
        mse_other_other = self.get_mse(output_converted_other, output_lokr_other)
        mse_default_other = self.get_mse(output_converted_default, output_lokr_other)
        mse_other_default = self.get_mse(output_converted_other, output_lokr_default)
        assert 0.0 < mse_default_default < 0.1
        assert 0.0 < mse_other_other < 0.1
        assert mse_default_other > 0.5
        assert mse_other_default > 0.5

    def test_convert_model_with_modules_to_save(self):
        # If the original adapter defines modules_to_save, these need to be included in the LoRA adapter
        model = self.get_base_model()
        inputs = torch.arange(10).view(1, -1).to(self.torch_device)
        with torch.inference_mode():
            output_base = model(inputs, output_hidden_states=True)

        # lokr is initialized as identity transform to ensure that modules_to_save is the thing that impacts the output
        lokr_config = LoKrConfig(modules_to_save=["0.fc1"])
        lokr_model = get_peft_model(model, lokr_config)
        # ensure that the modules_to_save affects the output
        lokr_model.base_model.model.model.decoder.layers[0].fc1.modules_to_save.default.weight.data.mul_(-10.0)
        lokr_model.base_model.model.model.decoder.layers[0].fc1.modules_to_save.default.bias.data.mul_(-10.0)
        with torch.inference_mode():
            output_lokr = lokr_model(inputs, output_hidden_states=True)
        # sanity check
        atol, rtol = 1e-4, 1e-4
        assert not torch.allclose(output_base.logits, output_lokr.logits, atol=atol, rtol=rtol)

        lora_config, state_dict = convert_to_lora(lokr_model, rank=8)
        assert lora_config.modules_to_save == lokr_config.modules_to_save

        base_model = self.get_base_model()
        lora_model = get_peft_model(base_model, lora_config).eval()
        # load the converted LoRA weights
        load_result = set_peft_model_state_dict(lora_model, state_dict)
        assert not load_result.unexpected_keys
        with torch.inference_mode():
            output_converted = lora_model(inputs, output_hidden_states=True)
        mse_converted = self.get_mse(output_converted, output_lokr)
        # here we expect an actual loss of 0, since only the modules_to_save affect the result, and those are identical
        assert mse_converted == 0.0

    @pytest.mark.parametrize("bias", ["c3a_only", "all"])
    def test_convert_model_with_trainable_bias_raises(self, bias):
        # If the original adapter includes trainable bias terms, we raise. LoKr doesn't support this, so taking C3A
        model = self.get_base_model()
        inputs = torch.arange(10).view(1, -1).to(self.torch_device)
        c3a_config = C3AConfig(block_size=4, bias=bias)
        c3a_model = get_peft_model(model, c3a_config)
        msg = "The adapter's config sets bias"
        with pytest.raises(ValueError, match=msg):
            convert_to_lora(c3a_model, rank=8)

    @pytest.mark.skipif(platform.system() != "Linux", reason="Running test involving torch.compile only on Linux.")
    def test_with_torch_compile(self, lokr_model):
        # ensure that we can call lora conversion with compilation
        lora_config_no_comp, state_dict_no_comp = convert_to_lora(lokr_model, rank=8)
        lora_config_comp, state_dict_comp = convert_to_lora(
            lokr_model, rank=8, compile_kwargs={"mode": "max-autotune-no-cudagraphs"}
        )
        # compilation must not change the result, only (potentially) the speed
        assert lora_config_no_comp.to_dict() == lora_config_comp.to_dict()
        assert state_dict_no_comp.keys() == state_dict_comp.keys()
        for key, weight_no_comp in state_dict_no_comp.items():
            weight_comp = state_dict_comp[key]
            assert torch.allclose(weight_comp, weight_no_comp)

    @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
    def test_convert_float16_dtype(self, dtype):
        """Conversion should preserve half-precision dtypes and still approximate the original adapter."""
        inputs = torch.arange(10).view(1, -1).to(self.torch_device)
        torch.manual_seed(0)
        base_model = self.get_base_model().to(dtype)
        with torch.inference_mode():
            output_base = base_model(inputs, output_hidden_states=True)

        # load a LoKr model with 16 bit precision
        lokr_model = get_peft_model(base_model, LoKrConfig(init_weights=False), autocast_adapter_dtype=False)
        with torch.inference_mode():
            output_lokr = lokr_model(inputs, output_hidden_states=True)
        # sanity check
        atol, rtol = 1e-4, 1e-4
        assert not torch.allclose(output_base.logits, output_lokr.logits, atol=atol, rtol=rtol)

        lora_config, state_dict = convert_to_lora(lokr_model, rank=8)
        # the converted weights should keep the model's dtype
        for weight in state_dict.values():
            assert weight.dtype == dtype

        base_model = self.get_base_model().to(dtype)
        lora_model = get_peft_model(base_model, lora_config, autocast_adapter_dtype=False).eval()
        # load the converted LoRA weights
        load_result = set_peft_model_state_dict(lora_model, state_dict)
        assert not load_result.unexpected_keys
        with torch.inference_mode():
            output_converted = lora_model(inputs, output_hidden_states=True)
        mse_converted = self.get_mse(output_converted, output_lokr)
        assert 0.0 < mse_converted < 0.1
| {
"repo_id": "huggingface/peft",
"file_path": "tests/test_lora_conversion.py",
"license": "Apache License 2.0",
"lines": 442,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:examples/cartridge_self_study/arxiv_synthesize.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pathlib import Path
from synthesize import synthesize_self_study_jsonl
from transformers import AutoTokenizer
def main():
    """CLI entry point: synthesize a self-study distillation dataset (JSONL) from a text corpus.

    Parses CLI arguments, loads and optionally truncates the corpus, instantiates either a vLLM
    or a transformers model, and delegates the actual generation to `synthesize_self_study_jsonl`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default="Qwen/Qwen2.5-0.5B-Instruct")
    parser.add_argument(
        "--corpus_path",
        type=str,
        default=str(Path(__file__).resolve().parent / "data/cartridges.tex"),
    )
    parser.add_argument("--out_jsonl", type=str, default="distill.jsonl")
    parser.add_argument("--num_samples", type=int, default=256)
    parser.add_argument("--seed_prompts", type=str, default="structuring,summarization,question,use_cases,creative")
    parser.add_argument("--max_new_tokens", type=int, default=512)
    parser.add_argument("--temperature", type=float, default=0.7)
    parser.add_argument("--top_p", type=float, default=0.95)
    parser.add_argument("--max_corpus_tokens", type=int, default=2048)
    parser.add_argument(
        "--use_vllm",
        action="store_true",
        help="Use vLLM for faster generation with automatic prefix caching.",
    )
    parser.add_argument("--seed", type=int, default=0, help="Seed for deterministic prompt-type shuffling.")
    parser.add_argument(
        "--tensor_parallel_size",
        type=int,
        default=1,
        help="Tensor parallel size for vLLM (number of GPUs).",
    )
    args = parser.parse_args()

    corpus_text = Path(args.corpus_path).read_text(encoding="utf-8")

    tokenizer = AutoTokenizer.from_pretrained(args.model)
    # some tokenizers ship without a pad token; fall back to the EOS token
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # truncate the corpus to the token budget, then decode back to plain text
    if args.max_corpus_tokens is not None:
        ids = tokenizer(
            corpus_text,
            add_special_tokens=False,
            truncation=True,
            max_length=args.max_corpus_tokens,
        )["input_ids"]
        corpus_text = tokenizer.decode(ids, skip_special_tokens=True)

    if args.use_vllm:
        # lazy import: vLLM is an optional, heavy dependency
        from vllm import LLM

        model = LLM(
            model=args.model,
            tensor_parallel_size=args.tensor_parallel_size,
            enable_prefix_caching=True,
        )
    else:
        import torch
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(args.model, dtype=torch.bfloat16, device_map="auto")

    synthesize_self_study_jsonl(
        output_path=Path(args.out_jsonl),
        model=model,
        tokenizer=tokenizer,
        corpus_text=corpus_text,
        num_samples=args.num_samples,
        seed_prompt_types=[s.strip() for s in args.seed_prompts.split(",") if s.strip()],
        max_new_tokens=args.max_new_tokens,
        temperature=args.temperature,
        top_p=args.top_p,
        use_vllm=args.use_vllm,
        seed=args.seed,
    )


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/peft",
"file_path": "examples/cartridge_self_study/arxiv_synthesize.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/cartridge_self_study/arxiv_train.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pathlib import Path
import torch
from train_distill import DistillationCollator, DistillationTrainer, DistillJsonlDataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from peft import CartridgeConfig, get_peft_model
from peft.tuners.cartridge.utils import initialize_kv_prefix_from_text
def main():
    """Train a cartridge adapter on a pre-generated distillation JSONL.

    Loads the base model, wraps it with a KV-prefix "cartridge" adapter,
    initializes the prefix from the document text, then runs top-k logit
    distillation with ``DistillationTrainer`` and saves the adapter.

    Raises:
        ValueError: if the requested accelerator device is unavailable.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, required=True, help="Model to use for both teacher and student")
    parser.add_argument("--document", type=str, required=True, help="Path to text file for KV cache initialization")
    parser.add_argument("--distill_jsonl", type=str, default="distill.jsonl")
    parser.add_argument("--output_dir", type=str, default="cartridge_adapter")
    parser.add_argument("--num_virtual_tokens", type=int, default=256)
    parser.add_argument("--num_frozen_tokens", type=int, default=1)
    parser.add_argument("--top_k", type=int, default=20)
    parser.add_argument("--per_device_train_batch_size", type=int, default=1)
    parser.add_argument("--learning_rate", type=float, default=1e-3)
    parser.add_argument("--max_steps", type=int, default=1000)
    parser.add_argument("--device", type=str, default="cuda", choices=["cpu", "mps", "cuda", "xpu"])
    parser.add_argument(
        "--max_init_length", type=int, default=2048, help="Max tokens for text initialization (truncate long docs)"
    )
    args = parser.parse_args()
    # Fail fast when the requested accelerator is not usable.
    if args.device == "mps" and not (hasattr(torch.backends, "mps") and torch.backends.mps.is_available()):
        raise ValueError("Requested device 'mps' but MPS is not available.")
    # BUGFIX: "xpu" is an accepted --device choice, but this script (unlike
    # train_distill.py) neither checked XPU availability nor selected fp16 for
    # it. Mirror train_distill.py so both entry points behave the same.
    if args.device == "xpu" and not torch.xpu.is_available():
        raise ValueError("Requested device 'xpu' but XPU is not available.")
    if args.device == "cuda" and not torch.cuda.is_available():
        raise ValueError("Requested device 'cuda' but CUDA is not available.")
    # Accelerators get fp16; CPU keeps the checkpoint's default dtype.
    model_dtype = torch.float16 if args.device in {"cuda", "mps", "xpu"} else None
    device_map = args.device if args.device != "cpu" else None
    tokenizer = AutoTokenizer.from_pretrained(args.model)
    if tokenizer.pad_token is None:
        # Many causal-LM tokenizers ship without a pad token; reuse EOS.
        tokenizer.pad_token = tokenizer.eos_token
    base_model = AutoModelForCausalLM.from_pretrained(args.model, dtype=model_dtype, device_map=device_map)
    model = get_peft_model(
        base_model,
        CartridgeConfig(
            task_type="CAUSAL_LM",
            num_virtual_tokens=args.num_virtual_tokens,
            num_frozen_tokens=args.num_frozen_tokens,
        ),
    )
    print(f"Initializing cartridge from document: {args.document}", flush=True)
    document_text = Path(args.document).read_text()
    # Seed the trainable KV prefix from the document so training starts from a
    # meaningful compressed representation rather than random values.
    initialize_kv_prefix_from_text(
        model,
        tokenizer,
        text=document_text,
        use_chat_template=False,
        max_length=args.max_init_length,
    )
    print(f"Cartridge initialized with {args.num_virtual_tokens} tokens from text", flush=True)
    ds = DistillJsonlDataset(args.distill_jsonl)
    collator = DistillationCollator(tokenizer)
    train_args = TrainingArguments(
        output_dir=args.output_dir,
        per_device_train_batch_size=args.per_device_train_batch_size,
        learning_rate=args.learning_rate,
        max_steps=args.max_steps,
        logging_steps=10,
        save_steps=100,
        report_to=[],
        remove_unused_columns=False,  # collator needs the raw record fields
        use_cpu=args.device == "cpu",
        dataloader_pin_memory=False,
    )
    trainer = DistillationTrainer(
        model=model,
        top_k=args.top_k,
        args=train_args,
        train_dataset=ds,
        data_collator=collator,
    )
    trainer.train()
    # Persist only the adapter weights, not the full base model.
    model.save_pretrained(args.output_dir)
# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/peft",
"file_path": "examples/cartridge_self_study/arxiv_train.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/cartridge_self_study/synthesize.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import random
from pathlib import Path
from transformers import AutoTokenizer
# Stage-1 meta-prompts: each one asks the model to invent a training
# question/instruction about the document that is placed in the system prompt.
# Keys are the seed prompt "types" selectable via the --seed_prompts CLI flag.
SEED_PROMPTS = {
    "structuring": (
        "Generate a single instruction asking an LLM to structure information from the document above. "
        "Be specific about what section or topic to structure. "
        "Output only the instruction, nothing else."
    ),
    "summarization": (
        "Generate a single instruction asking an LLM to summarize part of the document above. "
        "Be explicit about which section to summarize. "
        "Output only the instruction, nothing else."
    ),
    "question": (
        "Generate a question that tests knowledge of the document above. "
        "Include specific details (names, dates, numbers) so the question is unambiguous. "
        "Output only the question, nothing else."
    ),
    "use_cases": (
        "Think of a practical real-world task someone could accomplish using knowledge from the document. "
        "Generate a single question or instruction reflecting that use case. "
        "Output only the question/instruction, nothing else."
    ),
    "creative": (
        "Generate a creative question inspired by the document above. Output only the question, nothing else."
    ),
}
# Chat template kwargs to disable thinking mode for models like Qwen3
CHAT_TEMPLATE_KWARGS = {"enable_thinking": False}
# Token budget for stage-1 question generation; stage-2 answers use the
# --max_new_tokens CLI value instead.
MAX_NEW_TOKENS_FOR_QUESTIONS = 256
def synthesize_self_study_jsonl(
    *,
    output_path: Path,
    model,
    tokenizer,
    corpus_text: str,
    num_samples: int,
    seed_prompt_types: list[str],
    max_new_tokens: int,
    temperature: float,
    top_p: float,
    use_vllm: bool = False,
    seed: int = 0,
):
    """
    Synthesize self-study data for cartridge training.
    Uses the full corpus as context for all samples, varying only the seed prompt.
    With vLLM's prefix caching, the document KV cache is computed once and reused.
    If use_vllm=True, `model` should be a vllm.LLM instance.
    Otherwise, `model` should be a HuggingFace model.

    Raises:
        ValueError: if `seed_prompt_types` is empty or contains an unknown type.
    """
    # Validate inputs BEFORE any destructive filesystem side effect: the
    # original order unlinked an existing output file first, so a typo in
    # --seed_prompts would wipe a previously generated dataset.
    if not seed_prompt_types:
        raise ValueError("seed_prompt_types must not be empty")
    for t in seed_prompt_types:
        if t not in SEED_PROMPTS:
            raise ValueError(f"Unknown seed prompt type '{t}', expected one of: {sorted(SEED_PROMPTS)}")
    output_path.parent.mkdir(parents=True, exist_ok=True)
    if output_path.exists():
        # Start fresh; the workers below append one JSON line per sample.
        output_path.unlink()
    # Pre-generate prompt indices (cycling through seed prompt types), then
    # shuffle deterministically so prompt types are interleaved.
    prompt_indices = [i % len(seed_prompt_types) for i in range(num_samples)]
    rng = random.Random(seed)
    rng.shuffle(prompt_indices)
    # Both backends share the same keyword interface; dispatch on use_vllm.
    synthesize = _synthesize_vllm if use_vllm else _synthesize_hf
    synthesize(
        output_path=output_path,
        model=model,
        tokenizer=tokenizer,
        corpus_text=corpus_text,
        seed_prompt_types=seed_prompt_types,
        prompt_indices=prompt_indices,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
    )
def _synthesize_vllm(
    *,
    output_path: Path,
    model,
    tokenizer,
    corpus_text: str,
    seed_prompt_types: list[str],
    prompt_indices: list[int],
    max_new_tokens: int,
    temperature: float,
    top_p: float,
):
    """Synthesize using vLLM with prefix caching (two-stage like original cartridges).

    Stage 1: Generate questions using meta-prompts (all share document prefix)
    Stage 2: Generate answers to those questions (all share document prefix)

    Each output record stores teacher ids (document + question + answer),
    student ids (question + answer), and `ctx_len`, the difference between the
    two prompt lengths used to align logits during distillation.
    """
    from vllm import SamplingParams

    # Stage 1: Generate questions (sampled so the dataset is diverse).
    question_messages = [
        [
            {"role": "system", "content": corpus_text},
            {"role": "user", "content": SEED_PROMPTS[seed_prompt_types[prompt_idx]]},
        ]
        for prompt_idx in prompt_indices
    ]
    question_params = SamplingParams(
        max_tokens=MAX_NEW_TOKENS_FOR_QUESTIONS,
        # temperature <= 0 means greedy decoding; neutralize top_p as well.
        temperature=temperature if temperature > 0 else 0.0,
        top_p=top_p if temperature > 0 else 1.0,
    )
    print("Stage 1: Generating questions...")
    question_outputs = model.chat(
        question_messages,
        question_params,
        use_tqdm=True,
        chat_template_kwargs=CHAT_TEMPLATE_KWARGS,
    )
    questions = [out.outputs[0].text.strip() for out in question_outputs]
    # Stage 2: Generate answers, greedily, so the distillation target is deterministic.
    answer_messages = [
        [
            {"role": "system", "content": corpus_text},
            {"role": "user", "content": question},
        ]
        for question in questions
    ]
    answer_params = SamplingParams(
        max_tokens=max_new_tokens,
        temperature=0.0,
        top_p=1.0,
    )
    print("Stage 2: Generating answers...")
    answer_outputs = model.chat(
        answer_messages,
        answer_params,
        use_tqdm=True,
        chat_template_kwargs=CHAT_TEMPLATE_KWARGS,
    )
    # Build training records. Generation is already complete here, so open the
    # output once instead of re-opening it in append mode for every record
    # (the original also carried an unused enumerate index).
    with output_path.open("a", encoding="utf-8") as f:
        for question, answer_out in zip(questions, answer_outputs):
            # Get the answer token IDs directly from vLLM output (avoids decode/re-encode mismatch)
            answer_ids = list(answer_out.outputs[0].token_ids)
            teacher_prompt_ids = tokenizer.apply_chat_template(
                [{"role": "system", "content": corpus_text}, {"role": "user", "content": question}],
                tokenize=True,
                add_generation_prompt=True,
                **CHAT_TEMPLATE_KWARGS,
            )
            student_prompt_ids = tokenizer.apply_chat_template(
                [{"role": "user", "content": question}],
                tokenize=True,
                add_generation_prompt=True,
                **CHAT_TEMPLATE_KWARGS,
            )
            record = {
                "teacher_input_ids": teacher_prompt_ids + answer_ids,
                "student_input_ids": student_prompt_ids + answer_ids,
                "ctx_len": len(teacher_prompt_ids) - len(student_prompt_ids),
            }
            f.write(json.dumps(record) + "\n")
def _synthesize_hf(
    *,
    output_path: Path,
    model,
    tokenizer,
    corpus_text: str,
    seed_prompt_types: list[str],
    prompt_indices: list[int],
    max_new_tokens: int,
    temperature: float,
    top_p: float,
):
    """Synthesize using HuggingFace transformers (two-stage, one sample at a time).

    For each prompt index: (1) sample a question about `corpus_text` using the
    corresponding seed meta-prompt, (2) greedily generate the answer with the
    document in context, then append one JSON record to `output_path`.
    Records are written incrementally, one per sample, so partial results
    survive an interrupted run.
    """
    import torch
    from tqdm import tqdm
    # Fall back to CPU when the model does not expose a `.device` attribute.
    device = getattr(model, "device", torch.device("cpu"))
    model.eval()
    for prompt_idx in tqdm(prompt_indices, desc="Generating samples"):
        meta_prompt = SEED_PROMPTS[seed_prompt_types[prompt_idx]]
        # Stage 1: Generate question
        # return_dict=False yields a bare input_ids tensor (no attention mask).
        question_input = tokenizer.apply_chat_template(
            [{"role": "system", "content": corpus_text}, {"role": "user", "content": meta_prompt}],
            tokenize=True,
            add_generation_prompt=True,
            return_tensors="pt",
            return_dict=False,
            **CHAT_TEMPLATE_KWARGS,
        ).to(device)
        gen_kwargs = {
            "max_new_tokens": MAX_NEW_TOKENS_FOR_QUESTIONS,
            # temperature <= 0 means greedy decoding for the question too.
            "do_sample": temperature > 0,
            "pad_token_id": tokenizer.pad_token_id or tokenizer.eos_token_id,
        }
        if temperature > 0:
            # Floor at 1e-5: HF generate rejects temperature == 0 with sampling on.
            gen_kwargs["temperature"] = max(temperature, 1e-5)
            gen_kwargs["top_p"] = top_p
        with torch.no_grad():
            question_out = model.generate(question_input, **gen_kwargs)
        # generate() returns prompt + continuation; slice off the prompt.
        question_tokens = question_out[0, question_input.shape[1] :].tolist()
        question = tokenizer.decode(question_tokens, skip_special_tokens=True).strip()
        # Stage 2: Generate answer
        # Teacher prompt sees the document; student prompt sees only the question.
        teacher_input = tokenizer.apply_chat_template(
            [{"role": "system", "content": corpus_text}, {"role": "user", "content": question}],
            tokenize=True,
            add_generation_prompt=True,
            return_tensors="pt",
            return_dict=False,
            **CHAT_TEMPLATE_KWARGS,
        ).to(device)
        student_input = tokenizer.apply_chat_template(
            [{"role": "user", "content": question}],
            tokenize=True,
            add_generation_prompt=True,
            return_tensors="pt",
            return_dict=False,
            **CHAT_TEMPLATE_KWARGS,
        ).to(device)
        with torch.no_grad():
            # Greedy answer so the distillation target is deterministic.
            answer_out = model.generate(
                teacher_input,
                max_new_tokens=max_new_tokens,
                do_sample=False,
                pad_token_id=tokenizer.pad_token_id or tokenizer.eos_token_id,
            )
        answer_tokens = answer_out[0, teacher_input.shape[1] :].tolist()
        record = {
            # answer_out[0] already contains teacher prompt + answer tokens.
            "teacher_input_ids": answer_out[0].tolist(),
            "student_input_ids": student_input[0].tolist() + answer_tokens,
            # Offset between teacher and student prompts, used to align logits.
            "ctx_len": int(teacher_input.shape[1]) - int(student_input.shape[1]),
        }
        # Append immediately (per sample) so progress is not lost on a crash.
        with output_path.open("a", encoding="utf-8") as f:
            f.write(json.dumps(record) + "\n")
def main():
    """CLI entry point: load the corpus and model, then write the self-study JSONL."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default="Qwen/Qwen2.5-0.5B-Instruct")
    parser.add_argument("--corpus_path", type=str, required=True)
    parser.add_argument("--out_jsonl", type=str, required=True)
    parser.add_argument("--num_samples", type=int, default=1024)
    parser.add_argument("--seed_prompts", type=str, default="structuring,summarization,question,use_cases,creative")
    parser.add_argument("--max_new_tokens", type=int, default=512)
    parser.add_argument("--temperature", type=float, default=0.7)
    parser.add_argument("--top_p", type=float, default=0.95)
    parser.add_argument(
        "--max_corpus_tokens",
        type=int,
        default=None,
        help="Optional cap on the number of tokens used from the corpus.",
    )
    parser.add_argument(
        "--use_vllm",
        action="store_true",
        help="Use vLLM for faster generation with automatic prefix caching.",
    )
    parser.add_argument("--seed", type=int, default=0, help="Seed for deterministic prompt-type shuffling.")
    parser.add_argument(
        "--tensor_parallel_size",
        type=int,
        default=1,
        help="Tensor parallel size for vLLM (number of GPUs).",
    )
    args = parser.parse_args()

    corpus = Path(args.corpus_path).read_text(encoding="utf-8")
    tokenizer = AutoTokenizer.from_pretrained(args.model)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Enforce the optional token budget by round-tripping the corpus through
    # the tokenizer with truncation enabled.
    if args.max_corpus_tokens is not None:
        token_ids = tokenizer(
            corpus,
            add_special_tokens=False,
            truncation=True,
            max_length=args.max_corpus_tokens,
        )["input_ids"]
        corpus = tokenizer.decode(token_ids, skip_special_tokens=True)

    # Pick the generation backend; both expose the interface expected by
    # synthesize_self_study_jsonl.
    if args.use_vllm:
        from vllm import LLM

        generator = LLM(
            model=args.model,
            tensor_parallel_size=args.tensor_parallel_size,
            enable_prefix_caching=True,
        )
    else:
        import torch
        from transformers import AutoModelForCausalLM

        generator = AutoModelForCausalLM.from_pretrained(args.model, dtype=torch.bfloat16, device_map="auto")

    prompt_types = [name.strip() for name in args.seed_prompts.split(",") if name.strip()]
    synthesize_self_study_jsonl(
        output_path=Path(args.out_jsonl),
        model=generator,
        tokenizer=tokenizer,
        corpus_text=corpus,
        num_samples=args.num_samples,
        seed_prompt_types=prompt_types,
        max_new_tokens=args.max_new_tokens,
        temperature=args.temperature,
        top_p=args.top_p,
        use_vllm=args.use_vllm,
        seed=args.seed,
    )
# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/peft",
"file_path": "examples/cartridge_self_study/synthesize.py",
"license": "Apache License 2.0",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/cartridge_self_study/train_distill.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
from pathlib import Path
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments
from peft import CartridgeConfig, get_peft_model
from peft.tuners.cartridge.utils import initialize_kv_prefix_from_text
class DistillJsonlDataset(Dataset):
def __init__(self, path: str | Path):
self.rows = []
with Path(path).open("r", encoding="utf-8") as f:
for line in f:
if line.strip():
self.rows.append(json.loads(line))
def __len__(self) -> int:
return len(self.rows)
def __getitem__(self, idx: int):
r = self.rows[idx]
return {
"teacher_input_ids": r["teacher_input_ids"],
"student_input_ids": r["student_input_ids"],
"ctx_len": r["ctx_len"],
}
class DistillationCollator:
    """Batch collator padding teacher and student token sequences separately.

    Produces padded id/mask tensors for both views plus a per-example
    `ctx_len` tensor (teacher-vs-student prompt length offset).
    """

    def __init__(self, tokenizer):
        self.tokenizer = tokenizer

    def _pad(self, id_lists):
        # Delegate padding to the tokenizer so its pad token/side are honored.
        return self.tokenizer.pad([{"input_ids": ids} for ids in id_lists], return_tensors="pt")

    def __call__(self, features):
        teacher = self._pad([f["teacher_input_ids"] for f in features])
        student = self._pad([f["student_input_ids"] for f in features])
        offsets = torch.tensor([int(f["ctx_len"]) for f in features], dtype=torch.long)
        return {
            "teacher_input_ids": teacher["input_ids"],
            "teacher_attention_mask": teacher["attention_mask"],
            "student_input_ids": student["input_ids"],
            "student_attention_mask": student["attention_mask"],
            "ctx_len": offsets,
        }
class DistillationTrainer(Trainer):
    """Trainer that distills the base model's top-k next-token distribution
    into the adapter-augmented model.

    The same network is run twice per step: once with the adapter disabled
    (teacher, on the longer `teacher_input_ids`) and once with the adapter
    enabled (student, on `student_input_ids`). Per-example `ctx_len` gives the
    offset of the shared suffix inside the teacher sequence, and the loss is
    the cross-entropy of the student against the teacher's top-k distribution
    on that shared region.
    """

    def __init__(self, *args, top_k: int = 20, teacher_temperature: float = 1.0, **kwargs):
        # top_k: number of teacher logits distilled per position.
        # teacher_temperature: softens the teacher distribution (floored at
        # 1e-5 in compute_loss to avoid division by zero).
        super().__init__(*args, **kwargs)
        self.top_k = int(top_k)
        self.teacher_temperature = float(teacher_temperature)

    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        """Return the top-k distillation loss (and optionally student outputs)."""
        teacher_input_ids = inputs["teacher_input_ids"].to(model.device)
        teacher_attention_mask = inputs["teacher_attention_mask"].to(model.device)
        student_input_ids = inputs["student_input_ids"].to(model.device)
        student_attention_mask = inputs["student_attention_mask"].to(model.device)
        ctx_len = inputs["ctx_len"].to(model.device)
        # Teacher pass: adapter disabled (plain base model), no gradients.
        with torch.no_grad():
            with model.disable_adapter():
                teacher_out = model(
                    input_ids=teacher_input_ids,
                    attention_mask=teacher_attention_mask,
                    use_cache=False,
                )
        teacher_logits = teacher_out.logits / max(self.teacher_temperature, 1e-5)
        # Student pass: adapter active; gradients flow only through this call.
        student_out = model(
            input_ids=student_input_ids,
            attention_mask=student_attention_mask,
            use_cache=False,
        )
        student_logits = student_out.logits
        # Vectorized distillation loss (avoids Python `.item()` in per-example indexing).
        # Align teacher logits to student positions via the per-example `ctx_len` offset.
        # Drop the last position: logits at t predict token t+1.
        student_logits = student_logits[:, :-1, :]  # [B, Ls-1, V]
        seq_len = student_logits.shape[1]
        pos = torch.arange(seq_len, device=student_logits.device)[None, :]  # [1, Ls-1]
        student_len = student_attention_mask.sum(dim=1).to(torch.long)  # [B]
        # Valid positions are those with a real next-token target.
        # NOTE(review): this indexing assumes right-padded batches — confirm
        # the tokenizer's padding side matches.
        valid = pos < (student_len - 1).clamp(min=0)[:, None]  # [B, Ls-1]
        # Teacher position for student position t is t shifted by the context length.
        teacher_pos = ctx_len[:, None] + pos  # [B, Ls-1]
        in_bounds = teacher_pos < teacher_logits.shape[1]
        valid = valid & in_bounds
        # Clamp so gather never indexes out of range; clamped slots are masked out via `valid`.
        teacher_pos = teacher_pos.clamp(min=0, max=teacher_logits.shape[1] - 1)
        teacher_slice = teacher_logits.gather(
            dim=1, index=teacher_pos[:, :, None].expand(-1, -1, teacher_logits.shape[-1])
        )  # [B, Ls-1, V]
        # Cross-entropy restricted to the teacher's top-k token ids per position.
        k = min(self.top_k, teacher_slice.shape[-1])
        topk_ids = torch.topk(teacher_slice, k=k, dim=-1).indices  # [B, Ls-1, K]
        teacher_logprobs = F.log_softmax(teacher_slice, dim=-1).gather(-1, topk_ids)
        student_logprobs = F.log_softmax(student_logits, dim=-1).gather(-1, topk_ids)
        loss_by_pos = -(teacher_logprobs.exp() * student_logprobs).sum(dim=-1)  # [B, Ls-1]
        loss_by_pos = loss_by_pos.masked_fill(~valid, 0.0)
        # Mean over valid positions per example, then over examples with any valid position.
        denom = valid.sum(dim=1).clamp(min=1)
        per_example = loss_by_pos.sum(dim=1) / denom
        if valid.any():
            loss = per_example[valid.any(dim=1)].mean()
        else:
            # Degenerate batch with no usable positions: zero loss, keeps dtype/device.
            loss = student_logits.new_zeros(())
        return (loss, student_out) if return_outputs else loss
def main():
    """CLI entry point: build a cartridge-adapted model and distill into it."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, required=True, help="Model to use for both teacher and student")
    parser.add_argument("--distill_jsonl", type=str, required=True)
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument("--document", type=str, required=True, help="Path to text file for KV cache initialization")
    parser.add_argument("--num_virtual_tokens", type=int, default=256)
    parser.add_argument("--num_frozen_tokens", type=int, default=1)
    parser.add_argument("--top_k", type=int, default=20)
    parser.add_argument("--per_device_train_batch_size", type=int, default=1)
    parser.add_argument("--learning_rate", type=float, default=1e-3)
    parser.add_argument("--max_steps", type=int, default=1000)
    parser.add_argument("--device", type=str, default="cuda", choices=["cpu", "mps", "cuda", "xpu"])
    parser.add_argument(
        "--max_init_length", type=int, default=2048, help="Max tokens for text initialization (truncate long docs)"
    )
    args = parser.parse_args()

    # Fail fast when the requested accelerator is not actually usable.
    if args.device == "mps" and not (hasattr(torch.backends, "mps") and torch.backends.mps.is_available()):
        raise ValueError("Requested device 'mps' but MPS is not available.")
    if args.device == "xpu" and not torch.xpu.is_available():
        raise ValueError("Requested device 'xpu' but XPU is not available.")
    if args.device == "cuda" and not torch.cuda.is_available():
        raise ValueError("Requested device 'cuda' but CUDA is not available.")

    # Accelerators get fp16; CPU keeps the checkpoint's default dtype.
    dtype = None if args.device == "cpu" else torch.float16
    placement = None if args.device == "cpu" else args.device

    tokenizer = AutoTokenizer.from_pretrained(args.model)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    backbone = AutoModelForCausalLM.from_pretrained(args.model, dtype=dtype, device_map=placement)
    adapter_config = CartridgeConfig(
        task_type="CAUSAL_LM",
        num_virtual_tokens=args.num_virtual_tokens,
        num_frozen_tokens=args.num_frozen_tokens,
    )
    model = get_peft_model(backbone, adapter_config)

    print(f"Initializing cartridge from document: {args.document}", flush=True)
    # Seed the trainable KV prefix from the document text.
    initialize_kv_prefix_from_text(
        model,
        tokenizer,
        text=Path(args.document).read_text(),
        use_chat_template=False,
        max_length=args.max_init_length,
    )
    print(f"Cartridge initialized with {args.num_virtual_tokens} tokens from text", flush=True)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        per_device_train_batch_size=args.per_device_train_batch_size,
        learning_rate=args.learning_rate,
        max_steps=args.max_steps,
        logging_steps=10,
        save_steps=100,
        report_to=[],
        remove_unused_columns=False,
        use_cpu=args.device == "cpu",
        dataloader_pin_memory=False,
    )
    trainer = DistillationTrainer(
        model=model,
        top_k=args.top_k,
        args=training_args,
        train_dataset=DistillJsonlDataset(args.distill_jsonl),
        data_collator=DistillationCollator(tokenizer),
    )
    trainer.train()
    # Persist only the adapter weights.
    model.save_pretrained(args.output_dir)
# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/peft",
"file_path": "examples/cartridge_self_study/train_distill.py",
"license": "Apache License 2.0",
"lines": 178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.