sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
huggingface/lerobot:tests/training/test_visual_validation.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Visual Feature Consistency Tests
This module tests the `validate_visual_features_consistency` function,
which ensures that visual features (camera observations) in a dataset/env
match the expectations defined in a policy configuration.
The purpose of this check is to prevent mismatches between what a policy expects
(e.g., `observation.images.camera1`, `camera2`, `camera3`) and what a dataset or
environment actually provides (e.g., `observation.images.top`, `side`, or fewer cameras).
"""
from pathlib import Path
import numpy as np
import pytest
from lerobot.configs.default import DatasetConfig
from lerobot.configs.policies import PreTrainedConfig
from lerobot.configs.train import TrainPipelineConfig
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.policies.factory import make_policy_config
from lerobot.scripts.lerobot_train import train
from lerobot.utils.utils import auto_select_torch_device
pytest.importorskip("transformers")
DUMMY_REPO_ID = "dummy/repo"
@pytest.fixture
def temp_dir(tmp_path):
    # Thin alias around pytest's built-in `tmp_path` fixture so tests can
    # request a per-test temporary directory under a project-specific name.
    return tmp_path
DUMMY_STATE_DIM = 6
DUMMY_ACTION_DIM = 6
IMAGE_SIZE = 8
DEVICE = auto_select_torch_device()
def make_dummy_dataset(camera_keys, tmp_path):
    """Create a minimal on-disk dummy dataset for testing rename_mapping logic.

    Args:
        camera_keys: Camera names; each becomes an ``observation.images.<cam>``
            image feature of shape (IMAGE_SIZE, IMAGE_SIZE, 3).
        tmp_path: Base temporary directory; the dataset is created under
            ``tmp_path / "_dataset"``.

    Returns:
        Tuple ``(dataset, root)`` with the created ``LeRobotDataset`` and its
        root path.
    """
    features = {
        "action": {"dtype": "float32", "shape": (DUMMY_ACTION_DIM,), "names": None},
        "observation.state": {"dtype": "float32", "shape": (DUMMY_STATE_DIM,), "names": None},
    }
    for cam in camera_keys:
        features[f"observation.images.{cam}"] = {
            "dtype": "image",
            "shape": (IMAGE_SIZE, IMAGE_SIZE, 3),
            "names": ["height", "width", "channel"],
        }
    # Build the root path once and reuse it (previously constructed twice).
    root = tmp_path / "_dataset"
    dataset = LeRobotDataset.create(
        repo_id=DUMMY_REPO_ID,
        fps=30,
        features=features,
        root=root,
    )
    # Two tiny episodes of three frames each: enough to exercise validation.
    for ep_idx in range(2):
        for _ in range(3):
            frame = {
                "action": np.random.randn(DUMMY_ACTION_DIM).astype(np.float32),
                "observation.state": np.random.randn(DUMMY_STATE_DIM).astype(np.float32),
            }
            for cam in camera_keys:
                # np.random.randint's upper bound is exclusive: use 256 so the
                # full uint8 pixel range 0..255 is covered (255 was previously
                # unreachable).
                frame[f"observation.images.{cam}"] = np.random.randint(
                    0, 256, size=(IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.uint8
                )
            frame["task"] = f"task_{ep_idx}"
            dataset.add_frame(frame)
        dataset.save_episode()
    dataset.finalize()
    return dataset, root
def custom_validate(train_config: TrainPipelineConfig, policy_path: str, empty_cameras: int):
    """Load the pretrained policy config into ``train_config`` for tests.

    Mimics the policy-loading part of the CLI validation step, while forcing
    ``empty_cameras`` and disabling ``push_to_hub`` (test-only overrides).
    """
    policy_cfg = PreTrainedConfig.from_pretrained(policy_path)
    policy_cfg.pretrained_path = Path(policy_path)
    # override empty_cameras and push_to_hub for testing
    policy_cfg.empty_cameras = empty_cameras
    policy_cfg.push_to_hub = False
    train_config.policy = policy_cfg
    if train_config.use_policy_training_preset:
        # Pull optimizer/scheduler presets from the freshly loaded policy config.
        train_config.optimizer = policy_cfg.get_optimizer_preset()
        train_config.scheduler = policy_cfg.get_scheduler_preset()
    return train_config
@pytest.mark.skip(reason="Skipping this test as it results OOM")
@pytest.mark.parametrize(
    "camera_keys, empty_cameras, rename_map, expect_success",
    [
        # case 1: dataset has fewer cameras than policy (3 instead of 4), but we specify empty_cameras=1 for smolvla, pi0, pi05
        (["camera1", "camera2", "camera3"], 1, {}, True),
        # case 2: dataset has 2 cameras with different names, rename_mapping provided
        (
            ["top", "side"],
            0,
            {
                "observation.images.top": "observation.images.camera1",
                "observation.images.side": "observation.images.camera2",
            },
            True,
        ),
        # case 3: dataset has 2 cameras, policy expects 3, names do not match, no empty_cameras
        (["top", "side"], 0, {}, False),
        # TODO: case 4: dataset has 2 cameras, policy expects 3, no rename_map, no empty_cameras, should raise for smolvla
        # (["camera1", "camera2"], 0, {}, False),
    ],
)
def test_train_with_camera_mismatch(camera_keys, empty_cameras, rename_map, expect_success, tmp_path):
    """Tests that training works or fails depending on camera/feature alignment.

    Builds a dummy dataset with the given camera keys, configures a smolvla
    policy, and launches a single training step. Depending on `empty_cameras`
    and `rename_map`, the run is expected to either succeed or raise
    ValueError during visual-feature validation.
    """
    # Dummy on-disk dataset; only its root path is consumed by DatasetConfig.
    _dataset, root = make_dummy_dataset(camera_keys, tmp_path)
    pretrained_path = "lerobot/smolvla_base"
    dataset_config = DatasetConfig(repo_id=DUMMY_REPO_ID, root=root)
    policy_config = make_policy_config(
        "smolvla",
        optimizer_lr=0.01,
        push_to_hub=False,
        pretrained_path=pretrained_path,
        device=DEVICE,
    )
    policy_config.empty_cameras = empty_cameras
    train_config = TrainPipelineConfig(
        dataset=dataset_config,
        policy=policy_config,
        rename_map=rename_map,
        output_dir=tmp_path / "_output",
        steps=1,  # a single step is enough to trigger feature validation
    )
    train_config = custom_validate(train_config, policy_path=pretrained_path, empty_cameras=empty_cameras)
    # HACK: disable the internal CLI validation step for tests, we did it with custom_validate
    train_config.validate = lambda: None
    if expect_success:
        train(train_config)
    else:
        # Mismatched visual features must be rejected before training starts.
        with pytest.raises(ValueError):
            train(train_config)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/training/test_visual_validation.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:examples/tutorial/act/act_training_example.py | """This script demonstrates how to train ACT Policy on a real-world dataset."""
from pathlib import Path
import torch
from lerobot.configs.types import FeatureType
from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.datasets.utils import dataset_to_policy_features
from lerobot.policies.act.configuration_act import ACTConfig
from lerobot.policies.act.modeling_act import ACTPolicy
from lerobot.policies.factory import make_pre_post_processors
def make_delta_timestamps(delta_indices: list[int] | None, fps: int) -> list[float]:
if delta_indices is None:
return [0]
return [i / fps for i in delta_indices]
def main():
    """Train ACT on a LeRobot dataset, then save and push the policy artifacts."""
    output_directory = Path("outputs/robot_learning_tutorial/act")
    output_directory.mkdir(parents=True, exist_ok=True)
    # Select your device
    device = torch.device("mps")  # or "cuda" or "cpu"
    dataset_id = "lerobot/svla_so101_pickplace"
    # This specifies the inputs the model will be expecting and the outputs it will produce
    dataset_metadata = LeRobotDatasetMetadata(dataset_id)
    features = dataset_to_policy_features(dataset_metadata.features)
    # ACTION features are outputs; everything else (state, images) is an input.
    output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION}
    input_features = {key: ft for key, ft in features.items() if key not in output_features}
    cfg = ACTConfig(input_features=input_features, output_features=output_features)
    policy = ACTPolicy(cfg)
    preprocessor, postprocessor = make_pre_post_processors(cfg, dataset_stats=dataset_metadata.stats)
    policy.train()
    policy.to(device)
    # To perform action chunking, ACT expects a given number of actions as targets
    delta_timestamps = {
        "action": make_delta_timestamps(cfg.action_delta_indices, dataset_metadata.fps),
    }
    # add image features if they are present
    delta_timestamps |= {
        k: make_delta_timestamps(cfg.observation_delta_indices, dataset_metadata.fps)
        for k in cfg.image_features
    }
    # Instantiate the dataset
    dataset = LeRobotDataset(dataset_id, delta_timestamps=delta_timestamps)
    # Create the optimizer and dataloader for offline training
    optimizer = cfg.get_optimizer_preset().build(policy.parameters())
    batch_size = 32
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=device.type != "cpu",  # pinned host memory speeds up host-to-device copies
        drop_last=True,
    )
    # Number of training steps and logging frequency (kept tiny for the tutorial)
    training_steps = 1
    log_freq = 1
    # Run training loop
    step = 0
    done = False
    while not done:
        for batch in dataloader:
            batch = preprocessor(batch)
            loss, _ = policy.forward(batch)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            if step % log_freq == 0:
                print(f"step: {step} loss: {loss.item():.3f}")
            step += 1
            if step >= training_steps:
                done = True
                break
    # Save the policy checkpoint, alongside the pre/post processors
    policy.save_pretrained(output_directory)
    preprocessor.save_pretrained(output_directory)
    postprocessor.save_pretrained(output_directory)
    # Save all assets to the Hub
    policy.push_to_hub("<user>/robot_learning_tutorial_act")
    preprocessor.push_to_hub("<user>/robot_learning_tutorial_act")
    postprocessor.push_to_hub("<user>/robot_learning_tutorial_act")
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/tutorial/act/act_training_example.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/lerobot:examples/tutorial/act/act_using_example.py | import torch
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata
from lerobot.policies.act.modeling_act import ACTPolicy
from lerobot.policies.factory import make_pre_post_processors
from lerobot.policies.utils import build_inference_frame, make_robot_action
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
MAX_EPISODES = 5
MAX_STEPS_PER_EPISODE = 20
def main():
    """Run a pretrained ACT policy for real-time control of an SO100 follower."""
    device = torch.device("mps")  # or "cuda" or "cpu"
    model_id = "<user>/robot_learning_tutorial_act"
    model = ACTPolicy.from_pretrained(model_id)
    dataset_id = "lerobot/svla_so101_pickplace"
    # This only downloads the metadata for the dataset, ~10s of MB even for large-scale datasets
    dataset_metadata = LeRobotDatasetMetadata(dataset_id)
    preprocess, postprocess = make_pre_post_processors(model.config, dataset_stats=dataset_metadata.stats)
    # find ports using lerobot-find-port
    follower_port = ...  # something like "/dev/tty.usbmodem58760431631"
    # the robot ids are used to load the right calibration files
    follower_id = ...  # something like "follower_so100"
    # Robot and environment configuration
    # Camera keys must match the name and resolutions of the ones used for training!
    # You can check the camera keys expected by a model in the info.json card on the model card on the Hub
    camera_config = {
        "side": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30),
        "up": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30),
    }
    robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id, cameras=camera_config)
    robot = SO100Follower(robot_cfg)
    robot.connect()
    for _ in range(MAX_EPISODES):
        for _ in range(MAX_STEPS_PER_EPISODE):
            # observation -> inference frame -> preprocess -> policy -> postprocess -> robot action
            obs = robot.get_observation()
            obs_frame = build_inference_frame(
                observation=obs, ds_features=dataset_metadata.features, device=device
            )
            obs = preprocess(obs_frame)
            action = model.select_action(obs)
            action = postprocess(action)
            action = make_robot_action(action, dataset_metadata.features)
            robot.send_action(action)
        print("Episode finished! Starting new episode...")
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/tutorial/act/act_using_example.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/lerobot:examples/tutorial/async-inf/policy_server.py | from lerobot.async_inference.configs import PolicyServerConfig
from lerobot.async_inference.policy_server import serve
def main():
    """Configure and launch the asynchronous-inference policy server."""
    # Fill these in for your deployment, e.g. "127.0.0.1" and 8080 for localhost.
    host = ...
    port = ...
    server_config = PolicyServerConfig(host=host, port=port)
    serve(server_config)
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/tutorial/async-inf/policy_server.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/lerobot:examples/tutorial/async-inf/robot_client.py | import threading
from lerobot.async_inference.configs import RobotClientConfig
from lerobot.async_inference.helpers import visualize_action_queue_size
from lerobot.async_inference.robot_client import RobotClient
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.robots.so_follower import SO100FollowerConfig
def main():
    """Connect a robot client to a remote policy server and run the control loop."""
    # these cameras must match the ones expected by the policy - find your cameras with lerobot-find-cameras
    # check the config.json on the Hub for the policy you are using to see the expected camera specs
    camera_cfg = {
        "up": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30),
        "side": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30),
    }
    # find ports using lerobot-find-port
    follower_port = ...  # something like "/dev/tty.usbmodem58760431631"
    # the robot ids are used to load the right calibration files
    follower_id = ...  # something like "follower_so100"
    robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id, cameras=camera_cfg)
    server_address = ...  # something like "127.0.0.1:8080" if using localhost
    # 3. Create client configuration
    client_cfg = RobotClientConfig(
        robot=robot_cfg,
        server_address=server_address,
        policy_device="mps",
        client_device="cpu",
        policy_type="act",
        pretrained_name_or_path="<user>/robot_learning_tutorial_act",
        chunk_size_threshold=0.5,  # presumably the queue fraction that triggers requesting a new chunk — TODO confirm
        actions_per_chunk=50,  # make sure this is less than the max actions of the policy
    )
    # 4. Create and start client
    client = RobotClient(client_cfg)
    # 5. Provide a textual description of the task
    task = ...
    if client.start():
        # Start action receiver thread (daemon so it never blocks interpreter exit)
        action_receiver_thread = threading.Thread(target=client.receive_actions, daemon=True)
        action_receiver_thread.start()
        try:
            # Run the control loop
            client.control_loop(task)
        except KeyboardInterrupt:
            client.stop()
            action_receiver_thread.join()
            # (Optionally) plot the action queue size
            visualize_action_queue_size(client.action_queue_size)
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/tutorial/async-inf/robot_client.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/lerobot:examples/tutorial/diffusion/diffusion_training_example.py | """This script demonstrates how to train Diffusion Policy on a real-world dataset."""
from pathlib import Path
import torch
from lerobot.configs.types import FeatureType
from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.datasets.utils import dataset_to_policy_features
from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy
from lerobot.policies.factory import make_pre_post_processors
def make_delta_timestamps(delta_indices: list[int] | None, fps: int) -> list[float]:
if delta_indices is None:
return [0]
return [i / fps for i in delta_indices]
def main():
    """Train Diffusion Policy on a LeRobot dataset, then save and push artifacts."""
    output_directory = Path("outputs/robot_learning_tutorial/diffusion")
    output_directory.mkdir(parents=True, exist_ok=True)
    # Select your device
    device = torch.device("mps")  # or "cuda" or "cpu"
    dataset_id = "lerobot/svla_so101_pickplace"
    # This specifies the inputs the model will be expecting and the outputs it will produce
    dataset_metadata = LeRobotDatasetMetadata(dataset_id)
    features = dataset_to_policy_features(dataset_metadata.features)
    # ACTION features are outputs; everything else (state, images) is an input.
    output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION}
    input_features = {key: ft for key, ft in features.items() if key not in output_features}
    cfg = DiffusionConfig(input_features=input_features, output_features=output_features)
    policy = DiffusionPolicy(cfg)
    preprocessor, postprocessor = make_pre_post_processors(cfg, dataset_stats=dataset_metadata.stats)
    policy.train()
    policy.to(device)
    # To perform action chunking, Diffusion Policy expects a given number of actions as targets
    delta_timestamps = {
        "observation.state": make_delta_timestamps(cfg.observation_delta_indices, dataset_metadata.fps),
        "action": make_delta_timestamps(cfg.action_delta_indices, dataset_metadata.fps),
    }
    # add image features if they are present
    delta_timestamps |= {
        k: make_delta_timestamps(cfg.observation_delta_indices, dataset_metadata.fps)
        for k in cfg.image_features
    }
    # Instantiate the dataset
    dataset = LeRobotDataset(dataset_id, delta_timestamps=delta_timestamps)
    # Create the optimizer and dataloader for offline training
    optimizer = cfg.get_optimizer_preset().build(policy.parameters())
    batch_size = 32
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=device.type != "cpu",  # pinned host memory speeds up host-to-device copies
        drop_last=True,
    )
    # Number of training steps and logging frequency (kept tiny for the tutorial)
    training_steps = 1
    log_freq = 1
    # Run training loop
    step = 0
    done = False
    while not done:
        for batch in dataloader:
            batch = preprocessor(batch)
            loss, _ = policy.forward(batch)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            if step % log_freq == 0:
                print(f"step: {step} loss: {loss.item():.3f}")
            step += 1
            if step >= training_steps:
                done = True
                break
    # Save the policy checkpoint, alongside the pre/post processors
    policy.save_pretrained(output_directory)
    preprocessor.save_pretrained(output_directory)
    postprocessor.save_pretrained(output_directory)
    # Save all assets to the Hub
    policy.push_to_hub("<user>/robot_learning_tutorial_diffusion")
    preprocessor.push_to_hub("<user>/robot_learning_tutorial_diffusion")
    postprocessor.push_to_hub("<user>/robot_learning_tutorial_diffusion")
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/tutorial/diffusion/diffusion_training_example.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/lerobot:examples/tutorial/diffusion/diffusion_using_example.py | import torch
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata
from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy
from lerobot.policies.factory import make_pre_post_processors
from lerobot.policies.utils import build_inference_frame, make_robot_action
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
MAX_EPISODES = 5
MAX_STEPS_PER_EPISODE = 20
def main():
    """Run a pretrained Diffusion Policy for real-time control of an SO100 follower."""
    device = torch.device("mps")  # or "cuda" or "cpu"
    model_id = "<user>/robot_learning_tutorial_diffusion"
    model = DiffusionPolicy.from_pretrained(model_id)
    dataset_id = "lerobot/svla_so101_pickplace"
    # This only downloads the metadata for the dataset, ~10s of MB even for large-scale datasets
    dataset_metadata = LeRobotDatasetMetadata(dataset_id)
    preprocess, postprocess = make_pre_post_processors(
        model.config, model_id, dataset_stats=dataset_metadata.stats
    )
    # find ports using lerobot-find-port
    follower_port = ...  # something like "/dev/tty.usbmodem58760431631"
    # the robot ids are used to load the right calibration files
    follower_id = ...  # something like "follower_so100"
    # Robot and environment configuration
    # Camera keys must match the name and resolutions of the ones used for training!
    # You can check the camera keys expected by a model in the info.json card on the model card on the Hub
    camera_config = {
        "side": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30),
        "up": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30),
    }
    robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id, cameras=camera_config)
    robot = SO100Follower(robot_cfg)
    robot.connect()
    for _ in range(MAX_EPISODES):
        for _ in range(MAX_STEPS_PER_EPISODE):
            # observation -> inference frame -> preprocess -> policy -> postprocess -> robot action
            obs = robot.get_observation()
            obs_frame = build_inference_frame(
                observation=obs, ds_features=dataset_metadata.features, device=device
            )
            obs = preprocess(obs_frame)
            action = model.select_action(obs)
            action = postprocess(action)
            action = make_robot_action(action, dataset_metadata.features)
            robot.send_action(action)
        print("Episode finished! Starting new episode...")
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/tutorial/diffusion/diffusion_using_example.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/lerobot:examples/tutorial/pi0/using_pi0_example.py | import torch
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.datasets.utils import hw_to_dataset_features
from lerobot.policies.factory import make_pre_post_processors
from lerobot.policies.pi0.modeling_pi0 import PI0Policy
from lerobot.policies.utils import build_inference_frame, make_robot_action
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
MAX_EPISODES = 5
MAX_STEPS_PER_EPISODE = 20
def main():
    """Run the pretrained pi0 base policy for real-time control of an SO100 follower."""
    device = torch.device("mps")  # or "cuda" or "cpu"
    model_id = "lerobot/pi0_base"
    model = PI0Policy.from_pretrained(model_id)
    preprocess, postprocess = make_pre_post_processors(
        model.config,
        model_id,
        # This override allows running on MPS, otherwise defaults to CUDA (if available)
        preprocessor_overrides={"device_processor": {"device": str(device)}},
    )
    # find ports using lerobot-find-port
    follower_port = ...  # something like "/dev/tty.usbmodem58760431631"
    # the robot ids are used to load the right calibration files
    follower_id = ...  # something like "follower_so100"
    # Robot and environment configuration
    # Camera keys must match the name and resolutions of the ones used for training!
    # You can check the camera keys expected by a model in the info.json card on the model card on the Hub
    camera_config = {
        "base_0_rgb": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30),
        "left_wrist_0_rgb": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30),
        "right_wrist_0_rgb": OpenCVCameraConfig(index_or_path=2, width=640, height=480, fps=30),
    }
    robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id, cameras=camera_config)
    robot = SO100Follower(robot_cfg)
    robot.connect()
    task = ""  # something like "pick the red block"
    robot_type = ""  # something like "so100_follower" for multi-embodiment datasets
    # This is used to match the raw observation keys to the keys expected by the policy
    action_features = hw_to_dataset_features(robot.action_features, "action")
    obs_features = hw_to_dataset_features(robot.observation_features, "observation")
    dataset_features = {**action_features, **obs_features}
    for _ in range(MAX_EPISODES):
        for _ in range(MAX_STEPS_PER_EPISODE):
            # observation -> inference frame -> preprocess -> policy -> postprocess -> robot action
            obs = robot.get_observation()
            obs_frame = build_inference_frame(
                observation=obs, ds_features=dataset_features, device=device, task=task, robot_type=robot_type
            )
            obs = preprocess(obs_frame)
            action = model.select_action(obs)
            action = postprocess(action)
            action = make_robot_action(action, dataset_features)
            robot.send_action(action)
        print("Episode finished! Starting new episode...")
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/tutorial/pi0/using_pi0_example.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/lerobot:examples/tutorial/rl/hilserl_example.py | import multiprocessing as mp
import signal
from pathlib import Path
from queue import Empty, Full
import torch
import torch.optim as optim
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.utils import hw_to_dataset_features
from lerobot.envs.configs import HILSerlProcessorConfig, HILSerlRobotEnvConfig
from lerobot.policies.sac.configuration_sac import SACConfig
from lerobot.policies.sac.modeling_sac import SACPolicy
from lerobot.policies.sac.reward_model.modeling_classifier import Classifier
from lerobot.rl.buffer import ReplayBuffer
from lerobot.rl.gym_manipulator import make_robot_env
from lerobot.robots.so_follower import SO100FollowerConfig
from lerobot.teleoperators.so_leader import SO100LeaderConfig
from lerobot.teleoperators.utils import TeleopEvents
LOG_EVERY = 10
SEND_EVERY = 10
MAX_EPISODES = 5
MAX_STEPS_PER_EPISODE = 20
def run_learner(
    transitions_queue: mp.Queue,
    parameters_queue: mp.Queue,
    shutdown_event: mp.Event,
    policy_learner: SACPolicy,
    online_buffer: ReplayBuffer,
    offline_buffer: ReplayBuffer,
    lr: float = 3e-4,
    batch_size: int = 32,
    device: torch.device = "mps",
):
    """The learner process - trains SAC policy on transitions streamed from the actor, updating parameters
    for the actor to adopt.

    Args:
        transitions_queue: Episode transitions streamed in from the actor process.
        parameters_queue: Channel used to ship updated state dicts back to the actor.
        shutdown_event: Set by the main process to request a graceful stop.
        policy_learner: SAC policy instance trained in this process.
        online_buffer: Replay buffer receiving ALL transitions (autonomous + human).
        offline_buffer: Replay buffer receiving ONLY human-intervention transitions.
        lr: Adam learning rate.
        batch_size: Combined batch size; half is sampled from each buffer.
        device: Training device (a device string such as "mps" is also accepted).
    """
    policy_learner.train()
    policy_learner.to(device)
    # Create Adam optimizer from scratch - simple and clean
    optimizer = optim.Adam(policy_learner.parameters(), lr=lr)
    print(f"[LEARNER] Online buffer capacity: {online_buffer.capacity}")
    print(f"[LEARNER] Offline buffer capacity: {offline_buffer.capacity}")
    training_step = 0
    while not shutdown_event.is_set():
        # retrieve incoming transitions from the actor process
        try:
            transitions = transitions_queue.get(timeout=0.1)
            for transition in transitions:
                # HIL-SERL: Add ALL transitions to online buffer
                online_buffer.add(**transition)
                # HIL-SERL: Add ONLY human intervention transitions to offline buffer
                is_intervention = transition.get("complementary_info", {}).get("is_intervention", False)
                if is_intervention:
                    offline_buffer.add(**transition)
                    print(
                        f"[LEARNER] Human intervention detected! Added to offline buffer (now {len(offline_buffer)} transitions)"
                    )
        except Empty:
            pass  # No transitions available, continue
        # Train if we have enough data
        if len(online_buffer) >= policy_learner.config.online_step_before_learning:
            # Sample from online buffer (autonomous + human data)
            online_batch = online_buffer.sample(batch_size // 2)
            # Sample from offline buffer (human demonstrations only, either precollected or at runtime)
            offline_batch = offline_buffer.sample(batch_size // 2)
            # Combine batches - this is the key HIL-SERL mechanism!
            batch = {}
            for key in online_batch:
                if key in offline_batch:
                    batch[key] = torch.cat([online_batch[key], offline_batch[key]], dim=0)
                else:
                    batch[key] = online_batch[key]
            loss, _ = policy_learner.forward(batch)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            training_step += 1
            if training_step % LOG_EVERY == 0:
                print(
                    f"[LEARNER] Training step {training_step}, Loss: {loss.item():.4f}, "
                    f"Buffers: Online={len(online_buffer)}, Offline={len(offline_buffer)}"
                )
            # Send updated parameters to actor every 10 training steps
            if training_step % SEND_EVERY == 0:
                try:
                    # Tensors must live on CPU to cross the process boundary.
                    state_dict = {k: v.cpu() for k, v in policy_learner.state_dict().items()}
                    parameters_queue.put_nowait(state_dict)
                    print("[LEARNER] Sent updated parameters to actor")
                except Full:
                    # Missing write due to queue not being consumed (should happen rarely)
                    pass
    print("[LEARNER] Learner process finished")
def run_actor(
    transitions_queue: mp.Queue,
    parameters_queue: mp.Queue,
    shutdown_event: mp.Event,
    policy_actor: SACPolicy,
    reward_classifier: Classifier,
    env_cfg: HILSerlRobotEnvConfig,
    device: torch.device = "mps",
    output_directory: Path | None = None,
):
    """The actor process - interacts with environment and collects data.
    The policy is frozen and only the parameters are updated, popping the most recent ones from a queue.

    Args:
        transitions_queue: Outgoing channel for per-episode transition lists.
        parameters_queue: Incoming channel for updated state dicts from the learner.
        shutdown_event: Set by the main process to request a graceful stop.
        policy_actor: Frozen SAC policy used for action selection.
        reward_classifier: Pretrained classifier that predicts success rewards.
        env_cfg: Config used to build the robot environment inside this process.
        device: Inference device (a device string such as "mps" is also accepted).
        output_directory: If given, the latest actor policy is saved there on exit.
    """
    policy_actor.eval()
    policy_actor.to(device)
    reward_classifier.eval()
    reward_classifier.to(device)
    # Create robot environment inside the actor process
    env, teleop_device = make_robot_env(env_cfg)
    try:
        for episode in range(MAX_EPISODES):
            if shutdown_event.is_set():
                break
            obs, _info = env.reset()
            episode_reward = 0.0
            step = 0
            episode_transitions = []
            print(f"[ACTOR] Starting episode {episode + 1}")
            while step < MAX_STEPS_PER_EPISODE and not shutdown_event.is_set():
                try:
                    new_params = parameters_queue.get_nowait()
                    policy_actor.load_state_dict(new_params)
                    print("[ACTOR] Updated policy parameters from learner")
                except Empty:  # No new updated parameters available from learner, waiting
                    pass
                # Get action from policy
                policy_obs = make_policy_obs(obs, device=device)
                action_tensor = policy_actor.select_action(policy_obs)  # predicts a single action
                action = action_tensor.squeeze(0).cpu().numpy()
                # Step environment
                next_obs, _env_reward, terminated, truncated, _info = env.step(action)
                done = terminated or truncated
                # Predict reward
                policy_next_obs = make_policy_obs(next_obs, device=device)
                reward = reward_classifier.predict_reward(policy_next_obs)
                if reward >= 1.0 and not done:  # success detected! halt episode
                    terminated = True
                    done = True
                # In HIL-SERL, human interventions come from the teleop device
                is_intervention = False
                if hasattr(teleop_device, "get_teleop_events"):
                    # Real intervention detection from teleop device
                    teleop_events = teleop_device.get_teleop_events()
                    is_intervention = teleop_events.get(TeleopEvents.IS_INTERVENTION, False)
                # Store transition with intervention metadata
                transition = {
                    "state": policy_obs,
                    "action": action,
                    "reward": float(reward) if hasattr(reward, "item") else reward,
                    "next_state": policy_next_obs,
                    "done": done,
                    "truncated": truncated,
                    "complementary_info": {
                        "is_intervention": is_intervention,
                    },
                }
                episode_transitions.append(transition)
                episode_reward += reward
                step += 1
                obs = next_obs
                if done:
                    break
            # Send episode transitions to learner
            transitions_queue.put_nowait(episode_transitions)
    except KeyboardInterrupt:
        print("[ACTOR] Interrupted by user")
    finally:
        # Clean up
        if hasattr(env, "robot") and env.robot.is_connected:
            env.robot.disconnect()
        if teleop_device and hasattr(teleop_device, "disconnect"):
            teleop_device.disconnect()
        if output_directory is not None:
            policy_actor.save_pretrained(output_directory)
            print(f"[ACTOR] Latest actor policy saved at: {output_directory}")
    print("[ACTOR] Actor process finished")
def make_policy_obs(obs, device: torch.device = "cpu"):
    """Convert a gym-style observation dict into batched policy tensors.

    Each array becomes a float32 tensor with a leading batch dimension of 1,
    moved to ``device``. ``agent_pos`` maps to ``observation.state``; each
    entry of ``pixels`` maps to ``observation.image.<name>``.
    """

    def _to_batched_tensor(array):
        return torch.from_numpy(array).float().unsqueeze(0).to(device)

    policy_obs = {"observation.state": _to_batched_tensor(obs["agent_pos"])}
    for name, pixels in obs["pixels"].items():
        policy_obs[f"observation.image.{name}"] = _to_batched_tensor(pixels)
    return policy_obs
def main():
    """Main function - coordinates actor and learner processes.

    Builds the robot environment config, instantiates the SAC policies and
    replay buffers, then spawns a learner process (training) and an actor
    process (data collection) communicating through multiprocessing queues.
    """
    device = "mps"  # or "cuda" or "cpu"
    output_directory = Path("outputs/robot_learning_tutorial/hil_serl")
    output_directory.mkdir(parents=True, exist_ok=True)
    # find ports using lerobot-find-port
    follower_port = ...
    leader_port = ...
    # the robot ids are used to load the right calibration files
    follower_id = ...
    leader_id = ...
    # A pretrained model (to be used in-distribution!)
    reward_classifier_id = "<user>/reward_classifier_hil_serl_example"
    reward_classifier = Classifier.from_pretrained(reward_classifier_id)
    reward_classifier.to(device)
    reward_classifier.eval()
    # Robot and environment configuration
    robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id)
    teleop_cfg = SO100LeaderConfig(port=leader_port, id=leader_id)
    processor_cfg = HILSerlProcessorConfig(control_mode="leader")
    env_cfg = HILSerlRobotEnvConfig(robot=robot_cfg, teleop=teleop_cfg, processor=processor_cfg)
    # Create robot environment (used here only to read the feature specs; the
    # actor process builds its own environment from env_cfg)
    env, teleop_device = make_robot_env(env_cfg)
    obs_features = hw_to_dataset_features(env.robot.observation_features, "observation")
    action_features = hw_to_dataset_features(env.robot.action_features, "action")
    # Create SAC policy for action selection
    policy_cfg = SACConfig(
        device=device,
        input_features=obs_features,
        output_features=action_features,
    )
    policy_actor = SACPolicy(policy_cfg)
    policy_learner = SACPolicy(policy_cfg)
    demonstrations_repo_id = "lerobot/example_hil_serl_dataset"
    offline_dataset = LeRobotDataset(repo_id=demonstrations_repo_id)
    # Online buffer: initialized from scratch
    online_replay_buffer = ReplayBuffer(device=device, state_keys=list(obs_features.keys()))
    # Offline buffer: Created from dataset (pre-populated it with demonstrations)
    offline_replay_buffer = ReplayBuffer.from_lerobot_dataset(
        lerobot_dataset=offline_dataset, device=device, state_keys=list(obs_features.keys())
    )
    # Create communication channels between learner and actor processes
    transitions_queue = mp.Queue(maxsize=10)
    parameters_queue = mp.Queue(maxsize=2)
    shutdown_event = mp.Event()

    # Signal handler for graceful shutdown.
    # NOTE: signal handlers are called with (signum, frame); the frame
    # parameter was previously missing, so the handler raised TypeError
    # the moment a SIGINT/SIGTERM actually fired.
    def signal_handler(sig, frame):
        print(f"\nSignal {sig} received, shutting down...")
        shutdown_event.set()

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    # Create processes
    learner_process = mp.Process(
        target=run_learner,
        args=(
            transitions_queue,
            parameters_queue,
            shutdown_event,
            policy_learner,
            online_replay_buffer,
            offline_replay_buffer,
        ),
        kwargs={"device": device},  # can run on accelerated hardware for training
    )
    actor_process = mp.Process(
        target=run_actor,
        args=(
            transitions_queue,
            parameters_queue,
            shutdown_event,
            policy_actor,
            reward_classifier,
            env_cfg,
        ),
        # actor is frozen, can run on CPU or accelerate for inference.
        # output_directory is passed by keyword: positionally it would bind to
        # run_actor's `device` parameter and clash with the device kwarg.
        kwargs={"device": "cpu", "output_directory": output_directory},
    )
    learner_process.start()
    actor_process.start()
    try:
        # Wait for actor to finish (it controls the episode loop)
        actor_process.join()
        shutdown_event.set()
        learner_process.join(timeout=10)
    except KeyboardInterrupt:
        print("Main process interrupted")
        shutdown_event.set()
        actor_process.join(timeout=5)
        learner_process.join(timeout=10)
    finally:
        # Last-resort cleanup if either process ignored the shutdown event.
        if learner_process.is_alive():
            learner_process.terminate()
        if actor_process.is_alive():
            actor_process.terminate()
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/tutorial/rl/hilserl_example.py",
"license": "Apache License 2.0",
"lines": 279,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/lerobot:examples/tutorial/rl/reward_classifier_example.py | import torch
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.policies.factory import make_policy, make_pre_post_processors
from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig
def main():
# Device to use for training
device = "mps" # or "cuda", or "cpu"
# Load the dataset used for training
repo_id = "lerobot/example_hil_serl_dataset"
dataset = LeRobotDataset(repo_id)
# Configure the policy to extract features from the image frames
camera_keys = dataset.meta.camera_keys
config = RewardClassifierConfig(
num_cameras=len(camera_keys),
device=device,
# backbone model to extract features from the image frames
model_name="microsoft/resnet-18",
)
# Make policy, preprocessor, and optimizer
policy = make_policy(config, ds_meta=dataset.meta)
optimizer = config.get_optimizer_preset().build(policy.parameters())
preprocessor, _ = make_pre_post_processors(policy_cfg=config, dataset_stats=dataset.meta.stats)
classifier_id = "<user>/reward_classifier_hil_serl_example"
# Instantiate a dataloader
dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
# Training loop
num_epochs = 5
for epoch in range(num_epochs):
total_loss = 0
total_accuracy = 0
for batch in dataloader:
# Preprocess the batch and move it to the correct device.
batch = preprocessor(batch)
# Forward pass
loss, output_dict = policy.forward(batch)
# Backward pass and optimization
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
total_accuracy += output_dict["accuracy"]
avg_loss = total_loss / len(dataloader)
avg_accuracy = total_accuracy / len(dataloader)
print(f"Epoch {epoch + 1}/{num_epochs}, Loss: {avg_loss:.4f}, Accuracy: {avg_accuracy:.2f}%")
print("Training finished!")
# You can now save the trained policy.
policy.push_to_hub(classifier_id)
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/tutorial/rl/reward_classifier_example.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/lerobot:examples/tutorial/smolvla/using_smolvla_example.py | import torch
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.datasets.utils import hw_to_dataset_features
from lerobot.policies.factory import make_pre_post_processors
from lerobot.policies.smolvla.modeling_smolvla import SmolVLAPolicy
from lerobot.policies.utils import build_inference_frame, make_robot_action
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
MAX_EPISODES = 5
MAX_STEPS_PER_EPISODE = 20
def main():
device = torch.device("mps") # or "cuda" or "cpu"
model_id = "lerobot/smolvla_base"
model = SmolVLAPolicy.from_pretrained(model_id)
preprocess, postprocess = make_pre_post_processors(
model.config,
model_id,
# This overrides allows to run on MPS, otherwise defaults to CUDA (if available)
preprocessor_overrides={"device_processor": {"device": str(device)}},
)
# find ports using lerobot-find-port
follower_port = ... # something like "/dev/tty.usbmodem58760431631"
# the robot ids are used the load the right calibration files
follower_id = ... # something like "follower_so100"
# Robot and environment configuration
# Camera keys must match the name and resolutions of the ones used for training!
# You can check the camera keys expected by a model in the info.json card on the model card on the Hub
camera_config = {
"camera1": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30),
"camera2": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30),
}
robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id, cameras=camera_config)
robot = SO100Follower(robot_cfg)
robot.connect()
task = "" # something like "pick the red block"
robot_type = "" # something like "so100_follower" for multi-embodiment datasets
# This is used to match the raw observation keys to the keys expected by the policy
action_features = hw_to_dataset_features(robot.action_features, "action")
obs_features = hw_to_dataset_features(robot.observation_features, "observation")
dataset_features = {**action_features, **obs_features}
for _ in range(MAX_EPISODES):
for _ in range(MAX_STEPS_PER_EPISODE):
obs = robot.get_observation()
obs_frame = build_inference_frame(
observation=obs, ds_features=dataset_features, device=device, task=task, robot_type=robot_type
)
obs = preprocess(obs_frame)
action = model.select_action(obs)
action = postprocess(action)
action = make_robot_action(action, dataset_features)
robot.send_action(action)
print("Episode finished! Starting new episode...")
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/tutorial/smolvla/using_smolvla_example.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/lerobot:src/lerobot/policies/groot/action_head/action_encoder.py | # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
def swish(x):
return x * torch.sigmoid(x)
class SinusoidalPositionalEncoding(nn.Module):
"""
Produces a sinusoidal encoding of shape (B, T, w)
given timesteps of shape (B, T).
"""
def __init__(self, embedding_dim):
super().__init__()
self.embedding_dim = embedding_dim
def forward(self, timesteps):
# timesteps: shape (B, T)
# We'll compute sin/cos frequencies across dim T
timesteps = timesteps.float() # ensure float
b, t = timesteps.shape
device = timesteps.device
half_dim = self.embedding_dim // 2
# typical log space frequencies for sinusoidal encoding
exponent = -torch.arange(half_dim, dtype=torch.float, device=device) * (
torch.log(torch.tensor(10000.0)) / half_dim
)
# Expand timesteps to (B, T, 1) then multiply
freqs = timesteps.unsqueeze(-1) * exponent.exp() # (B, T, half_dim)
sin = torch.sin(freqs)
cos = torch.cos(freqs)
enc = torch.cat([sin, cos], dim=-1) # (B, T, w)
return enc
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/groot/action_head/action_encoder.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/groot/action_head/cross_attention_dit.py | # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F # noqa: N812
from diffusers import ConfigMixin, ModelMixin
from diffusers.configuration_utils import register_to_config
from diffusers.models.attention import Attention, FeedForward
from diffusers.models.embeddings import (
SinusoidalPositionalEmbedding,
TimestepEmbedding,
Timesteps,
)
from torch import nn
class TimestepEncoder(nn.Module):
def __init__(self, embedding_dim, compute_dtype=torch.float32):
super().__init__()
self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=1)
self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
def forward(self, timesteps):
dtype = next(self.parameters()).dtype
timesteps_proj = self.time_proj(timesteps).to(dtype)
timesteps_emb = self.timestep_embedder(timesteps_proj) # (N, D)
return timesteps_emb
class AdaLayerNorm(nn.Module):
def __init__(
self,
embedding_dim: int,
norm_elementwise_affine: bool = False,
norm_eps: float = 1e-5,
chunk_dim: int = 0,
):
super().__init__()
self.chunk_dim = chunk_dim
output_dim = embedding_dim * 2
self.silu = nn.SiLU()
self.linear = nn.Linear(embedding_dim, output_dim)
self.norm = nn.LayerNorm(output_dim // 2, norm_eps, norm_elementwise_affine)
def forward(
self,
x: torch.Tensor,
temb: torch.Tensor | None = None,
) -> torch.Tensor:
temb = self.linear(self.silu(temb))
scale, shift = temb.chunk(2, dim=1)
x = self.norm(x) * (1 + scale[:, None]) + shift[:, None]
return x
class BasicTransformerBlock(nn.Module):
def __init__(
self,
dim: int,
num_attention_heads: int,
attention_head_dim: int,
dropout=0.0,
cross_attention_dim: int | None = None,
activation_fn: str = "geglu",
attention_bias: bool = False,
upcast_attention: bool = False,
norm_elementwise_affine: bool = True,
norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen'
norm_eps: float = 1e-5,
final_dropout: bool = False,
attention_type: str = "default",
positional_embeddings: str | None = None,
num_positional_embeddings: int | None = None,
ff_inner_dim: int | None = None,
ff_bias: bool = True,
attention_out_bias: bool = True,
):
super().__init__()
self.dim = dim
self.num_attention_heads = num_attention_heads
self.attention_head_dim = attention_head_dim
self.dropout = dropout
self.cross_attention_dim = cross_attention_dim
self.activation_fn = activation_fn
self.attention_bias = attention_bias
self.norm_elementwise_affine = norm_elementwise_affine
self.positional_embeddings = positional_embeddings
self.num_positional_embeddings = num_positional_embeddings
self.norm_type = norm_type
if positional_embeddings and (num_positional_embeddings is None):
raise ValueError(
"If `positional_embeddings` type is defined, `num_positional_embeddings` must also be defined."
)
if positional_embeddings == "sinusoidal":
self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
else:
self.pos_embed = None
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if norm_type == "ada_norm":
self.norm1 = AdaLayerNorm(dim)
else:
self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
self.attn1 = Attention(
query_dim=dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
cross_attention_dim=cross_attention_dim,
upcast_attention=upcast_attention,
out_bias=attention_out_bias,
)
# 3. Feed-forward
self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
self.ff = FeedForward(
dim,
dropout=dropout,
activation_fn=activation_fn,
final_dropout=final_dropout,
inner_dim=ff_inner_dim,
bias=ff_bias,
)
if final_dropout:
self.final_dropout = nn.Dropout(dropout)
else:
self.final_dropout = None
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor | None = None,
encoder_hidden_states: torch.Tensor | None = None,
encoder_attention_mask: torch.Tensor | None = None,
temb: torch.LongTensor | None = None,
) -> torch.Tensor:
# 0. Self-Attention
if self.norm_type == "ada_norm":
norm_hidden_states = self.norm1(hidden_states, temb)
else:
norm_hidden_states = self.norm1(hidden_states)
if self.pos_embed is not None:
norm_hidden_states = self.pos_embed(norm_hidden_states)
attn_output = self.attn1(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
# encoder_attention_mask=encoder_attention_mask,
)
if self.final_dropout:
attn_output = self.final_dropout(attn_output)
hidden_states = attn_output + hidden_states
if hidden_states.ndim == 4:
hidden_states = hidden_states.squeeze(1)
# 4. Feed-forward
norm_hidden_states = self.norm3(hidden_states)
ff_output = self.ff(norm_hidden_states)
hidden_states = ff_output + hidden_states
if hidden_states.ndim == 4:
hidden_states = hidden_states.squeeze(1)
return hidden_states
class DiT(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
num_attention_heads: int = 8,
attention_head_dim: int = 64,
output_dim: int = 26,
num_layers: int = 12,
dropout: float = 0.1,
attention_bias: bool = True,
activation_fn: str = "gelu-approximate",
num_embeds_ada_norm: int | None = 1000,
upcast_attention: bool = False,
norm_type: str = "ada_norm",
norm_elementwise_affine: bool = False,
norm_eps: float = 1e-5,
max_num_positional_embeddings: int = 512,
compute_dtype=torch.float32,
final_dropout: bool = True,
positional_embeddings: str | None = "sinusoidal",
interleave_self_attention=False,
cross_attention_dim: int | None = None,
):
super().__init__()
self.attention_head_dim = attention_head_dim
self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
self.gradient_checkpointing = False
# Timestep encoder
self.timestep_encoder = TimestepEncoder(
embedding_dim=self.inner_dim, compute_dtype=self.config.compute_dtype
)
all_blocks = []
for idx in range(self.config.num_layers):
use_self_attn = idx % 2 == 1 and interleave_self_attention
curr_cross_attention_dim = cross_attention_dim if not use_self_attn else None
all_blocks += [
BasicTransformerBlock(
self.inner_dim,
self.config.num_attention_heads,
self.config.attention_head_dim,
dropout=self.config.dropout,
activation_fn=self.config.activation_fn,
attention_bias=self.config.attention_bias,
upcast_attention=self.config.upcast_attention,
norm_type=norm_type,
norm_elementwise_affine=self.config.norm_elementwise_affine,
norm_eps=self.config.norm_eps,
positional_embeddings=positional_embeddings,
num_positional_embeddings=self.config.max_num_positional_embeddings,
final_dropout=final_dropout,
cross_attention_dim=curr_cross_attention_dim,
)
]
self.transformer_blocks = nn.ModuleList(all_blocks)
# Output blocks
self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6)
self.proj_out_1 = nn.Linear(self.inner_dim, 2 * self.inner_dim)
self.proj_out_2 = nn.Linear(self.inner_dim, self.config.output_dim)
print(
"Total number of DiT parameters: ",
sum(p.numel() for p in self.parameters() if p.requires_grad),
)
def forward(
self,
hidden_states: torch.Tensor, # Shape: (B, T, D)
encoder_hidden_states: torch.Tensor, # Shape: (B, S, D)
timestep: torch.LongTensor | None = None,
encoder_attention_mask: torch.Tensor | None = None,
return_all_hidden_states: bool = False,
):
# Encode timesteps
temb = self.timestep_encoder(timestep)
# Process through transformer blocks - single pass through the blocks
hidden_states = hidden_states.contiguous()
encoder_hidden_states = encoder_hidden_states.contiguous()
all_hidden_states = [hidden_states]
# Process through transformer blocks
for idx, block in enumerate(self.transformer_blocks):
if idx % 2 == 1 and self.config.interleave_self_attention:
hidden_states = block(
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
temb=temb,
)
else:
hidden_states = block(
hidden_states,
attention_mask=None,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=None,
temb=temb,
)
all_hidden_states.append(hidden_states)
# Output processing
conditioning = temb
shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)
hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]
if return_all_hidden_states:
return self.proj_out_2(hidden_states), all_hidden_states
else:
return self.proj_out_2(hidden_states)
class SelfAttentionTransformer(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
num_attention_heads: int = 8,
attention_head_dim: int = 64,
output_dim: int = 26,
num_layers: int = 12,
dropout: float = 0.1,
attention_bias: bool = True,
activation_fn: str = "gelu-approximate",
num_embeds_ada_norm: int | None = 1000,
upcast_attention: bool = False,
max_num_positional_embeddings: int = 512,
compute_dtype=torch.float32,
final_dropout: bool = True,
positional_embeddings: str | None = "sinusoidal",
interleave_self_attention=False,
):
super().__init__()
self.attention_head_dim = attention_head_dim
self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
self.gradient_checkpointing = False
self.transformer_blocks = nn.ModuleList(
[
BasicTransformerBlock(
self.inner_dim,
self.config.num_attention_heads,
self.config.attention_head_dim,
dropout=self.config.dropout,
activation_fn=self.config.activation_fn,
attention_bias=self.config.attention_bias,
upcast_attention=self.config.upcast_attention,
positional_embeddings=positional_embeddings,
num_positional_embeddings=self.config.max_num_positional_embeddings,
final_dropout=final_dropout,
)
for _ in range(self.config.num_layers)
]
)
print(
"Total number of SelfAttentionTransformer parameters: ",
sum(p.numel() for p in self.parameters() if p.requires_grad),
)
def forward(
self,
hidden_states: torch.Tensor, # Shape: (B, T, D)
return_all_hidden_states: bool = False,
):
# Process through transformer blocks - single pass through the blocks
hidden_states = hidden_states.contiguous()
all_hidden_states = [hidden_states]
# Process through transformer blocks
for _idx, block in enumerate(self.transformer_blocks):
hidden_states = block(hidden_states)
all_hidden_states.append(hidden_states)
if return_all_hidden_states:
return hidden_states, all_hidden_states
else:
return hidden_states
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/groot/action_head/cross_attention_dit.py",
"license": "Apache License 2.0",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/groot/action_head/flow_matching_action_head.py | # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import TYPE_CHECKING
import torch
import torch.nn.functional as F # noqa: N812
from torch import nn
from torch.distributions import Beta
from lerobot.utils.import_utils import _transformers_available
# Conditional import for type checking and lazy loading
if TYPE_CHECKING or _transformers_available:
from transformers import PretrainedConfig
from transformers.feature_extraction_utils import BatchFeature
else:
PretrainedConfig = object
BatchFeature = None
from lerobot.policies.groot.action_head.action_encoder import (
SinusoidalPositionalEncoding,
swish,
)
from .cross_attention_dit import DiT, SelfAttentionTransformer
class CategorySpecificLinear(nn.Module):
def __init__(self, num_categories, input_dim, hidden_dim):
super().__init__()
self.num_categories = num_categories
# For each category, we have separate weights and biases.
self.W = nn.Parameter(0.02 * torch.randn(num_categories, input_dim, hidden_dim))
self.b = nn.Parameter(torch.zeros(num_categories, hidden_dim))
def forward(self, x, cat_ids):
selected_w = self.W[cat_ids]
selected_b = self.b[cat_ids]
return torch.bmm(x, selected_w) + selected_b.unsqueeze(1)
class CategorySpecificMLP(nn.Module):
def __init__(self, num_categories, input_dim, hidden_dim, output_dim):
super().__init__()
self.num_categories = num_categories
self.layer1 = CategorySpecificLinear(num_categories, input_dim, hidden_dim)
self.layer2 = CategorySpecificLinear(num_categories, hidden_dim, output_dim)
def forward(self, x, cat_ids):
hidden = F.relu(self.layer1(x, cat_ids))
return self.layer2(hidden, cat_ids)
class MultiEmbodimentActionEncoder(nn.Module):
def __init__(self, action_dim, hidden_size, num_embodiments):
super().__init__()
self.hidden_size = hidden_size
self.num_embodiments = num_embodiments
# W1: R^{w x d}, W2: R^{w x 2w}, W3: R^{w x w}
self.W1 = CategorySpecificLinear(num_embodiments, action_dim, hidden_size) # (d -> w)
self.W2 = CategorySpecificLinear(num_embodiments, 2 * hidden_size, hidden_size) # (2w -> w)
self.W3 = CategorySpecificLinear(num_embodiments, hidden_size, hidden_size) # (w -> w)
self.pos_encoding = SinusoidalPositionalEncoding(hidden_size)
def forward(self, actions, timesteps, cat_ids):
"""
actions: shape (B, T, action_dim)
timesteps: shape (B,) -- a single scalar per batch item
cat_ids: shape (B,)
returns: shape (B, T, hidden_size)
"""
b, t, _ = actions.shape
# 1) Expand each batch's single scalar time 'tau' across all T steps
# so that shape => (B, T)
# e.g. if timesteps is (B,), replicate across T
if timesteps.dim() == 1 and timesteps.shape[0] == b:
# shape (B,) => (B,T)
timesteps = timesteps.unsqueeze(1).expand(-1, t)
else:
raise ValueError("Expected `timesteps` to have shape (B,) so we can replicate across T.")
# 2) Standard action MLP step for shape => (B, T, w)
a_emb = self.W1(actions, cat_ids)
# 3) Get the sinusoidal encoding (B, T, w)
tau_emb = self.pos_encoding(timesteps).to(dtype=a_emb.dtype)
# 4) Concat along last dim => (B, T, 2w), then W2 => (B, T, w), swish
x = torch.cat([a_emb, tau_emb], dim=-1)
x = swish(self.W2(x, cat_ids))
# 5) Finally W3 => (B, T, w)
x = self.W3(x, cat_ids)
return x
@dataclass
class FlowmatchingActionHeadConfig(PretrainedConfig):
"""NOTE: N1.5 uses XEmbFlowmatchingPolicyHeadConfig as action head"""
add_pos_embed: bool = field(default=True, metadata={"help": "Whether to add positional embedding"})
model_dtype: str = field(default="float32", metadata={"help": "Model data type."})
diffusion_model_cfg: dict = field(default=None, metadata={"help": "Diffusion model configuration."})
input_embedding_dim: int = field(default=1536, metadata={"help": "Input embedding channel dimension."})
backbone_embedding_dim: int = field(
default=1536, metadata={"help": "Backbone embedding channel dimension."}
)
hidden_size: int = field(default=1024, metadata={"help": "Input embedding dimension."})
max_seq_len: int = field(default=1024, metadata={"help": "Maximum Sequence Length"})
action_dim: int = field(default=None, metadata={"help": "Action dimension."})
action_horizon: int = field(default=None, metadata={"help": "Action horizon."})
noise_beta_alpha: float = field(default=1.5, metadata={"help": ""})
noise_beta_beta: float = field(default=1.0, metadata={"help": ""})
noise_s: float = field(default=0.999, metadata={"help": "Flow matching noise Beta distribution s."})
num_timestep_buckets: int = field(
default=1000, metadata={"help": "Number of timestep discretization buckets."}
)
num_inference_timesteps: int = field(
default=None,
metadata={"help": "Number of inference steps for noise diffusion."},
)
max_num_embodiments: int = field(default=32, metadata={"help": "Number of embodiments."})
tune_projector: bool = field(default=True, metadata={"help": "Whether to tune the projector."})
tune_diffusion_model: bool = field(
default=True, metadata={"help": "Whether to tune the diffusion model."}
)
load_pretrained_det_decode_layer_path: str = field(
default=None, metadata={"help": "Path to pretrained detection model."}
)
detection_coeff: float = field(default=1.0, metadata={"help": "Detection coefficient."})
freeze_decode_layer: bool = field(default=False)
expand_batch: int = field(default=None)
use_vlln: bool = field(default=True)
vl_self_attention_cfg: dict = field(default=None)
num_target_vision_tokens: int = field(default=32, metadata={"help": "Number of target vision tokens."})
def __init__(self, **kwargs):
super().__init__(**kwargs)
for key, value in kwargs.items():
setattr(self, key, value)
class FlowmatchingActionHead(nn.Module):
config_class = FlowmatchingActionHeadConfig
supports_gradient_checkpointing = True
def __init__(
self,
config: FlowmatchingActionHeadConfig,
):
super().__init__()
self.hidden_size = config.hidden_size
self.input_embedding_dim = config.input_embedding_dim
self.model = DiT(**config.diffusion_model_cfg)
self.action_dim = config.action_dim
self.action_horizon = config.action_horizon
self.num_inference_timesteps = config.num_inference_timesteps
self.state_encoder = CategorySpecificMLP(
num_categories=config.max_num_embodiments,
input_dim=config.max_state_dim,
hidden_dim=self.hidden_size,
output_dim=self.input_embedding_dim,
)
self.action_encoder = MultiEmbodimentActionEncoder(
action_dim=config.action_dim,
hidden_size=self.input_embedding_dim,
num_embodiments=config.max_num_embodiments,
)
self.action_decoder = CategorySpecificMLP(
num_categories=config.max_num_embodiments,
input_dim=self.hidden_size,
hidden_dim=self.hidden_size,
output_dim=self.action_dim,
)
self.future_tokens = nn.Embedding(config.num_target_vision_tokens, self.input_embedding_dim)
nn.init.normal_(self.future_tokens.weight, mean=0.0, std=0.02)
self.vlln = nn.LayerNorm(config.backbone_embedding_dim) if config.use_vlln else nn.Identity()
self.vl_self_attention = (
SelfAttentionTransformer(**config.vl_self_attention_cfg) if config.use_vlln else nn.Identity()
)
if config.add_pos_embed:
self.position_embedding = nn.Embedding(config.max_seq_len, self.input_embedding_dim)
nn.init.normal_(self.position_embedding.weight, mean=0.0, std=0.02)
self.beta_dist = Beta(config.noise_beta_alpha, config.noise_beta_beta)
self.num_timestep_buckets = config.num_timestep_buckets
self.config = config
self.set_trainable_parameters(config.tune_projector, config.tune_diffusion_model)
def set_trainable_parameters(self, tune_projector: bool, tune_diffusion_model: bool):
self.tune_projector = tune_projector
self.tune_diffusion_model = tune_diffusion_model
for p in self.parameters():
p.requires_grad = True
if not tune_projector:
self.state_encoder.requires_grad_(False)
self.action_encoder.requires_grad_(False)
self.action_decoder.requires_grad_(False)
if self.config.add_pos_embed:
self.position_embedding.requires_grad_(False)
if not tune_diffusion_model:
self.model.requires_grad_(False)
print(f"Tune action head projector: {self.tune_projector}")
print(f"Tune action head diffusion model: {self.tune_diffusion_model}")
# Check if any parameters are still trainable. If not, print a warning.
if not tune_projector and not tune_diffusion_model:
for name, p in self.named_parameters():
if p.requires_grad:
print(f"Action head trainable parameter: {name}")
if not any(p.requires_grad for p in self.parameters()):
print("Warning: No action head trainable parameters found.")
def set_frozen_modules_to_eval_mode(self):
"""
Huggingface will call model.train() at each training_step. To ensure
the expected behaviors for modules like dropout, batchnorm, etc., we
need to call model.eval() for the frozen modules.
"""
if self.training:
if not self.tune_projector:
self.state_encoder.eval()
self.action_encoder.eval()
self.action_decoder.eval()
if self.config.add_pos_embed:
self.position_embedding.eval()
if not self.tune_diffusion_model:
self.model.eval()
def sample_time(self, batch_size, device, dtype):
sample = self.beta_dist.sample([batch_size]).to(device, dtype=dtype)
return (self.config.noise_s - sample) / self.config.noise_s
def prepare_input(self, batch: dict) -> BatchFeature:
return BatchFeature(data=batch)
def process_backbone_output(self, backbone_output: BatchFeature) -> BatchFeature:
backbone_features = backbone_output["backbone_features"]
backbone_features = self.vlln(backbone_features)
backbone_features = self.vl_self_attention(backbone_features)
backbone_output["backbone_features"] = backbone_features
return backbone_output
def forward(self, backbone_output: BatchFeature, action_input: BatchFeature) -> BatchFeature:
# Set frozen modules to eval
self.set_frozen_modules_to_eval_mode()
backbone_output = self.process_backbone_output(backbone_output)
if self.config.expand_batch is not None:
for k, v in backbone_output.items():
ndim = len(v.shape)
factors = [self.config.expand_batch]
while len(factors) < ndim:
factors.append(1)
factors = tuple(factors)
expanded = v.repeat(*factors)
backbone_output[k] = expanded
for k, v in action_input.items():
ndim = len(v.shape)
factors = [self.config.expand_batch]
while len(factors) < ndim:
factors.append(1)
factors = tuple(factors)
expanded = v.repeat(*factors)
action_input[k] = expanded
# Get vision and language embeddings.
vl_embs = backbone_output.backbone_features
device = vl_embs.device
# Get embodiment ID.
embodiment_id = action_input.embodiment_id
# Embed state.
state_features = self.state_encoder(action_input.state, embodiment_id)
# Embed noised action trajectory.
actions = action_input.action
noise = torch.randn(actions.shape, device=actions.device, dtype=actions.dtype)
t = self.sample_time(actions.shape[0], device=actions.device, dtype=actions.dtype)
t = t[:, None, None] # shape (B,1,1) for broadcast
noisy_trajectory = (1 - t) * noise + t * actions
velocity = actions - noise
# Convert (continuous) t -> discrete if needed
t_discretized = (t[:, 0, 0] * self.num_timestep_buckets).long()
action_features = self.action_encoder(noisy_trajectory, t_discretized, embodiment_id)
# Maybe add position embedding.
if self.config.add_pos_embed:
pos_ids = torch.arange(action_features.shape[1], dtype=torch.long, device=device)
pos_embs = self.position_embedding(pos_ids).unsqueeze(0)
action_features = action_features + pos_embs
# Join vision, language, state and action embedding along sequence dimension.
future_tokens = self.future_tokens.weight.unsqueeze(0).expand(vl_embs.shape[0], -1, -1)
sa_embs = torch.cat((state_features, future_tokens, action_features), dim=1)
vl_attn_mask = backbone_output.backbone_attention_mask
model_output = self.model(
hidden_states=sa_embs,
encoder_hidden_states=vl_embs,
encoder_attention_mask=vl_attn_mask,
timestep=t_discretized,
return_all_hidden_states=False, # NOTE (YL): not using flare now
)
pred = self.action_decoder(model_output, embodiment_id)
pred_actions = pred[:, -actions.shape[1] :]
# Slice out only the action portion of pred and target.
action_mask = action_input.action_mask
loss = F.mse_loss(pred_actions, velocity, reduction="none") * action_mask
loss = loss.sum() / action_mask.sum()
output_dict = {
"loss": loss,
}
return BatchFeature(data=output_dict)
@torch.no_grad()
def get_action(self, backbone_output: BatchFeature, action_input: BatchFeature) -> BatchFeature:
backbone_output = self.process_backbone_output(backbone_output)
# Get vision and language embeddings.
vl_embs = backbone_output.backbone_features
embodiment_id = action_input.embodiment_id
# Embed state.
state_features = self.state_encoder(action_input.state, embodiment_id)
# Set initial actions as the sampled noise.
batch_size = vl_embs.shape[0]
device = vl_embs.device
actions = torch.randn(
size=(batch_size, self.config.action_horizon, self.config.action_dim),
dtype=vl_embs.dtype,
device=device,
)
num_steps = self.num_inference_timesteps
dt = 1.0 / num_steps
# Run denoising steps.
for t in range(num_steps):
t_cont = t / float(num_steps) # e.g. goes 0, 1/N, 2/N, ...
t_discretized = int(t_cont * self.num_timestep_buckets)
# Embed noised action trajectory.
timesteps_tensor = torch.full(size=(batch_size,), fill_value=t_discretized, device=device)
action_features = self.action_encoder(actions, timesteps_tensor, embodiment_id)
# Maybe add position embedding.
if self.config.add_pos_embed:
pos_ids = torch.arange(action_features.shape[1], dtype=torch.long, device=device)
pos_embs = self.position_embedding(pos_ids).unsqueeze(0)
action_features = action_features + pos_embs
# Join vision, language, state and action embedding along sequence dimension.
future_tokens = self.future_tokens.weight.unsqueeze(0).expand(vl_embs.shape[0], -1, -1)
sa_embs = torch.cat((state_features, future_tokens, action_features), dim=1)
# Run model forward.
model_output = self.model(
hidden_states=sa_embs,
encoder_hidden_states=vl_embs,
timestep=timesteps_tensor,
)
pred = self.action_decoder(model_output, embodiment_id)
pred_velocity = pred[:, -self.action_horizon :]
# Update actions using euler integration.
actions = actions + dt * pred_velocity
return BatchFeature(data={"action_pred": actions})
@property
def device(self):
return next(iter(self.parameters())).device
@property
def dtype(self):
return next(iter(self.parameters())).dtype
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/groot/action_head/flow_matching_action_head.py",
"license": "Apache License 2.0",
"lines": 337,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/groot/configuration_groot.py | #!/usr/bin/env python
# Copyright 2024 NVIDIA Corporation and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from lerobot.configs.policies import PreTrainedConfig
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.optim.optimizers import AdamWConfig
from lerobot.optim.schedulers import CosineDecayWithWarmupSchedulerConfig
from lerobot.utils.constants import ACTION, OBS_STATE
@PreTrainedConfig.register_subclass("groot")
@dataclass
class GrootConfig(PreTrainedConfig):
    """Configuration for the Groot policy wrapper.

    Wraps NVIDIA's GR00T N1.5 foundation model as a LeRobot policy. Most
    defaults mirror the upstream `groot_finetune_script.py` recipe.
    """

    # Basic policy settings.
    n_obs_steps: int = 1
    # Number of actions predicted per forward pass (the "action chunk").
    chunk_size: int = 50
    # Number of predicted actions actually executed before re-planning.
    # Must not exceed `chunk_size` (enforced in `__post_init__`).
    n_action_steps: int = 50

    # Dimension settings (must match pretrained GR00T model expectations).
    # Maximum state dimension. Shorter states will be zero-padded.
    max_state_dim: int = 64
    # Maximum action dimension. Shorter actions will be zero-padded.
    max_action_dim: int = 32

    # Normalization per feature type. Visual features use identity because the
    # GR00T backbone performs its own image preprocessing.
    normalization_mapping: dict[str, NormalizationMode] = field(
        default_factory=lambda: {
            "VISUAL": NormalizationMode.IDENTITY,
            "STATE": NormalizationMode.MEAN_STD,
            "ACTION": NormalizationMode.MEAN_STD,
        }
    )

    # Image preprocessing (adjust to match Groot's expected input).
    image_size: tuple[int, int] = (224, 224)

    # Path or HuggingFace model ID for the base Groot model.
    base_model_path: str = "nvidia/GR00T-N1.5-3B"
    # HF repo ID (or local path) that hosts vocab.json and merges.txt for the Eagle tokenizer.
    tokenizer_assets_repo: str = "lerobot/eagle2hg-processor-groot-n1p5"
    # Embodiment tag to use for training (e.g. 'new_embodiment', 'gr1').
    embodiment_tag: str = "new_embodiment"

    # Fine-tuning control arguments.
    # Whether to fine-tune the LLM backbone.
    tune_llm: bool = False
    # Whether to fine-tune the vision tower.
    tune_visual: bool = False
    # Whether to fine-tune the projector.
    tune_projector: bool = True
    # Whether to fine-tune the diffusion model.
    tune_diffusion_model: bool = True

    # LoRA parameters (from groot_finetune_script.py).
    # Rank of the LoRA adapters. If 0, LoRA is disabled.
    lora_rank: int = 0
    # Alpha scaling value for LoRA.
    lora_alpha: int = 16
    # Dropout rate applied inside LoRA layers.
    lora_dropout: float = 0.1
    # Whether to apply LoRA to the full model.
    lora_full_model: bool = False

    # Optimizer / scheduler parameters (matching groot_finetune_script.py).
    optimizer_lr: float = 1e-4
    optimizer_betas: tuple[float, float] = (0.95, 0.999)
    optimizer_eps: float = 1e-8
    optimizer_weight_decay: float = 1e-5
    # Fraction of `max_steps` spent on LR warmup.
    warmup_ratio: float = 0.05
    use_bf16: bool = True

    # Dataset parameters.
    # Video backend to use for training ('decord' or 'torchvision_av').
    video_backend: str = "decord"
    # Whether to balance dataset weights in mixture datasets.
    balance_dataset_weights: bool = True
    # Whether to sample trajectories weighted by their length.
    balance_trajectory_weights: bool = True

    # Optional dataset paths for delegating training to the Isaac-GR00T runner.
    dataset_paths: list[str] | None = None
    output_dir: str = "./tmp/gr00t"
    save_steps: int = 1000
    max_steps: int = 10000
    batch_size: int = 32
    dataloader_num_workers: int = 8
    report_to: str = "wandb"
    resume: bool = False

    def __post_init__(self):
        """Validate cross-field constraints after dataclass initialization."""
        super().__post_init__()
        if self.n_action_steps > self.chunk_size:
            raise ValueError(
                f"n_action_steps ({self.n_action_steps}) cannot exceed chunk_size ({self.chunk_size})"
            )

    def validate_features(self) -> None:
        """Validate and set up input/output features for Groot.

        Requires at least one VISUAL input feature. Fills in default STATE and
        ACTION features (at the configured maximum dimensions) when absent, and
        rejects features larger than `max_state_dim` / `max_action_dim`.

        Raises:
            ValueError: If no visual feature is present or a provided feature
                exceeds the configured maximum dimension.
        """
        image_features = [key for key, feat in self.input_features.items() if feat.type == FeatureType.VISUAL]
        if not image_features:
            raise ValueError(
                "Groot policy requires at least one visual input feature. "
                "No features of type FeatureType.VISUAL found in input_features."
            )
        if OBS_STATE not in self.input_features:
            # Default to the maximum (zero-padded) state dimension.
            self.input_features[OBS_STATE] = PolicyFeature(
                type=FeatureType.STATE,
                shape=(self.max_state_dim,),
            )
        else:
            state_shape = self.input_features[OBS_STATE].shape
            state_dim = state_shape[0] if state_shape else 0
            if state_dim > self.max_state_dim:
                raise ValueError(
                    f"State dimension {state_dim} exceeds max_state_dim {self.max_state_dim}. "
                    f"Either reduce state dimension or increase max_state_dim in config."
                )
        if ACTION not in self.output_features:
            # Default to the maximum (zero-padded) action dimension.
            self.output_features[ACTION] = PolicyFeature(
                type=FeatureType.ACTION,
                shape=(self.max_action_dim,),
            )
        else:
            action_shape = self.output_features[ACTION].shape
            action_dim = action_shape[0] if action_shape else 0
            if action_dim > self.max_action_dim:
                raise ValueError(
                    f"Action dimension {action_dim} exceeds max_action_dim {self.max_action_dim}. "
                    f"Either reduce action dimension or increase max_action_dim in config."
                )

    def get_optimizer_preset(self) -> AdamWConfig:
        """Return the AdamW optimizer configuration derived from this config."""
        return AdamWConfig(
            lr=self.optimizer_lr,
            betas=self.optimizer_betas,
            eps=self.optimizer_eps,
            weight_decay=self.optimizer_weight_decay,
        )

    def get_scheduler_preset(self) -> CosineDecayWithWarmupSchedulerConfig:
        """Return the cosine-decay-with-warmup scheduler configuration.

        The warmup and decay horizons are derived from `max_steps` (they were
        previously hard-coded to 10000, which silently diverged from the
        configured training length whenever `max_steps` was changed). With the
        default `max_steps == 10000` the schedule is identical to before.
        """
        return CosineDecayWithWarmupSchedulerConfig(
            num_warmup_steps=int(self.max_steps * self.warmup_ratio),
            num_decay_steps=self.max_steps,
            peak_lr=self.optimizer_lr,
            decay_lr=self.optimizer_lr * 0.1,
        )

    @property
    def observation_delta_indices(self) -> None:
        """Groot uses no delta observations."""
        return None

    @property
    def action_delta_indices(self) -> list[int]:
        """Indices of the action deltas used for training (capped at 16)."""
        return list(range(min(self.chunk_size, 16)))

    @property
    def reward_delta_indices(self) -> None:
        """Groot uses no delta rewards."""
        return None
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/groot/configuration_groot.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/groot/eagle2_hg_model/configuration_eagle2_5_vl.py | # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from transformers.configuration_utils import PretrainedConfig
from transformers.models.llama.configuration_llama import LlamaConfig
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
from transformers.models.qwen3.configuration_qwen3 import Qwen3Config
from transformers.models.siglip.configuration_siglip import SiglipVisionConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
class Eagle25VLConfig(PretrainedConfig):
    """Configuration for the Eagle 2.5 VL model.

    Composes a `SiglipVisionConfig` vision tower with a Llama/Qwen2/Qwen3 text
    config, plus Eagle-specific dynamic-tiling and connector options.
    """

    model_type = "eagle_2_5_vl"
    is_composition = True
    sub_configs = {"vision_config": SiglipVisionConfig, "text_config": Qwen2Config}

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        use_backbone_lora=0,
        use_llm_lora=0,
        pad2square=False,
        select_layer=-4,
        force_image_size=None,
        downsample_ratio=0.5,
        template=None,
        dynamic_image_size=False,
        use_thumbnail=False,
        loss_version="v1",
        min_dynamic_tiles=1,
        max_dynamic_tiles=6,
        mlp_checkpoint=False,
        initializer_range=0.02,
        _attn_implementation="flash_attention_2",
        _attn_implementation_autoset=False,
        llm_config=None,  # NOTE: accepted for checkpoint compatibility but unused.
        image_token_index=None,
        use_pixel_shuffle=True,
        mlp_connector_layers=2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {"model_type": "siglip_vision_model"}
            # Fixed log message: the default vision config here is
            # SiglipVisionConfig (the old message referred to InternVisionConfig).
            logger.info("vision_config is None. Initializing the SiglipVisionConfig with default values.")
        if text_config is None:
            text_config = {"architectures": ["Qwen2ForCausalLM"]}
            # Fixed log message: the default text config is Qwen2Config, not LlamaConfig.
            logger.info(
                "text_config is None. Initializing the text config with default values (`Qwen2Config`)."
            )
        # Only SigLIP vision towers are supported.
        if vision_config["model_type"] == "siglip_vision_model":
            self.vision_config = SiglipVisionConfig(**vision_config)
        else:
            raise ValueError("Unsupported model_type: {}".format(vision_config["model_type"]))
        # Dispatch the text config class on the declared architecture.
        if text_config["architectures"][0] == "LlamaForCausalLM":
            self.text_config = LlamaConfig(**text_config)
        elif text_config["architectures"][0] == "Qwen2ForCausalLM":
            self.text_config = Qwen2Config(**text_config)
        elif text_config["architectures"][0] == "Qwen3ForCausalLM":
            self.text_config = Qwen3Config(**text_config)
        else:
            raise ValueError("Unsupported architecture: {}".format(text_config["architectures"][0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.mlp_checkpoint = mlp_checkpoint
        self.pad2square = pad2square
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.loss_version = loss_version
        self.initializer_range = initializer_range
        self.min_dynamic_tiles = min_dynamic_tiles
        self.max_dynamic_tiles = max_dynamic_tiles
        # Mirror the text config's embedding-tying flag at the top level.
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self._attn_implementation = _attn_implementation
        self._attn_implementation_autoset = _attn_implementation_autoset
        self.image_token_index = image_token_index
        self.use_pixel_shuffle = use_pixel_shuffle
        self.mlp_connector_layers = mlp_connector_layers
        logger.info(f"min_dynamic_tiles: {self.min_dynamic_tiles}")
        logger.info(f"max_dynamic_tiles: {self.max_dynamic_tiles}")

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Overrides the default
        [`~PretrainedConfig.to_dict`] to expand the nested sub-configs.

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        output["use_backbone_lora"] = self.use_backbone_lora
        output["use_llm_lora"] = self.use_llm_lora
        output["pad2square"] = self.pad2square
        output["select_layer"] = self.select_layer
        output["force_image_size"] = self.force_image_size
        output["downsample_ratio"] = self.downsample_ratio
        output["template"] = self.template
        output["dynamic_image_size"] = self.dynamic_image_size
        output["use_thumbnail"] = self.use_thumbnail
        output["min_dynamic_tiles"] = self.min_dynamic_tiles
        output["max_dynamic_tiles"] = self.max_dynamic_tiles
        output["tie_word_embeddings"] = self.tie_word_embeddings
        output["_attn_implementation"] = self._attn_implementation
        output["_attn_implementation_autoset"] = self._attn_implementation_autoset
        output["use_pixel_shuffle"] = self.use_pixel_shuffle
        output["mlp_connector_layers"] = self.mlp_connector_layers
        return output
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/groot/eagle2_hg_model/configuration_eagle2_5_vl.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/groot/eagle2_hg_model/image_processing_eagle2_5_vl_fast.py | # --------------------------------------------------------
# NVIDIA
# Copyright (c) 2025 NVIDIA
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from __future__ import annotations
# copy from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava_onevision/image_processing_llava_onevision_fast.py
from transformers.image_processing_utils import (
BatchFeature,
get_patch_output_size,
)
from transformers.image_processing_utils_fast import (
BaseImageProcessorFast,
ImagesKwargs,
group_images_by_shape,
reorder_images,
)
from transformers.image_utils import (
IMAGENET_STANDARD_MEAN, # 0.5, 0.5, 0.5
IMAGENET_STANDARD_STD, # 0.5, 0.5, 0.5
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
get_image_size,
make_flat_list_of_images,
validate_kwargs,
)
from transformers.processing_utils import Unpack
from transformers.utils import (
TensorType,
add_start_docstrings,
is_torch_available,
is_torchvision_v2_available,
)
from transformers.video_utils import VideoInput
if is_torch_available():
import torch
if is_torchvision_v2_available():
from torchvision.transforms.v2 import functional as F # noqa: N812
from transformers.image_utils import pil_torch_interpolation_mapping
else:
from torchvision.transforms import functional as F # noqa: N812
def crop(img: torch.Tensor, left: int, top: int, right: int, bottom: int) -> torch.Tensor:
    """Crop the given image tensor.

    Args:
        img (torch.Tensor): Image to be cropped, either (C, H, W) or (H, W).
        left (int): The left coordinate of the crop box.
        top (int): The top coordinate of the crop box.
        right (int): The right coordinate of the crop box.
        bottom (int): The bottom coordinate of the crop box.

    Returns:
        torch.Tensor: Cropped image with the same number of dimensions.

    Raises:
        TypeError: If `img` is not a torch.Tensor.
        ValueError: If `img` is not 2- or 3-dimensional, or the crop box is
            out of bounds or degenerate.
    """
    if not isinstance(img, torch.Tensor):
        raise TypeError(f"img should be torch.Tensor. Got {type(img)}")
    if img.ndim not in [2, 3]:
        raise ValueError(f"Image should have 2 or 3 dimensions. Got {img.ndim}")
    # The spatial axes are the last two for both (H, W) and (C, H, W) layouts.
    # (The previous code indexed shape[1]/shape[2] and sliced `img[:, ...]`,
    # which raised IndexError / cropped the wrong axes for 2D inputs even
    # though ndim == 2 was accepted above.)
    img_height = img.shape[-2]
    img_width = img.shape[-1]
    if top < 0 or left < 0 or bottom > img_height or right > img_width:
        raise ValueError("Crop coordinates out of bounds")
    if top >= bottom or left >= right:
        raise ValueError("Invalid crop coordinates")
    return img[..., top:bottom, left:right]
class Eagle25VLFastImageProcessorKwargs(ImagesKwargs):
    """Extra keyword arguments accepted by `Eagle25VLImageProcessorFast`."""

    # Maximum number of tiles an image may be split into during dynamic tiling.
    max_dynamic_tiles: int | None
    # Minimum number of tiles an image may be split into during dynamic tiling.
    min_dynamic_tiles: int | None
    # Whether to append a downscaled thumbnail of the whole image to the tiles.
    use_thumbnail: bool | None
    # Whether to pad (rather than stretch) the image to the tiling resolution.
    pad_during_tiling: bool | None
    # Whether to zero-pad the patch dimension across the batch.
    do_pad: bool | None
@add_start_docstrings(
    "Constructs a fast ConvNeXT image processor. Based on [`SiglipImageProcessor`] with incorporation of processing each video frame.",
    # BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, TODO: this was depreciated from transformers remove!
    """
    image_grid_pinpoints (`List[List[int]]`, *optional*):
        A list of possible resolutions to use for processing high resolution images. The best resolution is selected
        based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
        method. Not used for processing videos.
    do_pad (`bool`, *optional*):
        Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
        number of patches in the batch. Padding will be applied to the bottom and right with zeros.
    """,
)
class Eagle25VLImageProcessorFast(BaseImageProcessorFast):
    """Fast (torchvision-backed) image processor for Eagle 2.5 VL.

    Splits each high-resolution image into dynamic tiles of `size` (optionally
    plus a thumbnail of the full image), then resizes / rescales / normalizes
    the tiles and batches them.
    """

    # Default preprocessing settings; each can be overridden at construction
    # time or per call via `preprocess(**kwargs)`.
    resample = PILImageResampling.BICUBIC
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    size = {"height": 448, "width": 448}
    default_to_square = False
    crop_size = None
    do_resize = True
    do_center_crop = None
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    do_pad = True
    max_dynamic_tiles = 12
    min_dynamic_tiles = 1
    use_thumbnail = True
    pad_during_tiling = False
    valid_kwargs = Eagle25VLFastImageProcessorKwargs
    model_input_names = ["pixel_values_videos"]

    def __init__(self, **kwargs: Unpack[Eagle25VLFastImageProcessorKwargs]):
        super().__init__(**kwargs)

    # NOTE(review): because the `preprocess` override below is commented out,
    # this `add_start_docstrings` decorator currently attaches to
    # `_prepare_images_structure` — presumably unintended; confirm.
    @add_start_docstrings(
        # BASE_IMAGE_PROCESSOR_FAST_DOCSTRING_PREPROCESS, TODO: this was depreciated from transformers remove!
        """
        max_dynamic_tiles (`int`, *optional*):
            The maximum number of dynamic tiles to use for processing high resolution images.
        min_dynamic_tiles (`int`, *optional*):
            The minimum number of dynamic tiles to use for processing high resolution images.
        use_thumbnail (`bool`, *optional*):
            Whether to use a thumbnail for processing high resolution images.
        pad_during_tiling (`bool`, *optional*):
            Whether to pad the image during tiling.
        do_pad (`bool`, *optional*):
            Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
            number of patches in the batch. Padding will be applied to the bottom and right with zeros.
        """,
    )
    # NOTE(YL): we will overload the preprocess method to add the image_flags
    # def preprocess(
    #     self, images: ImageInput, **kwargs: Unpack[Eagle25VLFastImageProcessorKwargs]
    # ) -> BatchFeature:
    #     return super().preprocess(images, **kwargs)
    def _prepare_images_structure(
        self,
        images: ImageInput,
        expected_ndims: int = 3,
    ) -> ImageInput:
        """
        Prepare the images structure for processing.
        Args:
            images (`ImageInput`):
                The input images to process.
            expected_ndims (`int`, *optional*, defaults to 3):
                Expected number of dimensions for the images (added for transformers >=4.53.0 compatibility).
        Returns:
            `ImageInput`: The images with a valid nesting.
        """
        return make_flat_list_of_images(images)

    def _resize_for_patching(
        self,
        image: torch.Tensor,
        target_resolution: tuple,
        interpolation: F.InterpolationMode,
        input_data_format: ChannelDimension,
    ) -> torch.Tensor:
        """
        Resizes an image to a target resolution while maintaining aspect ratio.
        Args:
            image ("torch.Tensor"):
                The input image.
            target_resolution (tuple):
                The target resolution (height, width) of the image.
            interpolation (`InterpolationMode`):
                Resampling filter to use if resizing the image.
            input_data_format (`ChannelDimension` or `str`):
                The channel dimension format of the input image.
        Returns:
            "torch.Tensor": The resized and padded image.
        """
        # get_patch_output_size computes the largest (h, w) fitting inside
        # target_resolution that preserves the image's aspect ratio.
        new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)
        # Resize the image
        resized_image = F.resize(image, (new_height, new_width), interpolation=interpolation)
        return resized_image

    def find_closest_aspect_ratio(self, aspect_ratio, target_ratios, width, height, image_size):
        """
        Select the tiling grid (cols, rows) from `target_ratios` that best fits
        the input image, scoring candidates by both aspect-ratio match and
        covered area (previous versions considered ratio only).
        """
        best_factor = float("-inf")
        best_ratio = (1, 1)
        area = width * height
        for ratio in target_ratios:
            target_aspect_ratio = ratio[0] / ratio[1]
            # ratio_diff = abs(aspect_ratio - target_aspect_ratio)
            # area_ratio = (ratio[0] * ratio[1] * image_size * image_size) / area
            """
            new area > 60% of original image area is enough.
            """
            # Area term is clamped at 0.6, so any grid covering >=60% of the
            # original area scores equally on area; the ratio term (<= 1.0)
            # then favors grids whose aspect ratio matches the image.
            factor_based_on_area_n_ratio = min(
                (ratio[0] * ratio[1] * image_size * image_size) / area, 0.6
            ) * min(target_aspect_ratio / aspect_ratio, aspect_ratio / target_aspect_ratio)
            if factor_based_on_area_n_ratio > best_factor:
                best_factor = factor_based_on_area_n_ratio
                best_ratio = ratio
        return best_ratio

    def _pad_for_patching(
        self, image: torch.Tensor, target_resolution: tuple, input_data_format: ChannelDimension
    ) -> torch.Tensor:
        """
        Pad an image to a target resolution while maintaining aspect ratio.
        """
        target_height, target_width = target_resolution
        new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)
        # Center the resized content: equal padding on left/right and top/bottom.
        paste_x = (target_width - new_width) // 2
        paste_y = (target_height - new_height) // 2
        padded_image = F.pad(image, padding=[paste_x, paste_y, paste_x, paste_y])
        return padded_image

    def _get_image_patches(
        self,
        image: torch.Tensor,
        min_num: int,
        max_num: int,
        size: tuple,
        tile_size: int,
        use_thumbnail: bool,
        interpolation: F.InterpolationMode,
        pad_during_tiling: bool,
    ) -> list[torch.Tensor]:
        """Split one image into `tile_size`-square tiles (plus optional thumbnail).

        The tiling grid is chosen by `find_closest_aspect_ratio` among all grids
        whose tile count lies in [min_num, max_num].
        """
        image_size = get_image_size(image, channel_dim=ChannelDimension.FIRST)
        orig_height, orig_width = image_size
        aspect_ratio = orig_width / orig_height
        # calculate the existing image aspect ratio
        target_ratios = {
            (i, j)
            for n in range(min_num, max_num + 1)
            for i in range(1, n + 1)
            for j in range(1, n + 1)
            if i * j <= max_num and i * j >= min_num
        }
        target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
        # find the closest aspect ratio to the target
        target_aspect_ratio = self.find_closest_aspect_ratio(
            aspect_ratio, target_ratios, orig_width, orig_height, tile_size
        )
        # calculate the target width and height
        target_width = tile_size * target_aspect_ratio[0]
        target_height = tile_size * target_aspect_ratio[1]
        blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
        if pad_during_tiling:
            # Preserve aspect ratio and pad to the grid resolution.
            resized_image = self._resize_for_patching(
                image,
                (target_height, target_width),
                interpolation=interpolation,
                input_data_format=ChannelDimension.FIRST,
            )
            padded_image = self._pad_for_patching(
                resized_image,
                (target_height, target_width),
                input_data_format=ChannelDimension.FIRST,
            )
            image_used_to_split = padded_image
        else:
            # Stretch directly to the grid resolution.
            image_used_to_split = F.resize(image, (target_height, target_width), interpolation=interpolation)
        processed_tiles = []
        for i in range(blocks):
            # Tile boxes enumerated row-major: (left, top, right, bottom).
            box = (
                (i % (target_width // tile_size)) * tile_size,
                (i // (target_width // tile_size)) * tile_size,
                ((i % (target_width // tile_size)) + 1) * tile_size,
                ((i // (target_width // tile_size)) + 1) * tile_size,
            )
            # split the image
            split_img = crop(image_used_to_split, box[0], box[1], box[2], box[3])
            processed_tiles.append(split_img)
        assert len(processed_tiles) == blocks
        if use_thumbnail and len(processed_tiles) != 1:
            # Append a whole-image thumbnail so the model also sees global context.
            thumbnail_img = F.resize(image, (tile_size, tile_size), interpolation=interpolation)
            processed_tiles.append(thumbnail_img)
        return processed_tiles

    def _pad_for_batching(
        self,
        pixel_values: list[torch.Tensor],
    ) -> list[torch.Tensor]:
        """
        Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches.
        Args:
            pixel_values (`List[torch.Tensor]`):
                An array of pixel values of each images of shape (`batch_size`, `num_patches`, `image_in_3D`)
        Returns:
            List[`torch.Tensor`]: The padded images.
        """
        max_patch = max(len(x) for x in pixel_values)
        pixel_values = [
            torch.nn.functional.pad(image, pad=[0, 0, 0, 0, 0, 0, 0, max_patch - image.shape[0]])
            for image in pixel_values
        ]
        return pixel_values

    def _preprocess(
        self,
        images: list[torch.Tensor],
        do_resize: bool,
        size: SizeDict,
        max_dynamic_tiles: int,
        min_dynamic_tiles: int,
        use_thumbnail: bool,
        pad_during_tiling: bool,
        interpolation: F.InterpolationMode | None,
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        do_pad: bool,
        return_tensors: str | TensorType | None,
        pad_size: SizeDict | None = None,  # Added for transformers >=4.53.0 compatibility
        disable_grouping: bool | None = None,  # Added for transformers >=4.53.0 compatibility
    ) -> BatchFeature:
        """Tile, resize, rescale and normalize a list of images.

        Returns a BatchFeature with "pixel_values" (all tiles of all images
        concatenated along the first dimension when tensors are requested) and
        "image_sizes" (original (height, width) per image).
        """
        processed_images = []
        image_sizes = []
        # Determine the size tuple
        if size and size.height and size.width:
            size_tuple = (size.height, size.width)
        else:
            size_tuple = (size.shortest_edge, size.shortest_edge)
        # Determine the patch size
        if crop_size and crop_size.height:
            tile_size = crop_size.height
        elif size and size.height:
            tile_size = size.height
        else:
            tile_size = size.shortest_edge
        for image in images:
            image_patches = self._get_image_patches(
                image,
                min_num=min_dynamic_tiles,
                max_num=max_dynamic_tiles,
                size=size_tuple,
                tile_size=tile_size,
                use_thumbnail=use_thumbnail,
                interpolation=interpolation,
                pad_during_tiling=pad_during_tiling,
            )
            # Group images by size for batched processing
            processed_image_patches_grouped = {}
            # Added for transformers >=4.53.0 compatibility
            grouped_image_patches, grouped_image_patches_index = group_images_by_shape(
                image_patches,
                disable_grouping=disable_grouping,
            )
            for shape, stacked_image_patches in grouped_image_patches.items():
                if do_resize:
                    stacked_image_patches = self.resize(
                        image=stacked_image_patches,
                        size=size,
                        interpolation=interpolation,
                    )
                if do_center_crop:
                    stacked_image_patches = self.center_crop(stacked_image_patches, crop_size)
                # Fused rescale and normalize
                stacked_image_patches = self.rescale_and_normalize(
                    stacked_image_patches,
                    do_rescale,
                    rescale_factor,
                    do_normalize,
                    image_mean,
                    image_std,
                )
                processed_image_patches_grouped[shape] = stacked_image_patches
            # Restore the original tile order after shape-grouped processing.
            processed_image_patches = reorder_images(
                processed_image_patches_grouped, grouped_image_patches_index
            )
            processed_image_patches = (
                torch.stack(processed_image_patches, dim=0) if return_tensors else processed_image_patches
            )
            processed_images.append(processed_image_patches)
            image_sizes.append(get_image_size(image, ChannelDimension.FIRST))
        if do_pad:
            # Zero-pad so every image contributes the same number of tiles.
            processed_images = self._pad_for_batching(processed_images)
        # processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
        processed_images = torch.cat(processed_images, dim=0) if return_tensors else processed_images
        return BatchFeature(
            data={"pixel_values": processed_images, "image_sizes": image_sizes},
            tensor_type=return_tensors,
        )

    def preprocess(
        self,
        images: ImageInput,
        videos: VideoInput = None,
        **kwargs: Unpack[Eagle25VLFastImageProcessorKwargs],
    ) -> BatchFeature:
        """Validate and normalize kwargs, prepare the inputs, and run `_preprocess`.

        Images take precedence over videos when both are provided.
        NOTE(review): returns None implicitly when both `images` and `videos`
        are None — confirm callers never hit that case.
        """
        validate_kwargs(
            captured_kwargs=kwargs.keys(),
            valid_processor_keys=self.valid_kwargs.__annotations__.keys(),
        )
        # Set default kwargs from self. This ensures that if a kwarg is not provided
        # by the user, it gets its default value from the instance, or is set to None.
        for kwarg_name in self.valid_kwargs.__annotations__:
            kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None))
        # Extract parameters that are only used for preparing the input images
        do_convert_rgb = kwargs.pop("do_convert_rgb")
        input_data_format = kwargs.pop("input_data_format")
        device = kwargs.pop("device")
        # Prepare input images
        # transformers >= 4.53.0: uses _prepare_image_like_inputs instead of _prepare_input_images
        if images is not None:
            images = self._prepare_image_like_inputs(
                images=images,
                do_convert_rgb=do_convert_rgb,
                input_data_format=input_data_format,
                device=device,
            )
        if videos is not None:
            videos = self._prepare_image_like_inputs(
                images=videos,
                do_convert_rgb=do_convert_rgb,
                input_data_format=input_data_format,
                device=device,
            )
        # Update kwargs that need further processing before being validated
        kwargs = self._further_process_kwargs(**kwargs)
        # Validate kwargs
        self._validate_preprocess_kwargs(**kwargs)
        # torch resize uses interpolation instead of resample
        # Added for transformers >=4.53.0 compatibility
        resample = kwargs.pop("resample", self.resample)
        kwargs["interpolation"] = (
            pil_torch_interpolation_mapping[resample]
            if isinstance(resample, PILImageResampling | int)
            else resample
        )
        # Filter kwargs to only include those accepted by _preprocess
        valid_preprocess_kwargs = {
            "do_resize",
            "size",
            "max_dynamic_tiles",
            "min_dynamic_tiles",
            "use_thumbnail",
            "pad_during_tiling",
            "interpolation",
            "do_center_crop",
            "crop_size",
            "do_rescale",
            "rescale_factor",
            "do_normalize",
            "image_mean",
            "image_std",
            "do_pad",
            "return_tensors",
            "pad_size",
            "disable_grouping",
        }
        filtered_kwargs = {k: v for k, v in kwargs.items() if k in valid_preprocess_kwargs}
        if images is not None:
            return self._preprocess(images, **filtered_kwargs)
        elif videos is not None:
            return self._preprocess(videos, **filtered_kwargs)
__all__ = ["Eagle25VLImageProcessorFast"]
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/groot/eagle2_hg_model/image_processing_eagle2_5_vl_fast.py",
"license": "Apache License 2.0",
"lines": 443,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/groot/eagle2_hg_model/modeling_eagle2_5_vl.py | # --------------------------------------------------------
# NVIDIA
# Copyright (c) 2025 NVIDIA
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import inspect
import torch
import torch.utils.checkpoint as cp
from peft import LoraConfig, get_peft_model
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import GenerationConfig
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.models.llama.modeling_llama import LlamaForCausalLM
from transformers.models.qwen2.modeling_qwen2 import Qwen2ForCausalLM
from transformers.models.qwen3.modeling_qwen3 import Qwen3ForCausalLM
from transformers.models.siglip.modeling_siglip import SiglipVisionModel
from transformers.utils import add_start_docstrings, logging
from .configuration_eagle2_5_vl import Eagle25VLConfig
logger = logging.get_logger(__name__)
# copy from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava_onevision/modeling_llava_onevision.py#L241C1-L280C1
EAGLE2_5_VL_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`Eagle25VLConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
@add_start_docstrings(
    "The bare Eagle2_5_VL Model outputting raw hidden-states without any specific head on top.",
    EAGLE2_5_VL_START_DOCSTRING,
)
class Eagle25VLPreTrainedModel(PreTrainedModel):
    """Base class wiring Eagle2.5-VL into the Hugging Face `PreTrainedModel` machinery.

    Declares the config class, weight-initialization scheme, and the capability
    flags that `transformers` inspects (flash-attention 2, SDPA, cache classes,
    gradient checkpointing, ...).
    """
    config_class = Eagle25VLConfig
    base_model_prefix = "model"
    main_input_name = "input_ids"
    supports_gradient_checkpointing = True
    # Layers that must not be split across devices by model-parallel sharding.
    _no_split_modules = [
        "Qwen2DecoderLayer",
        "LlamaDecoderLayer",
        "Siglip2EncoderLayer",
        "SiglipEncoderLayer",
    ]
    # Cached key/values stay on their original device during dispatch.
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_cache_class = True
    _supports_static_cache = True
    _supports_quantized_cache = True
    _supports_sdpa = True
    def _init_weights(self, module):
        """Initialize Linear/Conv2d/Embedding weights from N(0, initializer_range).

        Biases and the padding-index embedding row are zeroed.
        """
        std = self.config.initializer_range
        if isinstance(module, nn.Linear | nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
class Eagle25VLForConditionalGeneration(Eagle25VLPreTrainedModel, GenerationMixin):
    """Eagle2.5 vision-language model: SigLIP vision tower + MLP connector + causal LM.

    Image features are produced by the vision tower, optionally pixel-shuffled
    to reduce the visual token count, projected into the LLM embedding space by
    `mlp1`, and spliced into the text embedding sequence at positions holding
    the image-context token (`image_token_index`).
    """
    config_class = Eagle25VLConfig
    def __init__(self, config: Eagle25VLConfig, vision_model=None, language_model=None):
        """Build the sub-models from `config`, or adopt externally constructed ones.

        Args:
            config: Full Eagle2.5-VL configuration (vision + text sub-configs).
            vision_model: Optional pre-built vision tower; when None, built from
                `config.vision_config` (only SigLIP is supported).
            language_model: Optional pre-built causal LM; when None, built from
                `config.text_config.architectures[0]` (Llama/Qwen2/Qwen3).
        """
        super().__init__(config)
        image_size = config.force_image_size or config.vision_config.image_size
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        # Number of LLM tokens one image tile expands to; pixel shuffle shrinks
        # it by a factor of downsample_ratio**2.
        if config.use_pixel_shuffle:
            self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio**2))
        else:
            self.num_image_token = int((image_size // patch_size) ** 2)
        self.select_layer = config.select_layer
        self.downsample_ratio = config.downsample_ratio
        self.loss_version = config.loss_version
        self.mlp_checkpoint = config.mlp_checkpoint
        self.use_pixel_shuffle = config.use_pixel_shuffle
        self.mlp_connector_layers = config.mlp_connector_layers
        logger.info(f"num_image_token: {self.num_image_token}")
        logger.info(f"mlp_checkpoint: {self.mlp_checkpoint}")
        if vision_model is not None:
            self.vision_model = vision_model
        else:
            if config.vision_config.model_type == "siglip_vision_model":
                # Forced here; the assertion for Qwen2 below makes the same requirement explicit.
                config.vision_config._attn_implementation = "flash_attention_2"
                self.vision_model = SiglipVisionModel(config.vision_config)
            else:
                raise NotImplementedError(f"{config.vision_config.model_type} is not implemented.")
        if language_model is not None:
            self.language_model = language_model
        else:
            if config.text_config.architectures[0] == "LlamaForCausalLM":
                self.language_model = LlamaForCausalLM(config.text_config)
            elif config.text_config.architectures[0] == "Phi3ForCausalLM":
                raise NotImplementedError("Phi3 is not implemented.")
                # self.language_model = Phi3ForCausalLM(config.text_config)
            elif config.text_config.architectures[0] == "Qwen2ForCausalLM":
                assert config.text_config._attn_implementation == "flash_attention_2", (
                    f"Qwen2 must use flash_attention_2 but got {config.text_config._attn_implementation}"
                )
                self.language_model = Qwen2ForCausalLM(config.text_config)
            elif config.text_config.architectures[0] == "Qwen3ForCausalLM":
                self.language_model = Qwen3ForCausalLM(config.text_config)
            else:
                raise NotImplementedError(f"{config.text_config.architectures[0]} is not implemented.")
        vit_hidden_size = config.vision_config.hidden_size
        llm_hidden_size = config.text_config.hidden_size
        # Connector `mlp1` maps ViT features into the LLM hidden size. After
        # pixel shuffle the channel dim grows by int(1/downsample_ratio)**2.
        if config.mlp_connector_layers == 2:
            self.mlp1 = nn.Sequential(
                nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
                nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
                nn.GELU(),
                nn.Linear(llm_hidden_size, llm_hidden_size),
            )
        elif config.mlp_connector_layers == 1 and config.use_pixel_shuffle:
            self.mlp1 = nn.Sequential(
                nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
            )
        elif config.mlp_connector_layers == 1 and not config.use_pixel_shuffle:
            self.mlp1 = nn.Sequential(
                nn.Linear(vit_hidden_size, llm_hidden_size),
            )
        else:
            raise NotImplementedError(f"{config.mlp_connector_layers} is not implemented.")
        self.image_token_index = config.image_token_index
        # NOTE(review): neftune_alpha is initialized but never read in this file
        # - presumably consumed by training code elsewhere; confirm before removing.
        self.neftune_alpha = None
        # LoRA config convention: use_*_lora doubles as the LoRA rank when truthy.
        if config.use_backbone_lora:
            self.wrap_backbone_lora(r=config.use_backbone_lora, lora_alpha=2 * config.use_backbone_lora)
        self.use_llm_lora = config.use_llm_lora
        if config.use_llm_lora:
            self.wrap_llm_lora(r=config.use_llm_lora, lora_alpha=2 * config.use_llm_lora)
        self.check_forward_kwargs()
    def check_forward_kwargs(self):
        """Assert that `forward` declares no **kwargs parameter."""
        # We intentionally avoid using **kwargs in forward because Hugging Face Transformers
        # has special handling for functions with **kwargs parameters that would affect
        # how our model is processed during training and inference.
        forward_params = inspect.signature(self.forward).parameters
        assert not any(k.kind == inspect.Parameter.VAR_KEYWORD for k in forward_params.values())
    def wrap_backbone_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
        """Wrap the vision tower with LoRA adapters on its attention/MLP projections."""
        lora_config = LoraConfig(
            r=r,
            target_modules=[
                "self_attn.q_proj",
                "self_attn.k_proj",
                "self_attn.v_proj",
                "self_attn.out_proj",
                "mlp.fc1",
                "mlp.fc2",
            ],
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
        )
        self.vision_model = get_peft_model(self.vision_model, lora_config)
        self.vision_model.print_trainable_parameters()
    def wrap_llm_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
        """Wrap the language model with LoRA adapters (CAUSAL_LM task type)."""
        lora_config = LoraConfig(
            r=r,
            target_modules=[
                "self_attn.q_proj",
                "self_attn.k_proj",
                "self_attn.v_proj",
                "self_attn.o_proj",
                "mlp.gate_proj",
                "mlp.down_proj",
                "mlp.up_proj",
            ],
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
            task_type="CAUSAL_LM",
        )
        self.language_model = get_peft_model(self.language_model, lora_config)
        # Required so gradients reach LoRA layers when base embeddings are frozen.
        self.language_model.enable_input_require_grads()
        self.language_model.print_trainable_parameters()
        self.use_llm_lora = True
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        input_ids: torch.LongTensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        image_flags: torch.LongTensor | None = None,
        past_key_values: list[torch.FloatTensor] | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        num_tiles_list: list[torch.Tensor] | None = None,
    ) -> tuple | CausalLMOutputWithPast:
        """Run the VLM forward pass.

        Visual embeddings extracted from `pixel_values` replace the text
        embeddings wherever `input_ids` equals `image_token_index`; the fused
        sequence is fed to the language model. When `labels` is given, a
        standard shifted next-token cross-entropy loss is computed.

        `num_tiles_list` is accepted for interface compatibility but unused here.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        input_embeds = self.language_model.get_input_embeddings()(input_ids)
        vit_embeds = self.extract_feature(pixel_values)
        if image_flags is not None:
            # Keep only embeddings of tiles flagged 1 (real images, not padding).
            image_flags = image_flags.view(-1)
            vit_embeds = vit_embeds[image_flags == 1]
        b, n, c = input_embeds.shape
        input_embeds = input_embeds.reshape(b * n, c)
        input_ids = input_ids.reshape(b * n)
        selected = input_ids == self.image_token_index
        try:
            # "* 0.0 +" keeps the assignment differentiable w.r.t. vit_embeds.
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, c)
        except Exception as e:
            # Shape-mismatch fallback (e.g. truncated image tokens): fill as many
            # positions as available instead of crashing the step.
            vit_embeds = vit_embeds.reshape(-1, c)
            print(
                f"warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, "
                f"vit_embeds.shape={vit_embeds.shape}"
            )
            n_token = selected.sum()
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token]
        input_embeds = input_embeds.reshape(b, n, c)
        outputs = self.language_model(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        logits = outputs.logits
        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
    def pixel_shuffle(self, x, scale_factor=0.5):
        """Space-to-channel shuffle on an NWHC feature map.

        With scale 0.5 an (N, W, H, C) map becomes (N, W/2, H/2, 4C), reducing
        the number of visual tokens by 4x while preserving information.
        """
        n, w, h, c = x.size()
        # N, W, H, C --> N, W, H * scale, C // scale
        x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
        # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
        x = x.permute(0, 2, 1, 3).contiguous()
        # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
        x = x.view(n, int(h * scale_factor), int(w * scale_factor), int(c / (scale_factor * scale_factor)))
        x = x.permute(0, 2, 1, 3).contiguous()
        return x
    def extract_feature(self, pixel_values):
        """Encode image tiles into LLM-space embeddings: vision tower -> optional pixel shuffle -> mlp1."""
        if self.select_layer == -1:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values, output_hidden_states=False, return_dict=True
            )
            if hasattr(vit_embeds, "last_hidden_state"):
                vit_embeds = vit_embeds.last_hidden_state
        else:
            # Take an intermediate encoder layer instead of the final output.
            vit_embeds = self.vision_model(
                pixel_values=pixel_values, output_hidden_states=True, return_dict=True
            ).hidden_states[self.select_layer]
        if self.use_pixel_shuffle:
            # Token grid is assumed square: recover (h, w) from sequence length.
            h = w = int(vit_embeds.shape[1] ** 0.5)
            vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
            vit_embeds = self.pixel_shuffle(
                vit_embeds, scale_factor=self.downsample_ratio
            )  # torch.Size([B, 1024, 1024]) -> torch.Size([B, 16, 16, 4096])
            vit_embeds = vit_embeds.reshape(
                vit_embeds.shape[0], -1, vit_embeds.shape[-1]
            )  # torch.Size([B, 16, 16, 4096]) -> torch.Size([B, 256, 4096])
        if self.mlp_checkpoint and vit_embeds.requires_grad:
            # Gradient-checkpoint the connector to save activation memory.
            vit_embeds = cp.checkpoint(self.mlp1, vit_embeds)
        else:
            vit_embeds = self.mlp1(vit_embeds)
        return vit_embeds
    @torch.no_grad()
    def generate(
        self,
        pixel_values: torch.FloatTensor | None = None,
        input_ids: torch.FloatTensor | None = None,
        attention_mask: torch.LongTensor | None = None,
        visual_features: torch.FloatTensor | None = None,
        generation_config: GenerationConfig | None = None,
        output_hidden_states: bool | None = None,
        image_sizes: list[tuple[int, int]] | None = None,
        **generate_kwargs,
    ) -> torch.LongTensor:
        """Generate text conditioned on optional images.

        Precomputed `visual_features` take precedence over re-encoding
        `pixel_values`. Image embeddings are spliced into the prompt at
        image-token positions before delegating to the LM's `generate`.
        `image_sizes` is accepted for interface compatibility but unused here.
        """
        if pixel_values is not None:
            if visual_features is not None:
                vit_embeds = visual_features
            else:
                vit_embeds = self.extract_feature(pixel_values)
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            b, n, c = input_embeds.shape
            input_embeds = input_embeds.reshape(b * n, c)
            input_ids = input_ids.reshape(b * n)
            selected = input_ids == self.config.image_token_index
            # The prompt must contain at least one image-context token.
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, c).to(input_embeds.device)
            input_embeds = input_embeds.reshape(b, n, c)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
        if "use_cache" not in generate_kwargs:
            generate_kwargs["use_cache"] = True
        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            **generate_kwargs,
        )
        return outputs
    # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.get_input_embeddings
    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()
    # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.set_input_embeddings
    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)
    # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.get_output_embeddings
    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()
    # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        self.language_model.set_output_embeddings(new_embeddings)
    # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.set_decoder
    def set_decoder(self, decoder):
        self.language_model.set_decoder(decoder)
    # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.get_decoder
    def get_decoder(self):
        return self.language_model.get_decoder()
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/groot/eagle2_hg_model/modeling_eagle2_5_vl.py",
"license": "Apache License 2.0",
"lines": 338,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/groot/eagle2_hg_model/processing_eagle2_5_vl.py | # Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Eagle25VL.
copy from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava_onevision/processing_llava_onevision.py
"""
import base64
import os
import re
from io import BytesIO
import requests
import torch
from PIL import Image
from transformers.feature_extraction_utils import BatchFeature
from transformers.image_utils import ImageInput
from transformers.processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
from transformers.utils import logging
from transformers.video_utils import VideoInput
logger = logging.get_logger(__name__)
# Video frame-sampling defaults (copied from the Qwen2.5-VL vision utils).
# NOTE(review): these appear unused in this module's visible code - presumably
# consumed by video-sampling helpers elsewhere; confirm before removing.
FRAME_FACTOR = 2
FPS = 2.0
FPS_MIN_FRAMES = 4
FPS_MAX_FRAMES = 256
def to_rgb(pil_image: Image.Image) -> Image.Image:
    """Return *pil_image* as an RGB image.

    RGBA inputs are composited onto a solid white canvas using their alpha
    channel as the paste mask (so transparency becomes white); every other
    mode goes through PIL's standard ``convert("RGB")``.
    """
    if pil_image.mode != "RGBA":
        return pil_image.convert("RGB")
    canvas = Image.new("RGB", pil_image.size, (255, 255, 255))
    alpha_channel = pil_image.split()[3]
    canvas.paste(pil_image, mask=alpha_channel)
    return canvas
def fetch_image(ele: dict[str, str | Image.Image]) -> Image.Image:
    """Load the image referenced by a vision-content element and return it as RGB.

    Accepts an already-decoded ``PIL.Image.Image``, an ``http(s)://`` URL,
    a ``file://`` path, a ``data:image;base64,...`` URI, or a bare local path
    under the element's ``"image"`` (or ``"image_url"``) key. An optional
    ``"scale_factor"`` key resizes the result by that factor.

    Raises:
        ValueError: if the input cannot be recognized as one of the above.
    """
    image = ele["image"] if "image" in ele else ele["image_url"]
    image_obj = None
    if isinstance(image, Image.Image):
        image_obj = image
    elif image.startswith("http://") or image.startswith("https://"):
        # NOTE(review): the HTTP status is not checked; an error page body
        # would only fail later inside Image.open with a less obvious error.
        response = requests.get(image, stream=True, timeout=10)
        image_obj = Image.open(BytesIO(response.content))
    elif image.startswith("file://"):
        image_obj = Image.open(image[7:])  # strip the "file://" scheme prefix
    elif image.startswith("data:image"):
        # Only the base64 data-URI form is supported; otherwise falls through
        # with image_obj still None and raises below.
        if "base64," in image:
            _, base64_data = image.split("base64,", 1)
            data = base64.b64decode(base64_data)
            image_obj = Image.open(BytesIO(data))
    else:
        # Bare local filesystem path.
        image_obj = Image.open(image)
    if image_obj is None:
        raise ValueError(
            f"Unrecognized image input, support local path, http url, base64 and PIL.Image, got {image}"
        )
    image = to_rgb(image_obj)
    if "scale_factor" in ele:
        scale_factor = ele["scale_factor"]
        # NOTE(review): a float scale_factor yields float dimensions here, which
        # PIL's resize may reject - confirm callers only pass integers.
        image = image.resize((image.width * scale_factor, image.height * scale_factor), Image.BILINEAR)
    return image
class Eagle25VLProcessorKwargs(ProcessingKwargs, total=False):
    # see processing_utils.ProcessingKwargs documentation for usage.
    # Defaults merged with caller kwargs by ProcessorMixin._merge_kwargs:
    # tokenizer padding is off, and videos use at most one dynamic tile
    # per frame unless the caller overrides "max_dynamic_tiles".
    _defaults = {
        "text_kwargs": {
            "padding": False,
        },
        "images_kwargs": {},
        "videos_kwargs": {"max_dynamic_tiles": 1},
    }
class Eagle25VLProcessor(ProcessorMixin):
    r"""
    Constructs a Eagle25VL processor which wraps a Eagle25VL video processor, Eagle25VL image processor and a Eagle25VL tokenizer into a single processor.
    [`Eagle25VLProcessor`] offers all the functionalities of [`Eagle25VLVideoProcessor`], [`Eagle25VLImageProcessor`] and [`Eagle25VLTokenizer`]. See the
    [`~Eagle25VLVideoProcessor.__call__`], [`~Eagle25VLProcessor.__call__`] and [`~Eagle25VLProcessor.decode`] for more information.
    Args:
        image_processor ([`LlavaOnevisionImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`LlamaTokenizerFast`], *optional*):
            The tokenizer is a required input.
        num_image_tokens (`int`, *optional*):
            Number of image tokens for one image that will be returned by the vision tower.
        vision_feature_select_strategy (`str`, *optional*):
            The feature selection strategy used to select the vision feature from the vision backbone.
            Should be same as in model's config
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
        image_token (`str`, *optional*, defaults to `"<image>"`):
            Special token used to denote image location.
        video_token (`str`, *optional*, defaults to `"<video>"`):
            Special token used to denote video location.
    """
    attributes = ["image_processor", "tokenizer"]
    valid_kwargs = [
        "chat_template",
        "num_image_tokens",
        "vision_feature_select_strategy",
        "image_token",
        "video_token",
        "images_kwargs",
        "videos_kwargs",
        "text_kwargs",
    ]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
        vision_feature_select_strategy=None,
        chat_template=None,
        image_token="<IMG_CONTEXT>",  # nosec: B107
        video_token="<IMG_CONTEXT>",  # nosec: B107
        tokens_per_tile=256,
        image_placeholder="image",
        video_placeholder="video",
        image_start_token="<img>",
        image_end_token="</img>",
        **kwargs,
    ):
        self.vision_feature_select_strategy = vision_feature_select_strategy
        # Prefer tokens/ids declared on the tokenizer; fall back to the arguments.
        self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token
        self.video_token = tokenizer.video_token if hasattr(tokenizer, "video_token") else video_token
        self.image_token_id = (
            tokenizer.image_token_id
            if getattr(tokenizer, "image_token_id", None)
            else tokenizer.convert_tokens_to_ids(self.image_token)
        )
        self.video_token_id = (
            tokenizer.video_token_id
            if getattr(tokenizer, "video_token_id", None)
            else tokenizer.convert_tokens_to_ids(self.video_token)
        )
        self.image_placeholder = image_placeholder
        self.video_placeholder = video_placeholder
        self.tokens_per_tile = tokens_per_tile
        self.image_start_token = image_start_token
        self.image_end_token = image_end_token
        if "auto_map" in kwargs:
            self.auto_map = kwargs["auto_map"]
        super().__init__(image_processor, tokenizer, chat_template=chat_template)
    def replace_media_placeholder(
        self, text, image_list, video_list, timestamps_list, fps_list, **output_kwargs
    ):
        """Expand `<image-N>` / `<video-N>` placeholders in `text` into token runs.

        Each placeholder becomes start/end image markers wrapping one
        image-context token per (tile * tokens_per_tile); the matching pixel
        tensors are collected into a single concatenated batch.

        Returns:
            ``(new_text, pixel_values_or_None, image_sizes_or_None,
            num_images_consumed, num_videos_consumed)``
        """
        num_of_images_in_this_sample = 0
        num_of_videos_in_this_sample = 0
        # Regular expression pattern to match formats like <image-1> or <video-2>
        pattern = re.compile(rf"<({self.image_placeholder}|{self.video_placeholder})-(\d+)>")
        unified_frame_list = []
        # image_min_dynamic_tiles = output_kwargs["images_kwargs"].get(
        #     "min_dynamic_tiles", self.image_processor.min_dynamic_tiles
        # )
        # image_max_dynamic_tiles = output_kwargs["images_kwargs"].get(
        #     "max_dynamic_tiles", self.image_processor.max_dynamic_tiles
        # )
        # image_use_thumbnail = output_kwargs["images_kwargs"].get(
        #     "use_thumbnail", self.image_processor.use_thumbnail
        # )
        video_min_dynamic_tiles = output_kwargs["videos_kwargs"].get(
            "min_dynamic_tiles", self.image_processor.min_dynamic_tiles
        )
        video_max_dynamic_tiles = output_kwargs["videos_kwargs"].get(
            "max_dynamic_tiles", self.image_processor.max_dynamic_tiles
        )
        video_use_thumbnail = output_kwargs["videos_kwargs"].get(
            "use_thumbnail", self.image_processor.use_thumbnail
        )
        tile_size = self.image_processor.size.get("height", 448)
        # Function to replace tags in a single text
        def replace_in_text(text):
            # repl callback function for each match replacement operation
            def repl(match):
                nonlocal unified_frame_list
                nonlocal num_of_images_in_this_sample
                nonlocal num_of_videos_in_this_sample
                media_type = match.group(1)  # 'image' or 'video'
                idx_in_list = int(match.group(2)) - 1  # Convert to list index (0-based)
                # Select the corresponding path based on media type
                idx_mapper = {
                    0: "first",
                    1: "second",
                    2: "third",
                    3: "fourth",
                    4: "fifth",
                    5: "sixth",
                    6: "seventh",
                    7: "eighth",
                    8: "ninth",
                    9: "tenth",
                }
                if media_type == "image":
                    image_inputs = self.image_processor(
                        images=[image_list[idx_in_list]],
                        videos=None,
                        **output_kwargs["images_kwargs"],
                    )
                    num_all_tiles = image_inputs["pixel_values"].shape[0]
                    # One image-context token per (tile * tokens_per_tile), bracketed by <img>...</img>.
                    special_placeholder = f"<image {idx_in_list + 1}>{self.image_start_token}{self.image_token * num_all_tiles * self.tokens_per_tile}{self.image_end_token}"
                    unified_frame_list.append(image_inputs)
                    num_of_images_in_this_sample += 1
                elif media_type == "video":
                    video_inputs = self.image_processor(
                        images=None,
                        videos=[video_list[idx_in_list]],
                        **output_kwargs["videos_kwargs"],
                    )
                    num_all_tiles = video_inputs["pixel_values"].shape[0]
                    image_sizes = video_inputs["image_sizes"]
                    if timestamps_list is not None and -1 not in timestamps_list:
                        frame_timestamps = timestamps_list[idx_in_list]
                    else:
                        frame_timestamps = None
                    sampled_fps = fps_list[idx_in_list] if fps_list is not None else None
                    # Recompute per-frame tile counts so tokens can be attributed to frames.
                    num_of_tiles_each_frame = [
                        self.get_number_tiles_based_on_image_size(
                            image_size,
                            video_min_dynamic_tiles,
                            video_max_dynamic_tiles,
                            video_use_thumbnail,
                            tile_size,
                        )
                        for image_size in image_sizes
                    ]
                    assert sum(num_of_tiles_each_frame) == num_all_tiles, (
                        f"The number of tiles in each frame is not equal to the total number of tiles: {sum(num_of_tiles_each_frame)} != {num_all_tiles}"
                    )
                    if frame_timestamps is not None:
                        assert len(frame_timestamps) == len(num_of_tiles_each_frame), (
                            f"The number of timestamps is not equal to the number of frames: {len(frame_timestamps)} != {len(num_of_tiles_each_frame)}"
                        )
                        special_placeholder = [
                            f"Frame {i + 1} sample at {frame_timestamps[i]:.2f}s: {self.image_start_token}{self.image_token * num_of_tiles * self.tokens_per_tile}{self.image_end_token}"
                            for i, num_of_tiles in enumerate(num_of_tiles_each_frame)
                        ]
                    else:
                        special_placeholder = [
                            f"Frame {i + 1}: {self.image_start_token}{self.image_token * num_of_tiles * self.tokens_per_tile}{self.image_end_token}"
                            for i, num_of_tiles in enumerate(num_of_tiles_each_frame)
                        ]
                    if sampled_fps is not None:
                        special_placeholder = (
                            f"The {idx_mapper[idx_in_list]} video sampled with {sampled_fps:.2f} fps: "
                            + "".join(special_placeholder)
                        )
                    else:
                        special_placeholder = f"The {idx_mapper[idx_in_list]} video: " + "".join(
                            special_placeholder
                        )
                    unified_frame_list.append(video_inputs)
                    num_of_videos_in_this_sample += 1
                else:
                    raise ValueError(f"Unknown media type: {media_type}")
                return special_placeholder
            return pattern.sub(repl, text)
        text = replace_in_text(text)
        if len(unified_frame_list) > 0:
            pixel_values = torch.cat([frame["pixel_values"] for frame in unified_frame_list])
            image_sizes = torch.cat([frame["image_sizes"] for frame in unified_frame_list])
        else:
            pixel_values = None
            image_sizes = None
        return (
            text,
            pixel_values,
            image_sizes,
            num_of_images_in_this_sample,
            num_of_videos_in_this_sample,
        )
    def __call__(
        self,
        images: ImageInput = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        audio=None,
        videos: VideoInput = None,
        **kwargs: Unpack[Eagle25VLProcessorKwargs],
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.
        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **pixel_values_videos** -- Pixel values of a video input to be fed to a model. Returned when `videos` is not `None`.
            - **image_sizes** -- Size of each image that will be used to unpad an image. Returned when `images` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            Eagle25VLProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        # NOTE(review): a non-str, non-list `text` (e.g. None or a tuple) either
        # raises on `text[0]` below or leaves `text_list` unbound - confirm
        # callers always pass str or list[str].
        if isinstance(text, str):
            text_list = [text]
        elif not isinstance(text, list) and not isinstance(text[0], str):
            raise ValueError("Invalid input text. Please provide a string, or a list of strings")
        elif isinstance(text, list) and isinstance(text[0], str):
            text_list = text
        if images is None:
            images = []
        if videos is None:
            videos = []
        pixel_values_list = []
        image_sizes_list = []
        new_sample_list = []
        # Media lists are consumed positionally: each sample advances these
        # offsets by the number of images/videos its placeholders referenced.
        image_start_idx = 0
        video_start_idx = 0
        timestamps_batch = output_kwargs["videos_kwargs"].pop("timestamps", None)
        fps_batch = output_kwargs["videos_kwargs"].pop("fps", None)
        for sample in text_list:
            timestamps_list = timestamps_batch[video_start_idx:] if timestamps_batch is not None else None
            fps_list = fps_batch[video_start_idx:] if fps_batch is not None else None
            (
                sample,
                pixel_values,
                image_sizes,
                num_of_images_in_this_sample,
                num_of_videos_in_this_sample,
            ) = self.replace_media_placeholder(
                sample,
                images[image_start_idx:],
                videos[video_start_idx:],
                timestamps_list,
                fps_list,
                **output_kwargs,
            )
            new_sample_list.append(sample)
            if pixel_values is not None:
                pixel_values_list.append(pixel_values)
                image_sizes_list.append(image_sizes)
            image_start_idx += num_of_images_in_this_sample
            video_start_idx += num_of_videos_in_this_sample
        if len(pixel_values_list) > 0:
            image_inputs = {
                "pixel_values": torch.cat(pixel_values_list),
                "image_sizes": torch.cat(image_sizes_list),
            }
        else:
            image_inputs = {}
        video_inputs = {}
        text_inputs = self.tokenizer(new_sample_list, **output_kwargs["text_kwargs"])
        return BatchFeature(data={**text_inputs, **image_inputs, **video_inputs})
    def get_number_tiles_based_on_image_size(
        self, image_size: tuple, min_num: int, max_num: int, use_thumbnail: bool, tile_size: int
    ) -> int:
        """
        Get the number of tiles based on the image size.

        Mirrors the image processor's dynamic-tiling choice: picks the
        (cols, rows) grid within [min_num, max_num] tiles whose aspect ratio is
        closest to the image's, optionally adding one thumbnail tile.
        """
        orig_height, orig_width = image_size
        aspect_ratio = orig_width / orig_height
        # calculate the existing image aspect ratio
        target_ratios = {
            (i, j)
            for n in range(min_num, max_num + 1)
            for i in range(1, n + 1)
            for j in range(1, n + 1)
            if i * j <= max_num and i * j >= min_num
        }
        target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
        # find the closest aspect ratio to the target
        target_aspect_ratio = self.image_processor.find_closest_aspect_ratio(
            aspect_ratio, target_ratios, orig_width, orig_height, tile_size
        )
        tiles_num = target_aspect_ratio[0] * target_aspect_ratio[1]
        # The extra thumbnail tile is only added for multi-tile images.
        if use_thumbnail and tiles_num > 1:
            tiles_num += 1
        return tiles_num
    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)
    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)
    @property
    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
    def model_input_names(self):
        # De-duplicate names shared by both components while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    # override to save video-config in a separate config file
    def save_pretrained(self, save_directory, **kwargs):
        """Save processor state into `save_directory`, creating it if needed."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        outputs = super().save_pretrained(save_directory, **kwargs)
        return outputs
    # override to load video-config from a separate config file
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load a processor, unwrapping the (processor, unused_kwargs) tuple if returned."""
        processor = super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        # if return_unused_kwargs a tuple is returned where the second element is 'unused_kwargs'
        if isinstance(processor, tuple):
            processor = processor[0]
        return processor
    # Copy from https://github.com/QwenLM/Qwen2.5-VL/blob/main/qwen-vl-utils/src/qwen_vl_utils/vision_process.py
    def process_vision_info(
        self,
        conversations: list[dict] | list[list[dict]],
        return_video_kwargs: bool = False,
    ) -> tuple[list[Image.Image] | None, list[torch.Tensor | list[Image.Image]] | None, dict | None]:
        """Collect and decode all vision elements referenced by `conversations`.

        Only image elements are handled in this implementation: any vision
        element without an image key raises, and the video outputs are always
        empty/None.
        """
        vision_infos = self.extract_vision_info(conversations)
        ## Read images or videos
        image_inputs = []
        video_inputs = []
        video_sample_fps_list = []
        video_timestamps_list = []
        for vision_info in vision_infos:
            if "image" in vision_info or "image_url" in vision_info:
                image_inputs.append(fetch_image(vision_info))
            else:
                raise ValueError("image, image_url or video should in content.")
        if len(image_inputs) == 0:
            image_inputs = None
        if len(video_inputs) == 0:
            video_inputs = None
        if return_video_kwargs:
            return (
                image_inputs,
                video_inputs,
                {"fps": video_sample_fps_list, "timestamps": video_timestamps_list},
            )
        return image_inputs, video_inputs
    def extract_vision_info(self, conversations: list[dict] | list[list[dict]]) -> list[dict]:
        """Flatten chat messages into the ordered list of vision content elements."""
        vision_infos = []
        if isinstance(conversations[0], dict):
            # Single conversation passed in: normalize to a batch of one.
            conversations = [conversations]
        for conversation in conversations:
            for message in conversation:
                if isinstance(message["content"], list):
                    for ele in message["content"]:
                        if (
                            "image" in ele
                            or "image_url" in ele
                            or "video" in ele
                            or ele["type"] in ("image", "image_url", "video")
                        ):
                            vision_infos.append(ele)
        return vision_infos
# Public API of this module.
__all__ = ["Eagle25VLProcessor"]
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/groot/eagle2_hg_model/processing_eagle2_5_vl.py",
"license": "Apache License 2.0",
"lines": 469,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/groot/groot_n1.py | # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING
import numpy as np
import torch
import torch.nn as nn
from huggingface_hub import snapshot_download
from huggingface_hub.errors import HFValidationError, RepositoryNotFoundError
from lerobot.utils.import_utils import _transformers_available
# Conditional import for type checking and lazy loading
if TYPE_CHECKING or _transformers_available:
from transformers import AutoConfig, AutoModel, PretrainedConfig, PreTrainedModel
from transformers.feature_extraction_utils import BatchFeature
else:
AutoConfig = None
AutoModel = None
PretrainedConfig = object
PreTrainedModel = object
BatchFeature = None
try:
import tree
except ImportError:
tree = None
from lerobot.policies.groot.action_head.flow_matching_action_head import (
FlowmatchingActionHead,
FlowmatchingActionHeadConfig,
)
from lerobot.policies.groot.utils import ensure_eagle_cache_ready
from lerobot.utils.constants import ACTION, HF_LEROBOT_HOME
DEFAULT_VENDOR_EAGLE_PATH = str((Path(__file__).resolve().parent / "eagle2_hg_model").resolve())
DEFAULT_TOKENIZER_ASSETS_REPO = "lerobot/eagle2hg-processor-groot-n1p5"
class EagleBackbone(nn.Module):
    """Eagle 2.5 vision-language backbone used by GR00T N1.5.

    The Eagle model is instantiated from a cached config (weights are loaded
    later from the full checkpoint), its hidden states are optionally projected
    from 2048 dims to ``project_to_dim``, and LLM layers above ``select_layer``
    are discarded to save compute. ``tune_llm`` / ``tune_visual`` control which
    submodules stay trainable.
    """

    def __init__(
        self,
        tune_llm: bool = False,
        tune_visual: bool = False,
        select_layer: int = -1,
        reproject_vision: bool = False,
        use_flash_attention: bool = False,
        load_bf16: bool = False,
        eagle_path: str = DEFAULT_VENDOR_EAGLE_PATH,
        tokenizer_assets_repo: str = DEFAULT_TOKENIZER_ASSETS_REPO,
        project_to_dim: int = 1536,
    ):
        """
        Args:
            tune_llm: whether to tune the LLM model (default: False)
            tune_visual: whether to tune the visual model (default: False)
            select_layer: hidden-state layer read in ``forward_eagle``; LLM
                layers beyond this index are removed below.
            reproject_vision: not implemented; must be False.
            use_flash_attention: accepted but unused in this constructor —
                TODO confirm intent.
            load_bf16: accepted but unused in this constructor — TODO confirm intent.
            eagle_path: vendored Eagle files path; not read directly here (the
                cache derived from ``tokenizer_assets_repo`` is what is loaded).
            tokenizer_assets_repo: HF repo id whose assets populate the cache.
            project_to_dim: output size of the linear projection over the Eagle
                hidden states; ``None`` keeps them unprojected.
        """
        super().__init__()
        assert not reproject_vision, "Reproject vision is not implemented here, set to False"
        # Prefer loading Eagle model config from the cache directory where vendor files were copied.
        vendor_dir = DEFAULT_VENDOR_EAGLE_PATH
        cache_dir = HF_LEROBOT_HOME / tokenizer_assets_repo
        try:
            ensure_eagle_cache_ready(vendor_dir, cache_dir, tokenizer_assets_repo)
        except Exception as exc:  # nosec: B110
            # Best-effort: loading below may still succeed if the cache was populated earlier.
            print(f"[GROOT] Warning: failed to prepare Eagle cache for backbone: {exc}")
        config = AutoConfig.from_pretrained(str(cache_dir), trust_remote_code=True)
        self.eagle_model = AutoModel.from_config(config, trust_remote_code=True)
        if project_to_dim is not None:
            self.eagle_linear = torch.nn.Linear(2048, project_to_dim)
        else:
            self.eagle_linear = torch.nn.Identity()
        # needed since we don't use these layers. Also saves compute
        # NOTE(review): with the default select_layer=-1 this loop would pop
        # every layer and then fail on an empty list; callers appear to pass a
        # positive layer count via backbone_cfg — confirm.
        while len(self.eagle_model.language_model.model.layers) > select_layer:
            self.eagle_model.language_model.model.layers.pop(-1)
        self.select_layer = select_layer
        self.set_trainable_parameters(tune_llm, tune_visual)

    def set_trainable_parameters(self, tune_llm: bool, tune_visual: bool):
        """Enable grads on all params, then freeze LLM and/or vision tower per the flags."""
        self.tune_llm = tune_llm
        self.tune_visual = tune_visual
        for p in self.parameters():
            p.requires_grad = True
        if not tune_llm:
            self.eagle_model.language_model.requires_grad_(False)
        if not tune_visual:
            self.eagle_model.vision_model.requires_grad_(False)
            self.eagle_model.mlp1.requires_grad_(False)
        print(f"Tune backbone llm: {self.tune_llm}")
        print(f"Tune backbone visual: {self.tune_visual}")
        # Check if any parameters are still trainable. If not, print a warning.
        if not tune_llm and not tune_visual:
            for name, p in self.named_parameters():
                if p.requires_grad:
                    print(f"Backbone trainable parameter: {name}")
        if not any(p.requires_grad for p in self.parameters()):
            print("Warning: No backbone trainable parameters found.")

    def set_frozen_modules_to_eval_mode(self):
        """
        Huggingface will call model.train() at each training_step. To ensure
        the expected behaviors for modules like dropout, batchnorm, etc., we
        need to call model.eval() for the frozen modules.
        """
        if self.training:
            if self.eagle_model.language_model and not self.tune_llm:
                self.eagle_model.language_model.eval()
            if self.eagle_model.vision_model and not self.tune_visual:
                self.eagle_model.vision_model.eval()

    def prepare_input(self, batch: dict) -> BatchFeature:
        """Wrap the raw batch dict in a transformers BatchFeature (no transformation)."""
        return BatchFeature(data=batch)

    def forward_eagle(self, vl_input: BatchFeature) -> tuple:
        """Run the Eagle model on the ``eagle_``-prefixed entries of ``vl_input``.

        Returns:
            (features, attention_mask): the (optionally projected) hidden
            states of layer ``self.select_layer`` and the input attention mask.
        """
        eagle_prefix = "eagle_"
        eagle_input = {
            k.removeprefix(eagle_prefix): v for k, v in vl_input.items() if k.startswith(eagle_prefix)
        }
        # image_sizes is dropped before the call — presumably not an accepted
        # model kwarg; verify against the Eagle model signature.
        del eagle_input["image_sizes"]
        eagle_output = self.eagle_model(**eagle_input, output_hidden_states=True, return_dict=True)
        eagle_features = eagle_output.hidden_states[self.select_layer]
        eagle_features = self.eagle_linear(eagle_features)
        return eagle_features, eagle_input["attention_mask"]

    def forward(self, vl_input: BatchFeature) -> BatchFeature:
        """Encode vision+language inputs into backbone features.

        Returns a BatchFeature with ``backbone_features`` and
        ``backbone_attention_mask``.
        """
        self.set_frozen_modules_to_eval_mode()
        eagle_embeds, eagle_mask = self.forward_eagle(vl_input)
        # YL (TODO HACK): to resolve DDP issue when tune_visual=True
        # Ensure all trainable parameters in vision_model are used in the forward pass for DDP compatibility
        if self.training and self.tune_visual:
            dummy_term = torch.tensor(
                0.0, device=eagle_embeds.device, dtype=eagle_embeds.dtype, requires_grad=True
            )
            for param in self.eagle_model.vision_model.parameters():
                if param.requires_grad:
                    # 0.0 * param.sum() leaves the value unchanged while wiring
                    # each trainable vision parameter into the autograd graph.
                    dummy_term = dummy_term + 0.0 * param.sum()
            eagle_embeds = eagle_embeds + dummy_term
        return BatchFeature(
            data={"backbone_features": eagle_embeds, "backbone_attention_mask": eagle_mask}
        )  # [B, T2, hidden_size]
# Keys and markers shared by the backbone / action-head output contracts below.
BACKBONE_FEATURE_KEY = "backbone_features"
ACTION_KEY = "action_pred"
LOSS_KEY = "loss"
ERROR_MSG = "Error: unexpected input/output"
N_COLOR_CHANNELS = 3
# config
@dataclass
class GR00TN15Config(PretrainedConfig):
    """HF-style configuration for GR00TN15.

    The custom ``__init__`` overrides the dataclass-generated one and mirrors
    every provided kwarg onto the instance, which is how the ``init=False``
    fields below get populated.
    """

    model_type = "gr00t_n1_5"
    backbone_cfg: dict = field(init=False, metadata={"help": "Backbone configuration."})
    action_head_cfg: dict = field(init=False, metadata={"help": "Action head configuration."})
    action_horizon: int = field(init=False, metadata={"help": "Action horizon."})
    action_dim: int = field(init=False, metadata={"help": "Action dimension."})
    compute_dtype: str = field(default="float32", metadata={"help": "Compute dtype."})

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Expose all provided kwargs as attributes (covers the init=False fields).
        for key, value in kwargs.items():
            setattr(self, key, value)
# real model
class GR00TN15(PreTrainedModel):
    """GR00T N1.5 dual-brain model: Eagle VLM backbone + flow-matching action head.

    Output contract (previously a stray string expression after the class
    attributes, now a real docstring):
    - the backbone output is a BatchFeature with key 'backbone_features' of
      shape (batch_size, n, hidden_size), where n is variable (e.g. time or 1);
    - the action head output is a BatchFeature with key 'action_pred' of shape
      (batch_size, action_horizon, action_dim) at inference time, and a 'loss'
      key during training.
    Both may carry arbitrary additional user-specified keys.
    """

    supports_gradient_checkpointing = True
    config_class = GR00TN15Config

    def __init__(
        self,
        config: GR00TN15Config,
        local_model_path: str,
    ):
        """Build backbone and action head from their dict sub-configs.

        Args:
            config: model configuration; ``backbone_cfg`` and
                ``action_head_cfg`` must be plain dicts.
            local_model_path: filesystem path the weights were loaded from.
        """
        assert isinstance(config.backbone_cfg, dict)
        assert isinstance(config.action_head_cfg, dict)
        super().__init__(config)
        self.local_model_path = local_model_path
        self.backbone = EagleBackbone(**config.backbone_cfg)
        action_head_cfg = FlowmatchingActionHeadConfig(**config.action_head_cfg)
        self.action_head = FlowmatchingActionHead(action_head_cfg)
        self.action_horizon = config.action_horizon
        self.action_dim = config.action_dim
        self.compute_dtype = config.compute_dtype

    def validate_inputs(self, inputs):
        """Validate the 'action' and 'video' entries of ``inputs``.

        Collects every problem into one message and raises a single ValueError.
        NOTE -- this should be handled internally by the model; doing so is a
        breaking change, so it is kept here for now.
        """
        detected_error = False
        error_msg = ERROR_MSG
        if ACTION in inputs:
            action = inputs[ACTION]
            # In inference, action may be omitted or None; validate only when it's a tensor.
            if action is None:
                pass  # allow None during inference
            elif isinstance(action, torch.Tensor):
                shape_ok = (
                    len(action.shape) == 3
                    and action.shape[1] == self.action_horizon
                    and action.shape[2] == self.action_dim
                )
                if not shape_ok:
                    error_msg += f"\n{action.shape=}"
                    detected_error = True
            else:
                # Unexpected non-tensor type provided for action
                error_msg += f"\nInvalid type for action: {type(action)}"
                detected_error = True
        if "video" in inputs:
            video = inputs["video"]
            # Only inspect dtype/shape when the value really is an ndarray.
            # Previously these attribute accesses ran unconditionally and could
            # raise (AttributeError) on foreign types, masking the intended
            # ValueError report.
            if not isinstance(video, np.ndarray):
                error_msg += f"\n{type(video)=}"
                detected_error = True
            else:
                if video.dtype != np.uint8:
                    error_msg += f"\n{video.dtype=}"
                    detected_error = True
                if len(video.shape) != 6 or video.shape[3] != N_COLOR_CHANNELS:
                    error_msg += f"\n{video.shape=}"
                    detected_error = True
        if detected_error:
            raise ValueError(error_msg)

    def validate_data(self, action_head_outputs, backbone_outputs, is_training):
        """Validate backbone/action-head outputs against the class contract.

        Raises ValueError with a diagnostic report. The report is built
        defensively: the previous version indexed BACKBONE_FEATURE_KEY /
        ACTION_KEY in exactly the branch where they may be missing, so the
        diagnostics themselves raised KeyError instead of the ValueError.
        """
        backbone_is_bf = isinstance(backbone_outputs, BatchFeature)
        fail_backbone = not backbone_is_bf or BACKBONE_FEATURE_KEY not in backbone_outputs
        if fail_backbone:
            error_msg = ERROR_MSG
            error_msg += f"\n{isinstance(backbone_outputs, BatchFeature)=}"
            if backbone_is_bf:
                error_msg += f"\n{BACKBONE_FEATURE_KEY in backbone_outputs=}"
            raise ValueError(error_msg)
        action_is_bf = isinstance(action_head_outputs, BatchFeature)
        fail_action_head = not action_is_bf or not (
            (
                LOSS_KEY in action_head_outputs and is_training
            )  # there might not be an action prediction during training
            or (
                ACTION_KEY in action_head_outputs
                and action_head_outputs[ACTION_KEY].shape[1] == self.action_horizon
                and action_head_outputs[ACTION_KEY].shape[2] == self.action_dim
            )
        )
        if fail_action_head:
            error_msg = ERROR_MSG
            error_msg += f"\n{isinstance(action_head_outputs, BatchFeature)=}"
            if action_is_bf:
                error_msg += f"\n{LOSS_KEY in action_head_outputs=}"
                if ACTION_KEY in action_head_outputs:
                    error_msg += f"\n{action_head_outputs[ACTION_KEY].shape=}"
            error_msg += f"\n{self.action_horizon=}"
            error_msg += f"\n{self.action_dim=}"
            raise ValueError(error_msg)

    def forward(
        self,
        inputs: dict,
    ) -> BatchFeature:
        """Training forward: encode inputs, run the action head, return its outputs (incl. loss)."""
        backbone_inputs, action_inputs = self.prepare_input(inputs)
        backbone_outputs = self.backbone(backbone_inputs)
        action_head_outputs = self.action_head(backbone_outputs, action_inputs)
        self.validate_data(action_head_outputs, backbone_outputs, is_training=True)
        return action_head_outputs

    def get_action(
        self,
        inputs: dict,
    ) -> BatchFeature:
        """Inference path: returns a BatchFeature containing 'action_pred'."""
        backbone_inputs, action_inputs = self.prepare_input(inputs)
        # Because the behavior of backbones remains the same for training and inference, we can use `forward` for backbones.
        backbone_outputs = self.backbone(backbone_inputs)
        action_head_outputs = self.action_head.get_action(backbone_outputs, action_inputs)
        self.validate_data(action_head_outputs, backbone_outputs, is_training=False)
        return action_head_outputs

    def prepare_input(self, inputs) -> tuple[BatchFeature, BatchFeature]:
        """Validate raw inputs and split them into backbone/action-head batches.

        Tensors are moved to the model device; floating tensors are cast to the
        configured compute dtype (bf16 when requested, else the action head's).
        """
        self.validate_inputs(inputs)
        backbone_inputs = self.backbone.prepare_input(inputs)
        action_inputs = self.action_head.prepare_input(inputs)

        def to_device_with_maybe_dtype(x):
            # Cast floating tensors to a memory-efficient compute dtype when requested.
            # Rationale: Upcasting backbone activations to fp32 significantly increases VRAM.
            # When compute_dtype is bfloat16, prefer bf16 for activations to match AMP behavior.
            if not isinstance(x, torch.Tensor):
                return x
            if torch.is_floating_point(x):
                if getattr(self, "compute_dtype", None) == "bfloat16":
                    return x.to(self.device, dtype=torch.bfloat16)
                # Fallback: preserve previous behavior if not using bf16 compute
                return x.to(self.device, dtype=self.action_head.dtype)
            # Non-floating tensors: move device only
            return x.to(self.device)

        # NOTE(review): `tree` is None when dm-tree is not installed (optional
        # import at module top); confirm the dependency is guaranteed upstream.
        backbone_inputs = tree.map_structure(to_device_with_maybe_dtype, backbone_inputs)
        action_inputs = tree.map_structure(to_device_with_maybe_dtype, action_inputs)
        return backbone_inputs, action_inputs

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        """Load pretrained GR00T weights and apply the fine-tuning freeze flags.

        Extra kwargs consumed here (not forwarded to transformers):
            tune_visual (default True), tune_llm (default False),
            tune_projector (default True), tune_diffusion_model (default True).
        """
        tune_visual = kwargs.pop("tune_visual", True)
        tune_llm = kwargs.pop("tune_llm", False)
        tune_projector = kwargs.pop("tune_projector", True)
        tune_diffusion_model = kwargs.pop("tune_diffusion_model", True)
        print(f"Loading pretrained dual brain from {pretrained_model_name_or_path}")
        print(f"Tune backbone vision tower: {tune_visual}")
        print(f"Tune backbone LLM: {tune_llm}")
        print(f"Tune action head projector: {tune_projector}")
        print(f"Tune action head DiT: {tune_diffusion_model}")
        # get the current model path being downloaded
        try:
            # NOTE(YL) This downloads the model to the local cache and returns the local path to the model
            # saved in ~/.cache/huggingface/hub/
            local_model_path = snapshot_download(pretrained_model_name_or_path, repo_type="model")
        except (HFValidationError, RepositoryNotFoundError):
            print(
                f"Model not found or available on the huggingface hub. Loading from local path: {pretrained_model_name_or_path}"
            )
            local_model_path = pretrained_model_name_or_path
        pretrained_model = super().from_pretrained(
            local_model_path, local_model_path=local_model_path, **kwargs
        )
        pretrained_model.backbone.set_trainable_parameters(tune_visual=tune_visual, tune_llm=tune_llm)
        pretrained_model.action_head.set_trainable_parameters(
            tune_projector=tune_projector, tune_diffusion_model=tune_diffusion_model
        )
        return pretrained_model
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/groot/groot_n1.py",
"license": "Apache License 2.0",
"lines": 320,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/groot/modeling_groot.py | #!/usr/bin/env python
# Copyright 2024 NVIDIA Corporation and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Groot Policy Wrapper for LeRobot Integration
Minimal integration that delegates to Isaac-GR00T components where possible
without porting their code. The intent is to:
- Download and load the pretrained GR00T model via GR00TN15.from_pretrained
- Optionally align action horizon similar to gr00t_finetune.py
- Expose predict_action via GR00T model.get_action
- Provide a training forward that can call the GR00T model forward if batch
structure matches.
Notes:
- Dataset loading and full training orchestration is handled by Isaac-GR00T
TrainRunner in their codebase. If you want to invoke that flow end-to-end
from LeRobot, see `GrootPolicy.finetune_with_groot_runner` below.
"""
import builtins
import os
from collections import deque
from pathlib import Path
from typing import TypeVar
import torch
from torch import Tensor
from lerobot.configs.types import FeatureType, PolicyFeature
from lerobot.policies.groot.configuration_groot import GrootConfig
from lerobot.policies.groot.groot_n1 import GR00TN15
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.utils.constants import ACTION, OBS_IMAGES
T = TypeVar("T", bound="GrootPolicy")
class GrootPolicy(PreTrainedPolicy):
    """Wrapper around external Groot model for LeRobot integration."""

    name = "groot"
    config_class = GrootConfig

    def __init__(self, config: GrootConfig, **kwargs):
        """Initialize Groot policy wrapper.

        Creates a fresh GR00T model from ``config.base_model_path``; loading a
        fine-tuned checkpoint instead goes through ``from_pretrained``.
        """
        super().__init__(config)
        config.validate_features()
        self.config = config
        # Initialize GR00T model using ported components
        self._groot_model = self._create_groot_model()
        self.reset()

    def _create_groot_model(self):
        """Create and initialize the GR00T model using Isaac-GR00T API.
        This is only called when creating a NEW policy (not when loading from checkpoint).
        Steps (delegating to Isaac-GR00T):
        1) Download and load pretrained model via GR00TN15.from_pretrained
        2) Align action horizon with data_config if provided
        """
        # Handle Flash Attention compatibility issues
        self._handle_flash_attention_compatibility()
        model = GR00TN15.from_pretrained(
            pretrained_model_name_or_path=self.config.base_model_path,
            tune_llm=self.config.tune_llm,
            tune_visual=self.config.tune_visual,
            tune_projector=self.config.tune_projector,
            tune_diffusion_model=self.config.tune_diffusion_model,
        )
        # Keep the nested HF config in sync with the dtype actually used for compute.
        model.compute_dtype = "bfloat16" if self.config.use_bf16 else model.compute_dtype
        model.config.compute_dtype = model.compute_dtype
        return model

    def reset(self):
        """Reset policy state when environment resets."""
        # Queue of per-step actions consumed by select_action between chunk predictions.
        self._action_queue = deque([], maxlen=self.config.n_action_steps)

    @classmethod
    def from_pretrained(
        cls: builtins.type[T],
        pretrained_name_or_path: str | Path,
        *,
        config: GrootConfig | None = None,
        force_download: bool = False,
        resume_download: bool | None = None,
        proxies: dict | None = None,
        token: str | bool | None = None,
        cache_dir: str | Path | None = None,
        local_files_only: bool = False,
        revision: str | None = None,
        strict: bool = True,
        **kwargs,
    ) -> T:
        """Load Groot policy from pretrained model.
        Handles two cases:
        1. Base GR00T models (e.g., 'nvidia/GR00T-N1.5-3B') - loads the raw model
        2. Fine-tuned LeRobot checkpoints - loads config and weights from safetensors
        Args:
            pretrained_name_or_path: Path to the GR00T model or fine-tuned checkpoint
            config: Optional GrootConfig. If None, loads from checkpoint or creates default
            force_download: Force download even if cached
            resume_download: Resume interrupted download
            proxies: Proxy settings
            token: HuggingFace authentication token
            cache_dir: Cache directory path
            local_files_only: Only use local files
            revision: Specific model revision
            strict: Strict state dict loading
            **kwargs: Additional arguments (passed to config)
        Returns:
            Initialized GrootPolicy instance with loaded model
        """
        from huggingface_hub import hf_hub_download
        from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE
        from huggingface_hub.errors import HfHubHTTPError

        print(
            "The Groot policy is a wrapper around Nvidia's GR00T N1.5 model.\n"
            f"Loading pretrained model from: {pretrained_name_or_path}"
        )
        model_id = str(pretrained_name_or_path)
        is_finetuned_checkpoint = False
        # Check if this is a fine-tuned LeRobot checkpoint (has model.safetensors)
        try:
            if os.path.isdir(model_id):
                is_finetuned_checkpoint = os.path.exists(os.path.join(model_id, SAFETENSORS_SINGLE_FILE))
            else:
                # Try to download the safetensors file to check if it exists
                try:
                    hf_hub_download(
                        repo_id=model_id,
                        filename=SAFETENSORS_SINGLE_FILE,
                        revision=revision,
                        cache_dir=cache_dir,
                        force_download=False,  # Just check, don't force download
                        proxies=proxies,
                        token=token,
                        local_files_only=local_files_only,
                    )
                    is_finetuned_checkpoint = True
                except HfHubHTTPError:
                    is_finetuned_checkpoint = False
        except Exception:
            # NOTE(review): any other failure silently falls back to treating the
            # input as a base model — confirm this best-effort probe is intended.
            is_finetuned_checkpoint = False
        if is_finetuned_checkpoint:
            # This is a fine-tuned LeRobot checkpoint - use parent class loading
            print("Detected fine-tuned LeRobot checkpoint, loading with state dict...")
            return super().from_pretrained(
                pretrained_name_or_path=pretrained_name_or_path,
                config=config,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                token=token,
                cache_dir=cache_dir,
                local_files_only=local_files_only,
                revision=revision,
                strict=strict,
                **kwargs,
            )
        # This is a base GR00T model - load it fresh
        print("Detected base GR00T model, loading from HuggingFace...")
        if config is None:
            # Create default config with the pretrained path
            config = GrootConfig(base_model_path=str(pretrained_name_or_path))
            # Add minimal visual feature required for validation
            # validate_features() will automatically add state and action features
            # These are placeholders - actual robot features come from the preprocessor
            if not config.input_features:
                config.input_features = {
                    f"{OBS_IMAGES}.camera": PolicyFeature(
                        type=FeatureType.VISUAL,
                        shape=(3, 224, 224),  # Default image size from config
                    ),
                }
        else:
            # Override the base_model_path with the provided path
            config.base_model_path = str(pretrained_name_or_path)
        # Pass through any additional config overrides from kwargs
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
        # Create a fresh policy instance - this will automatically load the GR00T model
        # in __init__ via _create_groot_model()
        policy = cls(config)
        policy.eval()
        return policy

    def get_optim_params(self) -> dict:
        # NOTE(review): annotated -> dict but returns the parameters iterator;
        # callers appear to accept an iterable — confirm and fix the annotation.
        return self.parameters()

    def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]:
        """Training forward pass.
        Delegates to Isaac-GR00T model.forward when inputs are compatible.
        """
        # Build a clean input dict for GR00T: keep only tensors GR00T consumes
        allowed_base = {"state", "state_mask", "action", "action_mask", "embodiment_id"}
        groot_inputs = {
            k: v
            for k, v in batch.items()
            if (k in allowed_base or k.startswith("eagle_")) and not (k.startswith("next.") or k == "info")
        }
        # Get device from model parameters
        device = next(self.parameters()).device
        # Run GR00T forward under bf16 autocast when enabled to reduce activation memory
        # Rationale: Matches original GR00T finetuning (bf16 compute, fp32 params) and avoids fp32 upcasts.
        with torch.autocast(device_type=device.type, dtype=torch.bfloat16, enabled=self.config.use_bf16):
            outputs = self._groot_model.forward(groot_inputs)
        # Isaac-GR00T returns a BatchFeature; loss key is typically 'loss'
        loss = outputs.get("loss")
        # NOTE(review): assumes 'loss' is always present; .item() would raise on None — confirm.
        loss_dict = {"loss": loss.item()}
        return loss, loss_dict

    @torch.no_grad()
    def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
        """Predict a chunk of actions for inference by delegating to Isaac-GR00T.
        Returns a tensor of shape (B, n_action_steps, action_dim).
        """
        self.eval()
        # Build a clean input dict for GR00T: keep only tensors GR00T consumes
        # Preprocessing is handled by the processor pipeline, so we just filter the batch
        # NOTE: During inference, we should NOT pass action/action_mask (that's what we're predicting)
        allowed_base = {"state", "state_mask", "embodiment_id"}
        groot_inputs = {
            k: v
            for k, v in batch.items()
            if (k in allowed_base or k.startswith("eagle_")) and not (k.startswith("next.") or k == "info")
        }
        # Get device from model parameters
        device = next(self.parameters()).device
        # Use bf16 autocast for inference to keep memory low and match backbone dtype
        with torch.autocast(device_type=device.type, dtype=torch.bfloat16, enabled=self.config.use_bf16):
            outputs = self._groot_model.get_action(groot_inputs)
        actions = outputs.get("action_pred")
        # GR00T predicts padded action dims; slice back to the env's true dimension.
        original_action_dim = self.config.output_features[ACTION].shape[0]
        actions = actions[:, :, :original_action_dim]
        return actions

    @torch.no_grad()
    def select_action(self, batch: dict[str, Tensor]) -> Tensor:
        """Select single action from action queue."""
        self.eval()
        if len(self._action_queue) == 0:
            actions = self.predict_action_chunk(batch)
            # (B, n_steps, dim) -> iterate over steps so the queue holds (B, dim) entries.
            self._action_queue.extend(actions.transpose(0, 1))
        return self._action_queue.popleft()

    # -------------------------
    # Internal helpers
    # -------------------------
    def _handle_flash_attention_compatibility(self) -> None:
        """Handle Flash Attention compatibility issues by setting environment variables.
        This addresses the common 'undefined symbol' error that occurs when Flash Attention
        is compiled against a different PyTorch version than what's currently installed.
        """
        # Set environment variables to handle Flash Attention compatibility
        # These help with symbol resolution issues
        os.environ.setdefault("FLASH_ATTENTION_FORCE_BUILD", "0")
        os.environ.setdefault("FLASH_ATTENTION_SKIP_CUDA_BUILD", "0")
        # Try to import flash_attn and handle failures gracefully
        try:
            import flash_attn

            print(f"[GROOT] Flash Attention version: {flash_attn.__version__}")
        except ImportError as e:
            print(f"[GROOT] Flash Attention not available: {e}")
            print("[GROOT] Will use fallback attention mechanism")
        except Exception as e:
            if "undefined symbol" in str(e):
                print(f"[GROOT] Flash Attention compatibility issue detected: {e}")
                print("[GROOT] This is likely due to PyTorch/Flash Attention version mismatch")
                print("[GROOT] Consider reinstalling Flash Attention with compatible version:")
                print("  pip uninstall flash-attn")
                print("  pip install --no-build-isolation flash-attn==2.6.3")
                print("[GROOT] Continuing with fallback attention mechanism")
            else:
                print(f"[GROOT] Flash Attention error: {e}")
                print("[GROOT] Continuing with fallback attention mechanism")
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/groot/modeling_groot.py",
"license": "Apache License 2.0",
"lines": 269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/groot/processor_groot.py | #!/usr/bin/env python
# Copyright 2024 NVIDIA Corporation and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any
import numpy as np
import torch
from einops import rearrange
from PIL import Image
from lerobot.utils.import_utils import _transformers_available
if TYPE_CHECKING or _transformers_available:
from transformers import AutoProcessor, ProcessorMixin
else:
AutoProcessor = None
ProcessorMixin = object
from lerobot.configs.types import (
FeatureType,
NormalizationMode,
PolicyFeature,
)
from lerobot.policies.groot.configuration_groot import GrootConfig
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DeviceProcessorStep,
PolicyAction,
PolicyProcessorPipeline,
ProcessorStep,
ProcessorStepRegistry,
RenameObservationsProcessorStep,
)
from lerobot.processor.converters import (
policy_action_to_transition,
transition_to_policy_action,
)
from lerobot.processor.core import EnvTransition, TransitionKey
from lerobot.utils.constants import (
ACTION,
HF_LEROBOT_HOME,
OBS_IMAGE,
OBS_IMAGES,
OBS_STATE,
POLICY_POSTPROCESSOR_DEFAULT_NAME,
POLICY_PREPROCESSOR_DEFAULT_NAME,
)
# Defaults for Eagle processor locations
DEFAULT_TOKENIZER_ASSETS_REPO = "lerobot/eagle2hg-processor-groot-n1p5"
def make_groot_pre_post_processors(
    config: GrootConfig, dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None
) -> tuple[
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """Create preprocessor and postprocessor for Groot policy.
    This creates a processing pipeline that transforms LeRobot data format into
    the format expected by Isaac-GR00T models:
    Preprocessing steps:
    1. Optional key renaming (dataset-specific key mapping)
    2. Add batch dimension to unbatched data
    3. Pack video/state/action/language/embodiment and apply optional min-max normalization before padding
    4. Encode video+language with Eagle VLM into intermediate eagle_content
    5. Collate eagle_content into batched eagle_* tensors
    6. Move tensors to device (GPU)
    NOTE: We optionally apply min-max normalization to STATE and ACTION using
    dataset-provided statistics prior to padding, mapping values to [-1, 1].
    This mirrors SO100-style preprocessing and keeps scales consistent with GR00T.
    Args:
        config: Groot configuration containing data_config, embodiment_tag, etc.
        dataset_stats: Optional per-key min/max statistics for normalization before padding.
    Returns:
        Tuple of (preprocessor, postprocessor) pipelines
    """
    # Get horizon/dimension parameters from config
    # These should match the config used for the pretrained model
    # Default values match most GR00T configs (state_horizon=1, action_horizon=16)
    state_horizon = 1
    # CRITICAL: Pretrained GR00T models use action_horizon=16 max!
    # The model architecture hardcodes this limit
    action_horizon = min(config.chunk_size, 16)
    max_state_dim = config.max_state_dim
    max_action_dim = config.max_action_dim
    # Pass raw dataset_stats; normalization will occur inside pack step before padding
    padded_stats = dataset_stats or {}
    # Define feature specs for optional normalization steps
    # NOTE(review): `_features` and `_norm_map` are not referenced anywhere
    # below — they look like leftovers from an earlier normalization-step
    # design; confirm and remove.
    _features: dict[str, PolicyFeature] = {
        # Observation features (only add those we may normalize)
        OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(state_horizon, max_state_dim)),
        # Action feature
        ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(action_horizon, max_action_dim)),
    }
    # Normalize STATE and ACTION with min_max (SO100-like default)
    _norm_map = {
        FeatureType.ACTION: NormalizationMode.MIN_MAX,
        FeatureType.STATE: NormalizationMode.MIN_MAX,
    }
    # Determine env action dimension from config (simple, object-like PolicyFeature)
    # NOTE(review): broad except defaults env_action_dim to 0 whenever
    # output_features lacks ACTION — confirm the unpack step handles 0.
    try:
        env_action_dim = int(config.output_features[ACTION].shape[0])
    except Exception:
        env_action_dim = 0
    input_steps: list[ProcessorStep] = [
        # 1. Rename keys if needed (e.g., dataset-specific camera names)
        # Leave empty for now - add mappings if your dataset uses different key names
        RenameObservationsProcessorStep(rename_map={}),
        # 2. Add batch dimension for single samples
        AddBatchDimensionProcessorStep(),
        # 3. Pack video/state/action/language/embodiment; apply optional min-max normalization before padding
        GrootPackInputsStep(
            state_horizon=state_horizon,
            action_horizon=action_horizon,
            max_state_dim=max_state_dim,
            max_action_dim=max_action_dim,
            language_key="task",
            formalize_language=False,
            embodiment_tag=config.embodiment_tag,
            normalize_min_max=True,
            stats=padded_stats,
        ),
        # 4. Eagle encode (creates eagle_content)
        GrootEagleEncodeStep(
            tokenizer_assets_repo=config.tokenizer_assets_repo,
        ),
        # 5. Collate eagle_content -> eagle_* tensors
        GrootEagleCollateStep(
            tokenizer_assets_repo=config.tokenizer_assets_repo,
        ),
        # 6. Move to device
        DeviceProcessorStep(device=config.device),
    ]
    # Postprocessing: slice to env action dim and unnormalize to env scale, then move to CPU
    output_steps: list[ProcessorStep] = [
        GrootActionUnpackUnnormalizeStep(
            env_action_dim=env_action_dim,
            stats=padded_stats,
            normalize_min_max=True,
        ),
        # Finally, move to CPU for env interaction
        DeviceProcessorStep(device="cpu"),
    ]
    return (
        PolicyProcessorPipeline[dict[str, Any], dict[str, Any]](
            steps=input_steps,
            name=POLICY_PREPROCESSOR_DEFAULT_NAME,
        ),
        PolicyProcessorPipeline[PolicyAction, PolicyAction](
            steps=output_steps,
            name=POLICY_POSTPROCESSOR_DEFAULT_NAME,
            to_transition=policy_action_to_transition,
            to_output=transition_to_policy_action,
        ),
    )
# GR00T specific processor steps
def _to_uint8_np_bhwc(img_t: torch.Tensor) -> np.ndarray:
# img_t: (B, C, H, W) float in [0,1] or uint8
if img_t.dtype.is_floating_point:
img_t = (img_t.clamp(0, 1) * 255.0).to(torch.uint8)
return rearrange(img_t.cpu().numpy(), "b c h w -> b h w c")
def _build_eagle_processor(tokenizer_assets_repo: str = DEFAULT_TOKENIZER_ASSETS_REPO) -> ProcessorMixin:
    """Load the Eagle AutoProcessor from the local asset cache.

    Raises:
        FileNotFoundError: when the cache directory has not been populated yet
            (vendor files are copied during model creation).
    """
    cache_dir = HF_LEROBOT_HOME / tokenizer_assets_repo
    expected_files = (
        "processor_config.json",
        "preprocessor_config.json",
        "image_processing_eagle2_5_vl_fast.py",
    )
    missing = [name for name in expected_files if not (cache_dir / name).exists()]
    if missing:
        raise FileNotFoundError(
            f"[GROOT] Eagle processor cache at '{cache_dir}' is not populated. "
            "Vendor files are copied during model creation. Create the policy/model first, "
            "or call ensure_eagle_cache_ready() before building processors."
        )
    processor = AutoProcessor.from_pretrained(str(cache_dir), trust_remote_code=True, use_fast=True)
    processor.tokenizer.padding_side = "left"
    return processor
@dataclass
@ProcessorStepRegistry.register(name="groot_pack_inputs_v3")
class GrootPackInputsStep(ProcessorStep):
    """Pack a LeRobot transition into the tensor layout GR00T expects.

    Produces:
      - ``obs["video"]``: (B, T=1, V, C, H, W) uint8 built from the camera images
      - ``comp["language"]``: a single task string
      - ``obs["state"]`` / ``obs["state_mask"]``: (B, 1, max_state_dim) padded state
      - ``ACTION`` / ``comp["action_mask"]``: (B, action_horizon, max_action_dim)
      - ``comp["embodiment_id"]``: (B,) long tensor looked up in ``embodiment_mapping``

    Optional min-max normalization to [-1, 1] is applied to state/action
    BEFORE any padding, using ``stats``.
    """

    state_horizon: int = 1
    action_horizon: int = 16
    max_state_dim: int = 64
    max_action_dim: int = 32
    language_key: str = "task"
    formalize_language: bool = False
    embodiment_tag: str = "new_embodiment"
    embodiment_mapping: dict[str, int] = field(
        default_factory=lambda: {
            "new_embodiment": 31,  # Match original GR00T EMBODIMENT_TAG_MAPPING
            "oxe_droid": 17,
            "agibot_genie1": 26,
            "gr1": 24,
            "so100": 2,
            "unitree_g1": 3,
        }
    )
    # Min-max normalization (SO100-like) applied BEFORE padding
    normalize_min_max: bool = True
    stats: dict[str, dict[str, Any]] | None = None

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Transform one batched transition in place and return it."""
        obs = transition.get(TransitionKey.OBSERVATION, {}) or {}
        comp = transition.get(TransitionKey.COMPLEMENTARY_DATA, {}) or {}

        def _align_vec(vec: Any, target_dim: int, *, default: float) -> torch.Tensor:
            # Flatten `vec` into a float32 1-D tensor of exactly `target_dim`
            # entries, padding with `default` or truncating. Device is taken from
            # the first tensor found in obs (CPU fallback).
            t = torch.as_tensor(vec)
            t = t.flatten().to(
                dtype=torch.float32,
                device=next(
                    (v.device for v in obs.values() if isinstance(v, torch.Tensor)), torch.device("cpu")
                ),
            )
            d = int(t.shape[-1]) if t.numel() > 0 else 0
            if d == target_dim:
                return t
            if d < target_dim:
                pad = torch.full((target_dim - d,), default, dtype=t.dtype, device=t.device)
                return torch.cat([t, pad], dim=0)
            return t[:target_dim]

        def _min_max_norm(x: torch.Tensor, key: str) -> torch.Tensor:
            # Map x to [-1, 1] using stats[key]["min"/"max"]; identity when
            # normalization is disabled or stats are missing. Dimensions where
            # max == min are mapped to 0 to avoid division by zero.
            if not self.normalize_min_max:
                return x
            if self.stats is None or key not in self.stats:
                return x
            stats_k = self.stats[key]
            last_dim = x.shape[-1]
            min_v = _align_vec(stats_k.get("min", torch.zeros(last_dim)), last_dim, default=0.0)
            max_v = _align_vec(stats_k.get("max", torch.ones(last_dim)), last_dim, default=1.0)
            denom = max_v - min_v
            mask = denom != 0
            safe_denom = torch.where(mask, denom, torch.ones_like(denom))
            mapped = 2 * (x - min_v) / safe_denom - 1
            return torch.where(mask, mapped, torch.zeros_like(mapped))

        # 1) Video (B, T=1, V, H, W, C) uint8; cameras sorted by key for a stable view order
        img_keys = sorted([k for k in obs if k.startswith(OBS_IMAGES)])
        if not img_keys and OBS_IMAGE in obs:
            img_keys = [OBS_IMAGE]
        if img_keys:
            cams = [_to_uint8_np_bhwc(obs[k]) for k in img_keys]
            video = np.stack(cams, axis=1)  # (B, V, H, W, C)
            video = np.expand_dims(video, axis=1)  # (B, 1, V, H, W, C)
            # GR00T validates that video.shape[3] == 3 (channels), so reorder to (B, T, V, C, H, W)
            video = np.transpose(video, (0, 1, 2, 5, 3, 4))  # (B, 1, V, C, H, W)
            obs["video"] = video
            # Drop raw images to avoid confusion downstream
            for k in img_keys:
                obs.pop(k, None)

        # 2) Language (string); falls back to a generic prompt when absent
        lang = comp.get(self.language_key)
        if isinstance(lang, list):
            lang = lang[0] if len(lang) > 0 else None
        if not lang:
            lang = "Perform the task."
        if self.formalize_language:
            # Lowercase and keep only alphanumerics/whitespace.
            lang = (lang or "").lower()
            lang = "".join(ch for ch in lang if ch.isalnum() or ch.isspace())
        comp["language"] = lang

        # 3) State/state_mask -> (B, 1, max_state_dim)
        if OBS_STATE in obs:
            state = obs[OBS_STATE]  # (B, D)
            if state.dim() != 2:
                raise ValueError(f"state must be (B, D), got {tuple(state.shape)}")
            bsz, d = state.shape
            # Normalize BEFORE padding
            if self.normalize_min_max:
                state = _min_max_norm(state, OBS_STATE)
            state = state.unsqueeze(1)  # (B, 1, D)
            if d > self.max_state_dim:
                state = state[:, :, : self.max_state_dim]
                d = self.max_state_dim
            elif d < self.max_state_dim:
                pad = torch.zeros(bsz, 1, self.max_state_dim - d, dtype=state.dtype, device=state.device)
                state = torch.cat([state, pad], dim=2)
            # Mask marks which of the max_state_dim entries are real (not padding).
            state_mask = torch.zeros(bsz, 1, self.max_state_dim, dtype=torch.bool, device=state.device)
            state_mask[:, :, :d] = True
            obs["state"] = state
            obs["state_mask"] = state_mask

        # 4) Action/action_mask -> (B, action_horizon, max_action_dim)
        action = transition.get(TransitionKey.ACTION)
        if isinstance(action, torch.Tensor):
            # Normalize BEFORE temporal expansion/padding
            if self.normalize_min_max:
                if action.dim() == 2:
                    action = _min_max_norm(action, ACTION)
                elif action.dim() == 3:
                    # Flatten (B, T) so normalization is applied per action vector.
                    b, t, d = action.shape
                    flat = action.reshape(b * t, d)
                    flat = _min_max_norm(flat, ACTION)
                    action = flat.view(b, t, d)
            if action.dim() == 2:
                # Single step: repeat it across the whole horizon.
                action = action.unsqueeze(1).repeat(1, self.action_horizon, 1)
            elif action.dim() == 3:
                b, t, d = action.shape
                if t < self.action_horizon:
                    # Extend the horizon by repeating the last timestep.
                    last = action[:, -1:, :]
                    pad = last.repeat(1, self.action_horizon - t, 1)
                    action = torch.cat([action, pad], dim=1)
                elif t > self.action_horizon:
                    action = action[:, : self.action_horizon, :]
            else:
                raise ValueError(f"action must be (B, D) or (B, T, D), got {tuple(action.shape)}")
            b, t, d = action.shape
            if d > self.max_action_dim:
                action = action[:, :, : self.max_action_dim]
                d = self.max_action_dim
            elif d < self.max_action_dim:
                pad = torch.zeros(b, t, self.max_action_dim - d, dtype=action.dtype, device=action.device)
                action = torch.cat([action, pad], dim=2)
            action_mask = torch.zeros(b, t, self.max_action_dim, dtype=torch.bool, device=action.device)
            action_mask[:, :, :d] = True
            transition[TransitionKey.ACTION] = action
            comp["action_mask"] = action_mask

        # 5) Embodiment id as LongTensor (B,)
        emb_id = self.embodiment_mapping.get(self.embodiment_tag, 0)
        # Infer batch size/device from any tensor in obs or action
        bsz = None
        device = torch.device("cpu")
        for v in list(obs.values()) + [transition.get(TransitionKey.ACTION)]:
            if isinstance(v, torch.Tensor):
                bsz = v.shape[0]
                device = v.device
                break
        if bsz is None and "video" in obs and isinstance(obs["video"], np.ndarray):
            bsz = obs["video"].shape[0]
        if bsz is None:
            bsz = 1
        comp["embodiment_id"] = torch.full((bsz,), emb_id, dtype=torch.long, device=device)
        transition[TransitionKey.OBSERVATION] = obs
        transition[TransitionKey.COMPLEMENTARY_DATA] = comp
        return transition

    # Pipeline API requirement: declare how features change (we keep it simple)
    def transform_features(self, features):
        return features

    def get_config(self) -> dict[str, Any]:
        """
        Returns a serializable dictionary of the processor's configuration.
        Excludes 'stats' since they are saved separately via state_dict().
        """
        return {
            "state_horizon": self.state_horizon,
            "action_horizon": self.action_horizon,
            "max_state_dim": self.max_state_dim,
            "max_action_dim": self.max_action_dim,
            "language_key": self.language_key,
            "formalize_language": self.formalize_language,
            "embodiment_tag": self.embodiment_tag,
            "embodiment_mapping": self.embodiment_mapping,
            "normalize_min_max": self.normalize_min_max,
        }

    def state_dict(self) -> dict[str, torch.Tensor]:
        """
        Returns normalization statistics as a flat state dictionary.
        This enables saving stats to safetensors files, similar to normalizer_processor.
        """
        if not self.stats:
            return {}
        flat: dict[str, torch.Tensor] = {}
        for key, sub in self.stats.items():
            for stat_name, value in sub.items():
                # Keys flattened as "<feature_key>.<stat_name>" on CPU tensors.
                tensor = torch.as_tensor(value).cpu()
                flat[f"{key}.{stat_name}"] = tensor
        return flat

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """
        Loads normalization statistics from a flat state dictionary.
        This enables loading stats from safetensors files during from_pretrained.
        """
        if not state:
            return
        reconstructed: dict[str, dict[str, Any]] = {}
        for flat_key, tensor in state.items():
            if "." in flat_key:
                # Split on the LAST dot so feature keys may themselves contain dots.
                key, stat_name = flat_key.rsplit(".", 1)
                if key not in reconstructed:
                    reconstructed[key] = {}
                reconstructed[key][stat_name] = tensor
        if reconstructed:
            self.stats = reconstructed
@dataclass
@ProcessorStepRegistry.register(name="groot_eagle_encode_v3")
class GrootEagleEncodeStep(ProcessorStep):
    """Turn obs["video"] + comp["language"] into Eagle chat-template contents.

    For each batch item, builds an Eagle VLM conversation (camera frames plus
    the task text), applies the chat template, and stores the result in
    comp["eagle_content"] for the collate step to tokenize.
    """

    tokenizer_assets_repo: str = DEFAULT_TOKENIZER_ASSETS_REPO
    # Lazily-built Eagle processor; excluded from __init__ and repr.
    _proc: ProcessorMixin | None = field(default=None, init=False, repr=False)

    @property
    def proc(self) -> ProcessorMixin:
        """Build the Eagle AutoProcessor on first access and cache it."""
        if self._proc is None:
            self._proc = _build_eagle_processor(self.tokenizer_assets_repo)
        return self._proc

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        obs = transition.get(TransitionKey.OBSERVATION, {}) or {}
        comp = transition.get(TransitionKey.COMPLEMENTARY_DATA, {}) or {}
        if "video" not in obs:
            # Nothing to encode (video missing or already consumed).
            return transition
        video = obs["video"]  # (B, T, V, H, W, C) uint8
        lang = comp.get("language", "Perform the task.")
        if isinstance(lang, list):
            lang = lang[0] if len(lang) > 0 else "Perform the task."
        bsz = video.shape[0]
        eagle_contents: list[dict[str, Any]] = []
        for b in range(bsz):
            vt = video[b]  # (T, V, C, H, W) after reorder
            # NOTE(review): the fallback branch also unpacks exactly five dims,
            # so it can only succeed on 5-D input — confirm the intended
            # non-5D handling.
            if vt.ndim != 5:
                # Fallback: assume (T, V, H, W, C)
                t, v, h, w, c = vt.shape
                flat = rearrange(vt, "t v h w c -> (t v) h w c")
            else:
                t, v, c, h, w = vt.shape
                flat = rearrange(vt, "t v c h w -> (t v) h w c")
            images = [Image.fromarray(flat[i]) for i in range(t * v)]
            # Format language as string list representation to match Original GROOT
            lang_formatted = str([lang])
            text_content = [{"type": "text", "text": lang_formatted}]
            image_content = [{"type": "image", "image": img} for img in images]
            conv = [{"role": "user", "content": image_content + text_content}]
            text_list = [self.proc.apply_chat_template(conv, tokenize=False, add_generation_prompt=True)]
            img_inputs, vid_inputs = self.proc.process_vision_info(conv)
            eagle_contents.append(
                {
                    "text_list": text_list,
                    "image_inputs": img_inputs,
                    "video_inputs": vid_inputs,
                }
            )
        comp["eagle_content"] = eagle_contents
        transition[TransitionKey.OBSERVATION] = obs
        transition[TransitionKey.COMPLEMENTARY_DATA] = comp
        return transition

    # Pipeline API requirement: declare how features change (no schema change here)
    def transform_features(self, features):
        return features
# Original GR00T-style collate: converts eagle_content -> eagle_* tensors
def collate(features: list[dict[str, Any]], eagle_processor: ProcessorMixin) -> dict[str, Any]:
    """Collate per-sample feature dicts into one batch dict.

    "eagle_content" entries are tokenized/encoded with `eagle_processor` and
    emitted as "eagle_*" tensors; already-batched vision/text tensors are
    concatenated; everything else (state/action arrays and masks) is stacked.
    """
    batch: dict[str, Any] = {}
    for key in features[0]:
        column = [sample[key] for sample in features]
        if key == "eagle_content":
            texts: list[str] = []
            images: list[Any] = []
            for content in column:
                texts += content["text_list"]
                images += content["image_inputs"]
            encoded = eagle_processor(
                text=texts,
                images=images,
                images_kwargs={"min_dynamic_tiles": 1, "max_dynamic_tiles": 1, "use_thumbnail": False},
                return_tensors="pt",
                padding=True,
            )
            # Prefix with "eagle_" to keep the VLM inputs namespaced.
            for name, tensor in encoded.items():
                batch["eagle_" + name] = tensor
        elif key in ("pixel_values", "image_grid_thw", "attention_mask", "input_ids"):
            # Concat in existing batch dimension.
            batch[key] = torch.cat(column)
        else:
            # state, state_mask, action and action_mask.
            # Stack to form the batch dimension.
            batch[key] = torch.from_numpy(np.stack(column))
    return batch
@dataclass
@ProcessorStepRegistry.register(name="groot_eagle_collate_v3")
class GrootEagleCollateStep(ProcessorStep):
    """Tokenize comp["eagle_content"] into eagle_* tensors via `collate`."""

    tokenizer_assets_repo: str = DEFAULT_TOKENIZER_ASSETS_REPO
    # Lazily-built Eagle processor; excluded from __init__ and repr.
    _proc: ProcessorMixin | None = field(default=None, init=False, repr=False)

    @property
    def proc(self) -> ProcessorMixin:
        """Build the Eagle AutoProcessor on first access and cache it."""
        if self._proc is None:
            self._proc = _build_eagle_processor(self.tokenizer_assets_repo)
        return self._proc

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        obs = transition.get(TransitionKey.OBSERVATION, {}) or {}
        comp = transition.get(TransitionKey.COMPLEMENTARY_DATA, {}) or {}
        contents = comp.get("eagle_content")
        if not contents:
            return transition
        # One feature dict per batch item, as the original collate API expects.
        per_item = [{"eagle_content": content} for content in contents]
        for name, tensor in collate(per_item, self.proc).items():
            comp[name] = tensor
        # Remove the temporary content; the raw video has been fully encoded
        # into eagle_* tensors, so drop it to free memory.
        comp.pop("eagle_content", None)
        obs.pop("video", None)
        transition[TransitionKey.OBSERVATION] = obs
        transition[TransitionKey.COMPLEMENTARY_DATA] = comp
        return transition

    # Pipeline API requirement: declare how features change (no schema change).
    def transform_features(self, features):
        return features
@dataclass
@ProcessorStepRegistry.register(name="groot_action_unpack_unnormalize_v1")
class GrootActionUnpackUnnormalizeStep(ProcessorStep):
    """Postprocessor: unpack the model's action chunk and undo min-max normalization.

    Selects the last timestep of a (B, T, D_model) action tensor, slices it to
    the environment's action dimension, and applies the exact inverse of the
    [-1, 1] min-max normalization performed by the preprocessor.
    """

    # Environment action dimension; 0 means "keep the model's dimension".
    env_action_dim: int = 0
    # Apply inverse of min-max normalization if it was used in preprocessor
    normalize_min_max: bool = True
    stats: dict[str, dict[str, Any]] | None = None

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        # Expect model outputs to be in TransitionKey.ACTION as (B, T, D_model)
        action = transition.get(TransitionKey.ACTION)
        if not isinstance(action, torch.Tensor):
            return transition
        # Select last timestep and slice to env dimension
        if action.dim() == 3:
            action = action[:, -1, :]
        # Now action is (B, D_model)
        if self.env_action_dim and action.shape[-1] >= self.env_action_dim:
            action = action[..., : self.env_action_dim]
        # Inverse min-max normalization mirroring _min_max_norm:
        # forward: y = 2 * (x - min) / denom - 1, with y=0 when denom==0
        # inverse: x = (y+1)/2 * denom + min, and when denom==0 -> x = min
        if self.normalize_min_max and self.stats is not None:
            stats_k = self.stats.get(ACTION, {})
            d = action.shape[-1]
            min_v = torch.as_tensor(
                stats_k.get("min", torch.zeros(d)), dtype=action.dtype, device=action.device
            )
            max_v = torch.as_tensor(
                stats_k.get("max", torch.ones(d)), dtype=action.dtype, device=action.device
            )
            if min_v.numel() != d:
                # Pad missing min entries with 0.0 — same default as the forward pass.
                min_v = torch.nn.functional.pad(
                    min_v.flatten()[:d], (0, max(0, d - min_v.numel())), value=0.0
                )
                min_v = min_v.to(action.device, dtype=action.dtype)
            if max_v.numel() != d:
                # Bugfix: pad missing max entries with 1.0 (F.pad defaults to 0.0),
                # mirroring the forward normalization which defaults max to ones.
                max_v = torch.nn.functional.pad(
                    max_v.flatten()[:d], (0, max(0, d - max_v.numel())), value=1.0
                )
                max_v = max_v.to(action.device, dtype=action.dtype)
            denom = max_v - min_v
            mask = denom != 0
            # Avoid division by zero; degenerate dims are overwritten below.
            safe_denom = torch.where(mask, denom, torch.ones_like(denom))
            inv = (action + 1.0) * 0.5 * safe_denom + min_v
            action = torch.where(mask, inv, min_v)
        transition[TransitionKey.ACTION] = action
        return transition

    # Pipeline API requirement: declare how features change (no schema change).
    def transform_features(self, features):
        return features

    def get_config(self) -> dict[str, Any]:
        """
        Returns a serializable dictionary of the processor's configuration.
        Excludes 'stats' since they are saved separately via state_dict().
        """
        return {
            "env_action_dim": self.env_action_dim,
            "normalize_min_max": self.normalize_min_max,
        }

    def state_dict(self) -> dict[str, torch.Tensor]:
        """
        Returns normalization statistics as a flat state dictionary.
        This enables saving stats to safetensors files, similar to normalizer_processor.
        """
        if not self.stats:
            return {}
        flat: dict[str, torch.Tensor] = {}
        for key, sub in self.stats.items():
            for stat_name, value in sub.items():
                # Keys flattened as "<feature_key>.<stat_name>" on CPU tensors.
                tensor = torch.as_tensor(value).cpu()
                flat[f"{key}.{stat_name}"] = tensor
        return flat

    def load_state_dict(self, state: dict[str, torch.Tensor]) -> None:
        """
        Loads normalization statistics from a flat state dictionary.
        This enables loading stats from safetensors files during from_pretrained.
        """
        if not state:
            return
        reconstructed: dict[str, dict[str, Any]] = {}
        for flat_key, tensor in state.items():
            if "." in flat_key:
                # Split on the LAST dot so feature keys may themselves contain dots.
                key, stat_name = flat_key.rsplit(".", 1)
                if key not in reconstructed:
                    reconstructed[key] = {}
                reconstructed[key][stat_name] = tensor
        if reconstructed:
            self.stats = reconstructed
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/groot/processor_groot.py",
"license": "Apache License 2.0",
"lines": 582,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/groot/utils.py | from pathlib import Path
from shutil import copytree
from huggingface_hub import hf_hub_download
def ensure_eagle_cache_ready(vendor_dir: Path, cache_dir: Path, assets_repo: str) -> None:
    """Populate the Eagle processor directory in cache and ensure tokenizer assets exist.

    - Copies the vendored Eagle files into cache_dir (overwriting when needed).
    - Downloads any missing tokenizer/config assets from `assets_repo`.
    """
    vendor_dir, cache_dir = Path(vendor_dir), Path(cache_dir)
    # Best-effort refresh of the cache from the vendored processor files;
    # a failure here is only a warning because the assets below may still exist.
    try:
        print(f"[GROOT] Copying vendor Eagle files to cache: {vendor_dir} -> {cache_dir}")
        copytree(vendor_dir, cache_dir, dirs_exist_ok=True)
    except Exception as exc:  # nosec: B110
        print(f"[GROOT] Warning: Failed to copy vendor Eagle files to cache: {exc}")
    required_assets = (
        "vocab.json",
        "merges.txt",
        "added_tokens.json",
        "chat_template.json",
        "special_tokens_map.json",
        "config.json",
        "generation_config.json",
        "preprocessor_config.json",
        "processor_config.json",
        "tokenizer_config.json",
    )
    print(f"[GROOT] Assets repo: {assets_repo} \n Cache dir: {cache_dir}")
    # Fetch only the assets the copy above did not provide.
    for fname in (name for name in required_assets if not (cache_dir / name).exists()):
        print(f"[GROOT] Fetching {fname}")
        hf_hub_download(
            repo_id=assets_repo,
            filename=fname,
            repo_type="model",
            local_dir=str(cache_dir),
        )
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/groot/utils.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/lerobot:tests/policies/groot/test_groot_lerobot.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test script for LeRobot's Groot policy forward and inference passes."""
import gc
import os
from copy import deepcopy
from typing import Any
import numpy as np
import pytest
import torch
from lerobot.policies.groot.configuration_groot import GrootConfig
from lerobot.policies.groot.modeling_groot import GrootPolicy
from lerobot.policies.groot.processor_groot import make_groot_pre_post_processors
from lerobot.processor import PolicyAction, PolicyProcessorPipeline
from lerobot.utils.utils import auto_select_torch_device
from tests.utils import require_cuda # noqa: E402
pytest.importorskip("transformers")
pytestmark = pytest.mark.skipif(
os.environ.get("CI") == "true" or os.environ.get("GITHUB_ACTIONS") == "true",
reason="This test requires local Groot installation and is not meant for CI",
)
# Define constants for dummy data
DUMMY_STATE_DIM = 44
DUMMY_ACTION_DIM = 44
DUMMY_ACTION_HORIZON = 16
IMAGE_SIZE = 256
DEVICE = auto_select_torch_device()
MODEL_PATH = "aractingi/bimanual-handover-groot-10k"
def cleanup_memory():
    """Release cached GPU/MPS memory between tests to avoid OOM."""
    print("\nCleaning up memory...")
    gc.collect()
    if torch.cuda.is_available():
        # Drop CUDA caches and wait for in-flight kernels before continuing.
        torch.cuda.empty_cache()
        torch.cuda.synchronize()
    if torch.backends.mps.is_available():
        torch.mps.empty_cache()
    print("Memory cleanup complete.")
def set_seed_all(seed: int):
    """Seed every RNG source (random, numpy, torch) for reproducible runs."""
    import random

    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # Force deterministic kernels: disable cuDNN autotuning and warn (rather
    # than fail) on ops that lack a deterministic implementation.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.use_deterministic_algorithms(True, warn_only=True)
def instantiate_lerobot_groot(
    from_pretrained: bool = False,
    model_path: str = MODEL_PATH,
) -> tuple[
    GrootPolicy,
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """Instantiate LeRobot Groot policy with preprocessor and postprocessor."""
    if from_pretrained:
        # strict=False tolerates missing/unexpected keys in the checkpoint.
        policy = GrootPolicy.from_pretrained(
            pretrained_name_or_path=model_path,
            strict=False,
        )
        policy.config.embodiment_tag = "gr1"
    else:
        policy = GrootPolicy(
            GrootConfig(
                base_model_path=model_path,
                n_action_steps=DUMMY_ACTION_HORIZON,
                chunk_size=DUMMY_ACTION_HORIZON,
                image_size=[IMAGE_SIZE, IMAGE_SIZE],
                device=DEVICE,
                embodiment_tag="gr1",
            )
        )
    policy.to(DEVICE)
    policy.config.device = DEVICE
    # Pass None for dataset_stats to disable normalization (original GR00T doesn't normalize).
    preprocessor, postprocessor = make_groot_pre_post_processors(
        config=policy.config,
        dataset_stats=None,
    )
    return (policy, preprocessor, postprocessor)
def create_dummy_data(device=DEVICE):
    """Create a dummy data batch for testing.

    Images are in [0, 1] as LeRobot expects; the "action" entry is the
    ground-truth chunk used by the training forward pass.
    """
    batch_size = 2
    prompt = "Pick up the red cube and place it in the bin"
    # NOTE: dict-literal evaluation keeps the RNG call order state -> action -> image.
    return {
        "observation.state": torch.randn(batch_size, DUMMY_STATE_DIM, dtype=torch.float32, device=device),
        "action": torch.randn(
            batch_size,
            DUMMY_ACTION_HORIZON,
            DUMMY_ACTION_DIM,
            dtype=torch.float32,
            device=device,
        ),
        "observation.images.ego_view": torch.rand(
            batch_size,
            3,
            IMAGE_SIZE,
            IMAGE_SIZE,
            dtype=torch.float32,
            device=device,
        ),
        "task": [prompt] * batch_size,
    }
@require_cuda
def test_lerobot_groot_inference():
    """Test the inference pass (select_action) of LeRobot's Groot policy."""
    print("Test: LeRobot Groot Inference Pass")
    set_seed_all(42)
    # Instantiate policy and processors from the pretrained checkpoint.
    policy, preprocessor, postprocessor = instantiate_lerobot_groot(from_pretrained=True)
    batch = create_dummy_data()
    print("\n[LeRobot] Running inference...")
    policy.eval()
    processed = preprocessor(deepcopy(batch))
    # Ensure identical RNG state before inference
    torch.manual_seed(42)
    with torch.no_grad():
        action = policy.select_action(processed)
    print(f"\nInference successful. Output action shape: {action.shape}")
    print("Output actions (first 5 dims):")
    print(action[:, :5])
    action = postprocessor(action)
    # Drop references before reclaiming accelerator memory.
    del policy, preprocessor, postprocessor, batch
    cleanup_memory()
@require_cuda
def test_lerobot_groot_forward_pass():
    """Test the forward pass of LeRobot's Groot policy."""
    print("\n" + "=" * 50)
    print("Test: LeRobot Groot Forward Pass (Training Mode)")
    set_seed_all(42)
    # Instantiate policy and preprocessor (postprocessor unused here).
    policy, preprocessor, _ = instantiate_lerobot_groot(from_pretrained=True)
    batch = create_dummy_data()
    policy.eval()
    print("\n[LeRobot] Running forward pass...")
    processed = preprocessor(deepcopy(batch))
    # Re-seed so the forward pass sees the same RNG state every run.
    set_seed_all(42)
    with torch.no_grad():
        loss, metrics = policy.forward(processed)
    print("\nForward pass successful.")
    print(f" - Loss: {loss.item():.6f}")
    print(f" - Metrics: {metrics}")
    del policy, preprocessor, batch
    cleanup_memory()
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/policies/groot/test_groot_lerobot.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/policies/groot/test_groot_vs_original.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test script to verify Groot policy integration with LeRobot vs the original implementation, only meant to be run locally!"""
import gc
import os
from copy import deepcopy
from typing import Any
import numpy as np
import pytest
import torch
from lerobot.policies.groot.configuration_groot import GrootConfig
from lerobot.policies.groot.modeling_groot import GrootPolicy
from lerobot.policies.groot.processor_groot import make_groot_pre_post_processors
from lerobot.processor import PolicyAction, PolicyProcessorPipeline
pytest.importorskip("gr00t")
pytest.importorskip("transformers")
pytestmark = pytest.mark.skipif(
os.environ.get("CI") == "true" or os.environ.get("GITHUB_ACTIONS") == "true",
reason="This test requires local Groot installation and is not meant for CI",
)
from gr00t.data.dataset import ModalityConfig # noqa: E402
from gr00t.data.embodiment_tags import EmbodimentTag # noqa: E402
from gr00t.data.transform.base import ComposedModalityTransform # noqa: E402
from gr00t.model.policy import Gr00tPolicy # noqa: E402
# GR1 humanoid dimensions (from pretrained model metadata)
# The actual GR1 robot has 44 dimensions for both state and action
# GR00TTransform will pad state to 64 and truncate action to 32
DUMMY_STATE_DIM = 44
DUMMY_ACTION_DIM = 44
DUMMY_ACTION_HORIZON = 16
IMAGE_SIZE = 256
DEVICE = "cpu"
MODEL_PATH = "nvidia/GR00T-N1.5-3B"
GR1_BODY_PARTS = {
"left_arm": 7,
"left_hand": 6,
"left_leg": 6,
"neck": 3,
"right_arm": 7,
"right_hand": 6,
"right_leg": 6,
"waist": 3,
}
def cleanup_memory():
    """Release cached GPU/MPS memory between tests to avoid OOM."""
    print("\nCleaning up memory...")
    gc.collect()
    if torch.cuda.is_available():
        # Drop CUDA caches and wait for in-flight kernels before continuing.
        torch.cuda.empty_cache()
        torch.cuda.synchronize()
    if torch.backends.mps.is_available():
        torch.mps.empty_cache()
    print("Memory cleanup complete.")
def set_seed_all(seed: int):
    """Seed every RNG source (random, numpy, torch) for reproducible runs."""
    import random

    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # Force deterministic kernels: disable cuDNN autotuning and warn (rather
    # than fail) on ops that lack a deterministic implementation.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.use_deterministic_algorithms(True, warn_only=True)
def instantiate_lerobot_groot(
    from_pretrained: bool = False,
    model_path: str = MODEL_PATH,
) -> tuple[
    GrootPolicy,
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """Instantiate LeRobot Groot policy with preprocessor and postprocessor.

    Args:
        from_pretrained: load weights from `model_path` instead of building a
            freshly-initialized policy from a GrootConfig.
        model_path: HF repo id or local path of the base/pretrained model.

    Returns:
        (policy, preprocessor, postprocessor) ready for inference on DEVICE.
    """
    if from_pretrained:
        # strict=False tolerates missing/unexpected keys in the checkpoint.
        policy = GrootPolicy.from_pretrained(
            pretrained_name_or_path=model_path,
            strict=False,
        )
        policy.config.embodiment_tag = "gr1"
    else:
        config = GrootConfig(
            base_model_path=model_path,
            n_action_steps=DUMMY_ACTION_HORIZON,
            chunk_size=DUMMY_ACTION_HORIZON,
            image_size=[IMAGE_SIZE, IMAGE_SIZE],
            device=DEVICE,
            embodiment_tag="gr1",
        )
        policy = GrootPolicy(config)
    policy.to(DEVICE)
    policy.config.device = DEVICE
    preprocessor, postprocessor = make_groot_pre_post_processors(
        config=policy.config,
        dataset_stats=None,  # Pass None for dataset_stats to disable normalization (original GR00T doesn't normalize)
    )
    return (policy, preprocessor, postprocessor)
def instantiate_original_groot(
    from_pretrained: bool = False,
    model_path: str = MODEL_PATH,
):
    """Instantiate original Groot policy from NVIDIA's implementation.

    Builds the modality config/transform pipeline that mirrors what the
    LeRobot preprocessor produces, then wraps it in a Gr00tPolicy.

    Returns:
        (policy, modality_config, modality_transform)
    """
    from gr00t.data.transform.concat import ConcatTransform
    from gr00t.data.transform.state_action import StateActionToTensor
    from gr00t.data.transform.video import VideoToNumpy, VideoToTensor
    from gr00t.model.transforms import GR00TTransform

    video_keys = ["video.ego_view"]
    state_keys = [
        "state"
    ]  # Important: Use single concatenated "state" key (not split body parts) to match preprocessing
    action_keys = [
        "action.left_arm",
        "action.right_arm",
        "action.left_hand",
        "action.right_hand",
        "action.left_leg",
        "action.right_leg",
        "action.neck",
        "action.waist",
    ]
    language_keys = ["annotation.human.action.task_description"]
    modality_config = {
        "video": ModalityConfig(
            delta_indices=[0],  # Current frame only
            modality_keys=video_keys,
        ),
        "state": ModalityConfig(
            delta_indices=[0],
            modality_keys=state_keys,
        ),
        "action": ModalityConfig(
            delta_indices=list(range(DUMMY_ACTION_HORIZON)),
            modality_keys=action_keys,
        ),
        "language": ModalityConfig(
            delta_indices=[0],
            modality_keys=language_keys,
        ),
    }
    modality_transform = ComposedModalityTransform(
        transforms=[
            VideoToTensor(apply_to=video_keys),
            VideoToNumpy(apply_to=video_keys),  # Convert to numpy (GR00TTransform expects numpy arrays)
            # State is already a single concatenated key, so no StateActionToTensor needed
            # Convert action from numpy to tensor
            StateActionToTensor(apply_to=action_keys),
            # Concatenate only video and actions (state is already single key)
            ConcatTransform(
                video_concat_order=video_keys,
                state_concat_order=[],  # Empty:state is already single key
                action_concat_order=action_keys,
            ),
            GR00TTransform(
                max_state_dim=64,
                max_action_dim=32,
                state_horizon=1,
                action_horizon=DUMMY_ACTION_HORIZON,
                training=False,
            ),
        ]
    )
    policy = Gr00tPolicy(
        model_path=model_path,
        embodiment_tag=EmbodimentTag.GR1,
        modality_config=modality_config,
        modality_transform=modality_transform,
        device=DEVICE,
    )
    return policy, modality_config, modality_transform
def create_dummy_data(device=DEVICE):
    """Create one dummy batch shared by both implementations under test.

    Images are in [0, 1] as LeRobot expects; the "action" entry is the
    ground-truth chunk used by the training forward pass.
    """
    batch_size = 2
    prompt = "Pick up the red cube and place it in the bin"
    # NOTE: dict-literal evaluation keeps the RNG call order state -> action -> image.
    return {
        "observation.state": torch.randn(batch_size, DUMMY_STATE_DIM, dtype=torch.float32, device=device),
        "action": torch.randn(
            batch_size,
            DUMMY_ACTION_HORIZON,
            DUMMY_ACTION_DIM,
            dtype=torch.float32,
            device=device,
        ),
        "observation.images.ego_view": torch.rand(
            batch_size,
            3,
            IMAGE_SIZE,
            IMAGE_SIZE,
            dtype=torch.float32,
            device=device,
        ),
        "task": [prompt] * batch_size,
    }
def convert_lerobot_to_original_format(batch, modality_config):
    """Convert LeRobot batch format to original Groot format.

    The original Groot expects observations in this format:
    {
        "video.<camera_name>": np.ndarray (T, H, W, C) or (B, T, H, W, C)
        "state.<state_component>": np.ndarray (T, D) or (B, T, D)
        "action.<action_component>": np.ndarray (T, D) or (B, T, D)
        "annotation.<annotation_type>": str or list[str]
    }
    """
    observation = {}
    # Images: LeRobot (B, C, H, W) float in [0, 1] -> original (B, T=1, H, W, C) uint8.
    for cam in ("ego_view",):
        src_key = f"observation.images.{cam}"
        if src_key not in batch:
            continue
        frames = batch[src_key].permute(0, 2, 3, 1).unsqueeze(1).cpu().numpy()
        observation[f"video.{cam}"] = (frames * 255).astype(np.uint8)
    # Important: The Original's GR00TTransform expects "state" as (B, T, D), not split body parts.
    if "observation.state" in batch:
        observation["state"] = batch["observation.state"].unsqueeze(1).cpu().numpy()
    # Actions are split into per-body-part slices in GR1_BODY_PARTS order.
    if "action" in batch:
        action_np = batch["action"].cpu().numpy()
        offset = 0
        for part_name, part_dim in GR1_BODY_PARTS.items():
            observation[f"action.{part_name}"] = action_np[:, :, offset : offset + part_dim]
            offset += part_dim
    if "task" in batch:
        tasks = batch["task"]
        # GR00TTransform expects language with (B, T) shape for batched data:
        # a (B, 1) object array holding each raw string.
        task_array = np.empty((len(tasks), 1), dtype=object)
        for i, text in enumerate(tasks):
            task_array[i, 0] = text
        observation["annotation.human.action.task_description"] = task_array
    return observation
def test_groot_original_vs_lerobot_pretrained():
    """Test Groot original implementation vs LeRobot implementation with pretrained weights.

    Runs the same dummy batch through both policies with identical RNG state
    and asserts the predicted actions match within an absolute tolerance.
    """
    print("Test: Groot Original vs LeRobot with Pretrained Weights (Inference)")
    set_seed_all(42)
    lerobot_policy, lerobot_preprocessor, lerobot_postprocessor = instantiate_lerobot_groot(
        from_pretrained=True
    )
    original_policy, modality_config, modality_transform = instantiate_original_groot(from_pretrained=True)
    batch = create_dummy_data()
    batch_lerobot = deepcopy(batch)
    print("\n[LeRobot] Running inference...")
    lerobot_policy.eval()
    batch_lerobot_processed = lerobot_preprocessor(batch_lerobot)
    # Important: Reset seed immediately before inference to ensure identical RNG state
    torch.manual_seed(42)
    with torch.no_grad():
        lerobot_actions = lerobot_policy.select_action(batch_lerobot_processed)
    print("\n[Original] Running inference...")
    original_policy.model.eval()
    observation = convert_lerobot_to_original_format(batch, modality_config)
    original_obs_transformed = modality_transform(deepcopy(observation))
    # Important: Reset seed immediately before inference to ensure identical RNG state
    torch.manual_seed(42)
    with torch.no_grad():
        original_model_output = original_policy.model.get_action(original_obs_transformed)
    original_actions_raw = original_model_output["action_pred"]  # [2, 16, 32]
    # Take first timestep
    original_actions = original_actions_raw[:, 0, :].to(lerobot_actions.device).to(lerobot_actions.dtype)
    print("Action Comparison:")
    diff = lerobot_actions - original_actions
    abs_diff = torch.abs(diff)
    # Per-sample table of both predictions and their per-dimension difference.
    for batch_idx in range(lerobot_actions.shape[0]):
        print(f"\n{'=' * 60}")
        print(f"Batch {batch_idx}")
        print(f"{'=' * 60}")
        print(f"{'Idx':<5} {'LeRobot':<14} {'Original':<14} {'Difference':<14}")
        print("-" * 60)
        for action_idx in range(lerobot_actions.shape[1]):
            lr_val = lerobot_actions[batch_idx, action_idx].item()
            orig_val = original_actions[batch_idx, action_idx].item()
            diff_val = abs(lr_val - orig_val)
            sign = "+" if (lr_val - orig_val) > 0 else "-"
            print(f"{action_idx:<5} {lr_val:>13.6f} {orig_val:>13.6f} {sign}{diff_val:>12.6f}")
    max_diff = abs_diff.max().item()
    tolerance = 0.001
    assert torch.allclose(lerobot_actions, original_actions, atol=tolerance), (
        f"Actions differ by more than tolerance ({tolerance}): max diff = {max_diff:.6f}"
    )
    print(f"\nSuccess: Actions match within tolerance ({tolerance})!")
    # Drop references to the large models before reclaiming accelerator memory.
    del lerobot_policy, lerobot_preprocessor, lerobot_postprocessor
    del original_policy, modality_config, modality_transform
    del batch, batch_lerobot, observation
    cleanup_memory()
def test_groot_forward_pass_comparison():
    """Test forward pass comparison between LeRobot and Original Groot implementations.

    Runs a training-mode forward pass through both the LeRobot port and the
    original Groot model on the same dummy batch and reports the loss gap.
    NOTE(review): unlike the action-comparison test above, this test only
    prints the loss difference and asserts nothing — confirm that is intended.
    """
    print("Test: Forward Pass Comparison (Training Mode)")
    set_seed_all(42)
    # Both policies load the same pretrained weights so their parameters match.
    lerobot_policy, lerobot_preprocessor, lerobot_postprocessor = instantiate_lerobot_groot(
        from_pretrained=True
    )
    original_policy, modality_config, modality_transform = instantiate_original_groot(from_pretrained=True)
    batch = create_dummy_data()
    lerobot_policy.eval()
    original_policy.model.eval()
    print("\n[LeRobot] Running forward pass...")
    batch_lerobot = deepcopy(batch)
    batch_lerobot_processed = lerobot_preprocessor(batch_lerobot)
    # Re-seed immediately before each forward pass so both models see the
    # same RNG state (the flow-matching loss samples noise internally).
    set_seed_all(42)
    with torch.no_grad():
        lerobot_loss, lerobot_metrics = lerobot_policy.forward(batch_lerobot_processed)
    print(f" Loss: {lerobot_loss.item():.6f}")
    print("\n[Original] Running forward pass...")
    observation = convert_lerobot_to_original_format(batch, modality_config)
    transformed_obs = modality_transform(observation)
    if "action" not in transformed_obs:
        # The original model's forward() needs ground-truth actions; reuse the
        # ones produced by the LeRobot preprocessor so both models are
        # supervised with identical targets.
        action_for_forward = batch_lerobot_processed["action"]
        action_mask_for_forward = batch_lerobot_processed["action_mask"]
        # Match action horizon if needed
        if action_for_forward.shape[1] != original_policy.model.action_horizon:
            if action_for_forward.shape[1] < original_policy.model.action_horizon:
                # Pad by repeating the last action step; padded steps get a
                # zero mask so they do not contribute to the loss.
                pad_size = original_policy.model.action_horizon - action_for_forward.shape[1]
                last_action = action_for_forward[:, -1:, :]
                padding = last_action.repeat(1, pad_size, 1)
                action_for_forward = torch.cat([action_for_forward, padding], dim=1)
                mask_padding = torch.zeros(
                    action_mask_for_forward.shape[0],
                    pad_size,
                    action_mask_for_forward.shape[2],
                    dtype=action_mask_for_forward.dtype,
                    device=action_mask_for_forward.device,
                )
                action_mask_for_forward = torch.cat([action_mask_for_forward, mask_padding], dim=1)
            else:
                # Truncate to the model's horizon.
                action_for_forward = action_for_forward[:, : original_policy.model.action_horizon, :]
                action_mask_for_forward = action_mask_for_forward[
                    :, : original_policy.model.action_horizon, :
                ]
        transformed_obs["action"] = action_for_forward
        transformed_obs["action_mask"] = action_mask_for_forward
    set_seed_all(42)
    with torch.no_grad():
        original_outputs = original_policy.model.forward(transformed_obs)
    original_loss = original_outputs["loss"]
    print(f" Loss: {original_loss.item():.6f}")
    # Report absolute and relative gap (relative guarded against divide-by-zero).
    loss_diff = abs(lerobot_loss.item() - original_loss.item())
    loss_rel_diff = loss_diff / (abs(original_loss.item()) + 1e-8) * 100
    print("\nLoss Values:")
    print(f" LeRobot: {lerobot_loss.item():.6f}")
    print(f" Original: {original_loss.item():.6f}")
    print(f" Absolute difference: {loss_diff:.6f}")
    print(f" Relative difference: {loss_rel_diff:.2f}%")
    # Drop model/tensor references before the next test to cap memory usage.
    del lerobot_policy, lerobot_preprocessor, lerobot_postprocessor
    del original_policy, modality_config, modality_transform
    del batch, batch_lerobot, observation, transformed_obs
    cleanup_memory()
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/policies/groot/test_groot_vs_original.py",
"license": "Apache License 2.0",
"lines": 367,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/training/test_multi_gpu.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Multi-GPU Training Tests
This module tests multi-GPU training functionality with accelerate.
These tests are designed to run on machines with 2+ GPUs and are executed
in the nightly CI workflow.
The tests automatically generate accelerate configs and launch training
with subprocess to properly test the distributed training environment.
"""
import os
import subprocess
import tempfile
from pathlib import Path
import pytest
import torch
from lerobot.datasets.lerobot_dataset import LeRobotDataset
def get_num_available_gpus():
    """Return how many CUDA devices are visible (0 when CUDA is unavailable)."""
    return torch.cuda.device_count() if torch.cuda.is_available() else 0
def download_dataset(repo_id, episodes):
    """Fetch a dataset up front so parallel training workers don't race on the download.

    Args:
        repo_id: HuggingFace dataset repository ID.
        episodes: List of episode indices to download.
    """
    # Constructing the dataset object triggers the download as a side effect;
    # the instance itself is not needed afterwards.
    LeRobotDataset(repo_id, episodes=episodes)
    print(f"Dataset {repo_id} downloaded successfully")
def run_accelerate_training(config_args, num_processes=4, temp_dir=None):
    """
    Launch `lerobot.scripts.lerobot_train` under `accelerate launch`.

    Args:
        config_args: List of config arguments to pass to lerobot_train.py.
        num_processes: Number of processes (GPUs) to use.
        temp_dir: Temporary directory for outputs.

    Returns:
        subprocess.CompletedProcess result.
    """
    config_path = Path(temp_dir) / "accelerate_config.yaml"
    # Minimal single-machine multi-GPU accelerate config, written as one blob.
    config_lines = [
        "compute_environment: LOCAL_MACHINE",
        "distributed_type: MULTI_GPU",
        "mixed_precision: 'no'",
        f"num_processes: {num_processes}",
        "use_cpu: false",
        "gpu_ids: all",
        "downcast_bf16: 'no'",
        "machine_rank: 0",
        "main_training_function: main",
        "num_machines: 1",
        "rdzv_backend: static",
        "same_network: true",
    ]
    config_path.write_text("\n".join(config_lines) + "\n")
    cmd = [
        "accelerate",
        "launch",
        "--config_file",
        str(config_path),
        "-m",
        "lerobot.scripts.lerobot_train",
        *config_args,
    ]
    # Restrict the launch to the first `num_processes` GPUs.
    env = dict(os.environ)
    env["CUDA_VISIBLE_DEVICES"] = ",".join(str(i) for i in range(num_processes))
    return subprocess.run(cmd, capture_output=True, text=True, env=env)
@pytest.mark.skipif(
    get_num_available_gpus() < 2,
    reason="Multi-GPU tests require at least 2 GPUs",
)
class TestMultiGPUTraining:
    """Test suite for multi-GPU training functionality."""

    def test_basic_multi_gpu_training(self):
        """
        Test that basic multi-GPU training runs successfully.
        Verifies that the training completes without errors.
        """
        # Pre-download dataset to avoid race conditions
        download_dataset("lerobot/pusht", episodes=[0])
        with tempfile.TemporaryDirectory() as temp_dir:
            output_dir = Path(temp_dir) / "outputs"
            # Short ACT run: 10 steps, checkpoint at step 10, evaluation disabled.
            config_args = [
                "--dataset.repo_id=lerobot/pusht",
                "--dataset.episodes=[0]",
                "--policy.type=act",
                "--policy.device=cuda",
                "--policy.push_to_hub=false",
                f"--output_dir={output_dir}",
                "--batch_size=4",
                "--steps=10",
                "--eval_freq=-1",
                "--log_freq=5",
                "--save_freq=10",
                "--seed=42",
                "--num_workers=0",
            ]
            # NOTE(review): this launches 4 processes although the class-level
            # skip only requires 2 GPUs — confirm the CI machines have 4.
            result = run_accelerate_training(config_args, num_processes=4, temp_dir=temp_dir)
            # Check that training completed successfully
            assert result.returncode == 0, (
                f"Multi-GPU training failed with return code {result.returncode}\n"
                f"STDOUT:\n{result.stdout}\n"
                f"STDERR:\n{result.stderr}"
            )
            # Verify checkpoint was saved
            checkpoints_dir = output_dir / "checkpoints"
            assert checkpoints_dir.exists(), "Checkpoints directory was not created"
            # Verify that training completed (the final log line may land on
            # either stream depending on how accelerate routes process output)
            assert "End of training" in result.stdout or "End of training" in result.stderr

    def test_checkpoint_saving_multi_gpu(self):
        """
        Test that checkpoints are correctly saved during multi-GPU training.
        Only the main process (rank 0) should save checkpoints.
        """
        # Pre-download dataset to avoid race conditions
        download_dataset("lerobot/pusht", episodes=[0])
        with tempfile.TemporaryDirectory() as temp_dir:
            output_dir = Path(temp_dir) / "outputs"
            # 20 steps with save_freq=10 -> checkpoints expected at steps 10 and 20.
            config_args = [
                "--dataset.repo_id=lerobot/pusht",
                "--dataset.episodes=[0]",
                "--policy.type=act",
                "--policy.device=cuda",
                "--policy.push_to_hub=false",
                f"--output_dir={output_dir}",
                "--batch_size=4",
                "--steps=20",
                "--eval_freq=-1",
                "--log_freq=5",
                "--save_freq=10",
                "--seed=42",
                "--num_workers=0",
            ]
            result = run_accelerate_training(config_args, num_processes=2, temp_dir=temp_dir)
            assert result.returncode == 0, (
                f"Training failed:\nSTDOUT:\n{result.stdout}\n\nSTDERR:\n{result.stderr}"
            )
            # Verify checkpoint directory exists
            checkpoints_dir = output_dir / "checkpoints"
            assert checkpoints_dir.exists(), "Checkpoints directory not created"
            # Count checkpoint directories (should have checkpoint at step 10 and 20)
            checkpoint_dirs = [d for d in checkpoints_dir.iterdir() if d.is_dir()]
            assert len(checkpoint_dirs) >= 1, f"Expected at least 1 checkpoint, found {len(checkpoint_dirs)}"
            # Verify checkpoint contents
            for checkpoint_dir in checkpoint_dirs:
                # Check for model files
                model_files = list(checkpoint_dir.rglob("*.safetensors"))
                assert len(model_files) > 0, f"No model files in checkpoint {checkpoint_dir}"
                # Check for training state
                training_state_dir = checkpoint_dir / "training_state"
                assert training_state_dir.exists(), f"No training state in checkpoint {checkpoint_dir}"
                # Verify optimizer state exists
                optimizer_state = training_state_dir / "optimizer_state.safetensors"
                assert optimizer_state.exists(), f"No optimizer state in checkpoint {checkpoint_dir}"
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/training/test_multi_gpu.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:src/lerobot/envs/metaworld.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from collections import defaultdict
from collections.abc import Callable, Sequence
from pathlib import Path
from typing import Any
import gymnasium as gym
import metaworld
import metaworld.policies as policies
import numpy as np
from gymnasium import spaces
from lerobot.processor import RobotObservation
# ---- Load configuration data from the external JSON file ----
# The JSON file is expected to live next to this module.
CONFIG_PATH = Path(__file__).parent / "metaworld_config.json"
try:
    with open(CONFIG_PATH) as f:
        data = json.load(f)
except FileNotFoundError as err:
    raise FileNotFoundError(
        "Could not find 'metaworld_config.json'. "
        "Please ensure the configuration file is in the same directory as the script."
    ) from err
except json.JSONDecodeError as err:
    raise ValueError(
        "Failed to decode 'metaworld_config.json'. Please ensure it is a valid JSON file."
    ) from err
# ---- Process the loaded data ----
# extract and type-check top-level dicts
# Task name -> natural-language task description.
task_descriptions_obj = data.get("TASK_DESCRIPTIONS")
if not isinstance(task_descriptions_obj, dict):
    raise TypeError("Expected TASK_DESCRIPTIONS to be a dict[str, str]")
TASK_DESCRIPTIONS: dict[str, str] = task_descriptions_obj
# Task name -> integer task identifier.
task_name_to_id_obj = data.get("TASK_NAME_TO_ID")
if not isinstance(task_name_to_id_obj, dict):
    raise TypeError("Expected TASK_NAME_TO_ID to be a dict[str, int]")
TASK_NAME_TO_ID: dict[str, int] = task_name_to_id_obj
# difficulty -> tasks mapping
difficulty_to_tasks = data.get("DIFFICULTY_TO_TASKS")
if not isinstance(difficulty_to_tasks, dict):
    raise TypeError("Expected 'DIFFICULTY_TO_TASKS' to be a dict[str, list[str]]")
DIFFICULTY_TO_TASKS: dict[str, list[str]] = difficulty_to_tasks
# convert policy strings -> actual policy classes
# (class names in the JSON are resolved against metaworld.policies)
task_policy_mapping = data.get("TASK_POLICY_MAPPING")
if not isinstance(task_policy_mapping, dict):
    raise TypeError("Expected 'TASK_POLICY_MAPPING' to be a dict[str, str]")
TASK_POLICY_MAPPING: dict[str, Any] = {
    task_name: getattr(policies, policy_class_name)
    for task_name, policy_class_name in task_policy_mapping.items()
}
# Action is 4-D (used for the Box action space below); the first 4 entries of
# the raw state vector are used as the agent position (see _format_raw_obs).
# NOTE(review): presumably (x, y, z, gripper) — confirm against Meta-World docs.
ACTION_DIM = 4
OBS_DIM = 4
class MetaworldEnv(gym.Env):
    """Gymnasium wrapper around a single Meta-World MT1 task.

    Provides pixel (and optionally agent-position) observations, a scripted
    expert policy for the task, and a 4-D continuous action space in [-1, 1].
    """

    metadata = {"render_modes": ["rgb_array"], "render_fps": 80}

    def __init__(
        self,
        task,
        camera_name="corner2",
        obs_type="pixels",
        render_mode="rgb_array",
        observation_width=480,
        observation_height=480,
        visualization_width=640,
        visualization_height=480,
    ):
        """
        Args:
            task: Meta-World task name; an optional "metaworld-" prefix is stripped.
            camera_name: MuJoCo camera used for rendering (default "corner2").
            obs_type: One of "pixels" or "pixels_agent_pos" ("state" raises
                NotImplementedError).
            render_mode: Gymnasium render mode (only "rgb_array" is declared).
            observation_width: Width of observation renders, in pixels.
            observation_height: Height of observation renders, in pixels.
            visualization_width: Width of visualization renders, in pixels.
            visualization_height: Height of visualization renders, in pixels.
        """
        super().__init__()
        self.task = task.replace("metaworld-", "")
        self.obs_type = obs_type
        self.render_mode = render_mode
        self.observation_width = observation_width
        self.observation_height = observation_height
        self.visualization_width = visualization_width
        self.visualization_height = visualization_height
        self.camera_name = camera_name
        self._env = self._make_envs_task(self.task)
        self._max_episode_steps = self._env.max_path_length
        # Natural-language description and scripted expert for this task,
        # looked up from the JSON-backed config tables defined at module level.
        self.task_description = TASK_DESCRIPTIONS[self.task]
        self.expert_policy = TASK_POLICY_MAPPING[self.task]()
        if self.obs_type == "state":
            raise NotImplementedError()
        elif self.obs_type == "pixels":
            self.observation_space = spaces.Dict(
                {
                    "pixels": spaces.Box(
                        low=0,
                        high=255,
                        shape=(self.observation_height, self.observation_width, 3),
                        dtype=np.uint8,
                    )
                }
            )
        elif self.obs_type == "pixels_agent_pos":
            self.observation_space = spaces.Dict(
                {
                    "pixels": spaces.Box(
                        low=0,
                        high=255,
                        shape=(self.observation_height, self.observation_width, 3),
                        dtype=np.uint8,
                    ),
                    "agent_pos": spaces.Box(
                        low=-1000.0,
                        high=1000.0,
                        shape=(OBS_DIM,),
                        dtype=np.float64,
                    ),
                }
            )
        self.action_space = spaces.Box(low=-1, high=1, shape=(ACTION_DIM,), dtype=np.float32)

    def render(self) -> np.ndarray:
        """
        Render the current environment frame.
        Returns:
            np.ndarray: The rendered RGB image from the environment.
        """
        image = self._env.render()
        if self.camera_name == "corner2":
            # Images from this camera are flipped — correct them
            image = np.flip(image, (0, 1))
        return image

    def _make_envs_task(self, env_name: str):
        """Build and initialize the underlying Meta-World MT1 environment."""
        mt1 = metaworld.MT1(env_name, seed=42)
        env = mt1.train_classes[env_name](render_mode="rgb_array", camera_name=self.camera_name)
        env.set_task(mt1.train_tasks[0])
        if self.camera_name == "corner2":
            env.model.cam_pos[2] = [
                0.75,
                0.075,
                0.7,
            ]  # corner2 position, similar to https://arxiv.org/pdf/2206.14244
        env.reset()
        env._freeze_rand_vec = False  # otherwise no randomization
        return env

    def _format_raw_obs(self, raw_obs: np.ndarray) -> RobotObservation:
        """Convert a raw Meta-World state vector into the configured observation dict."""
        image = None
        if self._env is not None:
            image = self._env.render()
            if self.camera_name == "corner2":
                # NOTE: The "corner2" camera in MetaWorld environments outputs images with both axes inverted.
                image = np.flip(image, (0, 1))
        # First 4 entries of the raw state are used as the agent position (OBS_DIM).
        agent_pos = raw_obs[:4]
        if self.obs_type == "state":
            raise NotImplementedError(
                "'state' obs_type not implemented for MetaWorld. Use pixel modes instead."
            )
        elif self.obs_type in ("pixels", "pixels_agent_pos"):
            assert image is not None, (
                "Expected `image` to be rendered before constructing pixel-based observations. "
                "This likely means `env.render()` returned None or the environment was not provided."
            )
            if self.obs_type == "pixels":
                obs = {"pixels": image.copy()}
            else:  # pixels_agent_pos
                obs = {
                    "pixels": image.copy(),
                    "agent_pos": agent_pos,
                }
        else:
            raise ValueError(f"Unknown obs_type: {self.obs_type}")
        return obs

    def reset(
        self,
        seed: int | None = None,
        **kwargs,
    ) -> tuple[RobotObservation, dict[str, Any]]:
        """
        Reset the environment to its initial state.
        Args:
            seed (Optional[int]): Random seed for environment initialization.
        Returns:
            observation (RobotObservation): The initial formatted observation.
            info (Dict[str, Any]): Additional info about the reset state.
        """
        super().reset(seed=seed)
        raw_obs, info = self._env.reset(seed=seed)
        observation = self._format_raw_obs(raw_obs)
        # The wrapped env's reset info is discarded; expose only a fresh success flag.
        info = {"is_success": False}
        return observation, info

    def step(self, action: np.ndarray) -> tuple[RobotObservation, float, bool, bool, dict[str, Any]]:
        """
        Perform one environment step.
        Args:
            action (np.ndarray): The action to execute, must be 1-D with shape (action_dim,).
        Returns:
            observation (RobotObservation): The formatted observation after the step.
            reward (float): The scalar reward for this step.
            terminated (bool): Whether the episode terminated successfully.
            truncated (bool): Whether the episode was truncated due to a time limit.
            info (Dict[str, Any]): Additional environment info.
        """
        if action.ndim != 1:
            raise ValueError(
                f"Expected action to be 1-D (shape (action_dim,)), "
                f"but got shape {action.shape} with ndim={action.ndim}"
            )
        raw_obs, reward, done, truncated, info = self._env.step(action)
        # Determine whether the task was successful
        is_success = bool(info.get("success", 0))
        terminated = done or is_success
        info.update(
            {
                "task": self.task,
                "done": done,
                "is_success": is_success,
            }
        )
        # Format the raw observation into the expected structure
        observation = self._format_raw_obs(raw_obs)
        if terminated:
            info["final_info"] = {
                "task": self.task,
                "done": bool(done),
                "is_success": bool(is_success),
            }
            # Auto-reset on termination. The returned observation is the one
            # from BEFORE the reset; the next step() starts a fresh episode.
            self.reset()
        return observation, reward, terminated, truncated, info

    def close(self):
        """Close the underlying Meta-World environment and release its resources."""
        self._env.close()
# ---- Main API ----------------------------------------------------------------
def create_metaworld_envs(
task: str,
n_envs: int,
gym_kwargs: dict[str, Any] | None = None,
env_cls: Callable[[Sequence[Callable[[], Any]]], Any] | None = None,
) -> dict[str, dict[int, Any]]:
"""
Create vectorized Meta-World environments with a consistent return shape.
Returns:
dict[task_group][task_id] -> vec_env (env_cls([...]) with exactly n_envs factories)
Notes:
- n_envs is the number of rollouts *per task* (episode_index = 0..n_envs-1).
- `task` can be a single difficulty group (e.g., "easy", "medium", "hard") or a comma-separated list.
- If a task name is not in DIFFICULTY_TO_TASKS, we treat it as a single custom task.
"""
if env_cls is None or not callable(env_cls):
raise ValueError("env_cls must be a callable that wraps a list of environment factory callables.")
if not isinstance(n_envs, int) or n_envs <= 0:
raise ValueError(f"n_envs must be a positive int; got {n_envs}.")
gym_kwargs = dict(gym_kwargs or {})
task_groups = [t.strip() for t in task.split(",") if t.strip()]
if not task_groups:
raise ValueError("`task` must contain at least one Meta-World task or difficulty group.")
print(f"Creating Meta-World envs | task_groups={task_groups} | n_envs(per task)={n_envs}")
out: dict[str, dict[int, Any]] = defaultdict(dict)
for group in task_groups:
# if not in difficulty presets, treat it as a single custom task
tasks = DIFFICULTY_TO_TASKS.get(group, [group])
for tid, task_name in enumerate(tasks):
print(f"Building vec env | group={group} | task_id={tid} | task={task_name}")
# build n_envs factories
fns = [(lambda tn=task_name: MetaworldEnv(task=tn, **gym_kwargs)) for _ in range(n_envs)]
out[group][tid] = env_cls(fns)
# return a plain dict for consistency
return {group: dict(task_map) for group, task_map in out.items()}
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/envs/metaworld.py",
"license": "Apache License 2.0",
"lines": 266,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/dataset/use_dataset_tools.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example script demonstrating dataset tools utilities.
This script shows how to:
1. Delete episodes from a dataset
2. Split a dataset into train/val sets
3. Add/remove features
4. Merge datasets
Usage:
python examples/dataset/use_dataset_tools.py
"""
import numpy as np
from lerobot.datasets.dataset_tools import (
add_features,
delete_episodes,
merge_datasets,
modify_features,
remove_feature,
split_dataset,
)
from lerobot.datasets.lerobot_dataset import LeRobotDataset
def main():
    """Walk through the dataset-tools API end to end on `lerobot/pusht`."""
    source = LeRobotDataset("lerobot/pusht")
    print(f"Original dataset: {source.meta.total_episodes} episodes, {source.meta.total_frames} frames")
    print(f"Features: {list(source.meta.features.keys())}")

    # 1. Episode deletion.
    print("\n1. Deleting episodes 0 and 2...")
    trimmed = delete_episodes(source, episode_indices=[0, 2], repo_id="lerobot/pusht_filtered")
    print(f"Filtered dataset: {trimmed.meta.total_episodes} episodes")

    # 2. Fractional train/val split.
    print("\n2. Splitting dataset into train/val...")
    splits = split_dataset(source, splits={"train": 0.8, "val": 0.2})
    print(f"Train split: {splits['train'].meta.total_episodes} episodes")
    print(f"Val split: {splits['val'].meta.total_episodes} episodes")

    # 3. Adding features: one from a precomputed array, one from a per-frame callback.
    print("\n3. Adding features...")
    rewards = np.random.randn(source.meta.total_frames).astype(np.float32)

    def compute_success(row_dict, episode_index, frame_index):
        episode_length = 10
        return float(frame_index >= episode_length - 10)

    enriched = add_features(
        source,
        features={
            "reward": (rewards, {"dtype": "float32", "shape": (1,), "names": None}),
            "success": (compute_success, {"dtype": "float32", "shape": (1,), "names": None}),
        },
        repo_id="lerobot/pusht_with_features",
    )
    print(f"New features: {list(enriched.meta.features.keys())}")

    # 4. Removing a single feature.
    print("\n4. Removing the success feature...")
    cleaned = remove_feature(enriched, feature_names="success", repo_id="lerobot/pusht_cleaned")
    print(f"Features after removal: {list(cleaned.meta.features.keys())}")

    # 5. Adding and removing features in one pass.
    print("\n5. Using modify_features to add and remove features simultaneously...")
    modified = modify_features(
        enriched,
        add_features={
            "discount": (
                np.ones(source.meta.total_frames, dtype=np.float32) * 0.99,
                {"dtype": "float32", "shape": (1,), "names": None},
            ),
        },
        remove_features="reward",
        repo_id="lerobot/pusht_modified",
    )
    print(f"Modified features: {list(modified.meta.features.keys())}")

    # 6. Merging the splits back into one dataset.
    print("\n6. Merging train and val splits back together...")
    merged = merge_datasets([splits["train"], splits["val"]], output_repo_id="lerobot/pusht_merged")
    print(f"Merged dataset: {merged.meta.total_episodes} episodes")

    # 7. Camera removal, only meaningful with more than one camera.
    print("\n7. Complex workflow example...")
    if len(source.meta.camera_keys) > 1:
        camera_to_remove = source.meta.camera_keys[0]
        print(f"Removing camera: {camera_to_remove}")
        without_cam = remove_feature(source, feature_names=camera_to_remove, repo_id="pusht_no_first_camera")
        print(f"Remaining cameras: {without_cam.meta.camera_keys}")

    print("\nDone! Check ~/.cache/huggingface/lerobot/ for the created datasets.")


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/dataset/use_dataset_tools.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/datasets/dataset_tools.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset tools utilities for LeRobotDataset.
This module provides utilities for:
- Deleting episodes from datasets
- Splitting datasets into multiple smaller datasets
- Adding/removing features from datasets
- Merging datasets (wrapper around aggregate functionality)
"""
import logging
import shutil
from collections.abc import Callable
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
import datasets
import numpy as np
import pandas as pd
import pyarrow.parquet as pq
import torch
from tqdm import tqdm
from lerobot.datasets.aggregate import aggregate_datasets
from lerobot.datasets.compute_stats import aggregate_stats
from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.datasets.utils import (
DATA_DIR,
DEFAULT_CHUNK_SIZE,
DEFAULT_DATA_FILE_SIZE_IN_MB,
DEFAULT_DATA_PATH,
DEFAULT_EPISODES_PATH,
get_parquet_file_size_in_mb,
load_episodes,
update_chunk_file_indices,
write_info,
write_stats,
write_tasks,
)
from lerobot.datasets.video_utils import encode_video_frames, get_video_info
from lerobot.utils.constants import HF_LEROBOT_HOME, OBS_IMAGE
def _load_episode_with_stats(src_dataset: LeRobotDataset, episode_idx: int) -> dict:
    """Load a single episode's metadata including stats from parquet file.

    Args:
        src_dataset: Source dataset.
        episode_idx: Episode index to load.

    Returns:
        dict containing episode metadata and stats.
    """
    # The episode record stores which chunk/file of the episodes metadata
    # parquet holds its row.
    episode_meta = src_dataset.meta.episodes[episode_idx]
    parquet_path = src_dataset.root / DEFAULT_EPISODES_PATH.format(
        chunk_index=episode_meta["meta/episodes/chunk_index"],
        file_index=episode_meta["meta/episodes/file_index"],
    )
    frame = pd.read_parquet(parquet_path)
    matching = frame.loc[frame["episode_index"] == episode_idx]
    return matching.iloc[0].to_dict()
def delete_episodes(
    dataset: LeRobotDataset,
    episode_indices: list[int],
    output_dir: str | Path | None = None,
    repo_id: str | None = None,
) -> LeRobotDataset:
    """Delete episodes from a LeRobotDataset and create a new dataset.

    Args:
        dataset: The source LeRobotDataset.
        episode_indices: List of episode indices to delete.
        output_dir: Root directory where the edited dataset will be stored. If not specified, defaults to $HF_LEROBOT_HOME/repo_id. Equivalent to new_root in EditDatasetConfig.
        repo_id: Edited dataset identifier. Equivalent to new_repo_id in EditDatasetConfig.

    Returns:
        A new LeRobotDataset with the requested episodes removed and the
        surviving episodes re-indexed contiguously from 0.

    Raises:
        ValueError: If `episode_indices` is empty, contains out-of-range
            indices, or would delete every episode.
    """
    if not episode_indices:
        raise ValueError("No episodes to delete")
    # Use a set for O(1) membership tests in the keep-list comprehension below.
    indices_to_delete = set(episode_indices)
    valid_indices = set(range(dataset.meta.total_episodes))
    invalid = indices_to_delete - valid_indices
    if invalid:
        raise ValueError(f"Invalid episode indices: {invalid}")
    logging.info(f"Deleting {len(episode_indices)} episodes from dataset")
    if repo_id is None:
        repo_id = f"{dataset.repo_id}_modified"
    output_dir = Path(output_dir) if output_dir is not None else HF_LEROBOT_HOME / repo_id
    episodes_to_keep = [i for i in range(dataset.meta.total_episodes) if i not in indices_to_delete]
    if not episodes_to_keep:
        raise ValueError("Cannot delete all episodes from dataset")
    # Carry over the source dataset's chunking/file-size settings, matching
    # split_dataset (previously these silently fell back to defaults).
    new_meta = LeRobotDatasetMetadata.create(
        repo_id=repo_id,
        fps=dataset.meta.fps,
        features=dataset.meta.features,
        robot_type=dataset.meta.robot_type,
        root=output_dir,
        use_videos=len(dataset.meta.video_keys) > 0,
        chunks_size=dataset.meta.chunks_size,
        data_files_size_in_mb=dataset.meta.data_files_size_in_mb,
        video_files_size_in_mb=dataset.meta.video_files_size_in_mb,
    )
    # Old episode index -> new contiguous index for the surviving episodes.
    episode_mapping = {old_idx: new_idx for new_idx, old_idx in enumerate(episodes_to_keep)}
    video_metadata = None
    if dataset.meta.video_keys:
        video_metadata = _copy_and_reindex_videos(dataset, new_meta, episode_mapping)
    data_metadata = _copy_and_reindex_data(dataset, new_meta, episode_mapping)
    _copy_and_reindex_episodes_metadata(dataset, new_meta, episode_mapping, data_metadata, video_metadata)
    # Reload from disk so the returned dataset reflects the rewritten files.
    new_dataset = LeRobotDataset(
        repo_id=repo_id,
        root=output_dir,
        image_transforms=dataset.image_transforms,
        delta_timestamps=dataset.delta_timestamps,
        tolerance_s=dataset.tolerance_s,
    )
    logging.info(f"Created new dataset with {len(episodes_to_keep)} episodes")
    return new_dataset
def split_dataset(
    dataset: LeRobotDataset,
    splits: dict[str, float | list[int]],
    output_dir: str | Path | None = None,
) -> dict[str, LeRobotDataset]:
    """Split a LeRobotDataset into multiple smaller datasets.

    Args:
        dataset: The source LeRobotDataset to split.
        splits: Mapping from split name to either a list of episode indices or
            a fraction (all-fraction dicts must sum to <= 1.0).
        output_dir: Root directory where the split datasets will be stored.
            Defaults to $HF_LEROBOT_HOME/repo_id.

    Examples:
        By explicit episodes: split_dataset(dataset, {"train": [0, 1, 2], "val": [3, 4]})
        By fractions:         split_dataset(dataset, {"train": 0.8, "val": 0.2})
    """
    if not splits:
        raise ValueError("No splits provided")
    # Fraction form: resolve to concrete episode indices first.
    if all(isinstance(v, float) for v in splits.values()):
        splits = _fractions_to_episode_indices(dataset.meta.total_episodes, splits)
    # Validate: every split non-empty, splits pairwise disjoint, indices in range.
    seen: set = set()
    for split_name, episodes in splits.items():
        if not episodes:
            raise ValueError(f"Split '{split_name}' has no episodes")
        episode_set = set(episodes)
        if episode_set & seen:
            raise ValueError("Episodes cannot appear in multiple splits")
        seen |= episode_set
    out_of_range = seen - set(range(dataset.meta.total_episodes))
    if out_of_range:
        raise ValueError(f"Invalid episode indices: {out_of_range}")
    if output_dir is not None:
        output_dir = Path(output_dir)
    result_datasets: dict[str, LeRobotDataset] = {}
    for split_name, episodes in splits.items():
        logging.info(f"Creating split '{split_name}' with {len(episodes)} episodes")
        split_repo_id = f"{dataset.repo_id}_{split_name}"
        if output_dir is not None:
            split_root = output_dir / split_name
        else:
            split_root = HF_LEROBOT_HOME / split_repo_id
        # Old episode index -> new contiguous index, in ascending order.
        mapping = {old: new for new, old in enumerate(sorted(episodes))}
        new_meta = LeRobotDatasetMetadata.create(
            repo_id=split_repo_id,
            fps=dataset.meta.fps,
            features=dataset.meta.features,
            robot_type=dataset.meta.robot_type,
            root=split_root,
            use_videos=len(dataset.meta.video_keys) > 0,
            chunks_size=dataset.meta.chunks_size,
            data_files_size_in_mb=dataset.meta.data_files_size_in_mb,
            video_files_size_in_mb=dataset.meta.video_files_size_in_mb,
        )
        video_metadata = _copy_and_reindex_videos(dataset, new_meta, mapping) if dataset.meta.video_keys else None
        data_metadata = _copy_and_reindex_data(dataset, new_meta, mapping)
        _copy_and_reindex_episodes_metadata(dataset, new_meta, mapping, data_metadata, video_metadata)
        # Reload from disk so the returned dataset reflects the rewritten files.
        result_datasets[split_name] = LeRobotDataset(
            repo_id=split_repo_id,
            root=split_root,
            image_transforms=dataset.image_transforms,
            delta_timestamps=dataset.delta_timestamps,
            tolerance_s=dataset.tolerance_s,
        )
    return result_datasets
def merge_datasets(
    datasets: list[LeRobotDataset],
    output_repo_id: str,
    output_dir: str | Path | None = None,
) -> LeRobotDataset:
    """Merge multiple LeRobotDatasets into a single dataset.

    Thin wrapper around the aggregate_datasets functionality with a cleaner API.

    Args:
        datasets: List of LeRobotDatasets to merge.
        output_repo_id: Merged dataset identifier.
        output_dir: Root directory where the merged dataset will be stored.
            Defaults to $HF_LEROBOT_HOME/output_repo_id.
    """
    if not datasets:
        raise ValueError("No datasets to merge")
    merged_root = Path(output_dir) if output_dir is not None else HF_LEROBOT_HOME / output_repo_id
    aggregate_datasets(
        repo_ids=[ds.repo_id for ds in datasets],
        aggr_repo_id=output_repo_id,
        roots=[ds.root for ds in datasets],
        aggr_root=merged_root,
    )
    # Transforms/timestamps are inherited from the first input dataset.
    first = datasets[0]
    return LeRobotDataset(
        repo_id=output_repo_id,
        root=merged_root,
        image_transforms=first.image_transforms,
        delta_timestamps=first.delta_timestamps,
        tolerance_s=first.tolerance_s,
    )
def modify_features(
    dataset: LeRobotDataset,
    add_features: dict[str, tuple[np.ndarray | torch.Tensor | Callable, dict]] | None = None,
    remove_features: str | list[str] | None = None,
    output_dir: str | Path | None = None,
    repo_id: str | None = None,
) -> LeRobotDataset:
    """Add and/or remove features of a LeRobotDataset in a single pass.

    The dataset is copied exactly once, regardless of how many features are
    being added or removed, making this the most efficient way to modify
    the feature set.

    Args:
        dataset: The source LeRobotDataset.
        add_features: Optional dict mapping feature names to
            (feature_values, feature_info) tuples. ``feature_info`` must at
            least contain the keys "dtype" and "shape".
        remove_features: Optional feature name(s) to remove — a single string
            or a list. Core index columns cannot be removed.
        output_dir: Root directory for the edited dataset. Defaults to
            $HF_LEROBOT_HOME/repo_id. Equivalent to new_root in EditDatasetConfig.
        repo_id: Edited dataset identifier. Defaults to
            "<source repo_id>_modified". Equivalent to new_repo_id in EditDatasetConfig.

    Returns:
        A new LeRobotDataset with the requested feature changes applied.

    Raises:
        ValueError: If neither argument is given, an added feature already
            exists, its info lacks required keys, a removed feature does not
            exist, or a required feature is targeted for removal.
    """
    if add_features is None and remove_features is None:
        raise ValueError("Must specify at least one of add_features or remove_features")

    # Normalize the removal argument to a plain list of names.
    if remove_features is None:
        to_remove: list[str] = []
    elif isinstance(remove_features, str):
        to_remove = [remove_features]
    else:
        to_remove = remove_features

    # Validate additions: names must be new, and info must describe the feature.
    if add_features:
        required_keys = {"dtype", "shape"}
        for feature_name, (_, feature_info) in add_features.items():
            if feature_name in dataset.meta.features:
                raise ValueError(f"Feature '{feature_name}' already exists in dataset")
            if not required_keys.issubset(feature_info.keys()):
                raise ValueError(f"feature_info for '{feature_name}' must contain keys: {required_keys}")

    # Validate removals: names must exist and must not be core index columns.
    if to_remove:
        for name in to_remove:
            if name not in dataset.meta.features:
                raise ValueError(f"Feature '{name}' not found in dataset")
        required_features = {"timestamp", "frame_index", "episode_index", "index", "task_index"}
        if any(name in required_features for name in to_remove):
            raise ValueError(f"Cannot remove required features: {required_features}")

    if repo_id is None:
        repo_id = f"{dataset.repo_id}_modified"
    output_dir = HF_LEROBOT_HOME / repo_id if output_dir is None else Path(output_dir)

    # Build the destination feature schema: drop removals, then add new ones.
    new_features = dataset.meta.features.copy()
    for name in to_remove:
        new_features.pop(name, None)
    if add_features:
        for feature_name, (_, feature_info) in add_features.items():
            new_features[feature_name] = feature_info

    video_keys_to_remove = [name for name in to_remove if name in dataset.meta.video_keys]
    remaining_video_keys = [k for k in dataset.meta.video_keys if k not in video_keys_to_remove]

    new_meta = LeRobotDatasetMetadata.create(
        repo_id=repo_id,
        fps=dataset.meta.fps,
        features=new_features,
        robot_type=dataset.meta.robot_type,
        root=output_dir,
        use_videos=len(remaining_video_keys) > 0,
    )
    _copy_data_with_feature_changes(
        dataset=dataset,
        new_meta=new_meta,
        add_features=add_features,
        remove_features=to_remove if to_remove else None,
    )
    if new_meta.video_keys:
        _copy_videos(dataset, new_meta, exclude_keys=video_keys_to_remove if video_keys_to_remove else None)
    return LeRobotDataset(
        repo_id=repo_id,
        root=output_dir,
        image_transforms=dataset.image_transforms,
        delta_timestamps=dataset.delta_timestamps,
        tolerance_s=dataset.tolerance_s,
    )
def add_features(
    dataset: LeRobotDataset,
    features: dict[str, tuple[np.ndarray | torch.Tensor | Callable, dict]],
    output_dir: str | Path | None = None,
    repo_id: str | None = None,
) -> LeRobotDataset:
    """Add multiple features to a LeRobotDataset in a single pass.

    Delegates to :func:`modify_features`, so the dataset is copied exactly
    once no matter how many features are added — more efficient than adding
    features one at a time.

    Args:
        dataset: The source LeRobotDataset.
        features: Maps feature names to (feature_values, feature_info) tuples.
        output_dir: Root directory for the edited dataset. Defaults to
            $HF_LEROBOT_HOME/repo_id. Equivalent to new_root in EditDatasetConfig.
        repo_id: Edited dataset identifier. Equivalent to new_repo_id in
            EditDatasetConfig.

    Returns:
        New dataset with all features added.

    Raises:
        ValueError: If ``features`` is empty.

    Example:
        features = {
            "task_embedding": (task_emb_array, {"dtype": "float32", "shape": [384], "names": None}),
            "cam1_embedding": (cam1_emb_array, {"dtype": "float32", "shape": [768], "names": None}),
        }
        new_dataset = add_features(dataset, features, output_dir="./output", repo_id="my_dataset")
    """
    if not features:
        raise ValueError("No features provided")
    return modify_features(
        dataset=dataset,
        add_features=features,
        output_dir=output_dir,
        repo_id=repo_id,
    )
def remove_feature(
    dataset: LeRobotDataset,
    feature_names: str | list[str],
    output_dir: str | Path | None = None,
    repo_id: str | None = None,
) -> LeRobotDataset:
    """Remove one or more features from a LeRobotDataset.

    Delegates to :func:`modify_features`, which performs the copy in a
    single pass.

    Args:
        dataset: The source LeRobotDataset.
        feature_names: Feature name(s) to remove — a single string or a list.
        output_dir: Root directory for the edited dataset. Defaults to
            $HF_LEROBOT_HOME/repo_id. Equivalent to new_root in EditDatasetConfig.
        repo_id: Edited dataset identifier. Equivalent to new_repo_id in
            EditDatasetConfig.

    Returns:
        New dataset with the features removed.
    """
    return modify_features(
        dataset=dataset,
        remove_features=feature_names,
        output_dir=output_dir,
        repo_id=repo_id,
    )
def _fractions_to_episode_indices(
total_episodes: int,
splits: dict[str, float],
) -> dict[str, list[int]]:
"""Convert split fractions to episode indices."""
if sum(splits.values()) > 1.0:
raise ValueError("Split fractions must sum to <= 1.0")
indices = list(range(total_episodes))
result = {}
start_idx = 0
for split_name, fraction in splits.items():
num_episodes = int(total_episodes * fraction)
if num_episodes == 0:
logging.warning(f"Split '{split_name}' has no episodes, skipping...")
continue
end_idx = start_idx + num_episodes
if split_name == list(splits.keys())[-1]:
end_idx = total_episodes
result[split_name] = indices[start_idx:end_idx]
start_idx = end_idx
return result
def _copy_and_reindex_data(
    src_dataset: LeRobotDataset,
    dst_meta: LeRobotDatasetMetadata,
    episode_mapping: dict[int, int],
) -> dict[int, dict]:
    """Copy and filter data files, only modifying files with deleted episodes.

    Every parquet file touching a kept episode is rewritten (episode, frame
    and task indices are reassigned), but it keeps the chunk/file location of
    its first kept episode. Files mixing kept and dropped episodes are
    filtered to the kept rows first.

    Args:
        src_dataset: Source dataset to copy from
        dst_meta: Destination metadata object
        episode_mapping: Mapping from old episode indices to new indices
    Returns:
        dict mapping episode index to its data file metadata (chunk_index, file_index, etc.)
    """
    if src_dataset.meta.episodes is None:
        src_dataset.meta.episodes = load_episodes(src_dataset.meta.root)
    # Group the kept (old) episode indices by the parquet file that stores them.
    file_to_episodes: dict[Path, set[int]] = {}
    for old_idx in episode_mapping:
        file_path = src_dataset.meta.get_data_file_path(old_idx)
        if file_path not in file_to_episodes:
            file_to_episodes[file_path] = set()
        file_to_episodes[file_path].add(old_idx)
    global_index = 0
    episode_data_metadata: dict[int, dict] = {}
    # If the destination has no task table yet, build it from the tasks that
    # actually appear in the kept episodes' rows.
    if dst_meta.tasks is None:
        all_task_indices = set()
        for src_path in file_to_episodes:
            df = pd.read_parquet(src_dataset.root / src_path)
            mask = df["episode_index"].isin(list(episode_mapping.keys()))
            task_series: pd.Series = df[mask]["task_index"]
            all_task_indices.update(task_series.unique().tolist())
        tasks = [src_dataset.meta.tasks.iloc[idx].name for idx in all_task_indices]
        dst_meta.save_episode_tasks(list(set(tasks)))
    # Old task index -> new task index; tasks absent from the destination are skipped.
    task_mapping = {}
    for old_task_idx in range(len(src_dataset.meta.tasks)):
        task_name = src_dataset.meta.tasks.iloc[old_task_idx].name
        new_task_idx = dst_meta.get_task_index(task_name)
        if new_task_idx is not None:
            task_mapping[old_task_idx] = new_task_idx
    for src_path in tqdm(sorted(file_to_episodes.keys()), desc="Processing data files"):
        df = pd.read_parquet(src_dataset.root / src_path)
        all_episodes_in_file = set(df["episode_index"].unique())
        episodes_to_keep = file_to_episodes[src_path]
        if all_episodes_in_file == episodes_to_keep:
            # Every episode in this file is kept: reindex the frame in place.
            df["episode_index"] = df["episode_index"].replace(episode_mapping)
            df["index"] = range(global_index, global_index + len(df))
            df["task_index"] = df["task_index"].replace(task_mapping)
            first_ep_old_idx = min(episodes_to_keep)
            src_ep = src_dataset.meta.episodes[first_ep_old_idx]
            # Destination reuses the source file's chunk/file location.
            chunk_idx = src_ep["data/chunk_index"]
            file_idx = src_ep["data/file_index"]
        else:
            # Mixed file: drop rows from deleted episodes, then reindex.
            mask = df["episode_index"].isin(list(episode_mapping.keys()))
            df = df[mask].copy().reset_index(drop=True)
            if len(df) == 0:
                continue
            df["episode_index"] = df["episode_index"].replace(episode_mapping)
            df["index"] = range(global_index, global_index + len(df))
            df["task_index"] = df["task_index"].replace(task_mapping)
            first_ep_old_idx = min(episodes_to_keep)
            src_ep = src_dataset.meta.episodes[first_ep_old_idx]
            chunk_idx = src_ep["data/chunk_index"]
            file_idx = src_ep["data/file_index"]
        dst_path = dst_meta.root / DEFAULT_DATA_PATH.format(chunk_index=chunk_idx, file_index=file_idx)
        dst_path.parent.mkdir(parents=True, exist_ok=True)
        _write_parquet(df, dst_path, dst_meta)
        # Record, per new episode index, where it landed and its half-open
        # [from, to) range of global frame indices.
        for ep_old_idx in episodes_to_keep:
            ep_new_idx = episode_mapping[ep_old_idx]
            ep_df = df[df["episode_index"] == ep_new_idx]
            episode_data_metadata[ep_new_idx] = {
                "data/chunk_index": chunk_idx,
                "data/file_index": file_idx,
                "dataset_from_index": int(ep_df["index"].min()),
                "dataset_to_index": int(ep_df["index"].max() + 1),
            }
        global_index += len(df)
    return episode_data_metadata
def _keep_episodes_from_video_with_av(
    input_path: Path,
    output_path: Path,
    episodes_to_keep: list[tuple[int, int]],
    fps: float,
    vcodec: str = "libsvtav1",
    pix_fmt: str = "yuv420p",
) -> None:
    """Keep only specified episodes from a video file using PyAV.

    This function decodes frames from specified frame ranges and re-encodes them with
    properly reset timestamps to ensure monotonic progression.

    Args:
        input_path: Source video file path.
        output_path: Destination video file path.
        episodes_to_keep: List of (start_frame, end_frame) tuples for episodes to keep.
            Ranges are half-open intervals: [start_frame, end_frame), where start_frame
            is inclusive and end_frame is exclusive.
        fps: Frame rate of the video.
        vcodec: Video codec to use for encoding.
        pix_fmt: Pixel format for output video.

    Raises:
        ValueError: If ``episodes_to_keep`` is empty or the input has no video stream.
    """
    from fractions import Fraction

    import av

    if not episodes_to_keep:
        raise ValueError("No episodes to keep")
    in_container = av.open(str(input_path))
    # Check if video stream exists.
    if not in_container.streams.video:
        raise ValueError(
            f"No video streams found in {input_path}. "
            "The video file may be corrupted or empty. "
            "Try re-downloading the dataset or checking the video file."
        )
    v_in = in_container.streams.video[0]
    out = av.open(str(output_path), mode="w")
    # Convert fps to Fraction for PyAV compatibility.
    fps_fraction = Fraction(fps).limit_denominator(1000)
    v_out = out.add_stream(vcodec, rate=fps_fraction)
    # PyAV type stubs don't distinguish video streams from audio/subtitle streams.
    v_out.width = v_in.codec_context.width
    v_out.height = v_in.codec_context.height
    v_out.pix_fmt = pix_fmt
    # Set time_base to match the frame rate for proper timestamp handling.
    # NOTE(review): int(fps) truncates non-integer rates (e.g. 29.97), which
    # would disagree with fps_fraction above — presumably fps is integral in
    # LeRobot datasets; confirm before using fractional frame rates.
    v_out.time_base = Fraction(1, int(fps))
    out.start_encoding()
    # Create set of (start, end) ranges for fast lookup.
    # Convert to a sorted list for efficient checking.
    frame_ranges = sorted(episodes_to_keep)
    # Track frame index for setting PTS and current range being processed.
    src_frame_count = 0  # position in the source video (decoded frames seen)
    frame_count = 0  # position in the output video (frames written; used as PTS)
    range_idx = 0  # index of the range currently being filled
    # Read through entire video once and filter frames.
    for packet in in_container.demux(v_in):
        for frame in packet.decode():
            if frame is None:
                continue
            # Check if frame is in any of our desired frame ranges.
            # Skip ranges that have already passed.
            while range_idx < len(frame_ranges) and src_frame_count >= frame_ranges[range_idx][1]:
                range_idx += 1
            # If we've passed all ranges, stop processing.
            # (This only breaks the per-packet loop; the outer demux loop keeps
            # draining packets, but each one is skipped immediately here.)
            if range_idx >= len(frame_ranges):
                break
            # Check if frame is in current range.
            start_frame = frame_ranges[range_idx][0]
            if src_frame_count < start_frame:
                src_frame_count += 1
                continue
            # Frame is in range - create a new frame with reset timestamps.
            # We need to create a copy to avoid modifying the original.
            new_frame = frame.reformat(width=v_out.width, height=v_out.height, format=v_out.pix_fmt)
            new_frame.pts = frame_count
            new_frame.time_base = Fraction(1, int(fps))
            # Encode and mux the frame.
            for pkt in v_out.encode(new_frame):
                out.mux(pkt)
            src_frame_count += 1
            frame_count += 1
    # Flush encoder.
    for pkt in v_out.encode():
        out.mux(pkt)
    out.close()
    in_container.close()
def _copy_and_reindex_videos(
    src_dataset: LeRobotDataset,
    dst_meta: LeRobotDatasetMetadata,
    episode_mapping: dict[int, int],
    vcodec: str = "libsvtav1",
    pix_fmt: str = "yuv420p",
) -> dict[int, dict]:
    """Copy and filter video files, only re-encoding files with deleted episodes.

    For video files that only contain kept episodes, we copy them directly.
    For files with mixed kept/deleted episodes, we use PyAV filters to efficiently
    re-encode only the desired segments.

    Args:
        src_dataset: Source dataset to copy from
        dst_meta: Destination metadata object
        episode_mapping: Mapping from old episode indices to new indices
        vcodec: Video codec used when re-encoding mixed files.
        pix_fmt: Pixel format used when re-encoding mixed files.
    Returns:
        dict mapping episode index to its video metadata (chunk_index, file_index, timestamps)
    """
    if src_dataset.meta.episodes is None:
        src_dataset.meta.episodes = load_episodes(src_dataset.meta.root)
    # Keyed by NEW episode index; each value accumulates per-video-key entries.
    episodes_video_metadata: dict[int, dict] = {new_idx: {} for new_idx in episode_mapping.values()}
    for video_key in src_dataset.meta.video_keys:
        logging.info(f"Processing videos for {video_key}")
        if dst_meta.video_path is None:
            raise ValueError("Destination metadata has no video_path defined")
        # Group kept (old) episodes by the (chunk, file) video file storing them.
        file_to_episodes: dict[tuple[int, int], list[int]] = {}
        for old_idx in episode_mapping:
            src_ep = src_dataset.meta.episodes[old_idx]
            chunk_idx = src_ep[f"videos/{video_key}/chunk_index"]
            file_idx = src_ep[f"videos/{video_key}/file_index"]
            file_key = (chunk_idx, file_idx)
            if file_key not in file_to_episodes:
                file_to_episodes[file_key] = []
            file_to_episodes[file_key].append(old_idx)
        for (src_chunk_idx, src_file_idx), episodes_in_file in tqdm(
            sorted(file_to_episodes.items()), desc=f"Processing {video_key} video files"
        ):
            # All episodes (kept or not) that live in this video file.
            all_episodes_in_file = [
                ep_idx
                for ep_idx in range(src_dataset.meta.total_episodes)
                if src_dataset.meta.episodes[ep_idx].get(f"videos/{video_key}/chunk_index") == src_chunk_idx
                and src_dataset.meta.episodes[ep_idx].get(f"videos/{video_key}/file_index") == src_file_idx
            ]
            episodes_to_keep_set = set(episodes_in_file)
            all_in_file_set = set(all_episodes_in_file)
            if all_in_file_set == episodes_to_keep_set:
                # Fast path: every episode in the file is kept, so the file is
                # copied byte-for-byte and the source timestamps stay valid.
                assert src_dataset.meta.video_path is not None
                src_video_path = src_dataset.root / src_dataset.meta.video_path.format(
                    video_key=video_key, chunk_index=src_chunk_idx, file_index=src_file_idx
                )
                dst_video_path = dst_meta.root / dst_meta.video_path.format(
                    video_key=video_key, chunk_index=src_chunk_idx, file_index=src_file_idx
                )
                dst_video_path.parent.mkdir(parents=True, exist_ok=True)
                shutil.copy(src_video_path, dst_video_path)
                for old_idx in episodes_in_file:
                    new_idx = episode_mapping[old_idx]
                    src_ep = src_dataset.meta.episodes[old_idx]
                    episodes_video_metadata[new_idx][f"videos/{video_key}/chunk_index"] = src_chunk_idx
                    episodes_video_metadata[new_idx][f"videos/{video_key}/file_index"] = src_file_idx
                    episodes_video_metadata[new_idx][f"videos/{video_key}/from_timestamp"] = src_ep[
                        f"videos/{video_key}/from_timestamp"
                    ]
                    episodes_video_metadata[new_idx][f"videos/{video_key}/to_timestamp"] = src_ep[
                        f"videos/{video_key}/to_timestamp"
                    ]
            else:
                # Build list of frame ranges to keep, in sorted order.
                sorted_keep_episodes = sorted(episodes_in_file, key=lambda x: episode_mapping[x])
                episodes_to_keep_ranges: list[tuple[int, int]] = []
                for old_idx in sorted_keep_episodes:
                    src_ep = src_dataset.meta.episodes[old_idx]
                    # Timestamps are converted to frame numbers via the dataset fps.
                    from_frame = round(src_ep[f"videos/{video_key}/from_timestamp"] * src_dataset.meta.fps)
                    to_frame = round(src_ep[f"videos/{video_key}/to_timestamp"] * src_dataset.meta.fps)
                    assert src_ep["length"] == to_frame - from_frame, (
                        f"Episode length mismatch: {src_ep['length']} vs {to_frame - from_frame}"
                    )
                    episodes_to_keep_ranges.append((from_frame, to_frame))
                # Use PyAV filters to efficiently re-encode only the desired segments.
                assert src_dataset.meta.video_path is not None
                src_video_path = src_dataset.root / src_dataset.meta.video_path.format(
                    video_key=video_key, chunk_index=src_chunk_idx, file_index=src_file_idx
                )
                dst_video_path = dst_meta.root / dst_meta.video_path.format(
                    video_key=video_key, chunk_index=src_chunk_idx, file_index=src_file_idx
                )
                dst_video_path.parent.mkdir(parents=True, exist_ok=True)
                logging.info(
                    f"Re-encoding {video_key} (chunk {src_chunk_idx}, file {src_file_idx}) "
                    f"with {len(episodes_to_keep_ranges)} episodes"
                )
                _keep_episodes_from_video_with_av(
                    src_video_path,
                    dst_video_path,
                    episodes_to_keep_ranges,
                    src_dataset.meta.fps,
                    vcodec,
                    pix_fmt,
                )
                # The re-encoded file is a contiguous concatenation of the kept
                # episodes, so new timestamps are rebuilt cumulatively from zero.
                cumulative_ts = 0.0
                for old_idx in sorted_keep_episodes:
                    new_idx = episode_mapping[old_idx]
                    src_ep = src_dataset.meta.episodes[old_idx]
                    ep_length = src_ep["length"]
                    ep_duration = ep_length / src_dataset.meta.fps
                    episodes_video_metadata[new_idx][f"videos/{video_key}/chunk_index"] = src_chunk_idx
                    episodes_video_metadata[new_idx][f"videos/{video_key}/file_index"] = src_file_idx
                    episodes_video_metadata[new_idx][f"videos/{video_key}/from_timestamp"] = cumulative_ts
                    episodes_video_metadata[new_idx][f"videos/{video_key}/to_timestamp"] = (
                        cumulative_ts + ep_duration
                    )
                    cumulative_ts += ep_duration
    return episodes_video_metadata
def _copy_and_reindex_episodes_metadata(
    src_dataset: LeRobotDataset,
    dst_meta: LeRobotDatasetMetadata,
    episode_mapping: dict[int, int],
    data_metadata: dict[int, dict],
    video_metadata: dict[int, dict] | None = None,
) -> None:
    """Copy and reindex episodes metadata using provided data and video metadata.

    Also rebuilds the destination info file (episode/frame/task totals) and
    aggregates per-episode statistics into dataset-level stats.

    Args:
        src_dataset: Source dataset to copy from
        dst_meta: Destination metadata object
        episode_mapping: Mapping from old episode indices to new indices
        data_metadata: Dict mapping new episode index to its data file metadata
        video_metadata: Optional dict mapping new episode index to its video metadata
    """
    from lerobot.datasets.utils import flatten_dict

    if src_dataset.meta.episodes is None:
        src_dataset.meta.episodes = load_episodes(src_dataset.meta.root)
    all_stats = []
    total_frames = 0
    # Process episodes in NEW-index order so destination metadata is written sequentially.
    for old_idx, new_idx in tqdm(
        sorted(episode_mapping.items(), key=lambda x: x[1]), desc="Processing episodes metadata"
    ):
        src_episode_full = _load_episode_with_stats(src_dataset, old_idx)
        src_episode = src_dataset.meta.episodes[old_idx]
        episode_meta = data_metadata[new_idx].copy()
        if video_metadata and new_idx in video_metadata:
            episode_meta.update(video_metadata[new_idx])
        # Extract episode statistics from parquet metadata.
        # Note (maractingi): When pandas/pyarrow serializes numpy arrays with shape (3, 1, 1) to parquet,
        # they are being deserialized as nested object arrays like:
        # array([array([array([0.])]), array([array([0.])]), array([array([0.])])])
        # This happens particularly with image/video statistics. We need to detect and flatten
        # these nested structures back to proper (3, 1, 1) arrays so aggregate_stats can process them.
        episode_stats = {}
        for key in src_episode_full:
            if key.startswith("stats/"):
                # Keys look like "stats/<feature_name>/<stat_name>".
                stat_key = key.replace("stats/", "")
                parts = stat_key.split("/")
                if len(parts) == 2:
                    feature_name, stat_name = parts
                    if feature_name not in episode_stats:
                        episode_stats[feature_name] = {}
                    value = src_episode_full[key]
                    if feature_name in src_dataset.meta.features:
                        feature_dtype = src_dataset.meta.features[feature_name]["dtype"]
                        # "count" stays scalar; only per-channel image/video stats
                        # need reshaping back to (3, 1, 1).
                        if feature_dtype in ["image", "video"] and stat_name != "count":
                            if isinstance(value, np.ndarray) and value.dtype == object:
                                # Unwrap arbitrarily nested single-element arrays.
                                flat_values = []
                                for item in value:
                                    while isinstance(item, np.ndarray):
                                        item = item.flatten()[0]
                                    flat_values.append(item)
                                value = np.array(flat_values, dtype=np.float64).reshape(3, 1, 1)
                            elif isinstance(value, np.ndarray) and value.shape == (3,):
                                value = value.reshape(3, 1, 1)
                    episode_stats[feature_name][stat_name] = value
        all_stats.append(episode_stats)
        episode_dict = {
            "episode_index": new_idx,
            "tasks": src_episode["tasks"],
            "length": src_episode["length"],
        }
        episode_dict.update(episode_meta)
        episode_dict.update(flatten_dict({"stats": episode_stats}))
        dst_meta._save_episode_metadata(episode_dict)
        total_frames += src_episode["length"]
    # Flush the episodes metadata writer before updating the info file.
    dst_meta._close_writer()
    dst_meta.info.update(
        {
            "total_episodes": len(episode_mapping),
            "total_frames": total_frames,
            "total_tasks": len(dst_meta.tasks) if dst_meta.tasks is not None else 0,
            "splits": {"train": f"0:{len(episode_mapping)}"},
        }
    )
    write_info(dst_meta.info, dst_meta.root)
    if not all_stats:
        logging.warning("No statistics found to aggregate")
        return
    logging.info(f"Aggregating statistics for {len(all_stats)} episodes")
    aggregated_stats = aggregate_stats(all_stats)
    # Drop stats for features not present in the destination schema.
    filtered_stats = {k: v for k, v in aggregated_stats.items() if k in dst_meta.features}
    write_stats(filtered_stats, dst_meta.root)
def _write_parquet(df: pd.DataFrame, path: Path, meta: LeRobotDatasetMetadata) -> None:
    """Write a DataFrame to parquet using the dataset's HF feature schema.

    Converting through a HF ``datasets.Dataset`` ensures images are properly
    embedded and the file can be loaded correctly by HF datasets.

    Args:
        df: Frame-level data to write.
        path: Destination parquet file path.
        meta: Metadata providing the feature schema and image keys.
    """
    from lerobot.datasets.utils import embed_images, get_hf_features_from_features

    hf_features = get_hf_features_from_features(meta.features)
    ep_dataset = datasets.Dataset.from_dict(df.to_dict(orient="list"), features=hf_features, split="train")
    if len(meta.image_keys) > 0:
        # Embed raw image bytes into the table instead of on-disk references.
        ep_dataset = embed_images(ep_dataset)
    table = ep_dataset.with_format("arrow")[:]
    # Fix: use the writer as a context manager so it is closed (and the parquet
    # footer written) even if write_table raises, instead of leaking the handle.
    with pq.ParquetWriter(path, schema=table.schema, compression="snappy", use_dictionary=True) as writer:
        writer.write_table(table)
def _save_data_chunk(
    df: pd.DataFrame,
    meta: LeRobotDatasetMetadata,
    chunk_idx: int = 0,
    file_idx: int = 0,
) -> tuple[int, int, dict[int, dict]]:
    """Write one parquet data chunk and compute per-episode location metadata.

    Returns:
        tuple: (next_chunk_idx, next_file_idx, episode_metadata_dict)
        where episode_metadata_dict maps episode_index to its data file
        metadata; the chunk/file indices are advanced once the written file
        approaches the configured size limit.
    """
    target = meta.root / DEFAULT_DATA_PATH.format(chunk_index=chunk_idx, file_index=file_idx)
    target.parent.mkdir(parents=True, exist_ok=True)
    _write_parquet(df, target, meta)

    def _episode_entry(ep_frames: pd.DataFrame) -> dict:
        # Chunk/file location plus the half-open [from, to) global index range.
        return {
            "data/chunk_index": chunk_idx,
            "data/file_index": file_idx,
            "dataset_from_index": int(ep_frames["index"].min()),
            "dataset_to_index": int(ep_frames["index"].max() + 1),
        }

    episode_metadata = {
        ep_idx: _episode_entry(df[df["episode_index"] == ep_idx])
        for ep_idx in df["episode_index"].unique()
    }
    # Roll over to the next file/chunk once this file nears the size budget.
    if get_parquet_file_size_in_mb(target) >= DEFAULT_DATA_FILE_SIZE_IN_MB * 0.9:
        chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, DEFAULT_CHUNK_SIZE)
    return chunk_idx, file_idx, episode_metadata
def _copy_data_with_feature_changes(
    dataset: LeRobotDataset,
    new_meta: LeRobotDatasetMetadata,
    add_features: dict[str, tuple] | None = None,
    remove_features: list[str] | None = None,
) -> None:
    """Copy data while adding or removing features.

    Each source parquet file is rewritten to the same chunk/file location
    under the destination root. Added feature values are either pre-computed
    arrays sliced by global frame index, or callables invoked per row.

    Args:
        dataset: Source dataset whose data files are copied.
        new_meta: Destination metadata (provides root and feature schema).
        add_features: Maps feature name to (values, feature_info); ``values``
            is an array indexed by global frame, or a callable taking
            (row_dict, episode_index, frame_index).
        remove_features: Column names dropped from every data file.

    Raises:
        ValueError: If the source dataset has no parquet data files.
    """
    data_dir = dataset.root / DATA_DIR
    parquet_files = sorted(data_dir.glob("*/*.parquet"))
    if not parquet_files:
        raise ValueError(f"No parquet files found in {data_dir}")
    frame_idx = 0
    for src_path in tqdm(parquet_files, desc="Processing data files"):
        df = pd.read_parquet(src_path).reset_index(drop=True)
        # Recover the chunk/file indices from the "chunk-XXX/file-YYY.parquet"
        # path layout so the destination mirrors the source structure.
        relative_path = src_path.relative_to(dataset.root)
        chunk_dir = relative_path.parts[1]
        file_name = relative_path.parts[2]
        chunk_idx = int(chunk_dir.split("-")[1])
        file_idx = int(file_name.split("-")[1].split(".")[0])
        if remove_features:
            df = df.drop(columns=remove_features, errors="ignore")
        if add_features:
            # This file covers global frames [frame_idx, end_idx).
            end_idx = frame_idx + len(df)
            for feature_name, (values, _) in add_features.items():
                if callable(values):
                    # Callable features are evaluated row by row.
                    feature_values = []
                    for _, row in df.iterrows():
                        ep_idx = row["episode_index"]
                        frame_in_ep = row["frame_index"]
                        value = values(row.to_dict(), ep_idx, frame_in_ep)
                        # Unwrap single-element arrays to plain scalars.
                        if isinstance(value, np.ndarray) and value.size == 1:
                            value = value.item()
                        feature_values.append(value)
                    df[feature_name] = feature_values
                else:
                    # Array features are sliced by global frame index.
                    feature_slice = values[frame_idx:end_idx]
                    # Squeeze trailing singleton dimension so pandas accepts it
                    # as a single column.
                    if len(feature_slice.shape) > 1 and feature_slice.shape[1] == 1:
                        df[feature_name] = feature_slice.flatten()
                    else:
                        df[feature_name] = feature_slice
            frame_idx = end_idx
        # Write using the same chunk/file structure as source
        dst_path = new_meta.root / DEFAULT_DATA_PATH.format(chunk_index=chunk_idx, file_index=file_idx)
        dst_path.parent.mkdir(parents=True, exist_ok=True)
        _write_parquet(df, dst_path, new_meta)
    _copy_episodes_metadata_and_stats(dataset, new_meta)
def _copy_videos(
    src_dataset: LeRobotDataset,
    dst_meta: LeRobotDatasetMetadata,
    exclude_keys: list[str] | None = None,
) -> None:
    """Copy video files verbatim from the source dataset to the destination.

    Args:
        src_dataset: Dataset whose video files are copied.
        dst_meta: Destination metadata (provides the target root directory).
        exclude_keys: Video keys to skip entirely (e.g. removed features).
    """
    excluded = set(exclude_keys) if exclude_keys is not None else set()
    for video_key in src_dataset.meta.video_keys:
        if video_key in excluded:
            continue
        # Collect the unique set of files backing this camera's episodes;
        # episodes without a video entry for this key are simply skipped.
        video_files = set()
        for ep_idx in range(len(src_dataset.meta.episodes)):
            try:
                video_files.add(src_dataset.meta.get_video_file_path(ep_idx, video_key))
            except KeyError:
                continue
        for rel_path in tqdm(sorted(video_files), desc=f"Copying {video_key} videos"):
            target = dst_meta.root / rel_path
            target.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy(src_dataset.root / rel_path, target)
def _copy_episodes_metadata_and_stats(
    src_dataset: LeRobotDataset,
    dst_meta: LeRobotDatasetMetadata,
) -> None:
    """Copy tasks, episode metadata, info, and stats to the destination.

    When the destination feature set differs from the source (features were
    added or removed), stats are filtered down to the surviving features;
    otherwise the source stats are written unchanged.
    """
    src_meta = src_dataset.meta
    if src_meta.tasks is not None:
        write_tasks(src_meta.tasks, dst_meta.root)
        dst_meta.tasks = src_meta.tasks.copy()

    # Per-episode metadata files are copied verbatim.
    episodes_src = src_dataset.root / "meta/episodes"
    if episodes_src.exists():
        shutil.copytree(episodes_src, dst_meta.root / "meta/episodes", dirs_exist_ok=True)

    dst_meta.info.update(
        {
            "total_episodes": src_meta.total_episodes,
            "total_frames": src_meta.total_frames,
            "total_tasks": src_meta.total_tasks,
            "splits": src_meta.info.get("splits", {"train": f"0:{src_meta.total_episodes}"}),
        }
    )
    # Carry over per-video "info" blobs for the video keys that survived.
    if dst_meta.video_keys and src_meta.video_keys:
        for key in dst_meta.video_keys:
            if key in src_meta.features:
                dst_meta.info["features"][key]["info"] = src_meta.info["features"][key].get(
                    "info", {}
                )
    write_info(dst_meta.info, dst_meta.root)

    same_features = set(dst_meta.features.keys()) == set(src_meta.features.keys())
    if same_features:
        if src_meta.stats:
            write_stats(src_meta.stats, dst_meta.root)
        return
    logging.info("Recalculating dataset statistics...")
    if src_meta.stats:
        # Keep only the stats entries for features present in the destination.
        filtered = {key: src_meta.stats[key] for key in dst_meta.features if key in src_meta.stats}
        write_stats(filtered, dst_meta.root)
def _save_episode_images_for_video(
    dataset: LeRobotDataset,
    imgs_dir: Path,
    img_key: str,
    episode_index: int,
    num_workers: int = 4,
) -> None:
    """Dump one episode's frames for a single camera as PNG files.

    Files are named ``frame-XXXXXX.png`` so they can be consumed directly by
    ``encode_video_frames``.

    Args:
        dataset: The LeRobot dataset to extract images from.
        imgs_dir: Output directory for the PNGs (created if missing).
        img_key: The image key (camera) to extract.
        episode_index: Index of the episode to save.
        num_workers: Number of threads for parallel image saving.
    """
    imgs_dir.mkdir(parents=True, exist_ok=True)
    # Drop torch formatting so items come back as PIL images, and keep only
    # this camera's column.
    plain_dataset = dataset.hf_dataset.with_format(None).select_columns(img_key)
    start = dataset.meta.episodes["dataset_from_index"][episode_index]
    stop = dataset.meta.episodes["dataset_to_index"][episode_index]
    episode_frames = plain_dataset.select(range(start, stop))

    def _write_png(indexed_item):
        frame_no, item = indexed_item
        # frame-XXXXXX.png naming matches encode_video_frames expectations.
        item[img_key].save(str(imgs_dir / f"frame-{frame_no:06d}.png"), quality=100)
        return frame_no

    with ThreadPoolExecutor(max_workers=num_workers) as executor:
        # Consuming the iterator re-raises any exception from a worker thread.
        list(executor.map(_write_png, enumerate(episode_frames)))
def _save_batch_episodes_images(
    dataset: LeRobotDataset,
    imgs_dir: Path,
    img_key: str,
    episode_indices: list[int],
    num_workers: int = 4,
) -> list[float]:
    """Dump frames from several episodes for one camera, globally numbered.

    Frames are saved as ``frame-XXXXXX.png`` with an index that runs across
    all requested episodes, so the whole directory encodes as a single video.

    Args:
        dataset: The LeRobot dataset to extract images from.
        imgs_dir: Output directory for the PNGs (created if missing).
        img_key: The image key (camera) to extract.
        episode_indices: Episodes to save, in order.
        num_workers: Number of threads for parallel image saving.

    Returns:
        Duration of each episode in seconds, ordered like ``episode_indices``.
    """
    imgs_dir.mkdir(parents=True, exist_ok=True)
    # Plain (non-torch) formatting yields PIL images for the selected column.
    plain_dataset = dataset.hf_dataset.with_format(None).select_columns(img_key)

    # Defined once outside the loop to avoid repeated closure creation; the
    # base index offsets local frame numbers into the global numbering.
    def _write_png(indexed_item, base_idx, key):
        local_idx, item = indexed_item
        item[key].save(str(imgs_dir / f"frame-{base_idx + local_idx:06d}.png"), quality=100)
        return local_idx

    durations: list[float] = []
    global_frame = 0
    for ep_idx in episode_indices:
        start = dataset.meta.episodes["dataset_from_index"][ep_idx]
        stop = dataset.meta.episodes["dataset_to_index"][ep_idx]
        n_frames = stop - start
        durations.append(n_frames / dataset.fps)
        episode_frames = plain_dataset.select(range(start, stop))
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = [
                executor.submit(_write_png, pair, global_frame, img_key)
                for pair in enumerate(episode_frames)
            ]
            for fut in as_completed(futures):
                fut.result()  # re-raise any worker exception
        global_frame += n_frames
    return durations
def _iter_episode_batches(
episode_indices: list[int],
episode_lengths: dict[int, int],
size_per_frame_mb: float,
video_file_size_limit: float,
max_episodes: int | None,
max_frames: int | None,
):
"""Generator that yields batches of episode indices for video encoding.
Groups episodes into batches that respect size and memory constraints:
- Stays under video file size limit
- Respects maximum episodes per batch (if specified)
- Respects maximum frames per batch (if specified)
Args:
episode_indices: List of episode indices to batch
episode_lengths: Dictionary mapping episode index to episode length
size_per_frame_mb: Estimated size per frame in MB
video_file_size_limit: Maximum video file size in MB
max_episodes: Maximum number of episodes per batch (None = no limit)
max_frames: Maximum number of frames per batch (None = no limit)
Yields:
List of episode indices for each batch
"""
batch_episodes = []
estimated_size = 0.0
total_frames = 0
for ep_idx in episode_indices:
ep_length = episode_lengths[ep_idx]
ep_estimated_size = ep_length * size_per_frame_mb
# we check if adding this episode would exceed any constraint
would_exceed_size = estimated_size > 0 and estimated_size + ep_estimated_size >= video_file_size_limit
would_exceed_episodes = max_episodes is not None and len(batch_episodes) >= max_episodes
would_exceed_frames = max_frames is not None and total_frames + ep_length > max_frames
if batch_episodes and (would_exceed_size or would_exceed_episodes or would_exceed_frames):
# yield current batch before adding this episode
yield batch_episodes
# start a new batch with current episode
batch_episodes = [ep_idx]
estimated_size = ep_estimated_size
total_frames = ep_length
else:
# add to current batch
batch_episodes.append(ep_idx)
estimated_size += ep_estimated_size
total_frames += ep_length
# yield final batch if not empty
if batch_episodes:
yield batch_episodes
def _estimate_frame_size_via_calibration(
    dataset: LeRobotDataset,
    img_key: str,
    episode_indices: list[int],
    temp_dir: Path,
    fps: int,
    vcodec: str,
    pix_fmt: str,
    g: int,
    crf: int,
    fast_decode: int,
    num_calibration_frames: int = 30,
) -> float:
    """Estimate MB per frame by encoding a small calibration sample.

    Encodes a representative sample of frames using the exact codec parameters
    to measure actual compression ratio, which is more accurate than heuristics.

    Args:
        dataset: Source dataset with images.
        img_key: Image key to calibrate (e.g., "observation.images.top").
        episode_indices: List of episode indices being processed.
        temp_dir: Temporary directory for calibration files.
        fps: Frames per second for video encoding.
        vcodec: Video codec (libsvtav1, h264, hevc).
        pix_fmt: Pixel format (yuv420p, etc.).
        g: GOP size (group of pictures).
        crf: Constant Rate Factor (quality).
        fast_decode: Fast decode tuning parameter.
        num_calibration_frames: Number of frames to use for calibration (default: 30).

    Returns:
        Estimated size in MB per frame based on actual encoding.
    """
    calibration_dir = temp_dir / "calibration" / img_key
    calibration_dir.mkdir(parents=True, exist_ok=True)
    try:
        # Select a representative episode (prefer middle episode if available)
        calibration_ep_idx = episode_indices[len(episode_indices) // 2]

        # Get episode range
        from_idx = dataset.meta.episodes["dataset_from_index"][calibration_ep_idx]
        to_idx = dataset.meta.episodes["dataset_to_index"][calibration_ep_idx]
        episode_length = to_idx - from_idx

        # Use up to num_calibration_frames from this episode
        num_frames = min(num_calibration_frames, episode_length)

        # Get frames from dataset; with_format(None) returns plain objects that
        # support .save (presumably PIL images — confirm against dataset features)
        hf_dataset = dataset.hf_dataset.with_format(None)
        sample_indices = range(from_idx, from_idx + num_frames)

        # Save calibration frames
        for i, idx in enumerate(sample_indices):
            img = hf_dataset[idx][img_key]
            # NOTE(review): `quality` is a JPEG-oriented option; Pillow ignores it
            # for ".png" output — confirm whether PNG or lossy output was intended.
            img.save(str(calibration_dir / f"frame-{i:06d}.png"), quality=100)

        # Encode calibration video with the same parameters used for real encoding
        calibration_video_path = calibration_dir / "calibration.mp4"
        encode_video_frames(
            imgs_dir=calibration_dir,
            video_path=calibration_video_path,
            fps=fps,
            vcodec=vcodec,
            pix_fmt=pix_fmt,
            g=g,
            crf=crf,
            fast_decode=fast_decode,
            overwrite=True,
        )

        # Measure actual compressed size
        video_size_bytes = calibration_video_path.stat().st_size
        video_size_mb = video_size_bytes / BYTES_PER_MIB
        size_per_frame_mb = video_size_mb / num_frames

        logging.info(
            f" Calibration: {num_frames} frames -> {video_size_mb:.2f} MB "
            f"= {size_per_frame_mb:.4f} MB/frame for {img_key}"
        )

        return size_per_frame_mb
    finally:
        # Clean up calibration files even if encoding failed
        if calibration_dir.exists():
            shutil.rmtree(calibration_dir)
def _copy_data_without_images(
    src_dataset: LeRobotDataset,
    dst_meta: LeRobotDatasetMetadata,
    episode_indices: list[int],
    img_keys: list[str],
) -> None:
    """Copy the source parquet data files to the destination, dropping image columns.

    Only rows belonging to `episode_indices` are kept; chunk/file indices are
    preserved from the source file layout.

    Args:
        src_dataset: Source dataset
        dst_meta: Destination metadata
        episode_indices: Episodes to include
        img_keys: Image keys to remove
    """
    from lerobot.datasets.utils import DATA_DIR

    data_dir = src_dataset.root / DATA_DIR
    source_files = sorted(data_dir.glob("*/*.parquet"))
    if not source_files:
        raise ValueError(f"No parquet files found in {data_dir}")

    wanted_episodes = set(episode_indices)
    for source_file in tqdm(source_files, desc="Processing data files"):
        frame = pd.read_parquet(source_file).reset_index(drop=True)

        # Keep only rows of the selected episodes; skip files with no match.
        frame = frame[frame["episode_index"].isin(wanted_episodes)].copy()
        if len(frame) == 0:
            continue

        # Drop any image columns present in this file.
        drop_cols = [key for key in img_keys if key in frame.columns]
        if drop_cols:
            frame = frame.drop(columns=drop_cols)

        # Recover chunk/file indices from "data/chunk-XXX/file-YYY.parquet".
        rel_parts = source_file.relative_to(src_dataset.root).parts
        chunk_idx = int(rel_parts[1].split("-")[1])
        file_idx = int(rel_parts[2].split("-")[1].split(".")[0])

        # Write to destination without pandas index
        target = dst_meta.root / f"data/chunk-{chunk_idx:03d}/file-{file_idx:03d}.parquet"
        target.parent.mkdir(parents=True, exist_ok=True)
        frame.to_parquet(target, index=False)
# Video conversion constants
BYTES_PER_KIB = 1024  # bytes per kibibyte (binary kilo, 2**10)
BYTES_PER_MIB = BYTES_PER_KIB * BYTES_PER_KIB  # bytes per mebibyte (2**20); used for MB/frame estimates
def modify_tasks(
    dataset: LeRobotDataset,
    new_task: str | None = None,
    episode_tasks: dict[int, str] | None = None,
) -> LeRobotDataset:
    """Modify tasks in a LeRobotDataset.

    This function allows you to either:
    1. Set a single task for the entire dataset (using `new_task`)
    2. Set specific tasks for specific episodes (using `episode_tasks`)

    You can combine both: `new_task` sets the default, and `episode_tasks` overrides
    specific episodes.

    The dataset is modified in-place, updating only the task-related files:
    - meta/tasks.parquet
    - data/**/*.parquet (task_index column)
    - meta/episodes/**/*.parquet (tasks column)
    - meta/info.json (total_tasks)

    Args:
        dataset: The source LeRobotDataset to modify.
        new_task: A single task string to apply to all episodes. If None and episode_tasks
            is also None, raises an error.
        episode_tasks: Optional dict mapping episode indices to their task strings.
            Overrides `new_task` for specific episodes.

    Returns:
        The same dataset instance with its task metadata reloaded.

    Raises:
        ValueError: If neither new_task nor episode_tasks is given, if episode_tasks
            contains out-of-range indices, or if an episode has no original task
            to fall back on.

    Examples:
        Set a single task for all episodes:
            dataset = modify_tasks(dataset, new_task="Pick up the cube")

        Set different tasks for specific episodes:
            dataset = modify_tasks(
                dataset,
                episode_tasks={0: "Task A", 1: "Task B", 2: "Task A"}
            )

        Set a default task with overrides:
            dataset = modify_tasks(
                dataset,
                new_task="Default task",
                episode_tasks={5: "Special task for episode 5"}
            )
    """
    if new_task is None and episode_tasks is None:
        raise ValueError("Must specify at least one of new_task or episode_tasks")

    if episode_tasks is not None:
        # Validate override indices up front, before touching any files.
        valid_indices = set(range(dataset.meta.total_episodes))
        invalid = set(episode_tasks.keys()) - valid_indices
        if invalid:
            raise ValueError(f"Invalid episode indices: {invalid}")

    # Ensure episodes metadata is loaded
    if dataset.meta.episodes is None:
        dataset.meta.episodes = load_episodes(dataset.root)

    # Build the mapping from episode index to task string
    episode_to_task: dict[int, str] = {}
    for ep_idx in range(dataset.meta.total_episodes):
        if episode_tasks and ep_idx in episode_tasks:
            episode_to_task[ep_idx] = episode_tasks[ep_idx]
        elif new_task is not None:
            episode_to_task[ep_idx] = new_task
        else:
            # Keep original task if not overridden and no default provided.
            # NOTE: only the first listed task of the episode is preserved.
            original_tasks = dataset.meta.episodes[ep_idx]["tasks"]
            if not original_tasks:
                raise ValueError(f"Episode {ep_idx} has no tasks and no default task was provided")
            episode_to_task[ep_idx] = original_tasks[0]

    # Collect all unique tasks and create new task mapping
    # (sorted so task_index assignment is deterministic across runs)
    unique_tasks = sorted(set(episode_to_task.values()))
    new_task_df = pd.DataFrame(
        {"task_index": list(range(len(unique_tasks)))}, index=pd.Index(unique_tasks, name="task")
    )
    task_to_index = {task: idx for idx, task in enumerate(unique_tasks)}

    logging.info(f"Modifying tasks in {dataset.repo_id}")
    logging.info(f"New tasks: {unique_tasks}")

    root = dataset.root

    # Update data files - modify task_index column
    logging.info("Updating data files...")
    data_dir = root / DATA_DIR
    for parquet_path in tqdm(sorted(data_dir.rglob("*.parquet")), desc="Updating data"):
        df = pd.read_parquet(parquet_path)
        # Build a mapping from episode_index to new task_index for rows in this file
        episode_indices_in_file = df["episode_index"].unique()
        ep_to_new_task_idx = {
            ep_idx: task_to_index[episode_to_task[ep_idx]] for ep_idx in episode_indices_in_file
        }
        # Update task_index column
        df["task_index"] = df["episode_index"].map(ep_to_new_task_idx)
        df.to_parquet(parquet_path, index=False)

    # Update episodes metadata - modify tasks column
    logging.info("Updating episodes metadata...")
    episodes_dir = root / "meta" / "episodes"
    for parquet_path in tqdm(sorted(episodes_dir.rglob("*.parquet")), desc="Updating episodes"):
        df = pd.read_parquet(parquet_path)
        # Update tasks column (stored as a single-element list per episode)
        df["tasks"] = df["episode_index"].apply(lambda ep_idx: [episode_to_task[ep_idx]])
        df.to_parquet(parquet_path, index=False)

    # Write new tasks.parquet
    write_tasks(new_task_df, root)

    # Update info.json
    dataset.meta.info["total_tasks"] = len(unique_tasks)
    write_info(dataset.meta.info, root)

    # Reload metadata to reflect changes
    dataset.meta.tasks = new_task_df
    dataset.meta.episodes = load_episodes(root)

    logging.info(f"Tasks: {unique_tasks}")
    return dataset
def convert_image_to_video_dataset(
    dataset: LeRobotDataset,
    output_dir: Path | None = None,
    repo_id: str | None = None,
    vcodec: str = "libsvtav1",
    pix_fmt: str = "yuv420p",
    g: int = 2,
    crf: int = 30,
    fast_decode: int = 0,
    episode_indices: list[int] | None = None,
    num_workers: int = 4,
    max_episodes_per_batch: int | None = None,
    max_frames_per_batch: int | None = None,
) -> LeRobotDataset:
    """Convert image-to-video dataset.

    Creates a new LeRobotDataset with images encoded as videos, following the proper
    LeRobot dataset structure with videos stored in chunked MP4 files.

    Args:
        dataset: The source LeRobot dataset with images
        output_dir: Root directory where the edited dataset will be stored. If not specified, defaults to $HF_LEROBOT_HOME/repo_id. Equivalent to new_root in EditDatasetConfig.
        repo_id: Edited dataset identifier. Equivalent to new_repo_id in EditDatasetConfig.
        vcodec: Video codec (default: libsvtav1)
        pix_fmt: Pixel format (default: yuv420p)
        g: Group of pictures size (default: 2)
        crf: Constant rate factor (default: 30)
        fast_decode: Fast decode tuning (default: 0)
        episode_indices: List of episode indices to convert (None = all episodes)
        num_workers: Number of threads for parallel processing (default: 4)
        max_episodes_per_batch: Maximum episodes per video batch to avoid memory issues (None = no limit)
        max_frames_per_batch: Maximum frames per video batch to avoid memory issues (None = no limit)

    Returns:
        New LeRobotDataset with images encoded as videos

    Raises:
        ValueError: If the source dataset already contains video keys, or if
            no image keys are found.
    """
    # Check that it's an image dataset
    if len(dataset.meta.video_keys) > 0:
        raise ValueError(
            f"This operation is for image datasets only. Video dataset provided: {dataset.repo_id}"
        )

    # Get all image keys (features whose name starts with the image-observation prefix)
    hf_dataset = dataset.hf_dataset.with_format(None)
    img_keys = [key for key in hf_dataset.features if key.startswith(OBS_IMAGE)]

    if len(img_keys) == 0:
        raise ValueError(f"No image keys found in dataset {dataset.repo_id}")

    # Determine which episodes to process
    if episode_indices is None:
        episode_indices = list(range(dataset.meta.total_episodes))

    if repo_id is None:
        repo_id = f"{dataset.repo_id}_video"

    logging.info(
        f"Converting {len(episode_indices)} episodes with {len(img_keys)} cameras from {dataset.repo_id}"
    )
    logging.info(f"Video codec: {vcodec}, pixel format: {pix_fmt}, GOP: {g}, CRF: {crf}")

    # Create new features dict, converting image features to video features
    new_features = {}
    for key, value in dataset.meta.features.items():
        if key not in img_keys:
            new_features[key] = value
        else:
            # Convert image key to video format
            new_features[key] = value.copy()
            new_features[key]["dtype"] = "video"  # Change dtype from "image" to "video"
            # Video info will be updated after episodes are encoded

    # Create new metadata for video dataset
    output_dir = Path(output_dir) if output_dir is not None else HF_LEROBOT_HOME / repo_id
    new_meta = LeRobotDatasetMetadata.create(
        repo_id=repo_id,
        fps=dataset.meta.fps,
        features=new_features,
        robot_type=dataset.meta.robot_type,
        root=output_dir,
        use_videos=True,
        chunks_size=dataset.meta.chunks_size,
        data_files_size_in_mb=dataset.meta.data_files_size_in_mb,
        video_files_size_in_mb=dataset.meta.video_files_size_in_mb,
    )

    # Create temporary directory for image extraction
    temp_dir = output_dir / "temp_images"
    temp_dir.mkdir(parents=True, exist_ok=True)

    # Process all episodes and batch encode videos
    # Use dictionary for O(1) episode metadata lookups instead of O(n) linear search
    all_episode_metadata = {}
    fps = int(dataset.fps)

    try:
        # Build episode metadata entries first
        logging.info("Building episode metadata...")
        cumulative_frame_idx = 0
        for ep_idx in episode_indices:
            src_episode = dataset.meta.episodes[ep_idx]
            ep_length = src_episode["length"]
            # Frame ranges are re-based so the new dataset is contiguous even
            # when only a subset of episodes is converted.
            ep_meta = {
                "episode_index": ep_idx,
                "length": ep_length,
                "dataset_from_index": cumulative_frame_idx,
                "dataset_to_index": cumulative_frame_idx + ep_length,
            }
            if "data/chunk_index" in src_episode:
                ep_meta["data/chunk_index"] = src_episode["data/chunk_index"]
                ep_meta["data/file_index"] = src_episode["data/file_index"]
            all_episode_metadata[ep_idx] = ep_meta
            cumulative_frame_idx += ep_length

        # Process each camera and batch encode multiple episodes together
        video_file_size_limit = new_meta.video_files_size_in_mb

        # Pre-compute episode lengths for batching
        episode_lengths = {ep_idx: dataset.meta.episodes["length"][ep_idx] for ep_idx in episode_indices}

        for img_key in tqdm(img_keys, desc="Processing cameras"):
            # Estimate size per frame by encoding a small calibration sample
            # This provides accurate compression ratio for the specific codec parameters
            size_per_frame_mb = _estimate_frame_size_via_calibration(
                dataset=dataset,
                img_key=img_key,
                episode_indices=episode_indices,
                temp_dir=temp_dir,
                fps=fps,
                vcodec=vcodec,
                pix_fmt=pix_fmt,
                g=g,
                crf=crf,
                fast_decode=fast_decode,
            )

            logging.info(f"Processing camera: {img_key}")
            chunk_idx, file_idx = 0, 0
            cumulative_timestamp = 0.0

            # Process episodes in batches to stay under size limit
            for batch_episodes in _iter_episode_batches(
                episode_indices=episode_indices,
                episode_lengths=episode_lengths,
                size_per_frame_mb=size_per_frame_mb,
                video_file_size_limit=video_file_size_limit,
                max_episodes=max_episodes_per_batch,
                max_frames=max_frames_per_batch,
            ):
                total_frames_in_batch = sum(episode_lengths[idx] for idx in batch_episodes)
                logging.info(
                    f" Encoding batch of {len(batch_episodes)} episodes "
                    f"({batch_episodes[0]}-{batch_episodes[-1]}) = {total_frames_in_batch} frames"
                )

                # Save images for all episodes in this batch
                imgs_dir = temp_dir / f"batch_{chunk_idx}_{file_idx}" / img_key
                episode_durations = _save_batch_episodes_images(
                    dataset=dataset,
                    imgs_dir=imgs_dir,
                    img_key=img_key,
                    episode_indices=batch_episodes,
                    num_workers=num_workers,
                )

                # Encode all batched episodes into single video
                video_path = new_meta.root / new_meta.video_path.format(
                    video_key=img_key, chunk_index=chunk_idx, file_index=file_idx
                )
                video_path.parent.mkdir(parents=True, exist_ok=True)
                encode_video_frames(
                    imgs_dir=imgs_dir,
                    video_path=video_path,
                    fps=fps,
                    vcodec=vcodec,
                    pix_fmt=pix_fmt,
                    g=g,
                    crf=crf,
                    fast_decode=fast_decode,
                    overwrite=True,
                )

                # Clean up temporary images
                shutil.rmtree(imgs_dir)

                # Update metadata for each episode in the batch
                for ep_idx, duration in zip(batch_episodes, episode_durations, strict=True):
                    from_timestamp = cumulative_timestamp
                    to_timestamp = cumulative_timestamp + duration
                    cumulative_timestamp = to_timestamp

                    # Find episode metadata entry and add video metadata (O(1) dictionary lookup)
                    ep_meta = all_episode_metadata[ep_idx]
                    ep_meta[f"videos/{img_key}/chunk_index"] = chunk_idx
                    ep_meta[f"videos/{img_key}/file_index"] = file_idx
                    ep_meta[f"videos/{img_key}/from_timestamp"] = from_timestamp
                    ep_meta[f"videos/{img_key}/to_timestamp"] = to_timestamp

                # Move to next video file for next batch; timestamps restart per file
                chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, new_meta.chunks_size)
                cumulative_timestamp = 0.0

        # Copy and transform data files (removing image columns)
        _copy_data_without_images(dataset, new_meta, episode_indices, img_keys)

        # Save episode metadata
        episodes_df = pd.DataFrame(list(all_episode_metadata.values()))
        episodes_path = new_meta.root / "meta" / "episodes" / "chunk-000" / "file-000.parquet"
        episodes_path.parent.mkdir(parents=True, exist_ok=True)
        episodes_df.to_parquet(episodes_path, index=False)

        # Update metadata info
        new_meta.info["total_episodes"] = len(episode_indices)
        new_meta.info["total_frames"] = sum(ep["length"] for ep in all_episode_metadata.values())
        new_meta.info["total_tasks"] = dataset.meta.total_tasks
        new_meta.info["splits"] = {"train": f"0:{len(episode_indices)}"}

        # Update video info for all image keys (now videos)
        # We need to manually set video info since update_video_info() checks video_keys first
        for img_key in img_keys:
            if not new_meta.features[img_key].get("info", None):
                video_path = new_meta.root / new_meta.video_path.format(
                    video_key=img_key, chunk_index=0, file_index=0
                )
                new_meta.info["features"][img_key]["info"] = get_video_info(video_path)
        write_info(new_meta.info, new_meta.root)

        # Copy stats and tasks
        if dataset.meta.stats is not None:
            # Remove image stats (those features no longer exist as images)
            new_stats = {k: v for k, v in dataset.meta.stats.items() if k not in img_keys}
            write_stats(new_stats, new_meta.root)

        if dataset.meta.tasks is not None:
            write_tasks(dataset.meta.tasks, new_meta.root)
    finally:
        # Clean up temporary directory even on failure
        if temp_dir.exists():
            shutil.rmtree(temp_dir)

    logging.info(f"Completed converting {dataset.repo_id} to video format")
    logging.info(f"New dataset saved to: {output_dir}")

    # Return new dataset
    return LeRobotDataset(repo_id=repo_id, root=output_dir)
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/datasets/dataset_tools.py",
"license": "Apache License 2.0",
"lines": 1439,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/scripts/lerobot_edit_dataset.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Edit LeRobot datasets using various transformation tools.
This script allows you to delete episodes, split datasets, merge datasets,
remove features, modify tasks, and convert image datasets to video format.
When new_repo_id is specified, creates a new dataset.
Path semantics (v2): --root and --new_root are exact dataset folders containing
meta/, data/, videos/. When omitted, defaults to $HF_LEROBOT_HOME/{repo_id}.
Usage Examples:
Delete episodes 0, 2, and 5 from a dataset:
lerobot-edit-dataset \
--repo_id lerobot/pusht \
--operation.type delete_episodes \
--operation.episode_indices "[0, 2, 5]"
Delete episodes from a local dataset at a specific path:
lerobot-edit-dataset \
--repo_id lerobot/pusht \
--root /path/to/pusht \
--operation.type delete_episodes \
--operation.episode_indices "[0, 2, 5]"
Delete episodes and save to a new dataset at a specific path and with a new repo_id:
lerobot-edit-dataset \
--repo_id lerobot/pusht \
--new_repo_id lerobot/pusht_filtered \
--new_root /path/to/pusht_filtered \
--operation.type delete_episodes \
--operation.episode_indices "[0, 2, 5]"
Split dataset by fractions (pusht_train, pusht_val):
lerobot-edit-dataset \
--repo_id lerobot/pusht \
--operation.type split \
--operation.splits '{"train": 0.8, "val": 0.2}'
Split dataset by fractions and save split datasets to a specific folder (base_folder/train, base_folder/val):
lerobot-edit-dataset \
--repo_id lerobot/pusht \
--new_root /path/to/base_folder \
--operation.type split \
--operation.splits '{"train": 0.8, "val": 0.2}'
Split dataset by episode indices:
lerobot-edit-dataset \
--repo_id lerobot/pusht \
--operation.type split \
--operation.splits '{"train": [0, 1, 2, 3], "val": [4, 5]}'
Split into more than two splits:
lerobot-edit-dataset \
--repo_id lerobot/pusht \
--operation.type split \
--operation.splits '{"train": 0.6, "val": 0.2, "test": 0.2}'
Merge multiple datasets:
lerobot-edit-dataset \
--new_repo_id lerobot/pusht_merged \
--operation.type merge \
--operation.repo_ids "['lerobot/pusht_train', 'lerobot/pusht_val']"
Merge multiple datasets to a specific output path:
lerobot-edit-dataset \
--new_repo_id lerobot/pusht_merged \
--new_root /path/to/pusht_merged \
--operation.type merge \
--operation.repo_ids "['lerobot/pusht_train', 'lerobot/pusht_val']"
Merge multiple datasets from a list of local dataset paths:
lerobot-edit-dataset \
--new_repo_id lerobot/pusht_merged \
--operation.type merge \
--operation.repo_ids "['pusht_train', 'pusht_val']" \
--operation.roots "['/path/to/pusht_train', '/path/to/pusht_val']"
Remove camera feature:
lerobot-edit-dataset \
--repo_id lerobot/pusht \
--operation.type remove_feature \
--operation.feature_names "['observation.image']"
Modify tasks - set a single task for all episodes (WARNING: modifies in-place):
lerobot-edit-dataset \
--repo_id lerobot/pusht \
--operation.type modify_tasks \
--operation.new_task "Pick up the cube and place it"
Modify tasks - set different tasks for specific episodes (WARNING: modifies in-place):
lerobot-edit-dataset \
--repo_id lerobot/pusht \
--operation.type modify_tasks \
--operation.episode_tasks '{"0": "Task A", "1": "Task B", "2": "Task A"}'
Modify tasks - set default task with overrides for specific episodes (WARNING: modifies in-place):
lerobot-edit-dataset \
--repo_id lerobot/pusht \
--operation.type modify_tasks \
--operation.new_task "Default task" \
--operation.episode_tasks '{"5": "Special task for episode 5"}'
Convert image dataset to video format and save locally:
lerobot-edit-dataset \
--repo_id lerobot/pusht_image \
--new_root /path/to/output/pusht_video \
--operation.type convert_image_to_video
Convert image dataset to video format and save with new repo_id:
lerobot-edit-dataset \
--repo_id lerobot/pusht_image \
--new_repo_id lerobot/pusht_video \
--operation.type convert_image_to_video
Convert image dataset to video format and push to hub:
lerobot-edit-dataset \
--repo_id lerobot/pusht_image \
--new_repo_id lerobot/pusht_video \
--operation.type convert_image_to_video \
--push_to_hub true
Show dataset information:
lerobot-edit-dataset \
--repo_id lerobot/pusht_image \
--operation.type info \
--operation.show_features true
Show dataset information without feature details:
lerobot-edit-dataset \
--repo_id lerobot/pusht_image \
--operation.type info \
--operation.show_features false
Using JSON config file:
lerobot-edit-dataset \
--config_path path/to/edit_config.json
"""
import abc
import logging
import shutil
import sys
from dataclasses import dataclass
from pathlib import Path
import draccus
from lerobot.configs import parser
from lerobot.datasets.dataset_tools import (
convert_image_to_video_dataset,
delete_episodes,
merge_datasets,
modify_tasks,
remove_feature,
split_dataset,
)
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.utils.constants import HF_LEROBOT_HOME
from lerobot.utils.utils import init_logging
@dataclass
class OperationConfig(draccus.ChoiceRegistry, abc.ABC):
    """Base class for edit operations; subclasses register under --operation.type."""

    @property
    def type(self) -> str:
        # The name this subclass was registered under (e.g. "split", "merge").
        return self.get_choice_name(self.__class__)
@OperationConfig.register_subclass("delete_episodes")
@dataclass
class DeleteEpisodesConfig(OperationConfig):
    """Options for the `delete_episodes` operation."""

    # Indices of episodes to delete; required (validated by the handler).
    episode_indices: list[int] | None = None
@OperationConfig.register_subclass("split")
@dataclass
class SplitConfig(OperationConfig):
    """Options for the `split` operation."""

    # Mapping of split name -> fraction (float) or explicit episode index list.
    splits: dict[str, float | list[int]] | None = None
@OperationConfig.register_subclass("merge")
@dataclass
class MergeConfig(OperationConfig):
    """Options for the `merge` operation."""

    # Identifiers of the source datasets to merge; required.
    repo_ids: list[str] | None = None
    # Optional local dataset roots, parallel to `repo_ids` (same length).
    roots: list[str] | None = None
@OperationConfig.register_subclass("remove_feature")
@dataclass
class RemoveFeatureConfig(OperationConfig):
    """Options for the `remove_feature` operation."""

    # Names of the features to drop from the dataset; required.
    feature_names: list[str] | None = None
@OperationConfig.register_subclass("modify_tasks")
@dataclass
class ModifyTasksConfig(OperationConfig):
    """Options for the `modify_tasks` operation (applied in-place)."""

    # Default task applied to every episode (unless overridden below).
    new_task: str | None = None
    # Per-episode overrides; keys are episode indices as strings (the CLI passes strings).
    episode_tasks: dict[str, str] | None = None
@OperationConfig.register_subclass("convert_image_to_video")
@dataclass
class ConvertImageToVideoConfig(OperationConfig):
    """Options for the `convert_image_to_video` operation."""

    # Deprecated: use --new_root instead (see handle_convert_image_to_video).
    output_dir: str | None = None
    # Encoding parameters forwarded to convert_image_to_video_dataset().
    vcodec: str = "libsvtav1"
    pix_fmt: str = "yuv420p"
    g: int = 2
    crf: int = 30
    fast_decode: int = 0
    # Subset of episodes to convert (None = all episodes).
    episode_indices: list[int] | None = None
    # Threads used for parallel frame extraction.
    num_workers: int = 4
    # Batch caps to bound memory use during encoding (None = no limit).
    max_episodes_per_batch: int | None = None
    max_frames_per_batch: int | None = None
@OperationConfig.register_subclass("info")
@dataclass
class InfoConfig(OperationConfig):
    """Options for the `info` operation."""

    # Also print per-feature details.
    show_features: bool = False
@dataclass
class EditDatasetConfig:
    """Top-level CLI config: one dataset edit operation plus input/output locations."""

    # Operation configuration.
    operation: OperationConfig
    # Input dataset identifier. Always required unless for Merge operation.
    repo_id: str | None = None
    # Root directory where the input dataset is stored. If not specified, defaults to $HF_LEROBOT_HOME/repo_id.
    root: str | None = None
    # Edited dataset identifier. When both new_repo_id (resp. new_root) and repo_id (resp. root) are identical, modifications are applied in-place and a backup of the original dataset is created. Required for Merge operation.
    new_repo_id: str | None = None
    # Root directory where the edited dataset will be stored. If not specified, defaults to $HF_LEROBOT_HOME/new_repo_id. For Split operation, this is the base directory for the split datasets.
    new_root: str | None = None
    # Upload dataset to Hugging Face hub.
    push_to_hub: bool = False
def get_output_path(
repo_id: str,
new_repo_id: str | None,
root: Path | str | None,
new_root: Path | str | None,
) -> tuple[str, Path]:
input_path = Path(root) if root else HF_LEROBOT_HOME / repo_id
output_repo_id = new_repo_id if new_repo_id else repo_id
output_path = Path(new_root) if new_root else HF_LEROBOT_HOME / output_repo_id
# In case of in-place modification, create a backup of the original dataset (if it exists)
if output_path == input_path:
backup_path = input_path.with_name(input_path.name + "_old")
if input_path.exists():
if backup_path.exists():
shutil.rmtree(backup_path)
shutil.move(input_path, backup_path)
return output_repo_id, output_path
def handle_delete_episodes(cfg: EditDatasetConfig) -> None:
    """Delete the requested episodes and write the result to the output dataset."""
    op = cfg.operation
    if not isinstance(op, DeleteEpisodesConfig):
        raise ValueError("Operation config must be DeleteEpisodesConfig")
    if not op.episode_indices:
        raise ValueError("episode_indices must be specified for delete_episodes operation")

    dataset = LeRobotDataset(cfg.repo_id, root=cfg.root)
    output_repo_id, output_dir = get_output_path(
        cfg.repo_id,
        new_repo_id=cfg.new_repo_id,
        root=cfg.root,
        new_root=cfg.new_root,
    )

    # For in-place edits get_output_path moved the original folder to
    # "<name>_old"; repoint the loaded dataset at that backup before reading.
    if output_dir == dataset.root:
        dataset.root = dataset.root.with_name(dataset.root.name + "_old")

    logging.info(f"Deleting episodes {op.episode_indices} from {cfg.repo_id}")
    new_dataset = delete_episodes(
        dataset,
        episode_indices=op.episode_indices,
        output_dir=output_dir,
        repo_id=output_repo_id,
    )

    logging.info(f"Dataset saved to {output_dir}")
    logging.info(f"Episodes: {new_dataset.meta.total_episodes}, Frames: {new_dataset.meta.total_frames}")

    if cfg.push_to_hub:
        logging.info(f"Pushing to hub as {output_repo_id}")
        LeRobotDataset(output_repo_id, root=output_dir).push_to_hub()
def handle_split(cfg: EditDatasetConfig) -> None:
    """Split a dataset into named subsets and report per-split statistics."""
    op = cfg.operation
    if not isinstance(op, SplitConfig):
        raise ValueError("Operation config must be SplitConfig")
    if not op.splits:
        raise ValueError(
            "splits dict must be specified with split names as keys and fractions/episode lists as values"
        )
    if cfg.new_repo_id is not None:
        # Split names are derived from --repo_id, so --new_repo_id has no effect.
        logging.warning(
            "split uses the original dataset identifier --repo_id to generate split names. The --new_repo_id parameter is ignored."
        )

    dataset = LeRobotDataset(cfg.repo_id, root=cfg.root)
    logging.info(f"Splitting dataset {cfg.repo_id} with splits: {op.splits}")
    split_datasets = split_dataset(dataset, splits=op.splits, output_dir=cfg.new_root)

    for name, subset in split_datasets.items():
        logging.info(f"{name}: {subset.meta.total_episodes} episodes, {subset.meta.total_frames} frames")
        if cfg.push_to_hub:
            logging.info(f"Pushing {name} split to hub as {subset.repo_id}")
            LeRobotDataset(subset.repo_id, root=subset.root).push_to_hub()
def handle_merge(cfg: EditDatasetConfig) -> None:
    """Merge several source datasets into a single new dataset.

    Raises:
        ValueError: If the operation config is not a MergeConfig, if no source
            repo_ids are given, if roots/repo_ids lengths mismatch, or if
            new_repo_id is missing for the merged result.
    """
    if not isinstance(cfg.operation, MergeConfig):
        raise ValueError("Operation config must be MergeConfig")
    if not cfg.operation.repo_ids:
        raise ValueError("repo_ids must be specified for merge operation")
    # Fail early with a clear message: new_repo_id is documented as required for
    # merge, and without it the default output path below would be built as
    # `HF_LEROBOT_HOME / None`, raising an opaque TypeError.
    if cfg.new_repo_id is None:
        raise ValueError("new_repo_id must be specified for merge operation")
    if cfg.repo_id is not None or cfg.root is not None:
        logging.warning(
            "merge uses --new_repo_id and --new_root for the merged dataset. The --repo_id and --root parameters are ignored."
        )

    if cfg.operation.roots:
        if len(cfg.operation.roots) != len(cfg.operation.repo_ids):
            raise ValueError("repo_ids and roots must have the same length for merge operation")
        logging.info(f"Loading {len(cfg.operation.roots)} datasets to merge")
        datasets = [
            LeRobotDataset(repo_id=repo_id, root=root)
            for repo_id, root in zip(cfg.operation.repo_ids, cfg.operation.roots, strict=True)
        ]
    else:
        logging.info(f"Loading {len(cfg.operation.repo_ids)} datasets to merge")
        datasets = [LeRobotDataset(repo_id) for repo_id in cfg.operation.repo_ids]

    output_dir = Path(cfg.new_root) if cfg.new_root else HF_LEROBOT_HOME / cfg.new_repo_id
    logging.info(f"Merging datasets into {cfg.new_repo_id}")
    merged_dataset = merge_datasets(
        datasets,
        output_repo_id=cfg.new_repo_id,
        output_dir=output_dir,
    )

    logging.info(f"Merged dataset saved to {output_dir}")
    logging.info(
        f"Episodes: {merged_dataset.meta.total_episodes}, Frames: {merged_dataset.meta.total_frames}"
    )

    if cfg.push_to_hub:
        logging.info(f"Pushing to hub as {cfg.new_repo_id}")
        LeRobotDataset(merged_dataset.repo_id, root=output_dir).push_to_hub()
def handle_remove_feature(cfg: EditDatasetConfig) -> None:
    """Drop the requested features and write the result to the output dataset."""
    op = cfg.operation
    if not isinstance(op, RemoveFeatureConfig):
        raise ValueError("Operation config must be RemoveFeatureConfig")
    if not op.feature_names:
        raise ValueError("feature_names must be specified for remove_feature operation")

    dataset = LeRobotDataset(cfg.repo_id, root=cfg.root)
    output_repo_id, output_dir = get_output_path(
        cfg.repo_id,
        new_repo_id=cfg.new_repo_id,
        root=cfg.root,
        new_root=cfg.new_root,
    )

    # For in-place edits get_output_path moved the original folder to
    # "<name>_old"; repoint the loaded dataset at that backup before reading.
    if output_dir == dataset.root:
        dataset.root = dataset.root.with_name(dataset.root.name + "_old")

    logging.info(f"Removing features {op.feature_names} from {cfg.repo_id}")
    new_dataset = remove_feature(
        dataset,
        feature_names=op.feature_names,
        output_dir=output_dir,
        repo_id=output_repo_id,
    )

    logging.info(f"Dataset saved to {output_dir}")
    logging.info(f"Remaining features: {list(new_dataset.meta.features.keys())}")

    if cfg.push_to_hub:
        logging.info(f"Pushing to hub as {output_repo_id}")
        LeRobotDataset(output_repo_id, root=output_dir).push_to_hub()
def handle_modify_tasks(cfg: EditDatasetConfig) -> None:
    """Rewrite the task strings of a dataset in place."""
    if not isinstance(cfg.operation, ModifyTasksConfig):
        raise ValueError("Operation config must be ModifyTasksConfig")

    default_task = cfg.operation.new_task
    raw_overrides = cfg.operation.episode_tasks
    if default_task is None and raw_overrides is None:
        raise ValueError("Must specify at least one of new_task or episode_tasks for modify_tasks operation")
    if cfg.new_repo_id is not None or cfg.new_root is not None:
        logging.warning(
            "modify_tasks modifies datasets in-place. The --new_repo_id and --new_root parameters are ignored."
        )

    dataset = LeRobotDataset(cfg.repo_id, root=cfg.root)
    logging.warning(f"Modifying dataset in-place at {dataset.root}. Original data will be overwritten.")

    # CLI parsing yields string keys; the modify_tasks API expects integer indices.
    overrides: dict[int, str] | None = None
    if raw_overrides is not None:
        overrides = {int(ep): task for ep, task in raw_overrides.items()}

    logging.info(f"Modifying tasks in {cfg.repo_id}")
    if default_task:
        logging.info(f" Default task: '{default_task}'")
    if overrides:
        logging.info(f" Episode-specific tasks: {overrides}")

    modified_dataset = modify_tasks(
        dataset,
        new_task=default_task,
        episode_tasks=overrides,
    )

    logging.info(f"Dataset modified at {dataset.root}")
    logging.info(f"Tasks: {list(modified_dataset.meta.tasks.index)}")

    if cfg.push_to_hub:
        logging.info(f"Pushing to hub as {cfg.repo_id}")
        modified_dataset.push_to_hub()
def handle_convert_image_to_video(cfg: EditDatasetConfig) -> None:
    """Re-encode an image-based dataset into video format and save it to a new location."""
    # The parser may instantiate any config type carrying the right fields, so fields
    # are read with getattr() instead of an isinstance() check on cfg.operation.
    dataset = LeRobotDataset(cfg.repo_id, root=cfg.root)
    op = cfg.operation
    legacy_output_dir = getattr(op, "output_dir", None)
    if legacy_output_dir:
        logging.warning(
            "--operation.output_dir is deprecated and will be removed in future versions. "
            "Please use --new_root instead."
        )
    # Output location priority: new_root > new_repo_id > operation.output_dir > auto-generated.
    if cfg.new_root:
        output_dir = Path(cfg.new_root)
        output_repo_id = cfg.new_repo_id or f"{cfg.repo_id}_video"
        logging.info(f"Saving to new_root: {output_dir} as {output_repo_id}")
    elif cfg.new_repo_id:
        output_repo_id = cfg.new_repo_id
        output_dir = HF_LEROBOT_HOME / cfg.new_repo_id
        logging.info(f"Saving to new dataset: {cfg.new_repo_id} at {output_dir}")
    elif legacy_output_dir:
        output_dir = Path(legacy_output_dir)
        output_repo_id = output_dir.name
        logging.info(f"Saving to local directory: {output_dir} as {output_repo_id}")
    else:
        output_repo_id = f"{cfg.repo_id}_video"
        output_dir = HF_LEROBOT_HOME / output_repo_id
        logging.info(f"Saving to auto-generated location: {output_dir} as {output_repo_id}")
    logging.info(f"Converting dataset {cfg.repo_id} to video format")
    # Encoder options fall back to these defaults when absent from the operation config.
    encoder_defaults = {
        "vcodec": "libsvtav1",
        "pix_fmt": "yuv420p",
        "g": 2,
        "crf": 30,
        "fast_decode": 0,
        "episode_indices": None,
        "num_workers": 4,
        "max_episodes_per_batch": None,
        "max_frames_per_batch": None,
    }
    encoder_kwargs = {name: getattr(op, name, default) for name, default in encoder_defaults.items()}
    new_dataset = convert_image_to_video_dataset(
        dataset=dataset,
        output_dir=output_dir,
        repo_id=output_repo_id,
        **encoder_kwargs,
    )
    logging.info("Video dataset created successfully!")
    logging.info(f"Location: {output_dir}")
    logging.info(f"Episodes: {new_dataset.meta.total_episodes}")
    logging.info(f"Frames: {new_dataset.meta.total_frames}")
    if cfg.push_to_hub:
        logging.info(f"Pushing to hub as {output_repo_id}...")
        new_dataset.push_to_hub()
        logging.info("✓ Successfully pushed to hub!")
    else:
        logging.info("Dataset saved locally (not pushed to hub)")
def _get_dataset_size(repo_path):
import os
total = 0
with os.scandir(repo_path) as it:
for entry in it:
if entry.is_file():
total += entry.stat().st_size
elif entry.is_dir():
total += _get_dataset_size(entry.path)
return total
def handle_info(cfg: EditDatasetConfig):
    """Print a human-readable summary of a dataset to stdout."""
    if not isinstance(cfg.operation, InfoConfig):
        raise ValueError("Operation config must be InfoConfig")
    ds = LeRobotDataset(cfg.repo_id, root=cfg.root)
    meta = ds.meta
    frames_per_episode = meta.total_frames / meta.total_episodes
    total_file_size = _get_dataset_size(ds.root)
    summary = [
        f"======Info {meta.repo_id}\n",
        f"Repository ID: {meta.repo_id} \n",
        f"Total episode: {meta.total_episodes} \n",
        f"Total task: {meta.total_tasks} \n",
        f"Total frame(Actual Count): {meta.total_frames}({len(ds)}) \n",
        f"Average frame per episode: {frames_per_episode:.1f}\n",
        f"Average episode time(sec): {frames_per_episode / meta.fps:.1f}\n",
        f"FPS: {meta.fps}\n",
        f"Size: {total_file_size / (1024 * 1024):.1f} MB\n",
    ]
    sys.stdout.write("".join(summary))
    if cfg.operation.show_features:
        import json

        feature_dump_str = json.dumps(
            meta.features, ensure_ascii=False, indent=4, sort_keys=True, separators=(",", ": ")
        )
        sys.stdout.write("Features:\n")
        sys.stdout.write(f"{feature_dump_str}\n")
def _validate_config(cfg: EditDatasetConfig) -> None:
    """Check that the identifiers each operation needs were supplied on the CLI."""
    # Merge builds a brand-new dataset, so it needs the output id; every other
    # operation reads an existing dataset, so it needs the input id.
    if isinstance(cfg.operation, MergeConfig):
        if cfg.new_repo_id:
            return
        raise ValueError("--new_repo_id is required for merge operation (the merged dataset identifier)")
    if cfg.repo_id:
        return
    raise ValueError(
        f"--repo_id is required for {cfg.operation.type} operation (the input dataset identifier)"
    )
@parser.wrap()
def edit_dataset(cfg: EditDatasetConfig) -> None:
    """Validate the parsed CLI config and dispatch to the matching operation handler."""
    _validate_config(cfg)
    # Table-driven dispatch: operation type string -> handler function.
    handlers = {
        "delete_episodes": handle_delete_episodes,
        "split": handle_split,
        "merge": handle_merge,
        "remove_feature": handle_remove_feature,
        "modify_tasks": handle_modify_tasks,
        "convert_image_to_video": handle_convert_image_to_video,
        "info": handle_info,
    }
    operation_type = cfg.operation.type
    handler = handlers.get(operation_type)
    if handler is None:
        available = ", ".join(OperationConfig.get_known_choices())
        raise ValueError(f"Unknown operation: {operation_type}\nAvailable operations: {available}")
    handler(cfg)
def main() -> None:
    """CLI entry point: configure logging, then parse args and run the requested edit."""
    init_logging()
    edit_dataset()
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/scripts/lerobot_edit_dataset.py",
"license": "Apache License 2.0",
"lines": 491,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:tests/datasets/test_dataset_tools.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataset tools utilities."""
from unittest.mock import patch
import numpy as np
import pytest
import torch
from lerobot.datasets.dataset_tools import (
add_features,
delete_episodes,
merge_datasets,
modify_features,
modify_tasks,
remove_feature,
split_dataset,
)
from lerobot.scripts.lerobot_edit_dataset import convert_image_to_video_dataset
@pytest.fixture
def sample_dataset(tmp_path, empty_lerobot_dataset_factory):
    """Build a small on-disk dataset: 5 episodes x 10 frames, alternating 2 tasks."""
    schema = {
        "action": {"dtype": "float32", "shape": (6,), "names": None},
        "observation.state": {"dtype": "float32", "shape": (4,), "names": None},
        "observation.images.top": {"dtype": "image", "shape": (224, 224, 3), "names": None},
    }
    dataset = empty_lerobot_dataset_factory(root=tmp_path / "test_dataset", features=schema)
    for episode in range(5):
        for _frame in range(10):
            dataset.add_frame(
                {
                    "action": np.random.randn(6).astype(np.float32),
                    "observation.state": np.random.randn(4).astype(np.float32),
                    "observation.images.top": np.random.randint(
                        0, 255, size=(224, 224, 3), dtype=np.uint8
                    ),
                    "task": f"task_{episode % 2}",
                }
            )
        dataset.save_episode()
    dataset.finalize()
    return dataset
def test_delete_single_episode(sample_dataset, tmp_path):
    """Deleting one episode keeps the other four and re-indexes them densely."""
    output_dir = tmp_path / "filtered"
    version_patch = patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0")
    download_patch = patch(
        "lerobot.datasets.lerobot_dataset.snapshot_download", return_value=str(output_dir)
    )
    with version_patch, download_patch:
        result = delete_episodes(sample_dataset, episode_indices=[2], output_dir=output_dir)
        assert result.meta.total_episodes == 4
        assert result.meta.total_frames == 40
        remaining = {int(idx.item()) for idx in result.hf_dataset["episode_index"]}
        assert remaining == {0, 1, 2, 3}
        assert len(result) == 40
def test_delete_multiple_episodes(sample_dataset, tmp_path):
    """Deleting two non-adjacent episodes leaves three, re-indexed from zero."""
    output_dir = tmp_path / "filtered"
    version_patch = patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0")
    download_patch = patch(
        "lerobot.datasets.lerobot_dataset.snapshot_download", return_value=str(output_dir)
    )
    with version_patch, download_patch:
        result = delete_episodes(sample_dataset, episode_indices=[1, 3], output_dir=output_dir)
        assert result.meta.total_episodes == 3
        assert result.meta.total_frames == 30
        remaining = {int(idx.item()) for idx in result.hf_dataset["episode_index"]}
        assert remaining == {0, 1, 2}
def test_delete_invalid_episodes(sample_dataset, tmp_path):
    """Out-of-range episode indices are rejected with a ValueError."""
    with pytest.raises(ValueError, match="Invalid episode indices"):
        delete_episodes(sample_dataset, episode_indices=[10, 20], output_dir=tmp_path / "filtered")
def test_delete_all_episodes(sample_dataset, tmp_path):
    """Requesting deletion of every episode is an error."""
    every_episode = list(range(5))
    with pytest.raises(ValueError, match="Cannot delete all episodes"):
        delete_episodes(sample_dataset, episode_indices=every_episode, output_dir=tmp_path / "filtered")
def test_delete_empty_list(sample_dataset, tmp_path):
    """An empty deletion list is an error."""
    with pytest.raises(ValueError, match="No episodes to delete"):
        delete_episodes(sample_dataset, episode_indices=[], output_dir=tmp_path / "filtered")
def test_split_by_episodes(sample_dataset, tmp_path):
    """Splitting by explicit episode lists yields correctly sized, re-indexed splits."""
    splits = {"train": [0, 1, 2], "val": [3, 4]}

    def fake_snapshot(repo_id, **kwargs):
        # Route each split's repo to its own local directory.
        if "train" in repo_id:
            return str(tmp_path / f"{sample_dataset.repo_id}_train")
        if "val" in repo_id:
            return str(tmp_path / f"{sample_dataset.repo_id}_val")
        return str(kwargs.get("local_dir", tmp_path))

    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch("lerobot.datasets.lerobot_dataset.snapshot_download", side_effect=fake_snapshot),
    ):
        result = split_dataset(sample_dataset, splits=splits, output_dir=tmp_path)
        assert set(result.keys()) == {"train", "val"}
        assert result["train"].meta.total_episodes == 3
        assert result["train"].meta.total_frames == 30
        assert result["val"].meta.total_episodes == 2
        assert result["val"].meta.total_frames == 20
        train_episodes = {int(idx.item()) for idx in result["train"].hf_dataset["episode_index"]}
        assert train_episodes == {0, 1, 2}
        val_episodes = {int(idx.item()) for idx in result["val"].hf_dataset["episode_index"]}
        assert val_episodes == {0, 1}
def test_split_by_fractions(sample_dataset, tmp_path):
    """Fractional splits are resolved to whole episode counts (0.6/0.4 of 5 -> 3/2)."""
    splits = {"train": 0.6, "val": 0.4}

    def fake_snapshot(repo_id, **kwargs):
        for split_name in splits:
            if split_name in repo_id:
                return str(tmp_path / f"{sample_dataset.repo_id}_{split_name}")
        return str(kwargs.get("local_dir", tmp_path))

    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch("lerobot.datasets.lerobot_dataset.snapshot_download", side_effect=fake_snapshot),
    ):
        result = split_dataset(sample_dataset, splits=splits, output_dir=tmp_path)
        assert result["train"].meta.total_episodes == 3
        assert result["val"].meta.total_episodes == 2
def test_split_overlapping_episodes(sample_dataset, tmp_path):
    """An episode listed in two splits is rejected."""
    overlapping = {"train": [0, 1, 2], "val": [2, 3, 4]}
    with pytest.raises(ValueError, match="Episodes cannot appear in multiple splits"):
        split_dataset(sample_dataset, splits=overlapping, output_dir=tmp_path)
def test_split_invalid_fractions(sample_dataset, tmp_path):
    """Fractions summing above 1.0 are rejected."""
    oversized = {"train": 0.7, "val": 0.5}
    with pytest.raises(ValueError, match="Split fractions must sum to <= 1.0"):
        split_dataset(sample_dataset, splits=oversized, output_dir=tmp_path)
def test_split_empty(sample_dataset, tmp_path):
    """An empty splits mapping is rejected."""
    with pytest.raises(ValueError, match="No splits provided"):
        split_dataset(sample_dataset, splits={}, output_dir=tmp_path)
def test_merge_two_datasets(sample_dataset, tmp_path, empty_lerobot_dataset_factory):
    """Merging two datasets concatenates episodes and re-indexes them contiguously."""
    schema = {
        "action": {"dtype": "float32", "shape": (6,), "names": None},
        "observation.state": {"dtype": "float32", "shape": (4,), "names": None},
        "observation.images.top": {"dtype": "image", "shape": (224, 224, 3), "names": None},
    }
    other = empty_lerobot_dataset_factory(root=tmp_path / "test_dataset2", features=schema)
    for episode in range(3):
        for _ in range(10):
            other.add_frame(
                {
                    "action": np.random.randn(6).astype(np.float32),
                    "observation.state": np.random.randn(4).astype(np.float32),
                    "observation.images.top": np.random.randint(
                        0, 255, size=(224, 224, 3), dtype=np.uint8
                    ),
                    "task": f"task_{episode % 2}",
                }
            )
        other.save_episode()
    other.finalize()
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch(
            "lerobot.datasets.lerobot_dataset.snapshot_download",
            return_value=str(tmp_path / "merged_dataset"),
        ),
    ):
        merged = merge_datasets(
            [sample_dataset, other],
            output_repo_id="merged_dataset",
            output_dir=tmp_path / "merged_dataset",
        )
        assert merged.meta.total_episodes == 8  # 5 + 3
        assert merged.meta.total_frames == 80  # 50 + 30
        episode_ids = sorted({int(idx.item()) for idx in merged.hf_dataset["episode_index"]})
        assert episode_ids == list(range(8))
def test_merge_empty_list(tmp_path):
    """Merging an empty dataset list is an error."""
    with pytest.raises(ValueError, match="No datasets to merge"):
        merge_datasets([], output_repo_id="merged", output_dir=tmp_path)
def test_add_features_with_values(sample_dataset, tmp_path):
    """A feature backed by a precomputed array is attached to every frame."""
    frame_count = sample_dataset.meta.total_frames
    rewards = np.random.randn(frame_count, 1).astype(np.float32)
    info = {"dtype": "float32", "shape": (1,), "names": None}
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch(
            "lerobot.datasets.lerobot_dataset.snapshot_download",
            return_value=str(tmp_path / "with_reward"),
        ),
    ):
        enriched = add_features(
            dataset=sample_dataset,
            features={"reward": (rewards, info)},
            output_dir=tmp_path / "with_reward",
        )
        assert "reward" in enriched.meta.features
        assert enriched.meta.features["reward"] == info
        assert len(enriched) == frame_count
        first_item = enriched[0]
        assert "reward" in first_item
        assert isinstance(first_item["reward"], torch.Tensor)
def test_add_features_with_callable(sample_dataset, tmp_path):
    """A feature defined by a callable is evaluated per (episode, frame)."""

    def compute_reward(frame_dict, episode_idx, frame_idx):
        # Deterministic value so the first frame of episode 0 is exactly 0.0.
        return float(episode_idx * 10 + frame_idx)

    info = {"dtype": "float32", "shape": (1,), "names": None}
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch(
            "lerobot.datasets.lerobot_dataset.snapshot_download",
            return_value=str(tmp_path / "with_reward"),
        ),
    ):
        enriched = add_features(
            dataset=sample_dataset,
            features={"reward": (compute_reward, info)},
            output_dir=tmp_path / "with_reward",
        )
        assert "reward" in enriched.meta.features
        items = [enriched[i] for i in range(10)]
        episode0 = [item for item in items if item["episode_index"] == 0]
        assert len(episode0) == 10
        first_frame = episode0[0]
        assert first_frame["frame_index"] == 0
        assert float(first_frame["reward"]) == 0.0
def test_add_existing_feature(sample_dataset, tmp_path):
    """Re-adding a feature name that already exists is rejected."""
    duplicate = {"action": (np.zeros(50), {"dtype": "float32", "shape": (1,)})}
    with pytest.raises(ValueError, match="Feature 'action' already exists"):
        add_features(dataset=sample_dataset, features=duplicate, output_dir=tmp_path / "modified")
def test_add_feature_invalid_info(sample_dataset, tmp_path):
    """Feature info missing required keys (e.g. shape) is rejected."""
    incomplete_info = {"dtype": "float32"}
    with pytest.raises(ValueError, match="feature_info for 'reward' must contain keys"):
        add_features(
            dataset=sample_dataset,
            features={"reward": (np.zeros(50), incomplete_info)},
            output_dir=tmp_path / "modified",
        )
def test_modify_features_add_and_remove(sample_dataset, tmp_path):
    """modify_features can add one feature and drop another in a single pass."""
    info = {"dtype": "float32", "shape": (1,), "names": None}
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch(
            "lerobot.datasets.lerobot_dataset.snapshot_download",
            return_value=str(tmp_path / "modified"),
        ),
    ):
        # Attach a "reward" feature that the second step removes again.
        with_reward = add_features(
            sample_dataset,
            features={"reward": (np.random.randn(50, 1).astype(np.float32), info)},
            output_dir=tmp_path / "with_reward",
        )
        modified = modify_features(
            with_reward,
            add_features={"success": (np.random.randn(50, 1).astype(np.float32), info)},
            remove_features="reward",
            output_dir=tmp_path / "modified",
        )
        assert "success" in modified.meta.features
        assert "reward" not in modified.meta.features
        assert len(modified) == 50
def test_modify_features_only_add(sample_dataset, tmp_path):
    """modify_features works when only add_features is supplied."""
    info = {"dtype": "float32", "shape": (1,), "names": None}
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch(
            "lerobot.datasets.lerobot_dataset.snapshot_download",
            return_value=str(tmp_path / "modified"),
        ),
    ):
        modified = modify_features(
            sample_dataset,
            add_features={"reward": (np.random.randn(50, 1).astype(np.float32), info)},
            output_dir=tmp_path / "modified",
        )
        assert "reward" in modified.meta.features
        assert len(modified) == 50
def test_modify_features_only_remove(sample_dataset, tmp_path):
    """modify_features works when only remove_features is supplied."""
    info = {"dtype": "float32", "shape": (1,), "names": None}

    def fake_snapshot(repo_id, **kwargs):
        return str(kwargs.get("local_dir", tmp_path))

    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch("lerobot.datasets.lerobot_dataset.snapshot_download", side_effect=fake_snapshot),
    ):
        with_reward = add_features(
            sample_dataset,
            features={"reward": (np.random.randn(50, 1).astype(np.float32), info)},
            output_dir=tmp_path / "with_reward",
        )
        modified = modify_features(with_reward, remove_features="reward", output_dir=tmp_path / "modified")
        assert "reward" not in modified.meta.features
def test_modify_features_no_changes(sample_dataset, tmp_path):
    """Calling modify_features without any change request is an error."""
    with pytest.raises(ValueError, match="Must specify at least one of add_features or remove_features"):
        modify_features(sample_dataset, output_dir=tmp_path / "modified")
def test_remove_single_feature(sample_dataset, tmp_path):
    """A previously added feature can be removed again, from meta and items alike."""
    info = {"dtype": "float32", "shape": (1,), "names": None}

    def fake_snapshot(repo_id, **kwargs):
        return str(kwargs.get("local_dir", tmp_path))

    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch("lerobot.datasets.lerobot_dataset.snapshot_download", side_effect=fake_snapshot),
    ):
        with_reward = add_features(
            dataset=sample_dataset,
            features={"reward": (np.random.randn(50, 1).astype(np.float32), info)},
            output_dir=tmp_path / "with_reward",
        )
        without_reward = remove_feature(
            with_reward,
            feature_names="reward",
            output_dir=tmp_path / "without_reward",
        )
        assert "reward" not in without_reward.meta.features
        first_item = without_reward[0]
        assert "reward" not in first_item
def test_remove_multiple_features(sample_dataset, tmp_path):
    """Several features can be removed in a single call."""

    def fake_snapshot(repo_id, **kwargs):
        return str(kwargs.get("local_dir", tmp_path))

    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch("lerobot.datasets.lerobot_dataset.snapshot_download", side_effect=fake_snapshot),
    ):
        frame_total = sample_dataset.meta.total_frames
        extra_features = {
            name: (
                np.random.randn(frame_total, 1).astype(np.float32),
                {"dtype": "float32", "shape": (1,), "names": None},
            )
            for name in ("reward", "success")
        }
        enriched = add_features(
            sample_dataset, features=extra_features, output_dir=tmp_path / "with_features"
        )
        cleaned = remove_feature(
            enriched, feature_names=["reward", "success"], output_dir=tmp_path / "clean"
        )
        assert "reward" not in cleaned.meta.features
        assert "success" not in cleaned.meta.features
def test_remove_nonexistent_feature(sample_dataset, tmp_path):
    """Removing a feature that does not exist is rejected."""
    with pytest.raises(ValueError, match="Feature 'nonexistent' not found"):
        remove_feature(sample_dataset, feature_names="nonexistent", output_dir=tmp_path / "modified")
def test_remove_required_feature(sample_dataset, tmp_path):
    """Required bookkeeping features such as 'timestamp' cannot be removed."""
    with pytest.raises(ValueError, match="Cannot remove required features"):
        remove_feature(sample_dataset, feature_names="timestamp", output_dir=tmp_path / "modified")
def test_remove_camera_feature(sample_dataset, tmp_path):
    """Removing a camera feature drops it from features, camera_keys and items."""
    if not sample_dataset.meta.camera_keys:
        pytest.skip("No camera keys in dataset")
    target_camera = sample_dataset.meta.camera_keys[0]
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch(
            "lerobot.datasets.lerobot_dataset.snapshot_download",
            return_value=str(tmp_path / "without_camera"),
        ),
    ):
        stripped = remove_feature(
            sample_dataset,
            feature_names=target_camera,
            output_dir=tmp_path / "without_camera",
        )
        assert target_camera not in stripped.meta.features
        assert target_camera not in stripped.meta.camera_keys
        first_item = stripped[0]
        assert target_camera not in first_item
def test_complex_workflow_integration(sample_dataset, tmp_path):
    """Chain add -> delete -> split -> merge and verify the final dataset."""

    def fake_snapshot(repo_id, **kwargs):
        return str(kwargs.get("local_dir", tmp_path))

    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch("lerobot.datasets.lerobot_dataset.snapshot_download", side_effect=fake_snapshot),
    ):
        reward_info = {"dtype": "float32", "shape": (1,), "names": None}
        enriched = add_features(
            sample_dataset,
            features={"reward": (np.random.randn(50, 1).astype(np.float32), reward_info)},
            output_dir=tmp_path / "step1",
        )
        trimmed = delete_episodes(enriched, episode_indices=[2], output_dir=tmp_path / "step2")
        split_result = split_dataset(
            trimmed, splits={"train": 0.75, "val": 0.25}, output_dir=tmp_path / "step3"
        )
        merged = merge_datasets(
            list(split_result.values()),
            output_repo_id="final_dataset",
            output_dir=tmp_path / "step4",
        )
        # 5 episodes - 1 deleted = 4 episodes of 10 frames, with the added feature intact.
        assert merged.meta.total_episodes == 4
        assert merged.meta.total_frames == 40
        assert "reward" in merged.meta.features
        assert len(merged) == 40
        first_item = merged[0]
        assert "reward" in first_item
def test_delete_episodes_preserves_stats(sample_dataset, tmp_path):
    """Feature statistics survive an episode deletion."""
    output_dir = tmp_path / "filtered"
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch("lerobot.datasets.lerobot_dataset.snapshot_download", return_value=str(output_dir)),
    ):
        filtered = delete_episodes(sample_dataset, episode_indices=[2], output_dir=output_dir)
        stats = filtered.meta.stats
        assert stats is not None
        for feature in ["action", "observation.state"]:
            assert feature in stats
            assert "mean" in stats[feature]
            assert "std" in stats[feature]
def test_delete_episodes_preserves_tasks(sample_dataset, tmp_path):
    """Task metadata survives an episode deletion."""
    output_dir = tmp_path / "filtered"
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch("lerobot.datasets.lerobot_dataset.snapshot_download", return_value=str(output_dir)),
    ):
        filtered = delete_episodes(sample_dataset, episode_indices=[0], output_dir=output_dir)
        assert filtered.meta.tasks is not None
        assert len(filtered.meta.tasks) == 2
        observed_tasks = {str(item["task"]) for item in filtered}
        assert len(observed_tasks) > 0
def test_split_three_ways(sample_dataset, tmp_path):
    """A 0.6/0.2/0.2 split of 5 episodes yields 3/1/1 and loses no frames."""
    splits = {"train": 0.6, "val": 0.2, "test": 0.2}

    def fake_snapshot(repo_id, **kwargs):
        for split_name in splits:
            if split_name in repo_id:
                return str(tmp_path / f"{sample_dataset.repo_id}_{split_name}")
        return str(kwargs.get("local_dir", tmp_path))

    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch("lerobot.datasets.lerobot_dataset.snapshot_download", side_effect=fake_snapshot),
    ):
        result = split_dataset(sample_dataset, splits=splits, output_dir=tmp_path)
        assert set(result.keys()) == {"train", "val", "test"}
        assert result["train"].meta.total_episodes == 3
        assert result["val"].meta.total_episodes == 1
        assert result["test"].meta.total_episodes == 1
        combined_frames = sum(ds.meta.total_frames for ds in result.values())
        assert combined_frames == sample_dataset.meta.total_frames
def test_split_preserves_stats(sample_dataset, tmp_path):
    """Every split keeps per-feature statistics."""
    splits = {"train": [0, 1, 2], "val": [3, 4]}

    def fake_snapshot(repo_id, **kwargs):
        for split_name in splits:
            if split_name in repo_id:
                return str(tmp_path / f"{sample_dataset.repo_id}_{split_name}")
        return str(kwargs.get("local_dir", tmp_path))

    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch("lerobot.datasets.lerobot_dataset.snapshot_download", side_effect=fake_snapshot),
    ):
        result = split_dataset(sample_dataset, splits=splits, output_dir=tmp_path)
        for split_ds in result.values():
            stats = split_ds.meta.stats
            assert stats is not None
            for feature in ["action", "observation.state"]:
                assert feature in stats
                assert "mean" in stats[feature]
                assert "std" in stats[feature]
def test_merge_three_datasets(sample_dataset, tmp_path, empty_lerobot_dataset_factory):
    """Merging three datasets accumulates all episodes and frames."""
    schema = {
        "action": {"dtype": "float32", "shape": (6,), "names": None},
        "observation.state": {"dtype": "float32", "shape": (4,), "names": None},
        "observation.images.top": {"dtype": "image", "shape": (224, 224, 3), "names": None},
    }
    datasets = [sample_dataset]
    for extra_idx in range(2):
        extra = empty_lerobot_dataset_factory(
            root=tmp_path / f"test_dataset{extra_idx + 2}", features=schema
        )
        for episode in range(2):
            for _ in range(10):
                extra.add_frame(
                    {
                        "action": np.random.randn(6).astype(np.float32),
                        "observation.state": np.random.randn(4).astype(np.float32),
                        "observation.images.top": np.random.randint(
                            0, 255, size=(224, 224, 3), dtype=np.uint8
                        ),
                        "task": f"task_{episode}",
                    }
                )
            extra.save_episode()
        extra.finalize()
        datasets.append(extra)
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch(
            "lerobot.datasets.lerobot_dataset.snapshot_download",
            return_value=str(tmp_path / "merged_dataset"),
        ),
    ):
        merged = merge_datasets(
            datasets, output_repo_id="merged_dataset", output_dir=tmp_path / "merged_dataset"
        )
        # 5 + 2 + 2 episodes, 10 frames each.
        assert merged.meta.total_episodes == 9
        assert merged.meta.total_frames == 90
def test_merge_preserves_stats(sample_dataset, tmp_path, empty_lerobot_dataset_factory):
    """A merged dataset carries per-feature statistics."""
    schema = {
        "action": {"dtype": "float32", "shape": (6,), "names": None},
        "observation.state": {"dtype": "float32", "shape": (4,), "names": None},
        "observation.images.top": {"dtype": "image", "shape": (224, 224, 3), "names": None},
    }
    other = empty_lerobot_dataset_factory(root=tmp_path / "test_dataset2", features=schema)
    for episode in range(3):
        for _ in range(10):
            other.add_frame(
                {
                    "action": np.random.randn(6).astype(np.float32),
                    "observation.state": np.random.randn(4).astype(np.float32),
                    "observation.images.top": np.random.randint(
                        0, 255, size=(224, 224, 3), dtype=np.uint8
                    ),
                    "task": f"task_{episode % 2}",
                }
            )
        other.save_episode()
    other.finalize()
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch(
            "lerobot.datasets.lerobot_dataset.snapshot_download",
            return_value=str(tmp_path / "merged_dataset"),
        ),
    ):
        merged = merge_datasets(
            [sample_dataset, other],
            output_repo_id="merged_dataset",
            output_dir=tmp_path / "merged_dataset",
        )
        stats = merged.meta.stats
        assert stats is not None
        for feature in ["action", "observation.state"]:
            assert feature in stats
            assert "mean" in stats[feature]
            assert "std" in stats[feature]
def test_add_features_preserves_existing_stats(sample_dataset, tmp_path):
    """Adding a new feature keeps statistics of the pre-existing features."""
    frame_count = sample_dataset.meta.total_frames
    rewards = np.random.randn(frame_count, 1).astype(np.float32)
    info = {"dtype": "float32", "shape": (1,), "names": None}
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version", return_value="v3.0"),
        patch(
            "lerobot.datasets.lerobot_dataset.snapshot_download",
            return_value=str(tmp_path / "with_reward"),
        ),
    ):
        enriched = add_features(
            dataset=sample_dataset,
            features={"reward": (rewards, info)},
            output_dir=tmp_path / "with_reward",
        )
        stats = enriched.meta.stats
        assert stats is not None
        for feature in ["action", "observation.state"]:
            assert feature in stats
            assert "mean" in stats[feature]
            assert "std" in stats[feature]
def test_remove_feature_updates_stats(sample_dataset, tmp_path):
    """Removing a feature must also drop it from the dataset statistics."""
    reward_info = {"dtype": "float32", "shape": (1,), "names": None}
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version,
        patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download,
    ):
        mock_get_safe_version.return_value = "v3.0"
        mock_snapshot_download.side_effect = lambda repo_id, **kwargs: str(kwargs.get("local_dir", tmp_path))
        # First create a dataset that has a "reward" feature ...
        with_reward = add_features(
            sample_dataset,
            features={"reward": (np.random.randn(50, 1).astype(np.float32), reward_info)},
            output_dir=tmp_path / "with_reward",
        )
        # ... then strip it again.
        without_reward = remove_feature(
            with_reward,
            feature_names="reward",
            output_dir=tmp_path / "without_reward",
        )
        if without_reward.meta.stats:
            assert "reward" not in without_reward.meta.stats
def test_delete_consecutive_episodes(sample_dataset, tmp_path):
    """Deleting a consecutive range of episodes re-indexes the remainder from zero."""
    output_dir = tmp_path / "filtered"
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version,
        patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download,
    ):
        mock_get_safe_version.return_value = "v3.0"
        mock_snapshot_download.return_value = str(output_dir)
        filtered = delete_episodes(
            sample_dataset,
            episode_indices=[1, 2, 3],
            output_dir=output_dir,
        )
        assert filtered.meta.total_episodes == 2
        assert filtered.meta.total_frames == 20
        remaining = sorted({int(value.item()) for value in filtered.hf_dataset["episode_index"]})
        assert remaining == [0, 1]
def test_delete_first_and_last_episodes(sample_dataset, tmp_path):
    """Deleting the first and last episodes keeps the middle ones, re-indexed from zero."""
    output_dir = tmp_path / "filtered"
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version,
        patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download,
    ):
        mock_get_safe_version.return_value = "v3.0"
        mock_snapshot_download.return_value = str(output_dir)
        filtered = delete_episodes(
            sample_dataset,
            episode_indices=[0, 4],
            output_dir=output_dir,
        )
        assert filtered.meta.total_episodes == 3
        assert filtered.meta.total_frames == 30
        remaining = sorted({int(value.item()) for value in filtered.hf_dataset["episode_index"]})
        assert remaining == [0, 1, 2]
def test_split_all_episodes_assigned(sample_dataset, tmp_path):
    """Explicitly assigning every episode to a split must conserve the episode count."""
    splits = {
        "split1": [0, 1],
        "split2": [2, 3],
        "split3": [4],
    }
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version,
        patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download,
    ):
        mock_get_safe_version.return_value = "v3.0"

        def fake_snapshot(repo_id, **kwargs):
            # Route each split's repo to a dedicated local directory.
            matches = [name for name in splits if name in repo_id]
            if matches:
                return str(tmp_path / f"{sample_dataset.repo_id}_{matches[0]}")
            return str(kwargs.get("local_dir", tmp_path))

        mock_snapshot_download.side_effect = fake_snapshot
        result = split_dataset(
            sample_dataset,
            splits=splits,
            output_dir=tmp_path,
        )
        assigned = sum(ds.meta.total_episodes for ds in result.values())
        assert assigned == sample_dataset.meta.total_episodes
def test_modify_features_preserves_file_structure(sample_dataset, tmp_path):
    """Test that modifying features preserves chunk_idx and file_idx from source dataset."""
    feature_info = {"dtype": "float32", "shape": (1,), "names": None}
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version,
        patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download,
    ):
        mock_get_safe_version.return_value = "v3.0"

        # Route every hub download to a per-repo directory under tmp_path.
        def mock_snapshot(repo_id, **kwargs):
            return str(kwargs.get("local_dir", tmp_path / repo_id.split("/")[-1]))

        mock_snapshot_download.side_effect = mock_snapshot
        # First split the dataset to create a non-zero starting chunk/file structure
        splits = split_dataset(
            sample_dataset,
            splits={"train": [0, 1, 2], "val": [3, 4]},
            output_dir=tmp_path / "splits",
        )
        train_dataset = splits["train"]
        # Record chunk/file indices for every episode before the modification
        if train_dataset.meta.episodes is None:
            from lerobot.datasets.utils import load_episodes

            train_dataset.meta.episodes = load_episodes(train_dataset.meta.root)
        original_chunk_indices = [ep["data/chunk_index"] for ep in train_dataset.meta.episodes]
        original_file_indices = [ep["data/file_index"] for ep in train_dataset.meta.episodes]
        # Now add a feature to the split dataset
        modified_dataset = add_features(
            train_dataset,
            features={
                "reward": (
                    np.random.randn(train_dataset.meta.total_frames, 1).astype(np.float32),
                    feature_info,
                ),
            },
            output_dir=tmp_path / "modified",
        )
        # Check that chunk/file indices are preserved
        if modified_dataset.meta.episodes is None:
            from lerobot.datasets.utils import load_episodes

            modified_dataset.meta.episodes = load_episodes(modified_dataset.meta.root)
        new_chunk_indices = [ep["data/chunk_index"] for ep in modified_dataset.meta.episodes]
        new_file_indices = [ep["data/file_index"] for ep in modified_dataset.meta.episodes]
        assert new_chunk_indices == original_chunk_indices, "Chunk indices should be preserved"
        assert new_file_indices == original_file_indices, "File indices should be preserved"
        assert "reward" in modified_dataset.meta.features
def test_modify_tasks_single_task_for_all(sample_dataset):
    """Assigning one task to every episode yields exactly one task with index 0."""
    task_label = "Pick up the cube and place it"
    updated = modify_tasks(sample_dataset, new_task=task_label)
    # Only one task should remain, and it must be the new one.
    assert len(updated.meta.tasks) == 1
    assert task_label in updated.meta.tasks.index
    # Every frame now points at that single task.
    for frame_idx in range(len(updated)):
        frame = updated[frame_idx]
        assert frame["task_index"].item() == 0
        assert frame["task"] == task_label
def test_modify_tasks_episode_specific(sample_dataset):
    """Per-episode task overrides are stored and duplicates are collapsed."""
    per_episode = {
        0: "Task A",
        1: "Task B",
        2: "Task A",
        3: "Task C",
        4: "Task B",
    }
    updated = modify_tasks(sample_dataset, episode_tasks=per_episode)
    # Duplicated labels are stored only once.
    assert len(updated.meta.tasks) == len(set(per_episode.values()))
    # Each episode carries exactly the task it was assigned.
    for ep_idx, expected_task in per_episode.items():
        assert updated.meta.episodes[ep_idx]["tasks"][0] == expected_task
def test_modify_tasks_default_with_overrides(sample_dataset):
    """A default task applies everywhere except the explicitly overridden episodes."""
    default_task = "Default task"
    override_task = "Special task"
    overrides = {2: override_task, 4: override_task}
    updated = modify_tasks(
        sample_dataset,
        new_task=default_task,
        episode_tasks=overrides,
    )
    # Exactly two distinct tasks should exist afterwards.
    assert len(updated.meta.tasks) == 2
    assert default_task in updated.meta.tasks.index
    assert override_task in updated.meta.tasks.index
    # Overridden episodes carry the special task, all others the default.
    for ep_idx in range(5):
        expected = override_task if ep_idx in overrides else default_task
        assert updated.meta.episodes[ep_idx]["tasks"][0] == expected
def test_modify_tasks_no_task_specified(sample_dataset):
    """Calling modify_tasks with neither argument must raise a ValueError."""
    expected_message = "Must specify at least one of new_task or episode_tasks"
    with pytest.raises(ValueError, match=expected_message):
        modify_tasks(sample_dataset)
def test_modify_tasks_invalid_episode_indices(sample_dataset):
    """Out-of-range episode indices must be rejected with a ValueError."""
    bad_overrides = {10: "Task", 20: "Task"}
    with pytest.raises(ValueError, match="Invalid episode indices"):
        modify_tasks(sample_dataset, episode_tasks=bad_overrides)
def test_modify_tasks_updates_info_json(sample_dataset):
    """total_tasks must reflect the number of unique tasks after modification."""
    per_episode = {0: "Task A", 1: "Task B", 2: "Task C", 3: "Task A", 4: "Task B"}
    updated = modify_tasks(sample_dataset, episode_tasks=per_episode)
    # Three distinct labels were assigned above.
    assert updated.meta.total_tasks == 3
def test_modify_tasks_preserves_other_metadata(sample_dataset):
    """Changing tasks leaves frame/episode counts and fps untouched."""
    # Capture the metadata before the (in-place) modification.
    before = (
        sample_dataset.meta.total_frames,
        sample_dataset.meta.total_episodes,
        sample_dataset.meta.fps,
    )
    updated = modify_tasks(sample_dataset, new_task="New task")
    assert updated.meta.total_frames == before[0]
    assert updated.meta.total_episodes == before[1]
    assert updated.meta.fps == before[2]
def test_modify_tasks_task_index_correct(sample_dataset):
    """task_index values written to the data must match the task ordering."""
    # Labels chosen so their sorted (alphabetical) order is predictable.
    per_episode = {
        0: "Alpha task",  # Will be index 0
        1: "Beta task",  # Will be index 1
        2: "Alpha task",  # Will be index 0
        3: "Gamma task",  # Will be index 2
        4: "Beta task",  # Will be index 1
    }
    updated = modify_tasks(sample_dataset, episode_tasks=per_episode)
    expected_index = {
        "Alpha task": 0,
        "Beta task": 1,
        "Gamma task": 2,
    }
    # Every frame must carry the task (and index) of its episode.
    for frame_idx in range(len(updated)):
        frame = updated[frame_idx]
        episode_task = per_episode[frame["episode_index"].item()]
        assert frame["task_index"].item() == expected_index[episode_task]
        assert frame["task"] == episode_task
def test_modify_tasks_in_place(sample_dataset):
    """modify_tasks mutates and returns the very same dataset object."""
    root_before = sample_dataset.root
    returned = modify_tasks(sample_dataset, new_task="New task")
    # Identity (not just equality) and an unchanged root prove in-place behavior.
    assert returned is sample_dataset
    assert returned.root == root_before
def test_modify_tasks_keeps_original_when_not_overridden(sample_dataset):
    """Episodes without an override keep their original task labels."""
    from lerobot.datasets.utils import load_episodes

    # Ensure episodes metadata is loaded before we read the original labels.
    if sample_dataset.meta.episodes is None:
        sample_dataset.meta.episodes = load_episodes(sample_dataset.meta.root)
    untouched = {ep: sample_dataset.meta.episodes[ep]["tasks"][0] for ep in (0, 1)}
    # Only episodes 2, 3 and 4 get new labels.
    overrides = {2: "New Task A", 3: "New Task B", 4: "New Task A"}
    updated = modify_tasks(sample_dataset, episode_tasks=overrides)
    # Episodes 0 and 1 keep their original tasks ...
    for ep_idx, original_task in untouched.items():
        assert updated.meta.episodes[ep_idx]["tasks"][0] == original_task
    # ... while the overridden episodes carry the new labels.
    for ep_idx, new_label in overrides.items():
        assert updated.meta.episodes[ep_idx]["tasks"][0] == new_label
def test_convert_image_to_video_dataset(tmp_path):
    """Test converting lerobot/pusht_image dataset to video format.

    NOTE(review): downloads lerobot/pusht_image from the Hub, so this test
    needs network access and an ffmpeg build supporting the chosen codec.
    """
    from lerobot.datasets.lerobot_dataset import LeRobotDataset

    # Load the actual lerobot/pusht_image dataset (only first 2 episodes for speed)
    source_dataset = LeRobotDataset("lerobot/pusht_image", episodes=[0, 1])
    output_dir = tmp_path / "pusht_video"
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version,
        patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download,
    ):
        mock_get_safe_version.return_value = "v3.0"
        mock_snapshot_download.return_value = str(output_dir)
        # Verify source dataset has images, not videos
        assert len(source_dataset.meta.video_keys) == 0
        assert "observation.image" in source_dataset.meta.features
        # Convert to video dataset (only first 2 episodes for speed)
        video_dataset = convert_image_to_video_dataset(
            dataset=source_dataset,
            output_dir=output_dir,
            repo_id="lerobot/pusht_video",
            vcodec="libsvtav1",
            pix_fmt="yuv420p",
            g=2,
            crf=30,
            episode_indices=[0, 1],
            num_workers=2,
        )
        # Verify new dataset has videos
        assert len(video_dataset.meta.video_keys) > 0
        assert "observation.image" in video_dataset.meta.video_keys
        # Verify correct number of episodes and frames (2 episodes)
        assert video_dataset.meta.total_episodes == 2
        # Compare against the actual number of frames in the loaded episodes, not metadata total
        assert len(video_dataset) == len(source_dataset)
        # Verify video files exist
        for ep_idx in range(video_dataset.meta.total_episodes):
            for video_key in video_dataset.meta.video_keys:
                video_path = video_dataset.root / video_dataset.meta.get_video_file_path(ep_idx, video_key)
                assert video_path.exists(), f"Video file should exist: {video_path}"
        # Verify we can load the dataset and access it
        assert len(video_dataset) == video_dataset.meta.total_frames
        # Test that we can actually get an item from the video dataset
        item = video_dataset[0]
        assert "observation.image" in item
        assert "action" in item
    # Cleanup
    import shutil

    if output_dir.exists():
        shutil.rmtree(output_dir)
def test_convert_image_to_video_dataset_subset_episodes(tmp_path):
    """Test converting only specific episodes from lerobot/pusht_image to video format.

    NOTE(review): downloads lerobot/pusht_image from the Hub — requires network access.
    """
    from lerobot.datasets.lerobot_dataset import LeRobotDataset

    # Load the actual lerobot/pusht_image dataset (only first 3 episodes)
    source_dataset = LeRobotDataset("lerobot/pusht_image", episodes=[0, 1, 2])
    output_dir = tmp_path / "pusht_video_subset"
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version,
        patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download,
    ):
        mock_get_safe_version.return_value = "v3.0"
        mock_snapshot_download.return_value = str(output_dir)
        # Convert only episode 0 to video (subset of loaded episodes)
        episode_indices = [0]
        video_dataset = convert_image_to_video_dataset(
            dataset=source_dataset,
            output_dir=output_dir,
            repo_id="lerobot/pusht_video_subset",
            episode_indices=episode_indices,
            num_workers=2,
        )
        # Verify correct number of episodes
        assert video_dataset.meta.total_episodes == len(episode_indices)
        # Verify video files exist for selected episodes
        assert len(video_dataset.meta.video_keys) > 0
        assert "observation.image" in video_dataset.meta.video_keys
    # Cleanup
    import shutil

    if output_dir.exists():
        shutil.rmtree(output_dir)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/datasets/test_dataset_tools.py",
"license": "Apache License 2.0",
"lines": 1035,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:src/lerobot/datasets/v30/augment_dataset_quantile_stats.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script augments existing LeRobot datasets with quantile statistics.
Most datasets created before the quantile feature was added do not contain
quantile statistics (q01, q10, q50, q90, q99) in their metadata. This script:
1. Loads an existing LeRobot dataset in v3.0 format
2. Checks if it already contains quantile statistics
3. If missing, computes quantile statistics for all features
4. Updates the dataset metadata with the new quantile statistics
Usage:
```bash
python src/lerobot/datasets/v30/augment_dataset_quantile_stats.py \
--repo-id=lerobot/pusht \
```
"""
import argparse
import concurrent.futures
import logging
from pathlib import Path
import numpy as np
import torch
from huggingface_hub import HfApi
from requests import HTTPError
from tqdm import tqdm
from lerobot.datasets.compute_stats import DEFAULT_QUANTILES, aggregate_stats, get_feature_stats
from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
from lerobot.datasets.utils import write_stats
from lerobot.utils.utils import init_logging
def has_quantile_stats(stats: dict[str, dict] | None, quantile_list_keys: list[str] | None = None) -> bool:
"""Check if dataset statistics already contain quantile information.
Args:
stats: Dataset statistics dictionary
Returns:
True if quantile statistics are present, False otherwise
"""
if quantile_list_keys is None:
quantile_list_keys = [f"q{int(q * 100):02d}" for q in DEFAULT_QUANTILES]
if stats is None:
return False
for feature_stats in stats.values():
if any(q_key in feature_stats for q_key in quantile_list_keys):
return True
return False
def process_single_episode(dataset: LeRobotDataset, episode_idx: int) -> dict:
    """Process a single episode and return its statistics.

    Args:
        dataset: The LeRobot dataset
        episode_idx: Index of the episode to process

    Returns:
        Dictionary containing episode statistics: per-feature output of
        ``get_feature_stats`` computed with DEFAULT_QUANTILES.
    """
    logging.info(f"Computing stats for episode {episode_idx}")
    # Frame range of this episode within the flat dataset index.
    start_idx = dataset.meta.episodes[episode_idx]["dataset_from_index"]
    end_idx = dataset.meta.episodes[episode_idx]["dataset_to_index"]
    collected_data: dict[str, list] = {}
    for idx in range(start_idx, end_idx):
        item = dataset[idx]
        for key, value in item.items():
            # Only accumulate declared dataset features; skip extra item keys.
            if key not in dataset.features:
                continue
            if key not in collected_data:
                collected_data[key] = []
            collected_data[key].append(value)
    ep_stats = {}
    for key, data_list in collected_data.items():
        # String features (e.g. task text) have no numeric statistics.
        if dataset.features[key]["dtype"] == "string":
            continue
        data = torch.stack(data_list).cpu().numpy()
        if dataset.features[key]["dtype"] in ["image", "video"]:
            # Normalize uint8 pixels to [0, 1] floats before computing stats.
            if data.dtype == np.uint8:
                data = data.astype(np.float32) / 255.0
            # Reduce over batch/height/width, keep the channel axis
            # (assumes frames stack to (N, C, H, W) — TODO confirm).
            axes_to_reduce = (0, 2, 3)
            keepdims = True
        else:
            axes_to_reduce = 0
            keepdims = data.ndim == 1
        ep_stats[key] = get_feature_stats(
            data, axis=axes_to_reduce, keepdims=keepdims, quantile_list=DEFAULT_QUANTILES
        )
        if dataset.features[key]["dtype"] in ["image", "video"]:
            # Drop the kept batch axis so image stats lose the leading dim of 1.
            ep_stats[key] = {
                k: v if k == "count" else np.squeeze(v, axis=0) for k, v in ep_stats[key].items()
            }
    return ep_stats
def compute_quantile_stats_for_dataset(dataset: LeRobotDataset) -> dict[str, dict]:
    """Compute quantile statistics for all episodes in the dataset.

    Args:
        dataset: The LeRobot dataset to compute statistics for

    Returns:
        Dictionary containing aggregated statistics with quantiles

    Note:
        Video decoding operations are not thread-safe, so we process episodes sequentially
        when video keys are present. For datasets without videos, we use parallel processing
        with ThreadPoolExecutor for better performance.
    """
    logging.info(f"Computing quantile statistics for dataset with {dataset.num_episodes} episodes")
    episode_stats_list = []
    has_videos = len(dataset.meta.video_keys) > 0
    if has_videos:
        logging.info("Dataset contains video keys - using sequential processing for thread safety")
        for episode_idx in tqdm(range(dataset.num_episodes), desc="Processing episodes"):
            ep_stats = process_single_episode(dataset, episode_idx)
            episode_stats_list.append(ep_stats)
    else:
        logging.info("Dataset has no video keys - using parallel processing for better performance")
        # Cap the pool size so small datasets don't spawn idle threads.
        max_workers = min(dataset.num_episodes, 16)
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_episode = {
                executor.submit(process_single_episode, dataset, episode_idx): episode_idx
                for episode_idx in range(dataset.num_episodes)
            }
            episode_results = {}
            with tqdm(total=dataset.num_episodes, desc="Processing episodes") as pbar:
                for future in concurrent.futures.as_completed(future_to_episode):
                    episode_idx = future_to_episode[future]
                    ep_stats = future.result()
                    episode_results[episode_idx] = ep_stats
                    pbar.update(1)
            # as_completed yields out of order; reassemble results in episode order.
            for episode_idx in range(dataset.num_episodes):
                if episode_idx in episode_results:
                    episode_stats_list.append(episode_results[episode_idx])
    if not episode_stats_list:
        raise ValueError("No episode data found for computing statistics")
    logging.info(f"Aggregating statistics from {len(episode_stats_list)} episodes")
    return aggregate_stats(episode_stats_list)
def augment_dataset_with_quantile_stats(
    repo_id: str,
    root: str | Path | None = None,
    overwrite: bool = False,
) -> None:
    """Augment a dataset with quantile statistics if they are missing.

    Args:
        repo_id: Repository ID of the dataset (e.g. 'lerobot/pusht')
        root: Local root directory for the dataset; None uses the default location
        overwrite: Recompute and overwrite quantile statistics even if present
    """
    logging.info(f"Loading dataset: {repo_id}")
    dataset = LeRobotDataset(
        repo_id=repo_id,
        root=root,
    )
    if not overwrite and has_quantile_stats(dataset.meta.stats):
        logging.info("Dataset already contains quantile statistics. No action needed.")
        return
    logging.info("Dataset does not contain quantile statistics. Computing them now...")
    new_stats = compute_quantile_stats_for_dataset(dataset)
    logging.info("Updating dataset metadata with new quantile statistics")
    dataset.meta.stats = new_stats
    write_stats(new_stats, dataset.meta.root)
    logging.info("Successfully updated dataset with quantile statistics")
    dataset.push_to_hub()
    # Re-point the codebase-version tag at the freshly pushed revision: delete
    # the existing tag if present (a missing tag is not an error), then recreate it.
    hub_api = HfApi()
    try:
        hub_api.delete_tag(repo_id, tag=CODEBASE_VERSION, repo_type="dataset")
    except HTTPError as e:
        # Removed a redundant `pass` that followed this log statement.
        logging.info(f"tag={CODEBASE_VERSION} probably doesn't exist. Skipping exception ({e})")
    hub_api.create_tag(repo_id, tag=CODEBASE_VERSION, revision=None, repo_type="dataset")
def main():
    """Parse command-line arguments and run the quantile-stats augmentation."""
    parser = argparse.ArgumentParser(description="Augment LeRobot dataset with quantile statistics")
    parser.add_argument(
        "--repo-id",
        type=str,
        required=True,
        help="Repository ID of the dataset (e.g., 'lerobot/pusht')",
    )
    parser.add_argument(
        "--root",
        type=str,
        help="Local root directory for the dataset",
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="Overwrite existing quantile statistics if they already exist",
    )
    cli_args = parser.parse_args()
    init_logging()
    # An absent --root falls back to the library's default dataset location.
    dataset_root = Path(cli_args.root) if cli_args.root else None
    augment_dataset_with_quantile_stats(
        repo_id=cli_args.repo_id,
        root=dataset_root,
        overwrite=cli_args.overwrite,
    )


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/datasets/v30/augment_dataset_quantile_stats.py",
"license": "Apache License 2.0",
"lines": 200,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/pi05/configuration_pi05.py | #!/usr/bin/env python
# Copyright 2025 Physical Intelligence and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from lerobot.configs.policies import PreTrainedConfig
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.optim.optimizers import AdamWConfig
from lerobot.optim.schedulers import CosineDecayWithWarmupSchedulerConfig
from lerobot.policies.rtc.configuration_rtc import RTCConfig
from lerobot.utils.constants import ACTION, OBS_IMAGES, OBS_STATE
DEFAULT_IMAGE_SIZE = 224
@PreTrainedConfig.register_subclass("pi05")
@dataclass
class PI05Config(PreTrainedConfig):
    """Configuration for the Pi0.5 policy (PaliGemma VLM + Gemma action expert).

    Defaults mirror the openpi reference implementation; inline notes point to
    the matching openpi symbols. Fix: a duplicate `tokenizer_max_length` field
    declaration (same type and default, declared twice) has been removed.
    """

    paligemma_variant: str = "gemma_2b"
    action_expert_variant: str = "gemma_300m"
    dtype: str = "float32"  # Options: "bfloat16", "float32"
    n_obs_steps: int = 1
    chunk_size: int = 50  # Number of action steps to predict, in openpi called "action_horizon"
    n_action_steps: int = 50  # Number of action steps to execute
    # Shorter state and action vectors will be padded to these dimensions
    max_state_dim: int = 32
    max_action_dim: int = 32
    # Flow matching parameters: see openpi `PI0Pytorch`
    num_inference_steps: int = 10
    time_sampling_beta_alpha: float = 1.5
    time_sampling_beta_beta: float = 1.0
    time_sampling_scale: float = 0.999
    time_sampling_offset: float = 0.001
    min_period: float = 4e-3
    max_period: float = 4.0
    # Real-Time Chunking (RTC) configuration
    rtc_config: RTCConfig | None = None
    image_resolution: tuple[int, int] = (
        DEFAULT_IMAGE_SIZE,
        DEFAULT_IMAGE_SIZE,
    )  # see openpi `preprocessing_pytorch.py`
    # Add empty images. Used to add empty cameras when no image features are present.
    empty_cameras: int = 0
    tokenizer_max_length: int = 200  # see openpi `__post_init__`
    normalization_mapping: dict[str, NormalizationMode] = field(
        default_factory=lambda: {
            "VISUAL": NormalizationMode.IDENTITY,
            "STATE": NormalizationMode.QUANTILES,  # Pi0.5 uses quantiles for state
            "ACTION": NormalizationMode.QUANTILES,  # Pi0.5 uses quantiles for action
        }
    )
    # Training settings
    gradient_checkpointing: bool = False  # Enable gradient checkpointing for memory optimization
    compile_model: bool = False  # Whether to use torch.compile for model optimization
    compile_mode: str = "max-autotune"  # Torch compile mode
    device: str | None = None  # Device to use for the model (None = auto-detect)
    # Finetuning settings
    freeze_vision_encoder: bool = False  # Freeze only the vision encoder
    train_expert_only: bool = False  # Freeze entire VLM, train only action expert and projections
    # Optimizer settings: see openpi `AdamW`
    optimizer_lr: float = 2.5e-5  # see openpi `CosineDecaySchedule: peak_lr`
    optimizer_betas: tuple[float, float] = (0.9, 0.95)
    optimizer_eps: float = 1e-8
    optimizer_weight_decay: float = 0.01
    optimizer_grad_clip_norm: float = 1.0
    # Scheduler settings: see openpi `CosineDecaySchedule`
    # Note: These will auto-scale if --steps < scheduler_decay_steps
    # For example, --steps=3000 will scale warmup to 100 and decay to 3000
    scheduler_warmup_steps: int = 1_000
    scheduler_decay_steps: int = 30_000
    scheduler_decay_lr: float = 2.5e-6

    def __post_init__(self):
        """Validate hyperparameter combinations after dataclass initialization."""
        super().__post_init__()
        # Validate configuration
        if self.n_action_steps > self.chunk_size:
            raise ValueError(
                f"n_action_steps ({self.n_action_steps}) cannot be greater than chunk_size ({self.chunk_size})"
            )
        if self.paligemma_variant not in ["gemma_300m", "gemma_2b"]:
            raise ValueError(f"Invalid paligemma_variant: {self.paligemma_variant}")
        if self.action_expert_variant not in ["gemma_300m", "gemma_2b"]:
            raise ValueError(f"Invalid action_expert_variant: {self.action_expert_variant}")
        if self.dtype not in ["bfloat16", "float32"]:
            raise ValueError(f"Invalid dtype: {self.dtype}")

    def validate_features(self) -> None:
        """Validate and set up input/output features."""
        for i in range(self.empty_cameras):
            key = OBS_IMAGES + f".empty_camera_{i}"
            empty_camera = PolicyFeature(
                type=FeatureType.VISUAL,
                shape=(3, *self.image_resolution),  # Use configured image resolution
            )
            self.input_features[key] = empty_camera
        if OBS_STATE not in self.input_features:
            state_feature = PolicyFeature(
                type=FeatureType.STATE,
                shape=(self.max_state_dim,),  # Padded to max_state_dim
            )
            self.input_features[OBS_STATE] = state_feature
        if ACTION not in self.output_features:
            action_feature = PolicyFeature(
                type=FeatureType.ACTION,
                shape=(self.max_action_dim,),  # Padded to max_action_dim
            )
            self.output_features[ACTION] = action_feature

    def get_optimizer_preset(self) -> AdamWConfig:
        """Return the AdamW optimizer settings used to train Pi0.5."""
        return AdamWConfig(
            lr=self.optimizer_lr,
            betas=self.optimizer_betas,
            eps=self.optimizer_eps,
            weight_decay=self.optimizer_weight_decay,
            grad_clip_norm=self.optimizer_grad_clip_norm,
        )

    def get_scheduler_preset(self):
        """Return the warmup + cosine-decay learning-rate schedule configuration."""
        return CosineDecayWithWarmupSchedulerConfig(
            peak_lr=self.optimizer_lr,
            decay_lr=self.scheduler_decay_lr,
            num_warmup_steps=self.scheduler_warmup_steps,
            num_decay_steps=self.scheduler_decay_steps,
        )

    @property
    def observation_delta_indices(self) -> None:
        # No temporal deltas are applied to observations.
        return None

    @property
    def action_delta_indices(self) -> list:
        # Deltas cover the full predicted action chunk.
        return list(range(self.chunk_size))

    @property
    def reward_delta_indices(self) -> None:
        # No temporal deltas are applied to rewards.
        return None
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/pi05/configuration_pi05.py",
"license": "Apache License 2.0",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/pi05/modeling_pi05.py | #!/usr/bin/env python
# Copyright 2025 Physical Intelligence and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
import copy
import logging
import math
from collections import deque
from pathlib import Path
from typing import TYPE_CHECKING, Literal, TypedDict, Unpack
import torch
import torch.nn.functional as F # noqa: N812
from torch import Tensor, nn
from lerobot.utils.import_utils import _transformers_available
# Conditional import for type checking and lazy loading
if TYPE_CHECKING or _transformers_available:
from transformers.models.auto import CONFIG_MAPPING
from transformers.models.gemma import modeling_gemma
from lerobot.policies.pi_gemma import (
PaliGemmaForConditionalGenerationWithPiGemma,
PiGemmaForCausalLM,
_gated_residual,
layernorm_forward,
)
else:
CONFIG_MAPPING = None
modeling_gemma = None
PiGemmaForCausalLM = None
_gated_residual = None
layernorm_forward = None
PaliGemmaForConditionalGenerationWithPiGemma = None
from lerobot.configs.policies import PreTrainedConfig
from lerobot.policies.pi05.configuration_pi05 import DEFAULT_IMAGE_SIZE, PI05Config
from lerobot.policies.pretrained import PreTrainedPolicy, T
from lerobot.policies.rtc.modeling_rtc import RTCProcessor
from lerobot.utils.constants import (
ACTION,
OBS_LANGUAGE_ATTENTION_MASK,
OBS_LANGUAGE_TOKENS,
OPENPI_ATTENTION_MASK_VALUE,
)
class ActionSelectKwargs(TypedDict, total=False):
    """Optional keyword arguments for action selection (every key may be omitted)."""

    # NOTE(review): presumably the real-time-chunking inference lag in steps — confirm.
    inference_delay: int | None
    # NOTE(review): presumably leftover actions from the previous chunk — confirm.
    prev_chunk_left_over: Tensor | None
    # NOTE(review): presumably how many chunk steps are executed before re-planning — confirm.
    execution_horizon: int | None
def get_safe_dtype(target_dtype, device_type):
    """Return `target_dtype`, downgraded when the device cannot handle it.

    float64 is mapped to float32 on MPS, and bfloat16 to float32 on CPU;
    every other (dtype, device) combination is returned unchanged.
    """
    if device_type == "mps" and target_dtype == torch.float64:
        return torch.float32
    if device_type == "cpu" and target_dtype == torch.bfloat16:
        return torch.float32
    return target_dtype
def create_sinusoidal_pos_embedding(  # see openpi `create_sinusoidal_pos_embedding`
    time: torch.Tensor, dimension: int, min_period: float, max_period: float, device="cpu"
) -> Tensor:
    """Computes sine-cosine positional embedding vectors for scalar positions.

    Args:
        time: 1-D tensor of shape (batch_size,) with scalar positions.
        dimension: Output embedding size; must be even (half sine, half cosine).
        min_period: Shortest oscillation period.
        max_period: Longest oscillation period.
        device: Target device, as a `torch.device` or a device string.

    Returns:
        Tensor of shape (batch_size, dimension).

    Raises:
        ValueError: If `dimension` is odd or `time` is not 1-D.
    """
    if dimension % 2 != 0:
        raise ValueError(f"dimension ({dimension}) must be divisible by 2")
    if time.ndim != 1:
        raise ValueError("The time tensor is expected to be of shape `(batch_size, )`.")
    # Bug fix: the previous code accessed `device.type` directly, which crashed
    # whenever `device` was given as a string — including the default "cpu".
    # Normalizing to torch.device accepts both strings and device objects.
    device = torch.device(device)
    dtype = get_safe_dtype(torch.float64, device.type)
    fraction = torch.linspace(0.0, 1.0, dimension // 2, dtype=dtype, device=device)
    period = min_period * (max_period / min_period) ** fraction
    # Compute the outer product
    scaling_factor = 1.0 / period * 2 * math.pi
    sin_input = scaling_factor[None, :] * time[:, None]
    return torch.cat([torch.sin(sin_input), torch.cos(sin_input)], dim=1)
def sample_beta(alpha, beta, bsize, device):  # see openpi `sample_beta` (exact copy)
    """Draw `bsize` samples from Beta(alpha, beta) on CPU, then move to `device`.

    Sampling is done on CPU because torch's Beta distribution relies on
    `_sample_dirichlet`, which has no MPS implementation.
    """
    beta_dist = torch.distributions.Beta(
        torch.tensor(alpha, dtype=torch.float32),
        torch.tensor(beta, dtype=torch.float32),
    )
    return beta_dist.sample((bsize,)).to(device)
def make_att_2d_masks(pad_masks, att_masks):  # see openpi `make_att_2d_masks` (exact copy)
    """Copied from big_vision: expand 1-D masks into a 2-D attention mask.

    `att_masks` partitions the sequence into blocks via its cumulative sum:
    a token may attend every token whose cumulative value is <= its own.
    Examples:
      [[1 1 1 1 1 1]]: pure causal attention.
      [[0 0 0 1 1 1]]: prefix-lm — first 3 tokens attend bidirectionally,
          last 3 causally (a leading 1 would behave identically).
      [[1 0 1 0 1 0 0 1 0 0]]: 4 blocks; each block attends itself and all
          previous blocks.

    Args:
        pad_masks: bool[B, N], True where the token is real input (not padding).
        att_masks: int[B, N], 1 where previous tokens cannot depend on the
            token, 0 where it joins the previous token's attention block.

    Returns:
        bool[B, N, N] combined attention mask.
    """
    if att_masks.ndim != 2:
        raise ValueError(att_masks.ndim)
    if pad_masks.ndim != 2:
        raise ValueError(pad_masks.ndim)
    block_ids = torch.cumsum(att_masks, dim=1)
    # Token i may attend token j iff j's block id does not exceed i's.
    block_attend_masks = block_ids[:, None, :] <= block_ids[:, :, None]
    # Both endpoints of an attention edge must be real (non-padding) tokens.
    valid_pair_masks = pad_masks[:, None, :] * pad_masks[:, :, None]
    return block_attend_masks & valid_pair_masks
def pad_vector(vector, new_dim):
    """Zero-pad the trailing dimension of `vector` up to `new_dim`.

    Accepts (batch_size x features_dimension) or
    (batch_size x sequence_length x features_dimension) tensors. Inputs whose
    last dimension is already >= `new_dim` are returned unchanged.
    """
    current_dim = vector.shape[-1]
    if current_dim >= new_dim:
        return vector
    return F.pad(vector, (0, new_dim - current_dim))
def resize_with_pad_torch(  # see openpi `resize_with_pad_torch` (exact copy)
    images: torch.Tensor,
    height: int,
    width: int,
    mode: str = "bilinear",
) -> torch.Tensor:
    """Resize to (height, width) without distortion, padding with black.

    Accepts [*b, h, w, c] or [*b, c, h, w] tensors; a trailing dimension of at
    most 4 is treated as channels-last. A 3-D input gains a batch dimension
    that is kept in the output. uint8 results are rounded back to uint8;
    float32 results are clamped to [0, 1].

    Args:
        images: Image tensor (uint8 or float32).
        height: Target height.
        width: Target width.
        mode: Interpolation mode ('bilinear', 'nearest', etc.)

    Returns:
        Resized and padded tensor in the same channel layout as the input.

    Raises:
        ValueError: For dtypes other than uint8/float32.
    """
    # Heuristic: a small trailing dim means channels-last ([*b, h, w, c]).
    channels_last = images.shape[-1] <= 4
    if images.dim() == 3:
        images = images.unsqueeze(0)  # add batch dimension
    if channels_last:
        images = images.permute(0, 3, 1, 2)  # [b, h, w, c] -> [b, c, h, w]

    cur_height, cur_width = images.shape[-2:]
    # Shrink by the dominant axis so the result fits inside the target box.
    ratio = max(cur_width / width, cur_height / height)
    new_height = int(cur_height / ratio)
    new_width = int(cur_width / ratio)

    resized = F.interpolate(
        images,
        size=(new_height, new_width),
        mode=mode,
        align_corners=False if mode == "bilinear" else None,
    )

    # Dtype-specific clipping back to the input's value range.
    if images.dtype == torch.uint8:
        resized = torch.round(resized).clamp(0, 255).to(torch.uint8)
    elif images.dtype == torch.float32:
        resized = resized.clamp(0.0, 1.0)
    else:
        raise ValueError(f"Unsupported image dtype: {images.dtype}")

    # Center the resized image; odd remainders pad one extra pixel bottom/right.
    pad_top, extra_h = divmod(height - new_height, 2)
    pad_left, extra_w = divmod(width - new_width, 2)
    padded = F.pad(
        resized,
        (pad_left, pad_left + extra_w, pad_top, pad_top + extra_h),  # l, r, t, b
        mode="constant",
        value=0 if images.dtype == torch.uint8 else 0.0,
    )

    # Restore the caller's channel layout.
    return padded.permute(0, 2, 3, 1) if channels_last else padded
# Define the complete layer computation function for gradient checkpointing
def compute_layer_complete(
    layer_idx, inputs_embeds, attention_mask, position_ids, adarms_cond, paligemma, gemma_expert
):
    """Run transformer layer `layer_idx` of both models over one merged sequence.

    The VLM prefix (inputs_embeds[0]) and the action-expert suffix
    (inputs_embeds[1]) are each projected to Q/K/V by their own layer, attention
    runs once over the concatenated sequence, and each model's output
    projection + MLP is applied to its own slice. Kept at module level so
    `torch.utils.checkpoint` can re-invoke it during recomputation.

    Args:
        layer_idx: Layer index (both models are built with the same depth).
        inputs_embeds: [prefix_hidden, suffix_hidden] activations.
        attention_mask: 4-D additive attention mask over the merged sequence.
        position_ids: Positions for the rotary embedding.
        adarms_cond: [prefix_cond, suffix_cond]; entries may be None.
        paligemma: The PaliGemma VLM wrapper.
        gemma_expert: The action-expert causal LM wrapper.

    Returns:
        [prefix_hidden, suffix_hidden] after this layer.
    """
    models = [paligemma.model.language_model, gemma_expert.model]
    query_states = []
    key_states = []
    value_states = []
    gates = []
    for i, hidden_states in enumerate(inputs_embeds):
        layer = models[i].layers[layer_idx]
        # Pre-attention (possibly adaRMS) norm; `gate` scales the residual below.
        hidden_states, gate = layernorm_forward(layer.input_layernorm, hidden_states, adarms_cond[i])
        gates.append(gate)
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, layer.self_attn.head_dim)
        query_state = layer.self_attn.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_state = layer.self_attn.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_state = layer.self_attn.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        query_states.append(query_state)
        key_states.append(key_state)
        value_states.append(value_state)
    # Concatenate and process attention
    query_states = torch.cat(query_states, dim=2)
    key_states = torch.cat(key_states, dim=2)
    value_states = torch.cat(value_states, dim=2)
    # The rotary embedding only needs shape/device/dtype from its first argument.
    dummy_tensor = torch.zeros(
        query_states.shape[0],
        query_states.shape[2],
        query_states.shape[-1],
        device=query_states.device,
        dtype=query_states.dtype,
    )
    cos, sin = paligemma.model.language_model.rotary_emb(dummy_tensor, position_ids)
    query_states, key_states = modeling_gemma.apply_rotary_pos_emb(
        query_states, key_states, cos, sin, unsqueeze_dim=1
    )
    batch_size = query_states.shape[0]
    scaling = paligemma.model.language_model.layers[layer_idx].self_attn.scaling
    # Attention computation
    att_output, _ = modeling_gemma.eager_attention_forward(
        paligemma.model.language_model.layers[layer_idx].self_attn,
        query_states,
        key_states,
        value_states,
        attention_mask,
        scaling,
    )
    # Get head_dim from the current layer, not from the model
    head_dim = paligemma.model.language_model.layers[layer_idx].self_attn.head_dim
    # NOTE(review): `1 * 8` hard-codes 8 attention heads — matches the gemma
    # variants in `get_gemma_config`; verify if other variants are ever added.
    att_output = att_output.reshape(batch_size, -1, 1 * 8 * head_dim)
    # Process layer outputs
    outputs_embeds = []
    start_pos = 0
    for i, hidden_states in enumerate(inputs_embeds):
        layer = models[i].layers[layer_idx]
        end_pos = start_pos + hidden_states.shape[1]
        if att_output.dtype != layer.self_attn.o_proj.weight.dtype:
            att_output = att_output.to(layer.self_attn.o_proj.weight.dtype)
        # Each model projects only its own slice of the merged attention output.
        out_emb = layer.self_attn.o_proj(att_output[:, start_pos:end_pos])
        # first residual
        out_emb = _gated_residual(hidden_states, out_emb, gates[i])
        after_first_residual = out_emb.clone()
        out_emb, gate = layernorm_forward(layer.post_attention_layernorm, out_emb, adarms_cond[i])
        # Convert to bfloat16 if the next layer (mlp) uses bfloat16
        if layer.mlp.up_proj.weight.dtype == torch.bfloat16:
            out_emb = out_emb.to(dtype=torch.bfloat16)
        out_emb = layer.mlp(out_emb)
        # second residual
        out_emb = _gated_residual(after_first_residual, out_emb, gate)
        outputs_embeds.append(out_emb)
        start_pos = end_pos
    return outputs_embeds
class GemmaConfig:  # see openpi `gemma.py: Config`
    """Configuration for Gemma model variants.

    Plain value object holding transformer hyperparameters; instances are
    produced by `get_gemma_config` for the known variants.
    """

    def __init__(self, width, depth, mlp_dim, num_heads, num_kv_heads, head_dim):
        self.width = width  # hidden size
        self.depth = depth  # number of transformer layers
        self.mlp_dim = mlp_dim  # feed-forward intermediate size
        self.num_heads = num_heads  # attention query heads
        self.num_kv_heads = num_kv_heads  # key/value heads (1 == multi-query)
        self.head_dim = head_dim  # per-head dimension
def get_gemma_config(variant: str) -> GemmaConfig:  # see openpi `gemma.py: get_config`
    """Returns config for specified gemma variant.

    Known variants: "gemma_300m" (action expert) and "gemma_2b" (VLM backbone).

    Raises:
        ValueError: If `variant` is not a known gemma variant.
    """
    known_variants = {
        "gemma_300m": {
            "width": 1024,
            "depth": 18,
            "mlp_dim": 4096,
            "num_heads": 8,
            "num_kv_heads": 1,
            "head_dim": 256,
        },
        "gemma_2b": {
            "width": 2048,
            "depth": 18,
            "mlp_dim": 16_384,
            "num_heads": 8,
            "num_kv_heads": 1,
            "head_dim": 256,
        },
    }
    if variant not in known_variants:
        raise ValueError(f"Unknown variant: {variant}")
    return GemmaConfig(**known_variants[variant])
class PaliGemmaWithExpertModel(
    nn.Module
):  # see openpi `gemma_pytorch.py: PaliGemmaWithExpertModel` — almost an exact copy of openpi's class
    """PaliGemma model with action expert for PI05.

    Pairs two transformers that share a single attention stream:
    - `paligemma`: the VLM (SigLIP vision tower + Gemma language model) that
      encodes the image/language prefix.
    - `gemma_expert`: a smaller Gemma ("action expert") that processes the
      action/time suffix, optionally conditioned through adaRMS.
    """

    def __init__(
        self,
        vlm_config,
        action_expert_config,
        use_adarms=None,
        precision: Literal["bfloat16", "float32"] = "bfloat16",
        image_size: int = DEFAULT_IMAGE_SIZE,
        freeze_vision_encoder: bool = False,
        train_expert_only: bool = False,
    ):
        """Build the paired VLM + expert from `GemmaConfig`-style configs.

        Args:
            vlm_config: `GemmaConfig` for the PaliGemma language model.
            action_expert_config: `GemmaConfig` for the action expert.
            use_adarms: Two flags [vlm, expert] enabling adaRMS conditioning.
            precision: Overall parameter dtype; selected params stay float32.
            image_size: Square input resolution for the vision tower.
            freeze_vision_encoder: If True, vision tower gets no gradients.
            train_expert_only: If True, the whole VLM is frozen.
        """
        # Avoid a mutable default argument.
        if use_adarms is None:
            use_adarms = [False, False]
        super().__init__()
        self.freeze_vision_encoder = freeze_vision_encoder
        self.train_expert_only = train_expert_only
        # Build an HF PaliGemma config mirroring the openpi GemmaConfig values.
        vlm_config_hf = CONFIG_MAPPING["paligemma"]()
        vlm_config_hf._vocab_size = 257152  # noqa: SLF001
        vlm_config_hf.image_token_index = 257152
        vlm_config_hf.text_config.hidden_size = vlm_config.width
        vlm_config_hf.text_config.intermediate_size = vlm_config.mlp_dim
        vlm_config_hf.text_config.num_attention_heads = vlm_config.num_heads
        vlm_config_hf.text_config.head_dim = vlm_config.head_dim
        vlm_config_hf.text_config.num_hidden_layers = vlm_config.depth
        vlm_config_hf.text_config.num_key_value_heads = vlm_config.num_kv_heads
        vlm_config_hf.text_config.hidden_activation = "gelu_pytorch_tanh"
        vlm_config_hf.text_config.dtype = "float32"
        vlm_config_hf.text_config.vocab_size = 257152
        vlm_config_hf.text_config.use_adarms = use_adarms[0]
        vlm_config_hf.text_config.adarms_cond_dim = vlm_config.width if use_adarms[0] else None
        vlm_config_hf.vision_config.image_size = image_size
        vlm_config_hf.vision_config.intermediate_size = 4304
        vlm_config_hf.vision_config.projection_dim = 2048
        vlm_config_hf.vision_config.projector_hidden_act = "gelu_fast"
        vlm_config_hf.vision_config.dtype = "float32"
        # HF Gemma config for the (smaller) action expert.
        action_expert_config_hf = CONFIG_MAPPING["gemma"](
            head_dim=action_expert_config.head_dim,
            hidden_size=action_expert_config.width,
            intermediate_size=action_expert_config.mlp_dim,
            num_attention_heads=action_expert_config.num_heads,
            num_hidden_layers=action_expert_config.depth,
            num_key_value_heads=action_expert_config.num_kv_heads,
            vocab_size=257152,
            hidden_activation="gelu_pytorch_tanh",
            dtype="float32",
            use_adarms=use_adarms[1],
            adarms_cond_dim=action_expert_config.width if use_adarms[1] else None,
        )
        self.paligemma = PaliGemmaForConditionalGenerationWithPiGemma(config=vlm_config_hf)
        self.gemma_expert = PiGemmaForCausalLM(config=action_expert_config_hf)
        # The expert consumes embeddings directly; it never embeds token ids.
        self.gemma_expert.model.embed_tokens = None
        self.to_bfloat16_for_selected_params(precision)
        self._set_requires_grad()

    def to_bfloat16_for_selected_params(self, precision: Literal["bfloat16", "float32"] = "bfloat16"):
        """Cast the model to `precision`, keeping selected params in float32."""
        if precision == "bfloat16":
            self.to(dtype=torch.bfloat16)
        elif precision == "float32":
            self.to(dtype=torch.float32)
            # Everything is already full precision; nothing to re-cast below.
            return
        else:
            raise ValueError(f"Invalid precision: {precision}")
        # Keep full vision path in float32 so we never toggle (toggle causes optimizer
        # "same dtype" error). Saves memory vs full float32; more memory than only 3 params.
        params_to_keep_float32 = [
            "vision_tower",
            "multi_modal_projector",
            "input_layernorm",
            "post_attention_layernorm",
            "model.norm",
        ]
        for name, param in self.named_parameters():
            if any(selector in name for selector in params_to_keep_float32):
                param.data = param.data.to(dtype=torch.float32)

    def _set_requires_grad(self):
        """Apply the freeze flags chosen at construction time."""
        if self.freeze_vision_encoder:
            self.paligemma.model.vision_tower.eval()
            for param in self.paligemma.model.vision_tower.parameters():
                param.requires_grad = False
        if self.train_expert_only:
            # Freeze the entire VLM; only the action expert trains.
            self.paligemma.eval()
            for param in self.paligemma.parameters():
                param.requires_grad = False

    def train(self, mode: bool = True):
        """Standard `nn.Module.train`, but frozen submodules stay in eval mode."""
        super().train(mode)
        if self.freeze_vision_encoder:
            self.paligemma.model.vision_tower.eval()
        if self.train_expert_only:
            self.paligemma.eval()

    def embed_image(self, image: torch.Tensor):
        """Encode an image batch with the vision tower, scaled to LM width.

        Computation runs in float32; the result is cast back to the input dtype.
        """
        # Vision tower and multi_modal_projector are kept in float32 (params_to_keep_float32).
        out_dtype = image.dtype
        if image.dtype != torch.float32:
            image = image.to(torch.float32)
        image_outputs = self.paligemma.model.get_image_features(image)
        # Scale by sqrt(hidden_size), matching the language-token embedding scale.
        features = image_outputs.pooler_output * self.paligemma.config.text_config.hidden_size**0.5
        if features.dtype != out_dtype:
            features = features.to(out_dtype)
        return features

    def embed_language_tokens(self, tokens: torch.Tensor):
        """Look up (unscaled) Gemma embeddings for the given token ids."""
        return self.paligemma.model.language_model.embed_tokens(tokens)

    def forward(
        self,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: list[torch.FloatTensor] | None = None,
        inputs_embeds: list[torch.FloatTensor] | None = None,
        use_cache: bool | None = None,
        adarms_cond: list[torch.Tensor] | None = None,
    ):
        """Joint forward over `inputs_embeds == [prefix_embeds, suffix_embeds]`.

        Three modes, selected by which entry of `inputs_embeds` is None:
        - suffix is None: run only the VLM prefix (optionally caching KV);
        - prefix is None: run only the expert suffix against cached prefix KV;
        - both present: interleave both models layer by layer with a shared
          attention computation (`compute_layer_complete`).

        Returns:
            ([prefix_output, suffix_output], prefix_past_key_values) — unused
            entries are None.
        """
        if adarms_cond is None:
            adarms_cond = [None, None]
        if inputs_embeds[1] is None:
            # Prefix-only pass (e.g. to prime the KV cache at inference time).
            prefix_output = self.paligemma.model.language_model.forward(
                inputs_embeds=inputs_embeds[0],
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                adarms_cond=adarms_cond[0] if adarms_cond is not None else None,
            )
            prefix_past_key_values = prefix_output.past_key_values
            prefix_output = prefix_output.last_hidden_state
            suffix_output = None
        elif inputs_embeds[0] is None:
            # Suffix-only pass reusing a previously computed prefix KV cache.
            suffix_output = self.gemma_expert.model.forward(
                inputs_embeds=inputs_embeds[1],
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                adarms_cond=adarms_cond[1] if adarms_cond is not None else None,
            )
            suffix_output = suffix_output.last_hidden_state
            prefix_output = None
            prefix_past_key_values = None
        else:
            # Joint pass: both token streams attend within one merged sequence.
            models = [self.paligemma.model.language_model, self.gemma_expert.model]
            num_layers = self.paligemma.config.text_config.num_hidden_layers
            # Check if gradient checkpointing is enabled for any of the models
            use_gradient_checkpointing = (
                hasattr(self.gemma_expert.model, "gradient_checkpointing")
                and self.gemma_expert.model.gradient_checkpointing
                and self.training
            ) or (hasattr(self, "gradient_checkpointing") and self.gradient_checkpointing and self.training)
            # Process all layers with gradient checkpointing if enabled
            for layer_idx in range(num_layers):
                if use_gradient_checkpointing:
                    inputs_embeds = torch.utils.checkpoint.checkpoint(
                        compute_layer_complete,
                        layer_idx,
                        inputs_embeds,
                        attention_mask,
                        position_ids,
                        adarms_cond,
                        use_reentrant=False,
                        preserve_rng_state=False,
                        paligemma=self.paligemma,
                        gemma_expert=self.gemma_expert,
                    )
                else:
                    inputs_embeds = compute_layer_complete(
                        layer_idx,
                        inputs_embeds,
                        attention_mask,
                        position_ids,
                        adarms_cond,
                        paligemma=self.paligemma,
                        gemma_expert=self.gemma_expert,
                    )

            # final norm
            def compute_final_norms(inputs_embeds, adarms_cond):
                # Apply each model's final (possibly adaRMS) norm to its stream.
                outputs_embeds = []
                for i, hidden_states in enumerate(inputs_embeds):
                    out_emb, _ = layernorm_forward(models[i].norm, hidden_states, adarms_cond[i])
                    outputs_embeds.append(out_emb)
                return outputs_embeds

            # Apply gradient checkpointing to final norm if enabled
            if use_gradient_checkpointing:
                outputs_embeds = torch.utils.checkpoint.checkpoint(
                    compute_final_norms,
                    inputs_embeds,
                    adarms_cond,
                    use_reentrant=False,
                    preserve_rng_state=False,
                )
            else:
                outputs_embeds = compute_final_norms(inputs_embeds, adarms_cond)
            prefix_output = outputs_embeds[0]
            suffix_output = outputs_embeds[1]
            prefix_past_key_values = None
        return [prefix_output, suffix_output], prefix_past_key_values
class PI05Pytorch(nn.Module):  # see openpi `PI0Pytorch`
    """Core PI05 PyTorch model.

    Flow-matching action head on top of `PaliGemmaWithExpertModel`: training
    regresses the flow field u_t = noise - actions at a sampled time t;
    inference integrates the learned field from t=1 (pure noise) down to t=0
    (actions) with Euler steps.
    """

    def __init__(self, config: PI05Config, rtc_processor: RTCProcessor | None = None):
        super().__init__()
        self.config = config
        self.rtc_processor = rtc_processor
        paligemma_config = get_gemma_config(config.paligemma_variant)
        action_expert_config = get_gemma_config(config.action_expert_variant)
        if config.image_resolution[0] != config.image_resolution[1]:
            raise ValueError(
                f"PaliGemma expects square image resolution, invalid resolution: {config.image_resolution}"
            )
        # adaRMS conditioning is enabled only on the action expert (suffix).
        self.paligemma_with_expert = PaliGemmaWithExpertModel(
            paligemma_config,
            action_expert_config,
            use_adarms=[False, True],
            precision=config.dtype,
            image_size=config.image_resolution[0],
            freeze_vision_encoder=config.freeze_vision_encoder,
            train_expert_only=config.train_expert_only,
        )
        # Projections between the (padded) action space and the expert width.
        self.action_in_proj = nn.Linear(config.max_action_dim, action_expert_config.width)
        self.action_out_proj = nn.Linear(action_expert_config.width, config.max_action_dim)
        # MLP turning the sinusoidal time embedding into the adaRMS condition.
        self.time_mlp_in = nn.Linear(action_expert_config.width, action_expert_config.width)
        self.time_mlp_out = nn.Linear(action_expert_config.width, action_expert_config.width)
        # Initialize gradient checkpointing flag
        self.gradient_checkpointing_enabled = False
        # Compile model if requested
        if config.compile_model:
            torch.set_float32_matmul_precision("high")
            self.sample_actions = torch.compile(self.sample_actions, mode=config.compile_mode)
            # Also compile the main forward pass used during training
            self.forward = torch.compile(self.forward, mode=config.compile_mode)

    def gradient_checkpointing_enable(self):
        """Enable gradient checkpointing for memory optimization."""
        self.gradient_checkpointing_enabled = True
        self.paligemma_with_expert.paligemma.model.language_model.gradient_checkpointing = True
        self.paligemma_with_expert.paligemma.model.vision_tower.gradient_checkpointing = True
        self.paligemma_with_expert.gemma_expert.model.gradient_checkpointing = True
        logging.info("Enabled gradient checkpointing for PI05Pytorch model")

    def gradient_checkpointing_disable(self):
        """Disable gradient checkpointing."""
        self.gradient_checkpointing_enabled = False
        self.paligemma_with_expert.paligemma.model.language_model.gradient_checkpointing = False
        self.paligemma_with_expert.paligemma.model.vision_tower.gradient_checkpointing = False
        self.paligemma_with_expert.gemma_expert.model.gradient_checkpointing = False
        logging.info("Disabled gradient checkpointing for PI05Pytorch model")

    def _rtc_enabled(self):
        # Real-time chunking is active only when configured AND enabled.
        return self.config.rtc_config is not None and self.config.rtc_config.enabled

    def _apply_checkpoint(self, func, *args, **kwargs):
        """Helper method to apply gradient checkpointing if enabled (training only)."""
        if self.gradient_checkpointing_enabled and self.training:
            return torch.utils.checkpoint.checkpoint(
                func, *args, use_reentrant=False, preserve_rng_state=False, **kwargs
            )
        return func(*args, **kwargs)

    def _prepare_attention_masks_4d(self, att_2d_masks):
        """Helper method to prepare 4D additive attention masks for the transformer."""
        att_2d_masks_4d = att_2d_masks[:, None, :, :]
        # Allowed positions get 0; blocked positions a large negative value.
        return torch.where(att_2d_masks_4d, 0.0, OPENPI_ATTENTION_MASK_VALUE)

    def sample_noise(self, shape, device):
        """Sample standard-normal float32 noise of the given shape."""
        return torch.normal(
            mean=0.0,
            std=1.0,
            size=shape,
            dtype=torch.float32,
            device=device,
        )

    def sample_time(self, bsize, device):
        """Sample flow-matching times from a scaled/shifted Beta distribution."""
        time_beta = sample_beta(
            self.config.time_sampling_beta_alpha, self.config.time_sampling_beta_beta, bsize, device
        )
        time = time_beta * self.config.time_sampling_scale + self.config.time_sampling_offset
        return time.to(dtype=torch.float32, device=device)

    def embed_prefix(
        self, images, img_masks, tokens, masks
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Embed images with SigLIP and language tokens with embedding layer.

        Returns (embeddings, pad_masks, att_masks). All att_masks entries are 0,
        putting the whole prefix into one shared attention block (see
        `make_att_2d_masks`).
        """
        embs = []
        pad_masks = []
        att_masks = []
        # Process images
        for img, img_mask in zip(images, img_masks, strict=True):

            def image_embed_func(img):
                return self.paligemma_with_expert.embed_image(img)

            img_emb = self._apply_checkpoint(image_embed_func, img)
            bsize, num_img_embs = img_emb.shape[:2]
            embs.append(img_emb)
            # Broadcast the per-camera validity mask over all patch tokens.
            pad_masks.append(img_mask[:, None].expand(bsize, num_img_embs))
            att_masks += [0] * num_img_embs

        # Process language tokens
        def lang_embed_func(tokens):
            lang_emb = self.paligemma_with_expert.embed_language_tokens(tokens)
            lang_emb_dim = lang_emb.shape[-1]
            # Gemma convention: scale token embeddings by sqrt(hidden dim).
            return lang_emb * math.sqrt(lang_emb_dim)

        lang_emb = self._apply_checkpoint(lang_embed_func, tokens)
        embs.append(lang_emb)
        pad_masks.append(masks)
        num_lang_embs = lang_emb.shape[1]
        att_masks += [0] * num_lang_embs
        embs = torch.cat(embs, dim=1)
        pad_masks = torch.cat(pad_masks, dim=1)
        att_masks = torch.tensor(att_masks, dtype=torch.bool, device=pad_masks.device)
        bsize = pad_masks.shape[0]
        att_masks = att_masks[None, :].expand(bsize, len(att_masks))
        return embs, pad_masks, att_masks

    def embed_suffix(self, noisy_actions, timestep):
        """Embed noisy_actions, timestep to prepare for Expert Gemma processing.

        Returns (embeddings, pad_masks, att_masks, adarms_cond). The timestep is
        not added to the action tokens; it reaches the expert only through the
        adaRMS condition.
        """
        embs = []
        pad_masks = []
        att_masks = []
        # Embed timestep using sine-cosine positional encoding
        time_emb = create_sinusoidal_pos_embedding(
            timestep,
            self.action_in_proj.out_features,
            min_period=self.config.min_period,
            max_period=self.config.max_period,
            device=timestep.device,
        )
        time_emb = time_emb.type(dtype=timestep.dtype)

        # Fuse timestep + action information using an MLP
        def action_proj_func(noisy_actions):
            return self.action_in_proj(noisy_actions)

        action_emb = self._apply_checkpoint(action_proj_func, noisy_actions)

        def time_mlp_func(time_emb):
            # Two-layer SiLU MLP on the sinusoidal time embedding.
            x = self.time_mlp_in(time_emb)
            x = F.silu(x)
            x = self.time_mlp_out(x)
            return F.silu(x)

        time_emb = self._apply_checkpoint(time_mlp_func, time_emb)
        action_time_emb = action_emb
        adarms_cond = time_emb
        embs.append(action_time_emb)
        bsize, action_time_dim = action_time_emb.shape[:2]
        action_time_mask = torch.ones(bsize, action_time_dim, dtype=torch.bool, device=timestep.device)
        pad_masks.append(action_time_mask)
        # Set attention masks so that image, language and state inputs do not attend to action tokens
        att_masks += [1] + ([0] * (self.config.chunk_size - 1))
        embs = torch.cat(embs, dim=1)
        pad_masks = torch.cat(pad_masks, dim=1)
        att_masks = torch.tensor(att_masks, dtype=embs.dtype, device=embs.device)
        att_masks = att_masks[None, :].expand(bsize, len(att_masks))
        return embs, pad_masks, att_masks, adarms_cond

    def forward(self, images, img_masks, tokens, masks, actions, noise=None, time=None) -> Tensor:
        """Do a full training forward pass and compute the loss.

        Flow matching: x_t = t*noise + (1-t)*actions with target field
        u_t = noise - actions. Returns the unreduced elementwise MSE between
        u_t and the predicted field v_t.
        """
        if noise is None:
            noise = self.sample_noise(actions.shape, actions.device)
        if time is None:
            time = self.sample_time(actions.shape[0], actions.device)
        time_expanded = time[:, None, None]
        x_t = time_expanded * noise + (1 - time_expanded) * actions
        u_t = noise - actions
        prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(images, img_masks, tokens, masks)
        suffix_embs, suffix_pad_masks, suffix_att_masks, adarms_cond = self.embed_suffix(x_t, time)
        # Match embedding dtype to the transformer weights when running bfloat16.
        if (
            self.paligemma_with_expert.paligemma.model.language_model.layers[0].self_attn.q_proj.weight.dtype
            == torch.bfloat16
        ):
            suffix_embs = suffix_embs.to(dtype=torch.bfloat16)
            prefix_embs = prefix_embs.to(dtype=torch.bfloat16)
        pad_masks = torch.cat([prefix_pad_masks, suffix_pad_masks], dim=1)
        att_masks = torch.cat([prefix_att_masks, suffix_att_masks], dim=1)
        att_2d_masks = make_att_2d_masks(pad_masks, att_masks)
        position_ids = torch.cumsum(pad_masks, dim=1) - 1
        att_2d_masks_4d = self._prepare_attention_masks_4d(att_2d_masks)

        def forward_func(prefix_embs, suffix_embs, att_2d_masks_4d, position_ids, adarms_cond):
            (_, suffix_out), _ = self.paligemma_with_expert.forward(
                attention_mask=att_2d_masks_4d,
                position_ids=position_ids,
                past_key_values=None,
                inputs_embeds=[prefix_embs, suffix_embs],
                use_cache=False,
                adarms_cond=[None, adarms_cond],
            )
            return suffix_out

        suffix_out = self._apply_checkpoint(
            forward_func, prefix_embs, suffix_embs, att_2d_masks_4d, position_ids, adarms_cond
        )
        # Only the last chunk_size tokens of the suffix are action tokens.
        suffix_out = suffix_out[:, -self.config.chunk_size :]
        suffix_out = suffix_out.to(dtype=torch.float32)

        def action_out_proj_func(suffix_out):
            return self.action_out_proj(suffix_out)

        v_t = self._apply_checkpoint(action_out_proj_func, suffix_out)
        return F.mse_loss(u_t, v_t, reduction="none")

    @torch.no_grad()  # see openpi `sample_actions` (slightly adapted)
    def sample_actions(
        self,
        images,
        img_masks,
        tokens,
        masks,
        noise=None,
        num_steps=None,
        **kwargs: Unpack[ActionSelectKwargs],
    ) -> Tensor:
        """Do a full inference forward and compute the action.

        Encodes the prefix once (caching its KV), then Euler-integrates the
        denoising field from t=1 down to t=0 in `num_steps` steps. When RTC is
        enabled, each step is routed through the RTC processor for chunk
        blending with the previous action chunk.
        """
        if num_steps is None:
            num_steps = self.config.num_inference_steps
        bsize = tokens.shape[0]
        device = tokens.device
        if noise is None:
            # Sample noise with padded dimension as expected by action_in_proj
            actions_shape = (
                bsize,
                self.config.chunk_size,
                self.config.max_action_dim,
            )  # Use config max_action_dim for internal processing
            noise = self.sample_noise(actions_shape, device)
        prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(images, img_masks, tokens, masks)
        prefix_att_2d_masks = make_att_2d_masks(prefix_pad_masks, prefix_att_masks)
        prefix_position_ids = torch.cumsum(prefix_pad_masks, dim=1) - 1
        prefix_att_2d_masks_4d = self._prepare_attention_masks_4d(prefix_att_2d_masks)
        self.paligemma_with_expert.paligemma.model.language_model.config._attn_implementation = "eager"  # noqa: SLF001
        # Prefix-only pass to fill the KV cache reused by every denoise step.
        _, past_key_values = self.paligemma_with_expert.forward(
            attention_mask=prefix_att_2d_masks_4d,
            position_ids=prefix_position_ids,
            past_key_values=None,
            inputs_embeds=[prefix_embs, None],
            use_cache=True,
        )
        dt = -1.0 / num_steps  # integrate backwards from t=1 to t=0
        x_t = noise
        for step in range(num_steps):
            time = 1.0 + step * dt
            time_tensor = torch.tensor(time, dtype=torch.float32, device=device).expand(bsize)

            def denoise_step_partial_call(input_x_t, current_timestep=time_tensor):
                return self.denoise_step(
                    prefix_pad_masks=prefix_pad_masks,
                    past_key_values=past_key_values,
                    x_t=input_x_t,
                    timestep=current_timestep,
                )

            if self._rtc_enabled():
                inference_delay = kwargs.get("inference_delay")
                prev_chunk_left_over = kwargs.get("prev_chunk_left_over")
                execution_horizon = kwargs.get("execution_horizon")
                v_t = self.rtc_processor.denoise_step(
                    x_t=x_t,
                    prev_chunk_left_over=prev_chunk_left_over,
                    inference_delay=inference_delay,
                    time=time,
                    original_denoise_step_partial=denoise_step_partial_call,
                    execution_horizon=execution_horizon,
                )
            else:
                v_t = denoise_step_partial_call(x_t)
            x_t = x_t + dt * v_t  # Euler integration step
            if self.rtc_processor is not None and self.rtc_processor.is_debug_enabled():
                self.rtc_processor.track(time=time, x_t=x_t, v_t=v_t)
        return x_t

    def denoise_step(
        self,
        prefix_pad_masks,
        past_key_values,
        x_t,
        timestep,
    ):
        """Apply one denoising step of the noise `x_t` at a given timestep.

        Runs only the expert suffix against the cached prefix keys/values and
        returns the predicted field v_t.
        """
        suffix_embs, suffix_pad_masks, suffix_att_masks, adarms_cond = self.embed_suffix(x_t, timestep)
        suffix_len = suffix_pad_masks.shape[1]
        batch_size = prefix_pad_masks.shape[0]
        prefix_len = prefix_pad_masks.shape[1]
        # Suffix tokens may attend every valid (non-padding) prefix token...
        prefix_pad_2d_masks = prefix_pad_masks[:, None, :].expand(batch_size, suffix_len, prefix_len)
        # ...and follow the block structure among themselves.
        suffix_att_2d_masks = make_att_2d_masks(suffix_pad_masks, suffix_att_masks)
        full_att_2d_masks = torch.cat([prefix_pad_2d_masks, suffix_att_2d_masks], dim=2)
        # Suffix positions continue after the last valid prefix position.
        prefix_offsets = torch.sum(prefix_pad_masks, dim=-1)[:, None]
        position_ids = prefix_offsets + torch.cumsum(suffix_pad_masks, dim=1) - 1
        full_att_2d_masks_4d = self._prepare_attention_masks_4d(full_att_2d_masks)
        self.paligemma_with_expert.gemma_expert.model.config._attn_implementation = "eager"  # noqa: SLF001
        # Deep-copy so repeated steps don't mutate the shared prefix cache.
        past_key_values = copy.deepcopy(past_key_values)
        outputs_embeds, _ = self.paligemma_with_expert.forward(
            attention_mask=full_att_2d_masks_4d,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=[None, suffix_embs],
            use_cache=False,
            adarms_cond=[None, adarms_cond],
        )
        suffix_out = outputs_embeds[1]
        suffix_out = suffix_out[:, -self.config.chunk_size :]
        suffix_out = suffix_out.to(dtype=torch.float32)
        return self.action_out_proj(suffix_out)
class PI05Policy(PreTrainedPolicy):
"""PI05 Policy for LeRobot."""
config_class = PI05Config
name = "pi05"
    def __init__(
        self,
        config: PI05Config,
        **kwargs,
    ):
        """
        Args:
            config: Policy configuration class instance.
            **kwargs: Extra arguments forwarded by `from_pretrained`; unused here.
        """
        super().__init__(config)
        config.validate_features()
        self.config = config
        # Initialize the core PI05 model; the RTC processor must exist first
        # because the model receives it at construction time.
        self.init_rtc_processor()
        self.model = PI05Pytorch(config, rtc_processor=self.rtc_processor)
        # Enable gradient checkpointing if requested
        if config.gradient_checkpointing:
            self.model.gradient_checkpointing_enable()
        self.model.to(config.device)
        self.reset()
@classmethod
def from_pretrained(
cls: builtins.type[T],
pretrained_name_or_path: str | Path,
*,
config: PreTrainedConfig | None = None,
force_download: bool = False,
resume_download: bool | None = None,
proxies: dict | None = None,
token: str | bool | None = None,
cache_dir: str | Path | None = None,
local_files_only: bool = False,
revision: str | None = None,
strict: bool = True,
**kwargs,
) -> T:
"""Override the from_pretrained method to handle key remapping and display important disclaimer."""
print(
"The PI05 model is a direct port of the OpenPI implementation. \n"
"This implementation follows the original OpenPI structure for compatibility. \n"
"Original implementation: https://github.com/Physical-Intelligence/openpi"
)
if pretrained_name_or_path is None:
raise ValueError("pretrained_name_or_path is required")
# Use provided config if available, otherwise create default config
if config is None:
config = PreTrainedConfig.from_pretrained(
pretrained_name_or_path=pretrained_name_or_path,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
token=token,
cache_dir=cache_dir,
local_files_only=local_files_only,
revision=revision,
**kwargs,
)
# Initialize model without loading weights
# Check if dataset_stats were provided in kwargs
model = cls(config, **kwargs)
# Load state dict (expects keys with "model." prefix)
try:
print(f"Loading model from: {pretrained_name_or_path}")
try:
from transformers.utils import cached_file
resolved_file = cached_file(
pretrained_name_or_path,
"model.safetensors",
cache_dir=kwargs.get("cache_dir"),
force_download=kwargs.get("force_download", False),
resume_download=kwargs.get("resume_download"),
proxies=kwargs.get("proxies"),
token=kwargs.get("token"),
revision=kwargs.get("revision"),
local_files_only=kwargs.get("local_files_only", False),
)
from safetensors.torch import load_file
original_state_dict = load_file(resolved_file)
print("✓ Loaded state dict from model.safetensors")
except Exception as e:
print(f"Could not load state dict from remote files: {e}")
print("Returning model without loading pretrained weights")
return model
# First, fix any key differences (see openpi model.py, _fix_pytorch_state_dict_keys)
fixed_state_dict = model._fix_pytorch_state_dict_keys(original_state_dict, model.config)
# Then add "model." prefix for all keys that don't already have it
remapped_state_dict = {}
remap_count = 0
for key, value in fixed_state_dict.items():
if not key.startswith("model."):
new_key = f"model.{key}"
remapped_state_dict[new_key] = value
remap_count += 1
else:
remapped_state_dict[key] = value
if remap_count > 0:
print(f"Remapped {remap_count} state dict keys")
# Load the remapped state dict into the model
missing_keys, unexpected_keys = model.load_state_dict(remapped_state_dict, strict=strict)
if missing_keys:
print(f"Missing keys when loading state dict: {len(missing_keys)} keys")
if len(missing_keys) <= 5:
for key in missing_keys:
print(f" - {key}")
else:
for key in missing_keys[:5]:
print(f" - {key}")
print(f" ... and {len(missing_keys) - 5} more")
if unexpected_keys:
print(f"Unexpected keys when loading state dict: {len(unexpected_keys)} keys")
if len(unexpected_keys) <= 5:
for key in unexpected_keys:
print(f" - {key}")
else:
for key in unexpected_keys[:5]:
print(f" - {key}")
print(f" ... and {len(unexpected_keys) - 5} more")
if not missing_keys and not unexpected_keys:
print("All keys loaded successfully!")
except Exception as e:
print(f"Warning: Could not load state dict: {e}")
return model
    def _fix_pytorch_state_dict_keys(
        self, state_dict, model_config
    ):  # see openpi `BaseModelConfig, _fix_pytorch_state_dict_keys`
        """Fix state dict keys to match current model architecture.

        Renames or drops checkpoint keys that differ between openpi-style
        checkpoints and this port: expert layer-norm weights that clash with
        adaRMS, pi0-style `action_time_mlp_*` names, the pi0-only
        `state_proj`, and the lm_head weight which is duplicated into the
        (tied) token-embedding slot.

        Args:
            state_dict: Raw checkpoint state dict (keys without "model." prefix).
            model_config: Unused; kept for parity with the openpi signature.

        Returns:
            A new dict with remapped keys (skipped keys are omitted).
        """
        import re

        fixed_state_dict = {}
        for key, value in state_dict.items():
            new_key = key
            # Handle layer norm structure changes: .weight -> .dense.weight + .dense.bias
            # For gemma expert layers
            if re.match(
                r"paligemma_with_expert\.gemma_expert\.model\.layers\.\d+\.(input_layernorm|post_attention_layernorm)\.weight",
                key,
            ):
                # Check if the model actually has adaRMS enabled for the expert
                expert_uses_adarms = getattr(
                    self.model.paligemma_with_expert.gemma_expert.config, "use_adarms", False
                )
                if expert_uses_adarms:
                    # A plain norm weight cannot be loaded into an adaRMS norm — drop it.
                    logging.warning(f"Skipping layer norm key (adaRMS mismatch): {key}")
                    continue
            if re.match(r"paligemma_with_expert\.gemma_expert\.model\.norm\.weight", key):
                # Check if the model actually has adaRMS enabled for the expert
                expert_uses_adarms = getattr(
                    self.model.paligemma_with_expert.gemma_expert.config, "use_adarms", False
                )
                if expert_uses_adarms:
                    logging.warning(f"Skipping norm key (adaRMS mismatch): {key}")
                    continue
            # Handle MLP naming changes for pi05
            # pi05 model expects time_mlp_*, but checkpoint might have action_time_mlp_*
            if key.startswith("action_time_mlp_in."):
                new_key = key.replace("action_time_mlp_in.", "time_mlp_in.")
            elif key.startswith("action_time_mlp_out."):
                new_key = key.replace("action_time_mlp_out.", "time_mlp_out.")
            # Also handle state_proj which shouldn't exist in pi05
            if key.startswith("state_proj."):
                logging.warning(f"Skipping state_proj key in pi05 mode: {key}")
                continue
            # Handle vision tower embedding layer potential differences
            if "patch_embedding" in key:
                # Some checkpoints might have this, but current model expects different structure
                logging.warning(f"Vision embedding key might need handling: {key}")
            if (
                key == "model.paligemma_with_expert.paligemma.lm_head.weight"
                or key == "paligemma_with_expert.paligemma.lm_head.weight"
            ):
                # Weights are tied: mirror lm_head into the embedding table as well.
                fixed_state_dict[
                    "model.paligemma_with_expert.paligemma.model.language_model.embed_tokens.weight"
                ] = value.clone()
            fixed_state_dict[new_key] = value
        return fixed_state_dict
def get_optim_params(self) -> dict:
    """Return the parameters the optimizer should train (all model parameters)."""
    trainable = self.parameters()
    return trainable
def reset(self):
    """Reset internal state - called when environment resets.

    Rebuilds the bounded action queue(s) so stale actions from the previous
    episode are discarded.
    """
    horizon = self.config.n_action_steps
    self._action_queue = deque(maxlen=horizon)
    self._queues = {ACTION: deque(maxlen=horizon)}
def init_rtc_processor(self):
    """Initialize RTC processor if RTC is enabled in config.

    When no RTC config is provided, ``self.rtc_processor`` stays ``None``.
    Otherwise a processor is created (even if RTC itself is disabled, so
    denoising data can still be tracked) and shared with ``self.model``.
    """
    self.rtc_processor = None
    rtc_cfg = self.config.rtc_config
    if rtc_cfg is not None:
        self.rtc_processor = RTCProcessor(rtc_cfg)
        inner_model = getattr(self, "model", None)
        if inner_model is not None:
            inner_model.rtc_processor = self.rtc_processor
def _rtc_enabled(self) -> bool:
    """True only when an RTC config exists and its ``enabled`` flag is set."""
    rtc_cfg = self.config.rtc_config
    if rtc_cfg is None:
        return False
    return bool(rtc_cfg.enabled) if rtc_cfg.enabled in (True, False) else rtc_cfg.enabled and True
def _preprocess_images(self, batch: dict[str, Tensor]) -> tuple[list[Tensor], list[Tensor]]:
    """Preprocess images for the model.

    Images from LeRobot are typically in [B, C, H, W] format and normalized to [0, 1].
    PaliGemma expects images in [B, C, H, W] format and normalized to [-1, 1].

    Args:
        batch: Observation batch; any subset of ``self.config.image_features``
            keys may be present, but at least one must be.

    Returns:
        A pair ``(images, img_masks)``: one tensor per configured camera
        (missing cameras are filled with -1 padding) and a boolean mask per
        camera (ones for real images, zeros for padded ones).

    Raises:
        ValueError: If none of the configured image features are in the batch.
    """
    images = []
    img_masks = []
    # Get device from model parameters
    device = next(self.parameters()).device
    present_img_keys = [key for key in self.config.image_features if key in batch]
    missing_img_keys = [key for key in self.config.image_features if key not in batch]
    if len(present_img_keys) == 0:
        raise ValueError(
            f"All image features are missing from the batch. At least one expected. "
            f"(batch: {batch.keys()}) (image_features: {self.config.image_features})"
        )
    # Preprocess image features present in the batch
    for key in present_img_keys:
        img = batch[key]
        # Ensure tensor is on the same device as the model
        if img.device != device:
            img = img.to(device)
        # Ensure float32 dtype for consistency
        if img.dtype != torch.float32:
            img = img.to(torch.float32)
        # from openpi preprocess_observation_pytorch: Handle both [B, C, H, W] and [B, H, W, C] formats
        # NOTE(review): this heuristic misidentifies [B, H=3, W, C] inputs — assumed not to occur here.
        is_channels_first = img.shape[1] == 3  # Check if channels are in dimension 1
        if is_channels_first:
            # Convert [B, C, H, W] to [B, H, W, C] for processing
            img = img.permute(0, 2, 3, 1)
        # from openpi preprocess_observation_pytorch: Resize with padding if needed
        if img.shape[1:3] != self.config.image_resolution:
            img = resize_with_pad_torch(img, *self.config.image_resolution)
        # Normalize from [0,1] to [-1,1] as expected by siglip
        img = img * 2.0 - 1.0
        # from openpi preprocess_observation_pytorch: Convert back to [B, C, H, W] format if it was originally channels-first
        if is_channels_first:
            img = img.permute(0, 3, 1, 2)  # [B, H, W, C] -> [B, C, H, W]
        images.append(img)
        # Create mask (all ones for real images)
        bsize = img.shape[0]
        mask = torch.ones(bsize, dtype=torch.bool, device=device)
        img_masks.append(mask)
    # Create image features not present in the batch as fully 0 padded images
    # (reuses the last real `img`/`mask` for shape/device; safe because at least
    # one camera is guaranteed present by the check above)
    for _num_empty_cameras in range(len(missing_img_keys)):
        img = torch.ones_like(img) * -1  # Padded with -1 for SigLIP
        mask = torch.zeros_like(mask)  # Mask is zero for empty cameras
        images.append(img)
        img_masks.append(mask)
    return images, img_masks
def prepare_action(self, batch):
    """Pad the batch's ground-truth actions up to the configured max action dim."""
    padded_actions = pad_vector(batch[ACTION], self.config.max_action_dim)
    return padded_actions
@torch.no_grad()
def select_action(self, batch: dict[str, Tensor]) -> Tensor:
    """Select a single action given environment observations.

    Pops one step from the internal action queue, refilling it with a fresh
    chunk prediction whenever it runs empty. Not compatible with RTC.
    """
    assert not self._rtc_enabled(), (
        "RTC is not supported for select_action, use it with predict_action_chunk"
    )
    self.eval()
    # Refill the queue with the next n_action_steps of a predicted chunk.
    if not self._action_queue:
        chunk = self.predict_action_chunk(batch)
        # (batch, steps, dim) -> (steps, batch, dim) so popleft yields one step.
        per_step = chunk[:, : self.config.n_action_steps].transpose(0, 1)
        self._action_queue.extend(per_step)
    return self._action_queue.popleft()
@torch.no_grad()
def predict_action_chunk(self, batch: dict[str, Tensor], **kwargs: Unpack[ActionSelectKwargs]) -> Tensor:
    """Predict a chunk of actions given environment observations.

    Returns a (batch, chunk_size, action_dim) tensor truncated to the true
    action dimension of the output features.
    """
    self.eval()
    # Build model inputs (no separate state input for PI05; state lives in the prompt).
    images, img_masks = self._preprocess_images(batch)
    tokens = batch[f"{OBS_LANGUAGE_TOKENS}"]
    masks = batch[f"{OBS_LANGUAGE_ATTENTION_MASK}"]
    # RTC-related kwargs are forwarded untouched.
    sampled = self.model.sample_actions(images, img_masks, tokens, masks, **kwargs)
    # Drop the padded action dims beyond the real action dimension.
    true_action_dim = self.config.output_features[ACTION].shape[0]
    return sampled[:, :, :true_action_dim]
def forward(self, batch: dict[str, Tensor], reduction: str = "mean") -> tuple[Tensor, dict]:
    """Run the batch through the model and compute the loss for training.

    Args:
        batch: Training batch containing observations and actions.
        reduction: How to reduce the loss. Options:
            - "mean": Return scalar mean loss (default, backward compatible)
            - "none": Return per-sample losses of shape (batch_size,) for RA-BC weighting

    Returns:
        ``(loss, loss_dict)`` where ``loss_dict`` always carries the scalar
        "loss" and the per-dimension mean "loss_per_dim".
    """
    # Build model inputs.
    images, img_masks = self._preprocess_images(batch)
    tokens = batch[f"{OBS_LANGUAGE_TOKENS}"]
    masks = batch[f"{OBS_LANGUAGE_ATTENTION_MASK}"]
    actions = self.prepare_action(batch)
    # Compute loss (no separate state needed for PI05).
    losses = self.model.forward(images, img_masks, tokens, masks, actions)
    # Only the true action dimensions contribute; padded dims are dropped.
    true_action_dim = self.config.output_features[ACTION].shape[0]
    losses = losses[:, :, :true_action_dim]
    loss_dict = {
        "loss_per_dim": losses.mean(dim=[0, 1]).detach().cpu().numpy().tolist(),
    }
    if reduction == "none":
        # Per-sample losses (B,): average over time and action dims.
        per_sample = losses.mean(dim=(1, 2))
        loss_dict["loss"] = per_sample.mean().item()
        return per_sample, loss_dict
    # Default: scalar mean loss.
    mean_loss = losses.mean()
    loss_dict["loss"] = mean_loss.item()
    return mean_loss, loss_dict
def _get_default_peft_targets(self) -> dict[str, object]:
    """Return default PEFT target modules for PI0.5 fine-tuning.

    Returns:
        A dict with:
        - "target_modules": regex selecting the action expert's q/v attention
          projections plus the shared action/state projection layers;
        - "modules_to_save": empty list (nothing trained fully by default).
    """
    # Fix: the annotation previously used the builtin `any` (a function) as a
    # type; `object` is a valid, import-free stand-in for the mixed value types.
    common_projections = (
        "state_proj|action_in_proj|action_out_proj|action_time_mlp_in|action_time_mlp_out"
    )
    target_modules = rf"(.*\.gemma_expert\..*\.self_attn\.(q|v)_proj|model\.({common_projections}))"
    return {
        "target_modules": target_modules,
        "modules_to_save": [],
    }
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/pi05/modeling_pi05.py",
"license": "Apache License 2.0",
"lines": 1079,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/pi05/processor_pi05.py | #!/usr/bin/env python
# Copyright 2025 Physical Intelligence and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from dataclasses import dataclass
from typing import Any
import numpy as np
import torch
from lerobot.configs.types import PipelineFeatureType, PolicyFeature
from lerobot.policies.pi05.configuration_pi05 import PI05Config
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DeviceProcessorStep,
NormalizerProcessorStep,
PolicyAction,
PolicyProcessorPipeline,
ProcessorStep,
ProcessorStepRegistry,
RenameObservationsProcessorStep,
TokenizerProcessorStep,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action
from lerobot.processor.core import EnvTransition, TransitionKey
from lerobot.utils.constants import (
OBS_STATE,
POLICY_POSTPROCESSOR_DEFAULT_NAME,
POLICY_PREPROCESSOR_DEFAULT_NAME,
)
@ProcessorStepRegistry.register(name="pi05_prepare_state_tokenizer_processor_step")
@dataclass
class Pi05PrepareStateTokenizerProcessorStep(ProcessorStep):
    """Build the PI05 text prompt from the task string and the discretized state.

    Assumes the state was already normalized to [-1, 1] by a preceding
    NormalizerProcessorStep; each state dimension is bucketed into 256 bins
    (see openpi `PaligemmaTokenizer.tokenize()`) and appended to the prompt.
    """

    # NOTE(review): declared but never read in __call__ — confirm whether state
    # padding to max_state_dim was intended here.
    max_state_dim: int = 32
    task_key: str = "task"

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        transition = transition.copy()
        observation = transition.get(TransitionKey.OBSERVATION, {})
        state = observation.get(OBS_STATE)
        if state is None:
            raise ValueError("State is required for PI05")
        tasks = transition.get(TransitionKey.COMPLEMENTARY_DATA, {}).get(self.task_key)
        if tasks is None:
            raise ValueError("No task found in complementary data")
        state = deepcopy(state)  # TODO: check if this necessary
        # 256 buckets over [-1, 1]; digitize returns 1-based bin ids, hence -1.
        bin_edges = np.linspace(-1, 1, 256 + 1)[:-1]
        discretized = np.digitize(state.cpu().numpy(), bins=bin_edges) - 1
        prompts = []
        for i, task in enumerate(tasks):
            cleaned = task.strip().replace("_", " ").replace("\n", " ")
            state_str = " ".join(map(str, discretized[i]))
            prompts.append(f"Task: {cleaned}, State: {state_str};\nAction: ")
        transition[TransitionKey.COMPLEMENTARY_DATA][self.task_key] = prompts
        return transition

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """This step does not alter the feature definitions."""
        return features
def make_pi05_pre_post_processors(
    config: PI05Config,
    dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None,
) -> tuple[
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """
    Constructs pre-processor and post-processor pipelines for the PI05 policy.

    The pre-processing pipeline prepares input data for the model by:
    1. Renaming features to match pretrained configurations.
    2. Adding a batch dimension.
    3. Normalizing input and output features based on dataset statistics.
    4. Discretizing the normalized state and appending it to the task prompt.
    5. Tokenizing the text prompt using the PaliGemma tokenizer.
    6. Moving all data to the specified device.

    The post-processing pipeline handles the model's output by:
    1. Unnormalizing the output features to their original scale.
    2. Moving data to the CPU.

    Args:
        config: The configuration object for the PI05 policy.
        dataset_stats: A dictionary of statistics for normalization.

    Returns:
        A tuple containing the configured pre-processor and post-processor pipelines.
    """
    # Add remaining processors
    input_steps: list[ProcessorStep] = [
        RenameObservationsProcessorStep(rename_map={}),  # To mimic the same processor as pretrained one
        AddBatchDimensionProcessorStep(),
        # NOTE: NormalizerProcessorStep MUST come before Pi05PrepareStateTokenizerProcessorStep
        # because the tokenizer step expects normalized state in [-1, 1] range for discretization
        NormalizerProcessorStep(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
        Pi05PrepareStateTokenizerProcessorStep(max_state_dim=config.max_state_dim),
        TokenizerProcessorStep(
            tokenizer_name="google/paligemma-3b-pt-224",
            max_length=config.tokenizer_max_length,
            padding_side="right",
            padding="max_length",
        ),
        DeviceProcessorStep(device=config.device),
    ]
    output_steps: list[ProcessorStep] = [
        UnnormalizerProcessorStep(
            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
        DeviceProcessorStep(device="cpu"),
    ]
    return (
        PolicyProcessorPipeline[dict[str, Any], dict[str, Any]](
            steps=input_steps,
            name=POLICY_PREPROCESSOR_DEFAULT_NAME,
        ),
        PolicyProcessorPipeline[PolicyAction, PolicyAction](
            steps=output_steps,
            name=POLICY_POSTPROCESSOR_DEFAULT_NAME,
            to_transition=policy_action_to_transition,
            to_output=transition_to_policy_action,
        ),
    )
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/pi05/processor_pi05.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:tests/datasets/test_quantiles_dataset_integration.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for quantile functionality in LeRobotDataset."""
import numpy as np
import pytest
from lerobot.datasets.lerobot_dataset import LeRobotDataset
def mock_load_image_as_numpy(path, dtype, channel_first):
    """Mock image loading for consistent test results."""
    shape = (3, 32, 32) if channel_first else (32, 32, 3)
    return np.ones(shape, dtype=dtype)
@pytest.fixture
def simple_features():
    """Simple feature configuration for testing."""
    joint_names = [f"joint_{i}" for i in range(10)]
    features = {
        "action": {
            "dtype": "float32",
            "shape": (4,),
            "names": ["arm_x", "arm_y", "arm_z", "gripper"],
        },
        "observation.state": {
            "dtype": "float32",
            "shape": (10,),
            "names": joint_names,
        },
    }
    return features
def test_create_dataset_with_fixed_quantiles(tmp_path, simple_features):
    """Test creating dataset with fixed quantiles."""
    ds = LeRobotDataset.create(
        repo_id="test_dataset_fixed_quantiles",
        fps=30,
        features=simple_features,
        root=tmp_path / "create_fixed_quantiles",
    )
    # Creation alone should succeed and yield a dataset object.
    assert ds is not None
def test_save_episode_computes_all_quantiles(tmp_path, simple_features):
    """Test that all fixed quantiles are computed when saving an episode."""
    ds = LeRobotDataset.create(
        repo_id="test_dataset_save_episode",
        fps=30,
        features=simple_features,
        root=tmp_path / "save_episode_quantiles",
    )
    # Record a short episode of random frames.
    for _ in range(10):
        frame = {
            "action": np.random.randn(4).astype(np.float32),  # Correct shape for action
            "observation.state": np.random.randn(10).astype(np.float32),
            "task": "test_task",
        }
        ds.add_frame(frame)
    ds.save_episode()
    # Every fixed quantile must be present for each feature.
    stats = ds.meta.stats
    for key in ("action", "observation.state"):
        for quantile in ("q01", "q10", "q50", "q90", "q99"):
            assert quantile in stats[key]
def test_quantile_values_ordering(tmp_path, simple_features):
    """Test that quantile values are properly ordered."""
    ds = LeRobotDataset.create(
        repo_id="test_dataset_quantile_ordering",
        fps=30,
        features=simple_features,
        root=tmp_path / "quantile_ordering",
    )
    # Known seed so the distribution is reproducible.
    np.random.seed(42)
    for _ in range(100):
        ds.add_frame(
            {
                "action": np.random.randn(4).astype(np.float32),  # Correct shape for action
                "observation.state": np.random.randn(10).astype(np.float32),
                "task": "test_task",
            }
        )
    ds.save_episode()
    stats = ds.meta.stats
    # Each successive quantile must dominate the previous one element-wise.
    ordered = ("q01", "q10", "q50", "q90", "q99")
    for key in ("action", "observation.state"):
        for lower, upper in zip(ordered, ordered[1:]):
            assert np.all(stats[key][lower] <= stats[key][upper])
def test_save_episode_with_fixed_quantiles(tmp_path, simple_features):
    """Test saving episode always computes fixed quantiles."""
    ds = LeRobotDataset.create(
        repo_id="test_dataset_save_fixed",
        fps=30,
        features=simple_features,
        root=tmp_path / "save_fixed_quantiles",
    )
    np.random.seed(42)
    for _ in range(50):
        ds.add_frame(
            {
                "action": np.random.normal(0, 1, (4,)).astype(np.float32),
                "observation.state": np.random.normal(0, 1, (10,)).astype(np.float32),
                "task": "test_task",
            }
        )
    ds.save_episode()
    # The stats key set must be exactly the base stats plus the fixed quantiles.
    expected_keys = {"min", "max", "mean", "std", "count", "q01", "q10", "q50", "q90", "q99"}
    stats = ds.meta.stats
    for key in ("action", "observation.state"):
        assert set(stats[key].keys()) == expected_keys
def test_quantile_aggregation_across_episodes(tmp_path, simple_features):
    """Test quantile aggregation across multiple episodes."""
    ds = LeRobotDataset.create(
        repo_id="test_dataset_aggregation",
        fps=30,
        features=simple_features,
        root=tmp_path / "quantile_aggregation",
    )
    np.random.seed(42)
    for _ in range(100):
        ds.add_frame(
            {
                "action": np.random.normal(0, 1, (4,)).astype(np.float32),
                "observation.state": np.random.normal(2, 1, (10,)).astype(np.float32),
                "task": "test_task",
            }
        )
    ds.save_episode()
    expected_keys = {"min", "max", "mean", "std", "count", "q01", "q10", "q50", "q90", "q99"}
    stats = ds.meta.stats
    for key in ("action", "observation.state"):
        feature_stats = stats[key]
        dim = simple_features[key]["shape"][0]
        assert set(feature_stats.keys()) == expected_keys
        # Quantiles keep the per-dimension shape and stay ordered.
        for q in ("q01", "q50", "q99"):
            assert feature_stats[q].shape == (dim,)
        assert np.all(feature_stats["q01"] <= feature_stats["q50"])
        assert np.all(feature_stats["q50"] <= feature_stats["q99"])
def test_save_multiple_episodes_with_quantiles(tmp_path, simple_features):
    """Test quantile aggregation across multiple episodes."""
    ds = LeRobotDataset.create(
        repo_id="test_dataset_multiple_episodes",
        fps=30,
        features=simple_features,
        root=tmp_path / "multiple_episodes",
    )
    # Three episodes with shifted means so aggregation is non-trivial.
    np.random.seed(42)
    for ep in range(3):
        for _ in range(50):
            ds.add_frame(
                {
                    "action": np.random.normal(ep * 2.0, 1, (4,)).astype(np.float32),
                    "observation.state": np.random.normal(-ep * 1.5, 1, (10,)).astype(np.float32),
                    "task": f"task_{ep}",
                }
            )
        ds.save_episode()
    # Aggregated stats must still expose quantiles and the total frame count.
    stats = ds.meta.stats
    for key in ("action", "observation.state"):
        feature_stats = stats[key]
        assert "q01" in feature_stats and "q99" in feature_stats
        assert feature_stats["count"][0] == 150  # 3 episodes * 50 frames
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/datasets/test_quantiles_dataset_integration.py",
"license": "Apache License 2.0",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/policies/pi0_pi05/test_pi0.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test script to verify PI0 policy integration with LeRobot"""
import pytest
import torch
pytest.importorskip("transformers")
from lerobot.policies.factory import make_policy_config # noqa: E402
from lerobot.policies.pi0 import ( # noqa: E402
PI0Config,
PI0Policy,
make_pi0_pre_post_processors, # noqa: E402
)
from lerobot.utils.random_utils import set_seed # noqa: E402
from tests.utils import require_cuda, require_hf_token # noqa: E402
@require_cuda
@require_hf_token
def test_policy_instantiation():
    """Instantiate PI0 and smoke-test one forward pass and one action selection."""
    # Create config
    set_seed(42)
    config = PI0Config(max_action_dim=7, max_state_dim=14, dtype="float32")
    # Set up input_features and output_features in the config
    from lerobot.configs.types import FeatureType, PolicyFeature

    config.input_features = {
        "observation.state": PolicyFeature(
            type=FeatureType.STATE,
            shape=(14,),
        ),
        "observation.images.base_0_rgb": PolicyFeature(
            type=FeatureType.VISUAL,
            shape=(3, 224, 224),
        ),
    }
    config.output_features = {
        "action": PolicyFeature(
            type=FeatureType.ACTION,
            shape=(7,),
        ),
    }
    # Create dummy dataset stats (identity normalization: zero mean, unit std)
    dataset_stats = {
        "observation.state": {
            "mean": torch.zeros(14),
            "std": torch.ones(14),
        },
        "action": {
            "mean": torch.zeros(7),
            "std": torch.ones(7),
        },
        "observation.images.base_0_rgb": {
            "mean": torch.zeros(3, 224, 224),
            "std": torch.ones(3, 224, 224),
        },
    }
    # Instantiate policy
    policy = PI0Policy(config)
    preprocessor, postprocessor = make_pi0_pre_post_processors(config=config, dataset_stats=dataset_stats)
    # Test forward pass with dummy data
    batch_size = 1
    device = config.device
    batch = {
        "observation.state": torch.randn(batch_size, 14, dtype=torch.float32, device=device),
        "action": torch.randn(batch_size, config.chunk_size, 7, dtype=torch.float32, device=device),
        "observation.images.base_0_rgb": torch.rand(
            batch_size, 3, 224, 224, dtype=torch.float32, device=device
        ),  # Use rand for [0,1] range
        "task": ["Pick up the object"] * batch_size,
    }
    batch = preprocessor(batch)
    try:
        loss, loss_dict = policy.forward(batch)
        print(f"Forward pass successful. Loss: {loss_dict['loss']:.4f}")
    except Exception as e:
        print(f"Forward pass failed: {e}")
        raise
    try:
        with torch.no_grad():
            action = policy.select_action(batch)
            action = postprocessor(action)
        print(f"Action: {action}")
        print(f"Action prediction successful. Action shape: {action.shape}")
    except Exception as e:
        print(f"Action prediction failed: {e}")
        raise
@require_cuda
@require_hf_token
def test_config_creation():
    """Test policy config creation through factory."""
    try:
        config = make_policy_config(
            policy_type="pi0",
            max_action_dim=7,
            max_state_dim=14,
        )
        report = [
            "Config created successfully through factory",
            f" Config type: {type(config).__name__}",
            f" PaliGemma variant: {config.paligemma_variant}",
            f" Action expert variant: {config.action_expert_variant}",
        ]
        for line in report:
            print(line)
    except Exception as e:
        print(f"Config creation failed: {e}")
        raise
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/policies/pi0_pi05/test_pi0.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/policies/pi0_pi05/test_pi05.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test script to verify PI0.5 (pi05) support in PI0 policy"""
import pytest
import torch
pytest.importorskip("transformers")
from lerobot.policies.factory import make_policy_config # noqa: E402
from lerobot.policies.pi05 import ( # noqa: E402
PI05Config,
PI05Policy,
make_pi05_pre_post_processors, # noqa: E402
)
from lerobot.utils.random_utils import set_seed
from tests.utils import require_cuda, require_hf_token # noqa: E402
@require_cuda
@require_hf_token
def test_policy_instantiation():
    """Instantiate PI05 and smoke-test forward, action selection, and pi05-specific architecture."""
    # Create config
    set_seed(42)
    config = PI05Config(max_action_dim=7, max_state_dim=14, dtype="float32")
    # Set up input_features and output_features in the config
    from lerobot.configs.types import FeatureType, PolicyFeature

    config.input_features = {
        "observation.state": PolicyFeature(
            type=FeatureType.STATE,
            shape=(14,),
        ),
        "observation.images.base_0_rgb": PolicyFeature(
            type=FeatureType.VISUAL,
            shape=(3, 224, 224),
        ),
    }
    config.output_features = {
        "action": PolicyFeature(
            type=FeatureType.ACTION,
            shape=(7,),
        ),
    }
    # pi05 embeds the discretized state in the text prompt, so it needs a longer token budget
    assert config.tokenizer_max_length == 200, (
        f"Expected tokenizer_max_length=200 for pi05, got {config.tokenizer_max_length}"
    )
    # Create dummy dataset stats (pi05 normalization also uses min/max and q01/q99 quantiles)
    dataset_stats = {
        "observation.state": {
            "mean": torch.zeros(14),
            "std": torch.ones(14),
            "min": torch.zeros(14),
            "max": torch.ones(14),
            "q01": torch.zeros(14),
            "q99": torch.ones(14),
        },
        "action": {
            "mean": torch.zeros(7),
            "std": torch.ones(7),
            "min": torch.zeros(7),
            "max": torch.ones(7),
            "q01": torch.zeros(7),
            "q99": torch.ones(7),
        },
        "observation.images.base_0_rgb": {
            "mean": torch.zeros(3, 224, 224),
            "std": torch.ones(3, 224, 224),
            "q01": torch.zeros(3, 224, 224),
            "q99": torch.ones(3, 224, 224),
        },
    }
    # Instantiate policy
    policy = PI05Policy(config)
    # Test forward pass with dummy data
    batch_size = 1
    preprocessor, postprocessor = make_pi05_pre_post_processors(config=config, dataset_stats=dataset_stats)
    device = config.device
    batch = {
        "observation.state": torch.randn(batch_size, 14, dtype=torch.float32, device=device),
        "action": torch.randn(batch_size, config.chunk_size, 7, dtype=torch.float32, device=device),
        "observation.images.base_0_rgb": torch.rand(
            batch_size, 3, 224, 224, dtype=torch.float32, device=device
        ),  # Use rand for [0,1] range
        "task": ["Pick up the object"] * batch_size,
    }
    batch = preprocessor(batch)
    try:
        loss, loss_dict = policy.forward(batch)
        print(f"Forward pass successful. Loss: {loss_dict['loss']:.4f}")
    except Exception as e:
        print(f"Forward pass failed: {e}")
        raise
    try:
        with torch.no_grad():
            action = policy.select_action(batch)
            action = postprocessor(action)
        print(f"Action: {action}")
        print(f"Action prediction successful. Action shape: {action.shape}")
    except Exception as e:
        print(f"Action prediction failed: {e}")
        raise
    # Verify pi05 model components exist
    # Check that time_mlp layers exist (for AdaRMS conditioning)
    assert hasattr(policy.model, "time_mlp_in"), "Missing time_mlp_in layer for pi05"
    assert hasattr(policy.model, "time_mlp_out"), "Missing time_mlp_out layer for pi05"
    # Check that action_time_mlp layers don't exist (pi0 only)
    assert not hasattr(policy.model, "action_time_mlp_in"), "action_time_mlp_in should not exist in pi05 mode"
    assert not hasattr(policy.model, "action_time_mlp_out"), (
        "action_time_mlp_out should not exist in pi05 mode"
    )
    # Check that state_proj doesn't exist in pi05 mode
    assert not hasattr(policy.model, "state_proj"), "state_proj should not exist in pi05 mode"
    # Check AdaRMS configuration in the underlying model
    adarms_config = policy.model.paligemma_with_expert.paligemma.config.text_config.use_adarms
    assert adarms_config == False, f"PaliGemma should not use AdaRMS, got {adarms_config}"  # noqa: E712
    adarms_expert_config = policy.model.paligemma_with_expert.gemma_expert.config.use_adarms
    assert adarms_expert_config == True, (  # noqa: E712
        f"Action expert should use AdaRMS in pi05, got {adarms_expert_config}"
    )
@require_cuda
@require_hf_token
def test_config_creation():
    """Test policy config creation through factory.

    Fix: this test lives in test_pi05.py but previously requested
    ``policy_type="pi0"`` (copy-paste from test_pi0.py), leaving the pi05
    factory path untested.
    """
    try:
        config = make_policy_config(
            policy_type="pi05",
            max_action_dim=7,
            max_state_dim=14,
        )
        print("Config created successfully through factory")
        print(f" Config type: {type(config).__name__}")
        print(f" PaliGemma variant: {config.paligemma_variant}")
        print(f" Action expert variant: {config.action_expert_variant}")
    except Exception as e:
        print(f"Config creation failed: {e}")
        raise
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/policies/pi0_pi05/test_pi05.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/policies/pi0_pi05/test_pi05_original_vs_lerobot.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test script to verify PI0OpenPI policy integration with LeRobot vs the original implementation"""
import os
from copy import deepcopy
from typing import Any
import numpy as np
import pytest
import torch
# Skip if openpi or transformers is not available
pytest.importorskip("openpi")
pytest.importorskip("transformers")
# Skip this entire module in CI: it compares against a locally-installed OpenPI,
# which is not available on hosted runners (detected via CI/GITHUB_ACTIONS env vars).
pytestmark = pytest.mark.skipif(
    os.environ.get("CI") == "true" or os.environ.get("GITHUB_ACTIONS") == "true",
    reason="This test requires local OpenPI installation and is not meant for CI",
)
from openpi.models_pytorch import preprocessing_pytorch as openpi_preprocessing # noqa: E402
# NOTE: Assumes PYTHONPATH is set to include OpenPI src as per instructions.
from openpi.models_pytorch.pi0_pytorch import PI0Pytorch # noqa: E402
from transformers import AutoTokenizer # noqa: E402
from lerobot.policies.pi05 import PI05Config, PI05Policy # noqa: E402
from lerobot.policies.pi05.processor_pi05 import make_pi05_pre_post_processors # noqa: E402
from lerobot.processor import PolicyAction, PolicyProcessorPipeline # noqa: E402
# TODO: ADDING DEFAULT IMAGES_FEATURES TO CONFIG
# Dimensions used to build the synthetic batches for the parity test.
DUMMY_ACTION_DIM = 32
DUMMY_STATE_DIM = 32
DUMMY_ACTION_HORIZON = 50
DUMMY_MAX_TOKEN_LEN = 200
# Run on CPU so the test does not require a GPU or large memory.
DEVICE = "cpu"
# Identity normalization stats (mean 0 / std 1, q01 0 / q99 1) so normalization
# steps are effectively no-ops during the comparison.
DUMMY_DATASET_STATS = {
    key: {
        "mean": torch.zeros(dim),
        "std": torch.ones(dim),
        "q01": torch.zeros(dim),
        "q99": torch.ones(dim),
    }
    for key, dim in (("observation.state", DUMMY_STATE_DIM), ("action", DUMMY_ACTION_DIM))
}
DUMMY_DATASET_STATS["images"] = {
    cam: {
        "mean": torch.zeros(3, 224, 224),
        "std": torch.ones(3, 224, 224),
        "q01": torch.zeros(3, 224, 224),
        "q99": torch.ones(3, 224, 224),
    }
    for cam in ("base_0_rgb", "left_wrist_0_rgb", "right_wrist_0_rgb")
}
class PI05BaseOriginalConfig:
    """Class-attribute config handed to the original OpenPI ``PI0Pytorch`` model.

    Used by ``instantiate_original_pi05`` below; the padded dimensions match the
    dummy constants used throughout this test module.
    """

    # Padded action dimensionality and action-chunk length.
    action_dim: int = DUMMY_ACTION_DIM
    action_horizon: int = DUMMY_ACTION_HORIZON
    # Backbone variants: PaliGemma 2B VLM plus a 300M-parameter action expert.
    paligemma_variant: str = "gemma_2b"
    action_expert_variant: str = "gemma_300m"
    precision: str = "float32"
    # True selects the pi0.5 model variant of PI0Pytorch.
    pi05: bool = True
    dtype: str = "float32"
def instantiate_lerobot_pi05(
    from_pretrained: bool = False,
) -> tuple[
    PI05Policy,
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """Build a LeRobot PI05 policy and its pre/post-processing pipelines.

    Args:
        from_pretrained: load the published ``lerobot/pi05_base`` checkpoint
            when True; otherwise create a randomly initialized policy.

    Returns:
        ``(policy, preprocessor, postprocessor)`` with the policy on DEVICE.
    """
    if not from_pretrained:
        policy = PI05Policy(
            PI05Config(max_action_dim=DUMMY_ACTION_DIM, max_state_dim=DUMMY_STATE_DIM, dtype="float32")
        )
    else:
        policy = PI05Policy.from_pretrained(pretrained_name_or_path="lerobot/pi05_base", strict=True)
    policy.to(DEVICE)
    policy.config.device = DEVICE
    pre, post = make_pi05_pre_post_processors(
        config=policy.config, dataset_stats=DUMMY_DATASET_STATS
    )
    return (policy, pre, post)
def _print_key_summary(label: str, keys) -> None:
    """Print up to five entries of a state-dict key list, plus an overflow count."""
    print(f"{label}: {len(keys)}")
    for key in keys[:5]:
        print(f"  - {key}")
    if len(keys) > 5:
        print(f"  ... and {len(keys) - 5} more")


def instantiate_original_pi05(from_pretrained: bool = False, model_path: str | None = None):
    """Build the original OpenPI ``PI0Pytorch`` model (pi05 variant).

    Args:
        from_pretrained: when True, load the converted PyTorch weights from the
            ``lerobot/pi05_base`` repo on the HuggingFace Hub.
        model_path: optional local directory containing a previously downloaded
            checkpoint; used instead of downloading when it exists.

    Returns:
        The model moved to DEVICE. If weight loading fails, the randomly
        initialized model is returned (best-effort, with a printed warning) so
        the comparison can still run.
    """
    config = PI05BaseOriginalConfig()
    policy = PI0Pytorch(config)
    if from_pretrained:
        try:
            print("Loading converted PyTorch weights from HuggingFace Hub (lerobot/pi05_base)...")
            # Imported lazily so the module stays importable without these deps.
            import safetensors.torch
            from huggingface_hub import snapshot_download

            if model_path and os.path.exists(model_path):
                cache_dir = model_path
                print(f"Using cached model from: {cache_dir}")
            else:
                cache_dir = snapshot_download(repo_id="lerobot/pi05_base", repo_type="model")
                print(f"Downloaded model to: {cache_dir}")
            model_file = os.path.join(cache_dir, "model.safetensors")
            if not os.path.exists(model_file):
                raise FileNotFoundError(f"No safetensors file found in {cache_dir}")
            state_dict = safetensors.torch.load_file(model_file)
            print(f"Loaded {len(state_dict)} parameters from safetensors")
            # strict=False: report key mismatches below instead of failing hard.
            missing_keys, unexpected_keys = policy.load_state_dict(state_dict, strict=False)
            if missing_keys:
                _print_key_summary("Missing keys", missing_keys)
            if unexpected_keys:
                _print_key_summary("Unexpected keys", unexpected_keys)
            if not missing_keys and not unexpected_keys:
                print("All pretrained weights loaded successfully!")
            else:
                print("Pretrained weights loaded with some missing/unexpected keys (this may be normal)")
        except Exception as e:
            # Best-effort fallback: keep the random init so the test still runs.
            print(f"Failed to load pretrained weights: {e}")
            print(" Using randomly initialized weights...")
            import traceback

            traceback.print_exc()
    policy.to(DEVICE)
    return policy
def create_dummy_data():
    """Build one synthetic batch: state, action chunk, three camera images, and a task prompt."""
    batch_size = 2  # small batch keeps the test cheap
    # Identical prompt for every sample so both implementations see the same text.
    prompt = "Pick up the red block and place it in the bin"
    batch = {
        "observation.state": torch.randn(
            batch_size, DUMMY_STATE_DIM, dtype=torch.float32, device=DEVICE
        ),
        "action": torch.randn(
            batch_size, DUMMY_ACTION_HORIZON, DUMMY_ACTION_DIM, dtype=torch.float32, device=DEVICE
        ),
    }
    # Images are sampled in [0, 1]; LeRobot converts to [-1, 1] internally.
    for cam_key in (
        "observation.images.base_0_rgb",
        "observation.images.left_wrist_0_rgb",
        "observation.images.right_wrist_0_rgb",
    ):
        batch[cam_key] = torch.rand(batch_size, 3, 224, 224, dtype=torch.float32, device=DEVICE)
    # Provide the task as a per-sample list of prompts.
    batch["task"] = [prompt] * batch_size
    return batch
def extract_lerobot_processed_inputs(lerobot_pi0, batch):
    """Run LeRobot's internal preprocessing and return its tensors for reuse by OpenPI."""
    # Tokenized language via LeRobot's internal method.
    tokens, token_masks = lerobot_pi0._tokenize_language(batch)
    # Preprocessed images via LeRobot's internal method (eval mode).
    imgs, image_masks = lerobot_pi0._preprocess_images(batch, train=False)
    # Dummy AR / loss token masks required by the original implementation's format.
    ar_mask = torch.zeros_like(tokens, dtype=torch.int32)
    loss_mask = torch.ones_like(token_masks, dtype=torch.bool)
    return imgs, image_masks, tokens, token_masks, ar_mask, loss_mask
class PI05Observation:
    """Plain attribute container mimicking the original OpenPI observation object."""

    def __init__(
        self,
        state,
        images,
        image_masks,
        tokenized_prompt,
        tokenized_prompt_mask,
        token_ar_mask,
        token_loss_mask,
    ):
        # Store every field verbatim; OpenPI's preprocessing reads these attributes.
        for attr_name, value in (
            ("state", state),
            ("images", images),
            ("image_masks", image_masks),
            ("tokenized_prompt", tokenized_prompt),
            ("tokenized_prompt_mask", tokenized_prompt_mask),
            ("token_ar_mask", token_ar_mask),
            ("token_loss_mask", token_loss_mask),
        ):
            setattr(self, attr_name, value)
def create_original_observation_with_openpi_preprocessing(batch):
    """Create observation object for OpenPI using OpenPI's own preprocessing with pi05 state tokenizer.

    Mirrors the pi05 input pipeline: the state vector is padded, discretized into
    256 bins, and embedded into the language prompt as text; the prompt is then
    tokenized with the PaliGemma tokenizer and the assembled observation is run
    through OpenPI's ``preprocess_observation_pytorch``.

    Args:
        batch: LeRobot-style batch with ``observation.state``, three
            ``observation.images.*`` tensors in [0, 1], and an optional ``task``
            (str or list of str).

    Returns:
        The processed observation produced by ``preprocess_observation_pytorch``.
    """
    batch_size = batch["observation.state"].shape[0]
    device = batch["observation.state"].device
    # Create tokenizer for OpenPI (same as LeRobot uses)
    tokenizer = AutoTokenizer.from_pretrained("google/paligemma-3b-pt-224")
    # Get task description (pi05 processor handles all text formatting)
    tasks = batch.get("task", ["Pick up the object"] * batch_size)
    if isinstance(tasks, str):
        tasks = [tasks] * batch_size
    elif len(tasks) == 1:
        # A single-element list is expanded to the full batch size.
        tasks = tasks * batch_size
    # Use pi05 state and input tokenizer logic (same as Pi05PrepareStateTokenizerProcessorStep)
    state = batch["observation.state"]
    state = deepcopy(state)
    # Prepare state (pad to max_state_dim)
    from lerobot.policies.pi05.modeling_pi05 import pad_vector
    state = pad_vector(state, DUMMY_STATE_DIM)
    # Normalize state to [-1, 1] range if needed (assuming it's already normalized from normalize_inputs)
    # Discretize into 256 bins (see openpi `PaligemmaTokenizer.tokenize()`)
    state_np = state.cpu().numpy()
    discretized_states = np.digitize(state_np, bins=np.linspace(-1, 1, 256 + 1)[:-1]) - 1
    # Create pi05-formatted prompts that include state information
    full_prompts = []
    for i, task in enumerate(tasks):
        cleaned_text = task.strip().replace("_", " ").replace("\n", " ")
        state_str = " ".join(map(str, discretized_states[i]))
        # NOTE: this exact "Task: ..., State: ...;\nAction: " template must match
        # what the pi05 state tokenizer produces on the LeRobot side.
        full_prompt = f"Task: {cleaned_text}, State: {state_str};\nAction: "
        full_prompts.append(full_prompt)
    # Tokenize with max_length padding to match OpenPI's expected format
    tokenized = tokenizer(
        full_prompts,
        padding="max_length",
        padding_side="right",
        truncation=True,
        max_length=DUMMY_MAX_TOKEN_LEN,
        return_tensors="pt",
    )
    lang_tokens = tokenized["input_ids"].to(device)
    lang_masks = tokenized["attention_mask"].to(device, dtype=torch.bool)
    # Create dummy token_ar_mask and token_loss_mask for OpenPI
    token_ar_mask = torch.zeros_like(lang_tokens, dtype=torch.int32)
    token_loss_mask = torch.ones_like(lang_masks, dtype=torch.bool)
    # Convert LeRobot images format to OpenPI format (convert [0,1] to [-1,1] range)
    image_dict = {
        "base_0_rgb": batch["observation.images.base_0_rgb"] * 2.0 - 1.0,
        "left_wrist_0_rgb": batch["observation.images.left_wrist_0_rgb"] * 2.0 - 1.0,
        "right_wrist_0_rgb": batch["observation.images.right_wrist_0_rgb"] * 2.0 - 1.0,
    }
    # Create image masks (all ones for real images)
    image_masks_dict = {}
    for key in image_dict:
        image_masks_dict[key] = torch.ones(batch_size, dtype=torch.bool, device=device)
    # Create raw observation object (before preprocessing)
    raw_observation = PI05Observation(
        state=batch["observation.state"],
        images=image_dict,
        image_masks=image_masks_dict,
        tokenized_prompt=lang_tokens,
        tokenized_prompt_mask=lang_masks,
        token_ar_mask=token_ar_mask,
        token_loss_mask=token_loss_mask,
    )
    # Now use OpenPI's preprocessing
    processed_obs = openpi_preprocessing.preprocess_observation_pytorch(raw_observation, train=False)
    return processed_obs
def create_original_observation_from_lerobot(lerobot_pi0, batch):
    """Build an OpenPI-style observation from LeRobot's own preprocessed tensors.

    This guarantees both implementations consume identical inputs when compared.
    """
    processed = extract_lerobot_processed_inputs(lerobot_pi0, batch)
    images, img_masks, lang_tokens, lang_masks, token_ar_mask, token_loss_mask = processed
    # LeRobot yields images/masks as positional lists; OpenPI keys them by camera.
    camera_names = ("base_0_rgb", "left_wrist_0_rgb", "right_wrist_0_rgb")
    image_dict = {name: images[i] for i, name in enumerate(camera_names)}
    image_masks_dict = {name: img_masks[i] for i, name in enumerate(camera_names)}
    return PI05Observation(
        state=batch["observation.state"],
        images=image_dict,
        image_masks=image_masks_dict,
        tokenized_prompt=lang_tokens,
        tokenized_prompt_mask=lang_masks,
        token_ar_mask=token_ar_mask,
        token_loss_mask=token_loss_mask,
    )
def test_pi05_original_vs_lerobot():
    """End-to-end parity test: LeRobot PI05 vs the original OpenPI implementation.

    Both models load the converted ``lerobot/pi05_base`` weights, consume the same
    synthetic batch (each through its own preprocessing pipeline), and must produce
    action chunks that match within 1e-4 absolute tolerance.
    """
    print("Initializing models...")
    # Postprocessor is not needed: we compare raw action chunks.
    lerobot_pi05, lerobot_preprocessor, _lerobot_postprocessor = instantiate_lerobot_pi05(
        from_pretrained=True
    )
    original_pi0 = instantiate_original_pi05(from_pretrained=True)
    print("Creating dummy data...")
    batch = create_dummy_data()
    # Deep copy so preprocessing on one path cannot mutate the other's inputs.
    batch_lerobot = deepcopy(batch)
    # Test each model with its own preprocessing (realistic end-to-end comparison).
    print("\nTest each model with its own preprocessing")
    print("Creating observation for OpenPI using OpenPI's own preprocessing...")
    pi0_obs_openpi = create_original_observation_with_openpi_preprocessing(batch)
    print(f"Task prompt: '{batch['task'][0]}'")
    print(f"OpenPI tokenized prompt shape: {pi0_obs_openpi.tokenized_prompt.shape}")
    print(f"OpenPI image shapes: {[img.shape for img in pi0_obs_openpi.images.values()]}")
    print(f"OpenPI state shape: {pi0_obs_openpi.state.shape}")
    print("Testing OpenPI with own preprocessing...")
    original_pi0.eval()
    torch.manual_seed(42)  # identical denoising start for both models
    batch_size = batch["observation.state"].shape[0]
    fixed_noise = torch.randn(
        (batch_size, DUMMY_ACTION_HORIZON, DUMMY_ACTION_DIM), dtype=torch.float32, device=DEVICE
    )
    with torch.no_grad():
        openpi_actions = original_pi0.sample_actions(
            device=DEVICE, observation=pi0_obs_openpi, noise=fixed_noise, num_steps=10
        )
    print(f"OpenPI (own preprocessing) Actions shape: {openpi_actions.shape}")
    print(f"OpenPI (own preprocessing) Actions mean: {openpi_actions.mean().item():.6f}")
    print(f"OpenPI (own preprocessing) Actions std: {openpi_actions.std().item():.6f}")
    print("Testing LeRobot with own preprocessing...")
    lerobot_pi05.eval()
    torch.manual_seed(42)  # same seed as the OpenPI run
    batch_lerobot_processed = lerobot_preprocessor(batch_lerobot)
    with torch.no_grad():
        # Shape: (batch_size, n_action_steps, action_dim)
        lerobot_actions_own = lerobot_pi05.predict_action_chunk(batch_lerobot_processed)
    print(f"LeRobot (own preprocessing) Actions shape: {lerobot_actions_own.shape}")
    print(f"LeRobot (own preprocessing) Actions mean: {lerobot_actions_own.mean().item():.6f}")
    print(f"LeRobot (own preprocessing) Actions std: {lerobot_actions_own.std().item():.6f}")
    print("\nComparing end-to-end implementations:")
    max_abs_diff = torch.abs(lerobot_actions_own - openpi_actions).max().item()
    print(f"Max absolute difference: {max_abs_diff:.6f}")
    # Single strict bound: max |diff| < 1e-4 implies the looser allclose checks
    # (atol=1e-4 and atol=1e-2) that were previously asserted redundantly.
    assert max_abs_diff < 1e-4, f"PI05 outputs diverge: max |diff| = {max_abs_diff:.6f}"
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/policies/pi0_pi05/test_pi05_original_vs_lerobot.py",
"license": "Apache License 2.0",
"lines": 360,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/policies/pi0_pi05/test_pi0_original_vs_lerobot.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test script to verify PI0 policy integration with LeRobot vs the original implementation"""
import os
from copy import deepcopy
from typing import Any
import pytest
import torch
# Skip if openpi or transformers is not available
pytest.importorskip("openpi")
pytest.importorskip("transformers")
# Skip this entire module in CI
pytestmark = pytest.mark.skipif(
os.environ.get("CI") == "true" or os.environ.get("GITHUB_ACTIONS") == "true",
reason="This test requires local OpenPI installation and is not meant for CI",
)
from openpi.models_pytorch import preprocessing_pytorch as openpi_preprocessing # noqa: E402
# NOTE: Assumes PYTHONPATH is set to include OpenPI src as per instructions.
from openpi.models_pytorch.pi0_pytorch import PI0Pytorch # noqa: E402
from transformers import AutoTokenizer # noqa: E402
from lerobot.policies.pi0 import PI0Config, PI0Policy # noqa: E402
from lerobot.policies.pi0.processor_pi0 import make_pi0_pre_post_processors # noqa: E402
from lerobot.processor import PolicyAction, PolicyProcessorPipeline # noqa: E402
# TODO: ADDING DEFAULT IMAGES_FEATURES TO CONFIG
# Dimensions used to build the synthetic batches for the parity test.
DUMMY_ACTION_DIM = 32
DUMMY_STATE_DIM = 32
DUMMY_ACTION_HORIZON = 50
DUMMY_MAX_TOKEN_LEN = 48  # Default for PI0 (non-pi05)
# Run on CPU so the test does not require a GPU or large memory.
DEVICE = "cpu"
# Identity normalization stats (mean 0 / std 1, q01 0 / q99 1) so normalization
# steps are effectively no-ops during the comparison.
DUMMY_DATASET_STATS = {
    key: {
        "mean": torch.zeros(dim),
        "std": torch.ones(dim),
        "q01": torch.zeros(dim),
        "q99": torch.ones(dim),
    }
    for key, dim in (("observation.state", DUMMY_STATE_DIM), ("action", DUMMY_ACTION_DIM))
}
DUMMY_DATASET_STATS["images"] = {
    cam: {
        "mean": torch.zeros(3, 224, 224),
        "std": torch.ones(3, 224, 224),
        "q01": torch.zeros(3, 224, 224),
        "q99": torch.ones(3, 224, 224),
    }
    for cam in ("base_0_rgb", "left_wrist_0_rgb", "right_wrist_0_rgb")
}
class PI0BaseOriginalConfig:
    """Class-attribute config handed to the original OpenPI ``PI0Pytorch`` model.

    Used by ``instantiate_original_pi0`` below; the padded dimensions match the
    dummy constants used throughout this test module.
    """

    # Padded action dimensionality and action-chunk length.
    action_dim: int = DUMMY_ACTION_DIM
    action_horizon: int = DUMMY_ACTION_HORIZON
    # Backbone variants: PaliGemma 2B VLM plus a 300M-parameter action expert.
    paligemma_variant: str = "gemma_2b"
    action_expert_variant: str = "gemma_300m"
    precision: str = "float32"
    # False selects the plain pi0 (non-pi0.5) model variant.
    pi05: bool = False
    dtype: str = "float32"
def instantiate_lerobot_pi0(
    from_pretrained: bool = False,
) -> tuple[
    PI0Policy,
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """Build a LeRobot PI0 policy and its pre/post-processing pipelines.

    Args:
        from_pretrained: load the published ``lerobot/pi0_base`` checkpoint
            when True; otherwise create a randomly initialized policy.

    Returns:
        ``(policy, preprocessor, postprocessor)`` with the policy on DEVICE.
    """
    if not from_pretrained:
        policy = PI0Policy(
            PI0Config(max_action_dim=DUMMY_ACTION_DIM, max_state_dim=DUMMY_STATE_DIM, dtype="float32")
        )
    else:
        policy = PI0Policy.from_pretrained(pretrained_name_or_path="lerobot/pi0_base", strict=True)
    policy.to(DEVICE)
    policy.config.device = DEVICE
    pre, post = make_pi0_pre_post_processors(
        config=policy.config, dataset_stats=DUMMY_DATASET_STATS
    )
    return (policy, pre, post)
def _print_key_summary(label: str, keys) -> None:
    """Print up to five entries of a state-dict key list, plus an overflow count."""
    print(f"{label}: {len(keys)}")
    for key in keys[:5]:
        print(f"  - {key}")
    if len(keys) > 5:
        print(f"  ... and {len(keys) - 5} more")


# Fix: annotate model_path as `str | None` (implicit Optional is disallowed by
# PEP 484; the sibling pi05 test module already uses the explicit form).
def instantiate_original_pi0(from_pretrained: bool = False, model_path: str | None = None):
    """Build the original OpenPI ``PI0Pytorch`` model (plain pi0 variant).

    Args:
        from_pretrained: when True, load the converted PyTorch weights from the
            ``lerobot/pi0_base`` repo on the HuggingFace Hub.
        model_path: optional local directory containing a previously downloaded
            checkpoint; used instead of downloading when it exists.

    Returns:
        The model moved to DEVICE. If weight loading fails, the randomly
        initialized model is returned (best-effort, with a printed warning) so
        the comparison can still run.
    """
    config = PI0BaseOriginalConfig()
    policy = PI0Pytorch(config)
    if from_pretrained:
        try:
            print("Loading converted PyTorch weights from HuggingFace Hub (lerobot/pi0_base)...")
            # Imported lazily so the module stays importable without these deps.
            import safetensors.torch
            from huggingface_hub import snapshot_download

            if model_path and os.path.exists(model_path):
                cache_dir = model_path
                print(f"Using cached model from: {cache_dir}")
            else:
                cache_dir = snapshot_download(repo_id="lerobot/pi0_base", repo_type="model")
                print(f"Downloaded model to: {cache_dir}")
            model_file = os.path.join(cache_dir, "model.safetensors")
            if not os.path.exists(model_file):
                raise FileNotFoundError(f"No safetensors file found in {cache_dir}")
            state_dict = safetensors.torch.load_file(model_file)
            print(f"Loaded {len(state_dict)} parameters from safetensors")
            # strict=False: report key mismatches below instead of failing hard.
            missing_keys, unexpected_keys = policy.load_state_dict(state_dict, strict=False)
            if missing_keys:
                _print_key_summary("Missing keys", missing_keys)
            if unexpected_keys:
                _print_key_summary("Unexpected keys", unexpected_keys)
            if not missing_keys and not unexpected_keys:
                print("All pretrained weights loaded successfully!")
            else:
                print("Pretrained weights loaded with some missing/unexpected keys (this may be normal)")
        except Exception as e:
            # Best-effort fallback: keep the random init so the test still runs.
            print(f"Failed to load pretrained weights: {e}")
            print(" Using randomly initialized weights...")
            import traceback

            traceback.print_exc()
    policy.to(DEVICE)
    return policy
def create_dummy_data():
    """Build one synthetic batch: state, action chunk, three camera images, and a task prompt."""
    batch_size = 2  # small batch keeps the test cheap
    # Identical prompt for every sample so both implementations see the same text.
    prompt = "Pick up the red block and place it in the bin"
    batch = {
        "observation.state": torch.randn(
            batch_size, DUMMY_STATE_DIM, dtype=torch.float32, device=DEVICE
        ),
        "action": torch.randn(
            batch_size, DUMMY_ACTION_HORIZON, DUMMY_ACTION_DIM, dtype=torch.float32, device=DEVICE
        ),
    }
    # Images are sampled in [0, 1]; LeRobot converts to [-1, 1] internally.
    for cam_key in (
        "observation.images.base_0_rgb",
        "observation.images.left_wrist_0_rgb",
        "observation.images.right_wrist_0_rgb",
    ):
        batch[cam_key] = torch.rand(batch_size, 3, 224, 224, dtype=torch.float32, device=DEVICE)
    # Provide the task as a per-sample list of prompts.
    batch["task"] = [prompt] * batch_size
    return batch
def extract_lerobot_processed_inputs(lerobot_pi0, batch):
    """Run LeRobot's internal preprocessing and return its tensors for reuse by OpenPI."""
    # Tokenized language via LeRobot's internal method.
    tokens, token_masks = lerobot_pi0._tokenize_language(batch)
    # Preprocessed images via LeRobot's internal method (eval mode).
    imgs, image_masks = lerobot_pi0._preprocess_images(batch, train=False)
    # Dummy AR / loss token masks required by the original implementation's format.
    ar_mask = torch.zeros_like(tokens, dtype=torch.int32)
    loss_mask = torch.ones_like(token_masks, dtype=torch.bool)
    return imgs, image_masks, tokens, token_masks, ar_mask, loss_mask
class PI0Observation:
    """Plain attribute container mimicking the original OpenPI observation object."""

    def __init__(
        self,
        state,
        images,
        image_masks,
        tokenized_prompt,
        tokenized_prompt_mask,
        token_ar_mask,
        token_loss_mask,
    ):
        # Store every field verbatim; OpenPI's preprocessing reads these attributes.
        for attr_name, value in (
            ("state", state),
            ("images", images),
            ("image_masks", image_masks),
            ("tokenized_prompt", tokenized_prompt),
            ("tokenized_prompt_mask", tokenized_prompt_mask),
            ("token_ar_mask", token_ar_mask),
            ("token_loss_mask", token_loss_mask),
        ):
            setattr(self, attr_name, value)
def create_original_observation_with_openpi_preprocessing(batch):
    """Create observation object for OpenPI using OpenPI's own preprocessing.

    Unlike the pi05 variant, the plain pi0 prompt contains only the task text
    (newline-terminated), tokenized with the PaliGemma tokenizer, and the
    assembled observation is run through ``preprocess_observation_pytorch``.

    Args:
        batch: LeRobot-style batch with ``observation.state``, three
            ``observation.images.*`` tensors in [0, 1], and an optional ``task``
            (str or list of str).

    Returns:
        The processed observation produced by ``preprocess_observation_pytorch``.
    """
    batch_size = batch["observation.state"].shape[0]
    device = batch["observation.state"].device
    # Create tokenizer for OpenPI (same as LeRobot uses)
    tokenizer = AutoTokenizer.from_pretrained("google/paligemma-3b-pt-224")
    # Get task description
    if "task" in batch:
        tasks = batch["task"]
        if isinstance(tasks, str):
            # Single string: add newline if not present, then convert to list
            if not tasks.endswith("\n"):
                tasks = f"{tasks}\n"
            tasks = [tasks]
        elif isinstance(tasks, list) and all(isinstance(t, str) for t in tasks):
            # List of strings: add newline to each if not present
            tasks = [t if t.endswith("\n") else f"{t}\n" for t in tasks]
            if len(tasks) == 1:
                # Expand to batch size
                tasks = tasks * batch_size
            if len(tasks) != batch_size:
                raise ValueError(f"Expected batch size {batch_size}, got {len(tasks)}")
        # If task is neither string nor list of strings, leave unchanged
    else:
        # Default task if not provided
        tasks = ["Pick up the object\n"] * batch_size
    # Tokenize with max_length padding to match OpenPI's expected format
    tokenized = tokenizer(
        tasks,
        padding="max_length",
        padding_side="right",
        truncation=True,
        max_length=DUMMY_MAX_TOKEN_LEN,
        return_tensors="pt",
    )
    lang_tokens = tokenized["input_ids"].to(device)
    lang_masks = tokenized["attention_mask"].to(device, dtype=torch.bool)
    # Create dummy token_ar_mask and token_loss_mask for OpenPI
    token_ar_mask = torch.zeros_like(lang_tokens, dtype=torch.int32)
    token_loss_mask = torch.ones_like(lang_masks, dtype=torch.bool)
    # Convert LeRobot images format to OpenPI format (convert [0,1] to [-1,1] range)
    image_dict = {
        "base_0_rgb": batch["observation.images.base_0_rgb"] * 2.0 - 1.0,
        "left_wrist_0_rgb": batch["observation.images.left_wrist_0_rgb"] * 2.0 - 1.0,
        "right_wrist_0_rgb": batch["observation.images.right_wrist_0_rgb"] * 2.0 - 1.0,
    }
    # Create image masks (all ones for real images)
    image_masks_dict = {}
    for key in image_dict:
        image_masks_dict[key] = torch.ones(batch_size, dtype=torch.bool, device=device)
    # Create raw observation object (before preprocessing)
    raw_observation = PI0Observation(
        state=batch["observation.state"],
        images=image_dict,
        image_masks=image_masks_dict,
        tokenized_prompt=lang_tokens,
        tokenized_prompt_mask=lang_masks,
        token_ar_mask=token_ar_mask,
        token_loss_mask=token_loss_mask,
    )
    # Now use OpenPI's preprocessing
    processed_obs = openpi_preprocessing.preprocess_observation_pytorch(raw_observation, train=False)
    return processed_obs
def create_original_observation_from_lerobot(lerobot_pi0, batch):
    """Build an OpenPI-style observation from LeRobot's own preprocessed tensors.

    This guarantees both implementations consume identical inputs when compared.
    """
    processed = extract_lerobot_processed_inputs(lerobot_pi0, batch)
    images, img_masks, lang_tokens, lang_masks, token_ar_mask, token_loss_mask = processed
    # LeRobot yields images/masks as positional lists; OpenPI keys them by camera.
    camera_names = ("base_0_rgb", "left_wrist_0_rgb", "right_wrist_0_rgb")
    image_dict = {name: images[i] for i, name in enumerate(camera_names)}
    image_masks_dict = {name: img_masks[i] for i, name in enumerate(camera_names)}
    return PI0Observation(
        state=batch["observation.state"],
        images=image_dict,
        image_masks=image_masks_dict,
        tokenized_prompt=lang_tokens,
        tokenized_prompt_mask=lang_masks,
        token_ar_mask=token_ar_mask,
        token_loss_mask=token_loss_mask,
    )
def test_pi0_original_vs_lerobot():
    """End-to-end parity test: LeRobot PI0 vs the original OpenPI implementation.

    Both models load the converted ``lerobot/pi0_base`` weights, consume the same
    synthetic batch (each through its own preprocessing pipeline), and must produce
    action chunks that match within 1e-4 absolute tolerance.
    """
    print("Initializing models...")
    # Postprocessor is not needed: we compare raw action chunks.
    lerobot_pi0, lerobot_preprocessor, _lerobot_postprocessor = instantiate_lerobot_pi0(
        from_pretrained=True
    )
    original_pi0 = instantiate_original_pi0(from_pretrained=True)
    print("Creating dummy data...")
    batch = create_dummy_data()
    # Deep copy so preprocessing on one path cannot mutate the other's inputs.
    batch_lerobot = deepcopy(batch)
    # Test each model with its own preprocessing (realistic end-to-end comparison).
    print("\nTest each model with its own preprocessing")
    print("Creating observation for OpenPI using OpenPI's own preprocessing...")
    pi0_obs_openpi = create_original_observation_with_openpi_preprocessing(batch)
    print(f"Task prompt: '{batch['task'][0]}'")
    print(f"OpenPI tokenized prompt shape: {pi0_obs_openpi.tokenized_prompt.shape}")
    print(f"OpenPI image shapes: {[img.shape for img in pi0_obs_openpi.images.values()]}")
    print(f"OpenPI state shape: {pi0_obs_openpi.state.shape}")
    print("Testing OpenPI with own preprocessing...")
    original_pi0.eval()
    torch.manual_seed(42)  # identical denoising start for both models
    batch_size = batch["observation.state"].shape[0]
    fixed_noise = torch.randn(
        (batch_size, DUMMY_ACTION_HORIZON, DUMMY_ACTION_DIM), dtype=torch.float32, device=DEVICE
    )
    with torch.no_grad():
        openpi_actions = original_pi0.sample_actions(
            device=DEVICE, observation=pi0_obs_openpi, noise=fixed_noise, num_steps=10
        )
    print(f"OpenPI (own preprocessing) Actions shape: {openpi_actions.shape}")
    print(f"OpenPI (own preprocessing) Actions mean: {openpi_actions.mean().item():.6f}")
    print(f"OpenPI (own preprocessing) Actions std: {openpi_actions.std().item():.6f}")
    print("Testing LeRobot with own preprocessing...")
    lerobot_pi0.eval()
    torch.manual_seed(42)  # same seed as the OpenPI run
    batch_lerobot_processed = lerobot_preprocessor(batch_lerobot)
    with torch.no_grad():
        # Shape: (batch_size, n_action_steps, action_dim)
        lerobot_actions_own = lerobot_pi0.predict_action_chunk(batch_lerobot_processed)
    print(f"LeRobot (own preprocessing) Actions shape: {lerobot_actions_own.shape}")
    print(f"LeRobot (own preprocessing) Actions mean: {lerobot_actions_own.mean().item():.6f}")
    print(f"LeRobot (own preprocessing) Actions std: {lerobot_actions_own.std().item():.6f}")
    print("\nComparing end-to-end implementations:")
    max_abs_diff = torch.abs(lerobot_actions_own - openpi_actions).max().item()
    print(f"Max absolute difference: {max_abs_diff:.6f}")
    # Single strict bound: max |diff| < 1e-4 implies the looser allclose checks
    # (atol=1e-4 and atol=1e-2) that were previously asserted redundantly.
    assert max_abs_diff < 1e-4, f"PI0 outputs diverge: max |diff| = {max_abs_diff:.6f}"
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/policies/pi0_pi05/test_pi0_original_vs_lerobot.py",
"license": "Apache License 2.0",
"lines": 356,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:src/lerobot/scripts/lerobot_info.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Use this script to get a quick summary of your system config.
It should be able to run without any of LeRobot's dependencies or LeRobot itself installed.
Example:
```shell
lerobot-info
```
"""
import importlib
import platform
import shutil
import subprocess
from importlib.metadata import PackageNotFoundError, distribution
PACKAGE_NAME = "lerobot"
def get_ffmpeg_version() -> str:
    """Return the installed ffmpeg version, or 'N/A' when ffmpeg is not on PATH."""
    ffmpeg_bin = shutil.which("ffmpeg")
    if ffmpeg_bin is None:
        return "N/A"
    try:
        proc = subprocess.run([ffmpeg_bin, "-version"], capture_output=True, text=True, check=True)
        # First stdout line looks like "ffmpeg version X.Y.Z ..."; token 2 is the version.
        first_line = proc.stdout.splitlines()[0]
        return first_line.split(" ")[2]
    except (subprocess.SubprocessError, IndexError):
        return "Installed (version parsing failed)"
def get_package_version(package_name: str) -> str:
    """Return a package's ``__version__`` if importable, otherwise 'N/A'."""
    try:
        mod = importlib.import_module(package_name)
    except ImportError:
        return "N/A"
    # Some importable modules (e.g. stdlib ones) expose no __version__ attribute.
    return getattr(mod, "__version__", "Installed (version not found)")
def get_sys_info() -> dict[str, str]:
    """Collect basic system and package info to help when tracking issues & bugs."""
    # General package versions.
    info = {
        "LeRobot version": get_package_version(PACKAGE_NAME),
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Huggingface Hub version": get_package_version("huggingface_hub"),
        "Datasets version": get_package_version("datasets"),
        "Numpy version": get_package_version("numpy"),
        "FFmpeg version": get_ffmpeg_version(),
    }
    # PyTorch / GPU details; every value defaults to "N/A" when torch is missing.
    torch_version = torch_cuda_available = cuda_version = gpu_model = "N/A"
    try:
        import torch

        torch_version = str(torch.__version__)
        torch_cuda_available = torch.cuda.is_available()
        if torch_cuda_available:
            cuda_version = str(torch.version.cuda)
            # Name of the first visible GPU.
            gpu_model = torch.cuda.get_device_name(0)
    except ImportError:
        pass
    info["PyTorch version"] = torch_version
    info["Is PyTorch built with CUDA support?"] = str(torch_cuda_available)
    info["Cuda version"] = cuda_version
    info["GPU model"] = gpu_model
    info["Using GPU in script?"] = "<fill in>"
    # Console scripts registered by the installed lerobot distribution.
    scripts = "N/A"
    try:
        dist = distribution(PACKAGE_NAME)
        scripts = [ep.name for ep in dist.entry_points if ep.group == "console_scripts"]
    except PackageNotFoundError:
        pass
    info[f"{PACKAGE_NAME} scripts"] = str(scripts)
    return info
def format_dict_for_markdown(d: dict[str, str]) -> str:
    """Render a dict as a markdown bulleted list, one '- key: value' line per entry."""
    bullet_lines = [f"- {key}: {value}" for key, value in d.items()]
    return "\n".join(bullet_lines)
def main():
    """Print the collected system info as a markdown-friendly bulleted list."""
    print(format_dict_for_markdown(get_sys_info()))
# Entry point when executed directly as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/scripts/lerobot_info.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/envs/libero.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
from collections import defaultdict
from collections.abc import Callable, Iterable, Mapping, Sequence
from functools import partial
from pathlib import Path
from typing import Any
import gymnasium as gym
import numpy as np
import torch
from gymnasium import spaces
from libero.libero import benchmark, get_libero_path
from libero.libero.envs import OffScreenRenderEnv
from lerobot.processor import RobotObservation
def _parse_camera_names(camera_name: str | Sequence[str]) -> list[str]:
"""Normalize camera_name into a non-empty list of strings."""
if isinstance(camera_name, str):
cams = [c.strip() for c in camera_name.split(",") if c.strip()]
elif isinstance(camera_name, (list | tuple)):
cams = [str(c).strip() for c in camera_name if str(c).strip()]
else:
raise TypeError(f"camera_name must be str or sequence[str], got {type(camera_name).__name__}")
if not cams:
raise ValueError("camera_name resolved to an empty list.")
return cams
def _get_suite(name: str) -> benchmark.Benchmark:
    """Look up and instantiate a LIBERO benchmark suite by name, validating the result."""
    registry = benchmark.get_benchmark_dict()
    if name not in registry:
        available = ", ".join(sorted(registry.keys()))
        raise ValueError(f"Unknown LIBERO suite '{name}'. Available: {available}")
    suite = registry[name]()
    # A suite without tasks is unusable downstream; fail early with a clear message.
    if not getattr(suite, "tasks", None):
        raise ValueError(f"Suite '{name}' has no tasks.")
    return suite
def _select_task_ids(total_tasks: int, task_ids: Iterable[int] | None) -> list[int]:
"""Validate/normalize task ids. If None → all tasks."""
if task_ids is None:
return list(range(total_tasks))
ids = sorted({int(t) for t in task_ids})
for t in ids:
if t < 0 or t >= total_tasks:
raise ValueError(f"task_id {t} out of range [0, {total_tasks - 1}].")
return ids
def get_task_init_states(task_suite: Any, i: int) -> np.ndarray:
    """Load the recorded initial states for task *i* of *task_suite* from disk.

    The file location is derived from the LIBERO install path plus the task's
    own folder/filename metadata. Returns the loaded payload (annotated as an
    ndarray; actual content comes from the suite's shipped data files).
    """
    init_states_path = (
        Path(get_libero_path("init_states"))
        / task_suite.tasks[i].problem_folder
        / task_suite.tasks[i].init_states_file
    )
    # weights_only=False: the payload is not a plain tensor state_dict — presumably
    # numpy arrays per the return annotation; data ships with LIBERO (trusted, local).
    init_states = torch.load(init_states_path, weights_only=False) # nosec B614
    return init_states
def get_libero_dummy_action():
    """Get dummy/no-op action, used to roll out the simulation while the robot does nothing."""
    # Six zero motion components followed by a -1 gripper command.
    return [0] * 6 + [-1]
ACTION_DIM = 7  # matches the 7-element dummy action above
ACTION_LOW = -1.0  # lower bound for every action dimension
ACTION_HIGH = 1.0  # upper bound for every action dimension
# Per-suite episode step limits; LiberoEnv falls back to 500 for unknown suites.
TASK_SUITE_MAX_STEPS: dict[str, int] = {
    "libero_spatial": 280, # longest training demo has 193 steps
    "libero_object": 280, # longest training demo has 254 steps
    "libero_goal": 300, # longest training demo has 270 steps
    "libero_10": 520, # longest training demo has 505 steps
    "libero_90": 400, # longest training demo has 373 steps
}
class LiberoEnv(gym.Env):
    """Gymnasium wrapper around a single off-screen-rendered LIBERO task.

    Camera frames are exposed under LeRobot-style keys (e.g. "image", "image2"),
    and for obs_type="pixels_agent_pos" the robot's end-effector, gripper, and
    joint state are exposed as well.
    """

    metadata = {"render_modes": ["rgb_array"], "render_fps": 80}

    def __init__(
        self,
        task_suite: Any,
        task_id: int,
        task_suite_name: str,
        episode_length: int | None = None,
        camera_name: str | Sequence[str] = "agentview_image,robot0_eye_in_hand_image",
        obs_type: str = "pixels",
        render_mode: str = "rgb_array",
        observation_width: int = 256,
        observation_height: int = 256,
        visualization_width: int = 640,
        visualization_height: int = 480,
        init_states: bool = True,
        episode_index: int = 0,
        n_envs: int = 1,
        camera_name_mapping: dict[str, str] | None = None,
        num_steps_wait: int = 10,
        control_mode: str = "relative",
    ):
        """Create the underlying simulator env and declare observation/action spaces.

        Args:
            task_suite: LIBERO suite object providing tasks and init states.
            task_id: Index of the task inside the suite.
            task_suite_name: Suite name used to look up the default max episode steps.
            episode_length: Optional override for the max episode steps.
            camera_name: Camera(s) to expose; comma-separated string or sequence.
            obs_type: "pixels" or "pixels_agent_pos" ("state" is rejected).
            render_mode: Only "rgb_array" is listed in `metadata`.
            observation_width: Rendered camera width in pixels.
            observation_height: Rendered camera height in pixels.
            visualization_width: Stored but not otherwise used in this class.
            visualization_height: Stored but not otherwise used in this class.
            init_states: If True, reset into the suite's recorded initial states.
            episode_index: Index of this sub-env; seeds its init-state cursor.
            n_envs: Number of parallel sub-envs; used as the init-state stride.
            camera_name_mapping: Raw camera name -> exposed observation key.
            num_steps_wait: No-op simulator steps after reset so physics settles.
            control_mode: "relative" (delta) or "absolute" controller mode.
        """
        super().__init__()
        self.task_id = task_id
        self.obs_type = obs_type
        self.render_mode = render_mode
        self.observation_width = observation_width
        self.observation_height = observation_height
        self.visualization_width = visualization_width
        self.visualization_height = visualization_height
        self.init_states = init_states
        self.camera_name = _parse_camera_names(
            camera_name
        ) # agentview_image (main) or robot0_eye_in_hand_image (wrist)
        # Map raw camera names to "image1" and "image2".
        # The preprocessing step `preprocess_observation` will then prefix these with `.images.*`,
        # following the LeRobot convention (e.g., `observation.images.image`, `observation.images.image2`).
        # This ensures the policy consistently receives observations in the
        # expected format regardless of the original camera naming.
        if camera_name_mapping is None:
            camera_name_mapping = {
                "agentview_image": "image",
                "robot0_eye_in_hand_image": "image2",
            }
        self.camera_name_mapping = camera_name_mapping
        self.num_steps_wait = num_steps_wait
        self.episode_index = episode_index
        self.episode_length = episode_length
        # Load once and keep
        self._init_states = get_task_init_states(task_suite, self.task_id) if self.init_states else None
        self._reset_stride = n_envs # when performing a reset, append `_reset_stride` to `init_state_id`.
        self.init_state_id = self.episode_index # tie each sub-env to a fixed init state
        self._env = self._make_envs_task(task_suite, self.task_id)
        default_steps = 500
        self._max_episode_steps = (
            TASK_SUITE_MAX_STEPS.get(task_suite_name, default_steps)
            if self.episode_length is None
            else self.episode_length
        )
        self.control_mode = control_mode
        # One uint8 HxWx3 Box per exposed camera, keyed by the mapped name.
        images = {}
        for cam in self.camera_name:
            images[self.camera_name_mapping[cam]] = spaces.Box(
                low=0,
                high=255,
                shape=(self.observation_height, self.observation_width, 3),
                dtype=np.uint8,
            )
        if self.obs_type == "state":
            raise NotImplementedError(
                "The 'state' observation type is not supported in LiberoEnv. "
                "Please switch to an image-based obs_type (e.g. 'pixels', 'pixels_agent_pos')."
            )
        elif self.obs_type == "pixels":
            self.observation_space = spaces.Dict(
                {
                    "pixels": spaces.Dict(images),
                }
            )
        elif self.obs_type == "pixels_agent_pos":
            self.observation_space = spaces.Dict(
                {
                    "pixels": spaces.Dict(images),
                    "robot_state": spaces.Dict(
                        {
                            "eef": spaces.Dict(
                                {
                                    "pos": spaces.Box(low=-np.inf, high=np.inf, shape=(3,), dtype=np.float64),
                                    "quat": spaces.Box(
                                        low=-np.inf, high=np.inf, shape=(4,), dtype=np.float64
                                    ),
                                    "mat": spaces.Box(
                                        low=-np.inf, high=np.inf, shape=(3, 3), dtype=np.float64
                                    ),
                                }
                            ),
                            "gripper": spaces.Dict(
                                {
                                    "qpos": spaces.Box(
                                        low=-np.inf, high=np.inf, shape=(2,), dtype=np.float64
                                    ),
                                    "qvel": spaces.Box(
                                        low=-np.inf, high=np.inf, shape=(2,), dtype=np.float64
                                    ),
                                }
                            ),
                            "joints": spaces.Dict(
                                {
                                    "pos": spaces.Box(low=-np.inf, high=np.inf, shape=(7,), dtype=np.float64),
                                    "vel": spaces.Box(low=-np.inf, high=np.inf, shape=(7,), dtype=np.float64),
                                }
                            ),
                        }
                    ),
                }
            )
        self.action_space = spaces.Box(
            low=ACTION_LOW, high=ACTION_HIGH, shape=(ACTION_DIM,), dtype=np.float32
        )

    def render(self):
        """Return the main camera frame for visualization."""
        raw_obs = self._env.env._get_observations()
        image = self._format_raw_obs(raw_obs)["pixels"]["image"]
        image = image[::-1, ::-1] # flip both H and W for visualization
        return image

    def _make_envs_task(self, task_suite: Any, task_id: int = 0):
        """Instantiate the OffScreenRenderEnv for `task_id` and cache its name/description."""
        task = task_suite.get_task(task_id)
        self.task = task.name
        self.task_description = task.language
        task_bddl_file = os.path.join(get_libero_path("bddl_files"), task.problem_folder, task.bddl_file)
        env_args = {
            "bddl_file_name": task_bddl_file,
            "camera_heights": self.observation_height,
            "camera_widths": self.observation_width,
        }
        env = OffScreenRenderEnv(**env_args)
        env.reset()
        return env

    def _format_raw_obs(self, raw_obs: RobotObservation) -> RobotObservation:
        """Convert a raw simulator observation into the structure declared in observation_space."""
        images = {}
        for camera_name in self.camera_name:
            image = raw_obs[camera_name]
            images[self.camera_name_mapping[camera_name]] = image
        eef_pos = raw_obs.get("robot0_eef_pos")
        eef_quat = raw_obs.get("robot0_eef_quat")
        # rotation matrix from controller
        eef_mat = self._env.robots[0].controller.ee_ori_mat if eef_pos is not None else None
        gripper_qpos = raw_obs.get("robot0_gripper_qpos")
        gripper_qvel = raw_obs.get("robot0_gripper_qvel")
        joint_pos = raw_obs.get("robot0_joint_pos")
        joint_vel = raw_obs.get("robot0_joint_vel")
        obs = {
            "pixels": images,
            "robot_state": {
                "eef": {
                    "pos": eef_pos, # (3,)
                    "quat": eef_quat, # (4,)
                    "mat": eef_mat, # (3, 3)
                },
                "gripper": {
                    "qpos": gripper_qpos, # (2,)
                    "qvel": gripper_qvel, # (2,)
                },
                "joints": {
                    "pos": joint_pos, # (7,)
                    "vel": joint_vel, # (7,)
                },
            },
        }
        if self.obs_type == "pixels":
            return {"pixels": images.copy()}
        if self.obs_type == "pixels_agent_pos":
            # Validate required fields are present
            if eef_pos is None or eef_quat is None or gripper_qpos is None:
                raise ValueError(
                    f"Missing required robot state fields in raw observation. "
                    f"Got eef_pos={eef_pos is not None}, eef_quat={eef_quat is not None}, "
                    f"gripper_qpos={gripper_qpos is not None}"
                )
            return obs
        raise NotImplementedError(
            f"The observation type '{self.obs_type}' is not supported in LiberoEnv. "
            "Please switch to an image-based obs_type (e.g. 'pixels', 'pixels_agent_pos')."
        )

    def reset(self, seed=None, **kwargs):
        """Reset the task, optionally restore a recorded init state, and let physics settle.

        Returns:
            (observation, info) with info["is_success"] initialized to False.
        """
        super().reset(seed=seed)
        self._env.seed(seed)
        raw_obs = self._env.reset()
        if self.init_states and self._init_states is not None:
            raw_obs = self._env.set_init_state(self._init_states[self.init_state_id % len(self._init_states)])
            self.init_state_id += self._reset_stride # Change init_state_id when reset
        # After reset, objects may be unstable (slightly floating, intersecting, etc.).
        # Step the simulator with a no-op action for a few frames so everything settles.
        # Increasing this value can improve determinism and reproducibility across resets.
        for _ in range(self.num_steps_wait):
            raw_obs, _, _, _ = self._env.step(get_libero_dummy_action())
        if self.control_mode == "absolute":
            for robot in self._env.robots:
                robot.controller.use_delta = False
        elif self.control_mode == "relative":
            for robot in self._env.robots:
                robot.controller.use_delta = True
        else:
            raise ValueError(f"Invalid control mode: {self.control_mode}")
        observation = self._format_raw_obs(raw_obs)
        info = {"is_success": False}
        return observation, info

    def step(self, action: np.ndarray) -> tuple[RobotObservation, float, bool, bool, dict[str, Any]]:
        """Apply one 1-D action and return (obs, reward, terminated, truncated, info).

        On termination the env auto-resets itself; the returned observation is the
        one captured *before* that reset, and the final episode stats are placed
        in info["final_info"].
        """
        if action.ndim != 1:
            raise ValueError(
                f"Expected action to be 1-D (shape (action_dim,)), "
                f"but got shape {action.shape} with ndim={action.ndim}"
            )
        raw_obs, reward, done, info = self._env.step(action)
        is_success = self._env.check_success()
        terminated = done or is_success
        info.update(
            {
                "task": self.task,
                "task_id": self.task_id,
                "done": done,
                "is_success": is_success,
            }
        )
        observation = self._format_raw_obs(raw_obs)
        if terminated:
            info["final_info"] = {
                "task": self.task,
                "task_id": self.task_id,
                "done": bool(done),
                "is_success": bool(is_success),
            }
            self.reset()
        # NOTE(review): truncation at self._max_episode_steps is not applied here;
        # presumably a TimeLimit-style wrapper enforces it — confirm at the call site.
        truncated = False
        return observation, reward, terminated, truncated, info

    def close(self):
        """Shut down the underlying simulator environment."""
        self._env.close()
def _make_env_fns(
*,
suite,
suite_name: str,
task_id: int,
n_envs: int,
camera_names: list[str],
episode_length: int | None,
init_states: bool,
gym_kwargs: Mapping[str, Any],
control_mode: str,
) -> list[Callable[[], LiberoEnv]]:
"""Build n_envs factory callables for a single (suite, task_id)."""
def _make_env(episode_index: int, **kwargs) -> LiberoEnv:
local_kwargs = dict(kwargs)
return LiberoEnv(
task_suite=suite,
task_id=task_id,
task_suite_name=suite_name,
camera_name=camera_names,
init_states=init_states,
episode_length=episode_length,
episode_index=episode_index,
n_envs=n_envs,
control_mode=control_mode,
**local_kwargs,
)
fns: list[Callable[[], LiberoEnv]] = []
for episode_index in range(n_envs):
fns.append(partial(_make_env, episode_index, **gym_kwargs))
return fns
# ---- Main API ----------------------------------------------------------------
def create_libero_envs(
    task: str,
    n_envs: int,
    gym_kwargs: dict[str, Any] | None = None,
    camera_name: str | Sequence[str] = "agentview_image,robot0_eye_in_hand_image",
    init_states: bool = True,
    env_cls: Callable[[Sequence[Callable[[], Any]]], Any] | None = None,
    control_mode: str = "relative",
    episode_length: int | None = None,
) -> dict[str, dict[int, Any]]:
    """Create vectorized LIBERO environments, one vec-env per (suite, task).

    Returns:
        dict[suite_name][task_id] -> env_cls([...]) wrapping exactly n_envs factories.

    Notes:
        - n_envs counts rollouts *per task* (episode_index = 0..n_envs-1).
        - `task` may be a single suite name or a comma-separated list of suites.
        - `gym_kwargs` may carry `task_ids` (list[int]) to restrict tasks per suite.
    """
    if env_cls is None or not callable(env_cls):
        raise ValueError("env_cls must be a callable that wraps a list of environment factory callables.")
    if not isinstance(n_envs, int) or n_envs <= 0:
        raise ValueError(f"n_envs must be a positive int; got {n_envs}.")
    remaining_kwargs = dict(gym_kwargs or {})
    # Optional per-suite task restriction travels inside gym_kwargs.
    task_ids_filter = remaining_kwargs.pop("task_ids", None)
    cams = _parse_camera_names(camera_name)
    suite_names = [part.strip() for part in str(task).split(",") if part.strip()]
    if not suite_names:
        raise ValueError("`task` must contain at least one LIBERO suite name.")
    print(
        f"Creating LIBERO envs | suites={suite_names} | n_envs(per task)={n_envs} | init_states={init_states}"
    )
    if task_ids_filter is not None:
        print(f"Restricting to task_ids={task_ids_filter}")
    result: dict[str, dict[int, Any]] = {}
    for suite_name in suite_names:
        suite = _get_suite(suite_name)
        total = len(suite.tasks)
        selected = _select_task_ids(total, task_ids_filter)
        if not selected:
            raise ValueError(f"No tasks selected for suite '{suite_name}' (available: {total}).")
        task_map: dict[int, Any] = {}
        for tid in selected:
            factories = _make_env_fns(
                suite=suite,
                episode_length=episode_length,
                suite_name=suite_name,
                task_id=tid,
                n_envs=n_envs,
                camera_names=cams,
                init_states=init_states,
                gym_kwargs=remaining_kwargs,
                control_mode=control_mode,
            )
            task_map[tid] = env_cls(factories)
            print(f"Built vec env | suite={suite_name} | task_id={tid} | n_envs={n_envs}")
        result[suite_name] = task_map
    # Plain dicts only, for predictability.
    return result
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/envs/libero.py",
"license": "Apache License 2.0",
"lines": 403,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/dataset/use_dataset_image_transforms.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example demonstrates how to use image transforms with LeRobot datasets for data augmentation during training.
Image transforms are applied to camera frames to improve model robustness and generalization. They are applied
at training time only, not during dataset recording, allowing you to experiment with different augmentations
without re-recording data.
"""
import torch
from torchvision.transforms import v2
from torchvision.transforms.functional import to_pil_image
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.transforms import ImageTransformConfig, ImageTransforms, ImageTransformsConfig
def save_image(tensor, filename):
    """Save a [C, H, W] image tensor to *filename* as an image file.

    Values above 1.0 are assumed to be in the 0-255 range and rescaled; the
    result is clamped to [0, 1] before PIL conversion. Tensors that are not
    3-D are skipped with a message instead of raising.
    """
    if tensor.dim() == 3:  # [C, H, W]
        # Heuristic: a max above 1.0 means the tensor holds 0-255 pixel values.
        if tensor.max() > 1.0:
            tensor = tensor / 255.0
        tensor = torch.clamp(tensor, 0.0, 1.0)
        pil_image = to_pil_image(tensor)
        pil_image.save(filename)
        # Fixed: the messages previously printed "(unknown)" instead of the
        # actual filename (lost f-string placeholder).
        print(f"Saved: {filename}")
    else:
        print(f"Skipped {filename}: unexpected tensor shape {tensor.shape}")
def example_1_default_transforms():
    """Example 1: Use default transform configuration and save original vs transformed images"""
    print("\n Example 1: Default Transform Configuration with Image Saving")
    repo_id = "pepijn223/record_main_0" # Example dataset
    try:
        # Load dataset without transforms (original)
        dataset_original = LeRobotDataset(repo_id=repo_id)
        # Load dataset with transforms enabled
        transforms_config = ImageTransformsConfig(
            enable=True, # Enable transforms (disabled by default)
            max_num_transforms=2, # Apply up to 2 transforms per frame
            random_order=False, # Apply in standard order
        )
        dataset_with_transforms = LeRobotDataset(
            repo_id=repo_id, image_transforms=ImageTransforms(transforms_config)
        )
        # Save original and transformed images for comparison
        if len(dataset_original) > 0:
            frame_idx = 0 # Use first frame
            original_sample = dataset_original[frame_idx]
            transformed_sample = dataset_with_transforms[frame_idx]
            print(f"Saving comparison images (frame {frame_idx}):")
            for cam_key in dataset_original.meta.camera_keys:
                if cam_key in original_sample and cam_key in transformed_sample:
                    # Turn the feature key into a filesystem-safe filename stem.
                    cam_name = cam_key.replace(".", "_").replace("/", "_")
                    # Save original and transformed images
                    save_image(original_sample[cam_key], f"{cam_name}_original.png")
                    save_image(transformed_sample[cam_key], f"{cam_name}_transformed.png")
    except Exception as e:
        # Best-effort example: report the failure and continue instead of crashing.
        print(f"Could not load dataset '{repo_id}': {e}")
def example_2_custom_transforms():
    """Example 2: Create custom transform configuration and save examples"""
    print("\n Example 2: Custom Transform Configuration")
    repo_id = "pepijn223/record_main_0" # Example dataset
    try:
        # Create custom transform configuration with strong effects
        custom_transforms_config = ImageTransformsConfig(
            enable=True,
            max_num_transforms=2, # Apply up to 2 transforms per frame
            random_order=True, # Apply transforms in random order
            tfs={
                "brightness": ImageTransformConfig(
                    weight=1.0,
                    type="ColorJitter",
                    kwargs={"brightness": (0.5, 1.5)}, # Strong brightness range
                ),
                "contrast": ImageTransformConfig(
                    weight=1.0, # Higher weight = more likely to be selected
                    type="ColorJitter",
                    kwargs={"contrast": (0.6, 1.4)}, # Strong contrast
                ),
                "sharpness": ImageTransformConfig(
                    weight=0.5, # Lower weight = less likely to be selected
                    type="SharpnessJitter",
                    kwargs={"sharpness": (0.2, 2.0)}, # Strong sharpness variation
                ),
            },
        )
        dataset_with_custom_transforms = LeRobotDataset(
            repo_id=repo_id, image_transforms=ImageTransforms(custom_transforms_config)
        )
        # Save examples with strong transforms
        if len(dataset_with_custom_transforms) > 0:
            sample = dataset_with_custom_transforms[0]
            print("Saving custom transform examples:")
            for cam_key in dataset_with_custom_transforms.meta.camera_keys:
                if cam_key in sample:
                    # Turn the feature key into a filesystem-safe filename stem.
                    cam_name = cam_key.replace(".", "_").replace("/", "_")
                    save_image(sample[cam_key], f"{cam_name}_custom_transforms.png")
    except Exception as e:
        # Best-effort example: report the failure and continue instead of crashing.
        print(f"Could not load dataset '{repo_id}': {e}")
def example_3_torchvision_transforms():
    """Example 3: Use pure torchvision transforms and save examples"""
    print("\n Example 3: Pure Torchvision Transforms")
    repo_id = "pepijn223/record_main_0" # Example dataset
    try:
        # Create torchvision transform pipeline
        torchvision_transforms = v2.Compose(
            [
                v2.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1),
                v2.GaussianBlur(kernel_size=3, sigma=(0.1, 2.0)),
                v2.RandomRotation(degrees=10), # Small rotation
            ]
        )
        dataset_with_torchvision = LeRobotDataset(repo_id=repo_id, image_transforms=torchvision_transforms)
        # Save examples with torchvision transforms
        if len(dataset_with_torchvision) > 0:
            sample = dataset_with_torchvision[0]
            print("Saving torchvision transform examples:")
            for cam_key in dataset_with_torchvision.meta.camera_keys:
                if cam_key in sample:
                    # Turn the feature key into a filesystem-safe filename stem.
                    cam_name = cam_key.replace(".", "_").replace("/", "_")
                    save_image(sample[cam_key], f"{cam_name}_torchvision.png")
    except Exception as e:
        # Best-effort example: report the failure and continue instead of crashing.
        print(f"Could not load dataset '{repo_id}': {e}")
def main():
    """Run every image-transform example in order."""
    print("LeRobot Dataset Image Transforms Examples")
    examples = (
        example_1_default_transforms,
        example_2_custom_transforms,
        example_3_torchvision_transforms,
    )
    for run_example in examples:
        run_example()
# Entry point when executed directly as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/dataset/use_dataset_image_transforms.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/phone_to_so100/evaluate.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.configs.types import FeatureType, PolicyFeature
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from lerobot.datasets.utils import combine_feature_dicts
from lerobot.model.kinematics import RobotKinematics
from lerobot.policies.act.modeling_act import ACTPolicy
from lerobot.policies.factory import make_pre_post_processors
from lerobot.processor import (
RobotAction,
RobotObservation,
RobotProcessorPipeline,
make_default_teleop_action_processor,
)
from lerobot.processor.converters import (
observation_to_transition,
robot_action_observation_to_transition,
transition_to_observation,
transition_to_robot_action,
)
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.robots.so_follower.robot_kinematic_processor import (
ForwardKinematicsJointsToEE,
InverseKinematicsEEToJoints,
)
from lerobot.scripts.lerobot_record import record_loop
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun
NUM_EPISODES = 5  # number of evaluation rollouts to record
FPS = 30  # control-loop and camera frequency (Hz)
EPISODE_TIME_SEC = 60  # time budget per episode (also used for the reset phase)
TASK_DESCRIPTION = "My task description"  # task label passed to the record loop
HF_MODEL_ID = "<hf_username>/<model_repo_id>"  # pretrained policy to evaluate
HF_DATASET_ID = "<hf_username>/<dataset_repo_id>"  # Hub repo for the recorded episodes
def main():
    """Evaluate a pretrained ACT policy on an SO100 follower arm.

    Runs NUM_EPISODES inference rollouts, records them into a LeRobot dataset
    (with end-effector features derived via kinematics processors), and pushes
    the result to the Hugging Face Hub on exit.
    """
    # Create the robot configuration & robot
    camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
    robot_config = SO100FollowerConfig(
        port="/dev/tty.usbmodem58760434471",
        id="my_awesome_follower_arm",
        cameras=camera_config,
        use_degrees=True,
    )
    robot = SO100Follower(robot_config)
    # Create policy
    policy = ACTPolicy.from_pretrained(HF_MODEL_ID)
    # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
    kinematics_solver = RobotKinematics(
        urdf_path="./SO101/so101_new_calib.urdf",
        target_frame_name="gripper_frame_link",
        joint_names=list(robot.bus.motors.keys()),
    )
    # Build pipeline to convert EE action to joints action
    robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
        steps=[
            InverseKinematicsEEToJoints(
                kinematics=kinematics_solver,
                motor_names=list(robot.bus.motors.keys()),
                initial_guess_current_joints=True,
            ),
        ],
        to_transition=robot_action_observation_to_transition,
        to_output=transition_to_robot_action,
    )
    # Build pipeline to convert joints observation to EE observation
    robot_joints_to_ee_pose_processor = RobotProcessorPipeline[RobotObservation, RobotObservation](
        steps=[
            ForwardKinematicsJointsToEE(
                kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys())
            )
        ],
        to_transition=observation_to_transition,
        to_output=transition_to_observation,
    )
    # Create the dataset
    dataset = LeRobotDataset.create(
        repo_id=HF_DATASET_ID,
        fps=FPS,
        features=combine_feature_dicts(
            aggregate_pipeline_dataset_features(
                pipeline=robot_joints_to_ee_pose_processor,
                initial_features=create_initial_features(observation=robot.observation_features),
                use_videos=True,
            ),
            # User for now should be explicit on the feature keys that were used for record
            # Alternatively, the user can pass the processor step that has the right features
            aggregate_pipeline_dataset_features(
                pipeline=make_default_teleop_action_processor(),
                initial_features=create_initial_features(
                    action={
                        f"ee.{k}": PolicyFeature(type=FeatureType.ACTION, shape=(1,))
                        for k in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]
                    }
                ),
                use_videos=True,
            ),
        ),
        robot_type=robot.name,
        use_videos=True,
        image_writer_threads=4,
    )
    # Build Policy Processors
    preprocessor, postprocessor = make_pre_post_processors(
        policy_cfg=policy,
        pretrained_path=HF_MODEL_ID,
        dataset_stats=dataset.meta.stats,
        # The inference device is automatically set to match the detected hardware, overriding any previous device settings from training to ensure compatibility.
        preprocessor_overrides={"device_processor": {"device": str(policy.config.device)}},
    )
    # Connect the robot
    robot.connect()
    # Initialize the keyboard listener and rerun visualization
    listener, events = init_keyboard_listener()
    init_rerun(session_name="phone_so100_evaluate")
    try:
        if not robot.is_connected:
            raise ValueError("Robot is not connected!")
        print("Starting evaluate loop...")
        episode_idx = 0
        for episode_idx in range(NUM_EPISODES):
            log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}")
            # Main record loop
            record_loop(
                robot=robot,
                events=events,
                fps=FPS,
                policy=policy,
                preprocessor=preprocessor, # Pass the pre and post policy processors
                postprocessor=postprocessor,
                dataset=dataset,
                control_time_s=EPISODE_TIME_SEC,
                single_task=TASK_DESCRIPTION,
                display_data=True,
                teleop_action_processor=make_default_teleop_action_processor(),
                robot_action_processor=robot_ee_to_joints_processor,
                robot_observation_processor=robot_joints_to_ee_pose_processor,
            )
            # Reset the environment if not stopping or re-recording
            if not events["stop_recording"] and (
                (episode_idx < NUM_EPISODES - 1) or events["rerecord_episode"]
            ):
                log_say("Reset the environment")
                # Same loop without a policy: lets the operator reset the scene.
                record_loop(
                    robot=robot,
                    events=events,
                    fps=FPS,
                    control_time_s=EPISODE_TIME_SEC,
                    single_task=TASK_DESCRIPTION,
                    display_data=True,
                    teleop_action_processor=make_default_teleop_action_processor(),
                    robot_action_processor=robot_ee_to_joints_processor,
                    robot_observation_processor=robot_joints_to_ee_pose_processor,
                )
            if events["rerecord_episode"]:
                log_say("Re-record episode")
                events["rerecord_episode"] = False
                events["exit_early"] = False
                # Discard the buffered episode; the loop counter advances, so the
                # redo consumes one of the NUM_EPISODES slots.
                dataset.clear_episode_buffer()
                continue
            # Save episode
            dataset.save_episode()
            # NOTE(review): this increment is overwritten by the for-loop counter
            # on the next iteration; redundant but harmless.
            episode_idx += 1
    finally:
        # Clean up
        log_say("Stop recording")
        robot.disconnect()
        listener.stop()
        dataset.finalize()
        dataset.push_to_hub()
# Entry point when executed directly as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/phone_to_so100/evaluate.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/phone_to_so100/record.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from lerobot.datasets.utils import combine_feature_dicts
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotAction, RobotObservation, RobotProcessorPipeline
from lerobot.processor.converters import (
observation_to_transition,
robot_action_observation_to_transition,
transition_to_observation,
transition_to_robot_action,
)
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.robots.so_follower.robot_kinematic_processor import (
EEBoundsAndSafety,
EEReferenceAndDelta,
ForwardKinematicsJointsToEE,
GripperVelocityToJoint,
InverseKinematicsEEToJoints,
)
from lerobot.scripts.lerobot_record import record_loop
from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS
from lerobot.teleoperators.phone.phone_processor import MapPhoneActionToRobotAction
from lerobot.teleoperators.phone.teleop_phone import Phone
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun
NUM_EPISODES = 2  # number of demonstration episodes to record
FPS = 30  # control-loop and camera frequency (Hz)
EPISODE_TIME_SEC = 60  # time budget per recorded episode
RESET_TIME_SEC = 30  # seconds allotted for resetting the scene between episodes
TASK_DESCRIPTION = "My task description"  # task label passed to the record loop
HF_REPO_ID = "<hf_username>/<dataset_repo_id>"  # Hub dataset repo to push recordings to
def main():
# Create the robot and teleoperator configurations
camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
robot_config = SO100FollowerConfig(
port="/dev/tty.usbmodem5A460814411",
id="my_awesome_follower_arm",
cameras=camera_config,
use_degrees=True,
)
teleop_config = PhoneConfig(phone_os=PhoneOS.IOS) # or PhoneOS.ANDROID
# Initialize the robot and teleoperator
robot = SO100Follower(robot_config)
phone = Phone(teleop_config)
# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
kinematics_solver = RobotKinematics(
urdf_path="./SO101/so101_new_calib.urdf",
target_frame_name="gripper_frame_link",
joint_names=list(robot.bus.motors.keys()),
)
# Build pipeline to convert phone action to EE action
phone_to_robot_ee_pose_processor = RobotProcessorPipeline[
tuple[RobotAction, RobotObservation], RobotAction
](
steps=[
MapPhoneActionToRobotAction(platform=teleop_config.phone_os),
EEReferenceAndDelta(
kinematics=kinematics_solver,
end_effector_step_sizes={"x": 0.5, "y": 0.5, "z": 0.5},
motor_names=list(robot.bus.motors.keys()),
use_latched_reference=True,
),
EEBoundsAndSafety(
end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]},
max_ee_step_m=0.20,
),
GripperVelocityToJoint(speed_factor=20.0),
],
to_transition=robot_action_observation_to_transition,
to_output=transition_to_robot_action,
)
# Build pipeline to convert EE action to joints action
robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
steps=[
InverseKinematicsEEToJoints(
kinematics=kinematics_solver,
motor_names=list(robot.bus.motors.keys()),
initial_guess_current_joints=True,
),
],
to_transition=robot_action_observation_to_transition,
to_output=transition_to_robot_action,
)
# Build pipeline to convert joint observation to EE observation
robot_joints_to_ee_pose = RobotProcessorPipeline[RobotObservation, RobotObservation](
steps=[
ForwardKinematicsJointsToEE(
kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys())
)
],
to_transition=observation_to_transition,
to_output=transition_to_observation,
)
# Create the dataset
dataset = LeRobotDataset.create(
repo_id=HF_REPO_ID,
fps=FPS,
features=combine_feature_dicts(
# Run the feature contract of the pipelines
# This tells you how the features would look like after the pipeline steps
aggregate_pipeline_dataset_features(
pipeline=phone_to_robot_ee_pose_processor,
initial_features=create_initial_features(action=phone.action_features),
use_videos=True,
),
aggregate_pipeline_dataset_features(
pipeline=robot_joints_to_ee_pose,
initial_features=create_initial_features(observation=robot.observation_features),
use_videos=True,
),
),
robot_type=robot.name,
use_videos=True,
image_writer_threads=4,
)
# Connect the robot and teleoperator
robot.connect()
phone.connect()
# Initialize the keyboard listener and rerun visualization
listener, events = init_keyboard_listener()
init_rerun(session_name="phone_so100_record")
try:
if not robot.is_connected or not phone.is_connected:
raise ValueError("Robot or teleop is not connected!")
print("Starting record loop. Move your phone to teleoperate the robot...")
episode_idx = 0
while episode_idx < NUM_EPISODES and not events["stop_recording"]:
log_say(f"Recording episode {episode_idx + 1} of {NUM_EPISODES}")
# Main record loop
record_loop(
robot=robot,
events=events,
fps=FPS,
teleop=phone,
dataset=dataset,
control_time_s=EPISODE_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
teleop_action_processor=phone_to_robot_ee_pose_processor,
robot_action_processor=robot_ee_to_joints_processor,
robot_observation_processor=robot_joints_to_ee_pose,
)
# Reset the environment if not stopping or re-recording
if not events["stop_recording"] and (
episode_idx < NUM_EPISODES - 1 or events["rerecord_episode"]
):
log_say("Reset the environment")
record_loop(
robot=robot,
events=events,
fps=FPS,
teleop=phone,
control_time_s=RESET_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
teleop_action_processor=phone_to_robot_ee_pose_processor,
robot_action_processor=robot_ee_to_joints_processor,
robot_observation_processor=robot_joints_to_ee_pose,
)
if events["rerecord_episode"]:
log_say("Re-recording episode")
events["rerecord_episode"] = False
events["exit_early"] = False
dataset.clear_episode_buffer()
continue
# Save episode
dataset.save_episode()
episode_idx += 1
finally:
# Clean up
log_say("Stop recording")
robot.disconnect()
phone.disconnect()
listener.stop()
dataset.finalize()
dataset.push_to_hub()
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/phone_to_so100/record.py",
"license": "Apache License 2.0",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/phone_to_so100/replay.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotAction, RobotObservation, RobotProcessorPipeline
from lerobot.processor.converters import (
robot_action_observation_to_transition,
transition_to_robot_action,
)
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.robots.so_follower.robot_kinematic_processor import (
InverseKinematicsEEToJoints,
)
from lerobot.utils.constants import ACTION
from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.utils import log_say
EPISODE_IDX = 0
HF_REPO_ID = "<hf_username>/<dataset_repo_id>"
def main():
# Initialize the robot config
robot_config = SO100FollowerConfig(
port="/dev/tty.usbmodem5A460814411", id="my_awesome_follower_arm", use_degrees=True
)
# Initialize the robot
robot = SO100Follower(robot_config)
# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
kinematics_solver = RobotKinematics(
urdf_path="./SO101/so101_new_calib.urdf",
target_frame_name="gripper_frame_link",
joint_names=list(robot.bus.motors.keys()),
)
# Build pipeline to convert EE action to joints action
robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
steps=[
InverseKinematicsEEToJoints(
kinematics=kinematics_solver,
motor_names=list(robot.bus.motors.keys()),
initial_guess_current_joints=False, # Because replay is open loop
),
],
to_transition=robot_action_observation_to_transition,
to_output=transition_to_robot_action,
)
# Fetch the dataset to replay
dataset = LeRobotDataset(HF_REPO_ID, episodes=[EPISODE_IDX])
# Filter dataset to only include frames from the specified episode since episodes are chunked in dataset V3.0
episode_frames = dataset.hf_dataset.filter(lambda x: x["episode_index"] == EPISODE_IDX)
actions = episode_frames.select_columns(ACTION)
# Connect to the robot
robot.connect()
try:
if not robot.is_connected:
raise ValueError("Robot is not connected!")
print("Starting replay loop...")
log_say(f"Replaying episode {EPISODE_IDX}")
for idx in range(len(episode_frames)):
t0 = time.perf_counter()
# Get recorded action from dataset
ee_action = {
name: float(actions[idx][ACTION][i])
for i, name in enumerate(dataset.features[ACTION]["names"])
}
# Get robot observation
robot_obs = robot.get_observation()
# Dataset EE -> robot joints
joint_action = robot_ee_to_joints_processor((ee_action, robot_obs))
# Send action to robot
_ = robot.send_action(joint_action)
precise_sleep(max(1.0 / dataset.fps - (time.perf_counter() - t0), 0.0))
finally:
# Clean up
robot.disconnect()
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/phone_to_so100/replay.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/phone_to_so100/teleoperate.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specif
import time
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotAction, RobotObservation, RobotProcessorPipeline
from lerobot.processor.converters import (
robot_action_observation_to_transition,
transition_to_robot_action,
)
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.robots.so_follower.robot_kinematic_processor import (
EEBoundsAndSafety,
EEReferenceAndDelta,
GripperVelocityToJoint,
InverseKinematicsEEToJoints,
)
from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS
from lerobot.teleoperators.phone.phone_processor import MapPhoneActionToRobotAction
from lerobot.teleoperators.phone.teleop_phone import Phone
from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.visualization_utils import init_rerun, log_rerun_data
FPS = 30
def main():
# Initialize the robot and teleoperator
robot_config = SO100FollowerConfig(
port="/dev/tty.usbmodem5A460814411", id="my_awesome_follower_arm", use_degrees=True
)
teleop_config = PhoneConfig(phone_os=PhoneOS.IOS) # or PhoneOS.ANDROID
# Initialize the robot and teleoperator
robot = SO100Follower(robot_config)
teleop_device = Phone(teleop_config)
# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
kinematics_solver = RobotKinematics(
urdf_path="./SO101/so101_new_calib.urdf",
target_frame_name="gripper_frame_link",
joint_names=list(robot.bus.motors.keys()),
)
# Build pipeline to convert phone action to ee pose action to joint action
phone_to_robot_joints_processor = RobotProcessorPipeline[
tuple[RobotAction, RobotObservation], RobotAction
](
steps=[
MapPhoneActionToRobotAction(platform=teleop_config.phone_os),
EEReferenceAndDelta(
kinematics=kinematics_solver,
end_effector_step_sizes={"x": 0.5, "y": 0.5, "z": 0.5},
motor_names=list(robot.bus.motors.keys()),
use_latched_reference=True,
),
EEBoundsAndSafety(
end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]},
max_ee_step_m=0.10,
),
GripperVelocityToJoint(
speed_factor=20.0,
),
InverseKinematicsEEToJoints(
kinematics=kinematics_solver,
motor_names=list(robot.bus.motors.keys()),
initial_guess_current_joints=True,
),
],
to_transition=robot_action_observation_to_transition,
to_output=transition_to_robot_action,
)
# Connect to the robot and teleoperator
robot.connect()
teleop_device.connect()
# Init rerun viewer
init_rerun(session_name="phone_so100_teleop")
if not robot.is_connected or not teleop_device.is_connected:
raise ValueError("Robot or teleop is not connected!")
print("Starting teleop loop. Move your phone to teleoperate the robot...")
while True:
t0 = time.perf_counter()
# Get robot observation
robot_obs = robot.get_observation()
# Get teleop action
phone_obs = teleop_device.get_action()
# Phone -> EE pose -> Joints transition
joint_action = phone_to_robot_joints_processor((phone_obs, robot_obs))
# Send action to robot
_ = robot.send_action(joint_action)
# Visualize
log_rerun_data(observation=phone_obs, action=joint_action)
precise_sleep(max(1.0 / FPS - (time.perf_counter() - t0), 0.0))
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/phone_to_so100/teleoperate.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/so100_to_so100_EE/evaluate.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.configs.types import FeatureType, PolicyFeature
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from lerobot.datasets.utils import combine_feature_dicts
from lerobot.model.kinematics import RobotKinematics
from lerobot.policies.act.modeling_act import ACTPolicy
from lerobot.policies.factory import make_pre_post_processors
from lerobot.processor import (
RobotAction,
RobotObservation,
RobotProcessorPipeline,
make_default_teleop_action_processor,
)
from lerobot.processor.converters import (
observation_to_transition,
robot_action_observation_to_transition,
transition_to_observation,
transition_to_robot_action,
)
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.robots.so_follower.robot_kinematic_processor import (
ForwardKinematicsJointsToEE,
InverseKinematicsEEToJoints,
)
from lerobot.scripts.lerobot_record import record_loop
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun
NUM_EPISODES = 5
FPS = 30
EPISODE_TIME_SEC = 60
TASK_DESCRIPTION = "My task description"
HF_MODEL_ID = "<hf_username>/<model_repo_id>"
HF_DATASET_ID = "<hf_username>/<dataset_repo_id>"
def main():
# Create the robot configuration & robot
camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
robot_config = SO100FollowerConfig(
port="/dev/tty.usbmodem5A460814411",
id="my_awesome_follower_arm",
cameras=camera_config,
use_degrees=True,
)
robot = SO100Follower(robot_config)
# Create policy
policy = ACTPolicy.from_pretrained(HF_MODEL_ID)
# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
kinematics_solver = RobotKinematics(
urdf_path="./SO101/so101_new_calib.urdf",
target_frame_name="gripper_frame_link",
joint_names=list(robot.bus.motors.keys()),
)
# Build pipeline to convert EE action to joints action
robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
steps=[
InverseKinematicsEEToJoints(
kinematics=kinematics_solver,
motor_names=list(robot.bus.motors.keys()),
initial_guess_current_joints=True,
),
],
to_transition=robot_action_observation_to_transition,
to_output=transition_to_robot_action,
)
# Build pipeline to convert joints observation to EE observation
robot_joints_to_ee_pose_processor = RobotProcessorPipeline[RobotObservation, RobotObservation](
steps=[
ForwardKinematicsJointsToEE(
kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys())
)
],
to_transition=observation_to_transition,
to_output=transition_to_observation,
)
# Create the dataset
dataset = LeRobotDataset.create(
repo_id=HF_DATASET_ID,
fps=FPS,
features=combine_feature_dicts(
aggregate_pipeline_dataset_features(
pipeline=robot_joints_to_ee_pose_processor,
initial_features=create_initial_features(observation=robot.observation_features),
use_videos=True,
),
# User for now should be explicit on the feature keys that were used for record
# Alternatively, the user can pass the processor step that has the right features
aggregate_pipeline_dataset_features(
pipeline=make_default_teleop_action_processor(),
initial_features=create_initial_features(
action={
f"ee.{k}": PolicyFeature(type=FeatureType.ACTION, shape=(1,))
for k in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]
}
),
use_videos=True,
),
),
robot_type=robot.name,
use_videos=True,
image_writer_threads=4,
)
# Build Policy Processors
preprocessor, postprocessor = make_pre_post_processors(
policy_cfg=policy,
pretrained_path=HF_MODEL_ID,
dataset_stats=dataset.meta.stats,
# The inference device is automatically set to match the detected hardware, overriding any previous device settings from training to ensure compatibility.
preprocessor_overrides={"device_processor": {"device": str(policy.config.device)}},
)
# Connect the robot and teleoperator
robot.connect()
# Initialize the keyboard listener and rerun visualization
listener, events = init_keyboard_listener()
init_rerun(session_name="so100_so100_evaluate")
try:
if not robot.is_connected:
raise ValueError("Robot is not connected!")
print("Starting evaluate loop...")
episode_idx = 0
for episode_idx in range(NUM_EPISODES):
log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}")
# Main record loop
record_loop(
robot=robot,
events=events,
fps=FPS,
policy=policy,
preprocessor=preprocessor, # Pass the pre and post policy processors
postprocessor=postprocessor,
dataset=dataset,
control_time_s=EPISODE_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
teleop_action_processor=make_default_teleop_action_processor(),
robot_action_processor=robot_ee_to_joints_processor,
robot_observation_processor=robot_joints_to_ee_pose_processor,
)
# Reset the environment if not stopping or re-recording
if not events["stop_recording"] and (
(episode_idx < NUM_EPISODES - 1) or events["rerecord_episode"]
):
log_say("Reset the environment")
record_loop(
robot=robot,
events=events,
fps=FPS,
control_time_s=EPISODE_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
teleop_action_processor=make_default_teleop_action_processor(),
robot_action_processor=robot_ee_to_joints_processor,
robot_observation_processor=robot_joints_to_ee_pose_processor,
)
if events["rerecord_episode"]:
log_say("Re-record episode")
events["rerecord_episode"] = False
events["exit_early"] = False
dataset.clear_episode_buffer()
continue
# Save episode
dataset.save_episode()
episode_idx += 1
finally:
# Clean up
log_say("Stop recording")
robot.disconnect()
listener.stop()
dataset.finalize()
dataset.push_to_hub()
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/so100_to_so100_EE/evaluate.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/so100_to_so100_EE/record.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
from lerobot.datasets.utils import combine_feature_dicts
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotAction, RobotObservation, RobotProcessorPipeline
from lerobot.processor.converters import (
observation_to_transition,
robot_action_observation_to_transition,
transition_to_observation,
transition_to_robot_action,
)
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.robots.so_follower.robot_kinematic_processor import (
EEBoundsAndSafety,
ForwardKinematicsJointsToEE,
InverseKinematicsEEToJoints,
)
from lerobot.scripts.lerobot_record import record_loop
from lerobot.teleoperators.so_leader import SO100Leader, SO100LeaderConfig
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import init_rerun
NUM_EPISODES = 2
FPS = 30
EPISODE_TIME_SEC = 60
RESET_TIME_SEC = 30
TASK_DESCRIPTION = "My task description"
HF_REPO_ID = "<hf_username>/<dataset_repo_id>"
def main():
# Create the robot and teleoperator configurations
camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
follower_config = SO100FollowerConfig(
port="/dev/tty.usbmodem5A460814411",
id="my_awesome_follower_arm",
cameras=camera_config,
use_degrees=True,
)
leader_config = SO100LeaderConfig(port="/dev/tty.usbmodem5A460819811", id="my_awesome_leader_arm")
# Initialize the robot and teleoperator
follower = SO100Follower(follower_config)
leader = SO100Leader(leader_config)
# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
follower_kinematics_solver = RobotKinematics(
urdf_path="./SO101/so101_new_calib.urdf",
target_frame_name="gripper_frame_link",
joint_names=list(follower.bus.motors.keys()),
)
# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
leader_kinematics_solver = RobotKinematics(
urdf_path="./SO101/so101_new_calib.urdf",
target_frame_name="gripper_frame_link",
joint_names=list(leader.bus.motors.keys()),
)
# Build pipeline to convert follower joints to EE observation
follower_joints_to_ee = RobotProcessorPipeline[RobotObservation, RobotObservation](
steps=[
ForwardKinematicsJointsToEE(
kinematics=follower_kinematics_solver, motor_names=list(follower.bus.motors.keys())
),
],
to_transition=observation_to_transition,
to_output=transition_to_observation,
)
# Build pipeline to convert leader joints to EE action
leader_joints_to_ee = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
steps=[
ForwardKinematicsJointsToEE(
kinematics=leader_kinematics_solver, motor_names=list(leader.bus.motors.keys())
),
],
to_transition=robot_action_observation_to_transition,
to_output=transition_to_robot_action,
)
# Build pipeline to convert EE action to follower joints
ee_to_follower_joints = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
[
EEBoundsAndSafety(
end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]},
max_ee_step_m=0.10,
),
InverseKinematicsEEToJoints(
kinematics=follower_kinematics_solver,
motor_names=list(follower.bus.motors.keys()),
initial_guess_current_joints=True,
),
],
to_transition=robot_action_observation_to_transition,
to_output=transition_to_robot_action,
)
# Create the dataset
dataset = LeRobotDataset.create(
repo_id=HF_REPO_ID,
fps=FPS,
features=combine_feature_dicts(
# Run the feature contract of the pipelines
# This tells you how the features would look like after the pipeline steps
aggregate_pipeline_dataset_features(
pipeline=leader_joints_to_ee,
initial_features=create_initial_features(action=leader.action_features),
use_videos=True,
),
aggregate_pipeline_dataset_features(
pipeline=follower_joints_to_ee,
initial_features=create_initial_features(observation=follower.observation_features),
use_videos=True,
),
),
robot_type=follower.name,
use_videos=True,
image_writer_threads=4,
)
# Connect the robot and teleoperator
leader.connect()
follower.connect()
# Initialize the keyboard listener and rerun visualization
listener, events = init_keyboard_listener()
init_rerun(session_name="recording_phone")
try:
if not leader.is_connected or not follower.is_connected:
raise ValueError("Robot or teleop is not connected!")
print("Starting record loop...")
episode_idx = 0
while episode_idx < NUM_EPISODES and not events["stop_recording"]:
log_say(f"Recording episode {episode_idx + 1} of {NUM_EPISODES}")
# Main record loop
record_loop(
robot=follower,
events=events,
fps=FPS,
teleop=leader,
dataset=dataset,
control_time_s=EPISODE_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
teleop_action_processor=leader_joints_to_ee,
robot_action_processor=ee_to_follower_joints,
robot_observation_processor=follower_joints_to_ee,
)
# Reset the environment if not stopping or re-recording
if not events["stop_recording"] and (
episode_idx < NUM_EPISODES - 1 or events["rerecord_episode"]
):
log_say("Reset the environment")
record_loop(
robot=follower,
events=events,
fps=FPS,
teleop=leader,
control_time_s=RESET_TIME_SEC,
single_task=TASK_DESCRIPTION,
display_data=True,
teleop_action_processor=leader_joints_to_ee,
robot_action_processor=ee_to_follower_joints,
robot_observation_processor=follower_joints_to_ee,
)
if events["rerecord_episode"]:
log_say("Re-recording episode")
events["rerecord_episode"] = False
events["exit_early"] = False
dataset.clear_episode_buffer()
continue
# Save episode
dataset.save_episode()
episode_idx += 1
finally:
# Clean up
log_say("Stop recording")
leader.disconnect()
follower.disconnect()
listener.stop()
dataset.finalize()
dataset.push_to_hub()
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/so100_to_so100_EE/record.py",
"license": "Apache License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/so100_to_so100_EE/replay.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotAction, RobotObservation, RobotProcessorPipeline
from lerobot.processor.converters import (
robot_action_observation_to_transition,
transition_to_robot_action,
)
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.robots.so_follower.robot_kinematic_processor import (
InverseKinematicsEEToJoints,
)
from lerobot.utils.constants import ACTION
from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.utils import log_say
EPISODE_IDX = 0
HF_REPO_ID = "<hf_username>/<dataset_repo_id>"
def main():
# Initialize the robot config
robot_config = SO100FollowerConfig(
port="/dev/tty.usbmodem5A460814411", id="my_awesome_follower_arm", use_degrees=True
)
# Initialize the robot
robot = SO100Follower(robot_config)
# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
kinematics_solver = RobotKinematics(
urdf_path="./SO101/so101_new_calib.urdf",
target_frame_name="gripper_frame_link",
joint_names=list(robot.bus.motors.keys()),
)
# Build pipeline to convert EE action to joints action
robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
steps=[
InverseKinematicsEEToJoints(
kinematics=kinematics_solver,
motor_names=list(robot.bus.motors.keys()),
initial_guess_current_joints=False, # Because replay is open loop
),
],
to_transition=robot_action_observation_to_transition,
to_output=transition_to_robot_action,
)
# Fetch the dataset to replay
dataset = LeRobotDataset(HF_REPO_ID, episodes=[EPISODE_IDX])
# Filter dataset to only include frames from the specified episode since episodes are chunked in dataset V3.0
episode_frames = dataset.hf_dataset.filter(lambda x: x["episode_index"] == EPISODE_IDX)
actions = episode_frames.select_columns(ACTION)
# Connect to the robot
robot.connect()
try:
if not robot.is_connected:
raise ValueError("Robot is not connected!")
print("Starting replay loop...")
log_say(f"Replaying episode {EPISODE_IDX}")
for idx in range(len(episode_frames)):
t0 = time.perf_counter()
# Get recorded action from dataset
ee_action = {
name: float(actions[idx][ACTION][i])
for i, name in enumerate(dataset.features[ACTION]["names"])
}
# Get robot observation
robot_obs = robot.get_observation()
# Dataset EE -> robot joints
joint_action = robot_ee_to_joints_processor((ee_action, robot_obs))
# Send action to robot
_ = robot.send_action(joint_action)
precise_sleep(max(1.0 / dataset.fps - (time.perf_counter() - t0), 0.0))
finally:
# Clean up
robot.disconnect()
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/so100_to_so100_EE/replay.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/so100_to_so100_EE/teleoperate.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from lerobot.model.kinematics import RobotKinematics
from lerobot.processor import RobotAction, RobotObservation, RobotProcessorPipeline
from lerobot.processor.converters import (
robot_action_observation_to_transition,
robot_action_to_transition,
transition_to_robot_action,
)
from lerobot.robots.so_follower import SO100Follower, SO100FollowerConfig
from lerobot.robots.so_follower.robot_kinematic_processor import (
EEBoundsAndSafety,
ForwardKinematicsJointsToEE,
InverseKinematicsEEToJoints,
)
from lerobot.teleoperators.so_leader import SO100Leader, SO100LeaderConfig
from lerobot.utils.robot_utils import precise_sleep
from lerobot.utils.visualization_utils import init_rerun, log_rerun_data
FPS = 30
def main():
    """Teleoperate an SO100 follower from an SO100 leader in end-effector space.

    Leader joint positions are mapped to an EE pose via forward kinematics, the
    EE action is clamped to a safe workspace, converted back to follower joints
    via inverse kinematics, and streamed to the follower at FPS.
    """
    # Configuration for both arms.
    follower_cfg = SO100FollowerConfig(
        port="/dev/tty.usbmodem5A460814411", id="my_awesome_follower_arm", use_degrees=True
    )
    leader_cfg = SO100LeaderConfig(port="/dev/tty.usbmodem5A460819811", id="my_awesome_leader_arm")

    # Instantiate the devices.
    follower = SO100Follower(follower_cfg)
    leader = SO100Leader(leader_cfg)

    # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo:
    # https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
    follower_solver = RobotKinematics(
        urdf_path="./SO101/so101_new_calib.urdf",
        target_frame_name="gripper_frame_link",
        joint_names=list(follower.bus.motors.keys()),
    )
    leader_solver = RobotKinematics(
        urdf_path="./SO101/so101_new_calib.urdf",
        target_frame_name="gripper_frame_link",
        joint_names=list(leader.bus.motors.keys()),
    )

    # Pipeline: leader joint positions -> leader EE action (forward kinematics).
    leader_to_ee = RobotProcessorPipeline[RobotAction, RobotAction](
        steps=[
            ForwardKinematicsJointsToEE(
                kinematics=leader_solver, motor_names=list(leader.bus.motors.keys())
            ),
        ],
        to_transition=robot_action_to_transition,
        to_output=transition_to_robot_action,
    )

    # Pipeline: (EE action, follower observation) -> follower joints (safety clamp + IK).
    ee_to_follower_joints = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
        [
            EEBoundsAndSafety(
                end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]},
                max_ee_step_m=0.10,
            ),
            InverseKinematicsEEToJoints(
                kinematics=follower_solver,
                motor_names=list(follower.bus.motors.keys()),
                initial_guess_current_joints=False,
            ),
        ],
        to_transition=robot_action_observation_to_transition,
        to_output=transition_to_robot_action,
    )

    # Open both serial connections.
    follower.connect()
    leader.connect()

    # Start the rerun visualization session.
    init_rerun(session_name="so100_so100_EE_teleop")

    print("Starting teleop loop...")
    while True:
        loop_start = time.perf_counter()

        # Read current state from both devices.
        follower_obs = follower.get_observation()
        leader_joints = leader.get_action()

        # Leader joints -> EE action -> follower joint command.
        ee_action = leader_to_ee(leader_joints)
        follower_action = ee_to_follower_joints((ee_action, follower_obs))

        _ = follower.send_action(follower_action)

        # Stream the EE action and joint command to the viewer.
        log_rerun_data(observation=ee_action, action=follower_action)

        # Sleep the remainder of the frame to hold the loop at FPS.
        precise_sleep(max(1.0 / FPS - (time.perf_counter() - loop_start), 0.0))
# Run the teleoperation loop only when executed directly (not on import).
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/so100_to_so100_EE/teleoperate.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/datasets/pipeline_features.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections.abc import Sequence
from typing import Any
from lerobot.configs.types import PipelineFeatureType
from lerobot.datasets.utils import hw_to_dataset_features
from lerobot.processor import DataProcessorPipeline, RobotAction, RobotObservation
from lerobot.utils.constants import ACTION, OBS_IMAGES, OBS_STATE, OBS_STR
def create_initial_features(
    action: RobotAction | None = None, observation: RobotObservation | None = None
) -> dict[PipelineFeatureType, dict[str, Any]]:
    """
    Build the seed feature mapping consumed by a processor pipeline.

    Args:
        action: Optional mapping of action feature names to their types/shapes.
        observation: Optional mapping of observation feature names to their types/shapes.

    Returns:
        A dict keyed by PipelineFeatureType holding the provided feature dicts
        (an empty dict for any argument that is None or empty).
    """
    # `x or {}` keeps the caller's dict when truthy and substitutes a fresh
    # empty dict when the argument is None or empty — same as the original
    # truthiness-guarded assignment.
    return {
        PipelineFeatureType.ACTION: action or {},
        PipelineFeatureType.OBSERVATION: observation or {},
    }
# Helper to filter state/action keys based on regex patterns.
def should_keep(key: str, patterns: tuple[str]) -> bool:
if patterns is None:
return True
return any(re.search(pat, key) for pat in patterns)
def strip_prefix(key: str, prefixes_to_strip: Sequence[str]) -> str:
    """Strip the first matching prefix from ``key``.

    Args:
        key: The feature key to clean.
        prefixes_to_strip: Candidate prefixes, tried in order; at most one
            (the first that matches) is removed.

    Returns:
        ``key`` without the matched prefix, or ``key`` unchanged if none match.
    """
    # Fixed annotation: the old `tuple[str]` meant a 1-tuple, not "tuple of strings".
    for prefix in prefixes_to_strip:
        if key.startswith(prefix):
            return key[len(prefix) :]
    return key
# Define prefixes to strip from feature keys for clean names.
# Handles both fully qualified (e.g., "action.state") and short (e.g., "state") forms:
# for each constant we emit "<const>." plus "<last-dotted-component>." (const.split(".")[-1]).
# A constant with no dot yields the same prefix twice; strip_prefix removes at most one.
PREFIXES_TO_STRIP = tuple(
    f"{token}." for const in (ACTION, OBS_STATE, OBS_IMAGES) for token in (const, const.split(".")[-1])
)
def aggregate_pipeline_dataset_features(
    pipeline: DataProcessorPipeline,
    initial_features: dict[PipelineFeatureType, dict[str, Any]],
    *,
    use_videos: bool = True,
    patterns: Sequence[str] | None = None,
) -> dict[str, dict]:
    """
    Aggregate and filter pipeline features into a dataset-ready features dict.

    The initial features are transformed by the pipeline, split into action and
    observation (image vs. state) groups, filtered by ``use_videos`` and
    ``patterns`` (images are never pattern-filtered), renamed to clean short
    names, and finally converted with ``hw_to_dataset_features``.

    Args:
        pipeline: The DataProcessorPipeline to apply.
        initial_features: Raw feature specs for actions and observations.
        use_videos: If False, image features are dropped entirely.
        patterns: Regex patterns restricting action/state features; None keeps all.

    Returns:
        A features dictionary formatted for a Hugging Face LeRobot Dataset.
    """
    transformed = pipeline.transform_features(initial_features)

    images_token = OBS_IMAGES.split(".")[-1]

    def _is_image_feature(key: str, value: Any) -> bool:
        # A 3-tuple value is treated as an image shape; otherwise fall back to
        # image-related tokens in the key.
        if isinstance(value, tuple) and len(value) == 3:
            return True
        return (
            key.startswith(f"{OBS_IMAGES}.")
            or key.startswith(f"{images_token}.")
            or f".{images_token}." in key
        )

    # Categorized, filtered features keyed by clean (prefix-stripped) names.
    action_feats: dict[str, Any] = {}
    obs_feats: dict[str, Any] = {}

    for ptype, feats in transformed.items():
        if ptype not in (PipelineFeatureType.ACTION, PipelineFeatureType.OBSERVATION):
            continue
        for key, value in feats.items():
            if ptype == PipelineFeatureType.ACTION:
                # Actions are never images; only the pattern filter applies.
                if not should_keep(key, patterns):
                    continue
                action_feats[strip_prefix(key, PREFIXES_TO_STRIP)] = value
                continue
            if _is_image_feature(key, value):
                # Images bypass pattern filtering but honor use_videos.
                if not use_videos:
                    continue
            elif not should_keep(key, patterns):
                continue
            obs_feats[strip_prefix(key, PREFIXES_TO_STRIP)] = value

    # Convert the grouped features into the final dataset format.
    dataset_features: dict[str, dict] = {}
    if action_feats:
        dataset_features.update(hw_to_dataset_features(action_feats, ACTION, use_videos))
    if obs_feats:
        dataset_features.update(hw_to_dataset_features(obs_feats, OBS_STR, use_videos))
    return dataset_features
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/datasets/pipeline_features.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/act/processor_act.py | #!/usr/bin/env python
# Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from lerobot.policies.act.configuration_act import ACTConfig
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DeviceProcessorStep,
NormalizerProcessorStep,
PolicyAction,
PolicyProcessorPipeline,
RenameObservationsProcessorStep,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action
from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME
def make_act_pre_post_processors(
    config: ACTConfig,
    dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None,
) -> tuple[
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """Build the ACT input/output processing pipelines.

    The pre-processor renames observations, adds a batch dimension, moves data
    to the configured device, then normalizes inputs/outputs with the dataset
    stats. The post-processor unnormalizes predicted actions and returns them
    to CPU.

    Args:
        config (ACTConfig): The ACT policy configuration object.
        dataset_stats (dict[str, dict[str, torch.Tensor]] | None): Dataset
            statistics (e.g., mean and std) used for (un)normalization.

    Returns:
        A (preprocessor, postprocessor) pair of PolicyProcessorPipeline objects.
    """
    preprocessor = PolicyProcessorPipeline[dict[str, Any], dict[str, Any]](
        steps=[
            RenameObservationsProcessorStep(rename_map={}),
            AddBatchDimensionProcessorStep(),
            DeviceProcessorStep(device=config.device),
            NormalizerProcessorStep(
                features={**config.input_features, **config.output_features},
                norm_map=config.normalization_mapping,
                stats=dataset_stats,
                device=config.device,
            ),
        ],
        name=POLICY_PREPROCESSOR_DEFAULT_NAME,
    )
    postprocessor = PolicyProcessorPipeline[PolicyAction, PolicyAction](
        steps=[
            UnnormalizerProcessorStep(
                features=config.output_features,
                norm_map=config.normalization_mapping,
                stats=dataset_stats,
            ),
            DeviceProcessorStep(device="cpu"),
        ],
        name=POLICY_POSTPROCESSOR_DEFAULT_NAME,
        to_transition=policy_action_to_transition,
        to_output=transition_to_policy_action,
    )
    return preprocessor, postprocessor
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/act/processor_act.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/diffusion/processor_diffusion.py | #!/usr/bin/env python
# Copyright 2024 Columbia Artificial Intelligence, Robotics Lab,
# and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DeviceProcessorStep,
NormalizerProcessorStep,
PolicyAction,
PolicyProcessorPipeline,
RenameObservationsProcessorStep,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action
from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME
def make_diffusion_pre_post_processors(
    config: DiffusionConfig,
    dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None,
) -> tuple[
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """Build the diffusion-policy input/output processing pipelines.

    The pre-processor renames observations, adds a batch dimension, moves data
    to the configured device, then normalizes inputs/outputs with the dataset
    stats. The post-processor unnormalizes predicted actions and returns them
    to CPU.

    Args:
        config: The diffusion policy configuration (features, normalization
            mapping, device).
        dataset_stats: Statistics used for (un)normalization. Defaults to None.

    Returns:
        A (preprocessor, postprocessor) pair of PolicyProcessorPipeline objects.
    """
    preprocessor = PolicyProcessorPipeline[dict[str, Any], dict[str, Any]](
        steps=[
            RenameObservationsProcessorStep(rename_map={}),
            AddBatchDimensionProcessorStep(),
            DeviceProcessorStep(device=config.device),
            NormalizerProcessorStep(
                features={**config.input_features, **config.output_features},
                norm_map=config.normalization_mapping,
                stats=dataset_stats,
            ),
        ],
        name=POLICY_PREPROCESSOR_DEFAULT_NAME,
    )
    postprocessor = PolicyProcessorPipeline[PolicyAction, PolicyAction](
        steps=[
            UnnormalizerProcessorStep(
                features=config.output_features,
                norm_map=config.normalization_mapping,
                stats=dataset_stats,
            ),
            DeviceProcessorStep(device="cpu"),
        ],
        name=POLICY_POSTPROCESSOR_DEFAULT_NAME,
        to_transition=policy_action_to_transition,
        to_output=transition_to_policy_action,
    )
    return preprocessor, postprocessor
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/diffusion/processor_diffusion.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/pi0/processor_pi0.py | #!/usr/bin/env python
# Copyright 2025 Physical Intelligence and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from lerobot.configs.types import PipelineFeatureType, PolicyFeature
from lerobot.policies.pi0.configuration_pi0 import PI0Config
from lerobot.processor import (
AddBatchDimensionProcessorStep,
ComplementaryDataProcessorStep,
DeviceProcessorStep,
NormalizerProcessorStep,
PolicyAction,
PolicyProcessorPipeline,
ProcessorStep,
ProcessorStepRegistry,
RenameObservationsProcessorStep,
TokenizerProcessorStep,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action
from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME
@ProcessorStepRegistry.register(name="pi0_new_line_processor")
class Pi0NewLineProcessor(ComplementaryDataProcessorStep):
    """
    Ensures the task description ends with a newline character.

    The PaliGemma tokenizer expects a trailing newline on the text prompt.
    Both a single task string and a list of task strings are handled; a
    missing or None task, or any other value type, passes through unchanged.
    """

    def complementary_data(self, complementary_data):
        """
        Return complementary data whose 'task' value is newline-terminated.

        Args:
            complementary_data: A dictionary that may contain a 'task' key with
                a string or list of strings.

        Returns:
            The original dict when 'task' is absent or None; otherwise a copy
            with newline-terminated task value(s).
        """
        task = complementary_data.get("task")
        if task is None:
            # Covers both "key absent" and "value is None".
            return complementary_data

        updated = dict(complementary_data)
        if isinstance(task, str):
            updated["task"] = task if task.endswith("\n") else f"{task}\n"
        elif isinstance(task, list) and all(isinstance(t, str) for t in task):
            updated["task"] = [t if t.endswith("\n") else f"{t}\n" for t in task]
        # Any other task type is left untouched (still returned as a copy).
        return updated

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """No feature changes — this step only edits complementary data."""
        return features
def make_pi0_pre_post_processors(
    config: PI0Config,
    dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None,
) -> tuple[
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """Build the PI0 input/output processing pipelines.

    Pre-processing: rename observations, add a batch dimension, newline-terminate
    the task prompt (required by the PaliGemma tokenizer), tokenize the prompt,
    move data to the configured device, then normalize with dataset stats.
    Post-processing: unnormalize the predicted actions and move them to CPU.

    Args:
        config: The PI0 policy configuration object.
        dataset_stats: Statistics used for (un)normalization. Defaults to None.

    Returns:
        A (preprocessor, postprocessor) pair of PolicyProcessorPipeline objects.
    """
    pre_steps: list[ProcessorStep] = [
        RenameObservationsProcessorStep(rename_map={}),  # mimic the pretrained processor
        AddBatchDimensionProcessorStep(),
        Pi0NewLineProcessor(),  # newline before tokenization for PaliGemma
        TokenizerProcessorStep(
            tokenizer_name="google/paligemma-3b-pt-224",
            max_length=config.tokenizer_max_length,
            padding_side="right",
            padding="max_length",
        ),
        DeviceProcessorStep(device=config.device),
        NormalizerProcessorStep(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
    ]
    post_steps: list[ProcessorStep] = [
        UnnormalizerProcessorStep(
            features=config.output_features,
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
        DeviceProcessorStep(device="cpu"),
    ]

    preprocessor = PolicyProcessorPipeline[dict[str, Any], dict[str, Any]](
        steps=pre_steps,
        name=POLICY_PREPROCESSOR_DEFAULT_NAME,
    )
    postprocessor = PolicyProcessorPipeline[PolicyAction, PolicyAction](
        steps=post_steps,
        name=POLICY_POSTPROCESSOR_DEFAULT_NAME,
        to_transition=policy_action_to_transition,
        to_output=transition_to_policy_action,
    )
    return preprocessor, postprocessor
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/pi0/processor_pi0.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/sac/processor_sac.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from lerobot.policies.sac.configuration_sac import SACConfig
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DeviceProcessorStep,
NormalizerProcessorStep,
PolicyAction,
PolicyProcessorPipeline,
RenameObservationsProcessorStep,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action
from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME
def make_sac_pre_post_processors(
    config: SACConfig,
    dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None,
) -> tuple[
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """Build the SAC input/output processing pipelines.

    The pre-processor renames observations, adds a batch dimension, moves data
    to the configured device, then normalizes inputs/outputs with the dataset
    stats. The post-processor unnormalizes predicted actions and returns them
    to CPU.

    Args:
        config: The SAC policy configuration object.
        dataset_stats: Statistics used for (un)normalization. Defaults to None.

    Returns:
        A (preprocessor, postprocessor) pair of PolicyProcessorPipeline objects.
    """
    preprocessor = PolicyProcessorPipeline[dict[str, Any], dict[str, Any]](
        steps=[
            RenameObservationsProcessorStep(rename_map={}),
            AddBatchDimensionProcessorStep(),
            DeviceProcessorStep(device=config.device),
            NormalizerProcessorStep(
                features={**config.input_features, **config.output_features},
                norm_map=config.normalization_mapping,
                stats=dataset_stats,
            ),
        ],
        name=POLICY_PREPROCESSOR_DEFAULT_NAME,
    )
    postprocessor = PolicyProcessorPipeline[PolicyAction, PolicyAction](
        steps=[
            UnnormalizerProcessorStep(
                features=config.output_features,
                norm_map=config.normalization_mapping,
                stats=dataset_stats,
            ),
            DeviceProcessorStep(device="cpu"),
        ],
        name=POLICY_POSTPROCESSOR_DEFAULT_NAME,
        to_transition=policy_action_to_transition,
        to_output=transition_to_policy_action,
    )
    return preprocessor, postprocessor
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/sac/processor_sac.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/sac/reward_model/processor_classifier.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig
from lerobot.processor import (
DeviceProcessorStep,
IdentityProcessorStep,
NormalizerProcessorStep,
PolicyAction,
PolicyProcessorPipeline,
)
from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action
def make_classifier_processor(
    config: RewardClassifierConfig,
    dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None,
) -> tuple[
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """Build the reward-classifier input/output processing pipelines.

    Pre-processing normalizes the input features, then the output features,
    then moves everything to the configured device. Post-processing moves the
    classifier output back to CPU; the identity step reflects that no
    unnormalization is needed for the logits.

    Args:
        config: The RewardClassifier configuration object.
        dataset_stats: Statistics used for normalization. Defaults to None.

    Returns:
        A (preprocessor, postprocessor) pair of PolicyProcessorPipeline objects.
    """
    preprocessor = PolicyProcessorPipeline(
        steps=[
            NormalizerProcessorStep(
                features=config.input_features,
                norm_map=config.normalization_mapping,
                stats=dataset_stats,
            ),
            NormalizerProcessorStep(
                features=config.output_features,
                norm_map=config.normalization_mapping,
                stats=dataset_stats,
            ),
            DeviceProcessorStep(device=config.device),
        ],
        name="classifier_preprocessor",
    )
    postprocessor = PolicyProcessorPipeline(
        steps=[DeviceProcessorStep(device="cpu"), IdentityProcessorStep()],
        name="classifier_postprocessor",
        to_transition=policy_action_to_transition,
        to_output=transition_to_policy_action,
    )
    return preprocessor, postprocessor
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/sac/reward_model/processor_classifier.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/smolvla/processor_smolvla.py | #!/usr/bin/env python
# Copyright 2025 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from lerobot.configs.types import PipelineFeatureType, PolicyFeature
from lerobot.policies.smolvla.configuration_smolvla import SmolVLAConfig
from lerobot.processor import (
AddBatchDimensionProcessorStep,
ComplementaryDataProcessorStep,
DeviceProcessorStep,
NormalizerProcessorStep,
PolicyAction,
PolicyProcessorPipeline,
ProcessorStepRegistry,
RenameObservationsProcessorStep,
TokenizerProcessorStep,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action
from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME
def make_smolvla_pre_post_processors(
    config: SmolVLAConfig,
    dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None,
) -> tuple[
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """Build the SmolVLA input/output processing pipelines.

    Pre-processing: rename observations, add a batch dimension, newline-terminate
    the task prompt, tokenize it with the VLM tokenizer, move data to the
    configured device, then normalize with dataset stats. Post-processing:
    unnormalize the predicted actions and move them to CPU.

    Args:
        config: The SmolVLA policy configuration object.
        dataset_stats: Statistics used for (un)normalization. Defaults to None.

    Returns:
        A (preprocessor, postprocessor) pair of PolicyProcessorPipeline objects.
    """
    pre_steps = [
        RenameObservationsProcessorStep(rename_map={}),  # mimic the pretrained processor
        AddBatchDimensionProcessorStep(),
        SmolVLANewLineProcessor(),
        TokenizerProcessorStep(
            tokenizer_name=config.vlm_model_name,
            padding=config.pad_language_to,
            padding_side="right",
            max_length=config.tokenizer_max_length,
        ),
        DeviceProcessorStep(device=config.device),
        NormalizerProcessorStep(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
    ]
    post_steps = [
        UnnormalizerProcessorStep(
            features=config.output_features,
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
        DeviceProcessorStep(device="cpu"),
    ]

    preprocessor = PolicyProcessorPipeline[dict[str, Any], dict[str, Any]](
        steps=pre_steps,
        name=POLICY_PREPROCESSOR_DEFAULT_NAME,
    )
    postprocessor = PolicyProcessorPipeline[PolicyAction, PolicyAction](
        steps=post_steps,
        name=POLICY_POSTPROCESSOR_DEFAULT_NAME,
        to_transition=policy_action_to_transition,
        to_output=transition_to_policy_action,
    )
    return preprocessor, postprocessor
@ProcessorStepRegistry.register(name="smolvla_new_line_processor")
class SmolVLANewLineProcessor(ComplementaryDataProcessorStep):
    """
    Ensures the 'task' description ends with a newline character.

    Some tokenizers (e.g., PaliGemma) expect a trailing newline on the prompt.
    Both a single task string and a list of task strings are handled; a missing
    or None task, or any other value type, passes through unchanged.
    """

    def complementary_data(self, complementary_data):
        """Return complementary data whose 'task' value is newline-terminated."""
        task = complementary_data.get("task")
        if task is None:
            # Covers both "key absent" and "value is None".
            return complementary_data

        updated = dict(complementary_data)
        if isinstance(task, str):
            updated["task"] = task if task.endswith("\n") else f"{task}\n"
        elif isinstance(task, list) and all(isinstance(t, str) for t in task):
            updated["task"] = [t if t.endswith("\n") else f"{t}\n" for t in task]
        # Any other task type is left untouched (still returned as a copy).
        return updated

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """No feature changes — this step only edits complementary data."""
        return features
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/smolvla/processor_smolvla.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/tdmpc/processor_tdmpc.py | #!/usr/bin/env python
# Copyright 2024 Nicklas Hansen, Xiaolong Wang, Hao Su,
# and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from lerobot.policies.tdmpc.configuration_tdmpc import TDMPCConfig
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DeviceProcessorStep,
NormalizerProcessorStep,
PolicyAction,
PolicyProcessorPipeline,
RenameObservationsProcessorStep,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action
from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME
def make_tdmpc_pre_post_processors(
    config: TDMPCConfig,
    dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None,
) -> tuple[
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """
    Constructs pre-processor and post-processor pipelines for the TDMPC policy.

    The pre-processing pipeline prepares input data for the model by:
    1. Renaming features to match pretrained configurations.
    2. Adding a batch dimension.
    3. Moving all data to the specified device.
    4. Normalizing input and output features based on dataset statistics.

    The post-processing pipeline handles the model's output by:
    1. Unnormalizing the output features to their original scale.
    2. Moving data to the CPU.

    Args:
        config: The configuration object for the TDMPC policy.
        dataset_stats: A dictionary of statistics for normalization.

    Returns:
        A tuple containing the configured pre-processor and post-processor pipelines.
    """
    input_steps = [
        # Empty rename map: identity renaming, overridable by users when loading pretrained models.
        RenameObservationsProcessorStep(rename_map={}),
        AddBatchDimensionProcessorStep(),
        DeviceProcessorStep(device=config.device),
        NormalizerProcessorStep(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
    ]
    output_steps = [
        UnnormalizerProcessorStep(
            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
        DeviceProcessorStep(device="cpu"),
    ]
    return (
        PolicyProcessorPipeline[dict[str, Any], dict[str, Any]](
            steps=input_steps,
            name=POLICY_PREPROCESSOR_DEFAULT_NAME,
        ),
        PolicyProcessorPipeline[PolicyAction, PolicyAction](
            steps=output_steps,
            name=POLICY_POSTPROCESSOR_DEFAULT_NAME,
            to_transition=policy_action_to_transition,
            to_output=transition_to_policy_action,
        ),
    )
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/tdmpc/processor_tdmpc.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/policies/vqbet/processor_vqbet.py | #!/usr/bin/env python
# Copyright 2024 Seungjae Lee and Yibin Wang and Haritheja Etukuru
# and H. Jin Kim and Nur Muhammad Mahi Shafiullah and Lerrel Pinto
# and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from lerobot.policies.vqbet.configuration_vqbet import VQBeTConfig
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DeviceProcessorStep,
NormalizerProcessorStep,
PolicyAction,
PolicyProcessorPipeline,
RenameObservationsProcessorStep,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action
from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME
def make_vqbet_pre_post_processors(
    config: VQBeTConfig,
    dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None,
) -> tuple[
    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    PolicyProcessorPipeline[PolicyAction, PolicyAction],
]:
    """
    Constructs pre-processor and post-processor pipelines for the VQ-BeT policy.

    The pre-processing pipeline prepares input data for the model by:
    1. Renaming features, allowing customization to match pretrained configurations.
    2. Adding a batch dimension.
    3. Moving all data to the specified device.
    4. Normalizing input and output features based on dataset statistics.

    The post-processing pipeline handles the model's output by:
    1. Unnormalizing the output features to their original scale.
    2. Moving data to the CPU.

    Args:
        config: The configuration object for the VQ-BeT policy.
        dataset_stats: A dictionary of statistics for normalization.

    Returns:
        A tuple containing the configured pre-processor and post-processor pipelines.
    """
    input_steps = [
        RenameObservationsProcessorStep(rename_map={}),  # Give the user the possibility to rename the keys
        AddBatchDimensionProcessorStep(),
        DeviceProcessorStep(device=config.device),
        NormalizerProcessorStep(
            features={**config.input_features, **config.output_features},
            norm_map=config.normalization_mapping,
            stats=dataset_stats,
        ),
    ]
    output_steps = [
        UnnormalizerProcessorStep(
            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
        ),
        DeviceProcessorStep(device="cpu"),
    ]
    return (
        PolicyProcessorPipeline[dict[str, Any], dict[str, Any]](
            steps=input_steps,
            name=POLICY_PREPROCESSOR_DEFAULT_NAME,
        ),
        PolicyProcessorPipeline[PolicyAction, PolicyAction](
            steps=output_steps,
            name=POLICY_POSTPROCESSOR_DEFAULT_NAME,
            to_transition=policy_action_to_transition,
            to_output=transition_to_policy_action,
        ),
    )
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/policies/vqbet/processor_vqbet.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/batch_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script defines processor steps for adding a batch dimension to various components of an environment transition.
These steps are designed to process actions, observations, and complementary data, making them suitable for batch processing by adding a leading dimension. This is a common requirement before feeding data into a neural network model.
"""
from dataclasses import dataclass, field
from torch import Tensor
from lerobot.configs.types import PipelineFeatureType, PolicyFeature
from lerobot.utils.constants import OBS_ENV_STATE, OBS_IMAGE, OBS_IMAGES, OBS_STATE
from .core import EnvTransition, PolicyAction
from .pipeline import (
ComplementaryDataProcessorStep,
ObservationProcessorStep,
PolicyActionProcessorStep,
ProcessorStep,
ProcessorStepRegistry,
TransitionKey,
)
@dataclass
@ProcessorStepRegistry.register(name="to_batch_processor_action")
class AddBatchDimensionActionStep(PolicyActionProcessorStep):
    """
    Add a leading batch dimension (batch size 1) to a single 1D action tensor.
    """

    def action(self, action: PolicyAction) -> PolicyAction:
        """
        Unsqueeze a 1D action to 2D; leave any other rank unchanged.

        Args:
            action: The action tensor.

        Returns:
            The action with a leading batch dimension when it was 1D.
        """
        return action.unsqueeze(0) if action.dim() == 1 else action

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """
        Pass features through unchanged: batching does not alter feature definitions.

        Args:
            features: A dictionary of policy features.

        Returns:
            The original dictionary of policy features.
        """
        return features
@dataclass
@ProcessorStepRegistry.register(name="to_batch_processor_observation")
class AddBatchDimensionObservationStep(ObservationProcessorStep):
    """
    Add a leading batch dimension to observation tensors.

    Handled entries:
      - state vectors (1D tensors) under the state keys,
      - a single image (3D tensor) under the image key,
      - any number of camera images (3D tensors) under the images prefix.
    """

    def observation(self, observation: dict[str, Tensor]) -> dict[str, Tensor]:
        """
        Unsqueeze eligible tensors in the observation dict and return it.

        Args:
            observation: The observation dictionary (mutated in place).

        Returns:
            The same dictionary with batch dimensions added where applicable.
        """

        def _eligible(value, rank: int) -> bool:
            # Only tensors of exactly the expected rank are batched.
            return isinstance(value, Tensor) and value.dim() == rank

        # 1D state vectors gain a leading batch dimension.
        for key in (OBS_STATE, OBS_ENV_STATE):
            if key in observation and _eligible(observation[key], 1):
                observation[key] = observation[key].unsqueeze(0)

        # A single 3D image gains a leading batch dimension.
        if OBS_IMAGE in observation and _eligible(observation[OBS_IMAGE], 3):
            observation[OBS_IMAGE] = observation[OBS_IMAGE].unsqueeze(0)

        # Each per-camera 3D image gains a leading batch dimension.
        camera_prefix = f"{OBS_IMAGES}."
        for key, value in observation.items():
            if key.startswith(camera_prefix) and _eligible(value, 3):
                observation[key] = value.unsqueeze(0)
        return observation

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """
        Pass features through unchanged: batching does not alter feature definitions.

        Args:
            features: A dictionary of policy features.

        Returns:
            The original dictionary of policy features.
        """
        return features
@dataclass
@ProcessorStepRegistry.register(name="to_batch_processor_complementary_data")
class AddBatchDimensionComplementaryDataStep(ComplementaryDataProcessorStep):
    """
    Batch selected complementary-data fields (batch size 1).

    - 'task': a bare string is wrapped in a single-element list.
    - 'index' / 'task_index': 0D tensors gain a leading dimension.
    """

    def complementary_data(self, complementary_data: dict) -> dict:
        """
        Add a batch dimension to the recognized fields and return the dict.

        Args:
            complementary_data: The complementary data dictionary (mutated in place).

        Returns:
            The same dictionary with batched fields.
        """
        # A string task becomes a one-element list (batch of one).
        task = complementary_data.get("task")
        if isinstance(task, str):
            complementary_data["task"] = [task]

        # Scalar (0D) index tensors become 1D tensors of length one.
        for key in ("index", "task_index"):
            value = complementary_data.get(key)
            if isinstance(value, Tensor) and value.dim() == 0:
                complementary_data[key] = value.unsqueeze(0)
        return complementary_data

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """
        Pass features through unchanged: batching does not alter feature definitions.

        Args:
            features: A dictionary of policy features.

        Returns:
            The original dictionary of policy features.
        """
        return features
@dataclass
@ProcessorStepRegistry.register(name="to_batch_processor")
class AddBatchDimensionProcessorStep(ProcessorStep):
    """
    Composite step that adds a batch dimension to a whole environment transition.

    Delegates to the dedicated action, observation, and complementary-data steps,
    turning a single-instance transition into a batch of size one.

    Attributes:
        to_batch_action_processor: Processor for the action component.
        to_batch_observation_processor: Processor for the observation component.
        to_batch_complementary_data_processor: Processor for the complementary data component.
    """

    to_batch_action_processor: AddBatchDimensionActionStep = field(
        default_factory=AddBatchDimensionActionStep
    )
    to_batch_observation_processor: AddBatchDimensionObservationStep = field(
        default_factory=AddBatchDimensionObservationStep
    )
    to_batch_complementary_data_processor: AddBatchDimensionComplementaryDataStep = field(
        default_factory=AddBatchDimensionComplementaryDataStep
    )

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """
        Apply each sub-processor to its transition component when present.

        Args:
            transition: The environment transition to process.

        Returns:
            The environment transition with a batch dimension added.
        """
        # Order matches the individual steps: action, observation, complementary data.
        routing = (
            (TransitionKey.ACTION, self.to_batch_action_processor),
            (TransitionKey.OBSERVATION, self.to_batch_observation_processor),
            (TransitionKey.COMPLEMENTARY_DATA, self.to_batch_complementary_data_processor),
        )
        for key, processor in routing:
            if transition[key] is not None:
                transition = processor(transition)
        return transition

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """
        Pass features through unchanged: batching does not alter feature definitions.

        Args:
            features: A dictionary of policy features.

        Returns:
            The original dictionary of policy features.
        """
        # NOTE: We ignore the batch dimension when transforming features
        return features
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/batch_processor.py",
"license": "Apache License 2.0",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/converters.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Sequence
from functools import singledispatch
from typing import Any
import numpy as np
import torch
from lerobot.utils.constants import ACTION, DONE, INFO, OBS_PREFIX, REWARD, TRUNCATED
from .core import EnvTransition, PolicyAction, RobotAction, RobotObservation, TransitionKey
@singledispatch
def to_tensor(
    value: Any,
    *,
    dtype: torch.dtype | None = torch.float32,
    device: torch.device | str | None = None,
) -> torch.Tensor:
    """
    Convert various data types to PyTorch tensors with configurable options.

    This is a unified tensor conversion function using single dispatch to handle
    different input types appropriately. Registered overloads cover tensors,
    numpy arrays, Python/numpy scalars, sequences, and dicts (note: the dict
    overload returns a dict of converted values, not a single tensor).

    Args:
        value: Input value to convert (tensor, array, scalar, sequence, etc.).
        dtype: Target tensor dtype. If None, preserves original dtype.
        device: Target device for the tensor.

    Returns:
        A PyTorch tensor.

    Raises:
        TypeError: If the input type is not supported (this base implementation
            is only reached when no registered overload matches).
    """
    raise TypeError(f"Unsupported type for tensor conversion: {type(value)}")
@to_tensor.register(torch.Tensor)
def _(value: torch.Tensor, *, dtype=torch.float32, device=None, **kwargs) -> torch.Tensor:
    """Cast and/or move an existing tensor as requested; skip None options."""
    conversions = {}
    if dtype is not None:
        conversions["dtype"] = dtype
    if device is not None:
        conversions["device"] = device
    # A single .to() call applies both conversions; no-op when nothing changes.
    return value.to(**conversions) if conversions else value
@to_tensor.register(np.ndarray)
def _(
    value: np.ndarray,
    *,
    dtype=torch.float32,
    device=None,
    **kwargs,
) -> torch.Tensor:
    """Convert a numpy array to a tensor; 0-d arrays become 0-d tensors."""
    # Numpy scalars (0-dimensional arrays) are unwrapped and rebuilt as scalars.
    if value.ndim == 0:
        return torch.tensor(value.item(), dtype=dtype, device=device)

    tensor = torch.from_numpy(value)
    # Apply only the requested conversions.
    conversions = {}
    if dtype is not None:
        conversions["dtype"] = dtype
    if device is not None:
        conversions["device"] = device
    return tensor.to(**conversions) if conversions else tensor
@to_tensor.register(int)
@to_tensor.register(float)
@to_tensor.register(np.integer)
@to_tensor.register(np.floating)
def _(value, *, dtype=torch.float32, device=None, **kwargs) -> torch.Tensor:
    """Handle conversion for scalar values including numpy scalars.

    Produces a 0-dimensional tensor with the requested dtype and device.
    """
    return torch.tensor(value, dtype=dtype, device=device)
@to_tensor.register(list)
@to_tensor.register(tuple)
def _(value: Sequence, *, dtype=torch.float32, device=None, **kwargs) -> torch.Tensor:
    """Handle conversion for sequences (lists, tuples).

    Delegates directly to torch.tensor, so nested sequences follow its rules
    (ragged shapes raise, as torch.tensor would).
    """
    return torch.tensor(value, dtype=dtype, device=device)
@to_tensor.register(dict)
def _(value: dict, *, device=None, **kwargs) -> dict:
    """Recursively convert dictionary values to tensors.

    None values are skipped. Nested dictionaries are handled naturally by the
    recursive dispatch back into ``to_tensor`` — the original code had an
    explicit ``isinstance(sub_value, dict)`` branch whose body was identical to
    the fall-through call, so both cases collapse into one.

    Args:
        value: Mapping whose values are converted (an empty dict yields ``{}``).
        device: Target device forwarded to every conversion.
        **kwargs: Additional conversion options (e.g. dtype) forwarded as-is.

    Returns:
        A new dictionary with the same keys and converted values (tensors, or
        nested dicts of tensors).
    """
    return {
        key: to_tensor(sub_value, device=device, **kwargs)
        for key, sub_value in value.items()
        if sub_value is not None
    }
def from_tensor_to_numpy(x: torch.Tensor | Any) -> np.ndarray | float | int | Any:
    """
    Convert a PyTorch tensor to a numpy array, or to a Python scalar when the
    tensor holds a single element. Non-tensor inputs pass through unchanged.

    Args:
        x: The input, which can be a tensor or any other type.

    Returns:
        A numpy array, a scalar, or the original input.
    """
    if not isinstance(x, torch.Tensor):
        return x
    if x.numel() == 1:
        # Single-element tensors are unwrapped to a plain Python scalar.
        return x.item()
    return x.detach().cpu().numpy()
def _extract_complementary_data(batch: dict[str, Any]) -> dict[str, Any]:
    """
    Collect complementary data from a batch dictionary.

    Gathers every padding flag (any key containing "_is_pad") plus a fixed set
    of optional bookkeeping keys when they are present.

    Args:
        batch: The batch dictionary.

    Returns:
        A new dictionary with the extracted complementary data.
    """
    # Padding flags first, then the optional keys in a fixed order.
    extracted = {key: val for key, val in batch.items() if "_is_pad" in key}
    for optional_key in ("task", "subtask", "index", "task_index", "episode_index"):
        if optional_key in batch:
            extracted[optional_key] = batch[optional_key]
    return extracted
def create_transition(
    observation: RobotObservation | None = None,
    action: PolicyAction | RobotAction | None = None,
    reward: float = 0.0,
    done: bool = False,
    truncated: bool = False,
    info: dict[str, Any] | None = None,
    complementary_data: dict[str, Any] | None = None,
) -> EnvTransition:
    """
    Build an `EnvTransition` dictionary with sensible defaults.

    `info` and `complementary_data` default to empty dicts when None, so every
    key of the transition is always populated.

    Args:
        observation: Observation dictionary.
        action: Action dictionary.
        reward: Scalar reward value.
        done: Episode termination flag.
        truncated: Episode truncation flag.
        info: Additional info dictionary.
        complementary_data: Complementary data dictionary.

    Returns:
        A complete `EnvTransition` dictionary.
    """
    transition: EnvTransition = {
        TransitionKey.OBSERVATION: observation,
        TransitionKey.ACTION: action,
        TransitionKey.REWARD: reward,
        TransitionKey.DONE: done,
        TransitionKey.TRUNCATED: truncated,
        TransitionKey.INFO: {} if info is None else info,
        TransitionKey.COMPLEMENTARY_DATA: {} if complementary_data is None else complementary_data,
    }
    return transition
def robot_action_observation_to_transition(
    action_observation: tuple[RobotAction, RobotObservation],
) -> EnvTransition:
    """
    Convert a raw robot action and observation pair into a standardized `EnvTransition`.

    Args:
        action_observation: A `(action, observation)` tuple, where `action` is the
            raw action dictionary from a teleoperation device or controller and
            `observation` is the raw observation dictionary from the environment.
            Either element may be None.

    Returns:
        An `EnvTransition` containing the action and observation.

    Raises:
        ValueError: If the input is not a tuple, or either element is neither
            None nor a dictionary.
    """
    if not isinstance(action_observation, tuple):
        raise ValueError("action_observation should be a tuple type with an action and observation")
    action, observation = action_observation
    if action is not None and not isinstance(action, dict):
        raise ValueError(f"Action should be a RobotAction type got {type(action)}")
    if observation is not None and not isinstance(observation, dict):
        raise ValueError(f"Observation should be a RobotObservation type got {type(observation)}")
    return create_transition(action=action, observation=observation)
def robot_action_to_transition(action: RobotAction) -> EnvTransition:
    """
    Wrap a raw robot action dictionary in a fresh `EnvTransition`.

    Args:
        action: The raw action dictionary from a teleoperation device or controller.

    Returns:
        An `EnvTransition` containing the action.

    Raises:
        ValueError: If `action` is not a dictionary.
    """
    if isinstance(action, dict):
        return create_transition(action=action)
    raise ValueError(f"Action should be a RobotAction type got {type(action)}")
def observation_to_transition(observation: RobotObservation) -> EnvTransition:
    """
    Wrap a raw robot observation dictionary in a fresh `EnvTransition`.

    Args:
        observation: The raw observation dictionary from the environment.

    Returns:
        An `EnvTransition` containing the observation.

    Raises:
        ValueError: If `observation` is not a dictionary.
    """
    if isinstance(observation, dict):
        return create_transition(observation=observation)
    raise ValueError(f"Observation should be a RobotObservation type got {type(observation)}")
def transition_to_robot_action(transition: EnvTransition) -> RobotAction:
    """
    Extract the raw robot action dictionary from an `EnvTransition`.

    (The previous docstring claimed this function filtered "action.*" keys into
    a flat dictionary; it only validates and returns the stored action dict.)

    Args:
        transition: The `EnvTransition` containing the action.

    Returns:
        The action dictionary stored under `TransitionKey.ACTION`.

    Raises:
        ValueError: If `transition` is not a dict, or its action is not a dict.
    """
    if not isinstance(transition, dict):
        raise ValueError(f"Transition should be a EnvTransition type (dict) got {type(transition)}")
    action = transition.get(TransitionKey.ACTION)
    if not isinstance(action, dict):
        raise ValueError(f"Action should be a RobotAction type (dict) got {type(action)}")
    # Return the value already fetched and validated instead of a second lookup.
    return action
def transition_to_policy_action(transition: EnvTransition) -> PolicyAction:
    """
    Extract the policy action tensor from an `EnvTransition`.

    Raises:
        ValueError: If `transition` is not a dict or its action is not a `PolicyAction`.
    """
    if not isinstance(transition, dict):
        raise ValueError(f"Transition should be a EnvTransition type (dict) got {type(transition)}")
    action = transition.get(TransitionKey.ACTION)
    if isinstance(action, PolicyAction):
        return action
    raise ValueError(f"Action should be a PolicyAction type got {type(action)}")
def transition_to_observation(transition: EnvTransition) -> RobotObservation:
    """
    Extract the observation dictionary from an `EnvTransition`.

    Raises:
        ValueError: If `transition` is not a dict or its observation is not a dict.
    """
    if not isinstance(transition, dict):
        raise ValueError(f"Transition should be a EnvTransition type (dict) got {type(transition)}")
    observation = transition.get(TransitionKey.OBSERVATION)
    if isinstance(observation, dict):
        return observation
    raise ValueError(f"Observation should be a RobotObservation (dict) type got {type(observation)}")
def policy_action_to_transition(action: PolicyAction) -> EnvTransition:
    """
    Wrap a policy action tensor in a fresh `EnvTransition`.

    Raises:
        ValueError: If `action` is not a `PolicyAction`.
    """
    if isinstance(action, PolicyAction):
        return create_transition(action=action)
    raise ValueError(f"Action should be a PolicyAction type got {type(action)}")
def batch_to_transition(batch: dict[str, Any]) -> EnvTransition:
    """
    Convert a batch dictionary from a dataset/dataloader into an `EnvTransition`.

    Keys starting with `OBS_PREFIX` become the observation; the `ACTION`,
    `REWARD`, `DONE`, `TRUNCATED` and "info" keys map to their transition
    slots; padding/task/index fields become complementary data. Missing keys
    fall back to sensible defaults.

    Args:
        batch: A batch dictionary.

    Returns:
        An `EnvTransition` dictionary.

    Raises:
        ValueError: If the input is not a dictionary, or its action is present
            but not a `PolicyAction`.
    """
    # Validate input type.
    if not isinstance(batch, dict):
        raise ValueError(f"EnvTransition must be a dictionary. Got {type(batch).__name__}")

    action = batch.get(ACTION)
    if action is not None and not isinstance(action, PolicyAction):
        raise ValueError(f"Action should be a PolicyAction type got {type(action)}")

    # Extract observation and complementary data keys.
    observation_keys = {k: v for k, v in batch.items() if k.startswith(OBS_PREFIX)}
    complementary_data = _extract_complementary_data(batch)

    return create_transition(
        observation=observation_keys if observation_keys else None,
        action=action,  # reuse the validated value instead of a second lookup
        reward=batch.get(REWARD, 0.0),
        done=batch.get(DONE, False),
        truncated=batch.get(TRUNCATED, False),
        info=batch.get("info", {}),
        complementary_data=complementary_data if complementary_data else None,
    )
def transition_to_batch(transition: EnvTransition) -> dict[str, Any]:
    """
    Flatten an `EnvTransition` back into the canonical LeRobot batch format.

    This is the inverse of `batch_to_transition`: complementary data and
    observation entries are merged into the top level of the batch.

    Args:
        transition: The `EnvTransition` to convert.

    Returns:
        A batch dictionary with canonical LeRobot field names.

    Raises:
        ValueError: If `transition` is not a dictionary.
    """
    if not isinstance(transition, dict):
        raise ValueError(f"Transition should be a EnvTransition type (dict) got {type(transition)}")

    batch: dict[str, Any] = {
        ACTION: transition.get(TransitionKey.ACTION),
        REWARD: transition.get(TransitionKey.REWARD, 0.0),
        DONE: transition.get(TransitionKey.DONE, False),
        TRUNCATED: transition.get(TransitionKey.TRUNCATED, False),
        INFO: transition.get(TransitionKey.INFO, {}),
    }

    # Merge complementary data entries into the top level.
    complementary = transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
    if complementary:
        batch.update(complementary)

    # Flatten the observation dictionary into the top level.
    observation = transition.get(TransitionKey.OBSERVATION)
    if isinstance(observation, dict):
        batch.update(observation)
    return batch
def identity_transition(transition: EnvTransition) -> EnvTransition:
    """
    An identity function for transitions, returning the input unchanged.

    Useful as a default or placeholder in processing pipelines.

    Args:
        transition: An `EnvTransition`.

    Returns:
        The same `EnvTransition` object (no copy is made).
    """
    return transition
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/converters.py",
"license": "Apache License 2.0",
"lines": 319,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/core.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from enum import Enum
from typing import Any, TypedDict
import numpy as np
import torch
class TransitionKey(str, Enum):
    """Keys for accessing EnvTransition dictionary components.

    Inherits from `str` so members carry their plain-string values and can be
    used directly where strings are expected (e.g. as TypedDict keys below).
    """

    # TODO(Steven): Use consts
    OBSERVATION = "observation"
    ACTION = "action"
    REWARD = "reward"
    DONE = "done"
    TRUNCATED = "truncated"
    INFO = "info"
    COMPLEMENTARY_DATA = "complementary_data"
# Type aliases for the different action/observation representations used across the codebase.
PolicyAction = torch.Tensor  # action tensor produced/consumed by a policy
RobotAction = dict[str, Any]  # raw named action values for a robot
EnvAction = np.ndarray  # action format for gym-style environments
RobotObservation = dict[str, Any]  # raw named observation values from a robot

# Functional TypedDict syntax is used because the keys are computed from
# `TransitionKey` enum values rather than written as literal class attributes.
EnvTransition = TypedDict(
    "EnvTransition",
    {
        TransitionKey.OBSERVATION.value: RobotObservation | None,
        TransitionKey.ACTION.value: PolicyAction | RobotAction | EnvAction | None,
        TransitionKey.REWARD.value: float | torch.Tensor | None,
        TransitionKey.DONE.value: bool | torch.Tensor | None,
        TransitionKey.TRUNCATED.value: bool | torch.Tensor | None,
        TransitionKey.INFO.value: dict[str, Any] | None,
        TransitionKey.COMPLEMENTARY_DATA.value: dict[str, Any] | None,
    },
)
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/core.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/delta_action_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from lerobot.configs.types import FeatureType, PipelineFeatureType, PolicyFeature
from .core import PolicyAction, RobotAction
from .pipeline import ActionProcessorStep, ProcessorStepRegistry, RobotActionProcessorStep
@ProcessorStepRegistry.register("map_tensor_to_delta_action_dict")
@dataclass
class MapTensorToDeltaActionDictStep(ActionProcessorStep):
    """
    Decompose a flat policy action tensor into a named delta-action dictionary.

    The first three elements become delta_x/delta_y/delta_z; when `use_gripper`
    is True, the fourth element becomes the gripper command.

    Attributes:
        use_gripper: If True, assumes the 4th element of the tensor is the
            gripper action.
    """

    use_gripper: bool = True

    def action(self, action: PolicyAction) -> RobotAction:
        if not isinstance(action, PolicyAction):
            raise ValueError("Only PolicyAction is supported for this processor")
        # Drop a leading batch dimension if present.
        vector = action.squeeze(0) if action.dim() > 1 else action
        # TODO (maractingi): add rotation
        delta_action = {
            "delta_x": vector[0].item(),
            "delta_y": vector[1].item(),
            "delta_z": vector[2].item(),
        }
        if self.use_gripper:
            delta_action["gripper"] = vector[3].item()
        return delta_action

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # Every produced key is a scalar (shape (1,)) action feature.
        produced_keys = ["delta_x", "delta_y", "delta_z"]
        if self.use_gripper:
            produced_keys.append("gripper")
        for key in produced_keys:
            features[PipelineFeatureType.ACTION][key] = PolicyFeature(
                type=FeatureType.ACTION, shape=(1,)
            )
        return features
@ProcessorStepRegistry.register("map_delta_action_to_robot_action")
@dataclass
class MapDeltaActionToRobotActionStep(RobotActionProcessorStep):
    """
    Maps delta actions from teleoperators to robot target actions for inverse kinematics.

    This step converts a dictionary of delta movements (e.g., from a gamepad)
    into a target action format that includes an "enabled" flag and target
    end-effector positions. It also handles scaling and noise filtering.

    Attributes:
        position_scale: A factor to scale the delta position inputs.
        noise_threshold: The magnitude below which delta inputs are considered noise
            and do not trigger an "enabled" state.
    """

    # Scale factors for delta movements
    position_scale: float = 1.0
    noise_threshold: float = 1e-3  # 1 mm threshold to filter out noise

    def action(self, action: RobotAction) -> RobotAction:
        """
        Convert a delta-action dict into a robot target-action dict.

        Args:
            action: A dict holding "delta_x", "delta_y", "delta_z" and "gripper"
                (all four keys are required; they are consumed via pop).

        Returns:
            A dict with "enabled", "target_{x,y,z}", "target_{wx,wy,wz}" and
            "gripper_vel" keys.
        """
        # NOTE (maractingi): Action can be a dict from the teleop_devices or a tensor from the policy
        # TODO (maractingi): changing this target_xyz naming convention from the teleop_devices
        delta_x = action.pop("delta_x")
        delta_y = action.pop("delta_y")
        delta_z = action.pop("delta_z")
        gripper = action.pop("gripper")
        # Determine if the teleoperator is actively providing input
        # Consider enabled if any significant movement delta is detected
        position_magnitude = (delta_x**2 + delta_y**2 + delta_z**2) ** 0.5  # Use Euclidean norm for position
        enabled = position_magnitude > self.noise_threshold  # Small threshold to avoid noise
        # Scale the deltas appropriately
        scaled_delta_x = delta_x * self.position_scale
        scaled_delta_y = delta_y * self.position_scale
        scaled_delta_z = delta_z * self.position_scale
        # For gamepad/keyboard, we don't have rotation input, so set to 0
        # These could be extended in the future for more sophisticated teleoperators
        target_wx = 0.0
        target_wy = 0.0
        target_wz = 0.0
        # Update action with robot target format
        action = {
            "enabled": enabled,
            "target_x": scaled_delta_x,
            "target_y": scaled_delta_y,
            "target_z": scaled_delta_z,
            "target_wx": target_wx,
            "target_wy": target_wy,
            "target_wz": target_wz,
            "gripper_vel": float(gripper),
        }
        return action

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """Replace the consumed delta features with the target features this step emits."""
        # Fix: `action()` consumes "delta_x/y/z" and "gripper" (the keys produced by
        # MapTensorToDeltaActionDictStep). The previous code popped "delta_gripper",
        # a key that never exists, which left a stale "gripper" feature behind, and
        # it never declared the "gripper_vel" output that `action()` actually emits.
        for consumed in ("delta_x", "delta_y", "delta_z", "gripper"):
            features[PipelineFeatureType.ACTION].pop(consumed, None)
        produced = (
            "enabled",
            "target_x",
            "target_y",
            "target_z",
            "target_wx",
            "target_wy",
            "target_wz",
            "gripper_vel",
        )
        for feat in produced:
            features[PipelineFeatureType.ACTION][feat] = PolicyFeature(
                type=FeatureType.ACTION, shape=(1,)
            )
        return features
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/delta_action_processor.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/factory.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .converters import (
observation_to_transition,
robot_action_observation_to_transition,
transition_to_observation,
transition_to_robot_action,
)
from .core import RobotAction, RobotObservation
from .pipeline import IdentityProcessorStep, RobotProcessorPipeline
def make_default_teleop_action_processor() -> RobotProcessorPipeline[
    tuple[RobotAction, RobotObservation], RobotAction
]:
    """Build the default (identity) pipeline for teleoperator actions."""
    return RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
        steps=[IdentityProcessorStep()],
        to_transition=robot_action_observation_to_transition,
        to_output=transition_to_robot_action,
    )
def make_default_robot_action_processor() -> RobotProcessorPipeline[
    tuple[RobotAction, RobotObservation], RobotAction
]:
    """Build the default (identity) pipeline for robot actions."""
    return RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
        steps=[IdentityProcessorStep()],
        to_transition=robot_action_observation_to_transition,
        to_output=transition_to_robot_action,
    )
def make_default_robot_observation_processor() -> RobotProcessorPipeline[RobotObservation, RobotObservation]:
    """Build the default (identity) pipeline for robot observations."""
    return RobotProcessorPipeline[RobotObservation, RobotObservation](
        steps=[IdentityProcessorStep()],
        to_transition=observation_to_transition,
        to_output=transition_to_observation,
    )
def make_default_processors():
    """Return the default (teleop action, robot action, robot observation) pipelines as a tuple."""
    return (
        make_default_teleop_action_processor(),
        make_default_robot_action_processor(),
        make_default_robot_observation_processor(),
    )
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/factory.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/gym_action_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from lerobot.configs.types import PipelineFeatureType, PolicyFeature
from .converters import to_tensor
from .core import EnvAction, EnvTransition, PolicyAction
from .hil_processor import TELEOP_ACTION_KEY
from .pipeline import ActionProcessorStep, ProcessorStep, ProcessorStepRegistry
@ProcessorStepRegistry.register("torch2numpy_action_processor")
@dataclass
class Torch2NumpyActionProcessorStep(ActionProcessorStep):
    """
    Converts a PyTorch tensor action to a NumPy array.

    This step is useful when the output of a policy (typically a torch.Tensor)
    needs to be passed to an environment or component that expects a NumPy array.

    Attributes:
        squeeze_batch_dim: If True, removes the first dimension of the array
            if it is of size 1. This is useful for converting a
            batched action of size (1, D) to a single action of size (D,).
    """

    squeeze_batch_dim: bool = True

    def action(self, action: PolicyAction) -> EnvAction:
        """
        Convert `action` to a NumPy array, optionally dropping a leading batch dim.

        Raises:
            TypeError: If `action` is not a PolicyAction.
        """
        if not isinstance(action, PolicyAction):
            # Fix: the previous message claimed "PolicyAction or None" was accepted,
            # but a None action reaches this branch and raises as well.
            raise TypeError(
                f"Expected PolicyAction, got {type(action).__name__}. "
                "Use appropriate processor for non-tensor actions."
            )
        # detach() drops any autograd graph; cpu() is required before numpy().
        numpy_action = action.detach().cpu().numpy()
        # Remove batch dimensions but preserve action dimensions.
        # Only squeeze if there's a batch dimension (first dim == 1).
        if (
            self.squeeze_batch_dim
            and numpy_action.shape
            and len(numpy_action.shape) > 1
            and numpy_action.shape[0] == 1
        ):
            numpy_action = numpy_action.squeeze(0)
        return numpy_action

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # Container/dtype conversion only; declared features are unchanged.
        return features
@ProcessorStepRegistry.register("numpy2torch_action_processor")
@dataclass
class Numpy2TorchActionProcessorStep(ProcessorStep):
    """Converts a NumPy array action to a PyTorch tensor when action is present."""

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Convert the numpy action (and any numpy teleop action) to torch; pass through otherwise."""
        from .core import TransitionKey

        self._current_transition = transition.copy()
        new_transition = self._current_transition

        action = new_transition.get(TransitionKey.ACTION)
        if action is not None:
            if not isinstance(action, EnvAction):
                raise TypeError(
                    f"Expected np.ndarray or None, got {type(action).__name__}. "
                    "Use appropriate processor for non-tensor actions."
                )
            # dtype=None keeps the original numpy dtype through the conversion.
            new_transition[TransitionKey.ACTION] = to_tensor(action, dtype=None)

        complementary_data = new_transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
        if TELEOP_ACTION_KEY in complementary_data:
            teleop_action = complementary_data[TELEOP_ACTION_KEY]
            if isinstance(teleop_action, EnvAction):
                complementary_data[TELEOP_ACTION_KEY] = to_tensor(teleop_action)
            new_transition[TransitionKey.COMPLEMENTARY_DATA] = complementary_data
        return new_transition

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        return features
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/gym_action_processor.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/hil_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import time
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Protocol, TypeVar, runtime_checkable
import numpy as np
import torch
import torchvision.transforms.functional as F # noqa: N812
from lerobot.configs.types import PipelineFeatureType, PolicyFeature
from lerobot.teleoperators.utils import TeleopEvents
if TYPE_CHECKING:
from lerobot.teleoperators.teleoperator import Teleoperator
from .core import EnvTransition, PolicyAction, TransitionKey
from .pipeline import (
ComplementaryDataProcessorStep,
InfoProcessorStep,
ObservationProcessorStep,
ProcessorStep,
ProcessorStepRegistry,
TruncatedProcessorStep,
)
GRIPPER_KEY = "gripper"
DISCRETE_PENALTY_KEY = "discrete_penalty"
TELEOP_ACTION_KEY = "teleop_action"
@runtime_checkable
class HasTeleopEvents(Protocol):
    """
    Structural interface for teleoperators that expose control events.

    Any object providing a ``get_teleop_events()`` method satisfies this
    protocol, letting processor steps consume event-based controls (episode
    termination, success flagging, ...) without depending on a concrete
    teleoperator class.
    """

    def get_teleop_events(self) -> dict[str, Any]:
        """
        Return extra control events reported by the teleoperator.

        Returns:
            A dictionary of control events such as:
                - `is_intervention`: bool - Whether the human is currently intervening.
                - `terminate_episode`: bool - Whether to terminate the current episode.
                - `success`: bool - Whether the episode was successful.
                - `rerecord_episode`: bool - Whether to rerecord the episode.
        """
        ...
# Type variable constrained to Teleoperator subclasses that also implement events
TeleopWithEvents = TypeVar("TeleopWithEvents", bound="Teleoperator")
def _check_teleop_with_events(teleop: "Teleoperator") -> None:
    """
    Validate at runtime that `teleop` satisfies the `HasTeleopEvents` protocol.

    Args:
        teleop: The teleoperator instance to check.

    Raises:
        TypeError: If the teleoperator lacks a `get_teleop_events` method.
    """
    if isinstance(teleop, HasTeleopEvents):
        return
    raise TypeError(
        f"Teleoperator {type(teleop).__name__} must implement get_teleop_events() method. "
        f"Compatible teleoperators: GamepadTeleop, KeyboardEndEffectorTeleop"
    )
@ProcessorStepRegistry.register("add_teleop_action_as_complementary_data")
@dataclass
class AddTeleopActionAsComplimentaryDataStep(ComplementaryDataProcessorStep):
    """
    Record the teleoperator's raw action in the transition's complementary data.

    Useful for human-in-the-loop setups where downstream processors need access
    to the human input, e.g. to let an intervention override a policy action.

    NOTE(review): the class name keeps the historical "Complimentary" spelling;
    renaming it would break external references.

    Attributes:
        teleop_device: The teleoperator instance queried for its current action.
    """

    teleop_device: "Teleoperator"

    def complementary_data(self, complementary_data: dict) -> dict:
        """Return a copy of `complementary_data` with the teleop action stored under `teleop_action`."""
        updated = dict(complementary_data)
        updated[TELEOP_ACTION_KEY] = self.teleop_device.get_action()
        return updated

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # Complementary data is not part of the declared features.
        return features
@ProcessorStepRegistry.register("add_teleop_action_as_info")
@dataclass
class AddTeleopEventsAsInfoStep(InfoProcessorStep):
    """
    Merge teleoperator control events (e.g., terminate, success) into the transition's info.

    NOTE(review): the registry name says "action" although this step adds *events*;
    it is kept as-is because serialized pipelines reference the registered string.

    Attributes:
        teleop_device: A teleoperator implementing the `HasTeleopEvents` protocol.
    """

    teleop_device: TeleopWithEvents

    def __post_init__(self):
        """Fail fast if the teleoperator does not support event queries."""
        _check_teleop_with_events(self.teleop_device)

    def info(self, info: dict) -> dict:
        """Return a new info dict extended with the teleoperator's current events."""
        return {**info, **self.teleop_device.get_teleop_events()}

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # Info entries are not part of the declared features.
        return features
@ProcessorStepRegistry.register("image_crop_resize_processor")
@dataclass
class ImageCropResizeProcessorStep(ObservationProcessorStep):
    """
    Crops and/or resizes image observations.

    Iterates over every observation key containing "image" and applies the
    configured crop and resize. Tensors living on MPS are moved to CPU for the
    transforms (no MPS kernels), then moved back to their original device.

    Attributes:
        crop_params_dict: Maps image keys to torchvision crop parameters
            (top, left, height, width). Keys not listed are left uncropped.
        resize_size: Target (height, width) applied to all images, or None.
    """

    crop_params_dict: dict[str, tuple[int, int, int, int]] | None = None
    resize_size: tuple[int, int] | None = None

    def observation(self, observation: dict) -> dict:
        """
        Apply cropping and resizing to all images in the observation dictionary.

        Args:
            observation: The observation dictionary; values under keys containing
                "image" are assumed to be float tensors in [0, 1] — TODO confirm.

        Returns:
            A new observation dictionary with transformed images.
        """
        # Fast path: nothing configured, return the input untouched.
        if self.resize_size is None and not self.crop_params_dict:
            return observation
        new_observation = dict(observation)
        # Process all image keys in the observation
        for key in observation:
            if "image" not in key:
                continue
            image = observation[key]
            device = image.device
            # NOTE (maractingi): No mps kernel for crop and resize, so we need to move to cpu
            if device.type == "mps":
                image = image.cpu()
            # Crop if crop params are provided for this key
            if self.crop_params_dict is not None and key in self.crop_params_dict:
                crop_params = self.crop_params_dict[key]
                image = F.crop(image, *crop_params)
            if self.resize_size is not None:
                image = F.resize(image, self.resize_size)
                # Interpolation can overshoot the valid range; clamp back to [0, 1].
                image = image.clamp(0.0, 1.0)
            # Restore the original device (may have been moved off MPS above).
            new_observation[key] = image.to(device)
        return new_observation

    def get_config(self) -> dict[str, Any]:
        """
        Returns the configuration of the step for serialization.

        Returns:
            A dictionary with the crop parameters and resize dimensions.
        """
        return {
            "crop_params_dict": self.crop_params_dict,
            "resize_size": self.resize_size,
        }

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """
        Update image feature shapes when resizing is configured.

        Args:
            features: The policy features dictionary.

        Returns:
            The updated policy features dictionary with new image shapes.
        """
        if self.resize_size is None:
            return features
        for key in features[PipelineFeatureType.OBSERVATION]:
            if "image" in key:
                # Channel count is preserved; only the spatial dims change.
                nb_channel = features[PipelineFeatureType.OBSERVATION][key].shape[0]
                features[PipelineFeatureType.OBSERVATION][key] = PolicyFeature(
                    type=features[PipelineFeatureType.OBSERVATION][key].type,
                    shape=(nb_channel, *self.resize_size),
                )
        return features
@dataclass
@ProcessorStepRegistry.register("time_limit_processor")
class TimeLimitProcessorStep(TruncatedProcessorStep):
    """
    Tracks episode length and truncates once a step budget is exhausted.

    Attributes:
        max_episode_steps: Maximum number of steps allowed per episode.
        current_step: Step counter for the episode in progress.
    """

    max_episode_steps: int
    current_step: int = 0

    def truncated(self, truncated: bool) -> bool:
        """Advance the step counter; force truncation once the limit is reached."""
        self.current_step += 1
        # TODO (steven): missing an else truncated = False?
        return truncated or self.current_step >= self.max_episode_steps

    def get_config(self) -> dict[str, Any]:
        """Serializable configuration for this step (`max_episode_steps` only)."""
        return {
            "max_episode_steps": self.max_episode_steps,
        }

    def reset(self) -> None:
        """Resets the step counter, typically called at the start of a new episode."""
        self.current_step = 0

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        return features
@ProcessorStepRegistry.register("gym_hil_adapter_processor")
class GymHILAdapterProcessorStep(ProcessorStep):
    """
    Adapts the output of the `gym-hil` environment to the format expected by `lerobot` processors.

    This step normalizes the `transition` object by:
    1. Copying `teleop_action` from `info` to `complementary_data`.
    2. Copying `is_intervention` from `info` (using the string key) to `info` (using the enum key).
    """

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        # NOTE: mutates the incoming transition in place (no copy), unlike most
        # other steps in this module which operate on `transition.copy()`.
        info = transition.get(TransitionKey.INFO, {})
        complementary_data = transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
        # Mirror the raw teleop action where downstream steps expect it.
        if TELEOP_ACTION_KEY in info:
            complementary_data[TELEOP_ACTION_KEY] = info[TELEOP_ACTION_KEY]
        # gym-hil reports intervention under a plain string key; re-expose it
        # under the TeleopEvents enum key used by lerobot processors.
        if "is_intervention" in info:
            info[TeleopEvents.IS_INTERVENTION] = info["is_intervention"]
        transition[TransitionKey.INFO] = info
        transition[TransitionKey.COMPLEMENTARY_DATA] = complementary_data
        return transition

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        # Pure relocation of existing values; declared features are unchanged.
        return features
@dataclass
@ProcessorStepRegistry.register("gripper_penalty_processor")
class GripperPenaltyProcessorStep(ProcessorStep):
    """
    Penalizes wasteful gripper commands.

    A penalty is recorded in complementary data when the action tries to close
    an already-closed gripper or open an already-open one, judged by normalized
    position thresholds.

    Attributes:
        penalty: The negative reward value to apply.
        max_gripper_pos: The maximum position value for the gripper, used for normalization.
    """

    penalty: float = -0.01
    max_gripper_pos: float = 30.0

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Attach the gripper penalty (possibly zero) to the transition's complementary data."""
        new_transition = transition.copy()
        complementary_data = new_transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
        raw_joint_positions = complementary_data.get("raw_joint_positions")
        if raw_joint_positions is None:
            return new_transition
        current_gripper_pos = raw_joint_positions.get(GRIPPER_KEY, None)
        if current_gripper_pos is None:
            return new_transition
        # Gripper action is a PolicyAction at this stage; its last element is the
        # gripper command.
        action = new_transition.get(TransitionKey.ACTION)
        act_norm = action[-1].item() / self.max_gripper_pos
        state_norm = current_gripper_pos / self.max_gripper_pos
        # Penalize "close while closed" and "open while open".
        is_wasteful = (state_norm < 0.5 and act_norm > 0.5) or (
            state_norm > 0.75 and act_norm < 0.5
        )
        updated = dict(complementary_data)
        updated[DISCRETE_PENALTY_KEY] = self.penalty * int(is_wasteful)
        new_transition[TransitionKey.COMPLEMENTARY_DATA] = updated
        return new_transition

    def get_config(self) -> dict[str, Any]:
        """Serializable configuration (penalty value and max gripper position)."""
        return {
            "penalty": self.penalty,
            "max_gripper_pos": self.max_gripper_pos,
        }

    def reset(self) -> None:
        """This step keeps no internal state; nothing to reset."""
        pass

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        return features
@dataclass
@ProcessorStepRegistry.register("intervention_action_processor")
class InterventionActionProcessorStep(ProcessorStep):
    """
    Handles human intervention, overriding policy actions and managing episode termination.

    When an intervention is detected (via teleoperator events in the `info` dict),
    this step replaces the policy's action with the human's teleoperated action.
    It also processes signals to terminate the episode or flag success.

    Attributes:
        use_gripper: Whether to include the gripper in the teleoperated action.
        terminate_on_success: If True, automatically sets the `done` flag when a
            `success` event is received.
    """

    use_gripper: bool = False
    terminate_on_success: bool = True

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """
        Processes the transition to handle interventions.

        Args:
            transition: The incoming environment transition.

        Returns:
            The modified transition, potentially with an overridden action, updated
            reward, and termination status.

        Raises:
            ValueError: If the transition's action is not a PolicyAction.
        """
        action = transition.get(TransitionKey.ACTION)
        if not isinstance(action, PolicyAction):
            raise ValueError(f"Action should be a PolicyAction type got {type(action)}")
        # Get intervention signals from complementary data
        info = transition.get(TransitionKey.INFO, {})
        complementary_data = transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
        # Default {} means a missing teleop action still passes the `is not None`
        # check below; the dict branch then yields all-zero deltas.
        teleop_action = complementary_data.get(TELEOP_ACTION_KEY, {})
        is_intervention = info.get(TeleopEvents.IS_INTERVENTION, False)
        terminate_episode = info.get(TeleopEvents.TERMINATE_EPISODE, False)
        success = info.get(TeleopEvents.SUCCESS, False)
        rerecord_episode = info.get(TeleopEvents.RERECORD_EPISODE, False)
        new_transition = transition.copy()
        # Override action if intervention is active
        if is_intervention and teleop_action is not None:
            if isinstance(teleop_action, dict):
                # Convert teleop_action dict to tensor format
                action_list = [
                    teleop_action.get("delta_x", 0.0),
                    teleop_action.get("delta_y", 0.0),
                    teleop_action.get("delta_z", 0.0),
                ]
                if self.use_gripper:
                    # 1.0 default — presumably "keep gripper open"; TODO confirm.
                    action_list.append(teleop_action.get(GRIPPER_KEY, 1.0))
            elif isinstance(teleop_action, np.ndarray):
                action_list = teleop_action.tolist()
            else:
                action_list = teleop_action
            # Match the policy action's dtype/device so downstream consumers see
            # a drop-in replacement.
            teleop_action_tensor = torch.tensor(action_list, dtype=action.dtype, device=action.device)
            new_transition[TransitionKey.ACTION] = teleop_action_tensor
        # Handle episode termination
        new_transition[TransitionKey.DONE] = bool(terminate_episode) or (
            self.terminate_on_success and success
        )
        # Reward is a binary success signal here (1.0 on success, else 0.0).
        new_transition[TransitionKey.REWARD] = float(success)
        # Update info with intervention metadata
        info = new_transition.get(TransitionKey.INFO, {})
        info[TeleopEvents.IS_INTERVENTION] = is_intervention
        info[TeleopEvents.RERECORD_EPISODE] = rerecord_episode
        info[TeleopEvents.SUCCESS] = success
        new_transition[TransitionKey.INFO] = info
        # Update complementary data with teleop action (the final action actually
        # taken, whether from the policy or the human).
        complementary_data = new_transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
        complementary_data[TELEOP_ACTION_KEY] = new_transition.get(TransitionKey.ACTION)
        new_transition[TransitionKey.COMPLEMENTARY_DATA] = complementary_data
        return new_transition

    def get_config(self) -> dict[str, Any]:
        """
        Returns the configuration of the step for serialization.

        Returns:
            A dictionary containing the step's configuration attributes.
        """
        return {
            "use_gripper": self.use_gripper,
            "terminate_on_success": self.terminate_on_success,
        }

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        return features
@dataclass
@ProcessorStepRegistry.register("reward_classifier_processor")
class RewardClassifierProcessorStep(ProcessorStep):
    """
    Applies a pretrained reward classifier to image observations to predict success.

    This step uses a model to determine if the current state is successful, updating
    the reward and potentially terminating the episode.

    Attributes:
        pretrained_path: Path to the pretrained reward classifier model.
        device: The device to run the classifier on.
        success_threshold: The probability threshold to consider a prediction as successful.
        success_reward: The reward value to assign on success.
        terminate_on_success: If True, terminates the episode upon successful classification.
        reward_classifier: The loaded classifier model instance.
    """

    pretrained_path: str | None = None
    device: str = "cpu"
    success_threshold: float = 0.5
    success_reward: float = 1.0
    terminate_on_success: bool = True
    reward_classifier: Any = None

    def __post_init__(self):
        """Load the classifier from `pretrained_path` (if set) and put it in eval mode."""
        if self.pretrained_path is not None:
            # Imported lazily so the SAC reward-model dependency is only needed
            # when a pretrained path is actually configured.
            from lerobot.policies.sac.reward_model.modeling_classifier import Classifier

            self.reward_classifier = Classifier.from_pretrained(self.pretrained_path)
            self.reward_classifier.to(self.device)
            self.reward_classifier.eval()

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """
        Processes a transition, applying the reward classifier to its image observations.

        Args:
            transition: The incoming environment transition.

        Returns:
            The modified transition with an updated reward and done flag based on the
            classifier's prediction. Returned unchanged when there is no observation,
            no classifier, or no image keys.
        """
        new_transition = transition.copy()
        observation = new_transition.get(TransitionKey.OBSERVATION)
        if observation is None or self.reward_classifier is None:
            return new_transition
        # Extract images from observation
        images = {key: value for key, value in observation.items() if "image" in key}
        if not images:
            return new_transition
        # Run reward classifier
        start_time = time.perf_counter()
        with torch.inference_mode():
            success = self.reward_classifier.predict_reward(images, threshold=self.success_threshold)
        # Inference rate in Hz, reported in info for monitoring.
        classifier_frequency = 1 / (time.perf_counter() - start_time)
        # Calculate reward and termination
        reward = new_transition.get(TransitionKey.REWARD, 0.0)
        terminated = new_transition.get(TransitionKey.DONE, False)
        # NOTE(review): presumably `predict_reward` returns a 0/1 score; isclose
        # guards against float representations of 1 — confirm its contract.
        if math.isclose(success, 1, abs_tol=1e-2):
            reward = self.success_reward
            if self.terminate_on_success:
                terminated = True
        # Update transition
        new_transition[TransitionKey.REWARD] = reward
        new_transition[TransitionKey.DONE] = terminated
        # Update info with classifier frequency
        info = new_transition.get(TransitionKey.INFO, {})
        info["reward_classifier_frequency"] = classifier_frequency
        new_transition[TransitionKey.INFO] = info
        return new_transition

    def get_config(self) -> dict[str, Any]:
        """
        Returns the configuration of the step for serialization.

        Note: `pretrained_path` is intentionally excluded (weights are external).

        Returns:
            A dictionary containing the step's configuration attributes.
        """
        return {
            "device": self.device,
            "success_threshold": self.success_threshold,
            "success_reward": self.success_reward,
            "terminate_on_success": self.terminate_on_success,
        }

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        return features
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/hil_processor.py",
"license": "Apache License 2.0",
"lines": 494,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/migrate_policy_normalization.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A generic script to migrate LeRobot policies with built-in normalization layers to the new
pipeline-based processor system.
This script performs the following steps:
1. Loads a pretrained policy model and its configuration from a local path or the
Hugging Face Hub.
2. Scans the model's state dictionary to extract normalization statistics (e.g., mean,
std, min, max) for all features.
3. Creates two new processor pipelines:
- A preprocessor that normalizes inputs (observations) and outputs (actions).
- A postprocessor that unnormalizes outputs (actions) for inference.
4. Removes the original normalization layers from the model's state dictionary,
creating a "clean" model.
5. Saves the new clean model, the preprocessor, the postprocessor, and a generated
model card to a new directory.
6. Optionally pushes all the new artifacts to the Hugging Face Hub.
Usage:
python src/lerobot/processor/migrate_policy_normalization.py \
--pretrained-path lerobot/act_aloha_sim_transfer_cube_human \
--push-to-hub \
--branch main
Note: This script now uses the modern `make_pre_post_processors` and `make_policy_config`
factory functions from `lerobot.policies.factory` to create processors and configurations,
ensuring consistency with the current codebase.
The script extracts normalization statistics from the old model's state_dict, creates clean
processor pipelines using the factory functions, and saves a migrated model that is compatible
with the new PolicyProcessorPipeline architecture.
"""
import argparse
import json
import os
from pathlib import Path
from typing import Any

import torch
from huggingface_hub import HfApi, hf_hub_download
from huggingface_hub.utils import EntryNotFoundError
from safetensors.torch import load_file as load_safetensors

from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.policies.factory import get_policy_class, make_policy_config, make_pre_post_processors
from lerobot.utils.constants import ACTION
def extract_normalization_stats(state_dict: dict[str, torch.Tensor]) -> dict[str, dict[str, torch.Tensor]]:
    """
    Scans a model's state_dict to find and extract normalization statistics.

    This function identifies keys corresponding to normalization layers (e.g., those
    for mean, std, min, max) based on a set of predefined prefixes and organizes
    them into a nested dictionary.

    Args:
        state_dict: The state dictionary of a pretrained policy model.

    Returns:
        A nested dictionary where outer keys are feature names (e.g.,
        'observation.state') and inner keys are statistic types ('mean', 'std'),
        mapping to their corresponding tensor values.
    """
    stats = {}
    # Prefixes to match and strip. Order matters: the more specific "buffer_"
    # variants must come before the plain prefixes so that buffer keys are
    # consumed by the correct pattern first.
    normalization_patterns = [
        "normalize_inputs.buffer_",
        "unnormalize_outputs.buffer_",
        "normalize_targets.buffer_",
        "normalize.",  # Must come after normalize_* patterns
        "unnormalize.",  # Must come after unnormalize_* patterns
        "input_normalizer.",
        "output_normalizer.",
        # BUGFIX: this entry was previously misspelled "normalalize_inputs.",
        # so plain (non-buffer) "normalize_inputs.*" keys were never extracted.
        "normalize_inputs.",
        "unnormalize_outputs.",
        "normalize_targets.",
        "unnormalize_targets.",
    ]
    # Process each key in state_dict
    for key, tensor in state_dict.items():
        # Try each pattern; only the first match is used.
        for pattern in normalization_patterns:
            if key.startswith(pattern):
                # Extract the remaining part after the pattern
                remaining = key[len(pattern):]
                parts = remaining.split(".")
                # Need at least feature name and stat type
                if len(parts) >= 2:
                    # Last part is the stat type (mean, std, min, max, etc.)
                    stat_type = parts[-1]
                    # Everything else is the feature name; underscores in the
                    # flattened key are restored to dots (e.g. "observation_state"
                    # -> "observation.state").
                    feature_name = ".".join(parts[:-1]).replace("_", ".")
                    # Add to stats
                    if feature_name not in stats:
                        stats[feature_name] = {}
                    stats[feature_name][stat_type] = tensor.clone()
                # Only process the first matching pattern
                break
    return stats
def detect_features_and_norm_modes(
    config: dict[str, Any], stats: dict[str, dict[str, torch.Tensor]]
) -> tuple[dict[str, PolicyFeature], dict[FeatureType, NormalizationMode]]:
    """
    Infers policy features and normalization modes from the model config and stats.

    This function first attempts to find feature definitions and normalization
    mappings directly from the policy's configuration file. If this information is
    not present, it infers it from the extracted normalization statistics, using
    tensor shapes to determine feature shapes and the presence of specific stat
    keys (e.g., 'mean'/'std' vs 'min'/'max') to determine the normalization mode.
    It applies sensible defaults if inference is not possible.

    Args:
        config: The policy's configuration dictionary from `config.json`.
        stats: The normalization statistics extracted from the model's state_dict.

    Returns:
        A tuple containing:
            - A dictionary mapping feature names to `PolicyFeature` objects.
            - A dictionary mapping `FeatureType` enums to `NormalizationMode` enums.
    """
    features = {}
    norm_modes = {}
    # First, check if there's a normalization_mapping in the config
    if "normalization_mapping" in config:
        print(f"Found normalization_mapping in config: {config['normalization_mapping']}")
        # Extract normalization modes from config
        for feature_type_str, mode_str in config["normalization_mapping"].items():
            # Convert string to FeatureType enum.
            # NOTE(review): the if/elif chain below cannot raise, so the
            # try/except is effectively dead code kept for safety.
            try:
                if feature_type_str == "VISUAL":
                    feature_type = FeatureType.VISUAL
                elif feature_type_str == "STATE":
                    feature_type = FeatureType.STATE
                elif feature_type_str == "ACTION":
                    feature_type = FeatureType.ACTION
                else:
                    print(f"Warning: Unknown feature type '{feature_type_str}', skipping")
                    continue
            except (AttributeError, ValueError):
                print(f"Warning: Could not parse feature type '{feature_type_str}', skipping")
                continue
            # Convert string to NormalizationMode enum
            try:
                if mode_str == "MEAN_STD":
                    mode = NormalizationMode.MEAN_STD
                elif mode_str == "MIN_MAX":
                    mode = NormalizationMode.MIN_MAX
                elif mode_str == "IDENTITY":
                    mode = NormalizationMode.IDENTITY
                else:
                    print(
                        f"Warning: Unknown normalization mode '{mode_str}' for feature type '{feature_type_str}'"
                    )
                    continue
            except (AttributeError, ValueError):
                print(f"Warning: Could not parse normalization mode '{mode_str}', skipping")
                continue
            norm_modes[feature_type] = mode
    # Try to extract feature definitions from config
    if "features" in config:
        for key, feature_config in config["features"].items():
            # "shape" takes precedence over the legacy "dim" field.
            shape = feature_config.get("shape", feature_config.get("dim"))
            shape = (shape,) if isinstance(shape, int) else tuple(shape)
            # Determine feature type from the key name
            if "image" in key or "visual" in key:
                feature_type = FeatureType.VISUAL
            elif "state" in key:
                feature_type = FeatureType.STATE
            elif ACTION in key:
                feature_type = FeatureType.ACTION
            else:
                feature_type = FeatureType.STATE  # Default
            features[key] = PolicyFeature(feature_type, shape)
    # If no features in config, infer them from the extracted stats instead
    if not features:
        for key, stat_dict in stats.items():
            # Get shape from any stat tensor (all stats of a feature share it)
            tensor = next(iter(stat_dict.values()))
            shape = tuple(tensor.shape)
            # Determine feature type based on key
            if "image" in key or "visual" in key or "pixels" in key:
                feature_type = FeatureType.VISUAL
            elif "state" in key or "joint" in key or "position" in key:
                feature_type = FeatureType.STATE
            elif ACTION in key:
                feature_type = FeatureType.ACTION
            else:
                feature_type = FeatureType.STATE
            features[key] = PolicyFeature(feature_type, shape)
    # If normalization modes weren't in config, infer them from which stats
    # exist: mean/std implies MEAN_STD, min/max implies MIN_MAX. The first
    # feature of a given type wins.
    if not norm_modes:
        for key, stat_dict in stats.items():
            if key in features:
                if "mean" in stat_dict and "std" in stat_dict:
                    feature_type = features[key].type
                    if feature_type not in norm_modes:
                        norm_modes[feature_type] = NormalizationMode.MEAN_STD
                elif "min" in stat_dict and "max" in stat_dict:
                    feature_type = features[key].type
                    if feature_type not in norm_modes:
                        norm_modes[feature_type] = NormalizationMode.MIN_MAX
    # Fall back to conventional defaults for any feature type still unset
    if FeatureType.VISUAL not in norm_modes:
        norm_modes[FeatureType.VISUAL] = NormalizationMode.MEAN_STD
    if FeatureType.STATE not in norm_modes:
        norm_modes[FeatureType.STATE] = NormalizationMode.MIN_MAX
    if FeatureType.ACTION not in norm_modes:
        norm_modes[FeatureType.ACTION] = NormalizationMode.MEAN_STD
    return features, norm_modes
def remove_normalization_layers(state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
    """
    Creates a new state_dict with all normalization-related layers removed.

    Any key that contains one of a fixed set of substrings associated with
    normalization modules is excluded from the result.

    Args:
        state_dict: The original model state dictionary.

    Returns:
        A new state dictionary containing only the core model weights, without
        any normalization parameters.
    """
    # Substring (not prefix) match: a key anywhere under a normalization
    # module is dropped.
    remove_patterns = (
        "normalize_inputs.",
        "unnormalize_outputs.",
        "normalize_targets.",  # Added pattern for target normalization
        "normalize.",
        "unnormalize.",
        "input_normalizer.",
        "output_normalizer.",
        "normalizer.",
    )
    return {
        key: tensor
        for key, tensor in state_dict.items()
        if not any(pattern in key for pattern in remove_patterns)
    }
def clean_state_dict(
    state_dict: dict[str, torch.Tensor], remove_str: str = "._orig_mod"
) -> dict[str, torch.Tensor]:
    """
    Strip a substring (e.g. '._orig_mod') from every key of a state dict.

    Args:
        state_dict: The original state dict.
        remove_str: The substring to remove from each key.

    Returns:
        A new state dict whose keys no longer contain ``remove_str``; values
        are carried over unchanged.
    """
    return {key.replace(remove_str, ""): value for key, value in state_dict.items()}
def load_state_dict_with_missing_key_handling(
    policy: torch.nn.Module,
    state_dict: dict[str, torch.Tensor],
    policy_type: str,
    known_missing_keys_whitelist: dict[str, list[str]],
) -> list[str]:
    """
    Load a state dict into a policy while tolerating known missing keys.

    The load runs with ``strict=False``; missing keys listed in the whitelist
    for this policy type are reported as expected, all others as warnings.

    Args:
        policy: The policy model to load the state dict into.
        state_dict: The cleaned state dictionary to load.
        policy_type: The type of policy (used for whitelist lookup).
        known_missing_keys_whitelist: Dictionary mapping policy types to lists of
            known acceptable missing keys.

    Returns:
        List of problematic missing keys that weren't in the whitelist.
    """
    # Non-strict load so we can inspect mismatches instead of raising.
    result = policy.load_state_dict(state_dict, strict=False)
    missing_keys = result.missing_keys
    unexpected_keys = result.unexpected_keys

    # Split missing keys into whitelisted (expected) and problematic.
    whitelisted_keys = known_missing_keys_whitelist.get(policy_type.lower(), [])
    problematic_missing_keys = [k for k in missing_keys if k not in whitelisted_keys]
    whitelisted_missing = [k for k in missing_keys if k in whitelisted_keys]

    if missing_keys:
        if problematic_missing_keys:
            print(f"WARNING: Found {len(problematic_missing_keys)} unexpected missing keys:")
            for k in problematic_missing_keys:
                print(f" - {k}")
        if whitelisted_missing:
            print(f"INFO: Found {len(whitelisted_missing)} expected missing keys (whitelisted):")
            for k in whitelisted_missing:
                print(f" - {k}")

    if unexpected_keys:
        print(f"WARNING: Found {len(unexpected_keys)} unexpected keys:")
        for k in unexpected_keys:
            print(f" - {k}")

    if missing_keys or unexpected_keys:
        print("State dict loaded with some missing/unexpected keys (see details above)")
    else:
        print("Successfully loaded cleaned state dict into policy model (all keys matched)")

    return problematic_missing_keys
def convert_features_to_policy_features(features_dict: dict[str, dict]) -> dict[str, PolicyFeature]:
    """
    Converts a feature dictionary from the old config format to the new `PolicyFeature` format.

    Args:
        features_dict: The feature dictionary in the old format, where values are
            simple dictionaries (e.g., `{"shape": [7]}`).

    Returns:
        A dictionary mapping feature names to `PolicyFeature` dataclass objects.
    """

    def _infer_type(name: str) -> FeatureType:
        # Classify a feature by its key name; unrecognized names default to STATE.
        if "image" in name or "visual" in name:
            return FeatureType.VISUAL
        if "state" in name:
            return FeatureType.STATE
        if ACTION in name:
            return FeatureType.ACTION
        return FeatureType.STATE

    def _infer_shape(spec: dict) -> tuple:
        # "shape" takes precedence over the legacy "dim" field; a bare int
        # becomes a 1-tuple, a missing value an empty tuple.
        raw = spec.get("shape", spec.get("dim"))
        if raw is None:
            return ()
        return (raw,) if isinstance(raw, int) else tuple(raw)

    return {
        name: PolicyFeature(_infer_type(name), _infer_shape(spec))
        for name, spec in features_dict.items()
    }
def display_migration_summary_with_warnings(problematic_missing_keys: list[str]) -> None:
    """
    Print a closing summary warning about missing keys that were not whitelisted.

    Args:
        problematic_missing_keys: Missing keys not covered by the whitelist.
            When empty, nothing is printed.
    """
    if not problematic_missing_keys:
        return

    separator = "=" * 60
    print("\n" + separator)
    print("IMPORTANT: MIGRATION COMPLETED WITH WARNINGS")
    print(separator)
    print(
        f"The migration was successful, but {len(problematic_missing_keys)} unexpected missing keys were found:"
    )
    print()
    for key in problematic_missing_keys:
        print(f" - {key}")
    print()
    # Static guidance block; printed line by line to keep output identical.
    for line in (
        "These missing keys may indicate:",
        " • The model architecture has changed",
        " • Some components were not properly saved in the original model",
        " • The migration script needs to be updated for this policy type",
        "",
        "What to do next:",
        " 1. Test your migrated model carefully to ensure it works as expected",
        " 2. If you encounter issues, please open an issue at:",
        " https://github.com/huggingface/lerobot/issues",
        " 3. Include this migration log and the missing keys listed above",
        "",
        "If the model works correctly despite these warnings, the missing keys",
        "might be expected for your policy type and can be added to the whitelist.",
    ):
        print(line)
    print(separator)
def load_model_from_hub(
    repo_id: str, revision: str | None = None
) -> tuple[dict[str, torch.Tensor], dict[str, Any], dict[str, Any] | None]:
    """
    Downloads and loads a model's state_dict and configs from the Hugging Face Hub.

    Args:
        repo_id: The repository ID on the Hub (e.g., 'lerobot/aloha').
        revision: The specific git revision (branch, tag, or commit hash) to use.

    Returns:
        A tuple containing the model's state dictionary, the policy configuration,
        and the training configuration (None if train_config.json is not found).
    """
    # Download the required artifacts.
    safetensors_path = hf_hub_download(repo_id=repo_id, filename="model.safetensors", revision=revision)
    config_path = hf_hub_download(repo_id=repo_id, filename="config.json", revision=revision)
    # Load state_dict
    state_dict = load_safetensors(safetensors_path)
    # Load config
    with open(config_path) as f:
        config = json.load(f)
    # Try to load train_config (optional).
    # BUGFIX: for a file missing from a *remote* repo, hf_hub_download raises
    # EntryNotFoundError (an HTTP error subclass), not FileNotFoundError, so
    # catching only FileNotFoundError let the "optional" download crash the
    # migration. Both are caught now (FileNotFoundError still covers
    # offline/local-cache misses).
    train_config = None
    try:
        train_config_path = hf_hub_download(repo_id=repo_id, filename="train_config.json", revision=revision)
        with open(train_config_path) as f:
            train_config = json.load(f)
    except (FileNotFoundError, EntryNotFoundError):
        print("train_config.json not found - continuing without training configuration")
    return state_dict, config, train_config
def main():
    """Command-line entry point: load a legacy policy, strip its normalization
    layers into processor pipelines, save the migrated artifacts, and optionally
    push them to the Hugging Face Hub."""
    parser = argparse.ArgumentParser(
        description="Migrate policy models with normalization layers to new pipeline system"
    )
    parser.add_argument(
        "--pretrained-path",
        type=str,
        required=True,
        help="Path to pretrained model (hub repo or local directory)",
    )
    parser.add_argument(
        "--output-dir",
        type=str,
        default=None,
        help="Output directory for migrated model (default: same as pretrained-path)",
    )
    parser.add_argument("--push-to-hub", action="store_true", help="Push migrated model to hub")
    parser.add_argument(
        "--hub-repo-id",
        type=str,
        default=None,
        help="Hub repository ID for pushing (default: same as pretrained-path)",
    )
    parser.add_argument("--revision", type=str, default=None, help="Revision of the model to load")
    # NOTE(review): --private is parsed but never used below — either wire it
    # into repo creation or remove it.
    parser.add_argument("--private", action="store_true", help="Make the hub repository private")
    parser.add_argument(
        "--branch",
        type=str,
        default=None,
        help="Git branch to use when pushing to hub. If specified, a PR will be created automatically (default: push directly to main)",
    )
    args = parser.parse_args()
    # Load model and config, either from a local directory or from the Hub.
    print(f"Loading model from {args.pretrained_path}...")
    if os.path.isdir(args.pretrained_path):
        # Local directory
        state_dict = load_safetensors(os.path.join(args.pretrained_path, "model.safetensors"))
        with open(os.path.join(args.pretrained_path, "config.json")) as f:
            config = json.load(f)
        # Try to load train_config (optional)
        train_config = None
        train_config_path = os.path.join(args.pretrained_path, "train_config.json")
        if os.path.exists(train_config_path):
            with open(train_config_path) as f:
                train_config = json.load(f)
        else:
            print("train_config.json not found - continuing without training configuration")
    else:
        # Hub repository
        state_dict, config, train_config = load_model_from_hub(args.pretrained_path, args.revision)
    # Extract normalization statistics from the raw state dict.
    print("Extracting normalization statistics...")
    stats = extract_normalization_stats(state_dict)
    print(f"Found normalization statistics for: {list(stats.keys())}")
    # Detect input features and normalization modes
    print("Detecting features and normalization modes...")
    features, norm_map = detect_features_and_norm_modes(config, stats)
    print(f"Detected features: {list(features.keys())}")
    print(f"Normalization modes: {norm_map}")
    # Remove normalization layers from state_dict (and torch.compile suffixes).
    print("Removing normalization layers from model...")
    new_state_dict = remove_normalization_layers(state_dict)
    new_state_dict = clean_state_dict(new_state_dict, remove_str="._orig_mod")
    removed_keys = set(state_dict.keys()) - set(new_state_dict.keys())
    if removed_keys:
        print(f"Removed {len(removed_keys)} normalization layer keys")
    # Determine output path (explicit --output-dir, or "<name>_migrated" next to
    # the source for local paths / in the CWD for hub repos).
    if args.output_dir:
        output_dir = Path(args.output_dir)
    else:
        if os.path.isdir(args.pretrained_path):
            output_dir = Path(args.pretrained_path).parent / f"{Path(args.pretrained_path).name}_migrated"
        else:
            output_dir = Path(f"./{args.pretrained_path.replace('/', '_')}_migrated")
    output_dir.mkdir(parents=True, exist_ok=True)
    # Extract policy type from config
    if "type" not in config:
        raise ValueError("Policy type not found in config.json. The config must contain a 'type' field.")
    policy_type = config["type"]
    print(f"Detected policy type: {policy_type}")
    # Clean up config - remove fields that shouldn't be passed to config constructor
    cleaned_config = dict(config)
    # Remove fields that are not part of the config class constructors
    fields_to_remove = ["normalization_mapping", "type"]
    for field in fields_to_remove:
        if field in cleaned_config:
            print(f"Removing '{field}' field from config")
            del cleaned_config[field]
    # Convert input_features and output_features to PolicyFeature objects if they exist
    if "input_features" in cleaned_config:
        cleaned_config["input_features"] = convert_features_to_policy_features(
            cleaned_config["input_features"]
        )
    if "output_features" in cleaned_config:
        cleaned_config["output_features"] = convert_features_to_policy_features(
            cleaned_config["output_features"]
        )
    # Add normalization mapping to config (re-added as enums, not strings)
    cleaned_config["normalization_mapping"] = norm_map
    # Create policy configuration using the factory
    print(f"Creating {policy_type} policy configuration...")
    policy_config = make_policy_config(policy_type, **cleaned_config)
    # Create policy instance using the factory
    print(f"Instantiating {policy_type} policy...")
    policy_class = get_policy_class(policy_type)
    policy = policy_class(policy_config)
    # Define whitelist of known missing keys that are acceptable (for example weight tie) for certain policy types
    known_missing_keys_whitelist = {
        "pi0": ["model.paligemma_with_expert.paligemma.model.language_model.embed_tokens.weight"],
        # Add other policy types and their known missing keys here as needed
    }
    # Load state dict with graceful missing key handling
    problematic_missing_keys = load_state_dict_with_missing_key_handling(
        policy=policy,
        state_dict=new_state_dict,
        policy_type=policy_type,
        known_missing_keys_whitelist=known_missing_keys_whitelist,
    )
    policy.to(torch.float32)
    # Create preprocessor and postprocessor using the factory; the extracted
    # stats become the pipelines' normalization statistics.
    print("Creating preprocessor and postprocessor using make_pre_post_processors...")
    preprocessor, postprocessor = make_pre_post_processors(policy_cfg=policy_config, dataset_stats=stats)
    # Determine hub repo ID if pushing to hub
    hub_repo_id = None
    if args.push_to_hub:
        if args.hub_repo_id:
            hub_repo_id = args.hub_repo_id
        else:
            if not os.path.isdir(args.pretrained_path):
                # Use same repo with "_migrated" suffix
                hub_repo_id = f"{args.pretrained_path}_migrated"
            else:
                raise ValueError("--hub-repo-id must be specified when pushing local model to hub")
    # Save all components to local directory first
    print(f"Saving preprocessor to {output_dir}...")
    preprocessor.save_pretrained(output_dir)
    print(f"Saving postprocessor to {output_dir}...")
    postprocessor.save_pretrained(output_dir)
    print(f"Saving model to {output_dir}...")
    policy.save_pretrained(output_dir)
    # Generate and save model card
    print("Generating model card...")
    # Get metadata from original config
    dataset_repo_id = "unknown"
    if train_config is not None:
        dataset_repo_id = train_config.get("repo_id", "unknown")
    license = config.get("license", "apache-2.0")
    # Always ensure the canonical tags are present, deduplicated.
    tags = config.get("tags", ["robotics", "lerobot", policy_type]) or ["robotics", "lerobot", policy_type]
    tags = set(tags).union({"robotics", "lerobot", policy_type})
    tags = list(tags)
    # Generate model card
    card = policy.generate_model_card(
        dataset_repo_id=dataset_repo_id, model_type=policy_type, license=license, tags=tags
    )
    # Save model card locally
    card.save(str(output_dir / "README.md"))
    print(f"Model card saved to {output_dir / 'README.md'}")
    # Push all files to hub in a single operation if requested
    if args.push_to_hub and hub_repo_id:
        api = HfApi()
        # Determine if we should create a PR (automatically if branch is specified)
        create_pr = args.branch is not None
        target_location = f"branch '{args.branch}'" if args.branch else "main branch"
        print(f"Pushing all migrated files to {hub_repo_id} on {target_location}...")
        # Upload all files in a single commit with automatic PR creation if branch specified
        commit_message = "Migrate policy to PolicyProcessorPipeline system"
        commit_description = None
        if create_pr:
            # Separate commit description for PR body
            commit_description = """**Automated Policy Migration to PolicyProcessorPipeline**
This PR migrates your model to the new LeRobot policy format using the modern PolicyProcessorPipeline architecture.
## What Changed
### **New Architecture - PolicyProcessorPipeline**
Your model now uses external PolicyProcessorPipeline components for data processing instead of built-in normalization layers. This provides:
- **Modularity**: Separate preprocessing and postprocessing pipelines
- **Flexibility**: Easy to swap, configure, and debug processing steps
- **Compatibility**: Works with the latest LeRobot ecosystem
### **Normalization Extraction**
We've extracted normalization statistics from your model's state_dict and removed the built-in normalization layers:
- **Extracted patterns**: `normalize_inputs.*`, `unnormalize_outputs.*`, `normalize.*`, `unnormalize.*`, `input_normalizer.*`, `output_normalizer.*`
- **Statistics preserved**: Mean, std, min, max values for all features
- **Clean model**: State dict now contains only core model weights
### **Files Added**
- **preprocessor_config.json**: Configuration for input preprocessing pipeline
- **postprocessor_config.json**: Configuration for output postprocessing pipeline
- **model.safetensors**: Clean model weights without normalization layers
- **config.json**: Updated model configuration
- **train_config.json**: Training configuration
- **README.md**: Updated model card with migration information
### **Benefits**
- **Backward Compatible**: Your model behavior remains identical
- **Future Ready**: Compatible with latest LeRobot features and updates
- **Debuggable**: Easy to inspect and modify processing steps
- **Portable**: Processors can be shared and reused across models
### **Usage**
```python
# Load your migrated model
from lerobot.policies import get_policy_class
from lerobot.processor import PolicyProcessorPipeline
# The preprocessor and postprocessor are now external
preprocessor = PolicyProcessorPipeline.from_pretrained("your-model-repo", config_filename="preprocessor_config.json")
postprocessor = PolicyProcessorPipeline.from_pretrained("your-model-repo", config_filename="postprocessor_config.json")
policy = get_policy_class("your-policy-type").from_pretrained("your-model-repo")
# Process data through the pipeline
processed_batch = preprocessor(raw_batch)
action = policy(processed_batch)
final_action = postprocessor(action)
```
*Generated automatically by the LeRobot policy migration script*"""
        upload_kwargs = {
            "repo_id": hub_repo_id,
            "folder_path": output_dir,
            "repo_type": "model",
            "commit_message": commit_message,
            "revision": args.branch,
            "create_pr": create_pr,
            "allow_patterns": ["*.json", "*.safetensors", "*.md"],
            "ignore_patterns": ["*.tmp", "*.log"],
        }
        # Add commit_description for PR body if creating PR
        if create_pr and commit_description:
            upload_kwargs["commit_description"] = commit_description
        api.upload_folder(**upload_kwargs)
        if create_pr:
            print("All files pushed and pull request created successfully!")
        else:
            print("All files pushed to main branch successfully!")
    print("\nMigration complete!")
    print(f"Migrated model saved to: {output_dir}")
    if args.push_to_hub and hub_repo_id:
        if args.branch:
            print(
                f"Successfully pushed all files to branch '{args.branch}' and created PR on https://huggingface.co/{hub_repo_id}"
            )
        else:
            print(f"Successfully pushed to https://huggingface.co/{hub_repo_id}")
        if args.branch:
            print(f"\nView the branch at: https://huggingface.co/{hub_repo_id}/tree/{args.branch}")
            print(
                f"View the PR at: https://huggingface.co/{hub_repo_id}/discussions (look for the most recent PR)"
            )
        else:
            print(f"\nView the changes at: https://huggingface.co/{hub_repo_id}")
    # Display final summary about any problematic missing keys
    display_migration_summary_with_warnings(problematic_missing_keys)
# Allow running the migration directly from the command line.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/migrate_policy_normalization.py",
"license": "Apache License 2.0",
"lines": 634,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/policy_robot_bridge.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import asdict, dataclass
from typing import Any
import torch
from lerobot.configs.types import FeatureType, PipelineFeatureType, PolicyFeature
from lerobot.processor import ActionProcessorStep, PolicyAction, ProcessorStepRegistry, RobotAction
from lerobot.utils.constants import ACTION
@dataclass
@ProcessorStepRegistry.register("robot_action_to_policy_action_processor")
class RobotActionToPolicyActionProcessorStep(ActionProcessorStep):
    """Convert a per-motor action dictionary into a flat policy action tensor.

    Tensor entries follow the order of ``motor_names``; each motor contributes
    its ``"<name>.pos"`` entry from the incoming dictionary.
    """

    motor_names: list[str]

    def action(self, action: RobotAction) -> PolicyAction:
        expected = len(self.motor_names)
        if expected != len(action):
            raise ValueError(f"Action must have {expected} elements, got {len(action)}")
        values = [action[f"{motor}.pos"] for motor in self.motor_names]
        return torch.tensor(values)

    def get_config(self) -> dict[str, Any]:
        # The dataclass fields (motor_names) fully describe this step.
        return asdict(self)

    def transform_features(self, features):
        # The per-motor keys collapse into a single ACTION feature vector.
        features[PipelineFeatureType.ACTION][ACTION] = PolicyFeature(
            type=FeatureType.ACTION, shape=(len(self.motor_names),)
        )
        return features
@dataclass
@ProcessorStepRegistry.register("policy_action_to_robot_action_processor")
class PolicyActionToRobotActionProcessorStep(ActionProcessorStep):
    """Convert a flat policy action tensor back into a per-motor action dictionary.

    Index ``i`` of the tensor is mapped to the key ``"<motor_names[i]>.pos"``.
    """

    motor_names: list[str]

    def action(self, action: PolicyAction) -> RobotAction:
        expected = len(self.motor_names)
        if expected != len(action):
            raise ValueError(f"Action must have {expected} elements, got {len(action)}")
        # zip is safe here: lengths are equal after the check above.
        return {f"{motor}.pos": value for motor, value in zip(self.motor_names, action)}

    def get_config(self) -> dict[str, Any]:
        # The dataclass fields (motor_names) fully describe this step.
        return asdict(self)

    def transform_features(self, features):
        # Each motor becomes its own scalar ACTION feature.
        for motor in self.motor_names:
            features[PipelineFeatureType.ACTION][f"{motor}.pos"] = PolicyFeature(
                type=FeatureType.ACTION, shape=(1,)
            )
        return features
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/policy_robot_bridge.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/processor/tokenizer_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script defines a processor for tokenizing natural language instructions from an environment transition.
It uses a tokenizer from the Hugging Face `transformers` library to convert task descriptions (text) into
token IDs and attention masks, which are then added to the observation dictionary.
"""
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any
import torch
from lerobot.configs.types import FeatureType, PipelineFeatureType, PolicyFeature
from lerobot.utils.constants import (
ACTION_TOKEN_MASK,
ACTION_TOKENS,
OBS_LANGUAGE_ATTENTION_MASK,
OBS_LANGUAGE_SUBTASK_ATTENTION_MASK,
OBS_LANGUAGE_SUBTASK_TOKENS,
OBS_LANGUAGE_TOKENS,
)
from lerobot.utils.import_utils import _transformers_available
from .core import EnvTransition, RobotObservation, TransitionKey
from .pipeline import ActionProcessorStep, ObservationProcessorStep, ProcessorStepRegistry
# Conditional import for type checking and lazy loading
if TYPE_CHECKING or _transformers_available:
from transformers import AutoProcessor, AutoTokenizer
else:
AutoProcessor = None
AutoTokenizer = None
@dataclass
@ProcessorStepRegistry.register(name="tokenizer_processor")
class TokenizerProcessorStep(ObservationProcessorStep):
"""
Processor step to tokenize a natural language task description.
This step extracts a task string from the `complementary_data` of an `EnvTransition`,
tokenizes it using a Hugging Face `transformers` tokenizer, and adds the resulting
token IDs and attention mask to the `observation` dictionary.
Requires the `transformers` library to be installed.
Attributes:
tokenizer_name: The name of a pretrained tokenizer from the Hugging Face Hub (e.g., "bert-base-uncased").
tokenizer: A pre-initialized tokenizer object. If provided, `tokenizer_name` is ignored.
max_length: The maximum length to pad or truncate sequences to.
task_key: The key in `complementary_data` where the task string is stored.
padding_side: The side to pad on ('left' or 'right').
padding: The padding strategy ('max_length', 'longest', etc.).
truncation: Whether to truncate sequences longer than `max_length`.
input_tokenizer: The internal tokenizer instance, loaded during initialization.
"""
tokenizer_name: str | None = None
tokenizer: Any | None = None # Use `Any` for compatibility without a hard dependency
max_length: int = 512
task_key: str = "task"
padding_side: str = "right"
padding: str = "max_length"
truncation: bool = True
# Internal tokenizer instance (not part of the config)
input_tokenizer: Any = field(default=None, init=False, repr=False)
def __post_init__(self):
"""
Initializes the tokenizer after the dataclass is created.
It checks for the availability of the `transformers` library and loads the tokenizer
either from a provided object or by name from the Hugging Face Hub.
Raises:
ImportError: If the `transformers` library is not installed.
ValueError: If neither `tokenizer` nor `tokenizer_name` is provided.
"""
if not _transformers_available:
raise ImportError(
"The 'transformers' library is not installed. "
"Please install it with `pip install 'lerobot[transformers-dep]'` to use TokenizerProcessorStep."
)
if self.tokenizer is not None:
# Use provided tokenizer object directly
self.input_tokenizer = self.tokenizer
elif self.tokenizer_name is not None:
if AutoTokenizer is None:
raise ImportError("AutoTokenizer is not available")
self.input_tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)
else:
raise ValueError(
"Either 'tokenizer' or 'tokenizer_name' must be provided. "
"Pass a tokenizer object directly or a tokenizer name to auto-load."
)
    def get_task(self, transition: EnvTransition) -> list[str] | None:
        """
        Extracts the task description(s) from the transition's complementary data.

        Args:
            transition: The environment transition.

        Returns:
            A list of task strings, or None if the stored value is neither a
            string nor a list of strings.

        Raises:
            ValueError: If the complementary data is missing, or the task value is None.
            KeyError: If `self.task_key` is not present in the complementary data.
        """
        complementary_data = transition.get(TransitionKey.COMPLEMENTARY_DATA)
        if complementary_data is None:
            raise ValueError("Complementary data is None so no task can be extracted from it")
        # Direct indexing on purpose: a missing task key is a caller error
        # (KeyError), unlike the optional lookup in get_subtask.
        task = complementary_data[self.task_key]
        if task is None:
            raise ValueError("Task extracted from Complementary data is None")
        # Standardize to a list of strings for the tokenizer
        if isinstance(task, str):
            return [task]
        elif isinstance(task, list) and all(isinstance(t, str) for t in task):
            return task
        return None
def get_subtask(self, transition: EnvTransition) -> list[str] | None:
"""
Extracts the subtask from the transition's complementary data.
Args:
transition: The environment transition.
Returns:
A list of subtask strings, or None if the subtask key is not found or the value is None.
"""
complementary_data = transition.get(TransitionKey.COMPLEMENTARY_DATA)
if complementary_data is None:
return None
subtask = complementary_data.get("subtask")
if subtask is None:
return None
# Standardize to a list of strings for the tokenizer
if isinstance(subtask, str):
return [subtask]
elif isinstance(subtask, list) and all(isinstance(t, str) for t in subtask):
return subtask
return None
    def observation(self, observation: RobotObservation) -> RobotObservation:
        """
        Tokenizes the task (and optional subtask) and adds the results to the observation.

        Reads the task from `self.transition` (presumably populated by the surrounding
        pipeline before this hook runs — TODO confirm), tokenizes it, moves the
        resulting tensors to the device of the other tensors in the transition, and
        returns an augmented copy of the observation.

        Args:
            observation: The original observation dictionary.

        Returns:
            The updated observation dictionary including token IDs and a boolean
            attention mask (plus subtask tokens/mask when a subtask is present).

        Raises:
            ValueError: If no task can be extracted from the transition.
        """
        task = self.get_task(self.transition)
        if task is None:
            raise ValueError("Task cannot be None")
        # Tokenize the task (this will create CPU tensors)
        tokenized_prompt = self._tokenize_text(task)
        # Detect the device from existing tensors in the transition to ensure consistency
        target_device = self._detect_device(self.transition)
        # Move new tokenized tensors to the detected device
        if target_device is not None:
            tokenized_prompt = {
                k: v.to(target_device) if isinstance(v, torch.Tensor) else v
                for k, v in tokenized_prompt.items()
            }
        # Create a new observation dict to avoid modifying the original in place
        new_observation = dict(observation)
        # Add tokenized data to the observation; the mask is cast to bool for
        # downstream masking operations.
        new_observation[OBS_LANGUAGE_TOKENS] = tokenized_prompt["input_ids"]
        new_observation[OBS_LANGUAGE_ATTENTION_MASK] = tokenized_prompt["attention_mask"].to(dtype=torch.bool)
        # Tokenize subtask if available
        subtask = self.get_subtask(self.transition)
        if subtask is not None:
            tokenized_subtask = self._tokenize_text(subtask)
            # Move new tokenized tensors to the detected device
            if target_device is not None:
                tokenized_subtask = {
                    k: v.to(target_device) if isinstance(v, torch.Tensor) else v
                    for k, v in tokenized_subtask.items()
                }
            # Add tokenized subtask to the observation
            new_observation[OBS_LANGUAGE_SUBTASK_TOKENS] = tokenized_subtask["input_ids"]
            new_observation[OBS_LANGUAGE_SUBTASK_ATTENTION_MASK] = tokenized_subtask["attention_mask"].to(
                dtype=torch.bool
            )
        return new_observation
def _detect_device(self, transition: EnvTransition) -> torch.device | None:
"""
Detects the torch.device from existing tensors in the transition.
It checks tensors in the observation dictionary first, then the action tensor.
Args:
transition: The environment transition.
Returns:
The detected `torch.device`, or None if no tensors are found.
"""
# Check observation tensors first (most likely place to find tensors)
observation = transition.get(TransitionKey.OBSERVATION)
if observation:
for value in observation.values():
if isinstance(value, torch.Tensor):
return value.device
# Fallback to checking the action tensor
action = transition.get(TransitionKey.ACTION)
if isinstance(action, torch.Tensor):
return action.device
return None # No tensors found, default will be CPU
    def _tokenize_text(self, text: str | list[str]) -> dict[str, torch.Tensor]:
        """
        A wrapper around the tokenizer call.

        Applies the configured max length, truncation flag, padding strategy,
        and padding side, returning PyTorch tensors (on CPU; callers move them
        to the target device).

        Args:
            text: A string or list of strings to tokenize.

        Returns:
            A dictionary containing tokenized 'input_ids' and 'attention_mask' as PyTorch tensors.
        """
        return self.input_tokenizer(
            text,
            max_length=self.max_length,
            truncation=self.truncation,
            padding=self.padding,
            padding_side=self.padding_side,
            return_tensors="pt",
        )
def get_config(self) -> dict[str, Any]:
"""
Returns the serializable configuration of the processor.
Note: The tokenizer object itself is not serialized. If the processor was initialized
with a tokenizer name, that name will be included in the config.
Returns:
A dictionary with the processor's configuration parameters.
"""
config = {
"max_length": self.max_length,
"task_key": self.task_key,
"padding_side": self.padding_side,
"padding": self.padding,
"truncation": self.truncation,
}
# Only save tokenizer_name if it was used to create the tokenizer
if self.tokenizer_name is not None and self.tokenizer is None:
config["tokenizer_name"] = self.tokenizer_name
return config
def transform_features(
self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
"""
Adds feature definitions for the language tokens and attention mask.
This updates the policy features dictionary to include the new data added to the
observation, ensuring downstream components are aware of their shape and type.
Args:
features: The dictionary of existing policy features.
Returns:
The updated dictionary of policy features.
"""
# Add a feature for the token IDs if it doesn't already exist
if OBS_LANGUAGE_TOKENS not in features[PipelineFeatureType.OBSERVATION]:
features[PipelineFeatureType.OBSERVATION][OBS_LANGUAGE_TOKENS] = PolicyFeature(
type=FeatureType.LANGUAGE, shape=(self.max_length,)
)
# Add a feature for the attention mask if it doesn't already exist
if OBS_LANGUAGE_ATTENTION_MASK not in features[PipelineFeatureType.OBSERVATION]:
features[PipelineFeatureType.OBSERVATION][OBS_LANGUAGE_ATTENTION_MASK] = PolicyFeature(
type=FeatureType.LANGUAGE, shape=(self.max_length,)
)
return features
@dataclass
@ProcessorStepRegistry.register(name="action_tokenizer_processor")
class ActionTokenizerProcessorStep(ActionProcessorStep):
    """
    Processor step to tokenize action data using a fast action tokenizer.

    This step takes action tensors from an `EnvTransition`, tokenizes them using
    a Hugging Face `transformers` AutoProcessor (such as the Physical Intelligence "fast" tokenizer),
    maps the resulting token IDs into the PaliGemma vocabulary, and stores the padded
    tokens plus their validity mask in the transition's complementary data.

    Requires the `transformers` library to be installed.

    Attributes:
        action_tokenizer_name: The name of a pretrained processor from the Hugging Face Hub.
        action_tokenizer_input_object: A pre-initialized processor/tokenizer object. If provided,
            `action_tokenizer_name` is ignored.
        trust_remote_code: Whether to trust remote code when loading the tokenizer (required for some tokenizers).
        max_action_tokens: Fixed length to which the action token sequence is padded or truncated.
        fast_skip_tokens: Offset used when remapping fast-tokenizer IDs into the PaliGemma vocabulary.
        paligemma_tokenizer_name: The name of a pretrained PaliGemma tokenizer from the Hugging Face Hub
            (e.g., "google/paligemma-3b-pt-224").
        action_tokenizer: The internal tokenizer/processor instance, loaded during initialization.
    """

    action_tokenizer_name: str | None = None
    action_tokenizer_input_object: Any | None = None
    trust_remote_code: bool = True
    max_action_tokens: int = 256
    fast_skip_tokens: int = 128
    paligemma_tokenizer_name: str = "google/paligemma-3b-pt-224"
    # Internal tokenizer instances (not part of the config)
    action_tokenizer: Any = field(default=None, init=False, repr=False)
    _paligemma_tokenizer: Any = field(default=None, init=False, repr=False)

    def __post_init__(self):
        """
        Initializes the action tokenizer after the dataclass is created.

        It checks for the availability of the `transformers` library, loads the action
        tokenizer either from a provided object or by name from the Hugging Face Hub,
        and always loads the PaliGemma text tokenizer used for the prompt framing.

        Raises:
            ImportError: If the `transformers` library is not installed.
            ValueError: If neither `action_tokenizer_input_object` nor
                `action_tokenizer_name` is provided.
        """
        if not _transformers_available:
            raise ImportError(
                "The 'transformers' library is not installed. "
                "Please install it with `pip install 'lerobot[transformers-dep]'` to use ActionTokenizerProcessorStep."
            )
        if self.action_tokenizer_input_object is not None:
            self.action_tokenizer = self.action_tokenizer_input_object
        elif self.action_tokenizer_name is not None:
            if AutoProcessor is None:
                raise ImportError("AutoProcessor is not available")
            self.action_tokenizer = AutoProcessor.from_pretrained(
                self.action_tokenizer_name, trust_remote_code=self.trust_remote_code
            )
        else:
            raise ValueError(
                "Either 'action_tokenizer' or 'action_tokenizer_name' must be provided. "
                "Pass a tokenizer object directly or a tokenizer name to auto-load."
            )
        # Text tokenizer used for the "Action: " prefix, BOS, and the "|" separator.
        self._paligemma_tokenizer = AutoTokenizer.from_pretrained(
            self.paligemma_tokenizer_name,
            trust_remote_code=self.trust_remote_code,
            add_eos_token=True,
            add_bos_token=False,
        )

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """
        Applies action tokenization to the transition.

        This overrides the base class to handle both tokens and mask.

        Args:
            transition: The input transition with action data.

        Returns:
            The processed transition with tokenized actions and mask in complementary data.
        """
        self._current_transition = transition.copy()
        new_transition = self._current_transition
        action = new_transition.get(TransitionKey.ACTION)
        if action is None:
            # During inference, no action is available, skip tokenization
            return new_transition
        # Tokenize and get both tokens and mask
        tokens, mask = self._tokenize_action(action)
        # Store mask in complementary data
        complementary_data = new_transition.get(TransitionKey.COMPLEMENTARY_DATA, {})
        if complementary_data is None:
            complementary_data = {}
        complementary_data[ACTION_TOKEN_MASK] = mask
        complementary_data[ACTION_TOKENS] = tokens
        new_transition[TransitionKey.COMPLEMENTARY_DATA] = complementary_data
        return new_transition

    def _act_tokens_to_paligemma_tokens(self, tokens: torch.Tensor) -> torch.Tensor:
        """
        Converts action tokens to PaliGemma tokens.

        Action token IDs are mapped into the tail of the PaliGemma vocabulary,
        skipping the last `fast_skip_tokens` entries.
        """
        return self._paligemma_tokenizer.vocab_size - 1 - self.fast_skip_tokens - tokens

    def _tokenize_action(self, action: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Tokenizes the action tensor and creates a mask.

        Args:
            action: The input action tensor to tokenize. Shape: (B, H, action_dim) or (H, action_dim,)

        Returns:
            A tuple of (tokens, mask) where:
                - tokens: Tensor of token IDs with shape (B, max_action_tokens)
                - mask: Boolean mask with shape (B, max_action_tokens), True for real tokens, False for padding
        """
        if action is None:
            raise ValueError("Action cannot be None")
        # Get the device and dtype of the input action
        device = action.device if isinstance(action, torch.Tensor) else None
        # Handle single sample (add batch dimension)
        single_sample = action.dim() == 1
        if single_sample:
            action = action.unsqueeze(0)
        batch_size = action.shape[0]
        # Tokenize the action batch
        # The fast tokenizer expects action data and returns token IDs
        tokens_list = []
        masks_list = []
        for i in range(batch_size):
            # Tokenize single action (move to CPU first as tokenizer uses scipy which requires numpy)
            action_cpu = action[i : i + 1].cpu()
            tokens = self.action_tokenizer(action_cpu)
            # Convert to a torch tensor if the tokenizer returned a list/array
            if isinstance(tokens, list) or not isinstance(tokens, torch.Tensor):
                tokens = torch.tensor(tokens, dtype=torch.long, device=action.device)
            else:
                # Move tokens back to the same device as input action
                tokens = tokens.to(device=action.device)
            # Flatten to 1D if needed
            if tokens.dim() > 1:
                tokens = tokens.flatten()
            bos_id = self._paligemma_tokenizer.bos_token_id
            # Frame the sequence: BOS + "Action: " prefix + remapped action tokens + "|" terminator
            tokens = torch.cat(
                [
                    torch.tensor([bos_id], device=action.device),
                    torch.tensor(
                        self._paligemma_tokenizer.encode("Action: ", add_special_tokens=False),
                        device=action.device,
                    ),
                    self._act_tokens_to_paligemma_tokens(tokens),
                    torch.tensor(self._paligemma_tokenizer.encode("|"), device=action.device),
                ]
            )
            # Truncate or pad to max_action_tokens
            if len(tokens) > self.max_action_tokens:
                logging.warning(
                    f"Token length ({len(tokens)}) exceeds max length ({self.max_action_tokens}), truncating. "
                    "Consider increasing the `max_action_tokens` in your model config if this happens frequently."
                )
                tokens = tokens[: self.max_action_tokens]
                mask = torch.ones(self.max_action_tokens, dtype=torch.bool, device=action.device)
            else:
                mask = torch.cat(
                    [
                        torch.ones(len(tokens), dtype=torch.bool, device=action.device),
                        torch.zeros(
                            self.max_action_tokens - len(tokens), dtype=torch.bool, device=action.device
                        ),
                    ]
                )
                # Pad tokens with zeros
                tokens = torch.nn.functional.pad(tokens, (0, self.max_action_tokens - len(tokens)), value=0)
            tokens_list.append(tokens)
            masks_list.append(mask)
        # Stack into batched tensors
        tokens_batch = torch.stack(tokens_list, dim=0)  # (B, max_action_tokens)
        masks_batch = torch.stack(masks_list, dim=0)  # (B, max_action_tokens)
        # Remove batch dimension if input was single sample
        if single_sample:
            tokens_batch = tokens_batch.squeeze(0)
            masks_batch = masks_batch.squeeze(0)
        # Move to the same device as the input
        if device is not None:
            tokens_batch = tokens_batch.to(device)
            masks_batch = masks_batch.to(device)
        return tokens_batch, masks_batch

    def action(self, action: torch.Tensor) -> torch.Tensor:
        """
        This method is not used since we override __call__.
        Required by ActionProcessorStep ABC.
        """
        tokens, _ = self._tokenize_action(action)
        return tokens

    def get_config(self) -> dict[str, Any]:
        """
        Returns the serializable configuration of the processor.

        Note: The tokenizer object itself is not serialized. If the processor was initialized
        with a tokenizer name, that name will be included in the config.
        NOTE(review): `max_length`? No — `fast_skip_tokens` and `paligemma_tokenizer_name`
        are not serialized here; non-default values would be lost on reload. Confirm
        whether that is intentional.

        Returns:
            A dictionary with the processor's configuration parameters.
        """
        config = {
            "trust_remote_code": self.trust_remote_code,
            "max_action_tokens": self.max_action_tokens,
        }
        # Only save tokenizer_name if it was used to create the tokenizer
        if self.action_tokenizer_name is not None and self.action_tokenizer_input_object is None:
            config["action_tokenizer_name"] = self.action_tokenizer_name
        return config

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """
        Updates feature definitions to reflect tokenized actions.

        Currently a pass-through: the tokenized actions live in complementary
        data rather than the feature map, so no entries are added.

        Args:
            features: The dictionary of existing policy features.

        Returns:
            The unchanged dictionary of policy features.
        """
        return features
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/processor/tokenizer_processor.py",
"license": "Apache License 2.0",
"lines": 464,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/teleoperators/phone/config_phone.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from enum import Enum
from typing import ClassVar

import numpy as np

from ..config import TeleoperatorConfig
class PhoneOS(Enum):
    """Mobile operating system of the teleoperating phone.

    Selects the backend implementation: iOS uses the HEBI Mobile I/O app,
    Android uses the WebXR-based `teleop` package.
    """

    ANDROID = "android"
    IOS = "ios"
@TeleoperatorConfig.register_subclass("phone")
@dataclass
class PhoneConfig(TeleoperatorConfig):
    """Configuration for the phone teleoperator.

    Attributes:
        phone_os: Which phone backend to use (iOS via the HEBI Mobile I/O app,
            or Android via WebXR).
        camera_offset: Fixed translation (x, y, z) from the phone's tracking
            camera to the device reference point, subtracted from reported poses.
    """

    phone_os: PhoneOS = PhoneOS.IOS
    # ClassVar makes explicit that this is a shared class-level constant and not
    # a per-instance dataclass field: the previous un-annotated attribute already
    # behaved this way, but silently (it was excluded from __init__/repr/eq).
    camera_offset: ClassVar[np.ndarray] = np.array(
        [0.0, -0.02, 0.04]
    )  # iPhone 14 Pro camera is 2cm off center and 4cm above center
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/teleoperators/phone/config_phone.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/teleoperators/phone/phone_processor.py | # !/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from lerobot.configs.types import FeatureType, PipelineFeatureType, PolicyFeature
from lerobot.processor import ProcessorStepRegistry, RobotAction, RobotActionProcessorStep
from lerobot.teleoperators.phone.config_phone import PhoneOS
@ProcessorStepRegistry.register("map_phone_action_to_robot_action")
@dataclass
class MapPhoneActionToRobotAction(RobotActionProcessorStep):
    """
    Maps calibrated phone pose actions to standardized robot action inputs.

    This processor step acts as a bridge between the phone teleoperator's output
    and the robot's expected action format. It remaps the phone's 6-DoF pose
    (position and rotation) to the robot's target end-effector pose, applying
    necessary axis inversions and swaps. It also interprets platform-specific
    button presses to generate a gripper command.

    Attributes:
        platform: The operating system of the phone (iOS or Android), used
            to determine the correct button mappings for the gripper.
    """

    # TODO(Steven): Gripper vel could be output of phone_teleop directly
    platform: PhoneOS
    # Unused in this class body; presumably kept for edge detection by a
    # subclass or future revision — verify before removing.
    _enabled_prev: bool = field(default=False, init=False, repr=False)

    def action(self, action: RobotAction) -> RobotAction:
        """
        Processes the phone action dictionary to create a robot action dictionary.

        Args:
            action: The input action dictionary from the phone teleoperator.

        Returns:
            A new action dictionary formatted for the robot controller.

        Raises:
            KeyError: If any of the expected `phone.*` keys is missing
                (popped without a default).
            ValueError: If 'phone.pos' or 'phone.rot' is present but None.
        """
        # Pop them from the action
        enabled = bool(action.pop("phone.enabled"))
        pos = action.pop("phone.pos")
        rot = action.pop("phone.rot")
        inputs = action.pop("phone.raw_inputs")
        if pos is None or rot is None:
            raise ValueError("pos and rot must be present in action")
        rotvec = rot.as_rotvec()  # Absolute orientation as rotvec
        # Map certain inputs to certain actions
        if self.platform == PhoneOS.IOS:
            # On iOS the analog channel a3 directly provides the gripper velocity.
            gripper_vel = float(inputs.get("a3", 0.0))
        else:
            a = float(inputs.get("reservedButtonA", 0.0))
            b = float(inputs.get("reservedButtonB", 0.0))
            gripper_vel = (
                a - b
            )  # Positive if a is pressed, negative if b is pressed, 0 if both or neither are pressed
        # For some actions we need to invert the axis; when disabled, all pose
        # targets are zeroed.
        action["enabled"] = enabled
        action["target_x"] = -pos[1] if enabled else 0.0
        action["target_y"] = pos[0] if enabled else 0.0
        action["target_z"] = pos[2] if enabled else 0.0
        action["target_wx"] = rotvec[1] if enabled else 0.0
        action["target_wy"] = rotvec[0] if enabled else 0.0
        action["target_wz"] = -rotvec[2] if enabled else 0.0
        action["gripper_vel"] = gripper_vel  # Still send gripper action when disabled
        return action

    def transform_features(
        self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
    ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
        """Replace the raw `phone.*` action features with the remapped scalar targets."""
        for feat in ["enabled", "pos", "rot", "raw_inputs"]:
            features[PipelineFeatureType.ACTION].pop(f"phone.{feat}", None)
        for feat in [
            "enabled",
            "target_x",
            "target_y",
            "target_z",
            "target_wx",
            "target_wy",
            "target_wz",
            "gripper_vel",
        ]:
            features[PipelineFeatureType.ACTION][f"{feat}"] = PolicyFeature(
                type=FeatureType.ACTION, shape=(1,)
            )
        return features
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/teleoperators/phone/phone_processor.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/teleoperators/phone/teleop_phone.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Docs:
# hebi: https://docs.hebi.us/tools.html#mobile-io
# teleop: https://github.com/SpesRobotics/teleop
import logging
import threading
import time
import hebi
import numpy as np
from teleop import Teleop
from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS
from lerobot.teleoperators.teleoperator import Teleoperator
from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
from lerobot.utils.rotation import Rotation
logger = logging.getLogger(__name__)
class BasePhone:
_enabled: bool = False
_calib_pos: np.ndarray | None = None
_calib_rot_inv: Rotation | None = None
def _reapply_position_calibration(self, pos: np.ndarray) -> None:
self._calib_pos = pos.copy()
@property
def is_calibrated(self) -> bool:
return (self._calib_pos is not None) and (self._calib_rot_inv is not None)
@property
def action_features(self) -> dict[str, type]:
return {
"phone.pos": np.ndarray, # shape (3,)
"phone.rot": Rotation, # scipy.spatial.transform.Rotation
"phone.raw_inputs": dict, # analogs/buttons or webXR meta
"phone.enabled": bool,
}
@property
def feedback_features(self) -> dict[str, type]:
# No haptic or other feedback implemented yet
pass
def configure(self) -> None:
# No additional configuration required for phone teleop
pass
def send_feedback(self, feedback: dict[str, float]) -> None:
# We could add haptic feedback (vibrations) here, but it's not implemented yet
raise NotImplementedError
class IOSPhone(BasePhone, Teleoperator):
    """iOS phone teleoperator backed by ARKit via the HEBI Mobile I/O app.

    Connects to the phone as a HEBI module group; the B1 button enables
    teleoperation and triggers calibration capture.
    """

    name = "ios_phone"

    def __init__(self, config: PhoneConfig):
        super().__init__(config)
        self.config = config
        # HEBI module group; None while disconnected.
        self._group = None

    @property
    def is_connected(self) -> bool:
        return self._group is not None

    @check_if_already_connected
    def connect(self) -> None:
        """Discover the HEBI Mobile I/O module on the network, then calibrate."""
        logger.info("Connecting to IPhone, make sure to open the HEBI Mobile I/O app.")
        lookup = hebi.Lookup()
        # Give the lookup a moment to discover modules on the network.
        time.sleep(2.0)
        group = lookup.get_group_from_names(["HEBI"], ["mobileIO"])
        if group is None:
            raise RuntimeError("Mobile I/O not found — check name/family settings in the app.")
        self._group = group
        logger.info(f"{self} connected to HEBI group with {group.size} module(s).")
        self.calibrate()

    def calibrate(self) -> None:
        """Capture the reference pose (on B1 press) that later motion is measured against."""
        print(
            "Hold the phone so that: top edge points forward in same direction as the robot (robot +x) and screen points up (robot +z)"
        )
        print("Press and hold B1 in the HEBI Mobile I/O app to capture this pose...\n")
        position, rotation = self._wait_for_capture_trigger()
        self._calib_pos = position.copy()
        # Store the inverse so calibrated poses are a single apply/compose away.
        self._calib_rot_inv = rotation.inv()
        self._enabled = False
        print("Calibration done\n")

    def _wait_for_capture_trigger(self) -> tuple[np.ndarray, Rotation]:
        """
        Blocks execution until the calibration trigger is detected from the iOS device.

        This method enters a loop, continuously reading the phone's state. It waits for the user to press
        and hold the 'B1' button in the HEBI Mobile I/O app. Once B1 is pressed, the loop breaks and
        returns the phone's pose at that exact moment.

        Returns:
            A tuple containing the position (np.ndarray) and rotation (Rotation) of the phone at the
            moment the trigger was activated.
        """
        while True:
            has_pose, position, rotation, fb_pose = self._read_current_pose()
            if not has_pose:
                time.sleep(0.01)
                continue
            # B1 state lives in digital bank "b", channel 1, of the feedback packet.
            io = getattr(fb_pose, "io", None)
            button_b = getattr(io, "b", None) if io is not None else None
            button_b1_pressed = False
            if button_b is not None:
                button_b1_pressed = bool(button_b.get_int(1))
            if button_b1_pressed:
                return position, rotation
            time.sleep(0.01)

    def _read_current_pose(self) -> tuple[bool, np.ndarray | None, Rotation | None, object | None]:
        """
        Reads the instantaneous 6-DoF pose from the connected iOS device via the HEBI SDK.

        This method fetches the latest feedback packet from the HEBI group, extracts the ARKit
        position and orientation, and converts them into a standard format. It also applies a
        configured camera offset to adjust the pose from the camera's frame to the phone's
        physical frame.

        Returns:
            A tuple containing:
                - A boolean indicating if a valid pose was successfully read.
                - The 3D position as a NumPy array, or None if not available.
                - The orientation as a `Rotation` object, or None if not available.
                - The raw HEBI feedback object for accessing other data like button presses.
        """
        fbk = self._group.get_next_feedback()
        pose = fbk[0]
        ar_pos = getattr(pose, "ar_position", None)
        ar_quat = getattr(pose, "ar_orientation", None)
        if ar_pos is None or ar_quat is None:
            return False, None, None, None
        # HEBI provides orientation in w, x, y, z format.
        # Scipy's Rotation expects x, y, z, w.
        quat_xyzw = np.concatenate((ar_quat[1:], [ar_quat[0]]))  # wxyz to xyzw
        rot = Rotation.from_quat(quat_xyzw)
        # Shift the reported camera position to the phone reference point.
        pos = ar_pos - rot.apply(self.config.camera_offset)
        return True, pos, rot, pose

    @check_if_not_connected
    def get_action(self) -> dict:
        """Return the calibrated pose and raw inputs, or {} while uncalibrated/poseless."""
        has_pose, raw_position, raw_rotation, fb_pose = self._read_current_pose()
        if not has_pose or not self.is_calibrated:
            return {}
        # Collect raw inputs (B1 / analogs on iOS, move/scale on Android)
        raw_inputs: dict[str, float | int | bool] = {}
        io = getattr(fb_pose, "io", None)
        if io is not None:
            bank_a, bank_b = io.a, io.b
            # Analog channels a1..a8 and digital channels b1..b8.
            if bank_a:
                for ch in range(1, 9):
                    if bank_a.has_float(ch):
                        raw_inputs[f"a{ch}"] = float(bank_a.get_float(ch))
            if bank_b:
                for ch in range(1, 9):
                    if bank_b.has_int(ch):
                        raw_inputs[f"b{ch}"] = int(bank_b.get_int(ch))
                    elif hasattr(bank_b, "has_bool") and bank_b.has_bool(ch):
                        raw_inputs[f"b{ch}"] = int(bank_b.get_bool(ch))
        enable = bool(raw_inputs.get("b1", 0))
        # Rising edge then re-capture calibration immediately from current raw pose
        if enable and not self._enabled:
            self._reapply_position_calibration(raw_position)
        # Apply calibration
        pos_cal = self._calib_rot_inv.apply(raw_position - self._calib_pos)
        rot_cal = self._calib_rot_inv * raw_rotation
        self._enabled = enable
        return {
            "phone.pos": pos_cal,
            "phone.rot": rot_cal,
            "phone.raw_inputs": raw_inputs,
            "phone.enabled": self._enabled,
        }

    @check_if_not_connected
    def disconnect(self) -> None:
        """Drop the HEBI group reference; the SDK owns any underlying resources."""
        self._group = None
class AndroidPhone(BasePhone, Teleoperator):
    """Android phone teleoperator backed by the WebXR-based `teleop` package.

    A background thread runs the teleop server; its callback stores the latest
    pose/message, which the main thread reads under a lock.
    """

    name = "android_phone"

    def __init__(self, config: PhoneConfig):
        super().__init__(config)
        self.config = config
        # Teleop server instance; None while disconnected.
        self._teleop = None
        # Daemon thread running the teleop event loop.
        self._teleop_thread = None
        # Latest 4x4 pose matrix and message from the WebXR session.
        self._latest_pose = None
        self._latest_message = None
        # Guards _latest_pose/_latest_message against concurrent callback writes.
        self._android_lock = threading.Lock()

    @property
    def is_connected(self) -> bool:
        return self._teleop is not None

    @check_if_already_connected
    def connect(self) -> None:
        """Start the teleop stream in a daemon thread, then calibrate."""
        logger.info("Starting teleop stream for Android...")
        self._teleop = Teleop()
        self._teleop.subscribe(self._android_callback)
        self._teleop_thread = threading.Thread(target=self._teleop.run, daemon=True)
        self._teleop_thread.start()
        logger.info(f"{self} connected, teleop stream started.")
        self.calibrate()

    def calibrate(self) -> None:
        """Capture the reference pose (on first touch-move) that later motion is measured against."""
        print(
            "Hold the phone so that: top edge points forward in same direction as the robot (robot +x) and screen points up (robot +z)"
        )
        print("Touch and move on the WebXR page to capture this pose...\n")
        pos, rot = self._wait_for_capture_trigger()
        self._calib_pos = pos.copy()
        # Store the inverse so calibrated poses are a single apply/compose away.
        self._calib_rot_inv = rot.inv()
        self._enabled = False
        print("Calibration done\n")

    def _wait_for_capture_trigger(self) -> tuple[np.ndarray, Rotation]:
        """
        Blocks execution until the calibration trigger is detected from the Android device.

        This method enters a loop, continuously checking the latest message received from the WebXR
        session. It waits for the user to touch and move their finger on the screen, which generates
        a `move` event. Once this event is detected, the loop breaks and returns the phone's current
        pose.

        Returns:
            A tuple containing the position (np.ndarray) and rotation (Rotation) of the phone at the
            moment the trigger was activated.
        """
        while True:
            with self._android_lock:
                msg = self._latest_message or {}
            if bool(msg.get("move", False)):
                ok, pos, rot, _pose = self._read_current_pose()
                if ok:
                    return pos, rot
            time.sleep(0.01)

    def _read_current_pose(self) -> tuple[bool, np.ndarray | None, Rotation | None, object | None]:
        """
        Reads the latest 6-DoF pose received from the Android device's WebXR session.

        This method accesses the most recent pose data stored by the `_android_callback`. It uses a
        thread lock to safely read the shared `_latest_pose` variable. The pose, a 4x4 matrix, is
        then decomposed into position and rotation, and the configured camera offset is applied.

        Returns:
            A tuple containing:
                - A boolean indicating if a valid pose was available.
                - The 3D position as a NumPy array, or None if no pose has been received yet.
                - The orientation as a `Rotation` object, or None if no pose has been received.
                - The raw 4x4 pose matrix as received from the teleop stream.
        """
        with self._android_lock:
            if self._latest_pose is None:
                return False, None, None, None
            # Copy under the lock so the matrix can't change mid-decomposition.
            p = self._latest_pose.copy()
            pose = self._latest_pose
        rot = Rotation.from_matrix(p[:3, :3])
        # Shift the reported camera position to the phone reference point.
        pos = p[:3, 3] - rot.apply(self.config.camera_offset)
        return True, pos, rot, pose

    def _android_callback(self, pose: np.ndarray, message: dict) -> None:
        """
        Callback function to handle incoming data from the Android teleop stream.

        This method is executed by the `teleop` package's subscriber thread whenever a new
        pose and message are received from the WebXR session on the Android phone. It updates
        the internal state (`_latest_pose` and `_latest_message`) with the new data.
        A thread lock is used to ensure that these shared variables are updated atomically,
        preventing race conditions with the main thread that reads them.

        Args:
            pose: A 4x4 NumPy array representing the phone's transformation matrix.
            message: A dictionary containing additional data, such as button presses or touch events.
        """
        with self._android_lock:
            self._latest_pose = pose
            self._latest_message = message

    @check_if_not_connected
    def get_action(self) -> dict:
        """Return the calibrated pose and raw inputs, or {} while uncalibrated/poseless."""
        ok, raw_pos, raw_rot, pose = self._read_current_pose()
        if not ok or not self.is_calibrated:
            return {}
        # Collect raw inputs (B1 / analogs on iOS, move/scale on Android)
        raw_inputs: dict[str, float | int | bool] = {}
        # NOTE(review): read without holding _android_lock, unlike the other
        # accessors; attribute rebinding is atomic in CPython so a torn read
        # seems unlikely, but confirm this is intentional.
        msg = self._latest_message or {}
        raw_inputs["move"] = bool(msg.get("move", False))
        raw_inputs["scale"] = float(msg.get("scale", 1.0))
        raw_inputs["reservedButtonA"] = bool(msg.get("reservedButtonA", False))
        raw_inputs["reservedButtonB"] = bool(msg.get("reservedButtonB", False))
        enable = bool(raw_inputs.get("move", False))
        # Rising edge then re-capture calibration immediately from current raw pose
        if enable and not self._enabled:
            self._reapply_position_calibration(raw_pos)
        # Apply calibration
        pos_cal = self._calib_rot_inv.apply(raw_pos - self._calib_pos)
        rot_cal = self._calib_rot_inv * raw_rot
        self._enabled = enable
        return {
            "phone.pos": pos_cal,
            "phone.rot": rot_cal,
            "phone.raw_inputs": raw_inputs,
            "phone.enabled": self._enabled,
        }

    @check_if_not_connected
    def disconnect(self) -> None:
        """Stop tracking and release the stream thread (daemon; joined best-effort)."""
        self._teleop = None
        if self._teleop_thread and self._teleop_thread.is_alive():
            self._teleop_thread.join(timeout=1.0)
            self._teleop_thread = None
        self._latest_pose = None
class Phone(Teleoperator):
    """Phone teleoperator for iOS and Android devices.

    iOS phones stream ARKit poses through the HEBI Mobile I/O app (which also
    exposes 8 analog inputs a1-a8 and 8 digital inputs b1-b8); Android phones
    stream WebXR poses through the `teleop` Python package.

    Press and hold **B1** to enable teleoperation. While enabled, the first B1
    press captures a reference pose and rotation; when disabled and pressed
    again the position reference is re-applied.

    Every Teleoperator method delegates to the OS-specific backend chosen at
    construction time.
    """

    config_class = PhoneConfig
    name = "phone"

    def __init__(self, config: PhoneConfig):
        super().__init__(config)
        self.config = config
        # Pick the backend implementation once, from the configured OS.
        self._phone_impl: Teleoperator
        if self.config.phone_os == PhoneOS.IOS:
            self._phone_impl = IOSPhone(config)
        elif self.config.phone_os == PhoneOS.ANDROID:
            self._phone_impl = AndroidPhone(config)
        else:
            raise ValueError(f"Invalid config phone_os: {self.config.phone_os}")

    @property
    def is_connected(self) -> bool:
        """Whether the underlying phone backend is connected."""
        return self._phone_impl.is_connected

    def connect(self) -> None:
        """Open the connection on the underlying backend."""
        return self._phone_impl.connect()

    def calibrate(self) -> None:
        """Run the backend's calibration routine."""
        return self._phone_impl.calibrate()

    @property
    def is_calibrated(self) -> bool:
        """Whether the backend has completed calibration."""
        return self._phone_impl.is_calibrated

    @property
    def action_features(self) -> dict[str, type]:
        """Action feature specification exposed by the backend."""
        return self._phone_impl.action_features

    @property
    def feedback_features(self) -> dict[str, type]:
        """Feedback feature specification exposed by the backend."""
        return self._phone_impl.feedback_features

    def configure(self) -> None:
        """Apply backend-specific configuration."""
        return self._phone_impl.configure()

    def get_action(self) -> dict:
        """Fetch the latest action dict from the backend."""
        return self._phone_impl.get_action()

    def send_feedback(self, feedback: dict[str, float]) -> None:
        """Forward feedback values to the backend."""
        return self._phone_impl.send_feedback(feedback)

    def disconnect(self) -> None:
        """Close the backend connection."""
        return self._phone_impl.disconnect()
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/teleoperators/phone/teleop_phone.py",
"license": "Apache License 2.0",
"lines": 334,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/utils/rotation.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom rotation utilities to replace scipy.spatial.transform.Rotation."""
import numpy as np
class Rotation:
    """Minimal stand-in for ``scipy.spatial.transform.Rotation``.

    Internally stores a unit quaternion in ``[x, y, z, w]`` order and supports
    conversions to/from rotation vectors, 3x3 rotation matrices and
    quaternions, plus applying, inverting and composing rotations.
    """

    def __init__(self, quat: np.ndarray) -> None:
        """Store quaternion [x, y, z, w], normalized to unit length when possible."""
        q = np.asarray(quat, dtype=float)
        length = np.linalg.norm(q)
        # A zero quaternion cannot be normalized; keep it unchanged.
        self._quat = q / length if length > 0 else q

    @classmethod
    def from_rotvec(cls, rotvec: np.ndarray) -> "Rotation":
        """Build a rotation from a rotation vector via Rodrigues' formula.

        Args:
            rotvec: Rotation vector [x, y, z]; its magnitude is the angle in radians.

        Returns:
            Rotation instance.
        """
        vec = np.asarray(rotvec, dtype=float)
        angle = np.linalg.norm(vec)
        if angle < 1e-8:
            # Near-zero angle: the identity quaternion is the stable answer.
            return cls(np.array([0.0, 0.0, 0.0, 1.0]))
        axis = vec / angle
        half = angle / 2.0
        sin_half = np.sin(half)
        # Quaternion layout is [x, y, z, w].
        return cls(np.array([axis[0] * sin_half, axis[1] * sin_half, axis[2] * sin_half, np.cos(half)]))

    @classmethod
    def from_matrix(cls, matrix: np.ndarray) -> "Rotation":
        """Build a rotation from a 3x3 rotation matrix (Shepperd's method).

        Args:
            matrix: 3x3 rotation matrix.

        Returns:
            Rotation instance.
        """
        m = np.asarray(matrix, dtype=float)
        trace = np.trace(m)
        # Recover the numerically largest quaternion component first, then
        # derive the remaining three from it.
        if trace > 0:
            s = np.sqrt(trace + 1.0) * 2  # s = 4 * qw
            quat = np.array(
                [(m[2, 1] - m[1, 2]) / s, (m[0, 2] - m[2, 0]) / s, (m[1, 0] - m[0, 1]) / s, 0.25 * s]
            )
        elif m[0, 0] > m[1, 1] and m[0, 0] > m[2, 2]:
            s = np.sqrt(1.0 + m[0, 0] - m[1, 1] - m[2, 2]) * 2  # s = 4 * qx
            quat = np.array(
                [0.25 * s, (m[0, 1] + m[1, 0]) / s, (m[0, 2] + m[2, 0]) / s, (m[2, 1] - m[1, 2]) / s]
            )
        elif m[1, 1] > m[2, 2]:
            s = np.sqrt(1.0 + m[1, 1] - m[0, 0] - m[2, 2]) * 2  # s = 4 * qy
            quat = np.array(
                [(m[0, 1] + m[1, 0]) / s, 0.25 * s, (m[1, 2] + m[2, 1]) / s, (m[0, 2] - m[2, 0]) / s]
            )
        else:
            s = np.sqrt(1.0 + m[2, 2] - m[0, 0] - m[1, 1]) * 2  # s = 4 * qz
            quat = np.array(
                [(m[0, 2] + m[2, 0]) / s, (m[1, 2] + m[2, 1]) / s, 0.25 * s, (m[1, 0] - m[0, 1]) / s]
            )
        return cls(quat)

    @classmethod
    def from_quat(cls, quat: np.ndarray) -> "Rotation":
        """Build a rotation from a quaternion in [x, y, z, w] order.

        Args:
            quat: Quaternion [x, y, z, w]; normalized on construction.

        Returns:
            Rotation instance.
        """
        return cls(quat)

    def as_matrix(self) -> np.ndarray:
        """Return the 3x3 rotation matrix equivalent of this rotation."""
        x, y, z, w = self._quat
        return np.array(
            [
                [1 - 2 * (y * y + z * z), 2 * (x * y - z * w), 2 * (x * z + y * w)],
                [2 * (x * y + z * w), 1 - 2 * (x * x + z * z), 2 * (y * z - x * w)],
                [2 * (x * z - y * w), 2 * (y * z + x * w), 1 - 2 * (x * x + y * y)],
            ],
            dtype=float,
        )

    def as_rotvec(self) -> np.ndarray:
        """Return the rotation-vector form (axis scaled by angle in radians)."""
        x, y, z, w = self._quat
        # Canonicalize to w >= 0 so the representation is unique.
        if w < 0:
            x, y, z, w = -x, -y, -z, -w
        angle = 2.0 * np.arccos(np.clip(abs(w), 0.0, 1.0))
        sin_half = np.sqrt(1.0 - w * w)
        if sin_half < 1e-8:
            # Small-angle linearization: rotvec ~ 2 * [x, y, z].
            return 2.0 * np.array([x, y, z])
        return angle * (np.array([x, y, z]) / sin_half)

    def as_quat(self) -> np.ndarray:
        """Return a copy of the quaternion as [x, y, z, w]."""
        return self._quat.copy()

    def apply(self, vectors: np.ndarray, inverse: bool = False) -> np.ndarray:
        """Rotate one or more 3D vectors by this rotation.

        Equivalent to ``self.as_matrix() @ v`` per vector (transposed matrix
        when ``inverse`` is True).

        Args:
            vectors: Array of shape (3,) or (N, 3).
            inverse: When True, apply the inverse rotation instead.

        Returns:
            Array of shape (3,) for a single input vector, else (N, 3).

        Raises:
            ValueError: If the input is not shaped (3,) or (N, 3).
        """
        arr = np.asarray(vectors, dtype=float)
        if arr.ndim == 1 and arr.shape[0] != 3:
            raise ValueError("Single vector must have length 3")
        if arr.ndim == 2 and arr.shape[1] != 3:
            raise ValueError("Vectors must have shape (N, 3)")
        if arr.ndim not in (1, 2):
            raise ValueError("Vectors must be 1D or 2D array")

        rot = self.as_matrix()
        # The transpose of an orthogonal rotation matrix is its inverse.
        if inverse:
            rot = rot.T
        if arr.ndim == 1:
            return (arr.reshape(1, 3) @ rot.T).flatten()
        return arr @ rot.T

    def inv(self) -> "Rotation":
        """Return the inverse rotation.

        Composing a rotation with its inverse yields the identity.
        """
        x, y, z, w = self._quat
        # For a unit quaternion the inverse is the conjugate [-x, -y, -z, w].
        return Rotation(np.array([-x, -y, -z, w]))

    def __mul__(self, other: "Rotation") -> "Rotation":
        """Compose rotations: ``r2 * r1`` applies ``r1`` first, then ``r2``.

        Equivalent to multiplying the rotation matrices
        ``r2.as_matrix() @ r1.as_matrix()``.

        Args:
            other: Rotation applied first.

        Returns:
            Rotation representing the composition.
        """
        if not isinstance(other, Rotation):
            return NotImplemented
        bx, by, bz, bw = other._quat  # applied first
        ax, ay, az, aw = self._quat  # applied second
        # Hamilton product in [x, y, z, w] layout.
        product = np.array(
            [
                aw * bx + ax * bw + ay * bz - az * by,
                aw * by - ax * bz + ay * bw + az * bx,
                aw * bz + ax * by - ay * bx + az * bw,
                aw * bw - ax * bx - ay * by - az * bz,
            ]
        )
        return Rotation(product)
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/utils/rotation.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:tests/datasets/test_dataset_utils.py | #!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from datasets import Dataset
from huggingface_hub import DatasetCard
from lerobot.datasets.push_dataset_to_hub.utils import calculate_episode_data_index
from lerobot.datasets.utils import combine_feature_dicts, create_lerobot_dataset_card, hf_transform_to_torch
from lerobot.utils.constants import ACTION, OBS_IMAGES
def test_default_parameters():
    """A card built with no arguments carries the LeRobot defaults."""
    card = create_lerobot_dataset_card()
    assert isinstance(card, DatasetCard)
    expected_config = {"config_name": "default", "data_files": "data/*/*.parquet"}
    assert card.data.tags == ["LeRobot"]
    assert card.data.task_categories == ["robotics"]
    assert card.data.configs == [expected_config]
def test_with_tags():
    """User-supplied tags are appended after the mandatory LeRobot tag."""
    card = create_lerobot_dataset_card(tags=["tag1", "tag2"])
    assert card.data.tags == ["LeRobot", "tag1", "tag2"]
def test_calculate_episode_data_index():
    """Episode boundaries are derived from the episode_index column."""
    hf_dataset = Dataset.from_dict(
        {
            "timestamp": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
            "index": [0, 1, 2, 3, 4, 5],
            "episode_index": [0, 0, 1, 2, 2, 2],
        },
    )
    hf_dataset.set_transform(hf_transform_to_torch)
    index = calculate_episode_data_index(hf_dataset)
    # Episodes 0, 1 and 2 start at rows 0, 2 and 3 and end at 2, 3 and 6.
    assert torch.equal(index["from"], torch.tensor([0, 2, 3]))
    assert torch.equal(index["to"], torch.tensor([2, 3, 6]))
def test_merge_simple_vectors():
    """Vector features with overlapping names merge into a single feature."""
    first = {ACTION: {"dtype": "float32", "shape": (2,), "names": ["ee.x", "ee.y"]}}
    second = {ACTION: {"dtype": "float32", "shape": (2,), "names": ["ee.y", "ee.z"]}}
    merged = combine_feature_dicts(first, second)
    assert ACTION in merged
    assert merged[ACTION]["dtype"] == "float32"
    # Names are concatenated in first-seen order with duplicates removed...
    assert merged[ACTION]["names"] == ["ee.x", "ee.y", "ee.z"]
    # ...and the shape is recomputed from the merged name list.
    assert merged[ACTION]["shape"] == (3,)
def test_merge_multiple_groups_order_and_dedup():
    """Merging several groups preserves first-seen order and drops duplicates."""
    groups = [
        {ACTION: {"dtype": "float32", "shape": (2,), "names": ["a", "b"]}},
        {ACTION: {"dtype": "float32", "shape": (2,), "names": ["b", "c"]}},
        {ACTION: {"dtype": "float32", "shape": (3,), "names": ["a", "c", "d"]}},
    ]
    merged = combine_feature_dicts(*groups)
    assert merged[ACTION]["names"] == ["a", "b", "c", "d"]
    assert merged[ACTION]["shape"] == (4,)
def test_non_vector_last_wins_for_images():
    """For non-vector features (images) sharing a key, the last definition wins."""
    key = f"{OBS_IMAGES}.front"
    low_res = {key: {"dtype": "image", "shape": (3, 480, 640), "names": ["channels", "height", "width"]}}
    high_res = {key: {"dtype": "image", "shape": (3, 720, 1280), "names": ["channels", "height", "width"]}}
    merged = combine_feature_dicts(low_res, high_res)
    assert merged[key]["shape"] == (3, 720, 1280)
    assert merged[key]["dtype"] == "image"
def test_dtype_mismatch_raises():
    """Merging vector features with conflicting dtypes is rejected."""
    group_f32 = {ACTION: {"dtype": "float32", "shape": (1,), "names": ["a"]}}
    group_f64 = {ACTION: {"dtype": "float64", "shape": (1,), "names": ["b"]}}
    with pytest.raises(ValueError, match="dtype mismatch for 'action'"):
        combine_feature_dicts(group_f32, group_f64)
def test_non_dict_passthrough_last_wins():
    """Non-dict entries pass straight through, keeping the last occurrence."""
    merged = combine_feature_dicts({"misc": 123}, {"misc": 456})
    assert merged["misc"] == 456
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/datasets/test_dataset_utils.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_act_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ACT policy processor."""
import tempfile
import pytest
import torch
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.policies.act.configuration_act import ACTConfig
from lerobot.policies.act.processor_act import make_act_pre_post_processors
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DataProcessorPipeline,
DeviceProcessorStep,
NormalizerProcessorStep,
RenameObservationsProcessorStep,
TransitionKey,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import create_transition, transition_to_batch
from lerobot.utils.constants import ACTION, OBS_STATE
def create_default_config():
    """Build a minimal ACT configuration (7-dim state, 4-dim action) on CPU."""
    config = ACTConfig()
    config.input_features = {OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(7,))}
    config.output_features = {ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(4,))}
    # Both state and action are normalized with mean/std statistics.
    config.normalization_mapping = {
        FeatureType.STATE: NormalizationMode.MEAN_STD,
        FeatureType.ACTION: NormalizationMode.MEAN_STD,
    }
    config.device = "cpu"
    return config
def create_default_stats():
    """Return identity statistics (mean 0, std 1) matching the default config."""
    zero_mean_unit_std = lambda dim: {"mean": torch.zeros(dim), "std": torch.ones(dim)}  # noqa: E731
    return {OBS_STATE: zero_mean_unit_std(7), ACTION: zero_mean_unit_std(4)}
def test_make_act_processor_basic():
    """The factory returns correctly named pipelines with the expected steps."""
    preprocessor, postprocessor = make_act_pre_post_processors(create_default_config(), create_default_stats())
    assert preprocessor.name == "policy_preprocessor"
    assert postprocessor.name == "policy_postprocessor"
    # The preprocessor renames, batches, moves to device, then normalizes.
    expected_pre = [
        RenameObservationsProcessorStep,
        AddBatchDimensionProcessorStep,
        DeviceProcessorStep,
        NormalizerProcessorStep,
    ]
    assert len(preprocessor.steps) == len(expected_pre)
    for step, expected_type in zip(preprocessor.steps, expected_pre):
        assert isinstance(step, expected_type)
    # The postprocessor unnormalizes and moves back to the output device.
    expected_post = [UnnormalizerProcessorStep, DeviceProcessorStep]
    assert len(postprocessor.steps) == len(expected_post)
    for step, expected_type in zip(postprocessor.steps, expected_post):
        assert isinstance(step, expected_type)
def test_act_processor_normalization():
    """Pre/post processors batch the data and keep shapes consistent."""
    preprocessor, postprocessor = make_act_pre_post_processors(create_default_config(), create_default_stats())
    batch = transition_to_batch(create_transition({OBS_STATE: torch.randn(7)}, torch.randn(4)))
    processed = preprocessor(batch)
    # Data is normalized and a leading batch dimension has been added.
    assert processed[OBS_STATE].shape == (1, 7)
    assert processed[TransitionKey.ACTION.value].shape == (1, 4)
    # Unnormalizing the action preserves its batched shape.
    assert postprocessor(processed[TransitionKey.ACTION.value]).shape == (1, 4)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_act_processor_cuda():
    """CPU inputs are moved to CUDA on the way in and back to CPU on the way out."""
    config = create_default_config()
    config.device = "cuda"
    preprocessor, postprocessor = make_act_pre_post_processors(config, create_default_stats())
    batch = transition_to_batch(create_transition({OBS_STATE: torch.randn(7)}, torch.randn(4)))
    processed = preprocessor(batch)
    # The preprocessor places everything on the CUDA device.
    assert processed[OBS_STATE].device.type == "cuda"
    assert processed[TransitionKey.ACTION.value].device.type == "cuda"
    # The postprocessor brings the action back to the CPU.
    assert postprocessor(processed[TransitionKey.ACTION.value]).device.type == "cpu"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_act_processor_accelerate_scenario():
    """Data that already lives on the configured GPU is not moved."""
    config = create_default_config()
    config.device = "cuda:0"
    preprocessor, postprocessor = make_act_pre_post_processors(config, create_default_stats())
    device = torch.device("cuda:0")
    # Simulate Accelerate: inputs are already batched and already on the GPU.
    batch = transition_to_batch(
        create_transition({OBS_STATE: torch.randn(1, 7).to(device)}, torch.randn(1, 4).to(device))
    )
    processed = preprocessor(batch)
    # Tensors stay on the same GPU instead of being transferred needlessly.
    assert processed[OBS_STATE].device == device
    assert processed[TransitionKey.ACTION.value].device == device
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_act_processor_multi_gpu():
    """Tensors on a different GPU than the configured one stay where they are."""
    config = create_default_config()
    config.device = "cuda:0"
    preprocessor, postprocessor = make_act_pre_post_processors(config, create_default_stats())
    # Simulate multi-GPU training: the batch lives on cuda:1, not cuda:0.
    device = torch.device("cuda:1")
    batch = transition_to_batch(
        create_transition({OBS_STATE: torch.randn(1, 7).to(device)}, torch.randn(1, 4).to(device))
    )
    processed = preprocessor(batch)
    # Data must not be pulled onto cuda:0.
    assert processed[OBS_STATE].device == device
    assert processed[TransitionKey.ACTION.value].device == device
def test_act_processor_without_stats():
    """Processors can be built and run without dataset statistics."""
    preprocessor, postprocessor = make_act_pre_post_processors(create_default_config(), dataset_stats=None)
    assert preprocessor is not None
    assert postprocessor is not None
    # Processing still succeeds; it simply cannot normalize without stats.
    batch = transition_to_batch(create_transition({OBS_STATE: torch.randn(7)}, torch.randn(4)))
    assert preprocessor(batch) is not None
def test_act_processor_save_and_load():
    """A saved preprocessor can be reloaded and still processes data."""
    preprocessor, postprocessor = make_act_pre_post_processors(create_default_config(), create_default_stats())
    with tempfile.TemporaryDirectory() as tmpdir:
        preprocessor.save_pretrained(tmpdir)
        reloaded = DataProcessorPipeline.from_pretrained(
            tmpdir, config_filename="policy_preprocessor.json"
        )
        # The round-tripped pipeline behaves like the original.
        batch = transition_to_batch(create_transition({OBS_STATE: torch.randn(7)}, torch.randn(4)))
        processed = reloaded(batch)
        assert processed[OBS_STATE].shape == (1, 7)
        assert processed[TransitionKey.ACTION.value].shape == (1, 4)
def test_act_processor_device_placement_preservation():
    """With a CPU config, processed tensors remain on the CPU."""
    config = create_default_config()
    config.device = "cpu"
    preprocessor, _ = make_act_pre_post_processors(config, create_default_stats())
    batch = transition_to_batch(create_transition({OBS_STATE: torch.randn(7)}, torch.randn(4)))
    processed = preprocessor(batch)
    assert processed[OBS_STATE].device.type == "cpu"
    assert processed[TransitionKey.ACTION.value].device.type == "cpu"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_act_processor_mixed_precision():
    """A pipeline rebuilt for float16 produces float16 observations and actions."""
    config = create_default_config()
    config.device = "cuda"
    preprocessor, postprocessor = make_act_pre_post_processors(config, create_default_stats())

    def _to_float16(step):
        # Swap in a float16 device step and align the normalizer's dtype with it.
        if isinstance(step, DeviceProcessorStep):
            return DeviceProcessorStep(device=config.device, float_dtype="float16")
        if isinstance(step, NormalizerProcessorStep):
            return NormalizerProcessorStep(
                features=step.features,
                norm_map=step.norm_map,
                stats=step.stats,
                device=config.device,
                dtype=torch.float16,  # match the device step's float16
            )
        return step

    preprocessor.steps = [_to_float16(step) for step in preprocessor.steps]

    batch = transition_to_batch(
        create_transition(
            {OBS_STATE: torch.randn(7, dtype=torch.float32)}, torch.randn(4, dtype=torch.float32)
        )
    )
    processed = preprocessor(batch)
    # float32 inputs come out as float16 after the rebuilt pipeline.
    assert processed[OBS_STATE].dtype == torch.float16
    assert processed[TransitionKey.ACTION.value].dtype == torch.float16
def test_act_processor_batch_consistency():
    """Unbatched samples gain a batch dim; batched samples keep their size."""
    preprocessor, postprocessor = make_act_pre_post_processors(create_default_config(), create_default_stats())
    # A single unbatched sample is promoted to batch size 1.
    single = transition_to_batch(create_transition({OBS_STATE: torch.randn(7)}, torch.randn(4)))
    assert preprocessor(single)[OBS_STATE].shape[0] == 1
    # An existing batch of 8 keeps its size.
    batched = transition_to_batch(create_transition({OBS_STATE: torch.randn(8, 7)}, torch.randn(8, 4)))
    processed = preprocessor(batched)
    assert processed[OBS_STATE].shape[0] == 8
    assert processed[TransitionKey.ACTION.value].shape[0] == 8
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_act_processor_bfloat16_device_float32_normalizer():
    """A float32 normalizer auto-adapts when the device step emits bfloat16."""
    config = create_default_config()
    config.device = "cuda"
    preprocessor, _ = make_act_pre_post_processors(config, create_default_stats())

    def _mismatch_dtypes(step):
        # Device step casts to bfloat16 while the normalizer is deliberately
        # left at float32 to exercise the automatic adaptation path.
        if isinstance(step, DeviceProcessorStep):
            return DeviceProcessorStep(device=config.device, float_dtype="bfloat16")
        if isinstance(step, NormalizerProcessorStep):
            return NormalizerProcessorStep(
                features=step.features,
                norm_map=step.norm_map,
                stats=step.stats,
                device=config.device,
                dtype=torch.float32,  # deliberately mismatched
            )
        return step

    preprocessor.steps = [_mismatch_dtypes(step) for step in preprocessor.steps]
    normalizer_step = preprocessor.steps[3]  # NormalizerProcessorStep
    assert normalizer_step.dtype == torch.float32

    batch = transition_to_batch(
        create_transition(
            {OBS_STATE: torch.randn(7, dtype=torch.float32)}, torch.randn(4, dtype=torch.float32)
        )
    )
    processed = preprocessor(batch)

    # Pipeline output is bfloat16 end to end...
    assert processed[OBS_STATE].dtype == torch.bfloat16
    assert processed[TransitionKey.ACTION.value].dtype == torch.bfloat16
    # ...because the normalizer adapted its dtype and internal statistics.
    assert normalizer_step.dtype == torch.bfloat16
    for stat_tensor in normalizer_step._tensor_stats[OBS_STATE].values():
        assert stat_tensor.dtype == torch.bfloat16
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_act_processor.py",
"license": "Apache License 2.0",
"lines": 329,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_batch_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from pathlib import Path
import numpy as np
import pytest
import torch
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DataProcessorPipeline,
ProcessorStepRegistry,
TransitionKey,
)
from lerobot.processor.converters import create_transition, identity_transition
from lerobot.utils.constants import OBS_ENV_STATE, OBS_IMAGE, OBS_IMAGES, OBS_STATE
def test_state_1d_to_2d():
    """A 1D state vector gains a leading batch dimension."""
    processor = AddBatchDimensionProcessorStep()
    state = torch.randn(7)
    transition = create_transition(observation={OBS_STATE: state}, action=torch.empty(0))
    out = processor(transition)[TransitionKey.OBSERVATION][OBS_STATE]
    assert out.shape == (1, 7)
    assert torch.allclose(out.squeeze(0), state)
def test_env_state_1d_to_2d():
    """A 1D environment-state vector gains a leading batch dimension."""
    processor = AddBatchDimensionProcessorStep()
    env_state = torch.randn(10)
    transition = create_transition(observation={OBS_ENV_STATE: env_state}, action=torch.empty(0))
    out = processor(transition)[TransitionKey.OBSERVATION][OBS_ENV_STATE]
    assert out.shape == (1, 10)
    assert torch.allclose(out.squeeze(0), env_state)
def test_image_3d_to_4d():
    """A 3D image tensor gains a leading batch dimension."""
    processor = AddBatchDimensionProcessorStep()
    image = torch.randn(224, 224, 3)
    transition = create_transition(observation={OBS_IMAGE: image}, action=torch.empty(0))
    out = processor(transition)[TransitionKey.OBSERVATION][OBS_IMAGE]
    assert out.shape == (1, 224, 224, 3)
    assert torch.allclose(out.squeeze(0), image)
def test_multiple_images_3d_to_4d():
    """Every 3D image under observation.images.* is batched independently."""
    processor = AddBatchDimensionProcessorStep()
    cameras = {
        f"{OBS_IMAGES}.camera1": torch.randn(64, 64, 3),
        f"{OBS_IMAGES}.camera2": torch.randn(128, 128, 3),
    }
    transition = create_transition(observation=dict(cameras), action=torch.empty(0))
    processed = processor(transition)[TransitionKey.OBSERVATION]
    for key, original in cameras.items():
        assert processed[key].shape == (1, *original.shape)
        assert torch.allclose(processed[key].squeeze(0), original)
def test_already_batched_tensors_unchanged():
    """Tensors that already carry a batch dimension pass through untouched."""
    processor = AddBatchDimensionProcessorStep()
    observation = {
        OBS_STATE: torch.randn(1, 7),
        OBS_ENV_STATE: torch.randn(1, 10),
        OBS_IMAGE: torch.randn(1, 224, 224, 3),
    }
    transition = create_transition(observation=dict(observation), action=torch.empty(0))
    processed = processor(transition)[TransitionKey.OBSERVATION]
    for key, original in observation.items():
        assert torch.allclose(processed[key], original)
def test_higher_dimensional_tensors_unchanged():
    """Tensors with more dims than the batching rules target are untouched."""
    processor = AddBatchDimensionProcessorStep()
    observation = {
        OBS_STATE: torch.randn(2, 7, 5),  # 3D state: already beyond 1D
        OBS_IMAGE: torch.randn(2, 3, 224, 224, 1),  # 5D image: already beyond 3D
    }
    transition = create_transition(observation=dict(observation), action=torch.empty(0))
    processed = processor(transition)[TransitionKey.OBSERVATION]
    for key, original in observation.items():
        assert torch.allclose(processed[key], original)
def test_non_tensor_values_unchanged():
    """Non-tensor observation entries (lists, strings, ints, dicts) pass through as-is."""
    step = AddBatchDimensionProcessorStep()
    payload = {
        OBS_STATE: [1, 2, 3],  # list, not tensor
        OBS_IMAGE: "not_a_tensor",  # string
        "custom_key": 42,  # integer
        "another_key": {"nested": "dict"},  # dict
    }
    out = step(create_transition(observation=payload, action=torch.empty(0)))
    obs_out = out[TransitionKey.OBSERVATION]
    for key, value in payload.items():
        assert obs_out[key] == value
def test_none_observation():
    """An empty observation dict comes back empty — no keys are invented."""
    step = AddBatchDimensionProcessorStep()
    out = step(create_transition(observation={}, action=torch.empty(0)))
    assert out[TransitionKey.OBSERVATION] == {}
def test_empty_observation():
    """Processing an empty observation dict is a no-op."""
    step = AddBatchDimensionProcessorStep()
    empty_obs = {}
    out = step(create_transition(observation=empty_obs, action=torch.empty(0)))
    assert out[TransitionKey.OBSERVATION] == {}
def test_mixed_observation():
    """A mix of batched, unbatched, and non-tensor entries is handled per-entry."""
    step = AddBatchDimensionProcessorStep()
    obs = {
        OBS_STATE: torch.randn(5),  # 1D -> should gain batch dim
        OBS_ENV_STATE: torch.randn(1, 8),  # already batched -> untouched
        OBS_IMAGE: torch.randn(32, 32, 3),  # 3D -> should gain batch dim
        f"{OBS_IMAGES}.front": torch.randn(64, 64, 3),  # 3D -> batched
        f"{OBS_IMAGES}.back": torch.randn(1, 64, 64, 3),  # 4D -> untouched
        "other_tensor": torch.randn(3, 3, 3, 3),  # 4D non-image key -> untouched
        "non_tensor": "string_value",
    }
    out = step(create_transition(observation=obs, action=torch.empty(0)))
    obs_out = out[TransitionKey.OBSERVATION]
    expected_shapes = {
        OBS_STATE: (1, 5),
        OBS_ENV_STATE: (1, 8),
        OBS_IMAGE: (1, 32, 32, 3),
        f"{OBS_IMAGES}.front": (1, 64, 64, 3),
        f"{OBS_IMAGES}.back": (1, 64, 64, 3),
        "other_tensor": (3, 3, 3, 3),
    }
    for key, shape in expected_shapes.items():
        assert obs_out[key].shape == shape
    assert obs_out["non_tensor"] == "string_value"
def test_integration_with_robot_processor():
    """The batching step composes correctly inside a DataProcessorPipeline."""
    pipeline = DataProcessorPipeline(
        [AddBatchDimensionProcessorStep()],
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    # Feed an unbatched observation through the whole pipeline.
    tr = create_transition(
        observation={OBS_STATE: torch.randn(7), OBS_IMAGE: torch.randn(224, 224, 3)},
        action=torch.empty(0),
    )
    obs_out = pipeline(tr)[TransitionKey.OBSERVATION]
    assert obs_out[OBS_STATE].shape == (1, 7)
    assert obs_out[OBS_IMAGE].shape == (1, 224, 224, 3)
def test_serialization_methods():
    """The step is stateless: config/state are empty dicts, load/reset are no-ops."""
    step = AddBatchDimensionProcessorStep()
    # get_config() serializes to an empty dict.
    cfg = step.get_config()
    assert isinstance(cfg, dict) and cfg == {}
    # state_dict() likewise has nothing to save.
    state = step.state_dict()
    assert isinstance(state, dict) and state == {}
    # Neither of these should raise on a stateless step.
    step.load_state_dict({})
    step.reset()
def test_save_and_load_pretrained():
    """Round-trip the step through save_pretrained / from_pretrained."""
    pipeline = DataProcessorPipeline(
        [AddBatchDimensionProcessorStep()],
        name="BatchPipeline",
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        # The config filename is derived from the lowercased pipeline name.
        assert (Path(tmp_dir) / "batchpipeline.json").exists()
        restored = DataProcessorPipeline.from_pretrained(
            tmp_dir,
            config_filename="batchpipeline.json",
            to_transition=identity_transition,
            to_output=identity_transition,
        )
        assert restored.name == "BatchPipeline"
        assert len(restored) == 1
        assert isinstance(restored.steps[0], AddBatchDimensionProcessorStep)
        # The reloaded step must still add a batch dimension.
        tr = create_transition(observation={OBS_STATE: torch.randn(5)}, action=torch.empty(0))
        assert restored(tr)[TransitionKey.OBSERVATION][OBS_STATE].shape == (1, 5)
def test_registry_functionality():
    """The step is registered in ProcessorStepRegistry under "to_batch_processor"."""
    assert ProcessorStepRegistry.get("to_batch_processor") is AddBatchDimensionProcessorStep
    assert "to_batch_processor" in ProcessorStepRegistry.list()
def test_registry_based_save_load():
    """Save/load with the default config filename resolves the step via the registry."""
    pipeline = DataProcessorPipeline(
        [AddBatchDimensionProcessorStep()],
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        pipeline.save_pretrained(tmp_dir)
        restored = DataProcessorPipeline.from_pretrained(
            tmp_dir,
            config_filename="dataprocessorpipeline.json",
            to_transition=identity_transition,
            to_output=identity_transition,
        )
    # The reloaded pipeline must still batch unbatched inputs.
    tr = create_transition(
        observation={OBS_STATE: torch.randn(3), OBS_IMAGE: torch.randn(100, 100, 3)},
        action=torch.empty(0),
    )
    obs_out = restored(tr)[TransitionKey.OBSERVATION]
    assert obs_out[OBS_STATE].shape == (1, 3)
    assert obs_out[OBS_IMAGE].shape == (1, 100, 100, 3)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_device_compatibility():
    """Batching must preserve the device of GPU-resident observation tensors."""
    step = AddBatchDimensionProcessorStep()
    obs = {
        OBS_STATE: torch.randn(7, device="cuda"),
        OBS_IMAGE: torch.randn(64, 64, 3, device="cuda"),
    }
    obs_out = step(create_transition(observation=obs, action=torch.empty(0)))[
        TransitionKey.OBSERVATION
    ]
    assert obs_out[OBS_STATE].shape == (1, 7)
    assert obs_out[OBS_IMAGE].shape == (1, 64, 64, 3)
    # Tensors must not be moved off the GPU by the unsqueeze.
    assert obs_out[OBS_STATE].device.type == "cuda"
    assert obs_out[OBS_IMAGE].device.type == "cuda"
def test_processor_preserves_other_transition_keys():
    """Non-observation transition fields pass through the step unchanged."""
    step = AddBatchDimensionProcessorStep()
    act = torch.randn(5)
    info = {"step": 10}
    comp = {"extra": "data"}
    out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(7)},
            action=act,
            reward=1.5,
            done=True,
            truncated=False,
            info=info,
            complementary_data=comp,
        )
    )
    # Values preserved (the action gains a batch dim; allclose broadcasts over it).
    assert torch.allclose(out[TransitionKey.ACTION], act)
    assert out[TransitionKey.REWARD] == 1.5
    assert out[TransitionKey.DONE] == True  # noqa: E712
    assert out[TransitionKey.TRUNCATED] == False  # noqa: E712
    assert out[TransitionKey.INFO] == info
    assert out[TransitionKey.COMPLEMENTARY_DATA] == comp
    # The observation itself was still processed.
    assert out[TransitionKey.OBSERVATION][OBS_STATE].shape == (1, 7)
def test_edge_case_zero_dimensional_tensors():
    """0D (scalar) tensors in the observation are left untouched."""
    step = AddBatchDimensionProcessorStep()
    scalar = torch.tensor(42.0)
    out = step(
        create_transition(
            observation={OBS_STATE: scalar, "scalar_value": scalar},
            action=torch.empty(0),
        )
    )
    obs_out = out[TransitionKey.OBSERVATION]
    # Scalars have no feature axis to batch, so they come back unchanged.
    assert torch.allclose(obs_out[OBS_STATE], scalar)
    assert torch.allclose(obs_out["scalar_value"], scalar)
# Action-specific tests
def test_action_1d_to_2d():
    """A 1D action vector gains a leading batch dimension."""
    step = AddBatchDimensionProcessorStep()
    vec = torch.randn(4)
    out = step(create_transition(observation={}, action=vec))
    batched = out[TransitionKey.ACTION]
    assert batched.shape == (1, 4)
    assert torch.equal(batched[0], vec)
def test_action_already_batched():
    """Actions that already have a batch dimension are returned verbatim."""
    step = AddBatchDimensionProcessorStep()
    # Cover both a single-sample batch and a multi-sample batch.
    for batched in (torch.randn(1, 4), torch.randn(5, 4)):
        out = step(create_transition(action=batched, observation={}))
        assert torch.equal(out[TransitionKey.ACTION], batched)
def test_action_higher_dimensional():
    """3D+ action tensors (e.g. sequences of actions) are not reshaped."""
    step = AddBatchDimensionProcessorStep()
    for act in (torch.randn(2, 4, 3), torch.randn(2, 10, 4, 3)):
        out = step(create_transition(action=act, observation={}))
        assert torch.equal(out[TransitionKey.ACTION], act)
def test_action_scalar_tensor():
    """A 0D action tensor stays scalar."""
    step = AddBatchDimensionProcessorStep()
    scalar = torch.tensor(1.5)
    out = step(create_transition(action=scalar, observation={}))
    # No feature axis to batch — dimensionality and value are both preserved.
    assert out[TransitionKey.ACTION].dim() == 0
    assert torch.equal(out[TransitionKey.ACTION], scalar)
def test_action_non_tensor_raises_error():
    """Any non-tensor action (list, ndarray, str, dict) triggers a ValueError."""
    step = AddBatchDimensionProcessorStep()
    bad_actions = [
        [0.1, 0.2, 0.3, 0.4],  # plain list
        np.array([1, 2, 3, 4]),  # numpy array
        "forward",  # string
        {"linear": [0.5, 0.0], "angular": 0.2},  # dict
    ]
    for bad in bad_actions:
        with pytest.raises(ValueError, match="Action should be a PolicyAction type"):
            step(create_transition(action=bad))
def test_action_none():
    """An empty 1D action tensor is batched into shape (1, 0)."""
    step = AddBatchDimensionProcessorStep()
    out = step(create_transition(action=torch.empty(0), observation={}))
    assert out[TransitionKey.ACTION].shape == (1, 0)
def test_action_with_observation():
    """Observation entries and the action are batched in the same pass."""
    step = AddBatchDimensionProcessorStep()
    out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(7), OBS_IMAGE: torch.randn(64, 64, 3)},
            action=torch.randn(4),
        )
    )
    assert out[TransitionKey.OBSERVATION][OBS_STATE].shape == (1, 7)
    assert out[TransitionKey.OBSERVATION][OBS_IMAGE].shape == (1, 64, 64, 3)
    assert out[TransitionKey.ACTION].shape == (1, 4)
def test_action_different_sizes():
    """Batching works for action vectors of any length (varying robot DOF)."""
    step = AddBatchDimensionProcessorStep()
    for dof in (1, 2, 4, 7, 10, 20):
        vec = torch.randn(dof)
        out = step(create_transition(action=vec, observation={}))
        assert out[TransitionKey.ACTION].shape == (1, dof)
        assert torch.equal(out[TransitionKey.ACTION][0], vec)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_action_device_compatibility():
    """Action batching preserves the tensor's device (CUDA and CPU alike)."""
    step = AddBatchDimensionProcessorStep()
    for device in ("cuda", "cpu"):
        out = step(create_transition(action=torch.randn(4, device=device), observation={}))
        assert out[TransitionKey.ACTION].shape == (1, 4)
        assert out[TransitionKey.ACTION].device.type == device
def test_action_dtype_preservation():
    """Adding the batch dimension must not change the action dtype."""
    step = AddBatchDimensionProcessorStep()
    for dtype in (torch.float32, torch.float64, torch.int32, torch.int64):
        out = step(create_transition(action=torch.randn(4).to(dtype), observation={}))
        assert out[TransitionKey.ACTION].dtype == dtype
        assert out[TransitionKey.ACTION].shape == (1, 4)
def test_empty_action_tensor():
    """Empty actions: a 1D empty tensor is batched; a (1, 0) tensor is left alone."""
    step = AddBatchDimensionProcessorStep()
    # 1D empty -> gains a batch dimension even with zero elements.
    out = step(create_transition(action=torch.tensor([]), observation={}))
    assert out[TransitionKey.ACTION].shape == (1, 0)
    # Already-batched empty -> unchanged.
    out = step(create_transition(action=torch.randn(1, 0), observation={}))
    assert out[TransitionKey.ACTION].shape == (1, 0)
# Task-specific tests
def test_task_string_to_list():
    """A bare string task is wrapped into a single-element list (its batch dim)."""
    step = AddBatchDimensionProcessorStep()
    out = step(
        create_transition(
            action=torch.empty(0), observation={}, complementary_data={"task": "pick_cube"}
        )
    )
    task = out[TransitionKey.COMPLEMENTARY_DATA]["task"]
    assert isinstance(task, list)
    assert task == ["pick_cube"]
    assert len(task) == 1
def test_task_string_validation():
    """Strings become one-element lists; lists of strings pass through unchanged."""
    step = AddBatchDimensionProcessorStep()
    # A plain string task gets wrapped.
    out = step(
        create_transition(
            complementary_data={"task": "valid_task"}, observation={}, action=torch.empty(0)
        )
    )
    assert out[TransitionKey.COMPLEMENTARY_DATA]["task"] == ["valid_task"]
    # A list of strings is already "batched" and left alone.
    out = step(
        create_transition(
            complementary_data={"task": ["task1", "task2"]},
            observation={},
            action=torch.empty(0),
        )
    )
    assert out[TransitionKey.COMPLEMENTARY_DATA]["task"] == ["task1", "task2"]
def test_task_list_of_strings():
    """Task lists of any length or content are returned exactly as provided."""
    step = AddBatchDimensionProcessorStep()
    samples = (
        ["pick_cube"],  # single element
        ["pick_cube", "place_cube"],  # two elements
        ["task1", "task2", "task3"],  # three elements
        [],  # empty list
        [""],  # empty string inside a list
        ["task with spaces", "task_with_underscores"],  # mixed formats
    )
    for tasks in samples:
        out = step(
            create_transition(
                complementary_data={"task": tasks}, observation={}, action=torch.empty(0)
            )
        )
        got = out[TransitionKey.COMPLEMENTARY_DATA]["task"]
        assert isinstance(got, list)
        assert got == tasks
def test_complementary_data_none():
    """None complementary_data is normalized to an empty dict."""
    step = AddBatchDimensionProcessorStep()
    out = step(create_transition(complementary_data=None, action=torch.empty(0), observation={}))
    assert out[TransitionKey.COMPLEMENTARY_DATA] == {}
def test_complementary_data_empty():
    """An empty complementary_data dict stays empty."""
    step = AddBatchDimensionProcessorStep()
    out = step(create_transition(complementary_data={}, observation={}, action=torch.empty(0)))
    assert out[TransitionKey.COMPLEMENTARY_DATA] == {}
def test_complementary_data_no_task():
    """complementary_data without a task field passes through untouched."""
    step = AddBatchDimensionProcessorStep()
    payload = {
        "episode_id": 123,
        "timestamp": 1234567890.0,
        "extra_info": "some data",
    }
    out = step(
        create_transition(complementary_data=payload, observation={}, action=torch.empty(0))
    )
    assert out[TransitionKey.COMPLEMENTARY_DATA] == payload
def test_complementary_data_mixed():
    """Only the task entry is batched; sibling entries keep their values."""
    step = AddBatchDimensionProcessorStep()
    payload = {
        "task": "stack_blocks",
        "episode_id": 456,
        "difficulty": "hard",
        "metadata": {"scene": "kitchen"},
    }
    out = step(
        create_transition(complementary_data=payload, observation={}, action=torch.empty(0))
    )
    comp = out[TransitionKey.COMPLEMENTARY_DATA]
    # Task gains its list "batch dimension".
    assert comp["task"] == ["stack_blocks"]
    # Everything else is untouched.
    assert comp["episode_id"] == 456
    assert comp["difficulty"] == "hard"
    assert comp["metadata"] == {"scene": "kitchen"}
def test_task_with_observation_and_action():
    """Observation, action, and task are all batched in a single call."""
    step = AddBatchDimensionProcessorStep()
    out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(5), OBS_IMAGE: torch.randn(32, 32, 3)},
            action=torch.randn(4),
            complementary_data={"task": "navigate_to_goal"},
        )
    )
    assert out[TransitionKey.OBSERVATION][OBS_STATE].shape == (1, 5)
    assert out[TransitionKey.OBSERVATION][OBS_IMAGE].shape == (1, 32, 32, 3)
    assert out[TransitionKey.ACTION].shape == (1, 4)
    assert out[TransitionKey.COMPLEMENTARY_DATA]["task"] == ["navigate_to_goal"]
def test_task_comprehensive_string_cases():
    """Exercise task batching across unusual string contents and list shapes."""
    step = AddBatchDimensionProcessorStep()

    def run(task_value):
        # Helper: push one task value through the step, return the processed task.
        tr = create_transition(
            complementary_data={"task": task_value}, observation={}, action=torch.empty(0)
        )
        return step(tr)[TransitionKey.COMPLEMENTARY_DATA]["task"]

    # Bare strings — including unicode, emoji, and control characters — get wrapped.
    string_tasks = [
        "pick_and_place",
        "navigate",
        "open_drawer",
        "",  # empty string is a valid (edge-case) task
        "task with spaces",
        "task_with_underscores",
        "task-with-dashes",
        "UPPERCASE_TASK",
        "MixedCaseTask",
        "task123",
        "数字任务",  # unicode task
        "🤖 robot task",  # emoji in task
        "task\nwith\nnewlines",  # special characters
        "task\twith\ttabs",
        "task with 'quotes'",
        'task with "double quotes"',
    ]
    for task in string_tasks:
        got = run(task)
        assert isinstance(got, list)
        assert got == [task]
        assert len(got) == 1
    # Lists are treated as already batched and come back identical.
    list_tasks = [
        ["single_task"],
        ["task1", "task2"],
        ["pick", "place", "navigate"],
        [],  # empty list
        [""],  # list with empty string
        ["task with spaces", "task_with_underscores", "UPPERCASE"],
        ["🤖 task", "数字任务", "normal_task"],  # mixed formats
    ]
    for tasks in list_tasks:
        got = run(tasks)
        assert isinstance(got, list)
        assert got == tasks
def test_task_preserves_other_keys():
    """Batching the task must not disturb sibling complementary_data entries."""
    step = AddBatchDimensionProcessorStep()
    payload = {
        "task": "clean_table",
        "robot_id": "robot_123",
        "motor_id": "motor_456",
        "config": {"speed": "slow", "precision": "high"},
        "metrics": [1.0, 2.0, 3.0],
    }
    out = step(
        create_transition(complementary_data=payload, observation={}, action=torch.empty(0))
    )
    comp = out[TransitionKey.COMPLEMENTARY_DATA]
    # Task is batched into a list.
    assert comp["task"] == ["clean_table"]
    # Every other entry survives byte-for-byte.
    assert comp["robot_id"] == "robot_123"
    assert comp["motor_id"] == "motor_456"
    assert comp["config"] == {"speed": "slow", "precision": "high"}
    assert comp["metrics"] == [1.0, 2.0, 3.0]
# Index and task_index specific tests
def test_index_scalar_to_1d():
    """A 0D "index" tensor is unsqueezed to shape (1,) with dtype preserved."""
    step = AddBatchDimensionProcessorStep()
    out = step(
        create_transition(
            complementary_data={"index": torch.tensor(42, dtype=torch.int64)},
            observation={},
            action=torch.empty(0),
        )
    )
    idx = out[TransitionKey.COMPLEMENTARY_DATA]["index"]
    assert idx.shape == (1,)
    assert idx.dtype == torch.int64
    assert idx[0] == 42
def test_task_index_scalar_to_1d():
    """A 0D "task_index" tensor is unsqueezed to shape (1,) with dtype preserved."""
    step = AddBatchDimensionProcessorStep()
    out = step(
        create_transition(
            complementary_data={"task_index": torch.tensor(7, dtype=torch.int64)},
            observation={},
            action=torch.empty(0),
        )
    )
    tidx = out[TransitionKey.COMPLEMENTARY_DATA]["task_index"]
    assert tidx.shape == (1,)
    assert tidx.dtype == torch.int64
    assert tidx[0] == 7
def test_index_and_task_index_together():
    """index, task_index, and task are each batched within one transition."""
    step = AddBatchDimensionProcessorStep()
    comp_in = {
        "index": torch.tensor(100, dtype=torch.int64),
        "task_index": torch.tensor(3, dtype=torch.int64),
        "task": "pick_object",
    }
    out = step(
        create_transition(complementary_data=comp_in, observation={}, action=torch.empty(0))
    )
    comp = out[TransitionKey.COMPLEMENTARY_DATA]
    # index: unsqueezed to 1D.
    assert comp["index"].shape == (1,)
    assert comp["index"][0] == 100
    # task_index: unsqueezed to 1D.
    assert comp["task_index"].shape == (1,)
    assert comp["task_index"][0] == 3
    # task: wrapped into a list.
    assert comp["task"] == ["pick_object"]
def test_index_already_batched():
    """1D and 2D "index" tensors already carry a batch dim and are untouched."""
    step = AddBatchDimensionProcessorStep()
    candidates = (
        torch.tensor([42], dtype=torch.int64),  # 1D, already batched
        torch.tensor([[42, 43]], dtype=torch.int64),  # 2D
    )
    for idx in candidates:
        out = step(
            create_transition(
                complementary_data={"index": idx}, observation={}, action=torch.empty(0)
            )
        )
        assert torch.equal(out[TransitionKey.COMPLEMENTARY_DATA]["index"], idx)
def test_task_index_already_batched():
    """1D and 2D "task_index" tensors are returned verbatim."""
    step = AddBatchDimensionProcessorStep()
    candidates = (
        torch.tensor([7], dtype=torch.int64),  # 1D, already batched
        torch.tensor([[7, 8]], dtype=torch.int64),  # 2D
    )
    for tidx in candidates:
        out = step(
            create_transition(
                complementary_data={"task_index": tidx}, observation={}, action=torch.empty(0)
            )
        )
        assert torch.equal(out[TransitionKey.COMPLEMENTARY_DATA]["task_index"], tidx)
def test_index_non_tensor_unchanged():
    """Plain ints/lists under index keys are not converted or reshaped."""
    step = AddBatchDimensionProcessorStep()
    out = step(
        create_transition(
            complementary_data={"index": 42, "task_index": [1, 2, 3]},
            observation={},
            action=torch.empty(0),
        )
    )
    comp = out[TransitionKey.COMPLEMENTARY_DATA]
    assert comp["index"] == 42
    assert comp["task_index"] == [1, 2, 3]
def test_index_dtype_preservation():
    """Unsqueezing index/task_index keeps the integer dtype intact."""
    step = AddBatchDimensionProcessorStep()
    for dtype in (torch.int32, torch.int64, torch.long):
        out = step(
            create_transition(
                complementary_data={
                    "index": torch.tensor(42, dtype=dtype),
                    "task_index": torch.tensor(7, dtype=dtype),
                },
                observation={},
                action=torch.empty(0),
            )
        )
        comp = out[TransitionKey.COMPLEMENTARY_DATA]
        assert comp["index"].dtype == dtype
        assert comp["task_index"].dtype == dtype
def test_index_with_full_transition():
    """index/task_index batching works alongside obs, action, and task batching."""
    step = AddBatchDimensionProcessorStep()
    out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(7), OBS_IMAGE: torch.randn(64, 64, 3)},
            action=torch.randn(4),
            reward=0.5,
            done=False,
            complementary_data={
                "task": "navigate_to_goal",
                "index": torch.tensor(1000, dtype=torch.int64),
                "task_index": torch.tensor(5, dtype=torch.int64),
                "episode_id": 123,
            },
        )
    )
    # Observation and action gain batch dimensions.
    assert out[TransitionKey.OBSERVATION][OBS_STATE].shape == (1, 7)
    assert out[TransitionKey.OBSERVATION][OBS_IMAGE].shape == (1, 64, 64, 3)
    assert out[TransitionKey.ACTION].shape == (1, 4)
    comp = out[TransitionKey.COMPLEMENTARY_DATA]
    # Task wrapped, index/task_index unsqueezed to 1D.
    assert comp["task"] == ["navigate_to_goal"]
    assert comp["index"].shape == (1,)
    assert comp["index"][0] == 1000
    assert comp["task_index"].shape == (1,)
    assert comp["task_index"][0] == 5
    # Non-tensor, non-task fields are never modified.
    assert comp["episode_id"] == 123
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_index_device_compatibility():
    """index/task_index tensors on CUDA stay on CUDA after unsqueezing."""
    step = AddBatchDimensionProcessorStep()
    out = step(
        create_transition(
            complementary_data={
                "index": torch.tensor(42, dtype=torch.int64, device="cuda"),
                "task_index": torch.tensor(7, dtype=torch.int64, device="cuda"),
            },
            observation={},
            action=torch.empty(0),
        )
    )
    comp = out[TransitionKey.COMPLEMENTARY_DATA]
    for key in ("index", "task_index"):
        assert comp[key].shape == (1,)
        assert comp[key].device.type == "cuda"
def test_empty_index_tensor():
    """A 1D empty "index" tensor is already batched and keeps shape (0,)."""
    step = AddBatchDimensionProcessorStep()
    out = step(
        create_transition(
            complementary_data={"index": torch.tensor([], dtype=torch.int64)},
            observation={},
            action=torch.empty(0),
        )
    )
    assert out[TransitionKey.COMPLEMENTARY_DATA]["index"].shape == (0,)
def test_action_processing_creates_new_transition():
    """The step returns a new transition; the input's action is not mutated."""
    step = AddBatchDimensionProcessorStep()
    vec = torch.randn(4)
    tr = create_transition(action=vec, observation={})
    out = step(tr)
    # Functional design: a fresh transition object, not in-place mutation.
    assert out is not tr
    # The original transition still holds the unbatched action.
    assert tr[TransitionKey.ACTION].shape == (4,)
    # The result carries the batched action.
    assert out[TransitionKey.ACTION].shape == (1, 4)
    assert torch.equal(out[TransitionKey.ACTION][0], vec)
def test_task_processing_creates_new_transition():
    """A string task is wrapped in a list inside a newly created transition."""
    step = AddBatchDimensionProcessorStep()
    comp_data = {"task": "sort_objects"}
    source = create_transition(
        complementary_data=comp_data, observation={}, action=torch.empty(0)
    )
    output = step(source)
    # A different transition object is produced (functional design).
    assert output is not source
    # The task string gets batched into a single-element list.
    assert output[TransitionKey.COMPLEMENTARY_DATA]["task"] == ["sort_objects"]
    # The input dict itself still holds the plain string (current behavior).
    assert comp_data["task"] == "sort_objects"
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_batch_processor.py",
"license": "Apache License 2.0",
"lines": 894,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_classifier_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Reward Classifier processor."""
import tempfile
import pytest
import torch
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig
from lerobot.policies.sac.reward_model.processor_classifier import make_classifier_processor
from lerobot.processor import (
DataProcessorPipeline,
DeviceProcessorStep,
IdentityProcessorStep,
NormalizerProcessorStep,
TransitionKey,
)
from lerobot.processor.converters import create_transition, transition_to_batch
from lerobot.utils.constants import OBS_IMAGE, OBS_STATE
def create_default_config():
    """Build a RewardClassifierConfig with standard test features on CPU."""
    cfg = RewardClassifierConfig()
    cfg.input_features = {
        OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(10,)),
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    # The classifier emits a single scalar prediction.
    cfg.output_features = {"reward": PolicyFeature(type=FeatureType.ACTION, shape=(1,))}
    # Only the state stream is normalized; images and output pass through unchanged.
    cfg.normalization_mapping = {
        FeatureType.STATE: NormalizationMode.MEAN_STD,
        FeatureType.VISUAL: NormalizationMode.IDENTITY,
        FeatureType.ACTION: NormalizationMode.IDENTITY,
    }
    cfg.device = "cpu"
    return cfg
def create_default_stats():
    """Return dataset statistics matching the default test configuration."""
    # Images and the classifier output carry no stats (identity normalization).
    return {
        OBS_STATE: {"mean": torch.zeros(10), "std": torch.ones(10)},
        OBS_IMAGE: {},
        "reward": {},
    }
def test_make_classifier_processor_basic():
    """Check names and step layout of the freshly built classifier processors."""
    pre, post = make_classifier_processor(create_default_config(), create_default_stats())
    assert pre.name == "classifier_preprocessor"
    assert post.name == "classifier_postprocessor"
    # Preprocessor: input normalizer, output normalizer, then device transfer.
    expected_pre = [NormalizerProcessorStep, NormalizerProcessorStep, DeviceProcessorStep]
    assert len(pre.steps) == len(expected_pre)
    for step, step_cls in zip(pre.steps, expected_pre):
        assert isinstance(step, step_cls)
    # Postprocessor: device transfer followed by an identity step.
    expected_post = [DeviceProcessorStep, IdentityProcessorStep]
    assert len(post.steps) == len(expected_post)
    for step, step_cls in zip(post.steps, expected_post):
        assert isinstance(step, step_cls)
def test_classifier_processor_normalization():
    """Run a sample batch through the preprocessor and verify output shapes."""
    preprocessor, _ = make_classifier_processor(create_default_config(), create_default_stats())
    obs = {OBS_STATE: torch.randn(10), OBS_IMAGE: torch.randn(3, 224, 224)}
    dummy_reward = torch.randn(1)  # stands in for the action/reward slot
    batch = transition_to_batch(create_transition(obs, dummy_reward))
    out = preprocessor(batch)
    # Shapes must survive normalization untouched.
    assert out[OBS_STATE].shape == (10,)
    assert out[OBS_IMAGE].shape == (3, 224, 224)
    assert out[TransitionKey.ACTION.value].shape == (1,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_classifier_processor_cuda():
    """CPU inputs land on CUDA after preprocessing, and back on CPU afterwards."""
    config = create_default_config()
    config.device = "cuda"
    preprocessor, postprocessor = make_classifier_processor(config, create_default_stats())
    obs = {OBS_STATE: torch.randn(10), OBS_IMAGE: torch.randn(3, 224, 224)}
    batch = transition_to_batch(create_transition(obs, torch.randn(1)))
    processed = preprocessor(batch)
    # Every tensor should have been moved onto the GPU.
    for key in (OBS_STATE, OBS_IMAGE, TransitionKey.ACTION.value):
        assert processed[key].device.type == "cuda"
    # The postprocessor brings the prediction back to the CPU.
    assert postprocessor(processed[TransitionKey.ACTION.value]).device.type == "cpu"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_classifier_processor_accelerate_scenario():
    """Data already placed on cuda:0 (as Accelerate does) must stay there."""
    config = create_default_config()
    config.device = "cuda:0"
    preprocessor, _ = make_classifier_processor(config, create_default_stats())
    gpu = torch.device("cuda:0")
    obs = {
        OBS_STATE: torch.randn(10).to(gpu),
        OBS_IMAGE: torch.randn(3, 224, 224).to(gpu),
    }
    batch = transition_to_batch(create_transition(obs, torch.randn(1).to(gpu)))
    processed = preprocessor(batch)
    # No cross-device copies: everything remains on the original GPU.
    for key in (OBS_STATE, OBS_IMAGE, TransitionKey.ACTION.value):
        assert processed[key].device == gpu
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_classifier_processor_multi_gpu():
    """Tensors living on cuda:1 must not be pulled onto the configured cuda:0."""
    config = create_default_config()
    config.device = "cuda:0"
    preprocessor, _ = make_classifier_processor(config, create_default_stats())
    other_gpu = torch.device("cuda:1")
    obs = {
        OBS_STATE: torch.randn(10).to(other_gpu),
        OBS_IMAGE: torch.randn(3, 224, 224).to(other_gpu),
    }
    batch = transition_to_batch(create_transition(obs, torch.randn(1).to(other_gpu)))
    processed = preprocessor(batch)
    # The pipeline respects the tensors' existing placement on cuda:1.
    for key in (OBS_STATE, OBS_IMAGE, TransitionKey.ACTION.value):
        assert processed[key].device == other_gpu
def test_classifier_processor_without_stats():
    """Construction and processing must work when no dataset stats are given."""
    pre, post = make_classifier_processor(create_default_config(), dataset_stats=None)
    # Both pipelines are still created.
    assert pre is not None
    assert post is not None
    obs = {OBS_STATE: torch.randn(10), OBS_IMAGE: torch.randn(3, 224, 224)}
    batch = transition_to_batch(create_transition(obs, torch.randn(1)))
    # Processing should not require statistics to succeed.
    assert pre(batch) is not None
def test_classifier_processor_save_and_load():
    """A saved preprocessor must round-trip through from_pretrained and still run."""
    preprocessor, _ = make_classifier_processor(create_default_config(), create_default_stats())
    with tempfile.TemporaryDirectory() as tmpdir:
        preprocessor.save_pretrained(tmpdir)
        restored = DataProcessorPipeline.from_pretrained(
            tmpdir, config_filename="classifier_preprocessor.json"
        )
        obs = {OBS_STATE: torch.randn(10), OBS_IMAGE: torch.randn(3, 224, 224)}
        batch = transition_to_batch(create_transition(obs, torch.randn(1)))
        out = restored(batch)
        # The reloaded pipeline preserves all output shapes.
        assert out[OBS_STATE].shape == (10,)
        assert out[OBS_IMAGE].shape == (3, 224, 224)
        assert out[TransitionKey.ACTION.value].shape == (1,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_classifier_processor_mixed_precision():
    """Swapping in a float16 DeviceProcessorStep must downcast all tensors."""
    config = create_default_config()
    config.device = "cuda"
    preprocessor, _ = make_classifier_processor(config, create_default_stats())
    # Rebuild the step list, replacing the device step with a float16 variant.
    preprocessor.steps = [
        DeviceProcessorStep(device=config.device, float_dtype="float16")
        if isinstance(step, DeviceProcessorStep)
        else step
        for step in preprocessor.steps
    ]
    obs = {
        OBS_STATE: torch.randn(10, dtype=torch.float32),
        OBS_IMAGE: torch.randn(3, 224, 224, dtype=torch.float32),
    }
    batch = transition_to_batch(create_transition(obs, torch.randn(1, dtype=torch.float32)))
    processed = preprocessor(batch)
    # All float32 inputs should come out as float16.
    for key in (OBS_STATE, OBS_IMAGE, TransitionKey.ACTION.value):
        assert processed[key].dtype == torch.float16
def test_classifier_processor_batch_data():
    """Batched inputs keep their leading batch dimension through the preprocessor."""
    preprocessor, _ = make_classifier_processor(create_default_config(), create_default_stats())
    bs = 16
    obs = {
        OBS_STATE: torch.randn(bs, 10),
        OBS_IMAGE: torch.randn(bs, 3, 224, 224),
    }
    batch = transition_to_batch(create_transition(obs, torch.randn(bs, 1)))
    out = preprocessor(batch)
    # The leading batch axis is preserved everywhere.
    assert out[OBS_STATE].shape == (bs, 10)
    assert out[OBS_IMAGE].shape == (bs, 3, 224, 224)
    assert out[TransitionKey.ACTION.value].shape == (bs, 1)
def test_classifier_processor_postprocessor_identity():
    """Test that the Classifier postprocessor leaves predictions unchanged.

    The postprocessor ends in an IdentityProcessorStep, so apart from a
    possible device transfer the values must pass through untouched.

    The original version also built a transition and called
    ``transition_to_batch`` without using the result; that dead code is removed.
    """
    config = create_default_config()
    stats = create_default_stats()
    _, postprocessor = make_classifier_processor(config, stats)
    # Batch of rewards/predictions fed straight into the postprocessor.
    reward = torch.tensor([[0.8], [0.3], [0.9]])
    processed = postprocessor(reward)
    # IdentityProcessor should leave values unchanged (except device).
    assert torch.allclose(processed.cpu(), reward.cpu())
    assert processed.device.type == "cpu"
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_classifier_processor.py",
"license": "Apache License 2.0",
"lines": 278,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_converters.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import torch
from lerobot.processor import TransitionKey
from lerobot.processor.converters import (
batch_to_transition,
create_transition,
to_tensor,
transition_to_batch,
)
from lerobot.utils.constants import ACTION, DONE, OBS_STATE, OBS_STR, REWARD
# Tests for the unified to_tensor function
def test_to_tensor_numpy_arrays():
    """to_tensor converts numpy arrays of any dtype to float32 tensors."""
    cases = [
        # (input array, expected values after conversion)
        (np.array([1.0, 2.0, 3.0]), [1.0, 2.0, 3.0]),
        (np.array([1, 2, 3], dtype=np.int64), [1.0, 2.0, 3.0]),
        # uint8 arrays (previously "preserved") now convert as well.
        (np.array([100, 150, 200], dtype=np.uint8), [100.0, 150.0, 200.0]),
    ]
    for source, expected in cases:
        converted = to_tensor(source)
        assert isinstance(converted, torch.Tensor)
        assert converted.dtype == torch.float32
        assert torch.allclose(converted, torch.tensor(expected))
def test_to_tensor_numpy_scalars():
    """Numpy scalars become 0-dimensional float32 tensors."""
    for scalar, expected in ((np.float32(3.14), 3.14), (np.int32(42), 42.0)):
        converted = to_tensor(scalar)
        assert isinstance(converted, torch.Tensor)
        # A scalar input yields a 0-dimensional tensor.
        assert converted.ndim == 0
        assert converted.dtype == torch.float32
        assert converted.item() == pytest.approx(expected)
def test_to_tensor_python_scalars():
    """Plain Python ints and floats convert to float32 tensors."""
    for value in (42, 3.14):
        converted = to_tensor(value)
        assert isinstance(converted, torch.Tensor)
        assert converted.dtype == torch.float32
        assert converted.item() == pytest.approx(float(value))
def test_to_tensor_sequences():
    """Lists and tuples convert element-wise to float32 tensors."""
    cases = (
        ([1, 2, 3], [1.0, 2.0, 3.0]),
        ((4.5, 5.5, 6.5), [4.5, 5.5, 6.5]),
    )
    for sequence, expected in cases:
        converted = to_tensor(sequence)
        assert isinstance(converted, torch.Tensor)
        assert converted.dtype == torch.float32
        assert torch.allclose(converted, torch.tensor(expected))
def test_to_tensor_existing_tensors():
    """Existing tensors pass through (float32) or are cast (other dtypes)."""
    # float32 tensors keep their values and dtype.
    same_dtype = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float32)
    out = to_tensor(same_dtype)
    assert isinstance(out, torch.Tensor)
    assert out.dtype == torch.float32
    assert torch.allclose(out, same_dtype)
    # int64 tensors are cast to float32.
    out = to_tensor(torch.tensor([1, 2, 3], dtype=torch.int64))
    assert isinstance(out, torch.Tensor)
    assert out.dtype == torch.float32
    assert torch.allclose(out, torch.tensor([1.0, 2.0, 3.0]))
def test_to_tensor_dictionaries():
    """Dictionaries are converted recursively, value by value."""
    # Flat dictionary: every leaf becomes a tensor.
    flat = to_tensor({"mean": [0.1, 0.2], "std": np.array([1.0, 2.0]), "count": 42})
    assert isinstance(flat, dict)
    for key in ("mean", "std", "count"):
        assert isinstance(flat[key], torch.Tensor)
    assert torch.allclose(flat["mean"], torch.tensor([0.1, 0.2]))
    assert torch.allclose(flat["std"], torch.tensor([1.0, 2.0]))
    assert flat["count"].item() == pytest.approx(42.0)
    # Nested dictionary: structure is preserved, leaves are converted.
    nested = to_tensor(
        {
            ACTION: {"mean": [0.1, 0.2], "std": [1.0, 2.0]},
            OBS_STR: {"mean": np.array([0.5, 0.6]), "count": 10},
        }
    )
    assert isinstance(nested, dict)
    assert isinstance(nested[ACTION], dict)
    assert isinstance(nested[OBS_STR], dict)
    assert isinstance(nested[ACTION]["mean"], torch.Tensor)
    assert isinstance(nested[OBS_STR]["mean"], torch.Tensor)
    assert torch.allclose(nested[ACTION]["mean"], torch.tensor([0.1, 0.2]))
    assert torch.allclose(nested[OBS_STR]["mean"], torch.tensor([0.5, 0.6]))
def test_to_tensor_none_filtering():
    """None-valued entries are dropped at every nesting level."""
    converted = to_tensor(
        {"valid": [1, 2, 3], "none_value": None, "nested": {"valid": [4, 5], "also_none": None}}
    )
    # None entries vanish; real values survive.
    assert "none_value" not in converted
    assert "also_none" not in converted["nested"]
    assert "valid" in converted
    assert "valid" in converted["nested"]
    assert torch.allclose(converted["valid"], torch.tensor([1.0, 2.0, 3.0]))
def test_to_tensor_dtype_parameter():
    """The dtype argument controls (or preserves) the output dtype."""
    int_arr = np.array([1, 2, 3])
    # Default and explicit float32 behave the same; float64 is honored.
    assert to_tensor(int_arr).dtype == torch.float32
    assert to_tensor(int_arr, dtype=torch.float32).dtype == torch.float32
    assert to_tensor(int_arr, dtype=torch.float64).dtype == torch.float64
    # dtype=None keeps the source dtype.
    preserved = to_tensor(np.array([1.0, 2.0, 3.0], dtype=np.float64), dtype=None)
    assert preserved.dtype == torch.float64
def test_to_tensor_device_parameter():
    """The device argument places the resulting tensor."""
    values = np.array([1.0, 2.0, 3.0])
    # CPU is the default target and always available.
    assert to_tensor(values, device="cpu").device.type == "cpu"
    # The CUDA path is only exercised when a GPU is present.
    if torch.cuda.is_available():
        assert to_tensor(values, device="cuda").device.type == "cuda"
def test_to_tensor_empty_dict():
    """An empty dict converts to an empty dict."""
    converted = to_tensor({})
    assert isinstance(converted, dict)
    assert len(converted) == 0
def test_to_tensor_unsupported_type():
    """Strings and arbitrary objects are rejected with a TypeError."""
    for bad_value in ("unsupported_string", object()):
        with pytest.raises(TypeError, match="Unsupported type for tensor conversion"):
            to_tensor(bad_value)
def test_batch_to_transition_with_index_fields():
    """index and task_index fields end up in the transition's complementary data."""
    batch = {
        OBS_STATE: torch.randn(1, 7),
        ACTION: torch.randn(1, 4),
        REWARD: 1.5,
        DONE: False,
        "task": ["pick_cube"],
        "index": torch.tensor([42], dtype=torch.int64),
        "task_index": torch.tensor([3], dtype=torch.int64),
    }
    transition = batch_to_transition(batch)
    # The core transition keys must all be present.
    for key in (TransitionKey.OBSERVATION, TransitionKey.ACTION, TransitionKey.COMPLEMENTARY_DATA):
        assert key in transition
    comp_data = transition[TransitionKey.COMPLEMENTARY_DATA]
    # index/task_index/task are routed into complementary data...
    for field in ("index", "task_index", "task"):
        assert field in comp_data
    # ...with their values intact.
    assert torch.equal(comp_data["index"], batch["index"])
    assert torch.equal(comp_data["task_index"], batch["task_index"])
    assert comp_data["task"] == batch["task"]
def test_transition_to_batch_with_index_fields():
    """Test that transition_to_batch handles index and task_index fields correctly.

    NOTE: renamed from ``testtransition_to_batch_with_index_fields`` (missing
    underscore). The old name was inconsistent with the sibling
    ``test_transition_to_batch_without_index_fields`` and is silently skipped
    under a ``python_functions = "test_*"`` pytest configuration.
    """
    # Create transition with index and task_index in complementary_data
    transition = create_transition(
        observation={OBS_STATE: torch.randn(1, 7)},
        action=torch.randn(1, 4),
        reward=1.5,
        done=False,
        complementary_data={
            "task": ["navigate"],
            "index": torch.tensor([100], dtype=torch.int64),
            "task_index": torch.tensor([5], dtype=torch.int64),
        },
    )
    batch = transition_to_batch(transition)
    # Check that index and task_index are surfaced as top-level batch keys
    assert "index" in batch
    assert "task_index" in batch
    assert "task" in batch
    # Verify values round-trip unchanged
    assert torch.equal(batch["index"], transition[TransitionKey.COMPLEMENTARY_DATA]["index"])
    assert torch.equal(batch["task_index"], transition[TransitionKey.COMPLEMENTARY_DATA]["task_index"])
    assert batch["task"] == transition[TransitionKey.COMPLEMENTARY_DATA]["task"]
def test_batch_to_transition_without_index_fields():
    """Conversion succeeds when index/task_index are absent from the batch."""
    transition = batch_to_transition(
        {
            OBS_STATE: torch.randn(1, 7),
            ACTION: torch.randn(1, 4),
            "task": ["pick_cube"],
        }
    )
    comp_data = transition[TransitionKey.COMPLEMENTARY_DATA]
    # task is carried over; the absent index fields are not invented.
    assert "task" in comp_data
    assert "index" not in comp_data
    assert "task_index" not in comp_data
def test_transition_to_batch_without_index_fields():
    """Conversion succeeds when index/task_index are absent from the transition."""
    batch = transition_to_batch(
        create_transition(
            observation={OBS_STATE: torch.randn(1, 7)},
            action=torch.randn(1, 4),
            complementary_data={"task": ["navigate"]},
        )
    )
    # task is carried over; the absent index fields are not invented.
    assert "task" in batch
    assert "index" not in batch
    assert "task_index" not in batch
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_converters.py",
"license": "Apache License 2.0",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_device_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import pytest
import torch
from lerobot.configs.types import FeatureType, PipelineFeatureType, PolicyFeature
from lerobot.processor import DataProcessorPipeline, DeviceProcessorStep, TransitionKey
from lerobot.processor.converters import create_transition, identity_transition
from lerobot.utils.constants import ACTION, OBS_IMAGE, OBS_STATE
def test_basic_functionality():
    """Every tensor in a transition stays on CPU with a cpu-targeted step."""
    step = DeviceProcessorStep(device="cpu")
    transition = create_transition(
        observation={OBS_STATE: torch.randn(10), OBS_IMAGE: torch.randn(3, 224, 224)},
        action=torch.randn(5),
        reward=torch.tensor(1.0),
        done=torch.tensor(False),
        truncated=torch.tensor(False),
    )
    out = step(transition)
    # Observation tensors and every scalar field must report a CPU device.
    assert out[TransitionKey.OBSERVATION][OBS_STATE].device.type == "cpu"
    assert out[TransitionKey.OBSERVATION][OBS_IMAGE].device.type == "cpu"
    for key in (TransitionKey.ACTION, TransitionKey.REWARD, TransitionKey.DONE, TransitionKey.TRUNCATED):
        assert out[key].device.type == "cpu"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_cuda_functionality():
    """CPU tensors are all moved to CUDA by a cuda-targeted step."""
    step = DeviceProcessorStep(device="cuda")
    transition = create_transition(
        observation={OBS_STATE: torch.randn(10), OBS_IMAGE: torch.randn(3, 224, 224)},
        action=torch.randn(5),
        reward=torch.tensor(1.0),
        done=torch.tensor(False),
        truncated=torch.tensor(False),
    )
    out = step(transition)
    # Observation tensors and every scalar field must now live on the GPU.
    assert out[TransitionKey.OBSERVATION][OBS_STATE].device.type == "cuda"
    assert out[TransitionKey.OBSERVATION][OBS_IMAGE].device.type == "cuda"
    for key in (TransitionKey.ACTION, TransitionKey.REWARD, TransitionKey.DONE, TransitionKey.TRUNCATED):
        assert out[key].device.type == "cuda"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_specific_cuda_device():
    """An explicit 'cuda:0' target pins tensors to device index 0."""
    step = DeviceProcessorStep(device="cuda:0")
    out = step(create_transition(observation={OBS_STATE: torch.randn(10)}, action=torch.randn(5)))
    # Both the observation and the action must sit on cuda index 0.
    for tensor in (out[TransitionKey.OBSERVATION][OBS_STATE], out[TransitionKey.ACTION]):
        assert tensor.device.type == "cuda"
        assert tensor.device.index == 0
def test_non_tensor_values():
    """Non-tensor entries pass through the device step untouched."""
    step = DeviceProcessorStep(device="cpu")
    obs = {
        OBS_STATE: torch.randn(10),
        "observation.metadata": {"key": "value"},  # non-tensor payload
        "observation.list": [1, 2, 3],  # non-tensor payload
    }
    out = step(
        create_transition(observation=obs, action=torch.randn(5), info={"episode": 1, "step": 42})
    )
    # Tensors are still tensors after processing.
    assert isinstance(out[TransitionKey.OBSERVATION][OBS_STATE], torch.Tensor)
    assert isinstance(out[TransitionKey.ACTION], torch.Tensor)
    # Everything that was not a tensor comes back verbatim.
    assert out[TransitionKey.OBSERVATION]["observation.metadata"] == {"key": "value"}
    assert out[TransitionKey.OBSERVATION]["observation.list"] == [1, 2, 3]
    assert out[TransitionKey.INFO] == {"episode": 1, "step": 42}
def test_none_values():
    """None observations or actions are passed through as None."""
    step = DeviceProcessorStep(device="cpu")
    # None observation: only the action is processed.
    out = step(create_transition(observation=None, action=torch.randn(5)))
    assert out[TransitionKey.OBSERVATION] is None
    assert out[TransitionKey.ACTION].device.type == "cpu"
    # None action: only the observation is processed.
    out = step(create_transition(observation={OBS_STATE: torch.randn(10)}, action=None))
    assert out[TransitionKey.OBSERVATION][OBS_STATE].device.type == "cpu"
    assert out[TransitionKey.ACTION] is None
def test_empty_observation():
    """An empty observation dict stays empty while the action is processed."""
    step = DeviceProcessorStep(device="cpu")
    out = step(create_transition(observation={}, action=torch.randn(5)))
    assert out[TransitionKey.OBSERVATION] == {}
    assert out[TransitionKey.ACTION].device.type == "cpu"
def test_scalar_tensors():
    """0-dimensional tensors keep their values through the device step."""
    step = DeviceProcessorStep(device="cpu")
    out = step(
        create_transition(
            observation={"observation.scalar": torch.tensor(1.5)},
            action=torch.tensor(2.0),
            reward=torch.tensor(0.5),
        )
    )
    # Values of scalar tensors must be unchanged.
    assert out[TransitionKey.OBSERVATION]["observation.scalar"].item() == 1.5
    assert out[TransitionKey.ACTION].item() == 2.0
    assert out[TransitionKey.REWARD].item() == 0.5
def test_dtype_preservation():
    """The device step never changes a tensor's dtype."""
    step = DeviceProcessorStep(device="cpu")
    obs = {
        "observation.float32": torch.randn(5, dtype=torch.float32),
        "observation.float64": torch.randn(5, dtype=torch.float64),
        "observation.int32": torch.randint(0, 10, (5,), dtype=torch.int32),
        "observation.bool": torch.tensor([True, False, True], dtype=torch.bool),
    }
    # Remember the original dtype of every observation entry.
    expected_dtypes = {key: tensor.dtype for key, tensor in obs.items()}
    out = step(create_transition(observation=obs, action=torch.randn(3, dtype=torch.float16)))
    for key, dtype in expected_dtypes.items():
        assert out[TransitionKey.OBSERVATION][key].dtype == dtype
    assert out[TransitionKey.ACTION].dtype == torch.float16
def test_shape_preservation():
    """The device step never changes a tensor's shape."""
    step = DeviceProcessorStep(device="cpu")
    expected_shapes = {
        "observation.1d": (10,),
        "observation.2d": (5, 10),
        "observation.3d": (3, 224, 224),
        "observation.4d": (2, 3, 224, 224),
    }
    # Build one random tensor per expected shape.
    obs = {key: torch.randn(*shape) for key, shape in expected_shapes.items()}
    out = step(create_transition(observation=obs, action=torch.randn(2, 5, 3)))
    for key, shape in expected_shapes.items():
        assert out[TransitionKey.OBSERVATION][key].shape == shape
    assert out[TransitionKey.ACTION].shape == (2, 5, 3)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_mixed_devices():
    """Tensors starting on CPU and CUDA all end up on the target CUDA device."""
    step = DeviceProcessorStep(device="cuda")
    obs = {
        "observation.cpu": torch.randn(5),  # starts on the CPU
        "observation.cuda": torch.randn(5).cuda(),  # already on the GPU
    }
    out = step(create_transition(observation=obs, action=torch.randn(3).cuda()))
    # Regardless of origin, everything must report a CUDA device.
    assert out[TransitionKey.OBSERVATION]["observation.cpu"].device.type == "cuda"
    assert out[TransitionKey.OBSERVATION]["observation.cuda"].device.type == "cuda"
    assert out[TransitionKey.ACTION].device.type == "cuda"
def test_non_blocking_flag():
    """non_blocking is False for CPU targets and True for CUDA targets."""
    assert DeviceProcessorStep(device="cpu").non_blocking is False
    if torch.cuda.is_available():
        # Any CUDA target, indexed or not, enables non-blocking transfers.
        for cuda_target in ("cuda", "cuda:0"):
            assert DeviceProcessorStep(device=cuda_target).non_blocking is True
def test_serialization_methods():
    """get_config/state_dict/load_state_dict/reset behave as documented."""
    target = "cuda" if torch.cuda.is_available() else "cpu"
    step = DeviceProcessorStep(device=target)
    # get_config exposes the constructor arguments.
    assert step.get_config() == {"device": target, "float_dtype": None}
    # The step is stateless: empty state_dict, no-op load and reset.
    assert step.state_dict() == {}
    step.load_state_dict({})
    assert step.device == target
    step.reset()
    assert step.device == target
def test_features():
    """transform_features must be the identity on the feature mapping."""
    step = DeviceProcessorStep(device="cpu")
    feats = {
        PipelineFeatureType.OBSERVATION: {OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(10,))},
        PipelineFeatureType.ACTION: {ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(5,))},
    }
    out = step.transform_features(feats)
    # Same content AND the very same object — no copy is made.
    assert out == feats
    assert out is feats
def test_integration_with_robot_processor():
    """Test integration with RobotProcessor.

    Builds a two-step pipeline (batch-dimension insertion followed by device
    placement) and checks that both effects are visible on the output.
    """
    from lerobot.processor import AddBatchDimensionProcessorStep
    from lerobot.utils.constants import OBS_STATE

    # Create a pipeline with DeviceProcessorStep
    device_processor = DeviceProcessorStep(device="cpu")
    batch_processor = AddBatchDimensionProcessorStep()
    processor = DataProcessorPipeline(
        steps=[batch_processor, device_processor],
        name="test_pipeline",
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    # Create test data (unbatched tensors)
    observation = {OBS_STATE: torch.randn(10)}
    action = torch.randn(5)
    transition = create_transition(observation=observation, action=action)
    result = processor(transition)
    # Check that tensors are batched and on correct device
    # The result has TransitionKey.OBSERVATION as the key, with observation.state inside
    assert result[TransitionKey.OBSERVATION][OBS_STATE].shape[0] == 1  # Batched
    assert result[TransitionKey.OBSERVATION][OBS_STATE].device.type == "cpu"
    assert result[TransitionKey.ACTION].shape[0] == 1  # Batched
    assert result[TransitionKey.ACTION].device.type == "cpu"
def test_save_and_load_pretrained():
    """Test saving and loading processor with DeviceProcessorStep.

    Round-trips the pipeline through save_pretrained/from_pretrained and checks
    that the step's configuration survives.
    """
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    processor = DeviceProcessorStep(device=device, float_dtype="float16")
    robot_processor = DataProcessorPipeline(steps=[processor], name="device_test_processor")
    with tempfile.TemporaryDirectory() as tmpdir:
        # Save
        robot_processor.save_pretrained(tmpdir)
        # Load
        loaded_processor = DataProcessorPipeline.from_pretrained(
            tmpdir, config_filename="device_test_processor.json"
        )
    assert len(loaded_processor.steps) == 1
    loaded_device_processor = loaded_processor.steps[0]
    assert isinstance(loaded_device_processor, DeviceProcessorStep)
    # Use getattr to access attributes safely
    assert (
        getattr(loaded_device_processor, "device", None) == device.split(":")[0]
    )  # Device normalizes cuda:0 to cuda
    assert getattr(loaded_device_processor, "float_dtype", None) == "float16"
def test_registry_functionality():
    """DeviceProcessorStep must be registered under the key 'device_processor'."""
    from lerobot.processor import ProcessorStepRegistry

    # A registry lookup should return the exact class object, not a copy.
    assert ProcessorStepRegistry.get("device_processor") is DeviceProcessorStep
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_performance_with_large_tensors():
    """Test performance with large tensors and non_blocking flag.

    Smoke test: large host-to-device transfers must complete without error and
    land on CUDA.
    """
    processor = DeviceProcessorStep(device="cuda")
    # Create large tensors
    observation = {
        "observation.large_image": torch.randn(10, 3, 512, 512),  # Large image batch
        "observation.features": torch.randn(10, 2048),  # Large feature vector
    }
    action = torch.randn(10, 100)  # Large action space
    transition = create_transition(observation=observation, action=action)
    # Process should not raise any errors
    result = processor(transition)
    # Verify all tensors are on CUDA
    assert result[TransitionKey.OBSERVATION]["observation.large_image"].device.type == "cuda"
    assert result[TransitionKey.OBSERVATION]["observation.features"].device.type == "cuda"
    assert result[TransitionKey.ACTION].device.type == "cuda"
def test_reward_done_truncated_types():
    """Scalar reward/done/truncated pass through; tensor ones are moved to the device."""
    step = DeviceProcessorStep(device="cpu")
    # Case 1: plain Python scalars must be forwarded untouched.
    scalar_out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(5)},
            action=torch.randn(3),
            reward=1.0,
            done=False,
            truncated=True,
        )
    )
    assert scalar_out[TransitionKey.REWARD] == 1.0
    assert scalar_out[TransitionKey.DONE] is False
    assert scalar_out[TransitionKey.TRUNCATED] is True
    # Case 2: tensor variants must remain tensors and land on the configured device.
    tensor_out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(5)},
            action=torch.randn(3),
            reward=torch.tensor(1.0),
            done=torch.tensor(False),
            truncated=torch.tensor(True),
        )
    )
    for key in (TransitionKey.REWARD, TransitionKey.DONE, TransitionKey.TRUNCATED):
        assert isinstance(tensor_out[key], torch.Tensor)
        assert tensor_out[key].device.type == "cpu"
def test_complementary_data_preserved():
    """Test that complementary_data values are preserved through processing.

    Non-tensor entries must pass through unchanged; tensor entries (here the
    pad mask) are processed like any other tensor — they stay tensors, keep
    their values, and sit on the configured device. The previous version of
    this test created the pad-mask tensor but never asserted on it, and
    carried a stale note claiming tensors in complementary_data are not
    processed (contradicted by test_complementary_data_float_dtype_conversion).
    """
    processor = DeviceProcessorStep(device="cpu")
    complementary_data = {
        "task": "pick_object",
        "episode_id": 42,
        "metadata": {"sensor": "camera_1"},
        "observation_is_pad": torch.tensor([False, False, True]),  # This should be moved to device
    }
    transition = create_transition(
        observation={OBS_STATE: torch.randn(5)}, complementary_data=complementary_data
    )
    result = processor(transition)
    # Check that complementary_data is preserved
    assert TransitionKey.COMPLEMENTARY_DATA in result
    assert result[TransitionKey.COMPLEMENTARY_DATA]["task"] == "pick_object"
    assert result[TransitionKey.COMPLEMENTARY_DATA]["episode_id"] == 42
    assert result[TransitionKey.COMPLEMENTARY_DATA]["metadata"] == {"sensor": "camera_1"}
    # The tensor entry must survive as a tensor on the target device with its values intact.
    pad_mask = result[TransitionKey.COMPLEMENTARY_DATA]["observation_is_pad"]
    assert isinstance(pad_mask, torch.Tensor)
    assert pad_mask.device.type == "cpu"
    assert torch.equal(pad_mask, torch.tensor([False, False, True]))
def test_float_dtype_conversion():
    """float_dtype='float16' downcasts floating tensors only; ints and bools keep their dtype."""
    step = DeviceProcessorStep(device="cpu", float_dtype="float16")
    obs = {
        "observation.float32": torch.randn(5, dtype=torch.float32),
        "observation.float64": torch.randn(5, dtype=torch.float64),
        "observation.int32": torch.randint(0, 10, (5,), dtype=torch.int32),
        "observation.int64": torch.randint(0, 10, (5,), dtype=torch.int64),
        "observation.bool": torch.tensor([True, False, True], dtype=torch.bool),
    }
    out = step(
        create_transition(
            observation=obs,
            action=torch.randn(3, dtype=torch.float32),
            reward=torch.tensor(1.0, dtype=torch.float32),
        )
    )
    out_obs = out[TransitionKey.OBSERVATION]
    # Every floating-point tensor ends up as float16.
    assert out_obs["observation.float32"].dtype == torch.float16
    assert out_obs["observation.float64"].dtype == torch.float16
    assert out[TransitionKey.ACTION].dtype == torch.float16
    assert out[TransitionKey.REWARD].dtype == torch.float16
    # Integer and boolean tensors are left untouched.
    assert out_obs["observation.int32"].dtype == torch.int32
    assert out_obs["observation.int64"].dtype == torch.int64
    assert out_obs["observation.bool"].dtype == torch.bool
def test_float_dtype_none():
    """With float_dtype=None every tensor keeps its original dtype."""
    step = DeviceProcessorStep(device="cpu", float_dtype=None)
    obs = {
        "observation.float32": torch.randn(5, dtype=torch.float32),
        "observation.float64": torch.randn(5, dtype=torch.float64),
        "observation.int32": torch.randint(0, 10, (5,), dtype=torch.int32),
    }
    out = step(create_transition(observation=obs, action=torch.randn(3, dtype=torch.float64)))
    # No conversion of any kind should have happened.
    expected_dtypes = {
        "observation.float32": torch.float32,
        "observation.float64": torch.float64,
        "observation.int32": torch.int32,
    }
    for key, dtype in expected_dtypes.items():
        assert out[TransitionKey.OBSERVATION][key].dtype == dtype
    assert out[TransitionKey.ACTION].dtype == torch.float64
def test_float_dtype_bfloat16():
    """float_dtype='bfloat16' casts both observation and action floats to bfloat16."""
    step = DeviceProcessorStep(device="cpu", float_dtype="bfloat16")
    out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(5, dtype=torch.float32)},
            action=torch.randn(3, dtype=torch.float64),
        )
    )
    assert out[TransitionKey.OBSERVATION][OBS_STATE].dtype == torch.bfloat16
    assert out[TransitionKey.ACTION].dtype == torch.bfloat16
def test_float_dtype_float64():
    """float_dtype='float64' upcasts lower-precision floats to float64."""
    step = DeviceProcessorStep(device="cpu", float_dtype="float64")
    out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(5, dtype=torch.float16)},
            action=torch.randn(3, dtype=torch.float32),
        )
    )
    assert out[TransitionKey.OBSERVATION][OBS_STATE].dtype == torch.float64
    assert out[TransitionKey.ACTION].dtype == torch.float64
def test_float_dtype_invalid():
    """An unrecognized float_dtype string must be rejected at construction time."""
    with pytest.raises(ValueError, match="Invalid float_dtype 'invalid_dtype'"):
        DeviceProcessorStep(device="cpu", float_dtype="invalid_dtype")
def test_float_dtype_aliases():
    """The 'half'/'float'/'double' aliases resolve to their canonical torch dtypes."""
    alias_to_dtype = {
        "half": torch.float16,
        "float": torch.float32,
        "double": torch.float64,
    }
    for alias, expected in alias_to_dtype.items():
        step = DeviceProcessorStep(device="cpu", float_dtype=alias)
        assert step._target_float_dtype == expected
def test_float_dtype_with_mixed_tensors():
    """Only floating tensors are cast; uint8 images, bool masks and int indices pass through."""
    step = DeviceProcessorStep(device="cpu", float_dtype="float32")
    obs = {
        OBS_IMAGE: torch.randint(0, 255, (3, 64, 64), dtype=torch.uint8),  # keeps uint8
        OBS_STATE: torch.randn(10, dtype=torch.float64),  # cast to float32
        "observation.mask": torch.tensor([True, False, True], dtype=torch.bool),  # keeps bool
        "observation.indices": torch.tensor([1, 2, 3], dtype=torch.long),  # keeps long
    }
    out = step(create_transition(observation=obs, action=torch.randn(5, dtype=torch.float16)))
    out_obs = out[TransitionKey.OBSERVATION]
    # Non-floating dtypes survive untouched.
    assert out_obs[OBS_IMAGE].dtype == torch.uint8
    assert out_obs["observation.mask"].dtype == torch.bool
    assert out_obs["observation.indices"].dtype == torch.long
    # Floating tensors — state and action — are converted to float32.
    assert out_obs[OBS_STATE].dtype == torch.float32
    assert out[TransitionKey.ACTION].dtype == torch.float32
def test_float_dtype_serialization():
    """get_config must reflect the configured float_dtype, including None."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # With an explicit dtype the config carries its string name.
    configured = DeviceProcessorStep(device=device, float_dtype="float16")
    assert configured.get_config() == {"device": device, "float_dtype": "float16"}
    # With no dtype the config carries None.
    unconfigured = DeviceProcessorStep(device="cpu", float_dtype=None)
    assert unconfigured.get_config() == {"device": "cpu", "float_dtype": None}
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_float_dtype_with_cuda():
    """Test float dtype conversion combined with CUDA device.

    Device placement and dtype casting happen in the same pass: floats are
    moved AND downcast, while int tensors are moved but keep their dtype.
    """
    processor = DeviceProcessorStep(device="cuda", float_dtype="float16")
    # Create tensors on CPU with different dtypes
    observation = {
        "observation.float32": torch.randn(5, dtype=torch.float32),
        "observation.int64": torch.tensor([1, 2, 3], dtype=torch.int64),
    }
    action = torch.randn(3, dtype=torch.float64)
    transition = create_transition(observation=observation, action=action)
    result = processor(transition)
    # Check that tensors are on CUDA and float types are converted
    assert result[TransitionKey.OBSERVATION]["observation.float32"].device.type == "cuda"
    assert result[TransitionKey.OBSERVATION]["observation.float32"].dtype == torch.float16
    assert result[TransitionKey.OBSERVATION]["observation.int64"].device.type == "cuda"
    assert result[TransitionKey.OBSERVATION]["observation.int64"].dtype == torch.int64  # Unchanged
    assert result[TransitionKey.ACTION].device.type == "cuda"
    assert result[TransitionKey.ACTION].dtype == torch.float16
def test_complementary_data_index_fields():
    """Tensor index/task_index entries in complementary_data are processed; others pass through."""
    step = DeviceProcessorStep(device="cpu")
    comp_data = {
        "task": ["pick_cube"],
        "index": torch.tensor([42], dtype=torch.int64),
        "task_index": torch.tensor([3], dtype=torch.int64),
        "episode_id": 123,  # plain int, must survive untouched
    }
    out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(1, 7)},
            action=torch.randn(1, 4),
            complementary_data=comp_data,
        )
    )
    processed = out[TransitionKey.COMPLEMENTARY_DATA]
    # Both tensor fields remain tensors on CPU with their values intact.
    for field in ("index", "task_index"):
        assert isinstance(processed[field], torch.Tensor)
        assert processed[field].device.type == "cpu"
        assert torch.equal(processed[field], comp_data[field])
    # Non-tensor entries are forwarded unchanged.
    assert processed["task"] == ["pick_cube"]
    assert processed["episode_id"] == 123
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_complementary_data_index_fields_cuda():
    """Test moving index and task_index fields to CUDA.

    CPU tensors stored in complementary_data must follow the configured
    device just like observation/action tensors.
    """
    processor = DeviceProcessorStep(device="cuda:0")
    # Create CPU tensors
    complementary_data = {
        "index": torch.tensor([100, 101], dtype=torch.int64),
        "task_index": torch.tensor([5], dtype=torch.int64),
    }
    transition = create_transition(complementary_data=complementary_data)
    result = processor(transition)
    processed_comp_data = result[TransitionKey.COMPLEMENTARY_DATA]
    # Check tensors moved to CUDA (specifically device index 0)
    assert processed_comp_data["index"].device.type == "cuda"
    assert processed_comp_data["index"].device.index == 0
    assert processed_comp_data["task_index"].device.type == "cuda"
    assert processed_comp_data["task_index"].device.index == 0
def test_complementary_data_without_index_fields():
    """complementary_data lacking index/task_index keys is handled without error."""
    step = DeviceProcessorStep(device="cpu")
    comp_data = {"task": ["navigate"], "episode_id": 456}
    out = step(create_transition(complementary_data=comp_data))
    # Non-tensor fields must come back exactly as they went in.
    processed = out[TransitionKey.COMPLEMENTARY_DATA]
    assert processed["task"] == ["navigate"]
    assert processed["episode_id"] == 456
def test_complementary_data_mixed_tensors():
    """A mix of tensors and arbitrary Python values in complementary_data is handled."""
    step = DeviceProcessorStep(device="cpu")
    comp_data = {
        "task": ["pick_and_place"],
        "index": torch.tensor([42], dtype=torch.int64),
        "task_index": torch.tensor([3], dtype=torch.int64),
        "metrics": [1.0, 2.0, 3.0],  # list, not a tensor
        "config": {"speed": "fast"},  # nested dict
        "episode_id": 789,  # plain int
    }
    out = step(create_transition(complementary_data=comp_data))
    processed = out[TransitionKey.COMPLEMENTARY_DATA]
    # Tensor entries are still tensors after processing.
    assert isinstance(processed["index"], torch.Tensor)
    assert isinstance(processed["task_index"], torch.Tensor)
    # Lists, dicts and ints are forwarded verbatim.
    assert processed["task"] == ["pick_and_place"]
    assert processed["metrics"] == [1.0, 2.0, 3.0]
    assert processed["config"] == {"speed": "fast"}
    assert processed["episode_id"] == 789
def test_complementary_data_float_dtype_conversion():
    """float_dtype conversion hits float tensors in complementary_data but spares int ones."""
    step = DeviceProcessorStep(device="cpu", float_dtype="float16")
    comp_data = {
        "index": torch.tensor([42], dtype=torch.int64),
        "task_index": torch.tensor([3], dtype=torch.int64),
        "float_tensor": torch.tensor([1.5, 2.5], dtype=torch.float32),  # expected to be downcast
    }
    out = step(create_transition(complementary_data=comp_data))
    processed = out[TransitionKey.COMPLEMENTARY_DATA]
    # Integer tensors keep int64; the floating tensor is converted to float16.
    assert processed["index"].dtype == torch.int64
    assert processed["task_index"].dtype == torch.int64
    assert processed["float_tensor"].dtype == torch.float16
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_complementary_data_full_pipeline_cuda():
    """Test full transition with complementary_data on CUDA.

    All tensor fields — observation, action, reward, done, and the tensors in
    complementary_data — must move to cuda:0; float tensors are additionally
    downcast to float16 while int tensors keep their dtype.
    """
    processor = DeviceProcessorStep(device="cuda:0", float_dtype="float16")
    # Create full transition with mixed CPU tensors
    observation = {OBS_STATE: torch.randn(1, 7, dtype=torch.float32)}
    action = torch.randn(1, 4, dtype=torch.float32)
    reward = torch.tensor(1.5, dtype=torch.float32)
    done = torch.tensor(False)
    complementary_data = {
        "task": ["reach_target"],
        "index": torch.tensor([1000], dtype=torch.int64),
        "task_index": torch.tensor([10], dtype=torch.int64),
    }
    transition = create_transition(
        observation=observation,
        action=action,
        reward=reward,
        done=done,
        complementary_data=complementary_data,
    )
    result = processor(transition)
    # Check all components moved to CUDA
    assert result[TransitionKey.OBSERVATION][OBS_STATE].device.type == "cuda"
    assert result[TransitionKey.ACTION].device.type == "cuda"
    assert result[TransitionKey.REWARD].device.type == "cuda"
    assert result[TransitionKey.DONE].device.type == "cuda"
    # Check complementary_data tensors
    processed_comp_data = result[TransitionKey.COMPLEMENTARY_DATA]
    assert processed_comp_data["index"].device.type == "cuda"
    assert processed_comp_data["task_index"].device.type == "cuda"
    # Check float conversion happened for float tensors
    assert result[TransitionKey.OBSERVATION][OBS_STATE].dtype == torch.float16
    assert result[TransitionKey.ACTION].dtype == torch.float16
    assert result[TransitionKey.REWARD].dtype == torch.float16
    # Check int tensors kept their dtype
    assert processed_comp_data["index"].dtype == torch.int64
    assert processed_comp_data["task_index"].dtype == torch.int64
def test_complementary_data_empty():
    """An empty complementary_data dict stays an empty dict after processing."""
    step = DeviceProcessorStep(device="cpu")
    out = step(
        create_transition(observation={OBS_STATE: torch.randn(1, 7)}, complementary_data={})
    )
    assert out[TransitionKey.COMPLEMENTARY_DATA] == {}
def test_complementary_data_none():
    """complementary_data=None is normalized to an empty dict in the output."""
    step = DeviceProcessorStep(device="cpu")
    out = step(
        create_transition(observation={OBS_STATE: torch.randn(1, 7)}, complementary_data=None)
    )
    assert out[TransitionKey.COMPLEMENTARY_DATA] == {}
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_preserves_gpu_placement():
    """Test that DeviceProcessorStep preserves GPU placement when tensor is already on GPU.

    Also verifies no copy is made for a tensor already on the target device by
    comparing CUDA data pointers. The previous `torch.equal` check only compared
    values, which cannot distinguish a fresh copy from shared storage.
    """
    processor = DeviceProcessorStep(device="cuda:0")
    # Create tensors already on GPU
    observation = {
        OBS_STATE: torch.randn(10).cuda(),  # Already on GPU
        OBS_IMAGE: torch.randn(3, 224, 224).cuda(),  # Already on GPU
    }
    action = torch.randn(5).cuda()  # Already on GPU
    transition = create_transition(observation=observation, action=action)
    result = processor(transition)
    # Check that tensors remain on their original GPU
    assert result[TransitionKey.OBSERVATION][OBS_STATE].device.type == "cuda"
    assert result[TransitionKey.OBSERVATION][OBS_IMAGE].device.type == "cuda"
    assert result[TransitionKey.ACTION].device.type == "cuda"
    # Values are unchanged...
    assert torch.equal(result[TransitionKey.OBSERVATION][OBS_STATE], observation[OBS_STATE])
    # ...and storage is shared: `Tensor.to` returns the input tensor unchanged when
    # device and dtype already match, so the data pointers must be identical.
    assert (
        result[TransitionKey.OBSERVATION][OBS_STATE].data_ptr() == observation[OBS_STATE].data_ptr()
    )
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_multi_gpu_preservation():
    """Test that DeviceProcessorStep preserves placement on different GPUs in multi-GPU setup.

    A step configured for cuda:0 must leave tensors that already sit on another
    GPU where they are, but still pull GPU tensors down when configured for CPU.
    """
    # Test 1: GPU-to-GPU preservation (cuda:0 config, cuda:1 input)
    processor_gpu = DeviceProcessorStep(device="cuda:0")
    # Create tensors on cuda:1 (simulating Accelerate placement)
    cuda1_device = torch.device("cuda:1")
    observation = {
        OBS_STATE: torch.randn(10).to(cuda1_device),
        OBS_IMAGE: torch.randn(3, 224, 224).to(cuda1_device),
    }
    action = torch.randn(5).to(cuda1_device)
    transition = create_transition(observation=observation, action=action)
    result = processor_gpu(transition)
    # Check that tensors remain on cuda:1 (not moved to cuda:0)
    assert result[TransitionKey.OBSERVATION][OBS_STATE].device == cuda1_device
    assert result[TransitionKey.OBSERVATION][OBS_IMAGE].device == cuda1_device
    assert result[TransitionKey.ACTION].device == cuda1_device
    # Test 2: GPU-to-CPU should move to CPU (not preserve GPU)
    processor_cpu = DeviceProcessorStep(device="cpu")
    transition_gpu = create_transition(
        observation={OBS_STATE: torch.randn(10).cuda()}, action=torch.randn(5).cuda()
    )
    result_cpu = processor_cpu(transition_gpu)
    # Check that tensors are moved to CPU
    assert result_cpu[TransitionKey.OBSERVATION][OBS_STATE].device.type == "cpu"
    assert result_cpu[TransitionKey.ACTION].device.type == "cpu"
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_multi_gpu_with_cpu_tensors():
    """Test that CPU tensors are moved to configured device even in multi-GPU context.

    CPU tensors follow the configured cuda:1 target, while tensors already on
    some GPU keep their original placement.
    """
    # Processor configured for cuda:1
    processor = DeviceProcessorStep(device="cuda:1")
    # Mix of CPU and GPU tensors
    observation = {
        "observation.cpu": torch.randn(10),  # CPU tensor
        "observation.gpu0": torch.randn(10).cuda(0),  # Already on cuda:0
        "observation.gpu1": torch.randn(10).cuda(1),  # Already on cuda:1
    }
    action = torch.randn(5)  # CPU tensor
    transition = create_transition(observation=observation, action=action)
    result = processor(transition)
    # CPU tensor should move to configured device (cuda:1)
    assert result[TransitionKey.OBSERVATION]["observation.cpu"].device.type == "cuda"
    assert result[TransitionKey.OBSERVATION]["observation.cpu"].device.index == 1
    assert result[TransitionKey.ACTION].device.type == "cuda"
    assert result[TransitionKey.ACTION].device.index == 1
    # GPU tensors should stay on their original devices
    assert result[TransitionKey.OBSERVATION]["observation.gpu0"].device.index == 0
    assert result[TransitionKey.OBSERVATION]["observation.gpu1"].device.index == 1
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_multi_gpu_with_float_dtype():
    """Test float dtype conversion works correctly with multi-GPU preservation.

    Dtype conversion applies to every float tensor regardless of which GPU it
    stays on; only the CPU tensor changes device.
    """
    processor = DeviceProcessorStep(device="cuda:0", float_dtype="float16")
    # Create float tensors on different GPUs
    observation = {
        "observation.gpu0": torch.randn(5, dtype=torch.float32).cuda(0),
        "observation.gpu1": torch.randn(5, dtype=torch.float32).cuda(1),
        "observation.cpu": torch.randn(5, dtype=torch.float32),  # CPU
    }
    transition = create_transition(observation=observation)
    result = processor(transition)
    # Check device placement
    assert result[TransitionKey.OBSERVATION]["observation.gpu0"].device.index == 0
    assert result[TransitionKey.OBSERVATION]["observation.gpu1"].device.index == 1
    assert result[TransitionKey.OBSERVATION]["observation.cpu"].device.index == 0  # Moved to cuda:0
    # Check dtype conversion happened for all
    assert result[TransitionKey.OBSERVATION]["observation.gpu0"].dtype == torch.float16
    assert result[TransitionKey.OBSERVATION]["observation.gpu1"].dtype == torch.float16
    assert result[TransitionKey.OBSERVATION]["observation.cpu"].dtype == torch.float16
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_simulated_accelerate_scenario():
    """Test a scenario simulating how Accelerate would use the processor.

    Each simulated process has a step configured for cuda:0, but its data was
    already placed on that process's own GPU; the step must not move it.
    """
    # Simulate different processes getting different GPU assignments
    for gpu_id in range(min(torch.cuda.device_count(), 2)):
        # Each "process" has a processor configured for cuda:0
        # but data comes in already placed on the process's GPU
        processor = DeviceProcessorStep(device="cuda:0")
        # Simulate data already placed by Accelerate
        device = torch.device(f"cuda:{gpu_id}")
        observation = {OBS_STATE: torch.randn(1, 10).to(device)}
        action = torch.randn(1, 5).to(device)
        transition = create_transition(observation=observation, action=action)
        result = processor(transition)
        # Verify data stays on the GPU where Accelerate placed it
        assert result[TransitionKey.OBSERVATION][OBS_STATE].device == device
        assert result[TransitionKey.ACTION].device == device
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_policy_processor_integration():
    """Test integration with policy processors - input on GPU, output on CPU.

    The preprocessor normalizes, batches, and moves data to CUDA; the
    postprocessor moves model output back to CPU and unnormalizes the action.
    """
    from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
    from lerobot.processor import (
        AddBatchDimensionProcessorStep,
        NormalizerProcessorStep,
        UnnormalizerProcessorStep,
    )
    from lerobot.utils.constants import ACTION, OBS_STATE

    # Create features and stats
    features = {
        OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(10,)),
        ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(5,)),
    }
    # Zero-mean / unit-std stats make normalization an identity transform here.
    stats = {
        OBS_STATE: {"mean": torch.zeros(10), "std": torch.ones(10)},
        ACTION: {"mean": torch.zeros(5), "std": torch.ones(5)},
    }
    norm_map = {FeatureType.STATE: NormalizationMode.MEAN_STD, FeatureType.ACTION: NormalizationMode.MEAN_STD}
    # Create input processor (preprocessor) that moves to GPU
    input_processor = DataProcessorPipeline(
        steps=[
            NormalizerProcessorStep(features=features, norm_map=norm_map, stats=stats),
            AddBatchDimensionProcessorStep(),
            DeviceProcessorStep(device="cuda"),
        ],
        name="test_preprocessor",
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    # Create output processor (postprocessor) that moves to CPU
    output_processor = DataProcessorPipeline(
        steps=[
            DeviceProcessorStep(device="cpu"),
            UnnormalizerProcessorStep(features={ACTION: features[ACTION]}, norm_map=norm_map, stats=stats),
        ],
        name="test_postprocessor",
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    # Test data on CPU
    observation = {OBS_STATE: torch.randn(10)}
    action = torch.randn(5)
    transition = create_transition(observation=observation, action=action)
    # Process through input processor
    input_result = input_processor(transition)
    # Verify tensors are on GPU and batched
    # The result has TransitionKey.OBSERVATION as the key, with observation.state inside
    assert input_result[TransitionKey.OBSERVATION][OBS_STATE].device.type == "cuda"
    assert input_result[TransitionKey.OBSERVATION][OBS_STATE].shape[0] == 1
    assert input_result[TransitionKey.ACTION].device.type == "cuda"
    assert input_result[TransitionKey.ACTION].shape[0] == 1
    # Simulate model output on GPU
    model_output = create_transition(action=torch.randn(1, 5).cuda())
    # Process through output processor
    output_result = output_processor(model_output)
    # Verify action is back on CPU and unnormalized
    assert output_result[TransitionKey.ACTION].device.type == "cpu"
    assert output_result[TransitionKey.ACTION].shape == (1, 5)
@pytest.mark.skipif(not torch.backends.mps.is_available(), reason="MPS not available")
def test_mps_float64_compatibility():
    """Test MPS device compatibility with float64 tensors (automatic conversion to float32).

    MPS has no float64 support, so the step is expected to downcast float64 to
    float32 automatically while leaving all other dtypes untouched.
    """
    processor = DeviceProcessorStep(device="mps")
    # Create tensors with different dtypes, including float64 which MPS doesn't support
    observation = {
        "observation.float64": torch.randn(5, dtype=torch.float64),  # Should be converted to float32
        "observation.float32": torch.randn(5, dtype=torch.float32),  # Should remain float32
        "observation.float16": torch.randn(5, dtype=torch.float16),  # Should remain float16
        "observation.int64": torch.randint(0, 10, (5,), dtype=torch.int64),  # Should remain int64
        "observation.bool": torch.tensor([True, False, True], dtype=torch.bool),  # Should remain bool
    }
    action = torch.randn(3, dtype=torch.float64)  # Should be converted to float32
    reward = torch.tensor(1.0, dtype=torch.float64)  # Should be converted to float32
    done = torch.tensor(False, dtype=torch.bool)  # Should remain bool
    truncated = torch.tensor(True, dtype=torch.bool)  # Should remain bool
    transition = create_transition(
        observation=observation, action=action, reward=reward, done=done, truncated=truncated
    )
    result = processor(transition)
    # Check that all tensors are on MPS device
    assert result[TransitionKey.OBSERVATION]["observation.float64"].device.type == "mps"
    assert result[TransitionKey.OBSERVATION]["observation.float32"].device.type == "mps"
    assert result[TransitionKey.OBSERVATION]["observation.float16"].device.type == "mps"
    assert result[TransitionKey.OBSERVATION]["observation.int64"].device.type == "mps"
    assert result[TransitionKey.OBSERVATION]["observation.bool"].device.type == "mps"
    assert result[TransitionKey.ACTION].device.type == "mps"
    assert result[TransitionKey.REWARD].device.type == "mps"
    assert result[TransitionKey.DONE].device.type == "mps"
    assert result[TransitionKey.TRUNCATED].device.type == "mps"
    # Check that float64 tensors were automatically converted to float32
    assert result[TransitionKey.OBSERVATION]["observation.float64"].dtype == torch.float32
    assert result[TransitionKey.ACTION].dtype == torch.float32
    assert result[TransitionKey.REWARD].dtype == torch.float32
    # Check that other dtypes were preserved
    assert result[TransitionKey.OBSERVATION]["observation.float32"].dtype == torch.float32
    assert result[TransitionKey.OBSERVATION]["observation.float16"].dtype == torch.float16
    assert result[TransitionKey.OBSERVATION]["observation.int64"].dtype == torch.int64
    assert result[TransitionKey.OBSERVATION]["observation.bool"].dtype == torch.bool
    assert result[TransitionKey.DONE].dtype == torch.bool
    assert result[TransitionKey.TRUNCATED].dtype == torch.bool
@pytest.mark.skipif(not torch.backends.mps.is_available(), reason="MPS not available")
def test_mps_float64_with_complementary_data():
    """Test MPS float64 conversion with complementary_data tensors.

    The automatic float64 → float32 fallback for MPS must apply to tensors in
    complementary_data as well, while int tensors keep their dtype.
    """
    processor = DeviceProcessorStep(device="mps")
    # Create complementary_data with float64 tensors
    complementary_data = {
        "task": ["pick_object"],
        "index": torch.tensor([42], dtype=torch.int64),  # Should remain int64
        "task_index": torch.tensor([3], dtype=torch.int64),  # Should remain int64
        "float64_tensor": torch.tensor([1.5, 2.5], dtype=torch.float64),  # Should convert to float32
        "float32_tensor": torch.tensor([3.5], dtype=torch.float32),  # Should remain float32
    }
    transition = create_transition(
        observation={OBS_STATE: torch.randn(5, dtype=torch.float64)},
        action=torch.randn(3, dtype=torch.float64),
        complementary_data=complementary_data,
    )
    result = processor(transition)
    # Check that all tensors are on MPS device
    assert result[TransitionKey.OBSERVATION][OBS_STATE].device.type == "mps"
    assert result[TransitionKey.ACTION].device.type == "mps"
    processed_comp_data = result[TransitionKey.COMPLEMENTARY_DATA]
    assert processed_comp_data["index"].device.type == "mps"
    assert processed_comp_data["task_index"].device.type == "mps"
    assert processed_comp_data["float64_tensor"].device.type == "mps"
    assert processed_comp_data["float32_tensor"].device.type == "mps"
    # Check dtype conversions
    assert result[TransitionKey.OBSERVATION][OBS_STATE].dtype == torch.float32  # Converted
    assert result[TransitionKey.ACTION].dtype == torch.float32  # Converted
    assert processed_comp_data["float64_tensor"].dtype == torch.float32  # Converted
    assert processed_comp_data["float32_tensor"].dtype == torch.float32  # Unchanged
    assert processed_comp_data["index"].dtype == torch.int64  # Unchanged
    assert processed_comp_data["task_index"].dtype == torch.int64  # Unchanged
    # Check non-tensor data preserved
    assert processed_comp_data["task"] == ["pick_object"]
@pytest.mark.skipif(not torch.backends.mps.is_available(), reason="MPS not available")
def test_mps_with_explicit_float_dtype():
    """Test MPS device with explicit float_dtype setting."""
    # An explicitly requested float_dtype must still be honoured on MPS.
    step = DeviceProcessorStep(device="mps", float_dtype="float16")

    obs = {
        # float64 is first made MPS-safe (float32), then cast to float16.
        "observation.float64": torch.randn(5, dtype=torch.float64),
        "observation.float32": torch.randn(5, dtype=torch.float32),  # straight to float16
        "observation.int32": torch.randint(0, 10, (5,), dtype=torch.int32),  # dtype untouched
    }
    out = step(create_transition(observation=obs, action=torch.randn(3, dtype=torch.float64)))

    # Everything lands on the MPS device.
    for key in ("observation.float64", "observation.float32", "observation.int32"):
        assert out[TransitionKey.OBSERVATION][key].device.type == "mps"
    assert out[TransitionKey.ACTION].device.type == "mps"

    # All floating tensors end up as the requested float16.
    assert out[TransitionKey.OBSERVATION]["observation.float64"].dtype == torch.float16
    assert out[TransitionKey.OBSERVATION]["observation.float32"].dtype == torch.float16
    assert out[TransitionKey.ACTION].dtype == torch.float16

    # Integer tensors keep their original dtype.
    assert out[TransitionKey.OBSERVATION]["observation.int32"].dtype == torch.int32
@pytest.mark.skipif(not torch.backends.mps.is_available(), reason="MPS not available")
def test_mps_serialization():
    """Test that MPS device processor can be serialized and loaded correctly."""
    step = DeviceProcessorStep(device="mps", float_dtype="float32")

    # The config round-trips the constructor arguments.
    assert step.get_config() == {"device": "mps", "float_dtype": "float32"}

    # The step is stateless: empty state_dict, and loading one is a no-op.
    assert step.state_dict() == {}
    step.load_state_dict({})
    assert step.device == "mps"
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_device_processor.py",
"license": "Apache License 2.0",
"lines": 884,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_diffusion_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Diffusion policy processor."""
import tempfile
import pytest
import torch
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
from lerobot.policies.diffusion.processor_diffusion import make_diffusion_pre_post_processors
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DataProcessorPipeline,
DeviceProcessorStep,
NormalizerProcessorStep,
RenameObservationsProcessorStep,
TransitionKey,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import create_transition, transition_to_batch
from lerobot.utils.constants import ACTION, OBS_IMAGE, OBS_STATE
def create_default_config():
    """Create a default Diffusion configuration for testing."""
    cfg = DiffusionConfig()
    # One proprioceptive state and one RGB camera as inputs, one action output.
    cfg.input_features = {
        OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(7,)),
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    cfg.output_features = {ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(6,))}
    # States are standardized, images left as-is, actions min-max scaled.
    cfg.normalization_mapping = {
        FeatureType.STATE: NormalizationMode.MEAN_STD,
        FeatureType.VISUAL: NormalizationMode.IDENTITY,
        FeatureType.ACTION: NormalizationMode.MIN_MAX,
    }
    cfg.device = "cpu"
    return cfg
def create_default_stats():
    """Create default dataset statistics for testing."""
    stats = {
        OBS_STATE: {"mean": torch.zeros(7), "std": torch.ones(7)},
        OBS_IMAGE: {},  # images use IDENTITY normalization, so no stats needed
        ACTION: {"min": torch.full((6,), -1.0), "max": torch.ones(6)},
    }
    return stats
def test_make_diffusion_processor_basic():
    """Test basic creation of Diffusion processor."""
    pre, post = make_diffusion_pre_post_processors(create_default_config(), create_default_stats())

    # Pipelines carry the canonical names.
    assert pre.name == "policy_preprocessor"
    assert post.name == "policy_postprocessor"

    # Preprocessor order: rename -> add batch dim -> device -> normalize.
    expected_pre = (
        RenameObservationsProcessorStep,
        AddBatchDimensionProcessorStep,
        DeviceProcessorStep,
        NormalizerProcessorStep,
    )
    assert len(pre.steps) == len(expected_pre)
    for step, expected_cls in zip(pre.steps, expected_pre):
        assert isinstance(step, expected_cls)

    # Postprocessor order: unnormalize -> device.
    assert len(post.steps) == 2
    assert isinstance(post.steps[0], UnnormalizerProcessorStep)
    assert isinstance(post.steps[1], DeviceProcessorStep)
def test_diffusion_processor_with_images():
    """Test Diffusion processor with image observations."""
    pre, _ = make_diffusion_pre_post_processors(create_default_config(), create_default_stats())

    # Unbatched state + image observation.
    batch = transition_to_batch(
        create_transition(
            {OBS_STATE: torch.randn(7), OBS_IMAGE: torch.randn(3, 224, 224)},
            torch.randn(6),
        )
    )

    out = pre(batch)

    # A leading batch dimension of 1 is added everywhere.
    assert out[OBS_STATE].shape == (1, 7)
    assert out[OBS_IMAGE].shape == (1, 3, 224, 224)
    assert out[TransitionKey.ACTION.value].shape == (1, 6)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_diffusion_processor_cuda():
    """Test Diffusion processor with CUDA device."""
    cfg = create_default_config()
    cfg.device = "cuda"
    pre, post = make_diffusion_pre_post_processors(cfg, create_default_stats())

    # Inputs start out on the CPU.
    batch = transition_to_batch(
        create_transition(
            {OBS_STATE: torch.randn(7), OBS_IMAGE: torch.randn(3, 224, 224)},
            torch.randn(6),
        )
    )

    out = pre(batch)

    # The preprocessor moves everything onto CUDA...
    assert out[OBS_STATE].device.type == "cuda"
    assert out[OBS_IMAGE].device.type == "cuda"
    assert out[TransitionKey.ACTION.value].device.type == "cuda"

    # ...and the postprocessor brings the action back to the CPU.
    assert post(out[TransitionKey.ACTION.value]).device.type == "cpu"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_diffusion_processor_accelerate_scenario():
    """Test Diffusion processor in simulated Accelerate scenario."""
    cfg = create_default_config()
    cfg.device = "cuda:0"
    pre, _ = make_diffusion_pre_post_processors(cfg, create_default_stats())

    # As under Accelerate, the batch already lives on the GPU.
    gpu = torch.device("cuda:0")
    batch = transition_to_batch(
        create_transition(
            {
                OBS_STATE: torch.randn(1, 7).to(gpu),
                OBS_IMAGE: torch.randn(1, 3, 224, 224).to(gpu),
            },
            torch.randn(1, 6).to(gpu),
        )
    )

    out = pre(batch)

    # Data must stay on the same GPU throughout.
    assert out[OBS_STATE].device == gpu
    assert out[OBS_IMAGE].device == gpu
    assert out[TransitionKey.ACTION.value].device == gpu
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_diffusion_processor_multi_gpu():
    """Test Diffusion processor with multi-GPU setup."""
    cfg = create_default_config()
    cfg.device = "cuda:0"
    pre, _ = make_diffusion_pre_post_processors(cfg, create_default_stats())

    # The batch sits on a different GPU than the configured one.
    other_gpu = torch.device("cuda:1")
    batch = transition_to_batch(
        create_transition(
            {
                OBS_STATE: torch.randn(1, 7).to(other_gpu),
                OBS_IMAGE: torch.randn(1, 3, 224, 224).to(other_gpu),
            },
            torch.randn(1, 6).to(other_gpu),
        )
    )

    out = pre(batch)

    # Tensors remain on cuda:1 instead of being migrated to cuda:0.
    assert out[OBS_STATE].device == other_gpu
    assert out[OBS_IMAGE].device == other_gpu
    assert out[TransitionKey.ACTION.value].device == other_gpu
def test_diffusion_processor_without_stats():
    """Test Diffusion processor creation without dataset statistics."""
    pre, post = make_diffusion_pre_post_processors(create_default_config(), dataset_stats=None)

    # Construction succeeds even when no stats are supplied.
    assert pre is not None
    assert post is not None

    # Processing a transition must not crash either.
    batch = transition_to_batch(
        create_transition(
            {OBS_STATE: torch.randn(7), OBS_IMAGE: torch.randn(3, 224, 224)},
            torch.randn(6),
        )
    )
    assert pre(batch) is not None
def test_diffusion_processor_save_and_load():
    """Test saving and loading Diffusion processor."""
    pre, _ = make_diffusion_pre_post_processors(create_default_config(), create_default_stats())

    with tempfile.TemporaryDirectory() as tmpdir:
        # Round-trip the preprocessor through disk.
        pre.save_pretrained(tmpdir)
        restored = DataProcessorPipeline.from_pretrained(
            tmpdir, config_filename="policy_preprocessor.json"
        )

        # The restored pipeline still batches a raw transition correctly.
        batch = transition_to_batch(
            create_transition(
                {OBS_STATE: torch.randn(7), OBS_IMAGE: torch.randn(3, 224, 224)},
                torch.randn(6),
            )
        )
        out = restored(batch)
        assert out[OBS_STATE].shape == (1, 7)
        assert out[OBS_IMAGE].shape == (1, 3, 224, 224)
        assert out[TransitionKey.ACTION.value].shape == (1, 6)
def test_diffusion_processor_identity_normalization():
    """Test that images with IDENTITY normalization are not normalized."""
    pre, _ = make_diffusion_pre_post_processors(create_default_config(), create_default_stats())

    # Deliberately large pixel values so any rescaling would be obvious.
    raw_image = torch.rand(3, 224, 224) * 255
    batch = transition_to_batch(
        create_transition(
            {OBS_STATE: torch.randn(7), OBS_IMAGE: raw_image.clone()},
            torch.randn(6),
        )
    )

    out = pre(batch)

    # IDENTITY mode: the image is only batched, never normalized.
    assert torch.allclose(out[OBS_IMAGE][0], raw_image, rtol=1e-5)
def test_diffusion_processor_batch_consistency():
    """Test Diffusion processor with different batch sizes."""
    pre, _ = make_diffusion_pre_post_processors(create_default_config(), create_default_stats())

    for batch_size in (1, 8, 32):
        # batch_size 1 is fed unbatched so the pipeline has to add the dim itself.
        if batch_size > 1:
            obs = {
                OBS_STATE: torch.randn(batch_size, 7),
                OBS_IMAGE: torch.randn(batch_size, 3, 224, 224),
            }
            act = torch.randn(batch_size, 6)
        else:
            obs = {OBS_STATE: torch.randn(7), OBS_IMAGE: torch.randn(3, 224, 224)}
            act = torch.randn(6)

        out = pre(transition_to_batch(create_transition(obs, act)))

        # The output batch dimension always matches the effective input batch size.
        assert out[OBS_STATE].shape[0] == batch_size
        assert out[OBS_IMAGE].shape[0] == batch_size
        assert out[TransitionKey.ACTION.value].shape[0] == batch_size
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_diffusion_processor_bfloat16_device_float32_normalizer():
    """Test: DeviceProcessor(bfloat16) + NormalizerProcessor(float32) → output bfloat16 via automatic adaptation"""
    cfg = create_default_config()
    cfg.device = "cuda"
    pre, _ = make_diffusion_pre_post_processors(cfg, create_default_stats())

    # Rebuild the pipeline: the device step emits bfloat16 while the normalizer
    # is deliberately configured for float32 and must adapt on the fly.
    rebuilt = []
    for step in pre.steps:
        if isinstance(step, DeviceProcessorStep):
            rebuilt.append(DeviceProcessorStep(device=cfg.device, float_dtype="bfloat16"))
        elif isinstance(step, NormalizerProcessorStep):
            rebuilt.append(
                NormalizerProcessorStep(
                    features=step.features,
                    norm_map=step.norm_map,
                    stats=step.stats,
                    device=cfg.device,
                    dtype=torch.float32,  # mismatched on purpose
                )
            )
        else:
            rebuilt.append(step)
    pre.steps = rebuilt

    # Sanity-check the initial (mismatched) normalizer configuration.
    normalizer = pre.steps[3]  # NormalizerProcessorStep slot
    assert normalizer.dtype == torch.float32

    batch = transition_to_batch(
        create_transition(
            {
                OBS_STATE: torch.randn(7, dtype=torch.float32),
                OBS_IMAGE: torch.randn(3, 224, 224, dtype=torch.float32),
            },
            torch.randn(6, dtype=torch.float32),
        )
    )

    out = pre(batch)

    # The bfloat16 produced by the device step wins end to end.
    assert out[OBS_STATE].dtype == torch.bfloat16
    assert out[OBS_IMAGE].dtype == torch.bfloat16  # IDENTITY features are still cast
    assert out[TransitionKey.ACTION.value].dtype == torch.bfloat16

    # The normalizer silently adapted its own dtype and its stored stats.
    assert normalizer.dtype == torch.bfloat16
    for stat in normalizer._tensor_stats[OBS_STATE].values():
        assert stat.dtype == torch.bfloat16
    # OBS_IMAGE has IDENTITY normalization, so there are no stats to inspect.
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_diffusion_processor.py",
"license": "Apache License 2.0",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_migration_detection.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for processor migration detection functionality.
"""
import json
import tempfile
from pathlib import Path
import pytest
from lerobot.processor.pipeline import DataProcessorPipeline, ProcessorMigrationError
from lerobot.utils.constants import ACTION, OBS_STATE
def test_is_processor_config_valid_configs():
    """Test processor config detection with valid configurations."""
    valid_configs = [
        {"steps": []},  # empty steps list
        {"steps": [{"class": "MyClass"}]},  # class-based step
        {"steps": [{"registry_name": "my_step"}]},  # registry-based step
        {"steps": [{"class": "A"}, {"registry_name": "B"}]},  # mixture of both
        {"name": "Test", "steps": [{"class": "MyClass"}]},  # with an explicit name
    ]
    for i, config in enumerate(valid_configs):
        detected = DataProcessorPipeline._is_processor_config(config)
        assert detected, f"Valid config {i} should be detected as processor config: {config}"
def test_is_processor_config_invalid_configs():
    """Test processor config detection with invalid configurations."""
    invalid_configs = [
        {},  # missing "steps" entirely
        {"steps": "not a list"},  # wrong type for steps
        {"steps": [{}]},  # step lacks both class and registry_name
        {"steps": ["not a dict"]},  # step is not a mapping
        {"steps": [{"other_field": "value"}]},  # step with unknown fields only
        {"other_field": "value"},  # unrelated structure
    ]
    for i, config in enumerate(invalid_configs):
        detected = DataProcessorPipeline._is_processor_config(config)
        assert not detected, f"Invalid config {i} should not be detected as processor config: {config}"
def test_should_suggest_migration_with_processor_config():
    """Test that migration is NOT suggested when processor config exists."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # Write a well-formed processor config into the directory.
        config = {
            "name": "TestProcessor",
            "steps": [
                {
                    "class": "lerobot.processor.normalize.NormalizeStep",
                    "config": {"mean": 0.0, "std": 1.0},
                }
            ],
        }
        (root / "processor.json").write_text(json.dumps(config))

        # A processor config is present, so no migration hint is expected.
        assert not DataProcessorPipeline._should_suggest_migration(root)
def test_should_suggest_migration_with_empty_processor_config():
    """Test that migration is NOT suggested when empty processor config exists."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # An empty steps list is still a valid processor config.
        config = {"name": "EmptyProcessor", "steps": []}
        (root / "empty_processor.json").write_text(json.dumps(config))

        # Even an empty processor config suppresses the migration hint.
        assert not DataProcessorPipeline._should_suggest_migration(root)
def test_should_suggest_migration_with_model_config_only():
    """Test that migration IS suggested when only model config exists."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # Old-style LeRobot model config with no processor config alongside it.
        config = {
            "type": "act",
            "input_features": {OBS_STATE: {"shape": [7]}},
            "output_features": {ACTION: {"shape": [7]}},
            "hidden_dim": 256,
            "n_obs_steps": 1,
            "n_action_steps": 1,
        }
        (root / "config.json").write_text(json.dumps(config))

        # A model config without a processor config should suggest migration.
        assert DataProcessorPipeline._should_suggest_migration(root)
def test_should_suggest_migration_no_json_files():
    """Test that migration is NOT suggested when no JSON files exist."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # Only non-JSON artifacts in the directory.
        (root / "model.safetensors").write_text("fake model data")
        (root / "README.md").write_text("# Model README")

        # Nothing JSON-shaped to migrate, so no hint.
        assert not DataProcessorPipeline._should_suggest_migration(root)
def test_should_suggest_migration_random_json_files():
    """Test that migration IS suggested when JSON files exist but none are processor configs."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # JSON is present, but it is not a processor config.
        payload = {"some_field": "some_value", "another_field": 123}
        (root / "random.json").write_text(json.dumps(payload))

        # Non-processor JSON triggers the migration suggestion.
        assert DataProcessorPipeline._should_suggest_migration(root)
def test_should_suggest_migration_mixed_configs():
    """Test that migration is NOT suggested when processor config exists alongside other configs."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # Both a processor config and an unrelated model config are present.
        processor_config = {"name": "TestProcessor", "steps": [{"registry_name": "normalize_step"}]}
        (root / "processor.json").write_text(json.dumps(processor_config))
        (root / "config.json").write_text(json.dumps({"type": "diffusion", "hidden_dim": 512}))

        # The processor config wins: no migration hint.
        assert not DataProcessorPipeline._should_suggest_migration(root)
def test_should_suggest_migration_invalid_json():
    """Test that invalid JSON is handled gracefully."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # One file with broken JSON...
        (root / "invalid.json").write_text("{ invalid json")
        # ...and one valid JSON file that is not a processor config.
        (root / "model.json").write_text(json.dumps({"type": "act"}))

        # The unparsable file is skipped; the remaining non-processor JSON
        # still triggers the migration hint.
        assert DataProcessorPipeline._should_suggest_migration(root)
def test_from_pretrained_multiple_json_files_migration_error():
    """Test that multiple JSON files trigger ProcessorMigrationError."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # Two JSON configs, neither of which is a processor config.
        (root / "config.json").write_text(json.dumps({"type": "act", "hidden_dim": 128}))
        (root / "train_config.json").write_text(json.dumps({"batch_size": 32, "lr": 0.001}))

        with pytest.raises(ProcessorMigrationError) as exc_info:
            DataProcessorPipeline.from_pretrained(root, config_filename="config.json")

        # The error must carry the path, the migration command, and the cause.
        error = exc_info.value
        assert str(root) in str(error.model_path)
        assert "migrate_policy_normalization.py" in error.migration_command
        assert "not a valid processor configuration" in error.original_error
def test_from_pretrained_no_processor_config_migration_error():
    """Test that missing processor config triggers ProcessorMigrationError."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # A lone model config and no processor config at all.
        (root / "config.json").write_text(json.dumps({"type": "diffusion", "hidden_dim": 256}))

        with pytest.raises(ProcessorMigrationError) as exc_info:
            DataProcessorPipeline.from_pretrained(root, config_filename="config.json")

        # The error must carry the path, the migration command, and the cause.
        error = exc_info.value
        assert str(root) in str(error.model_path)
        assert "migrate_policy_normalization.py" in error.migration_command
        assert "not a valid processor configuration" in error.original_error
def test_from_pretrained_valid_processor_no_migration_error():
    """Test that valid processor config does NOT trigger migration error."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # A minimal but valid processor config (empty steps are allowed).
        (root / "processor.json").write_text(json.dumps({"name": "TestProcessor", "steps": []}))

        # Loading succeeds and yields an empty pipeline with the stored name.
        pipeline = DataProcessorPipeline.from_pretrained(root, config_filename="processor.json")

        assert pipeline is not None
        assert pipeline.name == "TestProcessor"
        assert len(pipeline) == 0
def test_from_pretrained_no_json_files_no_migration_error():
    """Test that directories with no JSON files don't trigger migration errors."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # Only a binary-ish artifact, no JSON at all.
        (root / "model.safetensors").write_text("fake model data")

        # Plain FileNotFoundError, not a migration suggestion.
        with pytest.raises(FileNotFoundError, match="not found in directory"):
            DataProcessorPipeline.from_pretrained(root, config_filename="processor.json")
def test_processor_migration_error_creation():
    """Test that ProcessorMigrationError is created correctly."""
    model_path = "/path/to/model"
    migration_command = "python migrate.py --path /path/to/model"
    original_error = "Config not found"

    error = ProcessorMigrationError(model_path, migration_command, original_error)

    # Constructor arguments are stored verbatim...
    assert error.model_path == model_path
    assert error.migration_command == migration_command
    assert error.original_error == original_error
    # ...and all three appear in the rendered message.
    for fragment in (model_path, migration_command, original_error):
        assert fragment in str(error)
def test_processor_migration_error_attributes():
    """Test that ProcessorMigrationError has correct attributes."""
    error = ProcessorMigrationError(Path("/test/path"), "python test.py", "Test error")

    # All three attributes are exposed on the instance.
    for attr in ("model_path", "migration_command", "original_error"):
        assert hasattr(error, attr)

    # And the type still behaves like a normal exception.
    assert isinstance(error, Exception)
def test_migration_suggestion_raises_error():
    """Test that migration suggestion always raises ProcessorMigrationError."""
    with pytest.raises(ProcessorMigrationError) as caught:
        DataProcessorPipeline._suggest_processor_migration("/test/path", "Test error")

    # The raised error references the path, the migration script, and the cause.
    raised = caught.value
    assert "/test/path" in str(raised.model_path)
    assert "migrate_policy_normalization.py" in raised.migration_command
    assert "Test error" in raised.original_error
def test_migration_error_always_raised_for_invalid_configs():
    """Test that ProcessorMigrationError is always raised for invalid configs."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # Any non-processor config must end in a migration error.
        (root / "config.json").write_text(json.dumps({"type": "test", "param": "value"}))

        with pytest.raises(ProcessorMigrationError):
            DataProcessorPipeline.from_pretrained(root, config_filename="config.json")
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_migration_detection.py",
"license": "Apache License 2.0",
"lines": 256,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_pipeline_from_pretrained_helpers.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for DataProcessorPipeline.from_pretrained helper methods.
These tests focus on the individual private methods that were extracted from
the main from_pretrained method to improve modularity and testability.
"""
import json
import tempfile
from pathlib import Path
import pytest
from lerobot.processor.pipeline import DataProcessorPipeline, ProcessorMigrationError
# Simplified Config Loading Tests
def test_load_config_directory():
    """Test loading config from directory."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # Place a processor config inside the directory.
        expected = {"name": "TestProcessor", "steps": []}
        (root / "processor.json").write_text(json.dumps(expected))

        # Loading by directory resolves the filename inside it.
        config, base = DataProcessorPipeline._load_config(str(root), "processor.json", {})

        assert config == expected
        assert base == root
def test_load_config_single_file():
    """Test loading config from a single file path."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        expected = {"name": "TestProcessor", "steps": []}
        config_file = root / "processor.json"
        config_file.write_text(json.dumps(expected))

        # When a file path is given directly, the filename argument is ignored.
        config, base = DataProcessorPipeline._load_config(str(config_file), "any_filename_ignored", {})

        assert config == expected
        assert base == root
def test_load_config_directory_file_not_found():
    """Test directory loading when config file doesn't exist."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)
        # The directory exists but contains no processor.json.
        with pytest.raises(FileNotFoundError, match="not found in directory"):
            DataProcessorPipeline._load_config(str(root), "processor.json", {})
def test_load_config_directory_with_migration_detection():
    """Test that missing config triggers migration detection."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # An old-style model config makes the directory look migratable.
        (root / "config.json").write_text(json.dumps({"type": "act"}))

        # Asking for the missing processor.json must surface the migration error.
        with pytest.raises(ProcessorMigrationError):
            DataProcessorPipeline._load_config(str(root), "processor.json", {})
def test_load_config_nonexistent_path_tries_hub():
    """Test that nonexistent paths try Hub (simplified logic)."""
    # A path that exists neither locally nor on the Hub fails with a Hub-flavoured error.
    with pytest.raises(FileNotFoundError, match="on the HuggingFace Hub"):
        DataProcessorPipeline._load_config("nonexistent/path", "processor.json", {})
# Config Validation Tests
def test_validate_loaded_config_valid_config():
    """Test validation with valid processor config."""
    config = {"name": "TestProcessor", "steps": []}
    # A valid config passes silently — no exception expected.
    DataProcessorPipeline._validate_loaded_config("any-path", config, "processor.json")
def test_validate_loaded_config_invalid_config():
    """Test validation with invalid processor config."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)

        # Seed the directory so migration detection kicks in.
        (root / "config.json").write_text(json.dumps({"type": "act"}))

        bad_config = {"type": "act", "hidden_dim": 256}
        with pytest.raises(ProcessorMigrationError):
            DataProcessorPipeline._validate_loaded_config(str(root), bad_config, "config.json")
def test_validate_loaded_config_invalid_config_no_migration():
    """Test validation with invalid config when no migration is detected."""
    # A Hub-style repo id is not a local directory, so migration detection is skipped.
    bad_config = {"type": "act", "hidden_dim": 256}
    with pytest.raises(ValueError, match="not a valid processor configuration"):
        DataProcessorPipeline._validate_loaded_config("user/repo", bad_config, "config.json")
# Step Class Resolution Tests
def test_resolve_step_class_registry_name():
    """Test resolution using registry name."""
    from lerobot.processor.pipeline import ProcessorStep, ProcessorStepRegistry

    # Register a throwaway step under a known name.
    @ProcessorStepRegistry.register("test_step")
    class TestStep(ProcessorStep):
        def __call__(self, transition):
            return transition

        def transform_features(self, features):
            return features

    try:
        resolved_cls, resolved_key = DataProcessorPipeline._resolve_step_class(
            {"registry_name": "test_step"}
        )
        assert resolved_cls is TestStep
        assert resolved_key == "test_step"
    finally:
        # Always clean the registry so other tests are unaffected.
        ProcessorStepRegistry.unregister("test_step")
def test_resolve_step_class_registry_name_not_found():
    """Test resolution with non-existent registry name."""
    # Unknown registry names surface as ImportError.
    with pytest.raises(ImportError, match="Failed to load processor step from registry"):
        DataProcessorPipeline._resolve_step_class({"registry_name": "nonexistent_step"})
def test_resolve_step_class_import_path():
    """Test resolution using full import path."""
    from lerobot.processor.pipeline import ProcessorStep

    # ProcessorStep is importable; resolution only needs the import to succeed,
    # not for the class to be instantiable.
    entry = {"class": "lerobot.processor.pipeline.ProcessorStep"}
    resolved_class, resolved_key = DataProcessorPipeline._resolve_step_class(entry)
    assert resolved_class is ProcessorStep
    assert resolved_key == "ProcessorStep"
def test_resolve_step_class_invalid_import_path():
    """Test resolution with invalid import path."""
    bogus_entry = {"class": "nonexistent.module.ClassName"}
    with pytest.raises(ImportError, match="Failed to load processor step"):
        DataProcessorPipeline._resolve_step_class(bogus_entry)
# Override Validation Tests
def test_validate_overrides_used_all_used():
    """Test validation when all overrides are used."""
    # An empty remainder set means every override matched a step; no error expected.
    DataProcessorPipeline._validate_overrides_used(set(), {"steps": [{"class": "SomeStep"}]})
def test_validate_overrides_used_some_unused():
    """Test validation when some overrides are unused."""
    leftover = {"NonExistentStep", "AnotherMissingStep"}
    steps_config = {
        "steps": [
            {"registry_name": "normalize_step"},
            {"class": "some.module.TransformStep"},
        ]
    }
    # Leftover override keys that match no step must raise a KeyError.
    with pytest.raises(KeyError, match="Override keys.*do not match any step"):
        DataProcessorPipeline._validate_overrides_used(leftover, steps_config)
def test_validate_overrides_used_helpful_error_message():
    """Test that error message includes available step keys."""
    leftover = {"WrongStep"}
    steps_config = {
        "steps": [
            {"registry_name": "correct_step"},
            {"class": "module.path.CorrectClass"},
        ]
    }
    with pytest.raises(KeyError) as exc_info:
        DataProcessorPipeline._validate_overrides_used(leftover, steps_config)
    message = str(exc_info.value)
    # The error should guide the user toward the valid step keys.
    assert "Available step keys" in message
    assert "correct_step" in message
    assert "CorrectClass" in message
# Integration Tests for Simplified Logic
def test_simplified_three_way_loading():
    """Test that the simplified 3-way loading logic works correctly."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_path = Path(tmp_dir)
        # Test 1: Directory loading — the config filename is looked up inside the directory.
        config_file = tmp_path / "processor.json"
        test_config = {"name": "DirectoryTest", "steps": []}
        config_file.write_text(json.dumps(test_config))
        loaded_config, base_path = DataProcessorPipeline._load_config(str(tmp_path), "processor.json", {})
        assert loaded_config["name"] == "DirectoryTest"
        assert base_path == tmp_path
        # Test 2: Single file loading — when given a file path directly, the filename
        # argument is ignored and the base path is the file's parent directory.
        loaded_config, base_path = DataProcessorPipeline._load_config(
            str(config_file), "ignored_filename", {}
        )
        assert loaded_config["name"] == "DirectoryTest"
        assert base_path == tmp_path
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_pipeline_from_pretrained_helpers.py",
"license": "Apache License 2.0",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_policy_robot_bridge.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from pathlib import Path
import pytest
import torch
from lerobot.configs.types import FeatureType, PipelineFeatureType
from lerobot.processor import (
DataProcessorPipeline,
PolicyActionToRobotActionProcessorStep,
ProcessorStepRegistry,
RobotActionToPolicyActionProcessorStep,
)
from lerobot.processor.converters import identity_transition
from lerobot.utils.constants import ACTION
from tests.conftest import assert_contract_is_typed
def test_robot_to_policy_basic_action_conversion():
    """Test basic robot action to policy action conversion."""
    step = RobotActionToPolicyActionProcessorStep(motor_names=["joint1", "joint2", "joint3"])
    raw_action = {
        "joint1.pos": 1.0,
        "joint2.pos": 2.0,
        "joint3.pos": 3.0,
    }
    converted = step.action(raw_action)
    # A dict of per-motor positions becomes a flat 1-D tensor.
    assert isinstance(converted, torch.Tensor)
    assert converted.shape == (3,)
    torch.testing.assert_close(converted, torch.tensor([1.0, 2.0, 3.0]))
def test_robot_to_policy_action_conversion_preserves_order():
    """Test that motor names order is preserved in conversion."""
    step = RobotActionToPolicyActionProcessorStep(motor_names=["gripper", "arm", "wrist"])
    # Dict insertion order deliberately differs from motor_names order:
    # the output must follow motor_names, not the dict.
    raw_action = {
        "arm.pos": 10.0,
        "gripper.pos": 5.0,
        "wrist.pos": 15.0,
    }
    converted = step.action(raw_action)
    torch.testing.assert_close(converted, torch.tensor([5.0, 10.0, 15.0]))
def test_robot_to_policy_action_conversion_with_floats_and_tensors():
    """Test conversion with mixed float and tensor values."""
    step = RobotActionToPolicyActionProcessorStep(motor_names=["joint1", "joint2"])
    mixed_action = {
        "joint1.pos": torch.tensor(1.5),  # scalar tensor
        "joint2.pos": 2.5,  # plain Python float
    }
    converted = step.action(mixed_action)
    assert isinstance(converted, torch.Tensor)
    torch.testing.assert_close(converted, torch.tensor([1.5, 2.5]))
def test_robot_to_policy_action_length_mismatch_error():
    """Test error when robot action length doesn't match motor names."""
    step = RobotActionToPolicyActionProcessorStep(motor_names=["joint1", "joint2", "joint3"])
    # Too few entries.
    with pytest.raises(ValueError, match="Action must have 3 elements, got 2"):
        step.action({"joint1.pos": 1.0, "joint2.pos": 2.0})
    # Too many entries.
    oversized = {
        "joint1.pos": 1.0,
        "joint2.pos": 2.0,
        "joint3.pos": 3.0,
        "extra.pos": 4.0,
    }
    with pytest.raises(ValueError, match="Action must have 3 elements, got 4"):
        step.action(oversized)
def test_robot_to_policy_missing_motor_key_error():
    """Test error when robot action is missing expected motor keys."""
    step = RobotActionToPolicyActionProcessorStep(motor_names=["joint1", "joint2"])
    # Right number of entries, but "joint2.pos" is absent.
    with pytest.raises(KeyError):
        step.action({"joint1.pos": 1.0, "wrong_key.pos": 2.0})
def test_robot_to_policy_transform_features():
    """Test feature transformation for robot to policy action processor."""
    motor_names = ["joint1", "joint2", "joint3"]
    processor = RobotActionToPolicyActionProcessorStep(motor_names=motor_names)
    # Per-motor action features plus one unrelated env feature.
    features = {
        PipelineFeatureType.ACTION: {
            "joint1.pos": {"type": FeatureType.ACTION, "shape": (1,)},
            "joint2.pos": {"type": FeatureType.ACTION, "shape": (1,)},
            "joint3.pos": {"type": FeatureType.ACTION, "shape": (1,)},
            "other_data": {"type": FeatureType.ENV, "shape": (1,)},
        }
    }
    transformed = processor.transform_features(features)
    # An aggregated ACTION feature is added with shape (num_motors,).
    assert ACTION in transformed[PipelineFeatureType.ACTION]
    action_feature = transformed[PipelineFeatureType.ACTION][ACTION]
    assert action_feature.type == FeatureType.ACTION
    assert action_feature.shape == (3,)
    # Pre-existing per-motor and unrelated entries are preserved.
    assert "joint1.pos" in transformed[PipelineFeatureType.ACTION]
    assert "joint2.pos" in transformed[PipelineFeatureType.ACTION]
    assert "joint3.pos" in transformed[PipelineFeatureType.ACTION]
    assert "other_data" in transformed[PipelineFeatureType.ACTION]
def test_robot_to_policy_get_config():
    """Test configuration serialization."""
    names = ["motor1", "motor2"]
    step = RobotActionToPolicyActionProcessorStep(motor_names=names)
    # The config must round-trip the constructor arguments verbatim.
    assert step.get_config() == {"motor_names": names}
def test_robot_to_policy_state_dict():
    """Test state dict operations."""
    step = RobotActionToPolicyActionProcessorStep(motor_names=["joint1"])
    # The step carries no tensor state: empty dict out, and loading one is a no-op.
    assert step.state_dict() == {}
    step.load_state_dict({})
def test_robot_to_policy_single_motor():
    """Test with single motor."""
    step = RobotActionToPolicyActionProcessorStep(motor_names=["single_joint"])
    converted = step.action({"single_joint.pos": 42.0})
    assert converted.shape == (1,)
    torch.testing.assert_close(converted, torch.tensor([42.0]))
def test_policy_to_robot_basic_action_conversion():
    """Test basic policy action to robot action conversion."""
    step = PolicyActionToRobotActionProcessorStep(motor_names=["joint1", "joint2", "joint3"])
    converted = step.action(torch.tensor([1.0, 2.0, 3.0]))
    # A flat tensor becomes a per-motor "<name>.pos" dict.
    assert isinstance(converted, dict)
    assert len(converted) == 3
    expected = {
        "joint1.pos": 1.0,
        "joint2.pos": 2.0,
        "joint3.pos": 3.0,
    }
    for motor_key, want in expected.items():
        assert motor_key in converted
        got = converted[motor_key]
        # Values may come back as scalar tensors or plain floats.
        if isinstance(got, torch.Tensor):
            got = got.item()
        assert got == pytest.approx(want)
def test_policy_to_robot_action_conversion_preserves_order():
    """Test that motor names order corresponds to tensor indices."""
    step = PolicyActionToRobotActionProcessorStep(motor_names=["gripper", "arm", "wrist"])
    converted = step.action(torch.tensor([5.0, 10.0, 15.0]))
    # Tensor index i maps to motor_names[i].
    for motor_key, want in (("gripper.pos", 5.0), ("arm.pos", 10.0), ("wrist.pos", 15.0)):
        assert converted[motor_key] == pytest.approx(want)
def test_policy_to_robot_action_conversion_with_numpy_input():
    """Test conversion with numpy array input."""
    import numpy as np

    step = PolicyActionToRobotActionProcessorStep(motor_names=["joint1", "joint2"])
    # The step should accept a numpy array, not only torch tensors.
    converted = step.action(np.array([1.5, 2.5]))
    assert converted["joint1.pos"] == pytest.approx(1.5)
    assert converted["joint2.pos"] == pytest.approx(2.5)
def test_policy_to_robot_action_length_mismatch_error():
    """Test error when policy action length doesn't match motor names."""
    step = PolicyActionToRobotActionProcessorStep(motor_names=["joint1", "joint2", "joint3"])
    # Too short.
    with pytest.raises(ValueError, match="Action must have 3 elements, got 2"):
        step.action(torch.tensor([1.0, 2.0]))
    # Too long.
    with pytest.raises(ValueError, match="Action must have 3 elements, got 4"):
        step.action(torch.tensor([1.0, 2.0, 3.0, 4.0]))
def test_policy_to_robot_transform_features():
    """Test feature transformation for policy to robot action processor."""
    motor_names = ["joint1", "joint2"]
    processor = PolicyActionToRobotActionProcessorStep(motor_names=motor_names)
    # One aggregated ACTION feature plus an unrelated env feature.
    features = {
        PipelineFeatureType.ACTION: {
            ACTION: {"type": FeatureType.ACTION, "shape": (2,)},
            "other_data": {"type": FeatureType.ENV, "shape": (1,)},
        }
    }
    transformed = processor.transform_features(features)
    # Per-motor "<name>.pos" features are added, each with shape (1,).
    assert "joint1.pos" in transformed[PipelineFeatureType.ACTION]
    assert "joint2.pos" in transformed[PipelineFeatureType.ACTION]
    for motor in motor_names:
        motor_feature = transformed[PipelineFeatureType.ACTION][f"{motor}.pos"]
        assert motor_feature.type == FeatureType.ACTION
        assert motor_feature.shape == (1,)
    # Pre-existing entries are preserved alongside the new ones.
    assert ACTION in transformed[PipelineFeatureType.ACTION]
    assert "other_data" in transformed[PipelineFeatureType.ACTION]
def test_policy_to_robot_get_config():
    """Test configuration serialization."""
    names = ["motor1", "motor2"]
    step = PolicyActionToRobotActionProcessorStep(motor_names=names)
    # The config must round-trip the constructor arguments verbatim.
    assert step.get_config() == {"motor_names": names}
def test_policy_to_robot_state_dict():
    """Test state dict operations."""
    step = PolicyActionToRobotActionProcessorStep(motor_names=["joint1"])
    # The step carries no tensor state: empty dict out, and loading one is a no-op.
    assert step.state_dict() == {}
    step.load_state_dict({})
def test_policy_to_robot_single_motor():
    """Test with single motor."""
    step = PolicyActionToRobotActionProcessorStep(motor_names=["single_joint"])
    converted = step.action(torch.tensor([42.0]))
    assert len(converted) == 1
    assert converted["single_joint.pos"] == pytest.approx(42.0)
def test_robot_to_policy_registry():
    """Test RobotActionToPolicyActionProcessorStep registry."""
    registry_key = "robot_action_to_policy_action_processor"
    assert registry_key in ProcessorStepRegistry.list()
    resolved = ProcessorStepRegistry.get(registry_key)
    assert resolved is RobotActionToPolicyActionProcessorStep
    # The registry entry must be directly constructible.
    step = resolved(motor_names=["test"])
    assert isinstance(step, RobotActionToPolicyActionProcessorStep)
    assert step.motor_names == ["test"]
def test_policy_to_robot_registry():
    """Test PolicyActionToRobotActionProcessorStep registry."""
    registry_key = "policy_action_to_robot_action_processor"
    assert registry_key in ProcessorStepRegistry.list()
    resolved = ProcessorStepRegistry.get(registry_key)
    assert resolved is PolicyActionToRobotActionProcessorStep
    # The registry entry must be directly constructible.
    step = resolved(motor_names=["test"])
    assert isinstance(step, PolicyActionToRobotActionProcessorStep)
    assert step.motor_names == ["test"]
def test_save_and_load_robot_to_policy():
    """Test saving and loading RobotActionToPolicyActionProcessorStep."""
    motor_names = ["joint1", "joint2", "joint3"]
    processor = RobotActionToPolicyActionProcessorStep(motor_names=motor_names)
    pipeline = DataProcessorPipeline([processor], name="TestRobotToPolicy")
    with tempfile.TemporaryDirectory() as tmp_dir:
        # Save pipeline
        pipeline.save_pretrained(tmp_dir)
        # Check config file exists (filename is the lowercased pipeline name)
        config_path = Path(tmp_dir) / "testrobottopolicy.json"
        assert config_path.exists()
        # Load pipeline
        loaded_pipeline = DataProcessorPipeline.from_pretrained(
            tmp_dir,
            "testrobottopolicy.json",
            to_transition=identity_transition,
            to_output=identity_transition,
        )
        assert loaded_pipeline.name == "TestRobotToPolicy"
        assert len(loaded_pipeline) == 1
        # Check loaded processor: class and motor_names must survive the round trip
        loaded_processor = loaded_pipeline.steps[0]
        assert isinstance(loaded_processor, RobotActionToPolicyActionProcessorStep)
        assert loaded_processor.motor_names == motor_names
        # Test functionality after loading
        robot_action = {"joint1.pos": 1.0, "joint2.pos": 2.0, "joint3.pos": 3.0}
        policy_action = loaded_processor.action(robot_action)
        torch.testing.assert_close(policy_action, torch.tensor([1.0, 2.0, 3.0]))
def test_save_and_load_policy_to_robot():
    """Test saving and loading PolicyActionToRobotActionProcessorStep."""
    motor_names = ["motor_a", "motor_b"]
    processor = PolicyActionToRobotActionProcessorStep(motor_names=motor_names)
    pipeline = DataProcessorPipeline([processor], name="TestPolicyToRobot")
    with tempfile.TemporaryDirectory() as tmp_dir:
        # Save pipeline
        pipeline.save_pretrained(tmp_dir)
        # Load pipeline (config filename is the lowercased pipeline name)
        loaded_pipeline = DataProcessorPipeline.from_pretrained(
            tmp_dir,
            "testpolicytorobot.json",
            to_transition=identity_transition,
            to_output=identity_transition,
        )
        # Class and motor_names must survive the round trip
        loaded_processor = loaded_pipeline.steps[0]
        assert isinstance(loaded_processor, PolicyActionToRobotActionProcessorStep)
        assert loaded_processor.motor_names == motor_names
        # The reloaded step must still convert tensors to per-motor dicts
        policy_action = torch.tensor([10.0, 20.0])
        robot_action = loaded_processor.action(policy_action)
        assert robot_action["motor_a.pos"] == pytest.approx(10.0)
        assert robot_action["motor_b.pos"] == pytest.approx(20.0)
# Integration and chaining tests
def test_round_trip_conversion():
    """Test that robot->policy->robot conversion preserves values."""
    names = ["joint1", "joint2", "joint3"]
    to_policy = RobotActionToPolicyActionProcessorStep(motor_names=names)
    to_robot = PolicyActionToRobotActionProcessorStep(motor_names=names)
    source_action = {
        "joint1.pos": 1.5,
        "joint2.pos": -2.3,
        "joint3.pos": 0.7,
    }
    # dict -> tensor -> dict must reproduce the original values.
    round_tripped = to_robot.action(to_policy.action(source_action))
    for motor_key in source_action:
        value = round_tripped[motor_key]
        if isinstance(value, torch.Tensor):
            value = value.item()
        assert value == pytest.approx(source_action[motor_key], abs=1e-6)
def test_chained_processors_in_pipeline():
    """Test both processors chained in a pipeline."""
    names = ["joint1", "joint2"]
    pipeline = DataProcessorPipeline(
        [
            RobotActionToPolicyActionProcessorStep(motor_names=names),
            PolicyActionToRobotActionProcessorStep(motor_names=names),
        ],
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    # Both steps are present and in the given order.
    assert len(pipeline.steps) == 2
    assert isinstance(pipeline.steps[0], RobotActionToPolicyActionProcessorStep)
    assert isinstance(pipeline.steps[1], PolicyActionToRobotActionProcessorStep)
def test_robot_to_policy_features_contract(policy_feature_factory):
    """Test feature transformation maintains proper typing contract."""
    processor = RobotActionToPolicyActionProcessorStep(motor_names=["j1", "j2"])
    # Per-motor action features plus an unrelated env feature.
    features = {
        PipelineFeatureType.ACTION: {
            "j1.pos": policy_feature_factory(FeatureType.ACTION, (1,)),
            "j2.pos": policy_feature_factory(FeatureType.ACTION, (1,)),
            "other": policy_feature_factory(FeatureType.ENV, (3,)),
        }
    }
    # Shallow copy guards against in-place mutation of the fixture dict.
    out = processor.transform_features(features.copy())
    assert_contract_is_typed(out)
    # The aggregated ACTION feature has one slot per motor.
    assert ACTION in out[PipelineFeatureType.ACTION]
    action_feature = out[PipelineFeatureType.ACTION][ACTION]
    assert action_feature.type == FeatureType.ACTION
    assert action_feature.shape == (2,)
def test_policy_to_robot_features_contract(policy_feature_factory):
    """Test feature transformation maintains proper typing contract."""
    processor = PolicyActionToRobotActionProcessorStep(motor_names=["m1", "m2", "m3"])
    # One aggregated ACTION feature plus an unrelated env feature.
    features = {
        PipelineFeatureType.ACTION: {
            ACTION: policy_feature_factory(FeatureType.ACTION, (3,)),
            "other": policy_feature_factory(FeatureType.ENV, (1,)),
        }
    }
    # Shallow copy guards against in-place mutation of the fixture dict.
    out = processor.transform_features(features.copy())
    assert_contract_is_typed(out)
    # Each motor gains a typed "<name>.pos" feature of shape (1,).
    for motor in ["m1", "m2", "m3"]:
        key = f"{motor}.pos"
        assert key in out[PipelineFeatureType.ACTION]
        motor_feature = out[PipelineFeatureType.ACTION][key]
        assert motor_feature.type == FeatureType.ACTION
        assert motor_feature.shape == (1,)
def test_empty_motor_names_list():
    """Test behavior with empty motor names list."""
    step = RobotActionToPolicyActionProcessorStep(motor_names=[])
    # No motors -> an empty tensor, not an error.
    converted = step.action({})
    assert isinstance(converted, torch.Tensor)
    assert converted.shape == (0,)
def test_empty_motor_names_list_policy_to_robot():
    """Test PolicyActionToRobotActionProcessorStep with empty motor names."""
    step = PolicyActionToRobotActionProcessorStep(motor_names=[])
    # No motors -> an empty dict, not an error.
    converted = step.action(torch.tensor([]))
    assert isinstance(converted, dict)
    assert len(converted) == 0
def test_very_long_motor_names():
    """Test with many motor names."""
    count = 100
    step = RobotActionToPolicyActionProcessorStep(motor_names=[f"joint_{i}" for i in range(count)])
    converted = step.action({f"joint_{i}.pos": float(i) for i in range(count)})
    assert converted.shape == (count,)
    # Position i of the output must carry joint_i's value.
    torch.testing.assert_close(converted, torch.arange(count, dtype=torch.float32))
def test_special_characters_in_motor_names():
    """Test with special characters in motor names."""
    # Dashes, underscores, and even dots in motor names must not confuse
    # the ".pos" suffix handling.
    step = RobotActionToPolicyActionProcessorStep(motor_names=["motor-1", "motor_2", "motor.3"])
    converted = step.action(
        {
            "motor-1.pos": 1.0,
            "motor_2.pos": 2.0,
            "motor.3.pos": 3.0,
        }
    )
    torch.testing.assert_close(converted, torch.tensor([1.0, 2.0, 3.0]))
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_policy_robot_bridge.py",
"license": "Apache License 2.0",
"lines": 380,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_sac_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SAC policy processor."""
import tempfile
import pytest
import torch
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.policies.sac.configuration_sac import SACConfig
from lerobot.policies.sac.processor_sac import make_sac_pre_post_processors
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DataProcessorPipeline,
DeviceProcessorStep,
NormalizerProcessorStep,
RenameObservationsProcessorStep,
TransitionKey,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import create_transition, transition_to_batch
from lerobot.utils.constants import ACTION, OBS_STATE
def create_default_config():
    """Create a default SAC configuration for testing."""
    config = SACConfig()
    # Single 10-dim proprioceptive state in, 5-dim continuous action out.
    config.input_features = {
        OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(10,)),
    }
    config.output_features = {
        ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(5,)),
    }
    # States are mean/std normalized; actions are min/max normalized
    # (create_default_stats provides matching statistics).
    config.normalization_mapping = {
        FeatureType.STATE: NormalizationMode.MEAN_STD,
        FeatureType.ACTION: NormalizationMode.MIN_MAX,
    }
    config.device = "cpu"
    return config
def create_default_stats():
    """Create default dataset statistics for testing."""
    # Keys mirror the normalization modes in create_default_config:
    # mean/std for the state feature, min/max for the action feature.
    state_stats = {"mean": torch.zeros(10), "std": torch.ones(10)}
    action_stats = {"min": torch.full((5,), -1.0), "max": torch.ones(5)}
    return {OBS_STATE: state_stats, ACTION: action_stats}
def test_make_sac_processor_basic():
    """Test basic creation of SAC processor."""
    config = create_default_config()
    stats = create_default_stats()
    preprocessor, postprocessor = make_sac_pre_post_processors(
        config,
        stats,
    )
    # Check processor names
    assert preprocessor.name == "policy_preprocessor"
    assert postprocessor.name == "policy_postprocessor"
    # Check steps in preprocessor: rename -> batch -> device -> normalize, in order
    assert len(preprocessor.steps) == 4
    assert isinstance(preprocessor.steps[0], RenameObservationsProcessorStep)
    assert isinstance(preprocessor.steps[1], AddBatchDimensionProcessorStep)
    assert isinstance(preprocessor.steps[2], DeviceProcessorStep)
    assert isinstance(preprocessor.steps[3], NormalizerProcessorStep)
    # Check steps in postprocessor: unnormalize, then move back to CPU
    assert len(postprocessor.steps) == 2
    assert isinstance(postprocessor.steps[0], UnnormalizerProcessorStep)
    assert isinstance(postprocessor.steps[1], DeviceProcessorStep)
def test_sac_processor_normalization_modes():
    """Test that SAC processor correctly handles different normalization modes."""
    config = create_default_config()
    stats = create_default_stats()
    preprocessor, postprocessor = make_sac_pre_post_processors(
        config,
        stats,
    )
    # Create test data
    observation = {OBS_STATE: torch.randn(10) * 2}  # Larger values to test normalization
    action = torch.rand(5) * 2 - 1  # Range [-1, 1]
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    # Process through preprocessor
    processed = preprocessor(batch)
    # Check that data is normalized and batched
    # State should be mean-std normalized
    # Action should be min-max normalized to [-1, 1]
    # (only shapes are asserted here; exact values depend on the stats)
    assert processed[OBS_STATE].shape == (1, 10)
    assert processed[TransitionKey.ACTION.value].shape == (1, 5)
    # Process action through postprocessor
    postprocessed = postprocessor(processed[TransitionKey.ACTION.value])
    # Check that action is unnormalized (but still batched)
    assert postprocessed.shape == (1, 5)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_sac_processor_cuda():
    """Test SAC processor with CUDA device."""
    config = create_default_config()
    config.device = "cuda"
    stats = create_default_stats()
    preprocessor, postprocessor = make_sac_pre_post_processors(
        config,
        stats,
    )
    # Create CPU data; the preprocessor is responsible for the host->device move
    observation = {OBS_STATE: torch.randn(10)}
    action = torch.randn(5)
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    # Process through preprocessor
    processed = preprocessor(batch)
    # Check that data is on CUDA
    assert processed[OBS_STATE].device.type == "cuda"
    assert processed[TransitionKey.ACTION.value].device.type == "cuda"
    # Process through postprocessor
    postprocessed = postprocessor(processed[TransitionKey.ACTION.value])
    # Check that action is back on CPU (postprocessor returns host tensors)
    assert postprocessed.device.type == "cpu"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_sac_processor_accelerate_scenario():
    """Test SAC processor in simulated Accelerate scenario.

    When the dataloader (e.g. via Accelerate) has already placed tensors on the
    GPU, the preprocessor must keep them on that same device rather than move them.
    """
    config = create_default_config()
    config.device = "cuda:0"
    stats = create_default_stats()
    # Only the preprocessor is exercised here; discard the postprocessor
    # (matches the `preprocessor, _ = ...` convention used elsewhere in this file).
    preprocessor, _ = make_sac_pre_post_processors(
        config,
        stats,
    )
    # Simulate Accelerate: data already on GPU
    device = torch.device("cuda:0")
    observation = {OBS_STATE: torch.randn(10).to(device)}
    action = torch.randn(5).to(device)
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    # Process through preprocessor
    processed = preprocessor(batch)
    # Check that data stays on same GPU
    assert processed[OBS_STATE].device == device
    assert processed[TransitionKey.ACTION.value].device == device
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_sac_processor_multi_gpu():
    """Test SAC processor with multi-GPU setup.

    Even when the config targets cuda:0, tensors that arrive on another GPU
    must stay on their original device.
    """
    config = create_default_config()
    config.device = "cuda:0"
    stats = create_default_stats()
    # Only the preprocessor is exercised here; discard the postprocessor
    # (matches the `preprocessor, _ = ...` convention used elsewhere in this file).
    preprocessor, _ = make_sac_pre_post_processors(
        config,
        stats,
    )
    # Simulate data on a different GPU than the configured one
    device = torch.device("cuda:1")
    observation = {OBS_STATE: torch.randn(10).to(device)}
    action = torch.randn(5).to(device)
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    # Process through preprocessor
    processed = preprocessor(batch)
    # Check that data stays on cuda:1
    assert processed[OBS_STATE].device == device
    assert processed[TransitionKey.ACTION.value].device == device
def test_sac_processor_without_stats():
    """Test SAC processor creation without dataset statistics."""
    config = create_default_config()
    preprocessor, postprocessor = make_sac_pre_post_processors(config, dataset_stats=None)
    # Construction must succeed even when no stats are available.
    assert preprocessor is not None
    assert postprocessor is not None
    # And the preprocessor must still run end to end.
    transition = create_transition({OBS_STATE: torch.randn(10)}, torch.randn(5))
    processed = preprocessor(transition_to_batch(transition))
    assert processed is not None
def test_sac_processor_save_and_load():
    """Test saving and loading SAC processor."""
    config = create_default_config()
    stats = create_default_stats()
    preprocessor, postprocessor = make_sac_pre_post_processors(
        config,
        stats,
    )
    with tempfile.TemporaryDirectory() as tmpdir:
        # Save preprocessor
        preprocessor.save_pretrained(tmpdir)
        # Load preprocessor (config filename matches the pipeline name)
        loaded_preprocessor = DataProcessorPipeline.from_pretrained(
            tmpdir, config_filename="policy_preprocessor.json"
        )
        # Test that loaded processor works: it must still batch and normalize
        observation = {OBS_STATE: torch.randn(10)}
        action = torch.randn(5)
        transition = create_transition(observation, action)
        batch = transition_to_batch(transition)
        processed = loaded_preprocessor(batch)
        assert processed[OBS_STATE].shape == (1, 10)
        assert processed[TransitionKey.ACTION.value].shape == (1, 5)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_sac_processor_mixed_precision():
    """Test SAC processor with mixed precision."""
    config = create_default_config()
    config.device = "cuda"
    stats = create_default_stats()
    # Create processor
    preprocessor, postprocessor = make_sac_pre_post_processors(
        config,
        stats,
    )
    # Replace DeviceProcessorStep with one that uses float16, rebuilding the
    # step list so the normalizer's dtype stays consistent with the device step
    modified_steps = []
    for step in preprocessor.steps:
        if isinstance(step, DeviceProcessorStep):
            modified_steps.append(DeviceProcessorStep(device=config.device, float_dtype="float16"))
        elif isinstance(step, NormalizerProcessorStep):
            # Update normalizer to use the same device as the device processor
            norm_step = step  # Now type checker knows this is NormalizerProcessorStep
            modified_steps.append(
                NormalizerProcessorStep(
                    features=norm_step.features,
                    norm_map=norm_step.norm_map,
                    stats=norm_step.stats,
                    device=config.device,
                    dtype=torch.float16,  # Match the float16 dtype
                )
            )
        else:
            modified_steps.append(step)
    preprocessor.steps = modified_steps
    # Create test data (float32 on purpose; the pipeline should downcast)
    observation = {OBS_STATE: torch.randn(10, dtype=torch.float32)}
    action = torch.randn(5, dtype=torch.float32)
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    # Process through preprocessor
    processed = preprocessor(batch)
    # Check that data is converted to float16
    assert processed[OBS_STATE].dtype == torch.float16
    assert processed[TransitionKey.ACTION.value].dtype == torch.float16
def test_sac_processor_batch_data():
    """Test SAC processor with batched data.

    Inputs that already carry a batch dimension must pass through with that
    dimension intact (no extra dimension is prepended).
    """
    config = create_default_config()
    stats = create_default_stats()
    # Only the preprocessor is exercised here; discard the postprocessor
    # (matches the `preprocessor, _ = ...` convention used elsewhere in this file).
    preprocessor, _ = make_sac_pre_post_processors(
        config,
        stats,
    )
    # Test with batched data
    batch_size = 32
    observation = {OBS_STATE: torch.randn(batch_size, 10)}
    action = torch.randn(batch_size, 5)
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    # Process through preprocessor
    processed = preprocessor(batch)
    # Check that batch dimension is preserved
    assert processed[OBS_STATE].shape == (batch_size, 10)
    assert processed[TransitionKey.ACTION.value].shape == (batch_size, 5)
def test_sac_processor_edge_cases():
    """Test SAC processor with edge cases."""
    config = create_default_config()
    stats = create_default_stats()
    preprocessor, postprocessor = make_sac_pre_post_processors(
        config,
        stats,
    )
    # Test with observation that has no state key but still exists
    observation = {"observation.dummy": torch.randn(1)}  # Some dummy observation to pass validation
    action = torch.randn(5)
    # Build the batch dict by hand instead of via create_transition
    batch = {TransitionKey.ACTION.value: action, **observation}
    processed = preprocessor(batch)
    # observation.state wasn't in original, so it won't be in processed
    assert OBS_STATE not in processed
    assert processed[TransitionKey.ACTION.value].shape == (1, 5)
    # Test with zero action (representing "null" action)
    transition = create_transition(observation={OBS_STATE: torch.randn(10)}, action=torch.zeros(5))
    batch = transition_to_batch(transition)
    processed = preprocessor(batch)
    assert processed[OBS_STATE].shape == (1, 10)
    # Action should be present and batched, even if it's zeros
    assert processed[TransitionKey.ACTION.value].shape == (1, 5)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_sac_processor_bfloat16_device_float32_normalizer():
    """Test: DeviceProcessor(bfloat16) + NormalizerProcessor(float32) → output bfloat16 via automatic adaptation"""
    config = create_default_config()
    config.device = "cuda"
    stats = create_default_stats()
    preprocessor, _ = make_sac_pre_post_processors(
        config,
        stats,
    )
    # Modify the pipeline to use bfloat16 device processor with float32 normalizer
    modified_steps = []
    for step in preprocessor.steps:
        if isinstance(step, DeviceProcessorStep):
            # Device processor converts to bfloat16
            modified_steps.append(DeviceProcessorStep(device=config.device, float_dtype="bfloat16"))
        elif isinstance(step, NormalizerProcessorStep):
            # Normalizer stays configured as float32 (will auto-adapt to bfloat16)
            norm_step = step  # Now type checker knows this is NormalizerProcessorStep
            modified_steps.append(
                NormalizerProcessorStep(
                    features=norm_step.features,
                    norm_map=norm_step.norm_map,
                    stats=norm_step.stats,
                    device=config.device,
                    dtype=torch.float32,  # Deliberately configured as float32
                )
            )
        else:
            modified_steps.append(step)
    preprocessor.steps = modified_steps
    # Verify initial normalizer configuration
    # (index 3 matches the step order asserted in test_make_sac_processor_basic)
    normalizer_step = preprocessor.steps[3]  # NormalizerProcessorStep
    assert normalizer_step.dtype == torch.float32
    # Create test data
    observation = {OBS_STATE: torch.randn(10, dtype=torch.float32)}  # Start with float32
    action = torch.randn(5, dtype=torch.float32)
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    # Process through full pipeline
    processed = preprocessor(batch)
    # Verify: DeviceProcessor → bfloat16, NormalizerProcessor adapts → final output is bfloat16
    assert processed[OBS_STATE].dtype == torch.bfloat16
    assert processed[TransitionKey.ACTION.value].dtype == torch.bfloat16
    # Verify normalizer automatically adapted its internal state
    assert normalizer_step.dtype == torch.bfloat16
    for stat_tensor in normalizer_step._tensor_stats[OBS_STATE].values():
        assert stat_tensor.dtype == torch.bfloat16
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_sac_processor.py",
"license": "Apache License 2.0",
"lines": 332,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_smolvla_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SmolVLA policy processor."""
from unittest.mock import patch
import pytest
import torch
from lerobot.configs.types import FeatureType, NormalizationMode, PipelineFeatureType, PolicyFeature
from lerobot.policies.smolvla.configuration_smolvla import SmolVLAConfig
from lerobot.policies.smolvla.processor_smolvla import (
SmolVLANewLineProcessor,
make_smolvla_pre_post_processors,
)
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DeviceProcessorStep,
EnvTransition,
NormalizerProcessorStep,
ProcessorStep,
RenameObservationsProcessorStep,
TransitionKey,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import create_transition, transition_to_batch
from lerobot.utils.constants import ACTION, OBS_IMAGE, OBS_STATE
class MockTokenizerProcessorStep(ProcessorStep):
    """Drop-in replacement for TokenizerProcessorStep that performs no tokenization."""

    def __init__(self, *args, **kwargs):
        # Swallow every constructor argument so the mock matches the real
        # TokenizerProcessorStep signature.
        pass

    def __call__(self, transition: EnvTransition) -> EnvTransition:
        """Return the transition untouched."""
        return transition

    def transform_features(self, features):
        """Return the feature mapping untouched."""
        return features
def create_default_config():
    """Build a minimal SmolVLA config (state + image in, 7-DoF action out) for tests."""
    cfg = SmolVLAConfig()
    cfg.input_features = {
        OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(8,)),
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    cfg.output_features = {ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(7,))}
    cfg.normalization_mapping = {
        FeatureType.STATE: NormalizationMode.MEAN_STD,
        FeatureType.VISUAL: NormalizationMode.IDENTITY,
        FeatureType.ACTION: NormalizationMode.MIN_MAX,
    }
    cfg.device = "cpu"
    cfg.vlm_model_name = "HuggingFaceTB/SmolVLM-Instruct"
    cfg.pad_language_to = "max_length"
    cfg.tokenizer_max_length = 100
    return cfg
def create_default_stats():
    """Return dataset statistics matching the shapes used by create_default_config()."""
    stats = {
        OBS_STATE: {"mean": torch.zeros(8), "std": torch.ones(8)},
        # Images use IDENTITY normalization, so no stats are required.
        OBS_IMAGE: {},
        ACTION: {"min": torch.full((7,), -1.0), "max": torch.ones(7)},
    }
    return stats
def test_make_smolvla_processor_basic():
    """Check the composition of the default SmolVLA pre/post processor pipelines."""
    config = create_default_config()
    stats = create_default_stats()
    with patch(
        "lerobot.policies.smolvla.processor_smolvla.TokenizerProcessorStep", MockTokenizerProcessorStep
    ):
        preprocessor, postprocessor = make_smolvla_pre_post_processors(config, stats)

    assert preprocessor.name == "policy_preprocessor"
    assert postprocessor.name == "policy_postprocessor"

    # Preprocessor: rename → batch → newline → tokenizer (mocked) → device → normalize.
    assert len(preprocessor.steps) == 6
    expected_types = {
        0: RenameObservationsProcessorStep,
        1: AddBatchDimensionProcessorStep,
        2: SmolVLANewLineProcessor,
        # Index 3 is the (mocked) tokenizer step, so it is not type-checked here.
        4: DeviceProcessorStep,
        5: NormalizerProcessorStep,
    }
    for idx, step_type in expected_types.items():
        assert isinstance(preprocessor.steps[idx], step_type)

    # Postprocessor: unnormalize → move back to CPU.
    assert len(postprocessor.steps) == 2
    assert isinstance(postprocessor.steps[0], UnnormalizerProcessorStep)
    assert isinstance(postprocessor.steps[1], DeviceProcessorStep)
def test_smolvla_newline_processor_single_task():
    """A single task string ends up with exactly one trailing newline."""
    processor = SmolVLANewLineProcessor()

    # Without and with an existing trailing newline — the result is identical.
    for task in ("test task", "test task\n"):
        result = processor(create_transition(complementary_data={"task": task}))
        assert result[TransitionKey.COMPLEMENTARY_DATA]["task"] == "test task\n"
def test_smolvla_newline_processor_list_of_tasks():
    """Every entry of a task list ends with exactly one newline afterwards."""
    processor = SmolVLANewLineProcessor()

    transition = create_transition(complementary_data={"task": ["task1", "task2\n", "task3"]})
    result = processor(transition)

    assert result[TransitionKey.COMPLEMENTARY_DATA]["task"] == ["task1\n", "task2\n", "task3\n"]
def test_smolvla_newline_processor_empty_transition():
    """Transitions without a usable task are passed through unchanged."""
    processor = SmolVLANewLineProcessor()

    # No complementary_data at all.
    no_data = create_transition()
    assert processor(no_data) == no_data

    # complementary_data present but without a "task" key.
    no_task = create_transition(complementary_data={"other": "data"})
    assert processor(no_task) == no_task

    # An explicit None task is also a no-op.
    none_task = create_transition(complementary_data={"task": None})
    assert processor(none_task) == none_task
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_smolvla_processor_cuda():
    """CPU-resident inputs end up on the GPU after preprocessing."""
    config = create_default_config()
    config.device = "cuda"
    stats = create_default_stats()

    # Local pass-through tokenizer mock carrying the full lifecycle interface.
    class MockTokenizerProcessorStep(ProcessorStep):
        def __init__(self, *args, **kwargs):
            pass

        def __call__(self, transition):
            return transition

        def state_dict(self):
            return {}

        def load_state_dict(self, state):
            pass

        def reset(self):
            pass

        def get_config(self):
            return {"tokenizer_name": "HuggingFaceTB/SmolVLM-Instruct"}

        def transform_features(self, features):
            return features

    with patch(
        "lerobot.policies.smolvla.processor_smolvla.TokenizerProcessorStep", MockTokenizerProcessorStep
    ):
        preprocessor, postprocessor = make_smolvla_pre_post_processors(config, stats)

    batch = transition_to_batch(
        create_transition(
            {OBS_STATE: torch.randn(8), OBS_IMAGE: torch.randn(3, 224, 224)},
            torch.randn(7),
            complementary_data={"task": "test task"},
        )
    )
    out = preprocessor(batch)

    # Everything must have been moved onto the CUDA device.
    for key in (OBS_STATE, OBS_IMAGE, TransitionKey.ACTION.value):
        assert out[key].device.type == "cuda"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_smolvla_processor_accelerate_scenario():
    """Batched data that already lives on cuda:0 must stay on cuda:0."""
    config = create_default_config()
    config.device = "cuda:0"
    stats = create_default_stats()

    # Local pass-through tokenizer mock carrying the full lifecycle interface.
    class MockTokenizerProcessorStep(ProcessorStep):
        def __init__(self, *args, **kwargs):
            pass

        def __call__(self, transition):
            return transition

        def state_dict(self):
            return {}

        def load_state_dict(self, state):
            pass

        def reset(self):
            pass

        def get_config(self):
            return {"tokenizer_name": "HuggingFaceTB/SmolVLM-Instruct"}

        def transform_features(self, features):
            return features

    with patch(
        "lerobot.policies.smolvla.processor_smolvla.TokenizerProcessorStep", MockTokenizerProcessorStep
    ):
        preprocessor, postprocessor = make_smolvla_pre_post_processors(config, stats)

    # Simulate Accelerate: inputs are already batched and already on the GPU.
    device = torch.device("cuda:0")
    batch = transition_to_batch(
        create_transition(
            {
                OBS_STATE: torch.randn(1, 8).to(device),
                OBS_IMAGE: torch.randn(1, 3, 224, 224).to(device),
            },
            torch.randn(1, 7).to(device),
            complementary_data={"task": ["test task"]},
        )
    )
    out = preprocessor(batch)

    # Data must remain on the device it arrived on.
    for key in (OBS_STATE, OBS_IMAGE, TransitionKey.ACTION.value):
        assert out[key].device == device
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_smolvla_processor_multi_gpu():
    """Data on cuda:1 stays on cuda:1 even though the processor was configured for cuda:0."""
    config = create_default_config()
    config.device = "cuda:0"
    stats = create_default_stats()

    # Local pass-through tokenizer mock carrying the full lifecycle interface.
    class MockTokenizerProcessorStep(ProcessorStep):
        def __init__(self, *args, **kwargs):
            pass

        def __call__(self, transition):
            return transition

        def state_dict(self):
            return {}

        def load_state_dict(self, state):
            pass

        def reset(self):
            pass

        def get_config(self):
            return {"tokenizer_name": "HuggingFaceTB/SmolVLM-Instruct"}

        def transform_features(self, features):
            return features

    with patch(
        "lerobot.policies.smolvla.processor_smolvla.TokenizerProcessorStep", MockTokenizerProcessorStep
    ):
        preprocessor, postprocessor = make_smolvla_pre_post_processors(config, stats)

    # Place the inputs on a different GPU than the configured one.
    device = torch.device("cuda:1")
    batch = transition_to_batch(
        create_transition(
            {
                OBS_STATE: torch.randn(1, 8).to(device),
                OBS_IMAGE: torch.randn(1, 3, 224, 224).to(device),
            },
            torch.randn(1, 7).to(device),
            complementary_data={"task": ["test task"]},
        )
    )
    out = preprocessor(batch)

    # Data must remain on cuda:1.
    for key in (OBS_STATE, OBS_IMAGE, TransitionKey.ACTION.value):
        assert out[key].device == device
def test_smolvla_processor_without_stats():
    """Processor construction must succeed when no dataset statistics are given."""
    config = create_default_config()
    with patch(
        "lerobot.policies.smolvla.processor_smolvla.TokenizerProcessorStep", MockTokenizerProcessorStep
    ):
        preprocessor, postprocessor = make_smolvla_pre_post_processors(config, dataset_stats=None)

    assert preprocessor is not None
    assert postprocessor is not None
def test_smolvla_newline_processor_state_dict():
    """The newline processor is stateless: all persistence hooks are trivial."""
    processor = SmolVLANewLineProcessor()

    # Empty state, no-op load/reset, empty config.
    assert processor.state_dict() == {}
    processor.load_state_dict({})
    processor.reset()
    assert processor.get_config() == {}
def test_smolvla_newline_processor_transform_features():
    """transform_features() must return the feature mapping unmodified."""
    processor = SmolVLANewLineProcessor()

    features = {
        PipelineFeatureType.OBSERVATION: {OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(10,))},
    }
    assert processor.transform_features(features) == features
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_smolvla_processor_bfloat16_device_float32_normalizer():
    """Test: DeviceProcessor(bfloat16) + NormalizerProcessor(float32) → output bfloat16 via automatic adaptation.

    The normalizer is deliberately configured with float32 stats; it must
    automatically adapt its dtype (and its cached stat tensors) to the
    bfloat16 tensors produced by the upstream device processor.
    """
    config = create_default_config()
    config.device = "cuda"
    stats = create_default_stats()
    with patch(
        "lerobot.policies.smolvla.processor_smolvla.TokenizerProcessorStep", MockTokenizerProcessorStep
    ):
        preprocessor, _ = make_smolvla_pre_post_processors(
            config,
            stats,
        )
    # Rebuild the pipeline: bfloat16 device step + float32-configured normalizer.
    # Capture the rebuilt normalizer directly instead of relying on a hard-coded
    # step index, which silently breaks if the pipeline layout ever changes.
    normalizer_step = None
    modified_steps = []
    for step in preprocessor.steps:
        if isinstance(step, DeviceProcessorStep):
            # Device processor converts to bfloat16
            modified_steps.append(DeviceProcessorStep(device=config.device, float_dtype="bfloat16"))
        elif isinstance(step, NormalizerProcessorStep):
            # Normalizer stays configured as float32 (will auto-adapt to bfloat16)
            normalizer_step = NormalizerProcessorStep(
                features=step.features,
                norm_map=step.norm_map,
                stats=step.stats,
                device=config.device,
                dtype=torch.float32,  # Deliberately configured as float32
            )
            modified_steps.append(normalizer_step)
        else:
            modified_steps.append(step)
    preprocessor.steps = modified_steps

    # Verify initial normalizer configuration
    assert normalizer_step is not None, "pipeline is expected to contain a NormalizerProcessorStep"
    assert normalizer_step.dtype == torch.float32

    # Create test data with both state and visual observations
    observation = {
        OBS_STATE: torch.randn(8, dtype=torch.float32),
        OBS_IMAGE: torch.randn(3, 224, 224, dtype=torch.float32),
    }
    action = torch.randn(7, dtype=torch.float32)
    batch = transition_to_batch(
        create_transition(observation, action, complementary_data={"task": "test bfloat16 adaptation"})
    )

    # Process through full pipeline
    processed = preprocessor(batch)

    # Verify: DeviceProcessor → bfloat16, NormalizerProcessor adapts → final output is bfloat16
    assert processed[OBS_STATE].dtype == torch.bfloat16
    assert processed[OBS_IMAGE].dtype == torch.bfloat16  # IDENTITY normalization still gets dtype conversion
    assert processed[TransitionKey.ACTION.value].dtype == torch.bfloat16

    # Verify normalizer automatically adapted its internal state
    assert normalizer_step.dtype == torch.bfloat16
    # Check state stats (has normalization)
    for stat_tensor in normalizer_step._tensor_stats[OBS_STATE].values():
        assert stat_tensor.dtype == torch.bfloat16
    # OBS_IMAGE uses IDENTITY normalization, so no stats to check
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_smolvla_processor.py",
"license": "Apache License 2.0",
"lines": 360,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_tdmpc_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TDMPC policy processor."""
import tempfile
import pytest
import torch
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.policies.tdmpc.configuration_tdmpc import TDMPCConfig
from lerobot.policies.tdmpc.processor_tdmpc import make_tdmpc_pre_post_processors
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DataProcessorPipeline,
DeviceProcessorStep,
NormalizerProcessorStep,
RenameObservationsProcessorStep,
TransitionKey,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import create_transition, transition_to_batch
from lerobot.utils.constants import ACTION, OBS_IMAGE, OBS_STATE
def create_default_config():
    """Build a minimal TDMPC config (12-dim state + image in, 6-DoF action out) for tests."""
    cfg = TDMPCConfig()
    cfg.input_features = {
        OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(12,)),
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    cfg.output_features = {ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(6,))}
    cfg.normalization_mapping = {
        FeatureType.STATE: NormalizationMode.MEAN_STD,
        FeatureType.VISUAL: NormalizationMode.IDENTITY,
        FeatureType.ACTION: NormalizationMode.MIN_MAX,
    }
    cfg.device = "cpu"
    return cfg
def create_default_stats():
    """Return dataset statistics matching the shapes used by create_default_config()."""
    stats = {
        OBS_STATE: {"mean": torch.zeros(12), "std": torch.ones(12)},
        # Images use IDENTITY normalization, so no stats are required.
        OBS_IMAGE: {},
        ACTION: {"min": torch.full((6,), -1.0), "max": torch.ones(6)},
    }
    return stats
def test_make_tdmpc_processor_basic():
    """Check the composition of the default TDMPC pre/post processor pipelines."""
    preprocessor, postprocessor = make_tdmpc_pre_post_processors(
        create_default_config(), create_default_stats()
    )

    assert preprocessor.name == "policy_preprocessor"
    assert postprocessor.name == "policy_postprocessor"

    # Preprocessor: rename → batch → device → normalize.
    expected = [
        RenameObservationsProcessorStep,
        AddBatchDimensionProcessorStep,
        DeviceProcessorStep,
        NormalizerProcessorStep,
    ]
    assert len(preprocessor.steps) == len(expected)
    for step, step_type in zip(preprocessor.steps, expected):
        assert isinstance(step, step_type)

    # Postprocessor: unnormalize → move back to CPU.
    assert len(postprocessor.steps) == 2
    assert isinstance(postprocessor.steps[0], UnnormalizerProcessorStep)
    assert isinstance(postprocessor.steps[1], DeviceProcessorStep)
def test_tdmpc_processor_normalization():
    """Preprocessing batches the data; postprocessing keeps the batched action shape."""
    preprocessor, postprocessor = make_tdmpc_pre_post_processors(
        create_default_config(), create_default_stats()
    )

    batch = transition_to_batch(
        create_transition(
            {OBS_STATE: torch.randn(12), OBS_IMAGE: torch.randn(3, 224, 224)},
            torch.randn(6),
        )
    )
    out = preprocessor(batch)

    # A leading batch dimension of 1 is added to every tensor.
    assert out[OBS_STATE].shape == (1, 12)
    assert out[OBS_IMAGE].shape == (1, 3, 224, 224)
    assert out[TransitionKey.ACTION.value].shape == (1, 6)

    # Unnormalizing the action preserves the batch dimension.
    recovered = postprocessor(out[TransitionKey.ACTION.value])
    assert recovered.shape == (1, 6)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_tdmpc_processor_cuda():
    """CPU inputs are moved to CUDA by the preprocessor and back to CPU by the postprocessor."""
    config = create_default_config()
    config.device = "cuda"
    preprocessor, postprocessor = make_tdmpc_pre_post_processors(config, create_default_stats())

    batch = transition_to_batch(
        create_transition(
            {OBS_STATE: torch.randn(12), OBS_IMAGE: torch.randn(3, 224, 224)},
            torch.randn(6),
        )
    )
    out = preprocessor(batch)

    # Everything must have been moved onto the CUDA device.
    for key in (OBS_STATE, OBS_IMAGE, TransitionKey.ACTION.value):
        assert out[key].device.type == "cuda"

    # The postprocessor returns the action to the CPU.
    assert postprocessor(out[TransitionKey.ACTION.value]).device.type == "cpu"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_tdmpc_processor_accelerate_scenario():
    """Data that Accelerate already placed on the GPU must stay on that GPU."""
    config = create_default_config()
    config.device = "cuda:0"
    preprocessor, postprocessor = make_tdmpc_pre_post_processors(config, create_default_stats())

    # Simulate Accelerate: inputs already reside on cuda:0.
    device = torch.device("cuda:0")
    batch = transition_to_batch(
        create_transition(
            {
                OBS_STATE: torch.randn(12).to(device),
                OBS_IMAGE: torch.randn(3, 224, 224).to(device),
            },
            torch.randn(6).to(device),
        )
    )
    out = preprocessor(batch)

    # Data must remain on the device it arrived on.
    for key in (OBS_STATE, OBS_IMAGE, TransitionKey.ACTION.value):
        assert out[key].device == device
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_tdmpc_processor_multi_gpu():
    """Data on cuda:1 stays on cuda:1 even though the processor was configured for cuda:0."""
    config = create_default_config()
    config.device = "cuda:0"
    preprocessor, postprocessor = make_tdmpc_pre_post_processors(config, create_default_stats())

    # Place the inputs on a different GPU than the configured one.
    device = torch.device("cuda:1")
    batch = transition_to_batch(
        create_transition(
            {
                OBS_STATE: torch.randn(12).to(device),
                OBS_IMAGE: torch.randn(3, 224, 224).to(device),
            },
            torch.randn(6).to(device),
        )
    )
    out = preprocessor(batch)

    # Data must remain on cuda:1.
    for key in (OBS_STATE, OBS_IMAGE, TransitionKey.ACTION.value):
        assert out[key].device == device
def test_tdmpc_processor_without_stats():
    """Without dataset statistics the pipeline is still built and still runs."""
    preprocessor, postprocessor = make_tdmpc_pre_post_processors(create_default_config(), dataset_stats=None)

    assert preprocessor is not None
    assert postprocessor is not None

    # Processing must also work without stats.
    batch = transition_to_batch(
        create_transition(
            {OBS_STATE: torch.randn(12), OBS_IMAGE: torch.randn(3, 224, 224)},
            torch.randn(6),
        )
    )
    assert preprocessor(batch) is not None
def test_tdmpc_processor_save_and_load():
    """A round-trip through save_pretrained/from_pretrained yields a working preprocessor."""
    preprocessor, postprocessor = make_tdmpc_pre_post_processors(
        create_default_config(), create_default_stats()
    )

    with tempfile.TemporaryDirectory() as tmpdir:
        preprocessor.save_pretrained(tmpdir)
        restored = DataProcessorPipeline.from_pretrained(
            tmpdir, config_filename="policy_preprocessor.json"
        )

        # The restored pipeline must process and batch data like the original.
        batch = transition_to_batch(
            create_transition(
                {OBS_STATE: torch.randn(12), OBS_IMAGE: torch.randn(3, 224, 224)},
                torch.randn(6),
            )
        )
        out = restored(batch)
        assert out[OBS_STATE].shape == (1, 12)
        assert out[OBS_IMAGE].shape == (1, 3, 224, 224)
        assert out[TransitionKey.ACTION.value].shape == (1, 6)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_tdmpc_processor_mixed_precision():
    """With a float16 device step and a float16 normalizer, all outputs are float16."""
    config = create_default_config()
    config.device = "cuda"
    preprocessor, postprocessor = make_tdmpc_pre_post_processors(config, create_default_stats())

    def _to_float16(step):
        # Swap in float16-configured device/normalizer steps; keep everything else.
        if isinstance(step, DeviceProcessorStep):
            return DeviceProcessorStep(device=config.device, float_dtype="float16")
        if isinstance(step, NormalizerProcessorStep):
            return NormalizerProcessorStep(
                features=step.features,
                norm_map=step.norm_map,
                stats=step.stats,
                device=config.device,
                dtype=torch.float16,  # match the device step's dtype
            )
        return step

    preprocessor.steps = [_to_float16(step) for step in preprocessor.steps]

    batch = transition_to_batch(
        create_transition(
            {
                OBS_STATE: torch.randn(12, dtype=torch.float32),
                OBS_IMAGE: torch.randn(3, 224, 224, dtype=torch.float32),
            },
            torch.randn(6, dtype=torch.float32),
        )
    )
    out = preprocessor(batch)

    # float32 inputs must come out as float16.
    for key in (OBS_STATE, OBS_IMAGE, TransitionKey.ACTION.value):
        assert out[key].dtype == torch.float16
def test_tdmpc_processor_batch_data():
    """Pre-batched inputs keep their batch dimension through the preprocessor."""
    preprocessor, postprocessor = make_tdmpc_pre_post_processors(
        create_default_config(), create_default_stats()
    )

    n = 64  # batch size
    batch = transition_to_batch(
        create_transition(
            {OBS_STATE: torch.randn(n, 12), OBS_IMAGE: torch.randn(n, 3, 224, 224)},
            torch.randn(n, 6),
        )
    )
    out = preprocessor(batch)

    # The existing batch dimension must be preserved, not re-added.
    assert out[OBS_STATE].shape == (n, 12)
    assert out[OBS_IMAGE].shape == (n, 3, 224, 224)
    assert out[TransitionKey.ACTION.value].shape == (n, 6)
def test_tdmpc_processor_edge_cases():
    """Observations may contain only the state or only the image."""
    preprocessor, postprocessor = make_tdmpc_pre_post_processors(
        create_default_config(), create_default_stats()
    )
    action = torch.randn(6)

    # State-only observation: no image key should materialize in the output.
    out = preprocessor(
        transition_to_batch(create_transition({OBS_STATE: torch.randn(12)}, action))
    )
    assert out[OBS_STATE].shape == (1, 12)
    assert OBS_IMAGE not in out

    # Image-only observation: no state key should materialize in the output.
    out = preprocessor(
        transition_to_batch(create_transition({OBS_IMAGE: torch.randn(3, 224, 224)}, action))
    )
    assert out[OBS_IMAGE].shape == (1, 3, 224, 224)
    assert OBS_STATE not in out
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_tdmpc_processor_bfloat16_device_float32_normalizer():
    """Test: DeviceProcessor(bfloat16) + NormalizerProcessor(float32) → output bfloat16 via automatic adaptation.

    The normalizer is deliberately configured with float32 stats; it must
    automatically adapt its dtype (and its cached stat tensors) to the
    bfloat16 tensors produced by the upstream device processor.
    """
    config = create_default_config()
    config.device = "cuda"
    stats = create_default_stats()
    preprocessor, _ = make_tdmpc_pre_post_processors(
        config,
        stats,
    )
    # Rebuild the pipeline: bfloat16 device step + float32-configured normalizer.
    # Capture the rebuilt normalizer directly instead of relying on a hard-coded
    # step index, which silently breaks if the pipeline layout ever changes.
    normalizer_step = None
    modified_steps = []
    for step in preprocessor.steps:
        if isinstance(step, DeviceProcessorStep):
            # Device processor converts to bfloat16
            modified_steps.append(DeviceProcessorStep(device=config.device, float_dtype="bfloat16"))
        elif isinstance(step, NormalizerProcessorStep):
            # Normalizer stays configured as float32 (will auto-adapt to bfloat16)
            normalizer_step = NormalizerProcessorStep(
                features=step.features,
                norm_map=step.norm_map,
                stats=step.stats,
                device=config.device,
                dtype=torch.float32,  # Deliberately configured as float32
            )
            modified_steps.append(normalizer_step)
        else:
            modified_steps.append(step)
    preprocessor.steps = modified_steps

    # Verify initial normalizer configuration
    assert normalizer_step is not None, "pipeline is expected to contain a NormalizerProcessorStep"
    assert normalizer_step.dtype == torch.float32

    # Create test data with both state and visual observations
    observation = {
        OBS_STATE: torch.randn(12, dtype=torch.float32),
        OBS_IMAGE: torch.randn(3, 224, 224, dtype=torch.float32),
    }
    action = torch.randn(6, dtype=torch.float32)
    batch = transition_to_batch(create_transition(observation, action))

    # Process through full pipeline
    processed = preprocessor(batch)

    # Verify: DeviceProcessor → bfloat16, NormalizerProcessor adapts → final output is bfloat16
    assert processed[OBS_STATE].dtype == torch.bfloat16
    assert processed[OBS_IMAGE].dtype == torch.bfloat16  # IDENTITY normalization still gets dtype conversion
    assert processed[TransitionKey.ACTION.value].dtype == torch.bfloat16

    # Verify normalizer automatically adapted its internal state
    assert normalizer_step.dtype == torch.bfloat16
    # Check state stats (has normalization)
    for stat_tensor in normalizer_step._tensor_stats[OBS_STATE].values():
        assert stat_tensor.dtype == torch.bfloat16
    # OBS_IMAGE uses IDENTITY normalization, so no stats to check
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_tdmpc_processor.py",
"license": "Apache License 2.0",
"lines": 368,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_tokenizer_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the TokenizerProcessorStep class.
"""
import tempfile
from unittest.mock import patch
import pytest
import torch
from lerobot.configs.types import FeatureType, PipelineFeatureType, PolicyFeature
from lerobot.processor import DataProcessorPipeline, TokenizerProcessorStep, TransitionKey
from lerobot.processor.converters import create_transition, identity_transition
from lerobot.utils.constants import (
ACTION,
OBS_IMAGE,
OBS_LANGUAGE,
OBS_LANGUAGE_SUBTASK_ATTENTION_MASK,
OBS_LANGUAGE_SUBTASK_TOKENS,
OBS_STATE,
)
from tests.utils import require_package
class MockTokenizer:
"""Mock tokenizer for testing that mimics transformers tokenizer interface."""
def __init__(self, vocab_size: int = 1000):
self.vocab_size = vocab_size
def __call__(
self,
text: str | list[str],
max_length: int = 512,
truncation: bool = True,
padding: str = "max_length",
padding_side: str = "right",
return_tensors: str = "pt",
**kwargs,
) -> dict[str, torch.Tensor]:
"""Mock tokenization that returns deterministic tokens based on text."""
texts = [text] if isinstance(text, str) else text
batch_size = len(texts)
# Create mock input_ids and attention_mask
input_ids = torch.zeros(batch_size, max_length, dtype=torch.long)
attention_mask = torch.zeros(batch_size, max_length, dtype=torch.long)
for i, txt in enumerate(texts):
# Simple mock: use hash of text to generate deterministic tokens
text_hash = hash(txt) % self.vocab_size
seq_len = min(len(txt.split()), max_length)
# Fill input_ids with simple pattern based on text
for j in range(seq_len):
input_ids[i, j] = (text_hash + j) % self.vocab_size
# Set attention mask for non-padded positions
attention_mask[i, :seq_len] = 1
result = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
# Return single sequence for single input to match transformers behavior
if len(texts) == 1:
result = {k: v.squeeze(0) for k, v in result.items()}
return result
@pytest.fixture
def mock_tokenizer():
    """Supply a fresh MockTokenizer instance to tests that request it."""
    tokenizer = MockTokenizer(vocab_size=100)
    return tokenizer
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_basic_tokenization(mock_auto_tokenizer):
    """Test basic string tokenization functionality."""
    # Route AutoTokenizer.from_pretrained to the deterministic mock.
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer", max_length=10)
    transition = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "pick up the red cube"},
    )
    out = step(transition)
    # The raw task string must survive the step untouched.
    assert out[TransitionKey.COMPLEMENTARY_DATA]["task"] == "pick up the red cube"
    obs = out[TransitionKey.OBSERVATION]
    # Tokenized outputs are unbatched tensors padded to max_length.
    for key in (f"{OBS_LANGUAGE}.tokens", f"{OBS_LANGUAGE}.attention_mask"):
        assert key in obs
        assert isinstance(obs[key], torch.Tensor)
        assert obs[key].shape == (10,)
@require_package("transformers")
def test_basic_tokenization_with_tokenizer_object():
    """Test basic string tokenization functionality using tokenizer object directly."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    transition = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "pick up the red cube"},
    )
    out = step(transition)
    # The raw task string must survive the step untouched.
    assert out[TransitionKey.COMPLEMENTARY_DATA]["task"] == "pick up the red cube"
    obs = out[TransitionKey.OBSERVATION]
    # Tokenized outputs are unbatched tensors padded to max_length.
    for key in (f"{OBS_LANGUAGE}.tokens", f"{OBS_LANGUAGE}.attention_mask"):
        assert key in obs
        assert isinstance(obs[key], torch.Tensor)
        assert obs[key].shape == (10,)
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_list_of_strings_tokenization(mock_auto_tokenizer):
    """Test tokenization of a list of strings."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer", max_length=8)
    out = step(
        create_transition(
            observation={"state": torch.tensor([1.0, 2.0])},
            action=torch.tensor([0.1, 0.2]),
            complementary_data={"task": ["pick up cube", "place on table"]},
        )
    )
    # The task list itself is left untouched.
    assert out[TransitionKey.COMPLEMENTARY_DATA]["task"] == ["pick up cube", "place on table"]
    obs = out[TransitionKey.OBSERVATION]
    # A list of N tasks yields batched (N, max_length) tensors.
    assert obs[f"{OBS_LANGUAGE}.tokens"].shape == (2, 8)
    assert obs[f"{OBS_LANGUAGE}.attention_mask"].shape == (2, 8)
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_custom_keys(mock_auto_tokenizer):
    """Test using custom task_key."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer", task_key="instruction", max_length=5)
    out = step(
        create_transition(
            observation={"state": torch.tensor([1.0, 2.0])},
            action=torch.tensor([0.1, 0.2]),
            complementary_data={"instruction": "move forward"},
        )
    )
    obs = out[TransitionKey.OBSERVATION]
    # Output keys are fixed regardless of which complementary-data key held the task.
    assert f"{OBS_LANGUAGE}.tokens" in obs
    assert f"{OBS_LANGUAGE}.attention_mask" in obs
    assert obs[f"{OBS_LANGUAGE}.tokens"].shape == (5,)
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_none_complementary_data(mock_auto_tokenizer):
    """Test handling of None complementary_data."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer")
    # create_transition converts None complementary_data to an empty dict,
    # so the task lookup raises KeyError.
    with pytest.raises(KeyError, match="task"):
        step(create_transition(observation={}, complementary_data=None))
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_missing_task_key(mock_auto_tokenizer):
    """Test handling when task key is missing."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer")
    # Complementary data without a "task" entry triggers a KeyError.
    transition = create_transition(observation={}, complementary_data={"other_field": "some value"})
    with pytest.raises(KeyError, match="task"):
        step(transition)
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_none_task_value(mock_auto_tokenizer):
    """Test handling when task value is None."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer")
    # An explicit None task is rejected with a descriptive ValueError.
    with pytest.raises(ValueError, match="Task extracted from Complementary data is None"):
        step(create_transition(observation={}, complementary_data={"task": None}))
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_unsupported_task_type(mock_auto_tokenizer):
    """Test handling of unsupported task types."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer")
    # Neither a bare int nor a mixed str/int list is a valid task:
    # get_task returns None and the observation step raises ValueError.
    for bad_task in (123, ["text", 123, "more text"]):
        transition = create_transition(observation={}, complementary_data={"task": bad_task})
        with pytest.raises(ValueError, match="Task cannot be None"):
            step(transition)
@require_package("transformers")
def test_no_tokenizer_error():
    """Test that ValueError is raised when neither tokenizer nor tokenizer_name is provided."""
    # Constructing the step with no tokenizer source at all must fail fast.
    expected = "Either 'tokenizer' or 'tokenizer_name' must be provided"
    with pytest.raises(ValueError, match=expected):
        TokenizerProcessorStep()
@require_package("transformers")
def test_invalid_tokenizer_name_error():
    """Test that error is raised when invalid tokenizer_name is provided."""
    with patch("lerobot.processor.tokenizer_processor.AutoTokenizer") as mock_auto_tokenizer:
        # Simulate the hub lookup failing for an unknown model name; the
        # constructor must propagate the failure rather than swallow it.
        mock_auto_tokenizer.from_pretrained.side_effect = Exception("Model not found")
        with pytest.raises(Exception, match="Model not found"):
            TokenizerProcessorStep(tokenizer_name="invalid-tokenizer")
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_get_config_with_tokenizer_name(mock_auto_tokenizer):
    """Test configuration serialization when using tokenizer_name."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(
        tokenizer_name="test-tokenizer",
        max_length=256,
        task_key="instruction",
        padding="longest",
        truncation=False,
    )
    # The config must round-trip every constructor argument, plus the
    # default padding_side.
    assert step.get_config() == {
        "tokenizer_name": "test-tokenizer",
        "max_length": 256,
        "task_key": "instruction",
        "padding_side": "right",
        "padding": "longest",
        "truncation": False,
    }
@require_package("transformers")
def test_get_config_with_tokenizer_object():
    """Test configuration serialization when using tokenizer object."""
    step = TokenizerProcessorStep(
        tokenizer=MockTokenizer(vocab_size=100),
        max_length=256,
        task_key="instruction",
        padding="longest",
        truncation=False,
    )
    config = step.get_config()
    # A tokenizer passed as an object is not serializable, so no
    # tokenizer_name entry may appear in the config.
    assert "tokenizer_name" not in config
    assert config == {
        "max_length": 256,
        "task_key": "instruction",
        "padding_side": "right",
        "padding": "longest",
        "truncation": False,
    }
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_state_dict_methods(mock_auto_tokenizer):
    """Test state_dict and load_state_dict methods."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer")
    # The step is stateless: state_dict is empty and loading one is a no-op.
    assert step.state_dict() == {}
    step.load_state_dict({})
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_reset_method(mock_auto_tokenizer):
    """Test reset method."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    # reset() must be callable on a stateless step without raising.
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer")
    step.reset()
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_integration_with_robot_processor(mock_auto_tokenizer):
    """Test integration with RobotProcessor."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    pipeline = DataProcessorPipeline(
        [TokenizerProcessorStep(tokenizer_name="test-tokenizer", max_length=6)],
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    transition = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "test task"},
    )
    out = pipeline(transition)
    assert TransitionKey.OBSERVATION in out
    obs = out[TransitionKey.OBSERVATION]
    # Tokenized entries appear with the configured max_length.
    for key in (f"{OBS_LANGUAGE}.tokens", f"{OBS_LANGUAGE}.attention_mask"):
        assert key in obs
        assert obs[key].shape == (6,)
    # The pre-existing observation/action tensors pass through unchanged.
    assert torch.equal(obs["state"], transition[TransitionKey.OBSERVATION]["state"])
    assert torch.equal(out[TransitionKey.ACTION], transition[TransitionKey.ACTION])
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_save_and_load_pretrained_with_tokenizer_name(mock_auto_tokenizer):
    """Test saving and loading processor with tokenizer_name."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    pipeline = DataProcessorPipeline(
        [TokenizerProcessorStep(tokenizer_name="test-tokenizer", max_length=32, task_key="instruction")],
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    with tempfile.TemporaryDirectory() as temp_dir:
        pipeline.save_pretrained(temp_dir)
        # Reloading rebuilds the tokenizer from the serialized tokenizer_name.
        restored = DataProcessorPipeline.from_pretrained(
            temp_dir,
            config_filename="dataprocessorpipeline.json",
            to_transition=identity_transition,
            to_output=identity_transition,
        )
        # The reloaded pipeline must still tokenize transitions.
        out = restored(
            create_transition(
                observation={"state": torch.tensor([1.0, 2.0])},
                action=torch.tensor([0.1, 0.2]),
                complementary_data={"instruction": "test instruction"},
            )
        )
        assert TransitionKey.OBSERVATION in out
        assert f"{OBS_LANGUAGE}.tokens" in out[TransitionKey.OBSERVATION]
        assert f"{OBS_LANGUAGE}.attention_mask" in out[TransitionKey.OBSERVATION]
@require_package("transformers")
def test_save_and_load_pretrained_with_tokenizer_object():
    """Test saving and loading processor with tokenizer object using overrides."""
    tokenizer = MockTokenizer(vocab_size=100)
    pipeline = DataProcessorPipeline(
        [TokenizerProcessorStep(tokenizer=tokenizer, max_length=32, task_key="instruction")],
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    with tempfile.TemporaryDirectory() as temp_dir:
        pipeline.save_pretrained(temp_dir)
        # The tokenizer object itself is not serialized, so it must be
        # re-injected at load time via overrides.
        restored = DataProcessorPipeline.from_pretrained(
            temp_dir,
            config_filename="dataprocessorpipeline.json",
            overrides={"tokenizer_processor": {"tokenizer": tokenizer}},
            to_transition=identity_transition,
            to_output=identity_transition,
        )
        # The reloaded pipeline must still tokenize transitions.
        out = restored(
            create_transition(
                observation={"state": torch.tensor([1.0, 2.0])},
                action=torch.tensor([0.1, 0.2]),
                complementary_data={"instruction": "test instruction"},
            )
        )
        assert TransitionKey.OBSERVATION in out
        assert f"{OBS_LANGUAGE}.tokens" in out[TransitionKey.OBSERVATION]
        assert f"{OBS_LANGUAGE}.attention_mask" in out[TransitionKey.OBSERVATION]
@require_package("transformers")
def test_registry_functionality():
    """Test that the processor is properly registered."""
    from lerobot.processor import ProcessorStepRegistry

    # Registration under the canonical name must resolve back to the class.
    assert "tokenizer_processor" in ProcessorStepRegistry.list()
    assert ProcessorStepRegistry.get("tokenizer_processor") is TokenizerProcessorStep
@require_package("transformers")
def test_features_basic():
    """Test basic feature contract functionality."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=128)
    out = step.transform_features(
        {
            PipelineFeatureType.OBSERVATION: {
                OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(10,))
            },
            PipelineFeatureType.ACTION: {ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(5,))},
        }
    )
    obs_features = out[PipelineFeatureType.OBSERVATION]
    # Incoming features survive the transform.
    assert OBS_STATE in obs_features
    assert ACTION in out[PipelineFeatureType.ACTION]
    # Tokenized language features are appended with LANGUAGE type and a
    # max_length-sized shape.
    for key in (f"{OBS_LANGUAGE}.tokens", f"{OBS_LANGUAGE}.attention_mask"):
        assert key in obs_features
        assert obs_features[key].type == FeatureType.LANGUAGE
        assert obs_features[key].shape == (128,)
@require_package("transformers")
def test_features_with_custom_max_length():
    """Test feature contract with custom max_length."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=64)
    out = step.transform_features({PipelineFeatureType.OBSERVATION: {}})
    obs_features = out[PipelineFeatureType.OBSERVATION]
    # Declared feature shapes must track the configured max_length.
    for key in (f"{OBS_LANGUAGE}.tokens", f"{OBS_LANGUAGE}.attention_mask"):
        assert key in obs_features
        assert obs_features[key].shape == (64,)
@require_package("transformers")
def test_features_existing_features():
    """Test feature contract when tokenized features already exist."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=256)
    existing = {
        PipelineFeatureType.OBSERVATION: {
            f"{OBS_LANGUAGE}.tokens": PolicyFeature(type=FeatureType.LANGUAGE, shape=(100,)),
            f"{OBS_LANGUAGE}.attention_mask": PolicyFeature(type=FeatureType.LANGUAGE, shape=(100,)),
        }
    }
    out = step.transform_features(existing)
    # Pre-declared language features keep their original (100,) shape rather
    # than being overwritten with the step's max_length of 256.
    obs_features = out[PipelineFeatureType.OBSERVATION]
    assert obs_features[f"{OBS_LANGUAGE}.tokens"].shape == (100,)
    assert obs_features[f"{OBS_LANGUAGE}.attention_mask"].shape == (100,)
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_tokenization_parameters(mock_auto_tokenizer):
    """Test that tokenization parameters are correctly passed to tokenizer."""

    class TrackingMockTokenizer:
        """Records the args/kwargs of the most recent call."""

        def __init__(self):
            self.last_call_args = None
            self.last_call_kwargs = None

        def __call__(self, *args, **kwargs):
            self.last_call_args = args
            self.last_call_kwargs = kwargs
            # Minimal valid tokenizer output.
            return {
                "input_ids": torch.zeros(16, dtype=torch.long),
                "attention_mask": torch.ones(16, dtype=torch.long),
            }

    tracker = TrackingMockTokenizer()
    mock_auto_tokenizer.from_pretrained.return_value = tracker
    step = TokenizerProcessorStep(
        tokenizer_name="test-tokenizer",
        max_length=16,
        padding="longest",
        truncation=False,
        padding_side="left",
    )
    step(
        create_transition(
            observation={"state": torch.tensor([1.0, 2.0])},
            action=torch.tensor([0.1, 0.2]),
            complementary_data={"task": "test task"},
        )
    )
    # The single task string is wrapped in a list before tokenization.
    assert tracker.last_call_args == (["test task"],)
    # Every configured option must be forwarded verbatim.
    forwarded = tracker.last_call_kwargs
    assert forwarded["max_length"] == 16
    assert forwarded["padding"] == "longest"
    assert forwarded["padding_side"] == "left"
    assert forwarded["truncation"] is False
    assert forwarded["return_tensors"] == "pt"
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_preserves_other_complementary_data(mock_auto_tokenizer):
    """Test that other complementary data fields are preserved."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer")
    out = step(
        create_transition(
            observation={"state": torch.tensor([1.0, 2.0])},
            action=torch.tensor([0.1, 0.2]),
            complementary_data={
                "task": "test task",
                "episode_id": 123,
                "timestamp": 456.789,
                "other_field": {"nested": "data"},
            },
        )
    )
    # Every original complementary-data entry must come through unchanged.
    comp = out[TransitionKey.COMPLEMENTARY_DATA]
    assert comp["task"] == "test task"
    assert comp["episode_id"] == 123
    assert comp["timestamp"] == 456.789
    assert comp["other_field"] == {"nested": "data"}
    # Tokenized outputs land in the observation, not in complementary data.
    obs = out[TransitionKey.OBSERVATION]
    assert f"{OBS_LANGUAGE}.tokens" in obs
    assert f"{OBS_LANGUAGE}.attention_mask" in obs
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_deterministic_tokenization(mock_auto_tokenizer):
    """Test that tokenization is deterministic for the same input."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer", max_length=10)
    transition = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "consistent test"},
    )
    # Running the same transition twice must yield identical tensors.
    first = step(transition)
    second = step(transition)
    for key in (f"{OBS_LANGUAGE}.tokens", f"{OBS_LANGUAGE}.attention_mask"):
        assert torch.equal(
            first[TransitionKey.OBSERVATION][key], second[TransitionKey.OBSERVATION][key]
        )
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_empty_string_task(mock_auto_tokenizer):
    """Test handling of empty string task."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer", max_length=8)
    out = step(
        create_transition(
            observation={"state": torch.tensor([1.0, 2.0])},
            action=torch.tensor([0.1, 0.2]),
            complementary_data={"task": ""},
        )
    )
    # An empty task still tokenizes to a padded max_length tensor.
    obs = out[TransitionKey.OBSERVATION]
    assert f"{OBS_LANGUAGE}.tokens" in obs
    assert obs[f"{OBS_LANGUAGE}.tokens"].shape == (8,)
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_very_long_task(mock_auto_tokenizer):
    """Test handling of very long task strings."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    step = TokenizerProcessorStep(tokenizer_name="test-tokenizer", max_length=5, truncation=True)
    out = step(
        create_transition(
            observation={"state": torch.tensor([1.0, 2.0])},
            action=torch.tensor([0.1, 0.2]),
            # A 100-word task far exceeds max_length=5.
            complementary_data={"task": " ".join(["word"] * 100)},
        )
    )
    # Truncation caps both outputs at max_length.
    obs = out[TransitionKey.OBSERVATION]
    assert obs[f"{OBS_LANGUAGE}.tokens"].shape == (5,)
    assert obs[f"{OBS_LANGUAGE}.attention_mask"].shape == (5,)
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_custom_padding_side(mock_auto_tokenizer):
    """Test using custom padding_side parameter."""

    class PaddingSideTrackingTokenizer:
        """Records every padding_side value it is called with."""

        def __init__(self):
            self.padding_side_calls = []

        def __call__(
            self,
            text,
            max_length=512,
            truncation=True,
            padding="max_length",
            padding_side="right",
            return_tensors="pt",
            **kwargs,
        ):
            self.padding_side_calls.append(padding_side)
            # Minimal valid tokenizer output.
            return {
                "input_ids": torch.zeros(max_length, dtype=torch.long),
                "attention_mask": torch.ones(max_length, dtype=torch.long),
            }

    tracker = PaddingSideTrackingTokenizer()
    mock_auto_tokenizer.from_pretrained.return_value = tracker
    transition = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "test task"},
    )
    # Each configured padding_side must be forwarded to the tokenizer call.
    for side in ("left", "right"):
        step = TokenizerProcessorStep(tokenizer_name="test-tokenizer", max_length=10, padding_side=side)
        step(transition)
        assert tracker.padding_side_calls[-1] == side
@require_package("transformers")
def test_device_detection_cpu():
    """Test that tokenized tensors stay on CPU when other tensors are on CPU."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(10)},
            action=torch.randn(5),
            complementary_data={"task": "test task"},
        )
    )
    # With only CPU tensors present, tokenized outputs remain on CPU.
    obs = out[TransitionKey.OBSERVATION]
    assert obs[f"{OBS_LANGUAGE}.tokens"].device.type == "cpu"
    assert obs[f"{OBS_LANGUAGE}.attention_mask"].device.type == "cpu"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
@require_package("transformers")
def test_device_detection_cuda():
    """Test that tokenized tensors are moved to CUDA when other tensors are on CUDA."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(10).cuda()},
            action=torch.randn(5).cuda(),
            complementary_data={"task": "test task"},
        )
    )
    obs = out[TransitionKey.OBSERVATION]
    tokens = obs[f"{OBS_LANGUAGE}.tokens"]
    mask = obs[f"{OBS_LANGUAGE}.attention_mask"]
    # Tokenized outputs follow the input tensors onto the GPU...
    assert tokens.device.type == "cuda"
    assert mask.device.type == "cuda"
    # ...and specifically onto the same device index as the inputs.
    assert tokens.device.index == 0
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
@require_package("transformers")
def test_device_detection_multi_gpu():
    """Test that tokenized tensors match device in multi-GPU setup."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    device = torch.device("cuda:1")
    out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(10).to(device)},
            action=torch.randn(5).to(device),
            complementary_data={"task": "multi gpu test"},
        )
    )
    # Outputs must land on cuda:1 specifically, not merely on any CUDA device.
    obs = out[TransitionKey.OBSERVATION]
    assert obs[f"{OBS_LANGUAGE}.tokens"].device == device
    assert obs[f"{OBS_LANGUAGE}.attention_mask"].device == device
@require_package("transformers")
def test_device_detection_no_tensors():
    """Test that tokenized tensors stay on CPU when no other tensors exist."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    out = step(
        create_transition(
            # The observation carries no tensors, so there is no device to mirror.
            observation={"metadata": {"key": "value"}},
            complementary_data={"task": "no tensor test"},
        )
    )
    # CPU is the fallback device.
    obs = out[TransitionKey.OBSERVATION]
    assert obs[f"{OBS_LANGUAGE}.tokens"].device.type == "cpu"
    assert obs[f"{OBS_LANGUAGE}.attention_mask"].device.type == "cpu"
@require_package("transformers")
def test_device_detection_mixed_devices():
    """Test device detection when tensors are on different devices (uses first found)."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    if torch.cuda.is_available():
        out = step(
            create_transition(
                observation={
                    "observation.cpu": torch.randn(10),
                    "observation.cuda": torch.randn(10).cuda(),
                },
                complementary_data={"task": "mixed device test"},
            )
        )
        # Whichever tensor is found first decides the target device; the two
        # tokenized outputs must at least agree with each other.
        obs = out[TransitionKey.OBSERVATION]
        assert obs[f"{OBS_LANGUAGE}.tokens"].device == obs[f"{OBS_LANGUAGE}.attention_mask"].device
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
@require_package("transformers")
def test_device_detection_from_action():
    """Test that device is detected from action tensor when no observation tensors exist."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    out = step(
        create_transition(
            # No tensors in the observation: the CUDA action supplies the device.
            observation={"metadata": {"key": "value"}},
            action=torch.randn(5).cuda(),
            complementary_data={"task": "action device test"},
        )
    )
    obs = out[TransitionKey.OBSERVATION]
    assert obs[f"{OBS_LANGUAGE}.tokens"].device.type == "cuda"
    assert obs[f"{OBS_LANGUAGE}.attention_mask"].device.type == "cuda"
@require_package("transformers")
def test_device_detection_preserves_dtype():
    """Test that device detection doesn't affect dtype of tokenized tensors."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    out = step(
        create_transition(
            observation={OBS_STATE: torch.randn(10, dtype=torch.float16)},
            complementary_data={"task": "dtype test"},
        )
    )
    # Token ids stay integral and the mask is converted to bool by the step,
    # regardless of the float16 input tensor.
    obs = out[TransitionKey.OBSERVATION]
    assert obs[f"{OBS_LANGUAGE}.tokens"].dtype == torch.long
    assert obs[f"{OBS_LANGUAGE}.attention_mask"].dtype == torch.bool
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_integration_with_device_processor(mock_auto_tokenizer):
    """Test that TokenizerProcessorStep works correctly with DeviceProcessorStep in pipeline."""
    from lerobot.processor import DeviceProcessorStep

    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    # Tokenize first, then move the whole transition to cuda:0.
    pipeline = DataProcessorPipeline(
        [
            TokenizerProcessorStep(tokenizer_name="test-tokenizer", max_length=6),
            DeviceProcessorStep(device="cuda:0"),
        ],
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    out = pipeline(
        create_transition(
            observation={OBS_STATE: torch.randn(10)},
            action=torch.randn(5),
            complementary_data={"task": "pipeline test"},
        )
    )
    # DeviceProcessorStep moves every tensor, tokenized ones included.
    obs = out[TransitionKey.OBSERVATION]
    assert obs[OBS_STATE].device.type == "cuda"
    assert out[TransitionKey.ACTION].device.type == "cuda"
    assert obs[f"{OBS_LANGUAGE}.tokens"].device.type == "cuda"
    assert obs[f"{OBS_LANGUAGE}.attention_mask"].device.type == "cuda"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
@require_package("transformers")
def test_simulated_accelerate_scenario():
    """Batched data already resident on the GPU (the Accelerate case) keeps its placement."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    device = torch.device("cuda:0")
    sample = create_transition(
        observation={
            OBS_STATE: torch.randn(1, 10).to(device),
            OBS_IMAGE: torch.randn(1, 3, 224, 224).to(device),
        },
        action=torch.randn(1, 5).to(device),
        complementary_data={"task": ["accelerate test"]},  # list => batched task
    )
    out = step(sample)[TransitionKey.OBSERVATION]
    tokens = out[f"{OBS_LANGUAGE}.tokens"]
    mask = out[f"{OBS_LANGUAGE}.attention_mask"]
    assert tokens.device == device
    assert mask.device == device
    # MockTokenizer squeezes a single-item batch, hence (max_length,) rather than (1, max_length).
    assert tokens.shape == (10,)
    assert mask.shape == (10,)
# =============================================================================
# Tests for get_subtask method
# =============================================================================
@require_package("transformers")
def test_get_subtask_missing_key():
    """get_subtask yields None when complementary_data carries no "subtask" entry."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task"},  # deliberately no "subtask" key
    )
    assert step.get_subtask(sample) is None
@require_package("transformers")
def test_get_subtask_none_value():
    """get_subtask yields None when the "subtask" value is explicitly None."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": None},
    )
    assert step.get_subtask(sample) is None
@require_package("transformers")
def test_get_subtask_none_complementary_data():
    """get_subtask yields None when the whole complementary_data payload is None."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data=None,  # no complementary data at all
    )
    assert step.get_subtask(sample) is None
@require_package("transformers")
def test_get_subtask_string():
    """A plain-string subtask comes back wrapped in a single-element list."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": "pick up the cube"},
    )
    out = step.get_subtask(sample)
    assert isinstance(out, list)
    assert len(out) == 1
    assert out == ["pick up the cube"]
@require_package("transformers")
def test_get_subtask_list_of_strings():
    """A list-of-strings subtask is returned unchanged."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    subtasks = ["pick up", "move to target", "place down"]
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": subtasks},
    )
    out = step.get_subtask(sample)
    assert isinstance(out, list)
    assert len(out) == 3
    assert out == subtasks
@require_package("transformers")
def test_get_subtask_unsupported_type_integer():
    """An integer subtask is rejected: get_subtask yields None."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": 123},
    )
    assert step.get_subtask(sample) is None
@require_package("transformers")
def test_get_subtask_unsupported_type_mixed_list():
    """A list mixing strings and non-strings is rejected: get_subtask yields None."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": ["valid string", 123, "another string"]},
    )
    assert step.get_subtask(sample) is None
@require_package("transformers")
def test_get_subtask_unsupported_type_dict():
    """A dict subtask is rejected: get_subtask yields None."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": {"key": "value"}},
    )
    assert step.get_subtask(sample) is None
@require_package("transformers")
def test_get_subtask_empty_string():
    """An empty-string subtask is still valid and is wrapped in a list."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": ""},
    )
    assert step.get_subtask(sample) == [""]
@require_package("transformers")
def test_get_subtask_empty_list():
    """An empty subtask list passes through as an empty list."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": []},
    )
    assert step.get_subtask(sample) == []
# =============================================================================
# Tests for subtask tokenization in observation method
# =============================================================================
@require_package("transformers")
def test_subtask_tokenization_when_present():
    """A string subtask produces token/mask tensors under the subtask observation keys."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=8)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": "pick up the red cube"},
    )
    obs = step(sample)[TransitionKey.OBSERVATION]
    assert OBS_LANGUAGE_SUBTASK_TOKENS in obs
    assert OBS_LANGUAGE_SUBTASK_ATTENTION_MASK in obs
    tokens = obs[OBS_LANGUAGE_SUBTASK_TOKENS]
    mask = obs[OBS_LANGUAGE_SUBTASK_ATTENTION_MASK]
    # Single string => unbatched vectors padded/truncated to max_length.
    assert isinstance(tokens, torch.Tensor)
    assert isinstance(mask, torch.Tensor)
    assert tokens.shape == (8,)
    assert mask.shape == (8,)
    assert mask.dtype == torch.bool
@require_package("transformers")
def test_subtask_tokenization_not_added_when_none():
    """Without a subtask entry, only the main-task tokens appear in the observation."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=8)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task"},  # no subtask provided
    )
    obs = step(sample)[TransitionKey.OBSERVATION]
    # No subtask keys should be created.
    assert OBS_LANGUAGE_SUBTASK_TOKENS not in obs
    assert OBS_LANGUAGE_SUBTASK_ATTENTION_MASK not in obs
    # The main-task tokenization must be unaffected.
    assert f"{OBS_LANGUAGE}.tokens" in obs
    assert f"{OBS_LANGUAGE}.attention_mask" in obs
@require_package("transformers")
def test_subtask_tokenization_not_added_when_subtask_value_is_none():
    """An explicitly-None subtask value produces no subtask token keys."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=8)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": None},
    )
    obs = step(sample)[TransitionKey.OBSERVATION]
    assert OBS_LANGUAGE_SUBTASK_TOKENS not in obs
    assert OBS_LANGUAGE_SUBTASK_ATTENTION_MASK not in obs
@require_package("transformers")
def test_subtask_tokenization_list_of_strings():
    """A two-string subtask list is tokenized into batched (2, max_length) tensors."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=8)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": ["pick up", "place down"]},
    )
    obs = step(sample)[TransitionKey.OBSERVATION]
    assert OBS_LANGUAGE_SUBTASK_TOKENS in obs
    assert OBS_LANGUAGE_SUBTASK_ATTENTION_MASK in obs
    # batch_size=2, seq_len=8
    assert obs[OBS_LANGUAGE_SUBTASK_TOKENS].shape == (2, 8)
    assert obs[OBS_LANGUAGE_SUBTASK_ATTENTION_MASK].shape == (2, 8)
@require_package("transformers")
def test_subtask_tokenization_device_cpu():
    """Subtask tokens stay on CPU when the surrounding tensors live on CPU."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    # Everything starts on the CPU.
    sample = create_transition(
        observation={OBS_STATE: torch.randn(10)},
        action=torch.randn(5),
        complementary_data={"task": "main task", "subtask": "pick up cube"},
    )
    obs = step(sample)[TransitionKey.OBSERVATION]
    assert obs[OBS_LANGUAGE_SUBTASK_TOKENS].device.type == "cpu"
    assert obs[OBS_LANGUAGE_SUBTASK_ATTENTION_MASK].device.type == "cpu"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
@require_package("transformers")
def test_subtask_tokenization_device_cuda():
    """Subtask tokens follow the other tensors onto CUDA."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    # Everything starts on the GPU.
    sample = create_transition(
        observation={OBS_STATE: torch.randn(10).cuda()},
        action=torch.randn(5).cuda(),
        complementary_data={"task": "main task", "subtask": "pick up cube"},
    )
    obs = step(sample)[TransitionKey.OBSERVATION]
    assert obs[OBS_LANGUAGE_SUBTASK_TOKENS].device.type == "cuda"
    assert obs[OBS_LANGUAGE_SUBTASK_ATTENTION_MASK].device.type == "cuda"
@require_package("transformers")
def test_subtask_tokenization_preserves_other_observation_data():
    """Adding subtask tokens must not disturb pre-existing observation entries."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    original_state = torch.tensor([1.0, 2.0, 3.0])
    sample = create_transition(
        observation={"state": original_state.clone()},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": "pick up cube"},
    )
    obs = step(sample)[TransitionKey.OBSERVATION]
    # The state tensor passes through unmodified.
    assert torch.equal(obs["state"], original_state)
    # Both the task and subtask tokenizations are present.
    assert f"{OBS_LANGUAGE}.tokens" in obs
    assert f"{OBS_LANGUAGE}.attention_mask" in obs
    assert OBS_LANGUAGE_SUBTASK_TOKENS in obs
    assert OBS_LANGUAGE_SUBTASK_ATTENTION_MASK in obs
@require_package("transformers")
def test_subtask_attention_mask_dtype():
    """The subtask attention mask is emitted as a boolean tensor."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": "pick up cube"},
    )
    mask = step(sample)[TransitionKey.OBSERVATION][OBS_LANGUAGE_SUBTASK_ATTENTION_MASK]
    assert mask.dtype == torch.bool
@require_package("transformers")
def test_subtask_tokenization_deterministic():
    """Tokenizing the same transition twice yields identical subtask tensors."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=10)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": "consistent subtask"},
    )
    first = step(sample)[TransitionKey.OBSERVATION]
    second = step(sample)[TransitionKey.OBSERVATION]
    # Tokens and masks must match exactly across runs.
    assert torch.equal(first[OBS_LANGUAGE_SUBTASK_TOKENS], second[OBS_LANGUAGE_SUBTASK_TOKENS])
    assert torch.equal(
        first[OBS_LANGUAGE_SUBTASK_ATTENTION_MASK], second[OBS_LANGUAGE_SUBTASK_ATTENTION_MASK]
    )
@require_package("transformers")
@patch("lerobot.processor.tokenizer_processor.AutoTokenizer")
def test_subtask_tokenization_integration_with_pipeline(mock_auto_tokenizer):
    """Task and subtask tokenization both survive a full DataProcessorPipeline pass."""
    mock_auto_tokenizer.from_pretrained.return_value = MockTokenizer(vocab_size=100)
    pipeline = DataProcessorPipeline(
        [TokenizerProcessorStep(tokenizer_name="test-tokenizer", max_length=6)],
        to_transition=identity_transition,
        to_output=identity_transition,
    )
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": "subtask instruction"},
    )
    out = pipeline(sample)
    assert TransitionKey.OBSERVATION in out
    obs = out[TransitionKey.OBSERVATION]
    # Main-task tokens.
    assert f"{OBS_LANGUAGE}.tokens" in obs
    assert f"{OBS_LANGUAGE}.attention_mask" in obs
    # Subtask tokens.
    assert OBS_LANGUAGE_SUBTASK_TOKENS in obs
    assert OBS_LANGUAGE_SUBTASK_ATTENTION_MASK in obs
    # Single sample => unbatched (max_length,) shapes.
    assert obs[f"{OBS_LANGUAGE}.tokens"].shape == (6,)
    assert obs[OBS_LANGUAGE_SUBTASK_TOKENS].shape == (6,)
@require_package("transformers")
def test_subtask_not_added_for_unsupported_types():
    """An unsupported subtask type (here an int) is ignored rather than tokenized."""
    step = TokenizerProcessorStep(tokenizer=MockTokenizer(vocab_size=100), max_length=8)
    sample = create_transition(
        observation={"state": torch.tensor([1.0, 2.0])},
        action=torch.tensor([0.1, 0.2]),
        complementary_data={"task": "main task", "subtask": 123},
    )
    obs = step(sample)[TransitionKey.OBSERVATION]
    # No subtask keys should be created for unsupported types.
    assert OBS_LANGUAGE_SUBTASK_TOKENS not in obs
    assert OBS_LANGUAGE_SUBTASK_ATTENTION_MASK not in obs
    # The main task is still tokenized.
    assert f"{OBS_LANGUAGE}.tokens" in obs
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_tokenizer_processor.py",
"license": "Apache License 2.0",
"lines": 1146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/processor/test_vqbet_processor.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for VQBeT policy processor."""
import tempfile
import pytest
import torch
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
from lerobot.policies.vqbet.configuration_vqbet import VQBeTConfig
from lerobot.policies.vqbet.processor_vqbet import make_vqbet_pre_post_processors
from lerobot.processor import (
AddBatchDimensionProcessorStep,
DataProcessorPipeline,
DeviceProcessorStep,
NormalizerProcessorStep,
RenameObservationsProcessorStep,
TransitionKey,
UnnormalizerProcessorStep,
)
from lerobot.processor.converters import create_transition, transition_to_batch
from lerobot.utils.constants import ACTION, OBS_IMAGE, OBS_STATE
def create_default_config():
    """Return a VQBeTConfig preset with small state/image inputs and a 7-DoF action output."""
    cfg = VQBeTConfig()
    cfg.device = "cpu"
    cfg.normalization_mapping = {
        FeatureType.STATE: NormalizationMode.MEAN_STD,
        FeatureType.VISUAL: NormalizationMode.IDENTITY,
        FeatureType.ACTION: NormalizationMode.MIN_MAX,
    }
    cfg.input_features = {
        OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(8,)),
        OBS_IMAGE: PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)),
    }
    cfg.output_features = {ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(7,))}
    return cfg
def create_default_stats():
    """Return dataset statistics matching create_default_config's feature shapes."""
    return {
        OBS_STATE: {"mean": torch.zeros(8), "std": torch.ones(8)},
        OBS_IMAGE: {},  # identity normalization: no stats required
        ACTION: {"min": -torch.ones(7), "max": torch.ones(7)},
    }
def test_make_vqbet_processor_basic():
    """The factory returns correctly named pipelines with the expected step ordering."""
    preprocessor, postprocessor = make_vqbet_pre_post_processors(
        create_default_config(), create_default_stats()
    )
    assert preprocessor.name == "policy_preprocessor"
    assert postprocessor.name == "policy_postprocessor"
    # Preprocessor: rename -> add batch dim -> device -> normalize.
    expected_pre = [
        RenameObservationsProcessorStep,
        AddBatchDimensionProcessorStep,
        DeviceProcessorStep,
        NormalizerProcessorStep,
    ]
    assert len(preprocessor.steps) == len(expected_pre)
    for step, step_type in zip(preprocessor.steps, expected_pre):
        assert isinstance(step, step_type)
    # Postprocessor: unnormalize -> device.
    expected_post = [UnnormalizerProcessorStep, DeviceProcessorStep]
    assert len(postprocessor.steps) == len(expected_post)
    for step, step_type in zip(postprocessor.steps, expected_post):
        assert isinstance(step, step_type)
def test_vqbet_processor_with_images():
    """Preprocessing batches state, image, and action tensors of an unbatched sample.

    Fix: the postprocessor was bound to a named variable but never used; it is
    now discarded with `_` to make the test's scope explicit.
    """
    config = create_default_config()
    stats = create_default_stats()
    # Only the preprocessor is exercised here.
    preprocessor, _ = make_vqbet_pre_post_processors(
        config,
        stats,
    )
    # Create test data with images and states.
    observation = {
        OBS_STATE: torch.randn(8),
        OBS_IMAGE: torch.randn(3, 224, 224),
    }
    action = torch.randn(7)
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    processed = preprocessor(batch)
    # AddBatchDimensionProcessorStep should have prepended a batch axis of size 1.
    assert processed[OBS_STATE].shape == (1, 8)
    assert processed[OBS_IMAGE].shape == (1, 3, 224, 224)
    assert processed[TransitionKey.ACTION.value].shape == (1, 7)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_vqbet_processor_cuda():
    """CPU inputs are moved to CUDA by the preprocessor; actions return to CPU afterwards."""
    config = create_default_config()
    config.device = "cuda"
    preprocessor, postprocessor = make_vqbet_pre_post_processors(config, create_default_stats())
    # Start from CPU-resident tensors.
    batch = transition_to_batch(
        create_transition(
            {
                OBS_STATE: torch.randn(8),
                OBS_IMAGE: torch.randn(3, 224, 224),
            },
            torch.randn(7),
        )
    )
    processed = preprocessor(batch)
    # Every processed tensor should now live on the GPU.
    for key in (OBS_STATE, OBS_IMAGE, TransitionKey.ACTION.value):
        assert processed[key].device.type == "cuda"
    # The postprocessor ships the action back to the CPU.
    assert postprocessor(processed[TransitionKey.ACTION.value]).device.type == "cpu"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_vqbet_processor_accelerate_scenario():
    """Pre-batched GPU-resident data (as under Accelerate) keeps its device.

    Fix: the postprocessor was bound to a named variable but never used; it is
    now discarded with `_` to make the test's scope explicit.
    """
    config = create_default_config()
    config.device = "cuda:0"
    stats = create_default_stats()
    preprocessor, _ = make_vqbet_pre_post_processors(
        config,
        stats,
    )
    # Simulate Accelerate: data already on GPU and batched.
    device = torch.device("cuda:0")
    observation = {
        OBS_STATE: torch.randn(1, 8).to(device),
        OBS_IMAGE: torch.randn(1, 3, 224, 224).to(device),
    }
    action = torch.randn(1, 7).to(device)
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    processed = preprocessor(batch)
    # Data must remain on the same GPU rather than being bounced elsewhere.
    assert processed[OBS_STATE].device == device
    assert processed[OBS_IMAGE].device == device
    assert processed[TransitionKey.ACTION.value].device == device
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires at least 2 GPUs")
def test_vqbet_processor_multi_gpu():
    """Data living on a secondary GPU stays there through preprocessing.

    Fix: the postprocessor was bound to a named variable but never used; it is
    now discarded with `_` to make the test's scope explicit.
    """
    config = create_default_config()
    config.device = "cuda:0"
    stats = create_default_stats()
    preprocessor, _ = make_vqbet_pre_post_processors(
        config,
        stats,
    )
    # Put the data on a different GPU than the one the config names.
    device = torch.device("cuda:1")
    observation = {
        OBS_STATE: torch.randn(1, 8).to(device),
        OBS_IMAGE: torch.randn(1, 3, 224, 224).to(device),
    }
    action = torch.randn(1, 7).to(device)
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    processed = preprocessor(batch)
    # Data must stay on cuda:1.
    assert processed[OBS_STATE].device == device
    assert processed[OBS_IMAGE].device == device
    assert processed[TransitionKey.ACTION.value].device == device
def test_vqbet_processor_without_stats():
    """Processor construction and preprocessing both succeed with dataset_stats=None."""
    preprocessor, postprocessor = make_vqbet_pre_post_processors(
        create_default_config(), dataset_stats=None
    )
    # Both pipelines are still created.
    assert preprocessor is not None
    assert postprocessor is not None
    # Preprocessing a sample must not fail without stats.
    batch = transition_to_batch(
        create_transition(
            {
                OBS_STATE: torch.randn(8),
                OBS_IMAGE: torch.randn(3, 224, 224),
            },
            torch.randn(7),
        )
    )
    assert preprocessor(batch) is not None
def test_vqbet_processor_save_and_load():
    """A preprocessor saved with save_pretrained can be reloaded and still batches correctly.

    Fix: the postprocessor was bound to a named variable but never used; it is
    now discarded with `_` to make the test's scope explicit.
    """
    config = create_default_config()
    stats = create_default_stats()
    preprocessor, _ = make_vqbet_pre_post_processors(
        config,
        stats,
    )
    with tempfile.TemporaryDirectory() as tmpdir:
        # Round-trip the preprocessor through disk.
        preprocessor.save_pretrained(tmpdir)
        loaded_preprocessor = DataProcessorPipeline.from_pretrained(
            tmpdir, config_filename="policy_preprocessor.json"
        )
        # The reloaded pipeline must behave like the original.
        observation = {
            OBS_STATE: torch.randn(8),
            OBS_IMAGE: torch.randn(3, 224, 224),
        }
        action = torch.randn(7)
        transition = create_transition(observation, action)
        batch = transition_to_batch(transition)
        processed = loaded_preprocessor(batch)
        assert processed[OBS_STATE].shape == (1, 8)
        assert processed[OBS_IMAGE].shape == (1, 3, 224, 224)
        assert processed[TransitionKey.ACTION.value].shape == (1, 7)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_vqbet_processor_mixed_precision():
    """float16 casting by the device step propagates when the normalizer's dtype matches.

    Fix: the postprocessor was bound to a named variable but never used; it is
    now discarded with `_` to make the test's scope explicit.
    """
    config = create_default_config()
    config.device = "cuda"
    stats = create_default_stats()
    preprocessor, _ = make_vqbet_pre_post_processors(
        config,
        stats,
    )
    # Rebuild the step list so the device step casts to float16 and the
    # normalizer keeps its stats in the same dtype/device.
    modified_steps = []
    for step in preprocessor.steps:
        if isinstance(step, DeviceProcessorStep):
            modified_steps.append(DeviceProcessorStep(device=config.device, float_dtype="float16"))
        elif isinstance(step, NormalizerProcessorStep):
            modified_steps.append(
                NormalizerProcessorStep(
                    features=step.features,
                    norm_map=step.norm_map,
                    stats=step.stats,
                    device=config.device,
                    dtype=torch.float16,  # Match the float16 dtype
                )
            )
        else:
            modified_steps.append(step)
    preprocessor.steps = modified_steps
    # Start from float32 inputs.
    observation = {
        OBS_STATE: torch.randn(8, dtype=torch.float32),
        OBS_IMAGE: torch.randn(3, 224, 224, dtype=torch.float32),
    }
    action = torch.randn(7, dtype=torch.float32)
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    processed = preprocessor(batch)
    # Every float tensor should come out as float16.
    assert processed[OBS_STATE].dtype == torch.float16
    assert processed[OBS_IMAGE].dtype == torch.float16
    assert processed[TransitionKey.ACTION.value].dtype == torch.float16
def test_vqbet_processor_large_batch():
    """Preprocessing preserves an existing large batch dimension.

    Fix: the postprocessor was bound to a named variable but never used; it is
    now discarded with `_` to make the test's scope explicit.
    """
    config = create_default_config()
    stats = create_default_stats()
    preprocessor, _ = make_vqbet_pre_post_processors(
        config,
        stats,
    )
    batch_size = 128
    observation = {
        OBS_STATE: torch.randn(batch_size, 8),
        OBS_IMAGE: torch.randn(batch_size, 3, 224, 224),
    }
    action = torch.randn(batch_size, 7)
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    processed = preprocessor(batch)
    # Already-batched inputs must not gain an extra dimension.
    assert processed[OBS_STATE].shape == (batch_size, 8)
    assert processed[OBS_IMAGE].shape == (batch_size, 3, 224, 224)
    assert processed[TransitionKey.ACTION.value].shape == (batch_size, 7)
def test_vqbet_processor_sequential_processing():
    """Repeated preprocessing of fresh samples yields consistently shaped outputs.

    Fix: the postprocessor was bound to a named variable but never used; it is
    now discarded to make the test's scope explicit.
    """
    config = create_default_config()
    stats = create_default_stats()
    preprocessor, _unused_postprocessor = make_vqbet_pre_post_processors(
        config,
        stats,
    )
    # Process several independent samples one after another.
    results = []
    for _ in range(5):
        observation = {
            OBS_STATE: torch.randn(8),
            OBS_IMAGE: torch.randn(3, 224, 224),
        }
        action = torch.randn(7)
        transition = create_transition(observation, action)
        batch = transition_to_batch(transition)
        results.append(preprocessor(batch))
    # Each run must produce the same batched shapes.
    for result in results:
        assert result[OBS_STATE].shape == (1, 8)
        assert result[OBS_IMAGE].shape == (1, 3, 224, 224)
        assert result[TransitionKey.ACTION.value].shape == (1, 7)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_vqbet_processor_bfloat16_device_float32_normalizer():
    """Test: DeviceProcessor(bfloat16) + NormalizerProcessor(float32) → output bfloat16 via automatic adaptation"""
    config = create_default_config()
    config.device = "cuda"
    stats = create_default_stats()
    # Only the preprocessor is exercised; the postprocessor is discarded.
    preprocessor, _ = make_vqbet_pre_post_processors(
        config,
        stats,
    )
    # Modify the pipeline to use bfloat16 device processor with float32 normalizer
    modified_steps = []
    for step in preprocessor.steps:
        if isinstance(step, DeviceProcessorStep):
            # Device processor converts to bfloat16
            modified_steps.append(DeviceProcessorStep(device=config.device, float_dtype="bfloat16"))
        elif isinstance(step, NormalizerProcessorStep):
            # Normalizer stays configured as float32 (will auto-adapt to bfloat16)
            modified_steps.append(
                NormalizerProcessorStep(
                    features=step.features,
                    norm_map=step.norm_map,
                    stats=step.stats,
                    device=config.device,
                    dtype=torch.float32,  # Deliberately configured as float32
                )
            )
        else:
            modified_steps.append(step)
    preprocessor.steps = modified_steps
    # Verify initial normalizer configuration
    # NOTE(review): index 3 assumes the factory's step order
    # (rename, add-batch-dim, device, normalize) — confirm if the factory changes.
    normalizer_step = preprocessor.steps[3]  # NormalizerProcessorStep
    assert normalizer_step.dtype == torch.float32
    # Create test data with both state and visual observations
    observation = {
        OBS_STATE: torch.randn(8, dtype=torch.float32),
        OBS_IMAGE: torch.randn(3, 224, 224, dtype=torch.float32),
    }
    action = torch.randn(7, dtype=torch.float32)
    transition = create_transition(observation, action)
    batch = transition_to_batch(transition)
    # Process through full pipeline
    processed = preprocessor(batch)
    # Verify: DeviceProcessor → bfloat16, NormalizerProcessor adapts → final output is bfloat16
    assert processed[OBS_STATE].dtype == torch.bfloat16
    assert processed[OBS_IMAGE].dtype == torch.bfloat16  # IDENTITY normalization still gets dtype conversion
    assert processed[TransitionKey.ACTION.value].dtype == torch.bfloat16
    # Verify normalizer automatically adapted its internal state
    assert normalizer_step.dtype == torch.bfloat16
    # Check state stats (has normalization)
    # NOTE(review): `_tensor_stats` is a private attribute of NormalizerProcessorStep;
    # presumably a per-feature dict of stat tensors — verify against the processor implementation.
    for stat_tensor in normalizer_step._tensor_stats[OBS_STATE].values():
        assert stat_tensor.dtype == torch.bfloat16
    # OBS_IMAGE uses IDENTITY normalization, so no stats to check
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/processor/test_vqbet_processor.py",
"license": "Apache License 2.0",
"lines": 366,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:tests/utils/test_visualization_utils.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import sys
from types import SimpleNamespace
import numpy as np
import pytest
from lerobot.processor import TransitionKey
from lerobot.utils.constants import OBS_STATE
@pytest.fixture
def mock_rerun(monkeypatch):
    """
    Provide a mock `rerun` module so tests don't depend on the real library.
    Also reload the module-under-test so it binds to this mock `rr`.
    """
    recorded = []

    class DummyScalar:
        # Stand-in for `rr.Scalars`; keeps the logged value as a float.
        def __init__(self, value):
            self.value = float(value)

    class DummyImage:
        # Stand-in for `rr.Image`; keeps the logged array untouched.
        def __init__(self, arr):
            self.arr = arr

    def record_log(key, obj=None, **kwargs):
        # Accept either positional `obj` or keyword `entity` and record remaining kwargs.
        if obj is None and "entity" in kwargs:
            obj = kwargs.pop("entity")
        recorded.append((key, obj, kwargs))

    def _noop(*args, **kwargs):
        return None

    fake_rerun = SimpleNamespace(
        Scalars=DummyScalar,
        Image=DummyImage,
        log=record_log,
        init=_noop,
        spawn=_noop,
    )
    # Inject fake module into sys.modules so `import rerun` resolves to the mock.
    monkeypatch.setitem(sys.modules, "rerun", fake_rerun)
    # Now import and reload the module under test, to bind to our rerun mock.
    import lerobot.utils.visualization_utils as vu

    importlib.reload(vu)
    # Expose both the reloaded module and the call recorder.
    yield vu, recorded
def _keys(calls):
"""Helper to extract just the keys logged to rr.log"""
return [k for (k, _obj, _kw) in calls]
def _obj_for(calls, key):
"""Find the first object logged under a given key."""
for k, obj, _kw in calls:
if k == key:
return obj
raise KeyError(f"Key {key} not found in calls: {calls}")
def _kwargs_for(calls, key):
for k, _obj, kw in calls:
if k == key:
return kw
raise KeyError(f"Key {key} not found in calls: {calls}")
def test_log_rerun_data_envtransition_scalars_and_image(mock_rerun):
    """Scalars, vectors and CHW images pulled out of an EnvTransition dict are logged
    under the expected keys and with the expected mock types."""
    vu, calls = mock_rerun
    # Build EnvTransition dict
    obs = {
        f"{OBS_STATE}.temperature": np.float32(25.0),
        # CHW image should be converted to HWC for rr.Image
        "observation.camera": np.zeros((3, 10, 20), dtype=np.uint8),
    }
    act = {
        "action.throttle": 0.7,
        # 1D array should log individual Scalars with suffix _i
        "action.vector": np.array([1.0, 2.0], dtype=np.float32),
    }
    transition = {
        TransitionKey.OBSERVATION: obs,
        TransitionKey.ACTION: act,
    }
    # Extract observation and action data from transition like in the real call sites
    obs_data = transition.get(TransitionKey.OBSERVATION, {})
    action_data = transition.get(TransitionKey.ACTION, {})
    vu.log_rerun_data(observation=obs_data, action=action_data)
    # We expect:
    # - observation.state.temperature -> Scalars
    # - observation.camera -> Image (HWC) with static=True
    # - action.throttle -> Scalars
    # - action.vector_0, action.vector_1 -> Scalars
    expected_keys = {
        f"{OBS_STATE}.temperature",
        "observation.camera",
        "action.throttle",
        "action.vector_0",
        "action.vector_1",
    }
    assert set(_keys(calls)) == expected_keys
    # Check scalar types and values
    temp_obj = _obj_for(calls, f"{OBS_STATE}.temperature")
    assert type(temp_obj).__name__ == "DummyScalar"
    assert temp_obj.value == pytest.approx(25.0)
    throttle_obj = _obj_for(calls, "action.throttle")
    assert type(throttle_obj).__name__ == "DummyScalar"
    assert throttle_obj.value == pytest.approx(0.7)
    # 1D action vector is exploded into one Scalars entry per component.
    v0 = _obj_for(calls, "action.vector_0")
    v1 = _obj_for(calls, "action.vector_1")
    assert type(v0).__name__ == "DummyScalar"
    assert type(v1).__name__ == "DummyScalar"
    assert v0.value == pytest.approx(1.0)
    assert v1.value == pytest.approx(2.0)
    # Check image handling: CHW -> HWC
    img_obj = _obj_for(calls, "observation.camera")
    assert type(img_obj).__name__ == "DummyImage"
    assert img_obj.arr.shape == (10, 20, 3)  # transposed
    assert _kwargs_for(calls, "observation.camera").get("static", False) is True  # static=True for images
def test_log_rerun_data_plain_list_ordering_and_prefixes(mock_rerun):
    """Un-prefixed observation/action dicts get `observation.`/`action.` prefixes added,
    `None` values are skipped, and already-HWC images are logged unchanged."""
    vu, calls = mock_rerun
    # First dict without prefixes treated as observation
    # Second dict without prefixes treated as action
    obs_plain = {
        "temp": 1.5,
        # Already HWC image => should stay as-is
        "img": np.zeros((5, 6, 3), dtype=np.uint8),
        "none": None,  # should be skipped
    }
    act_plain = {
        "throttle": 0.3,
        "vec": np.array([9, 8, 7], dtype=np.float32),
    }
    # Extract observation and action data from list like the old function logic did
    # First dict was treated as observation, second as action
    vu.log_rerun_data(observation=obs_plain, action=act_plain)
    # Expected keys with auto-prefixes; note "none" is absent because None values are dropped.
    expected = {
        "observation.temp",
        "observation.img",
        "action.throttle",
        "action.vec_0",
        "action.vec_1",
        "action.vec_2",
    }
    logged = set(_keys(calls))
    assert logged == expected
    # Scalars
    t = _obj_for(calls, "observation.temp")
    assert type(t).__name__ == "DummyScalar"
    assert t.value == pytest.approx(1.5)
    throttle = _obj_for(calls, "action.throttle")
    assert type(throttle).__name__ == "DummyScalar"
    assert throttle.value == pytest.approx(0.3)
    # Image stays HWC
    img = _obj_for(calls, "observation.img")
    assert type(img).__name__ == "DummyImage"
    assert img.arr.shape == (5, 6, 3)
    assert _kwargs_for(calls, "observation.img").get("static", False) is True
    # Vectors: each component logged individually with the `_i` suffix.
    for i, val in enumerate([9, 8, 7]):
        o = _obj_for(calls, f"action.vec_{i}")
        assert type(o).__name__ == "DummyScalar"
        assert o.value == pytest.approx(val)
def test_log_rerun_data_kwargs_only(mock_rerun):
    """Already-prefixed keys are logged verbatim; a single-channel HWC image is not transposed."""
    vu, calls = mock_rerun
    vu.log_rerun_data(
        observation={"observation.temp": 10.0, "observation.gray": np.zeros((8, 8, 1), dtype=np.uint8)},
        action={"action.a": 1.0},
    )
    keys = set(_keys(calls))
    assert "observation.temp" in keys
    assert "observation.gray" in keys
    assert "action.a" in keys
    temp = _obj_for(calls, "observation.temp")
    assert type(temp).__name__ == "DummyScalar"
    assert temp.value == pytest.approx(10.0)
    img = _obj_for(calls, "observation.gray")
    assert type(img).__name__ == "DummyImage"
    assert img.arr.shape == (8, 8, 1)  # remains HWC
    assert _kwargs_for(calls, "observation.gray").get("static", False) is True
    a = _obj_for(calls, "action.a")
    assert type(a).__name__ == "DummyScalar"
    assert a.value == pytest.approx(1.0)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/utils/test_visualization_utils.py",
"license": "Apache License 2.0",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:src/lerobot/datasets/streaming_dataset.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable, Generator, Iterator
from pathlib import Path
import datasets
import numpy as np
import torch
from datasets import load_dataset
from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDatasetMetadata
from lerobot.datasets.utils import (
Backtrackable,
LookAheadError,
LookBackError,
check_version_compatibility,
find_float_index,
get_delta_indices,
is_float_in_list,
item_to_torch,
safe_shard,
)
from lerobot.datasets.video_utils import (
VideoDecoderCache,
decode_video_frames_torchcodec,
)
from lerobot.utils.constants import HF_LEROBOT_HOME, LOOKAHEAD_BACKTRACKTABLE, LOOKBACK_BACKTRACKTABLE
class StreamingLeRobotDataset(torch.utils.data.IterableDataset):
    """LeRobotDataset with streaming capabilities.

    This class extends LeRobotDataset to add streaming functionality, allowing data to be streamed
    rather than loaded entirely into memory. This is especially useful for large datasets that may
    not fit in memory or when you want to quickly explore a dataset without downloading it completely.

    The key innovation is using a Backtrackable iterator that maintains a bounded buffer of recent
    items, allowing us to access previous frames for delta timestamps without loading the entire
    dataset into memory.

    Example:
        Basic usage:
        ```python
        from lerobot.common.datasets.streaming_dataset import StreamingLeRobotDataset

        # Create a streaming dataset with delta timestamps
        delta_timestamps = {
            "observation.image": [-1.0, -0.5, 0.0],  # 1 sec ago, 0.5 sec ago, current
            "action": [0.0, 0.1, 0.2],  # current, 0.1 sec future, 0.2 sec future
        }
        dataset = StreamingLeRobotDataset(
            repo_id="your-dataset-repo-id",
            delta_timestamps=delta_timestamps,
            streaming=True,
            buffer_size=1000,
        )
        # Iterate over the dataset
        for i, item in enumerate(dataset):
            print(f"Sample {i}: Episode {item['episode_index']} Frame {item['frame_index']}")
            # item will contain stacked frames according to delta_timestamps
            if i >= 10:
                break
        ```
    """

    def __init__(
        self,
        repo_id: str,
        root: str | Path | None = None,
        episodes: list[int] | None = None,
        image_transforms: Callable | None = None,
        delta_timestamps: dict[str, list[float]] | None = None,
        tolerance_s: float = 1e-4,
        revision: str | None = None,
        force_cache_sync: bool = False,
        streaming: bool = True,
        buffer_size: int = 1000,
        max_num_shards: int = 16,
        seed: int = 42,
        rng: np.random.Generator | None = None,
        shuffle: bool = True,
    ):
        """Initialize a StreamingLeRobotDataset.

        Args:
            repo_id (str): This is the repo id that will be used to fetch the dataset.
            root (Path | None, optional): Local directory to use for downloading/writing files.
            episodes (list[int] | None, optional): If specified, this will only load episodes specified by
                their episode_index in this list.
            image_transforms (Callable | None, optional): Transform to apply to image data.
            delta_timestamps (dict[str, list[float]] | None, optional): Per-feature time offsets in
                seconds, relative to the current frame, used to stack past/future frames.
            tolerance_s (float, optional): Tolerance in seconds for timestamp matching.
            revision (str, optional): Git revision id (branch name, tag, or commit hash).
            force_cache_sync (bool, optional): Flag to sync and refresh local files first.
            streaming (bool, optional): Whether to stream the dataset or load it all. Defaults to True.
            buffer_size (int, optional): Buffer size for shuffling when streaming. Defaults to 1000.
            max_num_shards (int, optional): Number of shards to re-shard the input dataset into. Defaults to 16.
            seed (int, optional): Reproducibility random seed.
            rng (np.random.Generator | None, optional): Random number generator.
            shuffle (bool, optional): Whether to shuffle the dataset across exhaustions. Defaults to True.
        """
        super().__init__()
        self.repo_id = repo_id
        self.root = Path(root) if root else HF_LEROBOT_HOME / repo_id
        # A non-None `root` means data is read from the local directory instead of the Hub.
        self.streaming_from_local = root is not None
        self.image_transforms = image_transforms
        self.episodes = episodes
        self.tolerance_s = tolerance_s
        self.revision = revision if revision else CODEBASE_VERSION
        self.seed = seed
        self.rng = rng if rng is not None else np.random.default_rng(seed)
        self.shuffle = shuffle
        self.streaming = streaming
        self.buffer_size = buffer_size
        # We cache the video decoders to avoid re-initializing them at each frame (avoiding a ~10x slowdown)
        self.video_decoder_cache = None
        self.root.mkdir(exist_ok=True, parents=True)
        # Load metadata
        self.meta = LeRobotDatasetMetadata(
            self.repo_id, self.root, self.revision, force_cache_sync=force_cache_sync
        )
        # Check version
        check_version_compatibility(self.repo_id, self.meta._version, CODEBASE_VERSION)
        self.delta_timestamps = None
        self.delta_indices = None
        if delta_timestamps is not None:
            self._validate_delta_timestamp_keys(delta_timestamps)  # raises ValueError if invalid
            self.delta_timestamps = delta_timestamps
            # Convert second offsets into integer frame offsets using the dataset fps.
            self.delta_indices = get_delta_indices(self.delta_timestamps, self.fps)
        self.hf_dataset: datasets.IterableDataset = load_dataset(
            self.repo_id if not self.streaming_from_local else str(self.root),
            split="train",
            streaming=self.streaming,
            data_files="data/*/*.parquet",
            revision=self.revision,
        )
        # Cap the shard count: more shards than requested are collapsed via safe_shard in __iter__.
        self.num_shards = min(self.hf_dataset.num_shards, max_num_shards)

    @property
    def num_frames(self):
        """Total number of frames in the dataset (from metadata)."""
        return self.meta.total_frames

    @property
    def num_episodes(self):
        """Total number of episodes in the dataset (from metadata)."""
        return self.meta.total_episodes

    @property
    def fps(self):
        """Frames per second of the dataset (from metadata)."""
        return self.meta.fps

    @staticmethod
    def _iter_random_indices(
        rng: np.random.Generator, buffer_size: int, random_batch_size=100
    ) -> Iterator[int]:
        """Yield an endless stream of random buffer positions in [0, buffer_size).

        Indices are drawn in batches of `random_batch_size` to amortize RNG overhead.
        """
        while True:
            yield from (int(i) for i in rng.integers(0, buffer_size, size=random_batch_size))

    @staticmethod
    def _infinite_generator_over_elements(rng: np.random.Generator, elements: list[int]) -> Iterator[int]:
        """Yield elements sampled uniformly (with replacement) from `elements`, forever."""
        while True:
            yield rng.choice(elements)

    # TODO(fracapuano): Implement multi-threaded prefetching to accelerate data loading.
    # The current sequential iteration is a bottleneck. A producer-consumer pattern
    # could be used with a ThreadPoolExecutor to run `make_frame` (especially video decoding)
    # in parallel, feeding a queue from which this iterator will yield processed items.
    def __iter__(self) -> Iterator[dict[str, torch.Tensor]]:
        """Iterate over frames with shuffle-buffer randomization across shards."""
        if self.video_decoder_cache is None:
            self.video_decoder_cache = VideoDecoderCache()
        # keep the same seed across exhaustions if shuffle is False, otherwise shuffle data across exhaustions
        rng = np.random.default_rng(self.seed) if not self.shuffle else self.rng
        buffer_indices_generator = self._iter_random_indices(rng, self.buffer_size)
        idx_to_backtrack_dataset = {
            idx: self._make_backtrackable_dataset(safe_shard(self.hf_dataset, idx, self.num_shards))
            for idx in range(self.num_shards)
        }
        # This buffer is populated while iterating on the dataset's shards
        # the logic is to add 2 levels of randomness:
        # (1) sample one shard at random from the ones available, and
        # (2) sample one frame from the shard sampled at (1)
        frames_buffer = []
        while available_shards := list(idx_to_backtrack_dataset.keys()):
            shard_key = next(self._infinite_generator_over_elements(rng, available_shards))
            backtrack_dataset = idx_to_backtrack_dataset[shard_key]  # selects which shard to iterate on
            try:
                for frame in self.make_frame(backtrack_dataset):
                    if len(frames_buffer) == self.buffer_size:
                        i = next(buffer_indices_generator)  # samples an element from the buffer
                        yield frames_buffer[i]
                        frames_buffer[i] = frame
                    else:
                        frames_buffer.append(frame)
                    break  # random shard sampled, switch shard
            except (
                RuntimeError,
                StopIteration,
            ):  # NOTE: StopIteration inside a generator throws a RuntimeError since python 3.7
                del idx_to_backtrack_dataset[shard_key]  # Remove exhausted shard, onto another shard
        # Once shards are all exhausted, shuffle the buffer and yield the remaining frames
        rng.shuffle(frames_buffer)
        yield from frames_buffer

    def _get_window_steps(
        self, delta_timestamps: dict[str, list[float]] | None = None, dynamic_bounds: bool = False
    ) -> tuple[int, int]:
        """Return (lookback, lookahead) window sizes, in steps, for the Backtrackable buffer."""
        if delta_timestamps is None:
            return 1, 1
        if not dynamic_bounds:
            # Fix the windows
            lookback = LOOKBACK_BACKTRACKTABLE
            lookahead = LOOKAHEAD_BACKTRACKTABLE
        else:
            # Dynamically adjust the windows based on the given delta_timesteps
            all_timestamps = sum(delta_timestamps.values(), [])
            lookback = min(all_timestamps) * self.fps
            lookahead = max(all_timestamps) * self.fps
            # When lookback is >=0 it means no negative timesteps have been provided
            lookback = 0 if lookback >= 0 else (lookback * -1)
            # NOTE(review): in this dynamic branch the window sizes are floats (timestamp * fps),
            # not ints — confirm Backtrackable tolerates that; only the static branch is used here.
        return lookback, lookahead

    def _make_backtrackable_dataset(self, dataset: datasets.IterableDataset) -> Backtrackable:
        """Wrap an iterable shard so past/future items can be peeked for delta queries."""
        lookback, lookahead = self._get_window_steps(self.delta_timestamps)
        return Backtrackable(dataset, history=lookback, lookahead=lookahead)

    def _make_timestamps_from_indices(
        self, start_ts: float, indices: dict[str, list[int]] | None = None
    ) -> dict[str, list[float]]:
        """Convert per-key frame-index offsets into absolute video timestamps anchored at `start_ts`."""
        if indices is not None:
            return {
                key: (
                    start_ts + torch.tensor(indices[key]) / self.fps
                ).tolist()  # NOTE: why not delta_timestamps directly?
                for key in self.delta_timestamps
            }
        else:
            # No delta querying: each video key maps to just the current timestamp.
            return dict.fromkeys(self.meta.video_keys, [start_ts])

    def _make_padding_camera_frame(self, camera_key: str):
        """Variable-shape padding frame for given camera keys, given in (H, W, C)"""
        # Feature shape is stored (H, W, C); permute to (C, H, W) to match decoded frames.
        return torch.zeros(self.meta.info["features"][camera_key]["shape"]).permute(-1, 0, 1)

    def _get_video_frame_padding_mask(
        self,
        video_frames: dict[str, torch.Tensor],
        query_timestamps: dict[str, list[float]],
        original_timestamps: dict[str, list[float]],
    ) -> dict[str, torch.BoolTensor]:
        """Mark which requested timestamps were clamped away and therefore padded.

        Returns a dict mapping `{video_key}_is_pad` to a boolean tensor aligned with the
        originally requested timestamps (True where the frame was not actually decoded).
        """
        padding_mask = {}
        for video_key, timestamps in original_timestamps.items():
            if video_key not in video_frames:
                continue  # only padding on video keys that are available
            # NOTE(review): `frames` is accumulated below but never used or returned —
            # looks like dead code (only `mask` feeds the result); confirm before removing.
            frames = []
            mask = []
            padding_frame = self._make_padding_camera_frame(video_key)
            for ts in timestamps:
                if is_float_in_list(ts, query_timestamps[video_key]):
                    idx = find_float_index(ts, query_timestamps[video_key])
                    frames.append(video_frames[video_key][idx, :])
                    mask.append(False)
                else:
                    frames.append(padding_frame)
                    mask.append(True)
            padding_mask[f"{video_key}_is_pad"] = torch.BoolTensor(mask)
        return padding_mask

    def make_frame(self, dataset_iterator: Backtrackable) -> Generator:
        """Makes a frame starting from a dataset iterator.

        Yields exactly one fully-assembled frame dict per call. A StopIteration raised by
        `next(dataset_iterator)` surfaces as RuntimeError (PEP 479), which `__iter__` catches
        to detect shard exhaustion.
        """
        item = next(dataset_iterator)
        item = item_to_torch(item)
        updates = []  # list of "updates" to apply to the item retrieved from hf_dataset (w/o camera features)
        # Get episode index from the item
        ep_idx = item["episode_index"]
        # "timestamp" restarts from 0 for each episode, whereas we need a global timestep within the single .mp4 file (given by index/fps)
        current_ts = item["index"] / self.fps
        episode_boundaries_ts = {
            key: (
                self.meta.episodes[ep_idx][f"videos/{key}/from_timestamp"],
                self.meta.episodes[ep_idx][f"videos/{key}/to_timestamp"],
            )
            for key in self.meta.video_keys
        }
        # Apply delta querying logic if necessary
        if self.delta_indices is not None:
            query_result, padding = self._get_delta_frames(dataset_iterator, item)
            updates.append(query_result)
            updates.append(padding)
        # Load video frames, when needed
        if len(self.meta.video_keys) > 0:
            original_timestamps = self._make_timestamps_from_indices(current_ts, self.delta_indices)
            # Some timestamps might not result available considering the episode's boundaries
            query_timestamps = self._get_query_timestamps(
                current_ts, self.delta_indices, episode_boundaries_ts
            )
            video_frames = self._query_videos(query_timestamps, ep_idx)
            if self.image_transforms is not None:
                image_keys = self.meta.camera_keys
                for cam in image_keys:
                    video_frames[cam] = self.image_transforms(video_frames[cam])
            updates.append(video_frames)
            if self.delta_indices is not None:
                # We always return the same number of frames. Unavailable frames are padded.
                padding_mask = self._get_video_frame_padding_mask(
                    video_frames, query_timestamps, original_timestamps
                )
                updates.append(padding_mask)
        # Merge all updates on top of the raw item; later updates win on key clashes.
        result = item.copy()
        for update in updates:
            result.update(update)
        result["task"] = self.meta.tasks.iloc[item["task_index"]].name
        yield result

    def _get_query_timestamps(
        self,
        current_ts: float,
        query_indices: dict[str, list[int]] | None = None,
        episode_boundaries_ts: dict[str, tuple[float, float]] | None = None,
    ) -> dict[str, list[float]]:
        """Compute decode timestamps per video key, clamped to the episode's time span."""
        query_timestamps = {}
        keys_to_timestamps = self._make_timestamps_from_indices(current_ts, query_indices)
        for key in self.meta.video_keys:
            if query_indices is not None and key in query_indices:
                timestamps = keys_to_timestamps[key]
                # Clamp out timesteps outside of episode boundaries
                query_timestamps[key] = torch.clamp(
                    torch.tensor(timestamps), *episode_boundaries_ts[key]
                ).tolist()
            else:
                query_timestamps[key] = [current_ts]
        return query_timestamps

    def _query_videos(self, query_timestamps: dict[str, list[float]], ep_idx: int) -> dict:
        """Note: When using data workers (e.g. DataLoader with num_workers>0), do not call this function
        in the main process (e.g. by using a second Dataloader with num_workers=0). It will result in a
        Segmentation Fault. This probably happens because a memory reference to the video loader is created in
        the main process and a subprocess fails to access it.
        """
        item = {}
        for video_key, query_ts in query_timestamps.items():
            # Stream videos from the Hub unless we are streaming from a local root.
            root = self.meta.url_root if self.streaming and not self.streaming_from_local else self.root
            video_path = f"{root}/{self.meta.get_video_file_path(ep_idx, video_key)}"
            frames = decode_video_frames_torchcodec(
                video_path, query_ts, self.tolerance_s, decoder_cache=self.video_decoder_cache
            )
            # Single-timestamp queries drop the leading frame dimension.
            item[video_key] = frames.squeeze(0) if len(query_ts) == 1 else frames
        return item

    def _get_delta_frames(self, dataset_iterator: Backtrackable, current_item: dict):
        # TODO(fracapuano): Modularize this function, refactor the code
        """Get frames with delta offsets using the backtrackable iterator.

        Args:
            dataset_iterator (Backtrackable): Iterator supporting peek_back/peek_ahead.
            current_item (dict): Current item from the iterator.

        Returns:
            tuple: (query_result, padding) - frames at delta offsets and padding info.
        """
        current_episode_idx = current_item["episode_index"]
        # Prepare results
        query_result = {}
        padding = {}
        for key, delta_indices in self.delta_indices.items():
            if key in self.meta.video_keys:
                continue  # visual frames are decoded separately
            target_frames = []
            is_pad = []
            # Create a results dictionary to store frames in processing order, then reconstruct original order for stacking
            delta_results = {}
            # Separate and sort deltas by difficulty (easier operations first)
            negative_deltas = sorted([d for d in delta_indices if d < 0], reverse=True)  # [-1, -2, -3, ...]
            positive_deltas = sorted([d for d in delta_indices if d > 0])  # [1, 2, 3, ...]
            zero_deltas = [d for d in delta_indices if d == 0]
            # Process zero deltas (current frame)
            for delta in zero_deltas:
                delta_results[delta] = (
                    current_item[key],
                    False,
                )
            # Process negative deltas in order of increasing difficulty
            lookback_failed = False
            last_successful_frame = current_item[key]
            for delta in negative_deltas:
                if lookback_failed:
                    # Once one lookback fails, all deeper ones pad with the last valid frame.
                    delta_results[delta] = (last_successful_frame, True)
                    continue
                try:
                    steps_back = abs(delta)
                    if dataset_iterator.can_peek_back(steps_back):
                        past_item = dataset_iterator.peek_back(steps_back)
                        past_item = item_to_torch(past_item)
                        if past_item["episode_index"] == current_episode_idx:
                            delta_results[delta] = (past_item[key], False)
                            last_successful_frame = past_item[key]
                        else:
                            raise LookBackError("Retrieved frame is from different episode!")
                    else:
                        raise LookBackError("Cannot go back further than the history buffer!")
                except LookBackError:
                    delta_results[delta] = (last_successful_frame, True)
                    lookback_failed = True  # All subsequent negative deltas will also fail
            # Process positive deltas in order of increasing difficulty
            lookahead_failed = False
            last_successful_frame = current_item[key]
            for delta in positive_deltas:
                if lookahead_failed:
                    delta_results[delta] = (last_successful_frame, True)
                    continue
                try:
                    if dataset_iterator.can_peek_ahead(delta):
                        future_item = dataset_iterator.peek_ahead(delta)
                        future_item = item_to_torch(future_item)
                        if future_item["episode_index"] == current_episode_idx:
                            delta_results[delta] = (future_item[key], False)
                            last_successful_frame = future_item[key]
                        else:
                            raise LookAheadError("Retrieved frame is from different episode!")
                    else:
                        raise LookAheadError("Cannot go ahead further than the lookahead buffer!")
                except LookAheadError:
                    delta_results[delta] = (last_successful_frame, True)
                    lookahead_failed = True  # All subsequent positive deltas will also fail
            # Reconstruct original order for stacking
            for delta in delta_indices:
                frame, is_padded = delta_results[delta]
                # add batch dimension for stacking
                target_frames.append(frame)  # frame.unsqueeze(0))
                is_pad.append(is_padded)
            # Stack frames and add to results
            if target_frames:
                query_result[key] = torch.stack(target_frames)
                padding[f"{key}_is_pad"] = torch.BoolTensor(is_pad)
        return query_result, padding

    def _validate_delta_timestamp_keys(self, delta_timestamps: dict[str, list[float]]) -> None:
        """
        Validate that all keys in delta_timestamps correspond to actual features in the dataset.

        Raises:
            ValueError: If any delta timestamp key doesn't correspond to a dataset feature.
        """
        if delta_timestamps is None:
            return
        # Get all available feature keys from the dataset metadata
        available_features = set(self.meta.features.keys())
        # Get all keys from delta_timestamps
        delta_keys = set(delta_timestamps.keys())
        # Find any keys that don't correspond to features
        invalid_keys = delta_keys - available_features
        if invalid_keys:
            raise ValueError(
                f"The following delta_timestamp keys do not correspond to dataset features: {invalid_keys}. "
                f"Available features are: {sorted(available_features)}"
            )
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/datasets/streaming_dataset.py",
"license": "Apache License 2.0",
"lines": 435,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:tests/datasets/test_streaming.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import torch
from lerobot.datasets.streaming_dataset import StreamingLeRobotDataset
from lerobot.datasets.utils import safe_shard
from lerobot.utils.constants import ACTION
from tests.fixtures.constants import DUMMY_REPO_ID
def get_frames_expected_order(streaming_ds: StreamingLeRobotDataset) -> list[int]:
    """Replicates the shuffling logic of StreamingLeRobotDataset to get the expected order of indices.

    NOTE: RNG consumption order must exactly mirror `StreamingLeRobotDataset.__iter__`
    (shard choice, then buffer index) for the produced order to match.
    """
    rng = np.random.default_rng(streaming_ds.seed)
    buffer_size = streaming_ds.buffer_size
    num_shards = streaming_ds.num_shards
    # Materialize each shard's frame indices up front.
    shards_indices = []
    for shard_idx in range(num_shards):
        shard = streaming_ds.hf_dataset.shard(num_shards, index=shard_idx)
        shard_indices = [item["index"] for item in shard]
        shards_indices.append(shard_indices)
    shard_iterators = {i: iter(s) for i, s in enumerate(shards_indices)}
    buffer_indices_generator = streaming_ds._iter_random_indices(rng, buffer_size)
    frames_buffer = []
    expected_indices = []
    while shard_iterators:  # While there are still available shards
        available_shard_keys = list(shard_iterators.keys())
        if not available_shard_keys:
            break
        # Call _infinite_generator_over_elements with current available shards (key difference!)
        shard_key = next(streaming_ds._infinite_generator_over_elements(rng, available_shard_keys))
        try:
            frame_index = next(shard_iterators[shard_key])
            if len(frames_buffer) == buffer_size:
                # Buffer full: evict a random slot (yield its index) and store the new one.
                i = next(buffer_indices_generator)
                expected_indices.append(frames_buffer[i])
                frames_buffer[i] = frame_index
            else:
                frames_buffer.append(frame_index)
        except StopIteration:
            del shard_iterators[shard_key]  # Remove exhausted shard
    # Drain the remaining buffer in shuffled order, as __iter__ does.
    rng.shuffle(frames_buffer)
    expected_indices.extend(frames_buffer)
    return expected_indices
def test_single_frame_consistency(tmp_path, lerobot_dataset_factory):
    """Test that frames streamed one at a time match the same frames accessed by index in-memory."""
    ds_num_frames = 400
    ds_num_episodes = 10
    buffer_size = 100
    local_path = tmp_path / "test"
    repo_id = f"{DUMMY_REPO_ID}"
    ds = lerobot_dataset_factory(
        root=local_path,
        repo_id=repo_id,
        total_episodes=ds_num_episodes,
        total_frames=ds_num_frames,
    )
    streaming_ds = iter(StreamingLeRobotDataset(repo_id=repo_id, root=local_path, buffer_size=buffer_size))
    key_checks = []
    for _ in range(ds_num_frames):
        streaming_frame = next(streaming_ds)
        # Use the streamed frame's global index to fetch the same frame from the in-memory dataset.
        frame_idx = streaming_frame["index"]
        target_frame = ds[frame_idx]
        for key in streaming_frame:
            left = streaming_frame[key]
            right = target_frame[key]
            if isinstance(left, str):
                check = left == right
            elif isinstance(left, torch.Tensor):
                check = torch.allclose(left, right) and left.shape == right.shape
            elif isinstance(left, float):
                check = left == right.item()  # right is a torch.Tensor
            # NOTE(review): if `left` matches none of the branches above, `check` carries over
            # its value from the previous key (or is unbound on the very first key) — confirm intended.
            key_checks.append((key, check))
    assert all(t[1] for t in key_checks), (
        f"Checking {list(filter(lambda t: not t[1], key_checks))[0][0]} left and right were found different (frame_idx: {frame_idx})"
    )
@pytest.mark.parametrize(
    "shuffle",
    [False, True],
)
def test_frames_order_over_epochs(tmp_path, lerobot_dataset_factory, shuffle):
    """Test if streamed frames correspond to shuffling operations over in-memory dataset.

    With shuffle=False the order must repeat across epochs; with shuffle=True it must differ.
    """
    ds_num_frames = 400
    ds_num_episodes = 10
    buffer_size = 100
    seed = 42
    n_epochs = 3
    local_path = tmp_path / "test"
    repo_id = f"{DUMMY_REPO_ID}"
    lerobot_dataset_factory(
        root=local_path,
        repo_id=repo_id,
        total_episodes=ds_num_episodes,
        total_frames=ds_num_frames,
    )
    streaming_ds = StreamingLeRobotDataset(
        repo_id=repo_id, root=local_path, buffer_size=buffer_size, seed=seed, shuffle=shuffle
    )
    # First epoch must always match the reference replication of the shuffling logic.
    first_epoch_indices = [frame["index"] for frame in streaming_ds]
    expected_indices = get_frames_expected_order(streaming_ds)
    assert first_epoch_indices == expected_indices, "First epoch indices do not match expected indices"
    expected_indices = get_frames_expected_order(streaming_ds)
    for _ in range(n_epochs):
        streaming_indices = [frame["index"] for frame in streaming_ds]
        frames_match = all(
            s_index == e_index for s_index, e_index in zip(streaming_indices, expected_indices, strict=True)
        )
        if shuffle:
            assert not frames_match
        else:
            assert frames_match
@pytest.mark.parametrize(
    "shuffle",
    [False, True],
)
def test_frames_order_with_shards(tmp_path, lerobot_dataset_factory, shuffle):
    """Test if streamed frames correspond to shuffling operations over in-memory dataset with multiple shards."""
    ds_num_frames = 100
    ds_num_episodes = 10
    buffer_size = 10
    seed = 42
    n_epochs = 3
    # Tiny data files + chunk size force the dataset to be split across several parquet shards.
    data_file_size_mb = 0.001
    chunks_size = 1
    local_path = tmp_path / "test"
    repo_id = f"{DUMMY_REPO_ID}-ciao"
    lerobot_dataset_factory(
        root=local_path,
        repo_id=repo_id,
        total_episodes=ds_num_episodes,
        total_frames=ds_num_frames,
        data_files_size_in_mb=data_file_size_mb,
        chunks_size=chunks_size,
    )
    streaming_ds = StreamingLeRobotDataset(
        repo_id=repo_id,
        root=local_path,
        buffer_size=buffer_size,
        seed=seed,
        shuffle=shuffle,
        max_num_shards=4,
    )
    first_epoch_indices = [frame["index"] for frame in streaming_ds]
    expected_indices = get_frames_expected_order(streaming_ds)
    assert first_epoch_indices == expected_indices, "First epoch indices do not match expected indices"
    for _ in range(n_epochs):
        streaming_indices = [
            frame["index"] for frame in streaming_ds
        ]  # NOTE: this is the same as first_epoch_indices
        frames_match = all(
            s_index == e_index for s_index, e_index in zip(streaming_indices, expected_indices, strict=True)
        )
        if shuffle:
            assert not frames_match
        else:
            assert frames_match
@pytest.mark.parametrize(
    "state_deltas, action_deltas",
    [
        ([-1, -0.5, -0.20, 0], [0, 1, 2, 3]),
        ([-1, -0.5, -0.20, 0], [-1.5, -1, -0.5, -0.20, -0.10, 0]),
        ([-2, -1, -0.5, 0], [0, 1, 2, 3]),
        ([-2, -1, -0.5, 0], [-1.5, -1, -0.5, -0.20, -0.10, 0]),
    ],
)
def test_frames_with_delta_consistency(tmp_path, lerobot_dataset_factory, state_deltas, action_deltas):
    """Streamed frames with delta_timestamps must match the in-memory dataset on non-padded regions."""
    ds_num_frames = 500
    ds_num_episodes = 10
    buffer_size = 100
    seed = 42
    local_path = tmp_path / "test"
    repo_id = f"{DUMMY_REPO_ID}-ciao"
    camera_key = "phone"
    delta_timestamps = {
        camera_key: state_deltas,
        "state": state_deltas,
        ACTION: action_deltas,
    }
    ds = lerobot_dataset_factory(
        root=local_path,
        repo_id=repo_id,
        total_episodes=ds_num_episodes,
        total_frames=ds_num_frames,
        delta_timestamps=delta_timestamps,
    )
    streaming_ds = iter(
        StreamingLeRobotDataset(
            repo_id=repo_id,
            root=local_path,
            buffer_size=buffer_size,
            seed=seed,
            shuffle=False,
            delta_timestamps=delta_timestamps,
        )
    )
    for i in range(ds_num_frames):
        streaming_frame = next(streaming_ds)
        frame_idx = streaming_frame["index"]
        target_frame = ds[frame_idx]
        assert set(streaming_frame.keys()) == set(target_frame.keys()), (
            f"Keys differ between streaming frame and target one. Differ at: {set(streaming_frame.keys()) - set(target_frame.keys())}"
        )
        key_checks = []
        for key in streaming_frame:
            left = streaming_frame[key]
            right = target_frame[key]
            if isinstance(left, str):
                check = left == right
            elif isinstance(left, torch.Tensor):
                if (
                    key not in ds.meta.camera_keys
                    and "is_pad" not in key
                    and f"{key}_is_pad" in streaming_frame
                ):
                    # comparing frames only on non-padded regions. Padding is applied to last-valid broadcasting
                    left = left[~streaming_frame[f"{key}_is_pad"]]
                    right = right[~target_frame[f"{key}_is_pad"]]
                check = torch.allclose(left, right) and left.shape == right.shape
            # NOTE(review): non-str/non-tensor values reuse `check` from the previous key — confirm intended.
            key_checks.append((key, check))
        assert all(t[1] for t in key_checks), (
            f"Checking {list(filter(lambda t: not t[1], key_checks))[0][0]} left and right were found different (i: {i}, frame_idx: {frame_idx})"
        )
@pytest.mark.parametrize(
    "state_deltas, action_deltas",
    [
        ([-1, -0.5, -0.20, 0], [0, 1, 2, 3, 10, 20]),
        ([-1, -0.5, -0.20, 0], [-20, -1.5, -1, -0.5, -0.20, -0.10, 0]),
        ([-2, -1, -0.5, 0], [0, 1, 2, 3, 10, 20]),
        ([-2, -1, -0.5, 0], [-20, -1.5, -1, -0.5, -0.20, -0.10, 0]),
    ],
)
def test_frames_with_delta_consistency_with_shards(
    tmp_path, lerobot_dataset_factory, state_deltas, action_deltas
):
    """Streaming vs. in-memory frame consistency when the dataset is written as
    many tiny data files and the streaming reader iterates over 4 shards.

    Same check as ``test_frames_with_delta_consistency`` but the dataset is
    created with a very small ``data_files_size_in_mb`` so the streaming reader
    has to stitch delta-offset frames across file/shard boundaries.
    """
    ds_num_frames = 100
    ds_num_episodes = 10
    buffer_size = 10
    # Tiny file size forces the dataset to be split into many parquet files.
    data_file_size_mb = 0.001
    chunks_size = 1
    seed = 42
    local_path = tmp_path / "test"
    repo_id = f"{DUMMY_REPO_ID}-ciao"
    camera_key = "phone"
    delta_timestamps = {
        camera_key: state_deltas,
        "state": state_deltas,
        ACTION: action_deltas,
    }
    ds = lerobot_dataset_factory(
        root=local_path,
        repo_id=repo_id,
        total_episodes=ds_num_episodes,
        total_frames=ds_num_frames,
        delta_timestamps=delta_timestamps,
        data_files_size_in_mb=data_file_size_mb,
        chunks_size=chunks_size,
    )
    streaming_ds = StreamingLeRobotDataset(
        repo_id=repo_id,
        root=local_path,
        buffer_size=buffer_size,
        seed=seed,
        shuffle=False,
        delta_timestamps=delta_timestamps,
        max_num_shards=4,
    )
    iter(streaming_ds)
    num_shards = 4
    # NOTE(review): shards_indices is collected but never asserted on below —
    # presumably meant to validate the shard partitioning; confirm intent.
    shards_indices = []
    for shard_idx in range(num_shards):
        shard = safe_shard(streaming_ds.hf_dataset, shard_idx, num_shards)
        shard_indices = [item["index"] for item in shard]
        shards_indices.append(shard_indices)
    streaming_ds = iter(streaming_ds)
    for i in range(ds_num_frames):
        streaming_frame = next(streaming_ds)
        frame_idx = streaming_frame["index"]
        target_frame = ds[frame_idx]
        assert set(streaming_frame.keys()) == set(target_frame.keys()), (
            f"Keys differ between streaming frame and target one. Differ at: {set(streaming_frame.keys()) - set(target_frame.keys())}"
        )
        key_checks = []
        for key in streaming_frame:
            left = streaming_frame[key]
            right = target_frame[key]
            if isinstance(left, str):
                check = left == right
            elif isinstance(left, torch.Tensor):
                if (
                    key not in ds.meta.camera_keys
                    and "is_pad" not in key
                    and f"{key}_is_pad" in streaming_frame
                ):
                    # comparing frames only on non-padded regions. Padding is applied to last-valid broadcasting
                    left = left[~streaming_frame[f"{key}_is_pad"]]
                    right = right[~target_frame[f"{key}_is_pad"]]
                check = torch.allclose(left, right) and left.shape == right.shape
            elif isinstance(left, float):
                check = left == right.item()  # right is a torch.Tensor
            key_checks.append((key, check))
        assert all(t[1] for t in key_checks), (
            f"Checking {list(filter(lambda t: not t[1], key_checks))[0][0]} left and right were found different (i: {i}, frame_idx: {frame_idx})"
        )
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/datasets/test_streaming.py",
"license": "Apache License 2.0",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/lerobot:examples/port_datasets/display_error_files.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
from pathlib import Path
def find_missing_workers(completions_dir, world_size):
    """Find workers that are not completed and returns their indices."""
    completed_indices = set()
    for entry in completions_dir.glob("*"):
        if entry.name in [".", ".."]:
            continue
        # Completion files are zero-padded worker indices (e.g. "0042").
        stripped = entry.name.lstrip("0")
        completed_indices.add(int(stripped) if stripped else 0)
    return set(range(world_size)) - completed_indices
def find_output_files(slurm_dir, worker_indices):
    """Find output files associated to worker indices, and return tuples
    of (worker index, output file path)
    """
    matches = []
    for out_path in slurm_dir.glob("*.out"):
        # Slurm log files are named "<jobname>_<worker_id>.out".
        _, worker_token = out_path.name.replace(".out", "").split("_")
        if int(worker_token) in worker_indices:
            matches.append((int(worker_token), out_path))
    return matches
def display_error_files(logs_dir, job_name):
    """Print, in descending order, the indices of workers that never completed."""
    job_dir = Path(logs_dir) / job_name
    with open(job_dir / "executor.json") as f:
        executor = json.load(f)
    missing_workers = find_missing_workers(job_dir / "completions", executor["world_size"])
    for missing in sorted(missing_workers, reverse=True):
        print(missing)
def main():
    """CLI entry point: print the indices of datatrove workers that never completed."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--logs-dir",
        type=str,
        help="Path to logs directory for `datatrove`.",
    )
    parser.add_argument(
        "--job-name",
        type=str,
        default="port_droid",
        help="Job name used in slurm, and name of the directory created inside the provided logs directory.",
    )
    args = parser.parse_args()
    # Argument names map 1:1 onto display_error_files(logs_dir, job_name).
    display_error_files(**vars(args))
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/port_datasets/display_error_files.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/port_datasets/port_droid.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import time
from pathlib import Path
import numpy as np
import tensorflow_datasets as tfds
from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.utils.utils import get_elapsed_time_in_days_hours_minutes_seconds
DROID_SHARDS = 2048
DROID_FPS = 15
DROID_ROBOT_TYPE = "Franka"
# Dataset schema slightly adapted from: https://droid-dataset.github.io/droid/the-droid-dataset.html#-dataset-schema
DROID_FEATURES = {
# true on first step of the episode
"is_first": {
"dtype": "bool",
"shape": (1,),
"names": None,
},
# true on last step of the episode
"is_last": {
"dtype": "bool",
"shape": (1,),
"names": None,
},
# true on last step of the episode if it is a terminal step, True for demos
"is_terminal": {
"dtype": "bool",
"shape": (1,),
"names": None,
},
# language_instruction is also stored as "task" to follow LeRobot standard
"language_instruction": {
"dtype": "string",
"shape": (1,),
"names": None,
},
"language_instruction_2": {
"dtype": "string",
"shape": (1,),
"names": None,
},
"language_instruction_3": {
"dtype": "string",
"shape": (1,),
"names": None,
},
"observation.state.gripper_position": {
"dtype": "float32",
"shape": (1,),
"names": {
"axes": ["gripper"],
},
},
"observation.state.cartesian_position": {
"dtype": "float32",
"shape": (6,),
"names": {
"axes": ["x", "y", "z", "roll", "pitch", "yaw"],
},
},
"observation.state.joint_position": {
"dtype": "float32",
"shape": (7,),
"names": {
"axes": ["joint_0", "joint_1", "joint_2", "joint_3", "joint_4", "joint_5", "joint_6"],
},
},
# Add this new feature to follow LeRobot standard of using joint position + gripper
"observation.state": {
"dtype": "float32",
"shape": (8,),
"names": {
"axes": ["joint_0", "joint_1", "joint_2", "joint_3", "joint_4", "joint_5", "joint_6", "gripper"],
},
},
# Initially called wrist_image_left
"observation.images.wrist_left": {
"dtype": "video",
"shape": (180, 320, 3),
"names": [
"height",
"width",
"channels",
],
},
# Initially called exterior_image_1_left
"observation.images.exterior_1_left": {
"dtype": "video",
"shape": (180, 320, 3),
"names": [
"height",
"width",
"channels",
],
},
# Initially called exterior_image_2_left
"observation.images.exterior_2_left": {
"dtype": "video",
"shape": (180, 320, 3),
"names": [
"height",
"width",
"channels",
],
},
"action.gripper_position": {
"dtype": "float32",
"shape": (1,),
"names": {
"axes": ["gripper"],
},
},
"action.gripper_velocity": {
"dtype": "float32",
"shape": (1,),
"names": {
"axes": ["gripper"],
},
},
"action.cartesian_position": {
"dtype": "float32",
"shape": (6,),
"names": {
"axes": ["x", "y", "z", "roll", "pitch", "yaw"],
},
},
"action.cartesian_velocity": {
"dtype": "float32",
"shape": (6,),
"names": {
"axes": ["x", "y", "z", "roll", "pitch", "yaw"],
},
},
"action.joint_position": {
"dtype": "float32",
"shape": (7,),
"names": {
"axes": ["joint_0", "joint_1", "joint_2", "joint_3", "joint_4", "joint_5", "joint_6"],
},
},
"action.joint_velocity": {
"dtype": "float32",
"shape": (7,),
"names": {
"axes": ["joint_0", "joint_1", "joint_2", "joint_3", "joint_4", "joint_5", "joint_6"],
},
},
# This feature was called "action" in RLDS dataset and consists of [6x joint velocities, 1x gripper position]
"action.original": {
"dtype": "float32",
"shape": (7,),
"names": {
"axes": ["x", "y", "z", "roll", "pitch", "yaw", "gripper"],
},
},
# Add this new feature to follow LeRobot standard of using joint position + gripper
"action": {
"dtype": "float32",
"shape": (8,),
"names": {
"axes": ["joint_0", "joint_1", "joint_2", "joint_3", "joint_4", "joint_5", "joint_6", "gripper"],
},
},
"discount": {
"dtype": "float32",
"shape": (1,),
"names": None,
},
"reward": {
"dtype": "float32",
"shape": (1,),
"names": None,
},
# Meta data that are the same for all frames in the episode
"task_category": {
"dtype": "string",
"shape": (1,),
"names": None,
},
"building": {
"dtype": "string",
"shape": (1,),
"names": None,
},
"collector_id": {
"dtype": "string",
"shape": (1,),
"names": None,
},
"date": {
"dtype": "string",
"shape": (1,),
"names": None,
},
"camera_extrinsics.wrist_left": {
"dtype": "float32",
"shape": (6,),
"names": {
"axes": ["x", "y", "z", "roll", "pitch", "yaw"],
},
},
"camera_extrinsics.exterior_1_left": {
"dtype": "float32",
"shape": (6,),
"names": {
"axes": ["x", "y", "z", "roll", "pitch", "yaw"],
},
},
"camera_extrinsics.exterior_2_left": {
"dtype": "float32",
"shape": (6,),
"names": {
"axes": ["x", "y", "z", "roll", "pitch", "yaw"],
},
},
"is_episode_successful": {
"dtype": "bool",
"shape": (1,),
"names": None,
},
}
def is_episode_successful(tf_episode_metadata):
    """Return True when the episode's file path marks it as a successful demo."""
    # Adapted from: https://github.com/droid-dataset/droid_policy_learning/blob/dd1020eb20d981f90b5ff07dc80d80d5c0cb108b/robomimic/utils/rlds_utils.py#L8
    file_path = tf_episode_metadata["file_path"].numpy().decode()
    return "/success/" in file_path
def generate_lerobot_frames(tf_episode):
    """Yield LeRobot-format frame dicts for every step of a RLDS/TFDS episode.

    Episode-level metadata (building, collector, camera extrinsics, success
    flag) is replicated onto every frame, and float64 arrays are cast to
    float32 before yielding.
    """
    m = tf_episode["episode_metadata"]
    frame_meta = {
        # NOTE(review): "task_category" is filled from m["building"], same as
        # "building" below — looks like a copy-paste; confirm the intended key.
        "task_category": m["building"].numpy().decode(),
        "building": m["building"].numpy().decode(),
        "collector_id": m["collector_id"].numpy().decode(),
        "date": m["date"].numpy().decode(),
        "camera_extrinsics.wrist_left": m["extrinsics_wrist_cam"].numpy(),
        "camera_extrinsics.exterior_1_left": m["extrinsics_exterior_cam_1"].numpy(),
        "camera_extrinsics.exterior_2_left": m["extrinsics_exterior_cam_2"].numpy(),
        "is_episode_successful": np.array([is_episode_successful(m)]),
    }
    for f in tf_episode["steps"]:
        # Dataset schema slightly adapted from: https://droid-dataset.github.io/droid/the-droid-dataset.html#-dataset-schema
        frame = {
            "is_first": np.array([f["is_first"].numpy()]),
            "is_last": np.array([f["is_last"].numpy()]),
            "is_terminal": np.array([f["is_terminal"].numpy()]),
            "language_instruction": f["language_instruction"].numpy().decode(),
            "language_instruction_2": f["language_instruction_2"].numpy().decode(),
            "language_instruction_3": f["language_instruction_3"].numpy().decode(),
            "observation.state.gripper_position": f["observation"]["gripper_position"].numpy(),
            "observation.state.cartesian_position": f["observation"]["cartesian_position"].numpy(),
            "observation.state.joint_position": f["observation"]["joint_position"].numpy(),
            "observation.images.wrist_left": f["observation"]["wrist_image_left"].numpy(),
            "observation.images.exterior_1_left": f["observation"]["exterior_image_1_left"].numpy(),
            "observation.images.exterior_2_left": f["observation"]["exterior_image_2_left"].numpy(),
            "action.gripper_position": f["action_dict"]["gripper_position"].numpy(),
            "action.gripper_velocity": f["action_dict"]["gripper_velocity"].numpy(),
            "action.cartesian_position": f["action_dict"]["cartesian_position"].numpy(),
            "action.cartesian_velocity": f["action_dict"]["cartesian_velocity"].numpy(),
            "action.joint_position": f["action_dict"]["joint_position"].numpy(),
            "action.joint_velocity": f["action_dict"]["joint_velocity"].numpy(),
            "discount": np.array([f["discount"].numpy()]),
            "reward": np.array([f["reward"].numpy()]),
            "action.original": f["action"].numpy(),
        }
        # language_instruction is also stored as "task" to follow LeRobot standard
        frame["task"] = frame["language_instruction"]
        # Add this new feature to follow LeRobot standard of using joint position + gripper
        frame["observation.state"] = np.concatenate(
            [frame["observation.state.joint_position"], frame["observation.state.gripper_position"]]
        )
        frame["action"] = np.concatenate([frame["action.joint_position"], frame["action.gripper_position"]])
        # Meta data that are the same for all frames in the episode
        frame.update(frame_meta)
        # Cast fp64 to fp32
        for key in frame:
            if isinstance(frame[key], np.ndarray) and frame[key].dtype == np.float64:
                frame[key] = frame[key].astype(np.float32)
        yield frame
def port_droid(
    raw_dir: Path,
    repo_id: str,
    push_to_hub: bool = False,
    num_shards: int | None = None,
    shard_index: int | None = None,
):
    """Port the DROID RLDS dataset (tensorflow_datasets) to the LeRobot format.

    Args:
        raw_dir: Path to the raw TFDS version directory (e.g. `path/to/droid/1.0.0`).
        repo_id: Target LeRobot repository identifier.
        push_to_hub: Upload the resulting dataset to the Hub when True.
        num_shards: When set, port a single TFDS shard; must equal the fixed
            TFDS shard count (DROID_SHARDS).
        shard_index: Index of the shard to port; required when `num_shards` is set.

    Raises:
        ValueError: On inconsistent or incomplete shard arguments.
    """
    dataset_name = raw_dir.parent.name
    version = raw_dir.name
    data_dir = raw_dir.parent.parent
    builder = tfds.builder(f"{dataset_name}/{version}", data_dir=data_dir, version="")
    if num_shards is not None:
        # Fail early with a clear message instead of a TypeError in the
        # `shard_index >= tfds_num_shards` comparison below.
        if shard_index is None:
            raise ValueError("`shard_index` must be provided when `num_shards` is set.")
        tfds_num_shards = builder.info.splits["train"].num_shards
        if tfds_num_shards != DROID_SHARDS:
            raise ValueError(
                f"Number of shards of Droid dataset is expected to be {DROID_SHARDS} but is {tfds_num_shards}."
            )
        if num_shards != tfds_num_shards:
            raise ValueError(
                f"We only shard over the fixed number of shards provided by tensorflow dataset ({tfds_num_shards}), but {num_shards} shards provided instead."
            )
        if shard_index >= tfds_num_shards:
            raise ValueError(
                f"Shard index is greater than the num of shards ({shard_index} >= {num_shards})."
            )
        # TFDS "shard" split unit: read exactly one of the fixed input files.
        raw_dataset = builder.as_dataset(split=f"train[{shard_index}shard]")
    else:
        raw_dataset = builder.as_dataset(split="train")
    lerobot_dataset = LeRobotDataset.create(
        repo_id=repo_id,
        robot_type=DROID_ROBOT_TYPE,
        fps=DROID_FPS,
        features=DROID_FEATURES,
    )
    start_time = time.time()
    num_episodes = raw_dataset.cardinality().numpy().item()
    logging.info(f"Number of episodes {num_episodes}")
    for episode_index, episode in enumerate(raw_dataset):
        elapsed_time = time.time() - start_time
        d, h, m, s = get_elapsed_time_in_days_hours_minutes_seconds(elapsed_time)
        logging.info(
            f"{episode_index} / {num_episodes} episodes processed (after {d} days, {h} hours, {m} minutes, {s:.3f} seconds)"
        )
        for frame in generate_lerobot_frames(episode):
            lerobot_dataset.add_frame(frame)
        lerobot_dataset.save_episode()
        logging.info("Save_episode")
    lerobot_dataset.finalize()
    if push_to_hub:
        lerobot_dataset.push_to_hub(
            # Add openx tag, since it belongs to the openx collection of datasets
            tags=["openx"],
            private=False,
        )
def validate_dataset(repo_id):
    """Sanity check that ensure meta data can be loaded and all files are present."""
    meta = LeRobotDatasetMetadata(repo_id)
    if meta.total_episodes == 0:
        raise ValueError("Number of episodes is 0.")

    def _check_exists(path, message):
        # Fail fast on the first missing file.
        if not path.exists():
            raise ValueError(message)

    for ep_idx in range(meta.total_episodes):
        data_path = meta.root / meta.get_data_file_path(ep_idx)
        _check_exists(data_path, f"Parquet file is missing in: {data_path}")
        for vid_key in meta.video_keys:
            vid_path = meta.root / meta.get_video_file_path(ep_idx, vid_key)
            _check_exists(vid_path, f"Video file is missing in: {vid_path}")
def main():
    """CLI entry point: parse arguments and port DROID to the LeRobot format."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--raw-dir",
        type=Path,
        required=True,
        help="Directory containing input raw datasets (e.g. `path/to/dataset` or `path/to/dataset/version).",
    )
    parser.add_argument(
        "--repo-id",
        type=str,
        # Typo fixed: "Repositery" -> "Repository".
        help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset, required when push-to-hub is True",
    )
    parser.add_argument(
        "--push-to-hub",
        action="store_true",
        help="Upload to hub.",
    )
    parser.add_argument(
        "--num-shards",
        type=int,
        default=None,
        help="Number of shards. Can be either None to load the full dataset, or 2048 to load one of the 2048 tensorflow dataset files.",
    )
    parser.add_argument(
        "--shard-index",
        type=int,
        default=None,
        help="Index of the shard. Can be either None to load the full dataset, or in [0,2047] to load one of the 2048 tensorflow dataset files.",
    )
    args = parser.parse_args()
    port_droid(**vars(args))
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/port_datasets/port_droid.py",
"license": "Apache License 2.0",
"lines": 391,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/port_datasets/slurm_aggregate_shards.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pathlib import Path
from datatrove.executor import LocalPipelineExecutor
from datatrove.executor.slurm import SlurmPipelineExecutor
from datatrove.pipeline.base import PipelineStep
from port_droid import DROID_SHARDS
class AggregateDatasets(PipelineStep):
    """Datatrove pipeline step that merges several LeRobot datasets into one."""

    def __init__(
        self,
        repo_ids: list[str],
        aggregated_repo_id: str,
    ):
        super().__init__()
        self.repo_ids = repo_ids
        self.aggr_repo_id = aggregated_repo_id

    def run(self, data=None, rank: int = 0, world_size: int = 1):
        import logging

        from lerobot.datasets.aggregate import aggregate_datasets
        from lerobot.utils.utils import init_logging

        init_logging()
        # aggregate_datasets handles parallelism internally, so only rank 0
        # does the actual work; every other worker exits immediately.
        if rank != 0:
            logging.info(f"Worker {rank} skipping - only worker 0 performs aggregation")
            return
        logging.info(f"Starting aggregation of {len(self.repo_ids)} datasets into {self.aggr_repo_id}")
        aggregate_datasets(self.repo_ids, self.aggr_repo_id)
        logging.info("Aggregation complete!")
def make_aggregate_executor(
    repo_ids, repo_id, job_name, logs_dir, workers, partition, cpus_per_task, mem_per_cpu, slurm=True
):
    """Build the datatrove executor (Slurm or local) that runs the aggregation step."""
    common_kwargs = {
        "pipeline": [
            AggregateDatasets(repo_ids, repo_id),
        ],
        "logging_dir": str(logs_dir / job_name),
    }
    if not slurm:
        return LocalPipelineExecutor(tasks=1, workers=1, **common_kwargs)
    # For aggregation we only need 1 task/worker since aggregate_datasets
    # handles everything itself.
    return SlurmPipelineExecutor(
        job_name=job_name,
        tasks=1,
        workers=1,
        time="08:00:00",
        partition=partition,
        cpus_per_task=cpus_per_task,
        sbatch_args={"mem-per-cpu": mem_per_cpu},
        **common_kwargs,
    )
def main():
    """CLI entry point: aggregate all per-rank DROID shards into one dataset."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo-id",
        type=str,
        # Typo fixed: "Repositery" -> "Repository".
        help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset, required when push-to-hub is True.",
    )
    parser.add_argument(
        "--logs-dir",
        type=Path,
        help="Path to logs directory for `datatrove`.",
    )
    parser.add_argument(
        "--job-name",
        type=str,
        default="aggr_droid",
        help="Job name used in slurm, and name of the directory created inside the provided logs directory.",
    )
    parser.add_argument(
        "--slurm",
        type=int,
        default=1,
        help="Launch over slurm. Use `--slurm 0` to launch sequentially (useful to debug).",
    )
    parser.add_argument(
        "--workers",
        type=int,
        default=1,  # Changed default to 1 since aggregation doesn't need multiple workers
        help="Number of slurm workers. For aggregation, this should be 1.",
    )
    parser.add_argument(
        "--partition",
        type=str,
        help="Slurm partition. Ideally a CPU partition. No need for GPU partition.",
    )
    parser.add_argument(
        "--cpus-per-task",
        type=int,
        default=8,
        help="Number of cpus that each slurm worker will use.",
    )
    parser.add_argument(
        "--mem-per-cpu",
        type=str,
        default="1950M",
        help="Memory per cpu that each worker will use.",
    )
    args = parser.parse_args()
    kwargs = vars(args)
    kwargs["slurm"] = kwargs.pop("slurm") == 1
    # One source dataset per original porting rank.
    repo_ids = [f"{args.repo_id}_world_{DROID_SHARDS}_rank_{rank}" for rank in range(DROID_SHARDS)]
    aggregate_executor = make_aggregate_executor(repo_ids, **kwargs)
    aggregate_executor.run()
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/port_datasets/slurm_aggregate_shards.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/port_datasets/slurm_port_shards.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pathlib import Path
from datatrove.executor import LocalPipelineExecutor
from datatrove.executor.slurm import SlurmPipelineExecutor
from datatrove.pipeline.base import PipelineStep
from port_droid import DROID_SHARDS
class PortDroidShards(PipelineStep):
    """Datatrove pipeline step that ports one DROID shard per worker rank."""

    def __init__(
        self,
        raw_dir: Path | str,
        repo_id: str = None,
    ):
        super().__init__()
        self.raw_dir = Path(raw_dir)
        self.repo_id = repo_id

    def run(self, data=None, rank: int = 0, world_size: int = 1):
        from datasets.utils.tqdm import disable_progress_bars
        from port_droid import port_droid, validate_dataset

        from lerobot.utils.utils import init_logging

        init_logging()
        disable_progress_bars()
        shard_repo_id = f"{self.repo_id}_world_{world_size}_rank_{rank}"
        try:
            validate_dataset(shard_repo_id)
        except Exception:
            pass  # nosec B110 - Dataset doesn't exist yet, continue with porting
        else:
            # Shard already ported and valid: nothing to do.
            return
        port_droid(
            self.raw_dir,
            shard_repo_id,
            push_to_hub=False,
            num_shards=world_size,
            shard_index=rank,
        )
        validate_dataset(shard_repo_id)
def make_port_executor(
    raw_dir, repo_id, job_name, logs_dir, workers, partition, cpus_per_task, mem_per_cpu, slurm=True
):
    """Build the datatrove executor (Slurm or local) that runs the porting step."""
    common_kwargs = {
        "pipeline": [
            PortDroidShards(raw_dir, repo_id),
        ],
        "logging_dir": str(logs_dir / job_name),
    }
    if not slurm:
        return LocalPipelineExecutor(tasks=1, workers=1, **common_kwargs)
    return SlurmPipelineExecutor(
        job_name=job_name,
        tasks=DROID_SHARDS,
        workers=workers,
        time="08:00:00",
        partition=partition,
        cpus_per_task=cpus_per_task,
        sbatch_args={"mem-per-cpu": mem_per_cpu},
        **common_kwargs,
    )
def main():
    """CLI entry point: launch porting of all DROID shards (via Slurm or locally)."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--raw-dir",
        type=Path,
        required=True,
        help="Directory containing input raw datasets (e.g. `path/to/dataset` or `path/to/dataset/version).",
    )
    parser.add_argument(
        "--repo-id",
        type=str,
        # Typo fixed: "Repositery" -> "Repository".
        help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset, required when push-to-hub is True.",
    )
    parser.add_argument(
        "--logs-dir",
        type=Path,
        help="Path to logs directory for `datatrove`.",
    )
    parser.add_argument(
        "--job-name",
        type=str,
        default="port_droid",
        help="Job name used in slurm, and name of the directory created inside the provided logs directory.",
    )
    parser.add_argument(
        "--slurm",
        type=int,
        default=1,
        help="Launch over slurm. Use `--slurm 0` to launch sequentially (useful to debug).",
    )
    parser.add_argument(
        "--workers",
        type=int,
        default=2048,
        help="Number of slurm workers. It should be less than the maximum number of shards.",
    )
    parser.add_argument(
        "--partition",
        type=str,
        help="Slurm partition. Ideally a CPU partition. No need for GPU partition.",
    )
    parser.add_argument(
        "--cpus-per-task",
        type=int,
        default=8,
        help="Number of cpus that each slurm worker will use.",
    )
    parser.add_argument(
        "--mem-per-cpu",
        type=str,
        default="1950M",
        help="Memory per cpu that each worker will use.",
    )
    args = parser.parse_args()
    kwargs = vars(args)
    kwargs["slurm"] = kwargs.pop("slurm") == 1
    port_executor = make_port_executor(**kwargs)
    port_executor.run()
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/port_datasets/slurm_port_shards.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:examples/port_datasets/slurm_upload.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
from pathlib import Path
from datatrove.executor import LocalPipelineExecutor
from datatrove.executor.slurm import SlurmPipelineExecutor
from datatrove.pipeline.base import PipelineStep
from huggingface_hub import HfApi
from huggingface_hub.constants import REPOCARD_NAME
from port_droid import DROID_SHARDS
from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDatasetMetadata
from lerobot.datasets.utils import create_lerobot_dataset_card
from lerobot.utils.utils import init_logging
class UploadDataset(PipelineStep):
    """Datatrove pipeline step that uploads a local LeRobot dataset to the Hub.

    The Hub repo (and branch/card/tag) is created once in ``__init__`` and the
    sorted list of local files is snapshotted; each worker then pre-uploads and
    commits its own chunk of that list in ``run``.
    """

    def __init__(
        self,
        repo_id: str,
        branch: str | None = None,
        revision: str | None = None,
        tags: list | None = None,
        license: str | None = "apache-2.0",
        private: bool = False,
        distant_repo_id: str | None = None,
        **card_kwargs,
    ):
        super().__init__()
        self.repo_id = repo_id
        # Allows pushing to a Hub repo named differently from the local one.
        self.distant_repo_id = self.repo_id if distant_repo_id is None else distant_repo_id
        self.branch = branch
        self.tags = tags
        self.license = license
        self.private = private
        self.card_kwargs = card_kwargs
        self.revision = revision if revision else CODEBASE_VERSION
        if os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") != "1":
            logging.warning(
                'HF_HUB_ENABLE_HF_TRANSFER is not set to "1". Install hf_transfer and set the env '
                "variable for faster uploads:\npip install hf-transfer\nexport HF_HUB_ENABLE_HF_TRANSFER=1"
            )
        self.create_repo()

    def create_repo(self):
        """Create the Hub repo/branch/card/tag and snapshot the sorted local file list."""
        logging.info(f"Loading meta data from {self.repo_id}...")
        meta = LeRobotDatasetMetadata(self.repo_id)
        logging.info(f"Creating repo {self.distant_repo_id}...")
        hub_api = HfApi()
        hub_api.create_repo(
            repo_id=self.distant_repo_id,
            private=self.private,
            repo_type="dataset",
            exist_ok=True,
        )
        if self.branch:
            hub_api.create_branch(
                repo_id=self.distant_repo_id,
                branch=self.branch,
                revision=self.revision,
                repo_type="dataset",
                exist_ok=True,
            )
        # Only push a dataset card when none exists yet on the target revision.
        if not hub_api.file_exists(
            self.distant_repo_id, REPOCARD_NAME, repo_type="dataset", revision=self.branch
        ):
            card = create_lerobot_dataset_card(
                tags=self.tags, dataset_info=meta.info, license=self.license, **self.card_kwargs
            )
            card.push_to_hub(repo_id=self.distant_repo_id, repo_type="dataset", revision=self.branch)
        hub_api.create_tag(self.distant_repo_id, tag=CODEBASE_VERSION, repo_type="dataset")

        def list_files_recursively(directory):
            # Paths relative to the dataset root, as they will appear in the repo.
            base_path = Path(directory)
            return [str(file.relative_to(base_path)) for file in base_path.rglob("*") if file.is_file()]

        logging.info(f"Listing all local files from {self.repo_id}...")
        self.file_paths = list_files_recursively(meta.root)
        # Sorting makes the chunking in run() deterministic across workers.
        self.file_paths = sorted(self.file_paths)

    def create_chunks(self, lst, n):
        """Split lst into n near-equal chunks (first len(lst) % n chunks get one extra item)."""
        from itertools import islice

        it = iter(lst)
        return [list(islice(it, size)) for size in [len(lst) // n + (i < len(lst) % n) for i in range(n)]]

    def create_commits(self, additions):
        """Commit pre-uploaded additions in small batches, retrying commit races with backoff."""
        import logging
        import math
        import random
        import time

        from huggingface_hub import create_commit
        from huggingface_hub.utils import HfHubHTTPError

        FILES_BETWEEN_COMMITS = 10  # noqa: N806
        BASE_DELAY = 0.1  # noqa: N806
        MAX_RETRIES = 12  # noqa: N806
        # Split the files into smaller chunks for faster commit
        # and avoiding "A commit has happened since" error
        num_chunks = math.ceil(len(additions) / FILES_BETWEEN_COMMITS)
        chunks = self.create_chunks(additions, num_chunks)
        for chunk in chunks:
            retries = 0
            while True:
                try:
                    create_commit(
                        self.distant_repo_id,
                        repo_type="dataset",
                        operations=chunk,
                        commit_message=f"DataTrove upload ({len(chunk)} files)",
                        revision=self.branch,
                    )
                    # TODO: every 100 chunks super_squach_commits()
                    logging.info("create_commit completed!")
                    break
                except HfHubHTTPError as e:
                    if "A commit has happened since" in e.server_message:
                        if retries >= MAX_RETRIES:
                            logging.error(f"Failed to create commit after {MAX_RETRIES=}. Giving up.")
                            raise e
                        logging.info("Commit creation race condition issue. Waiting...")
                        # Exponential backoff plus jitter to de-synchronize workers.
                        time.sleep(BASE_DELAY * 2**retries + random.uniform(0, 2))
                        retries += 1
                    else:
                        raise e

    def run(self, data=None, rank: int = 0, world_size: int = 1):
        """Upload this worker's chunk of the file list (pre-upload LFS, then commit)."""
        import logging

        from datasets.utils.tqdm import disable_progress_bars
        from huggingface_hub import CommitOperationAdd, preupload_lfs_files

        from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata
        from lerobot.utils.utils import init_logging

        init_logging()
        disable_progress_bars()
        chunks = self.create_chunks(self.file_paths, world_size)
        file_paths = chunks[rank]
        if len(file_paths) == 0:
            raise ValueError(file_paths)
        logging.info("Pre-uploading LFS files...")
        for i, path in enumerate(file_paths):
            logging.info(f"{i}: {path}")
        meta = LeRobotDatasetMetadata(self.repo_id)
        additions = [
            CommitOperationAdd(path_in_repo=path, path_or_fileobj=meta.root / path) for path in file_paths
        ]
        preupload_lfs_files(
            repo_id=self.distant_repo_id, repo_type="dataset", additions=additions, revision=self.branch
        )
        logging.info("Creating commits...")
        self.create_commits(additions)
        logging.info("Done!")
def make_upload_executor(
    repo_id, job_name, logs_dir, workers, partition, cpus_per_task, mem_per_cpu, private=False, slurm=True
):
    """Build the datatrove executor (Slurm or local) that runs the upload step."""
    common_kwargs = {
        "pipeline": [
            UploadDataset(repo_id, private=private),
        ],
        "logging_dir": str(logs_dir / job_name),
    }
    if not slurm:
        return LocalPipelineExecutor(tasks=DROID_SHARDS, workers=1, **common_kwargs)
    return SlurmPipelineExecutor(
        job_name=job_name,
        tasks=DROID_SHARDS,
        workers=workers,
        time="08:00:00",
        partition=partition,
        cpus_per_task=cpus_per_task,
        sbatch_args={"mem-per-cpu": mem_per_cpu},
        **common_kwargs,
    )
def main():
    """Parse command-line arguments and launch the dataset upload executor.

    Builds an upload executor (slurm by default, local with `--slurm 0`)
    and runs it over the DROID shards.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo-id",
        type=str,
        # Fixed typo: "Repositery" -> "Repository"
        help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset, required when push-to-hub is True.",
    )
    parser.add_argument(
        "--logs-dir",
        type=Path,
        help="Path to logs directory for `datatrove`.",
    )
    parser.add_argument(
        "--job-name",
        type=str,
        default="upload_droid",
        help="Job name used in slurm, and name of the directory created inside the provided logs directory.",
    )
    parser.add_argument(
        "--slurm",
        type=int,
        default=1,
        help="Launch over slurm. Use `--slurm 0` to launch sequentially (useful to debug).",
    )
    parser.add_argument(
        "--workers",
        type=int,
        default=50,
        help="Number of slurm workers. It should be less than the maximum number of shards.",
    )
    parser.add_argument(
        "--partition",
        type=str,
        help="Slurm partition. Ideally a CPU partition. No need for GPU partition.",
    )
    parser.add_argument(
        "--cpus-per-task",
        type=int,
        default=8,
        help="Number of cpus that each slurm worker will use.",
    )
    parser.add_argument(
        "--mem-per-cpu",
        type=str,
        default="1950M",
        help="Memory per cpu that each worker will use.",
    )
    parser.add_argument(
        "--private",
        action="store_true",
        default=False,
        help="Whether to create a private repository.",
    )

    init_logging()
    args = parser.parse_args()
    kwargs = vars(args)
    # argparse stores --slurm as an int; the executor factory expects a bool.
    kwargs["slurm"] = kwargs.pop("slurm") == 1
    upload_executor = make_upload_executor(**kwargs)
    upload_executor.run()


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/lerobot",
"file_path": "examples/port_datasets/slurm_upload.py",
"license": "Apache License 2.0",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/datasets/aggregate.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
from pathlib import Path
import datasets
import pandas as pd
import tqdm
from lerobot.datasets.compute_stats import aggregate_stats
from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata
from lerobot.datasets.utils import (
DEFAULT_CHUNK_SIZE,
DEFAULT_DATA_FILE_SIZE_IN_MB,
DEFAULT_DATA_PATH,
DEFAULT_EPISODES_PATH,
DEFAULT_VIDEO_FILE_SIZE_IN_MB,
DEFAULT_VIDEO_PATH,
get_file_size_in_mb,
get_hf_features_from_features,
get_parquet_file_size_in_mb,
to_parquet_with_hf_images,
update_chunk_file_indices,
write_info,
write_stats,
write_tasks,
)
from lerobot.datasets.video_utils import concatenate_video_files, get_video_duration_in_s
def validate_all_metadata(all_metadata: list[LeRobotDatasetMetadata]):
    """Check that every dataset's metadata agrees on fps, robot_type and features.

    The first metadata in the list is the reference; a ValueError is raised on
    the first dataset that deviates from it.

    Returns:
        tuple: (fps, robot_type, features) taken from the first metadata.

    Raises:
        ValueError: If any metadata differs from the reference.
    """
    reference = all_metadata[0]
    fps, robot_type, features = reference.fps, reference.robot_type, reference.features

    for meta in tqdm.tqdm(all_metadata, desc="Validate all meta data"):
        if meta.fps != fps:
            raise ValueError(f"Same fps is expected, but got fps={meta.fps} instead of {fps}.")
        if meta.robot_type != robot_type:
            raise ValueError(
                f"Same robot_type is expected, but got robot_type={meta.robot_type} instead of {robot_type}."
            )
        if meta.features != features:
            raise ValueError(
                f"Same features is expected, but got features={meta.features} instead of {features}."
            )

    return fps, robot_type, features
def update_data_df(df, src_meta, dst_meta):
    """Shift a data DataFrame's indices so it can be appended to the destination.

    Episode and frame indices are offset by the totals already aggregated into
    the destination; task indices are remapped through the task names so they
    reference the destination task table.

    Args:
        df: DataFrame containing the data to be updated.
        src_meta: Source dataset metadata.
        dst_meta: Destination dataset metadata.

    Returns:
        pd.DataFrame: Updated DataFrame with adjusted indices.
    """
    df["episode_index"] += dst_meta.info["total_episodes"]
    df["index"] += dst_meta.info["total_frames"]
    # Translate task indices: source index -> task name -> destination index.
    task_names = src_meta.tasks.index.take(df["task_index"].to_numpy())
    df["task_index"] = dst_meta.tasks.loc[task_names, "task_index"].to_numpy()
    return df
def update_meta_data(
    df,
    dst_meta,
    meta_idx,
    data_idx,
    videos_idx,
):
    """Updates metadata DataFrame with new chunk, file, and timestamp indices.

    Adjusts all indices and timestamps to account for previously aggregated
    data and videos in the destination dataset.

    For data file indices, uses the 'src_to_dst' mapping from aggregate_data()
    to correctly map source file indices to their destination locations.

    Args:
        df: DataFrame containing the metadata to be updated.
        dst_meta: Destination dataset metadata.
        meta_idx: Dictionary containing current metadata chunk and file indices.
        data_idx: Dictionary containing current data chunk and file indices.
        videos_idx: Dictionary containing current video indices and timestamps.

    Returns:
        pd.DataFrame: Updated DataFrame with adjusted indices and timestamps.
    """
    # Episode-metadata file indices are a simple offset by the destination cursor.
    df["meta/episodes/chunk_index"] = df["meta/episodes/chunk_index"] + meta_idx["chunk"]
    df["meta/episodes/file_index"] = df["meta/episodes/file_index"] + meta_idx["file"]
    # Update data file indices using source-to-destination mapping
    # This is critical for handling datasets that are already results of a merge
    data_src_to_dst = data_idx.get("src_to_dst", {})
    if data_src_to_dst:
        # Store original indices for lookup
        df["_orig_data_chunk"] = df["data/chunk_index"].copy()
        df["_orig_data_file"] = df["data/file_index"].copy()
        # Vectorized mapping from (src_chunk, src_file) to (dst_chunk, dst_file)
        # This is much faster than per-row iteration for large metadata tables
        mapping_index = pd.MultiIndex.from_tuples(
            list(data_src_to_dst.keys()),
            names=["chunk_index", "file_index"],
        )
        mapping_values = list(data_src_to_dst.values())
        mapping_df = pd.DataFrame(
            mapping_values,
            index=mapping_index,
            columns=["dst_chunk", "dst_file"],
        )
        # Construct a MultiIndex for each row based on original data indices
        row_index = pd.MultiIndex.from_arrays(
            [df["_orig_data_chunk"], df["_orig_data_file"]],
            names=["chunk_index", "file_index"],
        )
        # Align mapping to rows; missing keys fall back to the default destination
        reindexed = mapping_df.reindex(row_index)
        reindexed[["dst_chunk", "dst_file"]] = reindexed[["dst_chunk", "dst_file"]].fillna(
            {"dst_chunk": data_idx["chunk"], "dst_file": data_idx["file"]}
        )
        # Assign mapped destination indices back to the DataFrame
        df["data/chunk_index"] = reindexed["dst_chunk"].to_numpy()
        df["data/file_index"] = reindexed["dst_file"].to_numpy()
        # Clean up temporary columns
        df = df.drop(columns=["_orig_data_chunk", "_orig_data_file"])
    else:
        # Fallback to simple offset (backward compatibility for single-file sources)
        df["data/chunk_index"] = df["data/chunk_index"] + data_idx["chunk"]
        df["data/file_index"] = df["data/file_index"] + data_idx["file"]
    for key, video_idx in videos_idx.items():
        # Store original video file indices before updating
        orig_chunk_col = f"videos/{key}/chunk_index"
        orig_file_col = f"videos/{key}/file_index"
        df["_orig_chunk"] = df[orig_chunk_col].copy()
        df["_orig_file"] = df[orig_file_col].copy()
        # Get mappings for this video key
        src_to_offset = video_idx.get("src_to_offset", {})
        src_to_dst = video_idx.get("src_to_dst", {})
        # Apply per-source-file mappings
        if src_to_dst:
            # Map each episode to its correct destination file and apply offset
            # NOTE(review): per-row df.at loop — correct but O(rows); left as-is
            # because the offset is keyed per source file and order matters.
            for idx in df.index:
                src_key = (df.at[idx, "_orig_chunk"], df.at[idx, "_orig_file"])
                # Get destination chunk/file for this source file
                dst_chunk, dst_file = src_to_dst.get(src_key, (video_idx["chunk"], video_idx["file"]))
                df.at[idx, orig_chunk_col] = dst_chunk
                df.at[idx, orig_file_col] = dst_file
                # Apply timestamp offset
                offset = src_to_offset.get(src_key, 0)
                df.at[idx, f"videos/{key}/from_timestamp"] += offset
                df.at[idx, f"videos/{key}/to_timestamp"] += offset
        elif src_to_offset:
            # Fallback: use same destination for all, but apply per-file offsets
            df[orig_chunk_col] = video_idx["chunk"]
            df[orig_file_col] = video_idx["file"]
            for idx in df.index:
                src_key = (df.at[idx, "_orig_chunk"], df.at[idx, "_orig_file"])
                offset = src_to_offset.get(src_key, 0)
                df.at[idx, f"videos/{key}/from_timestamp"] += offset
                df.at[idx, f"videos/{key}/to_timestamp"] += offset
        else:
            # Fallback to simple offset (for backward compatibility)
            df[orig_chunk_col] = video_idx["chunk"]
            df[orig_file_col] = video_idx["file"]
            df[f"videos/{key}/from_timestamp"] = (
                df[f"videos/{key}/from_timestamp"] + video_idx["latest_duration"]
            )
            df[f"videos/{key}/to_timestamp"] = df[f"videos/{key}/to_timestamp"] + video_idx["latest_duration"]
        # Clean up temporary columns
        df = df.drop(columns=["_orig_chunk", "_orig_file"])
    # Frame ranges and episode indices shift by the destination's running totals.
    df["dataset_from_index"] = df["dataset_from_index"] + dst_meta.info["total_frames"]
    df["dataset_to_index"] = df["dataset_to_index"] + dst_meta.info["total_frames"]
    df["episode_index"] = df["episode_index"] + dst_meta.info["total_episodes"]
    return df
def aggregate_datasets(
    repo_ids: list[str],
    aggr_repo_id: str,
    roots: list[Path] | None = None,
    aggr_root: Path | None = None,
    data_files_size_in_mb: float | None = None,
    video_files_size_in_mb: float | None = None,
    chunk_size: int | None = None,
):
    """Merge several LeRobot datasets into one destination dataset.

    Loads and validates every source's metadata, creates the destination
    dataset with a unified task table, then copies videos, data and episode
    metadata source by source, and finally writes the aggregated statistics.

    Args:
        repo_ids: List of repository IDs for the datasets to aggregate.
        aggr_repo_id: Repository ID for the aggregated output dataset.
        roots: Optional list of root paths for the source datasets.
        aggr_root: Optional root path for the aggregated dataset.
        data_files_size_in_mb: Maximum size for data files in MB (defaults to DEFAULT_DATA_FILE_SIZE_IN_MB)
        video_files_size_in_mb: Maximum size for video files in MB (defaults to DEFAULT_VIDEO_FILE_SIZE_IN_MB)
        chunk_size: Maximum number of files per chunk (defaults to DEFAULT_CHUNK_SIZE)
    """
    logging.info("Start aggregate_datasets")

    # Fall back to library defaults when limits are not provided.
    if data_files_size_in_mb is None:
        data_files_size_in_mb = DEFAULT_DATA_FILE_SIZE_IN_MB
    if video_files_size_in_mb is None:
        video_files_size_in_mb = DEFAULT_VIDEO_FILE_SIZE_IN_MB
    if chunk_size is None:
        chunk_size = DEFAULT_CHUNK_SIZE

    if roots is None:
        all_metadata = [LeRobotDatasetMetadata(rid) for rid in repo_ids]
    else:
        all_metadata = [
            LeRobotDatasetMetadata(rid, root=root)
            for rid, root in zip(repo_ids, roots, strict=False)
        ]

    fps, robot_type, features = validate_all_metadata(all_metadata)
    video_keys = [k for k, ft in features.items() if ft["dtype"] == "video"]

    dst_meta = LeRobotDatasetMetadata.create(
        repo_id=aggr_repo_id,
        fps=fps,
        robot_type=robot_type,
        features=features,
        root=aggr_root,
        use_videos=len(video_keys) > 0,
        chunks_size=chunk_size,
        data_files_size_in_mb=data_files_size_in_mb,
        video_files_size_in_mb=video_files_size_in_mb,
    )

    logging.info("Find all tasks")
    unique_tasks = pd.concat([m.tasks for m in all_metadata]).index.unique()
    dst_meta.tasks = pd.DataFrame(
        {"task_index": range(len(unique_tasks))}, index=pd.Index(unique_tasks, name="task")
    )

    # Cursors tracking where the next metadata/data/video content goes.
    meta_idx = {"chunk": 0, "file": 0}
    data_idx = {"chunk": 0, "file": 0}
    videos_idx = {
        k: {"chunk": 0, "file": 0, "latest_duration": 0, "episode_duration": 0} for k in video_keys
    }
    dst_meta.episodes = {}

    for src_meta in tqdm.tqdm(all_metadata, desc="Copy data and videos"):
        videos_idx = aggregate_videos(src_meta, dst_meta, videos_idx, video_files_size_in_mb, chunk_size)
        data_idx = aggregate_data(src_meta, dst_meta, data_idx, data_files_size_in_mb, chunk_size)
        meta_idx = aggregate_metadata(src_meta, dst_meta, meta_idx, data_idx, videos_idx)
        # Drop the per-source mapping so it cannot leak into the next source dataset.
        data_idx.pop("src_to_dst", None)
        dst_meta.info["total_episodes"] += src_meta.total_episodes
        dst_meta.info["total_frames"] += src_meta.total_frames

    finalize_aggregation(dst_meta, all_metadata)
    logging.info("Aggregation complete.")
def aggregate_videos(src_meta, dst_meta, videos_idx, video_files_size_in_mb, chunk_size):
    """Aggregates video chunks from a source dataset into the destination dataset.

    Handles video file concatenation and rotation based on file size limits.
    Creates new video files when size limits are exceeded.

    Args:
        src_meta: Source dataset metadata.
        dst_meta: Destination dataset metadata.
        videos_idx: Dictionary tracking video chunk and file indices.
        video_files_size_in_mb: Maximum size for video files in MB (defaults to DEFAULT_VIDEO_FILE_SIZE_IN_MB)
        chunk_size: Maximum number of files per chunk (defaults to DEFAULT_CHUNK_SIZE)

    Returns:
        dict: Updated videos_idx with current chunk and file indices.
    """
    # Reset per-source bookkeeping; dst_file_durations persists across sources.
    for key in videos_idx:
        videos_idx[key]["episode_duration"] = 0
        # Track offset for each source (chunk, file) pair
        videos_idx[key]["src_to_offset"] = {}
        # Track destination (chunk, file) for each source (chunk, file) pair
        videos_idx[key]["src_to_dst"] = {}
        # Initialize dst_file_durations if not present
        # dst_file_durations tracks duration of each destination file
        if "dst_file_durations" not in videos_idx[key]:
            videos_idx[key]["dst_file_durations"] = {}
    for key, video_idx in videos_idx.items():
        # Deduplicate the (chunk, file) pairs referenced by the source episodes.
        unique_chunk_file_pairs = {
            (chunk, file)
            for chunk, file in zip(
                src_meta.episodes[f"videos/{key}/chunk_index"],
                src_meta.episodes[f"videos/{key}/file_index"],
                strict=False,
            )
        }
        unique_chunk_file_pairs = sorted(unique_chunk_file_pairs)
        chunk_idx = video_idx["chunk"]
        file_idx = video_idx["file"]
        dst_file_durations = video_idx["dst_file_durations"]
        for src_chunk_idx, src_file_idx in unique_chunk_file_pairs:
            src_path = src_meta.root / DEFAULT_VIDEO_PATH.format(
                video_key=key,
                chunk_index=src_chunk_idx,
                file_index=src_file_idx,
            )
            dst_path = dst_meta.root / DEFAULT_VIDEO_PATH.format(
                video_key=key,
                chunk_index=chunk_idx,
                file_index=file_idx,
            )
            src_duration = get_video_duration_in_s(src_path)
            dst_key = (chunk_idx, file_idx)
            if not dst_path.exists():
                # New destination file: offset is 0
                videos_idx[key]["src_to_offset"][(src_chunk_idx, src_file_idx)] = 0
                videos_idx[key]["src_to_dst"][(src_chunk_idx, src_file_idx)] = dst_key
                dst_path.parent.mkdir(parents=True, exist_ok=True)
                shutil.copy(str(src_path), str(dst_path))
                # Track duration of this destination file
                dst_file_durations[dst_key] = src_duration
                videos_idx[key]["episode_duration"] += src_duration
                continue
            # Check file sizes before appending
            src_size = get_file_size_in_mb(src_path)
            dst_size = get_file_size_in_mb(dst_path)
            if dst_size + src_size >= video_files_size_in_mb:
                # Rotate to a new file - offset is 0
                chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, chunk_size)
                dst_key = (chunk_idx, file_idx)
                videos_idx[key]["src_to_offset"][(src_chunk_idx, src_file_idx)] = 0
                videos_idx[key]["src_to_dst"][(src_chunk_idx, src_file_idx)] = dst_key
                dst_path = dst_meta.root / DEFAULT_VIDEO_PATH.format(
                    video_key=key,
                    chunk_index=chunk_idx,
                    file_index=file_idx,
                )
                dst_path.parent.mkdir(parents=True, exist_ok=True)
                shutil.copy(str(src_path), str(dst_path))
                # Track duration of this new destination file
                dst_file_durations[dst_key] = src_duration
            else:
                # Append to existing destination file
                # Offset is the current duration of this destination file
                current_dst_duration = dst_file_durations.get(dst_key, 0)
                videos_idx[key]["src_to_offset"][(src_chunk_idx, src_file_idx)] = current_dst_duration
                videos_idx[key]["src_to_dst"][(src_chunk_idx, src_file_idx)] = dst_key
                concatenate_video_files(
                    [dst_path, src_path],
                    dst_path,
                )
                # Update duration of this destination file
                dst_file_durations[dst_key] = current_dst_duration + src_duration
            videos_idx[key]["episode_duration"] += src_duration
        # Persist the cursor so the next source continues where this one stopped.
        videos_idx[key]["chunk"] = chunk_idx
        videos_idx[key]["file"] = file_idx
    return videos_idx
def aggregate_data(src_meta, dst_meta, data_idx, data_files_size_in_mb, chunk_size):
    """Copy data parquet files from one source dataset into the destination.

    Each unique source (chunk, file) pair is read, its indices are rewritten
    via update_data_df, and the rows are appended to the destination with
    size-based file rotation. Records a `src_to_dst` mapping from source
    (chunk, file) to destination (chunk, file), which is critical for
    correctly updating episode metadata when a source has multiple data
    files (e.g. it is itself the result of a previous merge).

    Args:
        src_meta: Source dataset metadata.
        dst_meta: Destination dataset metadata.
        data_idx: Dictionary tracking data chunk and file indices.
        data_files_size_in_mb: Maximum size for data files in MB.
        chunk_size: Maximum number of files per chunk.

    Returns:
        dict: Updated data_idx with current chunk and file indices.
    """
    pairs = sorted(
        {
            (c, f)
            for c, f in zip(
                src_meta.episodes["data/chunk_index"], src_meta.episodes["data/file_index"], strict=False
            )
        }
    )

    contains_images = len(dst_meta.image_keys) > 0
    # Features schema is needed so image columns keep proper parquet typing.
    hf_features = get_hf_features_from_features(dst_meta.features) if contains_images else None

    src_to_dst: dict[tuple[int, int], tuple[int, int]] = {}
    for src_chunk_idx, src_file_idx in pairs:
        src_path = src_meta.root / DEFAULT_DATA_PATH.format(
            chunk_index=src_chunk_idx, file_index=src_file_idx
        )
        if contains_images:
            # Read through HF datasets so image columns keep their format.
            df = datasets.Dataset.from_parquet(str(src_path)).to_pandas()
        else:
            df = pd.read_parquet(src_path)
        df = update_data_df(df, src_meta, dst_meta)

        # The writer decides rotation and reports the actual destination file.
        data_idx, dst_pair = append_or_create_parquet_file(
            df,
            src_path,
            data_idx,
            data_files_size_in_mb,
            chunk_size,
            DEFAULT_DATA_PATH,
            contains_images=contains_images,
            aggr_root=dst_meta.root,
            hf_features=hf_features,
        )
        src_to_dst[(src_chunk_idx, src_file_idx)] = dst_pair

    data_idx["src_to_dst"] = src_to_dst
    return data_idx
def aggregate_metadata(src_meta, dst_meta, meta_idx, data_idx, videos_idx):
    """Copy episode metadata from one source dataset into the destination.

    Each source episodes parquet file is read, its indices and timestamps are
    rewritten via update_meta_data, and the rows are appended to the
    destination metadata files with size-based rotation.

    Args:
        src_meta: Source dataset metadata.
        dst_meta: Destination dataset metadata.
        meta_idx: Dictionary tracking metadata chunk and file indices.
        data_idx: Dictionary tracking data chunk and file indices.
        videos_idx: Dictionary tracking video indices and timestamps.

    Returns:
        dict: Updated meta_idx with current chunk and file indices.
    """
    pairs = sorted(
        {
            (c, f)
            for c, f in zip(
                src_meta.episodes["meta/episodes/chunk_index"],
                src_meta.episodes["meta/episodes/file_index"],
                strict=False,
            )
        }
    )

    for chunk_idx, file_idx in pairs:
        src_path = src_meta.root / DEFAULT_EPISODES_PATH.format(chunk_index=chunk_idx, file_index=file_idx)
        df = update_meta_data(
            pd.read_parquet(src_path),
            dst_meta,
            meta_idx,
            data_idx,
            videos_idx,
        )
        meta_idx, _ = append_or_create_parquet_file(
            df,
            src_path,
            meta_idx,
            DEFAULT_DATA_FILE_SIZE_IN_MB,
            DEFAULT_CHUNK_SIZE,
            DEFAULT_EPISODES_PATH,
            contains_images=False,
            aggr_root=dst_meta.root,
        )

    # Account for the total video duration contributed by this source dataset.
    for k in videos_idx:
        videos_idx[k]["latest_duration"] += videos_idx[k]["episode_duration"]
    return meta_idx
def append_or_create_parquet_file(
    df: pd.DataFrame,
    src_path: Path,
    idx: dict[str, int],
    max_mb: float,
    chunk_size: int,
    default_path: str,
    contains_images: bool = False,
    aggr_root: Path | None = None,  # fixed annotation: the default is None
    hf_features: datasets.Features | None = None,
) -> tuple[dict[str, int], tuple[int, int]]:
    """Appends data to an existing parquet file or creates a new one based on size constraints.

    Manages file rotation when size limits are exceeded to prevent individual files
    from becoming too large. Handles both regular parquet files and those containing images.

    Args:
        df: DataFrame to write to the parquet file.
        src_path: Path to the source file (used for size estimation).
        idx: Dictionary containing current 'chunk' and 'file' indices.
        max_mb: Maximum allowed file size in MB before rotation.
        chunk_size: Maximum number of files per chunk before incrementing chunk index.
        default_path: Format string for generating file paths.
        contains_images: Whether the data contains images requiring special handling.
        aggr_root: Root path for the aggregated dataset.
        hf_features: Optional HuggingFace Features schema for proper image typing.

    Returns:
        tuple: (updated_idx, (dst_chunk, dst_file)) where updated_idx is the index dict
            and (dst_chunk, dst_file) is the actual destination file the data was written to.
    """
    dst_chunk, dst_file = idx["chunk"], idx["file"]
    dst_path = aggr_root / default_path.format(chunk_index=dst_chunk, file_index=dst_file)

    # Fresh destination file: just write and return without any size check.
    if not dst_path.exists():
        dst_path.parent.mkdir(parents=True, exist_ok=True)
        if contains_images:
            to_parquet_with_hf_images(df, dst_path, features=hf_features)
        else:
            df.to_parquet(dst_path)
        return idx, (dst_chunk, dst_file)

    src_size = get_parquet_file_size_in_mb(src_path)
    dst_size = get_parquet_file_size_in_mb(dst_path)

    if dst_size + src_size >= max_mb:
        # Rotate: the combined size would exceed the limit, so start a new file.
        idx["chunk"], idx["file"] = update_chunk_file_indices(idx["chunk"], idx["file"], chunk_size)
        dst_chunk, dst_file = idx["chunk"], idx["file"]
        new_path = aggr_root / default_path.format(chunk_index=dst_chunk, file_index=dst_file)
        new_path.parent.mkdir(parents=True, exist_ok=True)
        final_df = df
        target_path = new_path
    else:
        # Append: merge existing destination rows with the new ones.
        if contains_images:
            # Use HuggingFace datasets to read existing data to preserve image format
            existing_ds = datasets.Dataset.from_parquet(str(dst_path))
            existing_df = existing_ds.to_pandas()
        else:
            existing_df = pd.read_parquet(dst_path)
        final_df = pd.concat([existing_df, df], ignore_index=True)
        target_path = dst_path

    if contains_images:
        to_parquet_with_hf_images(final_df, target_path, features=hf_features)
    else:
        final_df.to_parquet(target_path)
    return idx, (dst_chunk, dst_file)
def finalize_aggregation(aggr_meta, all_metadata):
    """Write the summary files of the finished aggregated dataset.

    Persists the task table, the info file (with total counts and the train
    split), and the statistics aggregated over all source datasets.

    Args:
        aggr_meta: Aggregated dataset metadata.
        all_metadata: List of all source dataset metadata objects.
    """
    logging.info("write tasks")
    write_tasks(aggr_meta.tasks, aggr_meta.root)

    logging.info("write info")
    total_episodes = sum(m.total_episodes for m in all_metadata)
    total_frames = sum(m.total_frames for m in all_metadata)
    aggr_meta.info.update(
        {
            "total_tasks": len(aggr_meta.tasks),
            "total_episodes": total_episodes,
            "total_frames": total_frames,
            "splits": {"train": f"0:{total_episodes}"},
        }
    )
    write_info(aggr_meta.info, aggr_meta.root)

    logging.info("write stats")
    aggr_meta.stats = aggregate_stats([m.stats for m in all_metadata])
    write_stats(aggr_meta.stats, aggr_meta.root)
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/datasets/aggregate.py",
"license": "Apache License 2.0",
"lines": 542,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 2.1 to
3.0. It will:
- Generate per-episode stats and write them in `episodes_stats.jsonl`
- Check consistency between these new stats and the old ones.
- Remove the deprecated `stats.json`.
- Update codebase_version in `info.json`.
- Push this new version to the hub on the 'main' branch and tags it with "v3.0".
Usage:
Convert a dataset from the hub:
```bash
python src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py \
--repo-id=lerobot/pusht
```
Convert a local dataset (works in place):
```bash
python src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py \
--repo-id=lerobot/pusht \
--root=/path/to/local/dataset/directory \
--push-to-hub=false
N.B. Path semantics (v2): --root is the exact dataset folder containing
meta/, data/, videos/. When omitted, defaults to $HF_LEROBOT_HOME/{repo_id}.
```
"""
import argparse
import logging
import shutil
from pathlib import Path
from typing import Any
import jsonlines
import pandas as pd
import pyarrow as pa
import tqdm
from datasets import Dataset, Features, Image
from huggingface_hub import HfApi, snapshot_download
from requests import HTTPError
from lerobot.datasets.compute_stats import aggregate_stats
from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
from lerobot.datasets.utils import (
DEFAULT_CHUNK_SIZE,
DEFAULT_DATA_FILE_SIZE_IN_MB,
DEFAULT_DATA_PATH,
DEFAULT_VIDEO_FILE_SIZE_IN_MB,
DEFAULT_VIDEO_PATH,
LEGACY_EPISODES_PATH,
LEGACY_EPISODES_STATS_PATH,
LEGACY_TASKS_PATH,
cast_stats_to_numpy,
flatten_dict,
get_file_size_in_mb,
get_parquet_file_size_in_mb,
get_parquet_num_frames,
load_info,
update_chunk_file_indices,
write_episodes,
write_info,
write_stats,
write_tasks,
)
from lerobot.datasets.video_utils import concatenate_video_files, get_video_duration_in_s
from lerobot.utils.constants import HF_LEROBOT_HOME
from lerobot.utils.utils import init_logging
# Codebase version markers: this script converts datasets from V21 to V30.
V21 = "v2.1"
V30 = "v3.0"
"""
-------------------------
OLD
data/chunk-000/episode_000000.parquet
NEW
data/chunk-000/file_000.parquet
-------------------------
OLD
videos/chunk-000/CAMERA/episode_000000.mp4
NEW
videos/CAMERA/chunk-000/file_000.mp4
-------------------------
OLD
episodes.jsonl
{"episode_index": 1, "tasks": ["Put the blue block in the green bowl"], "length": 266}
NEW
meta/episodes/chunk-000/file_000.parquet
episode_index | video_chunk_index | video_file_index | data_chunk_index | data_file_index | tasks | length
-------------------------
OLD
tasks.jsonl
{"task_index": 1, "task": "Put the blue block in the green bowl"}
NEW
meta/tasks.parquet
task_index | task
-------------------------
OLD
episodes_stats.jsonl
{"episode_index": 1, "stats": {"feature_name": {"min": ..., "max": ..., "mean": ..., "std": ..., "count": ...}}}
NEW
meta/episodes/chunk-000/file_000.parquet
episode_index | feature_name/min | feature_name/max | feature_name/mean | feature_name/std | feature_name/count
-------------------------
UPDATE
meta/info.json
-------------------------
"""
def load_jsonlines(fpath: Path) -> list[Any]:
    """Read every record of a JSON Lines file into a list."""
    with jsonlines.open(fpath, "r") as reader:
        records = list(reader)
    return records
def legacy_load_episodes(local_dir: Path) -> dict:
    """Load v2.1 episodes.jsonl, keyed by episode_index in ascending order."""
    records = load_jsonlines(local_dir / LEGACY_EPISODES_PATH)
    records.sort(key=lambda rec: rec["episode_index"])
    return {rec["episode_index"]: rec for rec in records}
def legacy_load_episodes_stats(local_dir: Path) -> dict:
    """Load v2.1 per-episode stats (cast to numpy), keyed by episode_index."""
    records = load_jsonlines(local_dir / LEGACY_EPISODES_STATS_PATH)
    records.sort(key=lambda rec: rec["episode_index"])
    return {rec["episode_index"]: cast_stats_to_numpy(rec["stats"]) for rec in records}
def legacy_load_tasks(local_dir: Path) -> tuple[dict, dict]:
    """Load v2.1 tasks.jsonl; return (task_index -> task, task -> task_index)."""
    records = load_jsonlines(local_dir / LEGACY_TASKS_PATH)
    records.sort(key=lambda rec: rec["task_index"])
    tasks = {rec["task_index"]: rec["task"] for rec in records}
    task_to_task_index = {task: idx for idx, task in tasks.items()}
    return tasks, task_to_task_index
def validate_local_dataset_version(local_path: Path) -> None:
    """Validate that the local dataset has the expected v2.1 version."""
    info = load_info(local_path)
    version = info.get("codebase_version", "unknown")
    if version == V21:
        return
    raise ValueError(
        f"Local dataset has codebase version '{version}', expected '{V21}'. "
        f"This script is specifically for converting v2.1 datasets to v3.0."
    )
def convert_tasks(root, new_root):
    """Convert the legacy tasks.jsonl into the v3.0 tasks parquet table."""
    logging.info(f"Converting tasks from {root} to {new_root}")
    legacy_tasks, _ = legacy_load_tasks(root)
    df_tasks = pd.DataFrame(
        {"task_index": legacy_tasks.keys()},
        index=pd.Index(legacy_tasks.values(), name="task"),
    )
    write_tasks(df_tasks, new_root)
def concat_data_files(paths_to_cat, new_root, chunk_idx, file_idx, image_keys):
    """Concatenate episode parquet files into a single v3.0 data file on disk."""
    # TODO(rcadene): to save RAM use Dataset.from_parquet(file) and concatenate_datasets
    concatenated_df = pd.concat([pd.read_parquet(p) for p in paths_to_cat], ignore_index=True)

    path = new_root / DEFAULT_DATA_PATH.format(chunk_index=chunk_idx, file_index=file_idx)
    path.parent.mkdir(parents=True, exist_ok=True)

    schema = None
    if image_keys:
        # Re-type image columns so they round-trip as HF Image features.
        features = Features.from_arrow_schema(pa.Schema.from_pandas(concatenated_df))
        for key in image_keys:
            features[key] = Image()
        schema = features.arrow_schema

    concatenated_df.to_parquet(path, index=False, schema=schema)
def convert_data(root: Path, new_root: Path, data_file_size_in_mb: int):
    """Concatenate v2.1 per-episode parquet files into size-bounded v3.0 data files.

    Episodes are accumulated in sorted order; when the next episode would push
    the pending output file past `data_file_size_in_mb`, the accumulation is
    flushed first so each episode's metadata points at the file it actually
    lands in.

    Returns:
        list[dict]: Per-episode metadata with destination chunk/file indices and
        the episode's frame range within the aggregated dataset.
    """
    data_dir = root / "data"
    ep_paths = sorted(data_dir.glob("*/*.parquet"))
    image_keys = get_image_keys(root)
    chunk_idx = 0
    file_idx = 0
    size_in_mb = 0  # accumulated size of the pending output file
    num_frames = 0  # running frame count across all episodes
    paths_to_cat = []  # episode files pending concatenation
    episodes_metadata = []
    logging.info(f"Converting data files from {len(ep_paths)} episodes")
    for ep_idx, ep_path in enumerate(tqdm.tqdm(ep_paths, desc="convert data files")):
        ep_size_in_mb = get_parquet_file_size_in_mb(ep_path)
        ep_num_frames = get_parquet_num_frames(ep_path)
        # Check if we need to start a new file BEFORE creating metadata
        if size_in_mb + ep_size_in_mb >= data_file_size_in_mb and len(paths_to_cat) > 0:
            # Write the accumulated data files
            concat_data_files(paths_to_cat, new_root, chunk_idx, file_idx, image_keys)
            # Move to next file
            chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, DEFAULT_CHUNK_SIZE)
            # Reset for the next file
            size_in_mb = 0
            paths_to_cat = []
        # Now create metadata with correct chunk/file indices
        ep_metadata = {
            "episode_index": ep_idx,
            "data/chunk_index": chunk_idx,
            "data/file_index": file_idx,
            "dataset_from_index": num_frames,
            "dataset_to_index": num_frames + ep_num_frames,
        }
        size_in_mb += ep_size_in_mb
        num_frames += ep_num_frames
        episodes_metadata.append(ep_metadata)
        paths_to_cat.append(ep_path)
    # Write remaining data if any
    if paths_to_cat:
        concat_data_files(paths_to_cat, new_root, chunk_idx, file_idx, image_keys)
    return episodes_metadata
def get_video_keys(root):
    """Return the feature keys whose dtype is 'video' according to info.json."""
    features = load_info(root)["features"]
    return [key for key, ft in features.items() if ft["dtype"] == "video"]
def get_image_keys(root):
    """Return the names of all features whose dtype is "image"."""
    feature_map = load_info(root)["features"]
    return [name for name, spec in feature_map.items() if spec["dtype"] == "image"]
def convert_videos(root: Path, new_root: Path, video_file_size_in_mb: int):
    """Concatenate the per-episode videos of every camera into chunked files.

    Converts each camera independently via ``convert_videos_of_camera``,
    then merges the per-camera episode metadata into one dict per episode.

    Returns:
        list[dict] | None: one merged metadata dict per episode, or None
        when the dataset has no video features.

    Raises:
        ValueError: if the cameras disagree on the number of episodes, or
            if the episode indices of the cameras do not line up.
    """
    logging.info(f"Converting videos from {root} to {new_root}")
    video_keys = get_video_keys(root)
    if len(video_keys) == 0:
        # Image-based or state-only dataset: nothing to convert.
        return None
    video_keys = sorted(video_keys)
    eps_metadata_per_cam = []
    for camera in video_keys:
        eps_metadata = convert_videos_of_camera(root, new_root, camera, video_file_size_in_mb)
        eps_metadata_per_cam.append(eps_metadata)
    # Every camera must have contributed metadata for the same episodes.
    num_eps_per_cam = [len(eps_cam_map) for eps_cam_map in eps_metadata_per_cam]
    if len(set(num_eps_per_cam)) != 1:
        raise ValueError(f"All cams dont have same number of episodes ({num_eps_per_cam}).")
    episods_metadata = []
    num_cameras = len(video_keys)
    num_episodes = num_eps_per_cam[0]
    for ep_idx in tqdm.tqdm(range(num_episodes), desc="convert videos"):
        # Sanity check
        ep_ids = [eps_metadata_per_cam[cam_idx][ep_idx]["episode_index"] for cam_idx in range(num_cameras)]
        ep_ids += [ep_idx]
        if len(set(ep_ids)) != 1:
            raise ValueError(f"All episode indices need to match ({ep_ids}).")
        # Merge the per-camera metadata of this episode into a single dict.
        ep_dict = {}
        for cam_idx in range(num_cameras):
            ep_dict.update(eps_metadata_per_cam[cam_idx][ep_idx])
        episods_metadata.append(ep_dict)
    return episods_metadata
def convert_videos_of_camera(root: Path, new_root: Path, video_key: str, video_file_size_in_mb: int):
    """Concatenate the per-episode mp4 files of one camera into chunked files.

    Accumulates episode clips until adding the next one would exceed
    ``video_file_size_in_mb``, then concatenates the accumulated clips into
    one mp4 under ``new_root`` and retro-patches the chunk/file indices of
    the episodes that landed in that file.

    Returns:
        list[dict]: one metadata dict per episode with its chunk/file
        indices and its [from, to) timestamps inside the output file.
    """
    # Access old paths to mp4
    videos_dir = root / "videos"
    ep_paths = sorted(videos_dir.glob(f"*/{video_key}/*.mp4"))
    ep_idx = 0
    chunk_idx = 0
    file_idx = 0
    size_in_mb = 0  # size accumulated so far in the current output file
    duration_in_s = 0.0  # running duration of the current output file
    paths_to_cat = []
    episodes_metadata = []
    for ep_path in tqdm.tqdm(ep_paths, desc=f"convert videos of {video_key}"):
        ep_size_in_mb = get_file_size_in_mb(ep_path)
        ep_duration_in_s = get_video_duration_in_s(ep_path)
        # Check if adding this episode would exceed the limit
        if size_in_mb + ep_size_in_mb >= video_file_size_in_mb and len(paths_to_cat) > 0:
            # Size limit would be exceeded, save current accumulation WITHOUT this episode
            concatenate_video_files(
                paths_to_cat,
                new_root
                / DEFAULT_VIDEO_PATH.format(video_key=video_key, chunk_index=chunk_idx, file_index=file_idx),
            )
            # Update episodes metadata for the file we just saved
            for i, _ in enumerate(paths_to_cat):
                past_ep_idx = ep_idx - len(paths_to_cat) + i
                episodes_metadata[past_ep_idx][f"videos/{video_key}/chunk_index"] = chunk_idx
                episodes_metadata[past_ep_idx][f"videos/{video_key}/file_index"] = file_idx
            # Move to next file and start fresh with current episode
            chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, DEFAULT_CHUNK_SIZE)
            size_in_mb = 0
            duration_in_s = 0.0
            paths_to_cat = []
        # Add current episode metadata
        ep_metadata = {
            "episode_index": ep_idx,
            f"videos/{video_key}/chunk_index": chunk_idx,  # Will be updated when file is saved
            f"videos/{video_key}/file_index": file_idx,  # Will be updated when file is saved
            f"videos/{video_key}/from_timestamp": duration_in_s,
            f"videos/{video_key}/to_timestamp": duration_in_s + ep_duration_in_s,
        }
        episodes_metadata.append(ep_metadata)
        # Add current episode to accumulation
        paths_to_cat.append(ep_path)
        size_in_mb += ep_size_in_mb
        duration_in_s += ep_duration_in_s
        ep_idx += 1
    # Write remaining videos if any
    if paths_to_cat:
        concatenate_video_files(
            paths_to_cat,
            new_root
            / DEFAULT_VIDEO_PATH.format(video_key=video_key, chunk_index=chunk_idx, file_index=file_idx),
        )
        # Update episodes metadata for the final file
        for i, _ in enumerate(paths_to_cat):
            past_ep_idx = ep_idx - len(paths_to_cat) + i
            episodes_metadata[past_ep_idx][f"videos/{video_key}/chunk_index"] = chunk_idx
            episodes_metadata[past_ep_idx][f"videos/{video_key}/file_index"] = file_idx
    return episodes_metadata
def generate_episode_metadata_dict(
    episodes_legacy_metadata, episodes_metadata, episodes_stats, episodes_videos=None
):
    """Yield one merged metadata dict per episode.

    Combines the new per-episode metadata, the optional per-episode video
    metadata, the legacy (v2.1) metadata and the flattened stats, and pins
    every episode to meta/episodes chunk 0 / file 0.

    Raises ValueError when the episode indices of the sources disagree.
    """
    legacy_vals = list(episodes_legacy_metadata.values())
    stats_vals = list(episodes_stats.values())
    stats_keys = list(episodes_stats.keys())
    for i in range(len(episodes_metadata)):
        legacy = legacy_vals[i]
        ep_metadata = episodes_metadata[i]
        ep_stats = stats_vals[i]
        # Every source must agree on which episode this is.
        ep_ids_set = {legacy["episode_index"], ep_metadata["episode_index"], stats_keys[i]}
        ep_video = {} if episodes_videos is None else episodes_videos[i]
        if episodes_videos is not None:
            ep_ids_set.add(ep_video["episode_index"])
        if len(ep_ids_set) != 1:
            raise ValueError(f"Number of episodes is not the same ({ep_ids_set}).")
        merged = {**ep_metadata, **ep_video, **legacy, **flatten_dict({"stats": ep_stats})}
        merged["meta/episodes/chunk_index"] = 0
        merged["meta/episodes/file_index"] = 0
        yield merged
def convert_episodes_metadata(root, new_root, episodes_metadata, episodes_video_metadata=None):
    """Build and write the consolidated episodes metadata and dataset stats."""
    logging.info(f"Converting episodes metadata from {root} to {new_root}")
    legacy_metadata = legacy_load_episodes(root)
    legacy_stats = legacy_load_episodes_stats(root)
    # All metadata sources must describe the same number of episodes.
    num_eps_set = {len(legacy_metadata), len(episodes_metadata)}
    if episodes_video_metadata is not None:
        num_eps_set.add(len(episodes_video_metadata))
    if len(num_eps_set) != 1:
        raise ValueError(f"Number of episodes is not the same ({num_eps_set}).")
    episodes_ds = Dataset.from_generator(
        lambda: generate_episode_metadata_dict(
            legacy_metadata, episodes_metadata, legacy_stats, episodes_video_metadata
        )
    )
    write_episodes(episodes_ds, new_root)
    write_stats(aggregate_stats(list(legacy_stats.values())), new_root)
def convert_info(root, new_root, data_file_size_in_mb, video_file_size_in_mb):
    """Rewrite info.json for the v3.0 layout and store it under new_root."""
    info = load_info(root)
    info["codebase_version"] = V30
    # v3.0 no longer tracks these aggregate counters.
    del info["total_chunks"]
    del info["total_videos"]
    info["data_files_size_in_mb"] = data_file_size_in_mb
    info["video_files_size_in_mb"] = video_file_size_in_mb
    info["data_path"] = DEFAULT_DATA_PATH
    if info["video_path"] is not None:
        info["video_path"] = DEFAULT_VIDEO_PATH
    info["fps"] = int(info["fps"])
    logging.info(f"Converting info from {root} to {new_root}")
    # Video features already carry fps in their video_info; every other
    # feature gets the dataset-level fps.
    for feature in info["features"].values():
        if feature["dtype"] != "video":
            feature["fps"] = info["fps"]
    write_info(info, new_root)
def convert_dataset(
    repo_id: str,
    branch: str | None = None,
    data_file_size_in_mb: int | None = None,
    video_file_size_in_mb: int | None = None,
    root: str | Path | None = None,
    push_to_hub: bool = True,
    force_conversion: bool = False,
):
    """Convert a v2.1 LeRobot dataset to the v3.0 on-disk layout.

    Args:
        repo_id: Hub repository id of the dataset.
        branch: Hub branch to push the converted dataset to (defaults to main).
        data_file_size_in_mb: target size for chunked parquet files.
        video_file_size_in_mb: target size for chunked video files.
        root: local dataset directory; downloaded from the Hub when None.
        push_to_hub: when True, retags and pushes the converted dataset.
        force_conversion: convert even if a v3.0 revision already exists on the Hub.
    """
    if data_file_size_in_mb is None:
        data_file_size_in_mb = DEFAULT_DATA_FILE_SIZE_IN_MB
    if video_file_size_in_mb is None:
        video_file_size_in_mb = DEFAULT_VIDEO_FILE_SIZE_IN_MB
    # First check if the dataset already has a v3.0 version
    if root is None and not force_conversion:
        try:
            print("Trying to download v3.0 version of the dataset from the hub...")
            snapshot_download(repo_id, repo_type="dataset", revision=V30, local_dir=HF_LEROBOT_HOME / repo_id)
            return
        except Exception:
            print("Dataset does not have an uploaded v3.0 version. Continuing with conversion.")
    # Set root based on whether local dataset path is provided
    use_local_dataset = False
    root = HF_LEROBOT_HOME / repo_id if root is None else Path(root)
    if root.exists():
        validate_local_dataset_version(root)
        use_local_dataset = True
        print(f"Using local dataset at {root}")
    old_root = root.parent / f"{root.name}_old"
    new_root = root.parent / f"{root.name}_v30"
    # Handle old_root cleanup if both old_root and root exist
    # (a previous run left a backup: restore the original before reconverting)
    if old_root.is_dir() and root.is_dir():
        shutil.rmtree(str(root))
        shutil.move(str(old_root), str(root))
    if new_root.is_dir():
        # Remove leftovers from a previously interrupted conversion.
        shutil.rmtree(new_root)
    if not use_local_dataset:
        snapshot_download(
            repo_id,
            repo_type="dataset",
            revision=V21,
            local_dir=root,
        )
    convert_info(root, new_root, data_file_size_in_mb, video_file_size_in_mb)
    convert_tasks(root, new_root)
    episodes_metadata = convert_data(root, new_root, data_file_size_in_mb)
    episodes_videos_metadata = convert_videos(root, new_root, video_file_size_in_mb)
    convert_episodes_metadata(root, new_root, episodes_metadata, episodes_videos_metadata)
    # Keep the original tree as *_old and promote the converted tree to root.
    shutil.move(str(root), str(old_root))
    shutil.move(str(new_root), str(root))
    if push_to_hub:
        hub_api = HfApi()
        try:
            hub_api.delete_tag(repo_id, tag=CODEBASE_VERSION, repo_type="dataset")
        except HTTPError as e:
            print(f"tag={CODEBASE_VERSION} probably doesn't exist. Skipping exception ({e})")
            pass
        # Delete the legacy v2.1 file layout from the Hub before pushing.
        hub_api.delete_files(
            delete_patterns=["data/chunk*/episode_*", "meta/*.jsonl", "videos/chunk*"],
            repo_id=repo_id,
            revision=branch,
            repo_type="dataset",
        )
        hub_api.create_tag(repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset")
        LeRobotDataset(repo_id).push_to_hub()
# CLI entry point: parse arguments and run the v2.1 -> v3.0 conversion.
if __name__ == "__main__":
    init_logging()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo-id",
        type=str,
        required=True,
        help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset "
        "(e.g. `lerobot/pusht`, `<USER>/aloha_sim_insertion_human`).",
    )
    parser.add_argument(
        "--branch",
        type=str,
        default=None,
        help="Repo branch to push your dataset. Defaults to the main branch.",
    )
    parser.add_argument(
        "--data-file-size-in-mb",
        type=int,
        default=None,
        help="File size in MB. Defaults to 100 for data and 500 for videos.",
    )
    parser.add_argument(
        "--video-file-size-in-mb",
        type=int,
        default=None,
        help="File size in MB. Defaults to 100 for data and 500 for videos.",
    )
    parser.add_argument(
        "--root",
        type=str,
        default=None,
        help="Local directory to use for downloading/writing the dataset. Defaults to $HF_LEROBOT_HOME/repo_id.",
    )
    parser.add_argument(
        # argparse stores booleans from the CLI as strings; accept "true"/"false".
        "--push-to-hub",
        type=lambda input: input.lower() == "true",
        default=True,
        help="Push the converted dataset to the hub.",
    )
    parser.add_argument(
        "--force-conversion",
        action="store_true",
        help="Force conversion even if the dataset already has a v3.0 version.",
    )
    args = parser.parse_args()
    # Argument names (with dashes converted to underscores) match the
    # keyword parameters of convert_dataset.
    convert_dataset(**vars(args))
| {
"repo_id": "huggingface/lerobot",
"file_path": "src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py",
"license": "Apache License 2.0",
"lines": 472,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/lerobot:tests/datasets/test_aggregate.py | #!/usr/bin/env python
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch
import datasets
import torch
from lerobot.datasets.aggregate import aggregate_datasets
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from tests.fixtures.constants import DUMMY_REPO_ID
def assert_episode_and_frame_counts(aggr_ds, expected_episodes, expected_frames):
    """Test that total number of episodes and frames are correctly aggregated."""
    actual_episodes = aggr_ds.num_episodes
    actual_frames = aggr_ds.num_frames
    assert actual_episodes == expected_episodes, (
        f"Expected {expected_episodes} episodes, got {actual_episodes}"
    )
    assert actual_frames == expected_frames, (
        f"Expected {expected_frames} frames, got {actual_frames}"
    )
def _assert_items_match(item_ref, item_actual, keys_to_ignore, label):
    """Compare two dataset items key by key, skipping bookkeeping keys.

    Tensor values are compared with torch.allclose (atol=1e-6); all other
    values are compared with ==.
    """
    for key in item_ref:
        if key in keys_to_ignore:
            continue
        ref_val = item_ref[key]
        actual_val = item_actual[key]
        # Handle both tensor and non-tensor data.
        if torch.is_tensor(actual_val) and torch.is_tensor(ref_val):
            assert torch.allclose(actual_val, ref_val, atol=1e-6), (
                f"{label} key '{key}' doesn't match"
            )
        else:
            assert actual_val == ref_val, f"{label} key '{key}' doesn't match"


def assert_dataset_content_integrity(aggr_ds, ds_0, ds_1):
    """Test that the content of both datasets is preserved correctly in the aggregated dataset.

    Spot-checks the first and last item of each source dataset against the
    corresponding position in the aggregated dataset. Keys rewritten during
    aggregation (episode_index, index, timestamp) are ignored.

    The original implementation duplicated the comparison loop four times;
    it is factored into _assert_items_match here.
    """
    keys_to_ignore = ["episode_index", "index", "timestamp"]
    # ds_0 occupies positions [0, len(ds_0)) of the aggregated dataset.
    _assert_items_match(ds_0[0], aggr_ds[0], keys_to_ignore, "First item")
    _assert_items_match(ds_0[-1], aggr_ds[len(ds_0) - 1], keys_to_ignore, "Last ds_0 item")
    # ds_1 occupies positions [len(ds_0), len(ds_0) + len(ds_1)).
    _assert_items_match(ds_1[0], aggr_ds[len(ds_0)], keys_to_ignore, "First ds_1 item")
    _assert_items_match(ds_1[-1], aggr_ds[-1], keys_to_ignore, "Last item")
def assert_metadata_consistency(aggr_ds, ds_0, ds_1):
    """Test that metadata is correctly aggregated."""
    # Basic info must be identical across source and aggregated datasets.
    assert aggr_ds.fps == ds_0.fps == ds_1.fps, "FPS should be the same across all datasets"
    robot_types = [ds.meta.info["robot_type"] for ds in (aggr_ds, ds_0, ds_1)]
    assert robot_types[0] == robot_types[1] == robot_types[2], "Robot type should be the same"
    assert aggr_ds.features == ds_0.features == ds_1.features, "Features should be the same"
    # The aggregated task set is the union of both source task sets.
    expected_tasks = set(ds_0.meta.tasks.index).union(ds_1.meta.tasks.index)
    actual_tasks = set(aggr_ds.meta.tasks.index)
    assert actual_tasks == expected_tasks, f"Expected tasks {expected_tasks}, got {actual_tasks}"
def assert_episode_indices_updated_correctly(aggr_ds, ds_0, ds_1):
    """Test that episode indices are correctly updated after aggregation."""
    n0 = len(ds_0)
    n0_eps = ds_0.num_episodes
    upper = n0_eps + ds_1.num_episodes
    # Frames coming from ds_0 keep episode indices in [0, n0_eps).
    for pos in range(n0):
        ep = aggr_ds[pos]["episode_index"]
        assert ep < n0_eps, (
            f"Episode index {ep} at position {pos} should be < {n0_eps}"
        )
    # Frames coming from ds_1 are shifted into [n0_eps, n0_eps + ds_1.num_episodes).
    for pos in range(n0, n0 + len(ds_1)):
        ep = aggr_ds[pos]["episode_index"]
        assert n0_eps <= ep < upper, (
            f"Episode index {ep} at position {pos} should be >= {n0_eps}"
        )
def assert_video_frames_integrity(aggr_ds, ds_0, ds_1):
    """Test that video frames are correctly preserved and frame indices are updated."""
    features = aggr_ds.meta.info["features"]
    video_keys = [key for key, spec in features.items() if spec["dtype"] == "video"]
    n0 = len(ds_0)
    # The first section of the aggregated dataset must mirror ds_0.
    for pos in range(n0):
        assert aggr_ds[pos]["index"] == pos, (
            f"Frame index at position {pos} should be {pos}, but got {aggr_ds[pos]['index']}"
        )
        for key in video_keys:
            assert torch.allclose(aggr_ds[pos][key], ds_0[pos][key]), (
                f"Visual frames at position {pos} should be equal between aggregated and ds_0"
            )
    # The second section must mirror ds_1, offset by len(ds_0).
    for pos in range(n0, n0 + len(ds_1)):
        assert aggr_ds[pos]["index"] == pos, (
            f"Frame index at position {pos} should be {pos}, but got {aggr_ds[pos]['index']}"
        )
        for key in video_keys:
            assert torch.allclose(aggr_ds[pos][key], ds_1[pos - n0][key]), (
                f"Visual frames at position {pos} should be equal between aggregated and ds_1"
            )
def assert_dataset_iteration_works(aggr_ds):
    """Test that we can iterate through the entire dataset without errors."""
    # Exhaust the iterator; any decoding error surfaces as an exception.
    for _item in iter(aggr_ds):
        pass
def assert_video_timestamps_within_bounds(aggr_ds):
    """Test that all video timestamps are within valid bounds for their respective video files.

    This catches bugs where timestamps point to frames beyond the actual video length,
    which would cause "Invalid frame index" errors during data loading.
    """
    try:
        from torchcodec.decoders import VideoDecoder
    except ImportError:
        # torchcodec unavailable: skip the verification entirely.
        return
    for ep_idx in range(aggr_ds.num_episodes):
        episode = aggr_ds.meta.episodes[ep_idx]
        for vid_key in aggr_ds.meta.video_keys:
            start_ts = episode[f"videos/{vid_key}/from_timestamp"]
            end_ts = episode[f"videos/{vid_key}/to_timestamp"]
            video_path = aggr_ds.root / aggr_ds.meta.get_video_file_path(ep_idx, vid_key)
            if not video_path.exists():
                continue
            from_frame_idx = round(start_ts * aggr_ds.fps)
            to_frame_idx = round(end_ts * aggr_ds.fps)
            try:
                num_frames = len(VideoDecoder(str(video_path)))
                # Verify timestamps don't exceed video bounds.
                assert from_frame_idx >= 0, (
                    f"Episode {ep_idx}, {vid_key}: from_frame_idx ({from_frame_idx}) < 0"
                )
                assert from_frame_idx < num_frames, (
                    f"Episode {ep_idx}, {vid_key}: from_frame_idx ({from_frame_idx}) >= video frames ({num_frames})"
                )
                assert to_frame_idx <= num_frames, (
                    f"Episode {ep_idx}, {vid_key}: to_frame_idx ({to_frame_idx}) > video frames ({num_frames})"
                )
                assert from_frame_idx < to_frame_idx, (
                    f"Episode {ep_idx}, {vid_key}: from_frame_idx ({from_frame_idx}) >= to_frame_idx ({to_frame_idx})"
                )
            except Exception as e:
                raise AssertionError(
                    f"Failed to verify timestamps for episode {ep_idx}, {vid_key}: {e}"
                ) from e
def test_aggregate_datasets(tmp_path, lerobot_dataset_factory):
    """Test basic aggregation functionality with standard parameters."""
    ds_0_num_frames = 400
    ds_1_num_frames = 800
    ds_0_num_episodes = 10
    ds_1_num_episodes = 25
    # Create two datasets with different number of frames and episodes
    ds_0 = lerobot_dataset_factory(
        root=tmp_path / "test_0",
        repo_id=f"{DUMMY_REPO_ID}_0",
        total_episodes=ds_0_num_episodes,
        total_frames=ds_0_num_frames,
    )
    ds_1 = lerobot_dataset_factory(
        root=tmp_path / "test_1",
        repo_id=f"{DUMMY_REPO_ID}_1",
        total_episodes=ds_1_num_episodes,
        total_frames=ds_1_num_frames,
    )
    # Merge both datasets into a third, on-disk dataset.
    aggregate_datasets(
        repo_ids=[ds_0.repo_id, ds_1.repo_id],
        roots=[ds_0.root, ds_1.root],
        aggr_repo_id=f"{DUMMY_REPO_ID}_aggr",
        aggr_root=tmp_path / "test_aggr",
    )
    # Mock the revision to prevent Hub calls during dataset loading
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version,
        patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download,
    ):
        mock_get_safe_version.return_value = "v3.0"
        mock_snapshot_download.return_value = str(tmp_path / "test_aggr")
        aggr_ds = LeRobotDataset(f"{DUMMY_REPO_ID}_aggr", root=tmp_path / "test_aggr")
    # Run all assertion functions
    expected_total_episodes = ds_0.num_episodes + ds_1.num_episodes
    expected_total_frames = ds_0.num_frames + ds_1.num_frames
    assert_episode_and_frame_counts(aggr_ds, expected_total_episodes, expected_total_frames)
    assert_dataset_content_integrity(aggr_ds, ds_0, ds_1)
    assert_metadata_consistency(aggr_ds, ds_0, ds_1)
    assert_episode_indices_updated_correctly(aggr_ds, ds_0, ds_1)
    assert_video_frames_integrity(aggr_ds, ds_0, ds_1)
    assert_video_timestamps_within_bounds(aggr_ds)
    assert_dataset_iteration_works(aggr_ds)
def test_aggregate_with_low_threshold(tmp_path, lerobot_dataset_factory):
    """Test aggregation with small file size limits to force file rotation/sharding."""
    ds_0_num_episodes = ds_1_num_episodes = 10
    ds_0_num_frames = ds_1_num_frames = 400
    ds_0 = lerobot_dataset_factory(
        root=tmp_path / "small_0",
        repo_id=f"{DUMMY_REPO_ID}_small_0",
        total_episodes=ds_0_num_episodes,
        total_frames=ds_0_num_frames,
    )
    ds_1 = lerobot_dataset_factory(
        root=tmp_path / "small_1",
        repo_id=f"{DUMMY_REPO_ID}_small_1",
        total_episodes=ds_1_num_episodes,
        total_frames=ds_1_num_frames,
    )
    # Use the new configurable parameters to force file rotation
    aggregate_datasets(
        repo_ids=[ds_0.repo_id, ds_1.repo_id],
        roots=[ds_0.root, ds_1.root],
        aggr_repo_id=f"{DUMMY_REPO_ID}_small_aggr",
        aggr_root=tmp_path / "small_aggr",
        # Tiny file size to trigger new file instantiation
        data_files_size_in_mb=0.01,
        video_files_size_in_mb=0.1,
    )
    # Mock the revision to prevent Hub calls during dataset loading
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version,
        patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download,
    ):
        mock_get_safe_version.return_value = "v3.0"
        mock_snapshot_download.return_value = str(tmp_path / "small_aggr")
        aggr_ds = LeRobotDataset(f"{DUMMY_REPO_ID}_small_aggr", root=tmp_path / "small_aggr")
    # Verify aggregation worked correctly despite file size constraints
    expected_total_episodes = ds_0_num_episodes + ds_1_num_episodes
    expected_total_frames = ds_0_num_frames + ds_1_num_frames
    assert_episode_and_frame_counts(aggr_ds, expected_total_episodes, expected_total_frames)
    assert_dataset_content_integrity(aggr_ds, ds_0, ds_1)
    assert_metadata_consistency(aggr_ds, ds_0, ds_1)
    assert_episode_indices_updated_correctly(aggr_ds, ds_0, ds_1)
    assert_video_frames_integrity(aggr_ds, ds_0, ds_1)
    assert_video_timestamps_within_bounds(aggr_ds)
    assert_dataset_iteration_works(aggr_ds)
    # Check that multiple files were actually created due to small size limits
    data_dir = tmp_path / "small_aggr" / "data"
    video_dir = tmp_path / "small_aggr" / "videos"
    if data_dir.exists():
        parquet_files = list(data_dir.rglob("*.parquet"))
        assert len(parquet_files) > 1, "Small file size limits should create multiple parquet files"
    if video_dir.exists():
        video_files = list(video_dir.rglob("*.mp4"))
        assert len(video_files) > 1, "Small file size limits should create multiple video files"
def test_video_timestamps_regression(tmp_path, lerobot_dataset_factory):
    """Regression test for video timestamp bug when merging datasets.

    This test specifically checks that video timestamps are correctly calculated
    and accumulated when merging multiple datasets.
    """
    # NOTE: renamed from `datasets` to avoid shadowing the module-level
    # `import datasets`, which other helpers in this file rely on.
    source_datasets = []
    for i in range(3):
        ds = lerobot_dataset_factory(
            root=tmp_path / f"regression_{i}",
            repo_id=f"{DUMMY_REPO_ID}_regression_{i}",
            total_episodes=2,
            total_frames=100,
        )
        source_datasets.append(ds)
    aggregate_datasets(
        repo_ids=[ds.repo_id for ds in source_datasets],
        roots=[ds.root for ds in source_datasets],
        aggr_repo_id=f"{DUMMY_REPO_ID}_regression_aggr",
        aggr_root=tmp_path / "regression_aggr",
    )
    # Mock the revision to prevent Hub calls during dataset loading.
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version,
        patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download,
    ):
        mock_get_safe_version.return_value = "v3.0"
        mock_snapshot_download.return_value = str(tmp_path / "regression_aggr")
        aggr_ds = LeRobotDataset(f"{DUMMY_REPO_ID}_regression_aggr", root=tmp_path / "regression_aggr")
    assert_video_timestamps_within_bounds(aggr_ds)
    # Every frame must still decode to a 3-channel image after aggregation.
    for i in range(len(aggr_ds)):
        item = aggr_ds[i]
        for key in aggr_ds.meta.video_keys:
            assert key in item, f"Video key {key} missing from item {i}"
            assert item[key].shape[0] == 3, f"Expected 3 channels for video key {key}"
def assert_image_schema_preserved(aggr_ds):
    """Test that HuggingFace Image feature schema is preserved in aggregated parquet files.

    This verifies the fix for a bug where image columns were written with a generic
    struct schema {'bytes': Value('binary'), 'path': Value('string')} instead of
    the proper Image() feature type, causing HuggingFace Hub viewer to display
    raw dict objects instead of image thumbnails.
    """
    image_keys = aggr_ds.meta.image_keys
    if not image_keys:
        # Video-only dataset: nothing to verify.
        return
    # Check that parquet files have proper Image schema.
    parquet_files = list((aggr_ds.root / "data").rglob("*.parquet"))
    assert len(parquet_files) > 0, "No parquet files found in aggregated dataset"
    for pq_path in parquet_files:
        # Load with HuggingFace datasets to inspect the stored schema.
        table_ds = datasets.Dataset.from_parquet(str(pq_path))
        for image_key in image_keys:
            feature = table_ds.features.get(image_key)
            assert feature is not None, f"Image key '{image_key}' not found in parquet schema"
            assert isinstance(feature, datasets.Image), (
                f"Image key '{image_key}' should have Image() feature type, "
                f"but got {type(feature).__name__}: {feature}. "
                "This indicates image schema was not preserved during aggregation."
            )
def assert_image_frames_integrity(aggr_ds, ds_0, ds_1):
    """Test that image frames are correctly preserved after aggregation."""
    image_keys = aggr_ds.meta.image_keys
    if not image_keys:
        # Video-only dataset: nothing to verify.
        return
    n0 = len(ds_0)
    for pos in range(n0 + len(ds_1)):
        item = aggr_ds[pos]
        assert item["index"] == pos, (
            f"Frame index at position {pos} should be {pos}, but got {item['index']}"
        )
        # Positions below len(ds_0) come from ds_0; the rest from ds_1.
        source_item = ds_0[pos] if pos < n0 else ds_1[pos - n0]
        source_name = "ds_0" if pos < n0 else "ds_1"
        for key in image_keys:
            assert torch.allclose(item[key], source_item[key]), (
                f"Image frames at position {pos} should be equal between aggregated and {source_name}"
            )
def test_aggregate_image_datasets(tmp_path, lerobot_dataset_factory):
    """Test aggregation of image-based datasets preserves HuggingFace Image schema.

    This test specifically verifies that:
    1. Image-based datasets can be aggregated correctly
    2. The HuggingFace Image() feature type is preserved in parquet files
    3. Image data integrity is maintained across aggregation
    4. Images can be properly decoded after aggregation

    This catches the bug where to_parquet_with_hf_images() was not passing
    the features schema, causing image columns to be written as generic
    struct types instead of Image() types.
    """
    ds_0_num_frames = 50
    ds_1_num_frames = 75
    ds_0_num_episodes = 2
    ds_1_num_episodes = 3
    # Create two image-based datasets (use_videos=False)
    ds_0 = lerobot_dataset_factory(
        root=tmp_path / "image_0",
        repo_id=f"{DUMMY_REPO_ID}_image_0",
        total_episodes=ds_0_num_episodes,
        total_frames=ds_0_num_frames,
        use_videos=False,  # Image-based dataset
    )
    ds_1 = lerobot_dataset_factory(
        root=tmp_path / "image_1",
        repo_id=f"{DUMMY_REPO_ID}_image_1",
        total_episodes=ds_1_num_episodes,
        total_frames=ds_1_num_frames,
        use_videos=False,  # Image-based dataset
    )
    # Verify source datasets have image keys
    assert len(ds_0.meta.image_keys) > 0, "ds_0 should have image keys"
    assert len(ds_1.meta.image_keys) > 0, "ds_1 should have image keys"
    # Aggregate the datasets
    aggregate_datasets(
        repo_ids=[ds_0.repo_id, ds_1.repo_id],
        roots=[ds_0.root, ds_1.root],
        aggr_repo_id=f"{DUMMY_REPO_ID}_image_aggr",
        aggr_root=tmp_path / "image_aggr",
    )
    # Load the aggregated dataset (Hub access mocked out)
    with (
        patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version,
        patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download,
    ):
        mock_get_safe_version.return_value = "v3.0"
        mock_snapshot_download.return_value = str(tmp_path / "image_aggr")
        aggr_ds = LeRobotDataset(f"{DUMMY_REPO_ID}_image_aggr", root=tmp_path / "image_aggr")
    # Verify aggregated dataset has image keys
    assert len(aggr_ds.meta.image_keys) > 0, "Aggregated dataset should have image keys"
    assert aggr_ds.meta.image_keys == ds_0.meta.image_keys, "Image keys should match source datasets"
    # Run standard aggregation assertions
    expected_total_episodes = ds_0_num_episodes + ds_1_num_episodes
    expected_total_frames = ds_0_num_frames + ds_1_num_frames
    assert_episode_and_frame_counts(aggr_ds, expected_total_episodes, expected_total_frames)
    assert_dataset_content_integrity(aggr_ds, ds_0, ds_1)
    assert_metadata_consistency(aggr_ds, ds_0, ds_1)
    assert_episode_indices_updated_correctly(aggr_ds, ds_0, ds_1)
    # Image-specific assertions
    assert_image_schema_preserved(aggr_ds)
    assert_image_frames_integrity(aggr_ds, ds_0, ds_1)
    # Verify images can be accessed and have correct shape
    sample_item = aggr_ds[0]
    for image_key in aggr_ds.meta.image_keys:
        img = sample_item[image_key]
        assert isinstance(img, torch.Tensor), f"Image {image_key} should be a tensor"
        assert img.dim() == 3, f"Image {image_key} should have 3 dimensions (C, H, W)"
        assert img.shape[0] == 3, f"Image {image_key} should have 3 channels"
    assert_dataset_iteration_works(aggr_ds)
def test_aggregate_already_merged_dataset(tmp_path, lerobot_dataset_factory):
    """Regression test: aggregate a dataset that is itself the product of a merge.

    An earlier bug made this fail with FileNotFoundError whenever the
    intermediate dataset spanned several parquet files (the result of file
    rotation during a previous merge): metadata kept the *source* file indices
    instead of remapping them to the destination files. The fix tracks a
    src_to_dst mapping inside aggregate_data(); this test drives that exact
    scenario end to end (merge A+B with forced rotation, then merge AB+C).
    """

    def _load_from_disk(repo_id, root):
        # Patch out hub access so the dataset is read purely from the local root.
        with (
            patch("lerobot.datasets.lerobot_dataset.get_safe_version") as version_mock,
            patch("lerobot.datasets.lerobot_dataset.snapshot_download") as download_mock,
        ):
            version_mock.return_value = "v3.0"
            download_mock.return_value = str(root)
            return LeRobotDataset(repo_id, root=root)

    # Two small source datasets to merge first.
    src_a = lerobot_dataset_factory(
        root=tmp_path / "ds_a",
        repo_id=f"{DUMMY_REPO_ID}_a",
        total_episodes=4,
        total_frames=200,
    )
    src_b = lerobot_dataset_factory(
        root=tmp_path / "ds_b",
        repo_id=f"{DUMMY_REPO_ID}_b",
        total_episodes=4,
        total_frames=200,
    )

    # First merge: a tiny per-file size budget forces parquet file rotation,
    # which is the precondition for the original bug.
    ab_root = tmp_path / "ds_ab"
    aggregate_datasets(
        repo_ids=[src_a.repo_id, src_b.repo_id],
        roots=[src_a.root, src_b.root],
        aggr_repo_id=f"{DUMMY_REPO_ID}_ab",
        aggr_root=ab_root,
        data_files_size_in_mb=0.01,  # Force file rotation
    )
    merged_ab = _load_from_disk(f"{DUMMY_REPO_ID}_ab", ab_root)

    # Sanity check that rotation actually produced more than one data file.
    rotated_files = list((ab_root / "data").rglob("*.parquet"))
    assert len(rotated_files) > 1, "First merge should create multiple parquet files"

    # A third dataset to merge on top of the already-merged one.
    src_c = lerobot_dataset_factory(
        root=tmp_path / "ds_c",
        repo_id=f"{DUMMY_REPO_ID}_c",
        total_episodes=2,
        total_frames=100,
    )

    # Second merge (AB + C) — this is where the bug used to surface.
    abc_root = tmp_path / "ds_abc"
    aggregate_datasets(
        repo_ids=[merged_ab.repo_id, src_c.repo_id],
        roots=[merged_ab.root, src_c.root],
        aggr_repo_id=f"{DUMMY_REPO_ID}_abc",
        aggr_root=abc_root,
    )
    merged_abc = _load_from_disk(f"{DUMMY_REPO_ID}_abc", abc_root)

    # Every episode's metadata must reference a parquet file that exists on disk.
    for episode in range(merged_abc.num_episodes):
        referenced = merged_abc.root / merged_abc.meta.get_data_file_path(episode)
        assert referenced.exists(), (
            f"Episode {episode} references non-existent file: {referenced}\n"
            "This indicates the src_to_dst mapping fix is not working correctly."
        )

    # Totals must equal the sum over all three source datasets.
    assert merged_abc.num_episodes == src_a.num_episodes + src_b.num_episodes + src_c.num_episodes
    assert merged_abc.num_frames == src_a.num_frames + src_b.num_frames + src_c.num_frames

    # Full iteration raised FileNotFoundError before the fix.
    assert_dataset_iteration_works(merged_abc)
| {
"repo_id": "huggingface/lerobot",
"file_path": "tests/datasets/test_aggregate.py",
"license": "Apache License 2.0",
"lines": 512,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.