zero7101 committed on
Commit
1d24065
·
verified ·
1 Parent(s): 2f35783

Add files using upload-large-folder tool

Browse files
lerobot/src/lerobot/rl/joint_observations_processor.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from dataclasses import dataclass
18
+ from typing import Any
19
+
20
+ import torch
21
+
22
+ from lerobot.configs.types import PipelineFeatureType, PolicyFeature
23
+ from lerobot.processor.pipeline import (
24
+ ObservationProcessorStep,
25
+ ProcessorStepRegistry,
26
+ )
27
+ from lerobot.robots import Robot
28
+ from lerobot.utils.constants import OBS_STATE
29
+
30
+
31
+ @dataclass
32
+ @ProcessorStepRegistry.register("joint_velocity_processor")
33
+ class JointVelocityProcessorStep(ObservationProcessorStep):
34
+ """
35
+ Calculates and appends joint velocity information to the observation state.
36
+
37
+ This step computes the velocity of each joint by calculating the finite
38
+ difference between the current and the last observed joint positions. The
39
+ resulting velocity vector is then concatenated to the original state vector.
40
+
41
+ Attributes:
42
+ dt: The time step (delta time) in seconds between observations, used for
43
+ calculating velocity.
44
+ last_joint_positions: Stores the joint positions from the previous step
45
+ to enable velocity calculation.
46
+ """
47
+
48
+ dt: float = 0.1
49
+
50
+ last_joint_positions: torch.Tensor | None = None
51
+
52
+ def observation(self, observation: dict) -> dict:
53
+ """
54
+ Computes joint velocities and adds them to the observation state.
55
+
56
+ Args:
57
+ observation: The input observation dictionary, expected to contain
58
+ an `observation.state` key with joint positions.
59
+
60
+ Returns:
61
+ A new observation dictionary with the `observation.state` tensor
62
+ extended to include joint velocities.
63
+
64
+ Raises:
65
+ ValueError: If `observation.state` is not found in the observation.
66
+ """
67
+ # Get current joint positions (assuming they're in observation.state)
68
+ current_positions = observation.get(OBS_STATE)
69
+ if current_positions is None:
70
+ raise ValueError(f"{OBS_STATE} is not in observation")
71
+
72
+ # Initialize last joint positions if not already set
73
+ if self.last_joint_positions is None:
74
+ self.last_joint_positions = current_positions.clone()
75
+ joint_velocities = torch.zeros_like(current_positions)
76
+ else:
77
+ # Compute velocities
78
+ joint_velocities = (current_positions - self.last_joint_positions) / self.dt
79
+
80
+ self.last_joint_positions = current_positions.clone()
81
+
82
+ # Extend observation with velocities
83
+ extended_state = torch.cat([current_positions, joint_velocities], dim=-1)
84
+
85
+ # Create new observation dict
86
+ new_observation = dict(observation)
87
+ new_observation[OBS_STATE] = extended_state
88
+
89
+ return new_observation
90
+
91
+ def get_config(self) -> dict[str, Any]:
92
+ """
93
+ Returns the configuration of the step for serialization.
94
+
95
+ Returns:
96
+ A dictionary containing the time step `dt`.
97
+ """
98
+ return {
99
+ "dt": self.dt,
100
+ }
101
+
102
+ def reset(self) -> None:
103
+ """Resets the internal state, clearing the last known joint positions."""
104
+ self.last_joint_positions = None
105
+
106
+ def transform_features(
107
+ self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
108
+ ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
109
+ """
110
+ Updates the `observation.state` feature to reflect the added velocities.
111
+
112
+ This method doubles the size of the first dimension of the `observation.state`
113
+ shape to account for the concatenation of position and velocity vectors.
114
+
115
+ Args:
116
+ features: The policy features dictionary.
117
+
118
+ Returns:
119
+ The updated policy features dictionary.
120
+ """
121
+ if OBS_STATE in features[PipelineFeatureType.OBSERVATION]:
122
+ original_feature = features[PipelineFeatureType.OBSERVATION][OBS_STATE]
123
+ # Double the shape to account for positions + velocities
124
+ new_shape = (original_feature.shape[0] * 2,) + original_feature.shape[1:]
125
+
126
+ features[PipelineFeatureType.OBSERVATION][OBS_STATE] = PolicyFeature(
127
+ type=original_feature.type, shape=new_shape
128
+ )
129
+ return features
130
+
131
+
132
+ @dataclass
133
+ @ProcessorStepRegistry.register("current_processor")
134
+ class MotorCurrentProcessorStep(ObservationProcessorStep):
135
+ """
136
+ Reads motor currents from a robot and appends them to the observation state.
137
+
138
+ This step queries the robot's hardware interface to get the present current
139
+ for each motor and concatenates this information to the existing state vector.
140
+
141
+ Attributes:
142
+ robot: An instance of a `lerobot` Robot class that provides access to
143
+ the hardware bus.
144
+ """
145
+
146
+ robot: Robot | None = None
147
+
148
+ def observation(self, observation: dict) -> dict:
149
+ """
150
+ Fetches motor currents and adds them to the observation state.
151
+
152
+ Args:
153
+ observation: The input observation dictionary.
154
+
155
+ Returns:
156
+ A new observation dictionary with the `observation.state` tensor
157
+ extended to include motor currents.
158
+
159
+ Raises:
160
+ ValueError: If the `robot` attribute has not been set.
161
+ """
162
+ # Get current values from robot state
163
+ if self.robot is None:
164
+ raise ValueError("Robot is not set")
165
+
166
+ present_current_dict = self.robot.bus.sync_read("Present_Current") # type: ignore[attr-defined]
167
+ motor_currents = torch.tensor(
168
+ [present_current_dict[name] for name in self.robot.bus.motors], # type: ignore[attr-defined]
169
+ dtype=torch.float32,
170
+ ).unsqueeze(0)
171
+
172
+ current_state = observation.get(OBS_STATE)
173
+ if current_state is None:
174
+ return observation
175
+
176
+ extended_state = torch.cat([current_state, motor_currents], dim=-1)
177
+
178
+ # Create new observation dict
179
+ new_observation = dict(observation)
180
+ new_observation[OBS_STATE] = extended_state
181
+
182
+ return new_observation
183
+
184
+ def transform_features(
185
+ self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]
186
+ ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]:
187
+ """
188
+ Updates the `observation.state` feature to reflect the added motor currents.
189
+
190
+ This method increases the size of the first dimension of the `observation.state`
191
+ shape by the number of motors in the robot.
192
+
193
+ Args:
194
+ features: The policy features dictionary.
195
+
196
+ Returns:
197
+ The updated policy features dictionary.
198
+ """
199
+ if OBS_STATE in features[PipelineFeatureType.OBSERVATION] and self.robot is not None:
200
+ original_feature = features[PipelineFeatureType.OBSERVATION][OBS_STATE]
201
+ # Add motor current dimensions to the original state shape
202
+ num_motors = 0
203
+ if hasattr(self.robot, "bus") and hasattr(self.robot.bus, "motors"): # type: ignore[attr-defined]
204
+ num_motors = len(self.robot.bus.motors) # type: ignore[attr-defined]
205
+
206
+ if num_motors > 0:
207
+ new_shape = (original_feature.shape[0] + num_motors,) + original_feature.shape[1:]
208
+ features[PipelineFeatureType.OBSERVATION][OBS_STATE] = PolicyFeature(
209
+ type=original_feature.type, shape=new_shape
210
+ )
211
+ return features
lerobot/src/lerobot/rl/learner.py ADDED
@@ -0,0 +1,1203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team.
4
+ # All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ """
18
+ Learner server runner for distributed HILSerl robot policy training.
19
+
20
+ This script implements the learner component of the distributed HILSerl architecture.
21
+ It initializes the policy network, maintains replay buffers, and updates
22
+ the policy based on transitions received from the actor server.
23
+
24
+ Examples of usage:
25
+
26
+ - Start a learner server for training:
27
+ ```bash
28
+ python -m lerobot.rl.learner --config_path src/lerobot/configs/train_config_hilserl_so100.json
29
+ ```
30
+
31
+ **NOTE**: Start the learner server before launching the actor server. The learner opens a gRPC server
32
+ to communicate with actors.
33
+
34
+ **NOTE**: Training progress can be monitored through Weights & Biases if wandb.enable is set to true
35
+ in your configuration.
36
+
37
+ **WORKFLOW**:
38
+ 1. Create training configuration with proper policy, dataset, and environment settings
39
+ 2. Start this learner server with the configuration
40
+ 3. Start an actor server with the same configuration
41
+ 4. Monitor training progress through wandb dashboard
42
+
43
+ For more details on the complete HILSerl training workflow, see:
44
+ https://github.com/michel-aractingi/lerobot-hilserl-guide
45
+ """
46
+
47
+ import logging
48
+ import os
49
+ import shutil
50
+ import time
51
+ from concurrent.futures import ThreadPoolExecutor
52
+ from pathlib import Path
53
+ from pprint import pformat
54
+
55
+ import grpc
56
+ import torch
57
+ from termcolor import colored
58
+ from torch import nn
59
+ from torch.multiprocessing import Queue
60
+ from torch.optim.optimizer import Optimizer
61
+
62
+ from lerobot.cameras import opencv # noqa: F401
63
+ from lerobot.configs import parser
64
+ from lerobot.configs.train import TrainRLServerPipelineConfig
65
+ from lerobot.datasets.factory import make_dataset
66
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
67
+ from lerobot.policies.factory import make_policy
68
+ from lerobot.policies.sac.modeling_sac import SACPolicy
69
+ from lerobot.rl.buffer import ReplayBuffer, concatenate_batch_transitions
70
+ from lerobot.rl.process import ProcessSignalHandler
71
+ from lerobot.rl.wandb_utils import WandBLogger
72
+ from lerobot.robots import so_follower # noqa: F401
73
+ from lerobot.teleoperators import gamepad, so_leader # noqa: F401
74
+ from lerobot.teleoperators.utils import TeleopEvents
75
+ from lerobot.transport import services_pb2_grpc
76
+ from lerobot.transport.utils import (
77
+ MAX_MESSAGE_SIZE,
78
+ bytes_to_python_object,
79
+ bytes_to_transitions,
80
+ state_to_bytes,
81
+ )
82
+ from lerobot.utils.constants import (
83
+ ACTION,
84
+ CHECKPOINTS_DIR,
85
+ LAST_CHECKPOINT_LINK,
86
+ PRETRAINED_MODEL_DIR,
87
+ TRAINING_STATE_DIR,
88
+ )
89
+ from lerobot.utils.random_utils import set_seed
90
+ from lerobot.utils.train_utils import (
91
+ get_step_checkpoint_dir,
92
+ load_training_state as utils_load_training_state,
93
+ save_checkpoint,
94
+ update_last_checkpoint,
95
+ )
96
+ from lerobot.utils.transition import move_state_dict_to_device, move_transition_to_device
97
+ from lerobot.utils.utils import (
98
+ format_big_number,
99
+ get_safe_torch_device,
100
+ init_logging,
101
+ )
102
+
103
+ from .learner_service import MAX_WORKERS, SHUTDOWN_TIMEOUT, LearnerService
104
+
105
+
106
+ @parser.wrap()
107
+ def train_cli(cfg: TrainRLServerPipelineConfig):
108
+ if not use_threads(cfg):
109
+ import torch.multiprocessing as mp
110
+
111
+ mp.set_start_method("spawn")
112
+
113
+ # Use the job_name from the config
114
+ train(
115
+ cfg,
116
+ job_name=cfg.job_name,
117
+ )
118
+
119
+ logging.info("[LEARNER] train_cli finished")
120
+
121
+
122
+ def train(cfg: TrainRLServerPipelineConfig, job_name: str | None = None):
123
+ """
124
+ Main training function that initializes and runs the training process.
125
+
126
+ Args:
127
+ cfg (TrainRLServerPipelineConfig): The training configuration
128
+ job_name (str | None, optional): Job name for logging. Defaults to None.
129
+ """
130
+
131
+ cfg.validate()
132
+
133
+ if job_name is None:
134
+ job_name = cfg.job_name
135
+
136
+ if job_name is None:
137
+ raise ValueError("Job name must be specified either in config or as a parameter")
138
+
139
+ display_pid = False
140
+ if not use_threads(cfg):
141
+ display_pid = True
142
+
143
+ # Create logs directory to ensure it exists
144
+ log_dir = os.path.join(cfg.output_dir, "logs")
145
+ os.makedirs(log_dir, exist_ok=True)
146
+ log_file = os.path.join(log_dir, f"learner_{job_name}.log")
147
+
148
+ # Initialize logging with explicit log file
149
+ init_logging(log_file=log_file, display_pid=display_pid)
150
+ logging.info(f"Learner logging initialized, writing to {log_file}")
151
+ logging.info(pformat(cfg.to_dict()))
152
+
153
+ # Setup WandB logging if enabled
154
+ if cfg.wandb.enable and cfg.wandb.project:
155
+ from lerobot.rl.wandb_utils import WandBLogger
156
+
157
+ wandb_logger = WandBLogger(cfg)
158
+ else:
159
+ wandb_logger = None
160
+ logging.info(colored("Logs will be saved locally.", "yellow", attrs=["bold"]))
161
+
162
+ # Handle resume logic
163
+ cfg = handle_resume_logic(cfg)
164
+
165
+ set_seed(seed=cfg.seed)
166
+
167
+ torch.backends.cudnn.benchmark = True
168
+ torch.backends.cuda.matmul.allow_tf32 = True
169
+
170
+ is_threaded = use_threads(cfg)
171
+ shutdown_event = ProcessSignalHandler(is_threaded, display_pid=display_pid).shutdown_event
172
+
173
+ start_learner_threads(
174
+ cfg=cfg,
175
+ wandb_logger=wandb_logger,
176
+ shutdown_event=shutdown_event,
177
+ )
178
+
179
+
180
+ def start_learner_threads(
181
+ cfg: TrainRLServerPipelineConfig,
182
+ wandb_logger: WandBLogger | None,
183
+ shutdown_event: any, # Event,
184
+ ) -> None:
185
+ """
186
+ Start the learner threads for training.
187
+
188
+ Args:
189
+ cfg (TrainRLServerPipelineConfig): Training configuration
190
+ wandb_logger (WandBLogger | None): Logger for metrics
191
+ shutdown_event: Event to signal shutdown
192
+ """
193
+ # Create multiprocessing queues
194
+ transition_queue = Queue()
195
+ interaction_message_queue = Queue()
196
+ parameters_queue = Queue()
197
+
198
+ concurrency_entity = None
199
+
200
+ if use_threads(cfg):
201
+ from threading import Thread
202
+
203
+ concurrency_entity = Thread
204
+ else:
205
+ from torch.multiprocessing import Process
206
+
207
+ concurrency_entity = Process
208
+
209
+ communication_process = concurrency_entity(
210
+ target=start_learner,
211
+ args=(
212
+ parameters_queue,
213
+ transition_queue,
214
+ interaction_message_queue,
215
+ shutdown_event,
216
+ cfg,
217
+ ),
218
+ daemon=True,
219
+ )
220
+ communication_process.start()
221
+
222
+ add_actor_information_and_train(
223
+ cfg=cfg,
224
+ wandb_logger=wandb_logger,
225
+ shutdown_event=shutdown_event,
226
+ transition_queue=transition_queue,
227
+ interaction_message_queue=interaction_message_queue,
228
+ parameters_queue=parameters_queue,
229
+ )
230
+ logging.info("[LEARNER] Training process stopped")
231
+
232
+ logging.info("[LEARNER] Closing queues")
233
+ transition_queue.close()
234
+ interaction_message_queue.close()
235
+ parameters_queue.close()
236
+
237
+ communication_process.join()
238
+ logging.info("[LEARNER] Communication process joined")
239
+
240
+ logging.info("[LEARNER] join queues")
241
+ transition_queue.cancel_join_thread()
242
+ interaction_message_queue.cancel_join_thread()
243
+ parameters_queue.cancel_join_thread()
244
+
245
+ logging.info("[LEARNER] queues closed")
246
+
247
+
248
+ # Core algorithm functions
249
+
250
+
251
+ def add_actor_information_and_train(
252
+ cfg: TrainRLServerPipelineConfig,
253
+ wandb_logger: WandBLogger | None,
254
+ shutdown_event: any, # Event,
255
+ transition_queue: Queue,
256
+ interaction_message_queue: Queue,
257
+ parameters_queue: Queue,
258
+ ):
259
+ """
260
+ Handles data transfer from the actor to the learner, manages training updates,
261
+ and logs training progress in an online reinforcement learning setup.
262
+
263
+ This function continuously:
264
+ - Transfers transitions from the actor to the replay buffer.
265
+ - Logs received interaction messages.
266
+ - Ensures training begins only when the replay buffer has a sufficient number of transitions.
267
+ - Samples batches from the replay buffer and performs multiple critic updates.
268
+ - Periodically updates the actor, critic, and temperature optimizers.
269
+ - Logs training statistics, including loss values and optimization frequency.
270
+
271
+ NOTE: This function doesn't have a single responsibility, it should be split into multiple functions
272
+ in the future. The reason why we did that is the GIL in Python. It's super slow the performance
273
+ are divided by 200. So we need to have a single thread that does all the work.
274
+
275
+ Args:
276
+ cfg (TrainRLServerPipelineConfig): Configuration object containing hyperparameters.
277
+ wandb_logger (WandBLogger | None): Logger for tracking training progress.
278
+ shutdown_event (Event): Event to signal shutdown.
279
+ transition_queue (Queue): Queue for receiving transitions from the actor.
280
+ interaction_message_queue (Queue): Queue for receiving interaction messages from the actor.
281
+ parameters_queue (Queue): Queue for sending policy parameters to the actor.
282
+ """
283
+ # Extract all configuration variables at the beginning, it improve the speed performance
284
+ # of 7%
285
+ device = get_safe_torch_device(try_device=cfg.policy.device, log=True)
286
+ storage_device = get_safe_torch_device(try_device=cfg.policy.storage_device)
287
+ clip_grad_norm_value = cfg.policy.grad_clip_norm
288
+ online_step_before_learning = cfg.policy.online_step_before_learning
289
+ utd_ratio = cfg.policy.utd_ratio
290
+ fps = cfg.env.fps
291
+ log_freq = cfg.log_freq
292
+ save_freq = cfg.save_freq
293
+ policy_update_freq = cfg.policy.policy_update_freq
294
+ policy_parameters_push_frequency = cfg.policy.actor_learner_config.policy_parameters_push_frequency
295
+ saving_checkpoint = cfg.save_checkpoint
296
+ online_steps = cfg.policy.online_steps
297
+ async_prefetch = cfg.policy.async_prefetch
298
+
299
+ # Initialize logging for multiprocessing
300
+ if not use_threads(cfg):
301
+ log_dir = os.path.join(cfg.output_dir, "logs")
302
+ os.makedirs(log_dir, exist_ok=True)
303
+ log_file = os.path.join(log_dir, f"learner_train_process_{os.getpid()}.log")
304
+ init_logging(log_file=log_file, display_pid=True)
305
+ logging.info("Initialized logging for actor information and training process")
306
+
307
+ logging.info("Initializing policy")
308
+
309
+ policy: SACPolicy = make_policy(
310
+ cfg=cfg.policy,
311
+ env_cfg=cfg.env,
312
+ )
313
+
314
+ assert isinstance(policy, nn.Module)
315
+
316
+ policy.train()
317
+
318
+ push_actor_policy_to_queue(parameters_queue=parameters_queue, policy=policy)
319
+
320
+ last_time_policy_pushed = time.time()
321
+
322
+ optimizers, lr_scheduler = make_optimizers_and_scheduler(cfg=cfg, policy=policy)
323
+
324
+ # If we are resuming, we need to load the training state
325
+ resume_optimization_step, resume_interaction_step = load_training_state(cfg=cfg, optimizers=optimizers)
326
+
327
+ log_training_info(cfg=cfg, policy=policy)
328
+
329
+ replay_buffer = initialize_replay_buffer(cfg, device, storage_device)
330
+ batch_size = cfg.batch_size
331
+ offline_replay_buffer = None
332
+
333
+ if cfg.dataset is not None:
334
+ offline_replay_buffer = initialize_offline_replay_buffer(
335
+ cfg=cfg,
336
+ device=device,
337
+ storage_device=storage_device,
338
+ )
339
+ batch_size: int = batch_size // 2 # We will sample from both replay buffer
340
+
341
+ logging.info("Starting learner thread")
342
+ interaction_message = None
343
+ optimization_step = resume_optimization_step if resume_optimization_step is not None else 0
344
+ interaction_step_shift = resume_interaction_step if resume_interaction_step is not None else 0
345
+
346
+ dataset_repo_id = None
347
+ if cfg.dataset is not None:
348
+ dataset_repo_id = cfg.dataset.repo_id
349
+
350
+ # Initialize iterators
351
+ online_iterator = None
352
+ offline_iterator = None
353
+
354
+ # NOTE: THIS IS THE MAIN LOOP OF THE LEARNER
355
+ while True:
356
+ # Exit the training loop if shutdown is requested
357
+ if shutdown_event is not None and shutdown_event.is_set():
358
+ logging.info("[LEARNER] Shutdown signal received. Exiting...")
359
+ break
360
+
361
+ # Process all available transitions to the replay buffer, send by the actor server
362
+ process_transitions(
363
+ transition_queue=transition_queue,
364
+ replay_buffer=replay_buffer,
365
+ offline_replay_buffer=offline_replay_buffer,
366
+ device=device,
367
+ dataset_repo_id=dataset_repo_id,
368
+ shutdown_event=shutdown_event,
369
+ )
370
+
371
+ # Process all available interaction messages sent by the actor server
372
+ interaction_message = process_interaction_messages(
373
+ interaction_message_queue=interaction_message_queue,
374
+ interaction_step_shift=interaction_step_shift,
375
+ wandb_logger=wandb_logger,
376
+ shutdown_event=shutdown_event,
377
+ )
378
+
379
+ # Wait until the replay buffer has enough samples to start training
380
+ if len(replay_buffer) < online_step_before_learning:
381
+ continue
382
+
383
+ if online_iterator is None:
384
+ online_iterator = replay_buffer.get_iterator(
385
+ batch_size=batch_size, async_prefetch=async_prefetch, queue_size=2
386
+ )
387
+
388
+ if offline_replay_buffer is not None and offline_iterator is None:
389
+ offline_iterator = offline_replay_buffer.get_iterator(
390
+ batch_size=batch_size, async_prefetch=async_prefetch, queue_size=2
391
+ )
392
+
393
+ time_for_one_optimization_step = time.time()
394
+ for _ in range(utd_ratio - 1):
395
+ # Sample from the iterators
396
+ batch = next(online_iterator)
397
+
398
+ if dataset_repo_id is not None:
399
+ batch_offline = next(offline_iterator)
400
+ batch = concatenate_batch_transitions(
401
+ left_batch_transitions=batch, right_batch_transition=batch_offline
402
+ )
403
+
404
+ actions = batch[ACTION]
405
+ rewards = batch["reward"]
406
+ observations = batch["state"]
407
+ next_observations = batch["next_state"]
408
+ done = batch["done"]
409
+ check_nan_in_transition(observations=observations, actions=actions, next_state=next_observations)
410
+
411
+ observation_features, next_observation_features = get_observation_features(
412
+ policy=policy, observations=observations, next_observations=next_observations
413
+ )
414
+
415
+ # Create a batch dictionary with all required elements for the forward method
416
+ forward_batch = {
417
+ ACTION: actions,
418
+ "reward": rewards,
419
+ "state": observations,
420
+ "next_state": next_observations,
421
+ "done": done,
422
+ "observation_feature": observation_features,
423
+ "next_observation_feature": next_observation_features,
424
+ "complementary_info": batch["complementary_info"],
425
+ }
426
+
427
+ # Use the forward method for critic loss
428
+ critic_output = policy.forward(forward_batch, model="critic")
429
+
430
+ # Main critic optimization
431
+ loss_critic = critic_output["loss_critic"]
432
+ optimizers["critic"].zero_grad()
433
+ loss_critic.backward()
434
+ critic_grad_norm = torch.nn.utils.clip_grad_norm_(
435
+ parameters=policy.critic_ensemble.parameters(), max_norm=clip_grad_norm_value
436
+ )
437
+ optimizers["critic"].step()
438
+
439
+ # Discrete critic optimization (if available)
440
+ if policy.config.num_discrete_actions is not None:
441
+ discrete_critic_output = policy.forward(forward_batch, model="discrete_critic")
442
+ loss_discrete_critic = discrete_critic_output["loss_discrete_critic"]
443
+ optimizers["discrete_critic"].zero_grad()
444
+ loss_discrete_critic.backward()
445
+ discrete_critic_grad_norm = torch.nn.utils.clip_grad_norm_(
446
+ parameters=policy.discrete_critic.parameters(), max_norm=clip_grad_norm_value
447
+ )
448
+ optimizers["discrete_critic"].step()
449
+
450
+ # Update target networks (main and discrete)
451
+ policy.update_target_networks()
452
+
453
+ # Sample for the last update in the UTD ratio
454
+ batch = next(online_iterator)
455
+
456
+ if dataset_repo_id is not None:
457
+ batch_offline = next(offline_iterator)
458
+ batch = concatenate_batch_transitions(
459
+ left_batch_transitions=batch, right_batch_transition=batch_offline
460
+ )
461
+
462
+ actions = batch[ACTION]
463
+ rewards = batch["reward"]
464
+ observations = batch["state"]
465
+ next_observations = batch["next_state"]
466
+ done = batch["done"]
467
+
468
+ check_nan_in_transition(observations=observations, actions=actions, next_state=next_observations)
469
+
470
+ observation_features, next_observation_features = get_observation_features(
471
+ policy=policy, observations=observations, next_observations=next_observations
472
+ )
473
+
474
+ # Create a batch dictionary with all required elements for the forward method
475
+ forward_batch = {
476
+ ACTION: actions,
477
+ "reward": rewards,
478
+ "state": observations,
479
+ "next_state": next_observations,
480
+ "done": done,
481
+ "observation_feature": observation_features,
482
+ "next_observation_feature": next_observation_features,
483
+ }
484
+
485
+ critic_output = policy.forward(forward_batch, model="critic")
486
+
487
+ loss_critic = critic_output["loss_critic"]
488
+ optimizers["critic"].zero_grad()
489
+ loss_critic.backward()
490
+ critic_grad_norm = torch.nn.utils.clip_grad_norm_(
491
+ parameters=policy.critic_ensemble.parameters(), max_norm=clip_grad_norm_value
492
+ ).item()
493
+ optimizers["critic"].step()
494
+
495
+ # Initialize training info dictionary
496
+ training_infos = {
497
+ "loss_critic": loss_critic.item(),
498
+ "critic_grad_norm": critic_grad_norm,
499
+ }
500
+
501
+ # Discrete critic optimization (if available)
502
+ if policy.config.num_discrete_actions is not None:
503
+ discrete_critic_output = policy.forward(forward_batch, model="discrete_critic")
504
+ loss_discrete_critic = discrete_critic_output["loss_discrete_critic"]
505
+ optimizers["discrete_critic"].zero_grad()
506
+ loss_discrete_critic.backward()
507
+ discrete_critic_grad_norm = torch.nn.utils.clip_grad_norm_(
508
+ parameters=policy.discrete_critic.parameters(), max_norm=clip_grad_norm_value
509
+ ).item()
510
+ optimizers["discrete_critic"].step()
511
+
512
+ # Add discrete critic info to training info
513
+ training_infos["loss_discrete_critic"] = loss_discrete_critic.item()
514
+ training_infos["discrete_critic_grad_norm"] = discrete_critic_grad_norm
515
+
516
+ # Actor and temperature optimization (at specified frequency)
517
+ if optimization_step % policy_update_freq == 0:
518
+ for _ in range(policy_update_freq):
519
+ # Actor optimization
520
+ actor_output = policy.forward(forward_batch, model="actor")
521
+ loss_actor = actor_output["loss_actor"]
522
+ optimizers["actor"].zero_grad()
523
+ loss_actor.backward()
524
+ actor_grad_norm = torch.nn.utils.clip_grad_norm_(
525
+ parameters=policy.actor.parameters(), max_norm=clip_grad_norm_value
526
+ ).item()
527
+ optimizers["actor"].step()
528
+
529
+ # Add actor info to training info
530
+ training_infos["loss_actor"] = loss_actor.item()
531
+ training_infos["actor_grad_norm"] = actor_grad_norm
532
+
533
+ # Temperature optimization
534
+ temperature_output = policy.forward(forward_batch, model="temperature")
535
+ loss_temperature = temperature_output["loss_temperature"]
536
+ optimizers["temperature"].zero_grad()
537
+ loss_temperature.backward()
538
+ temp_grad_norm = torch.nn.utils.clip_grad_norm_(
539
+ parameters=[policy.log_alpha], max_norm=clip_grad_norm_value
540
+ ).item()
541
+ optimizers["temperature"].step()
542
+
543
+ # Add temperature info to training info
544
+ training_infos["loss_temperature"] = loss_temperature.item()
545
+ training_infos["temperature_grad_norm"] = temp_grad_norm
546
+ training_infos["temperature"] = policy.temperature
547
+
548
+ # Update temperature
549
+ policy.update_temperature()
550
+
551
+ # Push policy to actors if needed
552
+ if time.time() - last_time_policy_pushed > policy_parameters_push_frequency:
553
+ push_actor_policy_to_queue(parameters_queue=parameters_queue, policy=policy)
554
+ last_time_policy_pushed = time.time()
555
+
556
+ # Update target networks (main and discrete)
557
+ policy.update_target_networks()
558
+
559
+ # Log training metrics at specified intervals
560
+ if optimization_step % log_freq == 0:
561
+ training_infos["replay_buffer_size"] = len(replay_buffer)
562
+ if offline_replay_buffer is not None:
563
+ training_infos["offline_replay_buffer_size"] = len(offline_replay_buffer)
564
+ training_infos["Optimization step"] = optimization_step
565
+
566
+ # Log training metrics
567
+ if wandb_logger:
568
+ wandb_logger.log_dict(d=training_infos, mode="train", custom_step_key="Optimization step")
569
+
570
+ # Calculate and log optimization frequency
571
+ time_for_one_optimization_step = time.time() - time_for_one_optimization_step
572
+ frequency_for_one_optimization_step = 1 / (time_for_one_optimization_step + 1e-9)
573
+
574
+ logging.info(f"[LEARNER] Optimization frequency loop [Hz]: {frequency_for_one_optimization_step}")
575
+
576
+ # Log optimization frequency
577
+ if wandb_logger:
578
+ wandb_logger.log_dict(
579
+ {
580
+ "Optimization frequency loop [Hz]": frequency_for_one_optimization_step,
581
+ "Optimization step": optimization_step,
582
+ },
583
+ mode="train",
584
+ custom_step_key="Optimization step",
585
+ )
586
+
587
+ optimization_step += 1
588
+ if optimization_step % log_freq == 0:
589
+ logging.info(f"[LEARNER] Number of optimization step: {optimization_step}")
590
+
591
+ # Save checkpoint at specified intervals
592
+ if saving_checkpoint and (optimization_step % save_freq == 0 or optimization_step == online_steps):
593
+ save_training_checkpoint(
594
+ cfg=cfg,
595
+ optimization_step=optimization_step,
596
+ online_steps=online_steps,
597
+ interaction_message=interaction_message,
598
+ policy=policy,
599
+ optimizers=optimizers,
600
+ replay_buffer=replay_buffer,
601
+ offline_replay_buffer=offline_replay_buffer,
602
+ dataset_repo_id=dataset_repo_id,
603
+ fps=fps,
604
+ )
605
+
606
+
607
def start_learner(
    parameters_queue: Queue,
    transition_queue: Queue,
    interaction_message_queue: Queue,
    shutdown_event: any,  # Event,
    cfg: TrainRLServerPipelineConfig,
):
    """
    Start the learner server for training.
    It will receive transitions and interaction messages from the actor server,
    and send policy parameters to the actor server.

    Blocks until `shutdown_event` is set, then stops the gRPC server.

    Args:
        parameters_queue: Queue for sending policy parameters to the actor
        transition_queue: Queue for receiving transitions from the actor
        interaction_message_queue: Queue for receiving interaction messages from the actor
        shutdown_event: Event to signal shutdown
        cfg: Training configuration
    """
    if not use_threads(cfg):
        # Running as a separate process: set up per-process logging so this
        # process does not write into the parent's log stream.
        # Create a process-specific log file
        log_dir = os.path.join(cfg.output_dir, "logs")
        os.makedirs(log_dir, exist_ok=True)
        log_file = os.path.join(log_dir, f"learner_process_{os.getpid()}.log")

        # Initialize logging with explicit log file
        init_logging(log_file=log_file, display_pid=True)
        logging.info("Learner server process logging initialized")

        # Setup process handlers to handle shutdown signal
        # But use shutdown event from the main process
        # Return back for MP
        # TODO: Check if its useful
        _ = ProcessSignalHandler(False, display_pid=True)

    service = LearnerService(
        shutdown_event=shutdown_event,
        parameters_queue=parameters_queue,
        seconds_between_pushes=cfg.policy.actor_learner_config.policy_parameters_push_frequency,
        transition_queue=transition_queue,
        interaction_message_queue=interaction_message_queue,
        queue_get_timeout=cfg.policy.actor_learner_config.queue_get_timeout,
    )

    # Raise the default gRPC message size limits so large serialized
    # parameter/transition payloads can be exchanged in chunks.
    server = grpc.server(
        ThreadPoolExecutor(max_workers=MAX_WORKERS),
        options=[
            ("grpc.max_receive_message_length", MAX_MESSAGE_SIZE),
            ("grpc.max_send_message_length", MAX_MESSAGE_SIZE),
        ],
    )

    services_pb2_grpc.add_LearnerServiceServicer_to_server(
        service,
        server,
    )

    host = cfg.policy.actor_learner_config.learner_host
    port = cfg.policy.actor_learner_config.learner_port

    server.add_insecure_port(f"{host}:{port}")
    server.start()
    logging.info("[LEARNER] gRPC server started")

    # Block here; the gRPC thread pool services the RPCs until shutdown.
    shutdown_event.wait()
    logging.info("[LEARNER] Stopping gRPC server...")
    server.stop(SHUTDOWN_TIMEOUT)
    logging.info("[LEARNER] gRPC server stopped")
675
+
676
+
677
def save_training_checkpoint(
    cfg: TrainRLServerPipelineConfig,
    optimization_step: int,
    online_steps: int,
    interaction_message: dict | None,
    policy: nn.Module,
    optimizers: dict[str, Optimizer],
    replay_buffer: ReplayBuffer,
    offline_replay_buffer: ReplayBuffer | None = None,
    dataset_repo_id: str | None = None,
    fps: int = 30,
) -> None:
    """
    Save training checkpoint and associated data.

    This function performs the following steps:
    1. Creates a checkpoint directory with the current optimization step
    2. Saves the policy model, configuration, and optimizer states
    3. Saves the current interaction step for resuming training
    4. Updates the "last" checkpoint symlink to point to this checkpoint
    5. Saves the replay buffer as a dataset for later use
    6. If an offline replay buffer exists, saves it as a separate dataset

    Args:
        cfg: Training configuration
        optimization_step: Current optimization step
        online_steps: Total number of online steps
        interaction_message: Dictionary containing interaction information
        policy: Policy model to save
        optimizers: Dictionary of optimizers
        replay_buffer: Replay buffer to save as dataset
        offline_replay_buffer: Optional offline replay buffer to save
        dataset_repo_id: Repository ID for dataset
        fps: Frames per second for dataset
    """
    logging.info(f"Checkpoint policy after step {optimization_step}")
    # NOTE(review): _num_digits is unused in this function — presumably a
    # leftover from zero-padded checkpoint naming; confirm before removing.
    _num_digits = max(6, len(str(online_steps)))
    # Fall back to 0 when no interaction message has been received yet.
    interaction_step = interaction_message["Interaction step"] if interaction_message is not None else 0

    # Create checkpoint directory
    checkpoint_dir = get_step_checkpoint_dir(cfg.output_dir, online_steps, optimization_step)

    # Save checkpoint
    save_checkpoint(
        checkpoint_dir=checkpoint_dir,
        step=optimization_step,
        cfg=cfg,
        policy=policy,
        optimizer=optimizers,
        scheduler=None,
    )

    # Save interaction step manually (it is not part of the standard
    # checkpoint payload handled by save_checkpoint).
    training_state_dir = os.path.join(checkpoint_dir, TRAINING_STATE_DIR)
    os.makedirs(training_state_dir, exist_ok=True)
    training_state = {"step": optimization_step, "interaction_step": interaction_step}
    torch.save(training_state, os.path.join(training_state_dir, "training_state.pt"))

    # Update the "last" symlink
    update_last_checkpoint(checkpoint_dir)

    # TODO : temporary save replay buffer here, remove later when on the robot
    # We want to control this with the keyboard inputs
    # The dataset dir is rewritten from scratch on every checkpoint.
    dataset_dir = os.path.join(cfg.output_dir, "dataset")
    if os.path.exists(dataset_dir) and os.path.isdir(dataset_dir):
        shutil.rmtree(dataset_dir)

    # Save dataset
    # NOTE: Handle the case where the dataset repo id is not specified in the config
    # eg. RL training without demonstrations data
    repo_id_buffer_save = cfg.env.task if dataset_repo_id is None else dataset_repo_id
    replay_buffer.to_lerobot_dataset(repo_id=repo_id_buffer_save, fps=fps, root=dataset_dir)

    if offline_replay_buffer is not None:
        # The offline (demonstration/intervention) buffer is stored separately.
        dataset_offline_dir = os.path.join(cfg.output_dir, "dataset_offline")
        if os.path.exists(dataset_offline_dir) and os.path.isdir(dataset_offline_dir):
            shutil.rmtree(dataset_offline_dir)

        offline_replay_buffer.to_lerobot_dataset(
            cfg.dataset.repo_id,
            fps=fps,
            root=dataset_offline_dir,
        )

    # NOTE(review): message text says "Resume training" although this marks the
    # end of checkpoint saving — possibly a leftover; confirm intent.
    logging.info("Resume training")
762
+
763
+
764
def make_optimizers_and_scheduler(cfg: TrainRLServerPipelineConfig, policy: nn.Module):
    """Build the Adam optimizers used to train a SAC-style policy.

    Three optimizers are always created:
      - ``"actor"``: optimizes the actor network. When the encoder is shared
        with the critic, encoder parameters are excluded so the actor update
        does not touch them.
      - ``"critic"``: optimizes the critic ensemble.
      - ``"temperature"``: optimizes the entropy temperature ``log_alpha``
        (wrapped in a list so Adam receives a parameter iterable).

    When ``cfg.policy.num_discrete_actions`` is set, a fourth optimizer
    ``"discrete_critic"`` is added for the discrete-action critic.

    Args:
        cfg: Configuration object containing hyperparameters.
        policy (nn.Module): Policy holding actor, critic(s) and temperature.

    Returns:
        tuple: (optimizers dict, lr_scheduler). The scheduler is currently
        always ``None`` but is returned for future extension.
    """
    skip_encoder_params = policy.config.shared_encoder
    actor_params = [
        param
        for name, param in policy.actor.named_parameters()
        if not (skip_encoder_params and name.startswith("encoder"))
    ]

    optimizers = {
        "actor": torch.optim.Adam(params=actor_params, lr=cfg.policy.actor_lr),
        "critic": torch.optim.Adam(params=policy.critic_ensemble.parameters(), lr=cfg.policy.critic_lr),
        "temperature": torch.optim.Adam(params=[policy.log_alpha], lr=cfg.policy.critic_lr),
    }

    if cfg.policy.num_discrete_actions is not None:
        optimizers["discrete_critic"] = torch.optim.Adam(
            params=policy.discrete_critic.parameters(), lr=cfg.policy.critic_lr
        )

    lr_scheduler = None
    return optimizers, lr_scheduler
814
+
815
+
816
+ # Training setup functions
817
+
818
+
819
def handle_resume_logic(cfg: TrainRLServerPipelineConfig) -> TrainRLServerPipelineConfig:
    """Resolve the effective training configuration for the resume flag.

    Without ``resume``, refuse to overwrite an existing checkpoint directory
    and return ``cfg`` unchanged. With ``resume``, require the "last"
    checkpoint link to exist, load the stored train config from it, force
    ``resume=True`` on the restored config, and return it.

    Args:
        cfg (TrainRLServerPipelineConfig): The training configuration

    Returns:
        TrainRLServerPipelineConfig: The updated configuration

    Raises:
        RuntimeError: checkpoint exists while resume=False, or checkpoint
            missing while resume=True.
    """
    last_checkpoint_dir = os.path.join(cfg.output_dir, CHECKPOINTS_DIR, LAST_CHECKPOINT_LINK)
    checkpoint_exists = os.path.exists(last_checkpoint_dir)

    # Fresh run: guard against accidentally clobbering a previous run.
    if not cfg.resume:
        if checkpoint_exists:
            raise RuntimeError(
                f"Output directory {last_checkpoint_dir} already exists. Use `resume=true` to resume training."
            )
        return cfg

    # Resuming: a previous checkpoint is mandatory.
    if not checkpoint_exists:
        raise RuntimeError(f"No model checkpoint found in {last_checkpoint_dir} for resume=True")

    logging.info(
        colored(
            "Valid checkpoint found: resume=True detected, resuming previous run",
            color="yellow",
            attrs=["bold"],
        )
    )

    # Reload the exact config the checkpoint was trained with (via Draccus),
    # keeping only the resume flag from the current invocation.
    checkpoint_cfg = TrainRLServerPipelineConfig.from_pretrained(
        os.path.join(last_checkpoint_dir, PRETRAINED_MODEL_DIR, "train_config.json")
    )
    checkpoint_cfg.resume = True
    return checkpoint_cfg
874
+
875
+
876
def load_training_state(
    cfg: TrainRLServerPipelineConfig,
    optimizers: Optimizer | dict[str, Optimizer],
):
    """Restore optimizer state and step counters from the last checkpoint.

    Args:
        cfg (TrainRLServerPipelineConfig): Training configuration
        optimizers (Optimizer | dict): Optimizers to load state into

    Returns:
        tuple: (optimization_step, interaction_step), or (None, None) when not
        resuming or when loading fails.
    """
    if not cfg.resume:
        return None, None

    checkpoint_dir = os.path.join(cfg.output_dir, CHECKPOINTS_DIR, LAST_CHECKPOINT_LINK)
    logging.info(f"Loading training state from {checkpoint_dir}")

    try:
        # Restores the optimizer state in-place and yields the optimization step.
        step, optimizers, _ = utils_load_training_state(Path(checkpoint_dir), optimizers, None)

        # The interaction step is checkpointed separately in training_state.pt.
        interaction_step = 0
        training_state_path = os.path.join(checkpoint_dir, TRAINING_STATE_DIR, "training_state.pt")
        if os.path.exists(training_state_path):
            training_state = torch.load(training_state_path, weights_only=False)  # nosec B614: Safe usage of torch.load
            interaction_step = training_state.get("interaction_step", 0)

        logging.info(f"Resuming from step {step}, interaction step {interaction_step}")
        return step, interaction_step

    except Exception as e:
        # Best-effort resume: a corrupt or partial checkpoint falls back to a
        # fresh start instead of aborting the run.
        logging.error(f"Failed to load training state: {e}")
        return None, None
915
+
916
+
917
def log_training_info(cfg: TrainRLServerPipelineConfig, policy: nn.Module) -> None:
    """Log output dir, task, step budget and the policy's parameter counts.

    Args:
        cfg (TrainRLServerPipelineConfig): Training configuration
        policy (nn.Module): Policy model
    """
    # Materialize once so the parameter list is walked a single time per count.
    # Variable names are kept because f"{name=}" embeds them in the log output.
    all_params = list(policy.parameters())
    num_learnable_params = sum(p.numel() for p in all_params if p.requires_grad)
    num_total_params = sum(p.numel() for p in all_params)

    logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {cfg.output_dir}")
    logging.info(f"{cfg.env.task=}")
    logging.info(f"{cfg.policy.online_steps=}")
    logging.info(f"{num_learnable_params=} ({format_big_number(num_learnable_params)})")
    logging.info(f"{num_total_params=} ({format_big_number(num_total_params)})")
933
+
934
+
935
def initialize_replay_buffer(
    cfg: TrainRLServerPipelineConfig, device: str, storage_device: str
) -> ReplayBuffer:
    """Create the online replay buffer.

    For a fresh run the buffer starts empty; when resuming it is rebuilt from
    the online dataset snapshot saved under ``<output_dir>/dataset``.

    Args:
        cfg (TrainRLServerPipelineConfig): Training configuration
        device (str): Device to store tensors on
        storage_device (str): Device for storage optimization

    Returns:
        ReplayBuffer: Initialized replay buffer
    """
    state_keys = cfg.policy.input_features.keys()

    if not cfg.resume:
        return ReplayBuffer(
            capacity=cfg.policy.online_buffer_capacity,
            device=device,
            state_keys=state_keys,
            storage_device=storage_device,
            optimize_memory=True,
        )

    logging.info("Resume training load the online dataset")
    # NOTE: In RL is possible to not have a dataset.
    repo_id = cfg.dataset.repo_id if cfg.dataset is not None else None
    dataset = LeRobotDataset(
        repo_id=repo_id,
        root=os.path.join(cfg.output_dir, "dataset"),
    )
    return ReplayBuffer.from_lerobot_dataset(
        lerobot_dataset=dataset,
        capacity=cfg.policy.online_buffer_capacity,
        device=device,
        state_keys=state_keys,
        optimize_memory=True,
    )
976
+
977
+
978
def initialize_offline_replay_buffer(
    cfg: TrainRLServerPipelineConfig,
    device: str,
    storage_device: str,
) -> ReplayBuffer:
    """
    Initialize an offline replay buffer from a dataset.

    Args:
        cfg (TrainRLServerPipelineConfig): Training configuration
        device (str): Device to store tensors on
        storage_device (str): Device for storage optimization

    Returns:
        ReplayBuffer: Initialized offline replay buffer
    """
    if not cfg.resume:
        # Fresh run: build the demonstration dataset from the configured repo.
        logging.info("make_dataset offline buffer")
        offline_dataset = make_dataset(cfg)
    else:
        # Resuming: reload the offline dataset snapshot written at checkpoint time.
        logging.info("load offline dataset")
        dataset_offline_path = os.path.join(cfg.output_dir, "dataset_offline")
        offline_dataset = LeRobotDataset(
            repo_id=cfg.dataset.repo_id,
            root=dataset_offline_path,
        )

    logging.info("Convert to a offline replay buffer")
    offline_replay_buffer = ReplayBuffer.from_lerobot_dataset(
        offline_dataset,
        device=device,
        state_keys=cfg.policy.input_features.keys(),
        storage_device=storage_device,
        optimize_memory=True,
        capacity=cfg.policy.offline_buffer_capacity,
    )
    return offline_replay_buffer
1015
+
1016
+
1017
+ # Utilities/Helpers functions
1018
+
1019
+
1020
def get_observation_features(
    policy: SACPolicy, observations: torch.Tensor, next_observations: torch.Tensor
) -> tuple[torch.Tensor | None, torch.Tensor | None]:
    """Precompute (cache) image features for a frozen vision encoder.

    When the policy has no pretrained vision encoder, or the encoder is
    trainable, caching would be invalid or useless, so ``(None, None)`` is
    returned. Otherwise the frozen encoder's image features are computed once,
    without gradients, for both the current and the next observations.

    Args:
        policy: The policy model
        observations: The current observations
        next_observations: The next observations

    Returns:
        tuple: observation_features, next_observation_features
    """
    encoder_is_frozen = (
        policy.config.vision_encoder_name is not None and policy.config.freeze_vision_encoder
    )
    if not encoder_is_frozen:
        return None, None

    encoder = policy.actor.encoder
    with torch.no_grad():
        current_features = encoder.get_cached_image_features(observations)
        next_features = encoder.get_cached_image_features(next_observations)

    return current_features, next_features
1045
+
1046
+
1047
def use_threads(cfg: TrainRLServerPipelineConfig) -> bool:
    """Return True when the learner is configured for thread (not process) concurrency."""
    learner_concurrency = cfg.policy.concurrency.learner
    return learner_concurrency == "threads"
1049
+
1050
+
1051
def check_nan_in_transition(
    observations: torch.Tensor,
    actions: torch.Tensor,
    next_state: torch.Tensor,
    raise_error: bool = False,
) -> bool:
    """
    Check for NaN values in transition data.

    Observations and next_state are checked key by key, then the action tensor
    is checked. Every offending tensor is logged; with ``raise_error=True`` the
    first offender raises immediately.

    Args:
        observations: Dictionary of observation tensors
        actions: Action tensor
        next_state: Dictionary of next state tensors
        raise_error: If True, raises ValueError when NaN is detected

    Returns:
        bool: True if NaN values were detected, False otherwise
    """
    nan_found = False

    # Check both tensor dictionaries with identical logic, observations first.
    for field_name, tensor_dict in (("observations", observations), ("next_state", next_state)):
        for key, tensor in tensor_dict.items():
            if torch.isnan(tensor).any():
                logging.error(f"{field_name}[{key}] contains NaN values")
                nan_found = True
                if raise_error:
                    raise ValueError(f"NaN detected in {field_name}[{key}]")

    # Check actions
    if torch.isnan(actions).any():
        logging.error("actions contains NaN values")
        nan_found = True
        if raise_error:
            raise ValueError("NaN detected in actions")

    return nan_found
1095
+
1096
+
1097
def push_actor_policy_to_queue(parameters_queue: Queue, policy: nn.Module):
    """Serialize the actor weights (plus discrete critic, if present) to CPU
    state dicts and enqueue them for streaming to the actor process."""
    logging.debug("[LEARNER] Pushing actor policy to the queue")

    # All tensors are moved to CPU so the payload can cross process boundaries.
    state_dicts = {"policy": move_state_dict_to_device(policy.actor.state_dict(), device="cpu")}

    # Hybrid-action policies also ship their discrete critic weights.
    discrete_critic = getattr(policy, "discrete_critic", None)
    if discrete_critic is not None:
        state_dicts["discrete_critic"] = move_state_dict_to_device(
            discrete_critic.state_dict(), device="cpu"
        )
        logging.debug("[LEARNER] Including discrete critic in state dict push")

    parameters_queue.put(state_to_bytes(state_dicts))
1112
+
1113
+
1114
def process_interaction_message(
    message, interaction_step_shift: int, wandb_logger: WandBLogger | None = None
):
    """Deserialize one actor interaction message, realign its step counter
    with the checkpointed state, and optionally log it to W&B."""
    decoded = bytes_to_python_object(message)
    # Shift interaction step for consistency with checkpointed state
    decoded["Interaction step"] += interaction_step_shift

    # Log if logger available
    if wandb_logger:
        wandb_logger.log_dict(d=decoded, mode="train", custom_step_key="Interaction step")

    return decoded
1127
+
1128
+
1129
def process_transitions(
    transition_queue: Queue,
    replay_buffer: ReplayBuffer,
    offline_replay_buffer: ReplayBuffer,
    device: str,
    dataset_repo_id: str | None,
    shutdown_event: any,
):
    """Process all available transitions from the queue.

    Each queued payload is deserialized into a list of transitions, moved to
    ``device``, filtered for NaNs, and added to the online replay buffer.
    Human-intervention transitions are mirrored into the offline buffer when a
    dataset repo id is configured.

    Args:
        transition_queue: Queue for receiving transitions from the actor
        replay_buffer: Replay buffer to add transitions to
        offline_replay_buffer: Offline replay buffer to add transitions to
        device: Device to move transitions to
        dataset_repo_id: Repository ID for dataset
        shutdown_event: Event to signal shutdown
    """
    while not transition_queue.empty() and not shutdown_event.is_set():
        transition_list = transition_queue.get()
        transition_list = bytes_to_transitions(buffer=transition_list)

        for transition in transition_list:
            transition = move_transition_to_device(transition=transition, device=device)

            # Skip transitions with NaN values
            if check_nan_in_transition(
                observations=transition["state"],
                actions=transition[ACTION],
                next_state=transition["next_state"],
            ):
                logging.warning("[LEARNER] NaN detected in transition, skipping")
                continue

            replay_buffer.add(**transition)

            # Add to offline buffer if it's an intervention.
            # BUGFIX: `.get("complementary_info", {})` returns None (not {})
            # when the key is present with value None; guard with `or {}` so
            # the chained `.get` cannot raise AttributeError.
            complementary_info = transition.get("complementary_info") or {}
            if dataset_repo_id is not None and complementary_info.get(TeleopEvents.IS_INTERVENTION):
                offline_replay_buffer.add(**transition)
1170
+
1171
+
1172
def process_interaction_messages(
    interaction_message_queue: Queue,
    interaction_step_shift: int,
    wandb_logger: WandBLogger | None,
    shutdown_event: any,
) -> dict | None:
    """Drain all currently-queued interaction messages.

    Every message is processed (step-shifted and optionally logged); only the
    most recent one is returned.

    Args:
        interaction_message_queue: Queue for receiving interaction messages
        interaction_step_shift: Amount to shift interaction step by
        wandb_logger: Logger for tracking progress
        shutdown_event: Event to signal shutdown

    Returns:
        dict | None: The last interaction message processed, or None if none were processed
    """
    latest_message = None
    while not (interaction_message_queue.empty() or shutdown_event.is_set()):
        raw_message = interaction_message_queue.get()
        latest_message = process_interaction_message(
            message=raw_message,
            interaction_step_shift=interaction_step_shift,
            wandb_logger=wandb_logger,
        )

    return latest_message
1199
+
1200
+
1201
+ if __name__ == "__main__":
1202
+ train_cli()
1203
+ logging.info("[LEARNER] main finished")
lerobot/src/lerobot/rl/learner_service.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team.
4
+ # All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ import logging
19
+ import time
20
+ from multiprocessing import Event, Queue
21
+
22
+ from lerobot.rl.queue import get_last_item_from_queue
23
+ from lerobot.transport import services_pb2, services_pb2_grpc
24
+ from lerobot.transport.utils import receive_bytes_in_chunks, send_bytes_in_chunks
25
+
26
+ MAX_WORKERS = 3 # Stream parameters, send transitions and interactions
27
+ SHUTDOWN_TIMEOUT = 10
28
+
29
+
30
class LearnerService(services_pb2_grpc.LearnerServiceServicer):
    """
    Implementation of the LearnerService gRPC service
    This service is used to send parameters to the Actor and receive transitions and interactions from the Actor
    check transport.proto for the gRPC service definition
    """

    def __init__(
        self,
        shutdown_event: Event,  # type: ignore
        parameters_queue: Queue,
        seconds_between_pushes: float,
        transition_queue: Queue,
        interaction_message_queue: Queue,
        queue_get_timeout: float = 0.001,
    ):
        # Event checked by every RPC loop to know when to stop serving.
        self.shutdown_event = shutdown_event
        # Queue of serialized policy parameters produced by the learner loop.
        self.parameters_queue = parameters_queue
        # Minimum interval between consecutive parameter pushes to the actor.
        self.seconds_between_pushes = seconds_between_pushes
        # Queues that incoming actor payloads are forwarded into.
        self.transition_queue = transition_queue
        self.interaction_message_queue = interaction_message_queue
        # How long a single parameters_queue.get may block per loop iteration.
        self.queue_get_timeout = queue_get_timeout

    def StreamParameters(self, request, context):  # noqa: N802
        # Server-streaming RPC: yields parameter chunks to the actor at most
        # once every `seconds_between_pushes` until shutdown.
        # TODO: authorize the request
        logging.info("[LEARNER] Received request to stream parameters from the Actor")

        last_push_time = 0

        while not self.shutdown_event.is_set():
            time_since_last_push = time.time() - last_push_time
            if time_since_last_push < self.seconds_between_pushes:
                # Sleep on the shutdown event (not time.sleep) so a shutdown
                # wakes this loop immediately.
                self.shutdown_event.wait(self.seconds_between_pushes - time_since_last_push)
                # Continue, because we could receive a shutdown event,
                # and it's checked in the while loop
                continue

            logging.info("[LEARNER] Push parameters to the Actor")
            # Drains the queue and keeps only the freshest parameter payload.
            buffer = get_last_item_from_queue(
                self.parameters_queue, block=True, timeout=self.queue_get_timeout
            )

            if buffer is None:
                continue

            yield from send_bytes_in_chunks(
                buffer,
                services_pb2.Parameters,
                log_prefix="[LEARNER] Sending parameters",
                silent=True,
            )

            last_push_time = time.time()
            logging.info("[LEARNER] Parameters sent")

        logging.info("[LEARNER] Stream parameters finished")
        return services_pb2.Empty()

    def SendTransitions(self, request_iterator, _context):  # noqa: N802
        # Client-streaming RPC: reassembles chunked transition payloads and
        # forwards them to the learner via transition_queue. Blocks until the
        # actor's stream ends or shutdown is signalled.
        # TODO: authorize the request
        logging.info("[LEARNER] Received request to receive transitions from the Actor")

        receive_bytes_in_chunks(
            request_iterator,
            self.transition_queue,
            self.shutdown_event,
            log_prefix="[LEARNER] transitions",
        )

        logging.debug("[LEARNER] Finished receiving transitions")
        return services_pb2.Empty()

    def SendInteractions(self, request_iterator, _context):  # noqa: N802
        # Client-streaming RPC: same chunked-receive pattern as SendTransitions,
        # but for interaction/telemetry messages.
        # TODO: authorize the request
        logging.info("[LEARNER] Received request to receive interactions from the Actor")

        receive_bytes_in_chunks(
            request_iterator,
            self.interaction_message_queue,
            self.shutdown_event,
            log_prefix="[LEARNER] interactions",
        )

        logging.debug("[LEARNER] Finished receiving interactions")
        return services_pb2.Empty()

    def Ready(self, request, context):  # noqa: N802
        # Liveness probe: the actor calls this to confirm the learner is up.
        return services_pb2.Empty()
lerobot/src/lerobot/rl/process.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team.
4
+ # All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ import logging
19
+ import os
20
+ import signal
21
+ import sys
22
+
23
+
24
class ProcessSignalHandler:
    """Utility class to attach graceful shutdown signal handlers.

    The class exposes a shutdown_event attribute that is set when a shutdown
    signal is received. A counter tracks how many shutdown signals have been
    caught. On the second signal the process exits with status 1.
    """

    _SUPPORTED_SIGNALS = ("SIGINT", "SIGTERM", "SIGHUP", "SIGQUIT")

    def __init__(self, use_threads: bool, display_pid: bool = False):
        """Create the shutdown event and register the signal handlers.

        Args:
            use_threads: pick threading.Event (True) or multiprocessing.Event
                (False) to match how the rest of the program runs.
            display_pid: prefix log messages with the current PID.
        """
        # TODO: Check if we can use Event from threading since Event from
        # multiprocessing is the a clone of threading.Event.
        # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Event
        if use_threads:
            from threading import Event
        else:
            from multiprocessing import Event

        self.shutdown_event = Event()
        self._counter: int = 0
        self._display_pid = display_pid

        self._register_handlers()

    @property
    def counter(self) -> int:  # pragma: no cover – simple accessor
        """Number of shutdown signals that have been intercepted."""
        return self._counter

    def _register_handlers(self):
        """Attach the internal _signal_handler to a subset of POSIX signals."""

        def _signal_handler(signum, frame):
            pid_str = ""
            if self._display_pid:
                pid_str = f"[PID: {os.getpid()}]"
            logging.info(f"{pid_str} Shutdown signal {signum} received. Cleaning up…")
            self.shutdown_event.set()
            self._counter += 1

            # On a second Ctrl-C (or any supported signal) force the exit to
            # mimic the previous behaviour while giving the caller one chance to
            # shutdown gracefully.
            # TODO: Investigate if we need it later
            if self._counter > 1:
                logging.info("Force shutdown")
                sys.exit(1)

        for sig_name in self._SUPPORTED_SIGNALS:
            # Look up by name so missing signals resolve to None instead of
            # raising AttributeError.
            sig = getattr(signal, sig_name, None)
            if sig is None:
                # The signal is not available on this platform (Windows for
                # instance does not provide SIGHUP, SIGQUIT…). Skip it.
                continue
            try:
                signal.signal(sig, _signal_handler)
            except (ValueError, OSError):  # pragma: no cover – unlikely but safe
                # Signal not supported or we are in a non-main thread.
                continue
lerobot/src/lerobot/rl/queue.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import platform
18
+ from contextlib import suppress
19
+ from queue import Empty
20
+ from typing import Any
21
+
22
+ from torch.multiprocessing import Queue
23
+
24
+
25
def get_last_item_from_queue(queue: Queue, block=True, timeout: float = 0.1) -> Any:
    """Drain `queue` and return the most recent item, or None if nothing arrived.

    Args:
        queue: Queue to read from (anything exposing get/get_nowait/qsize).
        block: When True, wait up to `timeout` seconds for a first item.
        timeout: Maximum wait (seconds) for the first item when blocking.

    Returns:
        The last item currently available, or None when the queue is empty
        (and, with `block=True`, stays empty for `timeout` seconds).
    """
    if block:
        try:
            item = queue.get(timeout=timeout)
        except Empty:
            return None
    else:
        item = None

    # Drain the queue and keep only the most recent item.
    if platform.system() == "Darwin":
        # On Mac, avoid using `qsize` due to unreliable implementation.
        # There is a comment on `qsize` code in the Python source:
        # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
        try:
            while True:
                item = queue.get_nowait()
        except Empty:
            pass

        return item

    # Details about using qsize in https://github.com/huggingface/lerobot/issues/1523
    while queue.qsize() > 0:
        try:
            item = queue.get_nowait()
        except Empty:
            # `qsize` is only approximate: if the queue is actually empty,
            # stop instead of busy-spinning until the stale count catches up
            # (the previous `suppress(Empty)` version could loop forever).
            break

    return item
lerobot/src/lerobot/rl/wandb_utils.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import logging
17
+ import os
18
+ import re
19
+ from glob import glob
20
+ from pathlib import Path
21
+
22
+ from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE
23
+ from termcolor import colored
24
+
25
+ from lerobot.configs.train import TrainPipelineConfig
26
+ from lerobot.utils.constants import PRETRAINED_MODEL_DIR
27
+
28
+
29
def cfg_to_group(cfg: TrainPipelineConfig, return_list: bool = False) -> list[str] | str:
    """Build the run's logging group, as a list of tags or a '-'-joined string."""
    tags = [
        f"policy:{cfg.policy.type}",
        f"seed:{cfg.seed}",
    ]
    # Dataset and env tags are optional: only present when configured.
    if cfg.dataset is not None:
        tags.append(f"dataset:{cfg.dataset.repo_id}")
    if cfg.env is not None:
        tags.append(f"env:{cfg.env.type}")
    if return_list:
        return tags
    return "-".join(tags)
40
+
41
+
42
def get_wandb_run_id_from_filesystem(log_dir: Path) -> str:
    """Recover the latest WandB run id from the local run directory.

    Looks for the single `run-<id>.wandb` file under `<log_dir>/wandb/latest-run/`.

    Args:
        log_dir: Root output directory of the training run.

    Returns:
        The run id extracted from the filename.

    Raises:
        RuntimeError: If zero or several candidate files are found, or the
            filename does not match the expected `run-<id>.wandb` shape.
    """
    # Get the WandB run ID.
    paths = glob(str(log_dir / "wandb/latest-run/run-*"))
    if len(paths) != 1:
        raise RuntimeError("Couldn't get the previous WandB run ID for run resumption.")
    # Match greedily up to the final `.wandb` extension: run ids may contain
    # dots, which the previous pattern (`[^\.]+` with an unescaped dot)
    # incorrectly rejected. Use Path(...).name to stay separator-agnostic.
    match = re.search(r"run-(.+)\.wandb", Path(paths[0]).name)
    if match is None:
        raise RuntimeError("Couldn't get the previous WandB run ID for run resumption.")
    return match.group(1)
52
+
53
+
54
def get_safe_wandb_artifact_name(name: str):
    """WandB artifact names may not contain ":" or "/"; map both to "_"."""
    return name.translate(str.maketrans(":/", "__"))
57
+
58
+
59
class WandBLogger:
    """A helper class to log object using wandb.

    Wraps `wandb.init` and exposes helpers to log metric dicts, model
    checkpoints (artifacts) and videos under `train/` or `eval/` namespaces.
    """

    def __init__(self, cfg: TrainPipelineConfig):
        self.cfg = cfg.wandb
        self.log_dir = cfg.output_dir
        self.job_name = cfg.job_name
        # fps used when logging videos; None when no env is configured.
        self.env_fps = cfg.env.fps if cfg.env else None
        self._group = cfg_to_group(cfg)

        # Set up WandB.
        os.environ["WANDB_SILENT"] = "True"
        import wandb

        # Resume priority: explicit run_id from config, else (when resuming)
        # the id recovered from the local filesystem, else start a new run.
        wandb_run_id = (
            cfg.wandb.run_id
            if cfg.wandb.run_id
            else get_wandb_run_id_from_filesystem(self.log_dir)
            if cfg.resume
            else None
        )
        wandb.init(
            id=wandb_run_id,
            project=self.cfg.project,
            entity=self.cfg.entity,
            name=self.job_name,
            notes=self.cfg.notes,
            tags=cfg_to_group(cfg, return_list=True),
            dir=self.log_dir,
            config=cfg.to_dict(),
            # TODO(rcadene): try set to True
            save_code=False,
            # TODO(rcadene): split train and eval, and run async eval with job_type="eval"
            job_type="train_eval",
            resume="must" if cfg.resume else None,
            mode=self.cfg.mode if self.cfg.mode in ["online", "offline", "disabled"] else "online",
        )
        run_id = wandb.run.id
        # NOTE: We will override the cfg.wandb.run_id with the wandb run id.
        # This is because we want to be able to resume the run from the wandb run id.
        cfg.wandb.run_id = run_id
        # Handle custom step key for rl asynchronous training.
        self._wandb_custom_step_key: set[str] | None = None
        logging.info(colored("Logs will be synced with wandb.", "blue", attrs=["bold"]))
        logging.info(f"Track this run --> {colored(wandb.run.get_url(), 'yellow', attrs=['bold'])}")
        self._wandb = wandb

    def log_policy(self, checkpoint_dir: Path):
        """Checkpoints the policy to wandb."""
        if self.cfg.disable_artifact:
            return

        step_id = checkpoint_dir.name
        artifact_name = f"{self._group}-{step_id}"
        artifact_name = get_safe_wandb_artifact_name(artifact_name)
        artifact = self._wandb.Artifact(artifact_name, type="model")
        pretrained_model_dir = checkpoint_dir / PRETRAINED_MODEL_DIR

        # Check if this is a PEFT model (has adapter files instead of model.safetensors)
        adapter_model_file = pretrained_model_dir / "adapter_model.safetensors"
        standard_model_file = pretrained_model_dir / SAFETENSORS_SINGLE_FILE

        if adapter_model_file.exists():
            # PEFT model: add adapter files and configs
            artifact.add_file(adapter_model_file)
            adapter_config_file = pretrained_model_dir / "adapter_config.json"
            if adapter_config_file.exists():
                artifact.add_file(adapter_config_file)
            # Also add the policy config which is needed for loading
            config_file = pretrained_model_dir / "config.json"
            if config_file.exists():
                artifact.add_file(config_file)
        elif standard_model_file.exists():
            # Standard model: add the single safetensors file
            artifact.add_file(standard_model_file)
        else:
            logging.warning(
                f"No {SAFETENSORS_SINGLE_FILE} or adapter_model.safetensors found in {pretrained_model_dir}. "
                "Skipping model artifact upload to WandB."
            )
            return

        self._wandb.log_artifact(artifact)

    def log_dict(
        self, d: dict, step: int | None = None, mode: str = "train", custom_step_key: str | None = None
    ):
        """Log a dict of scalar (int/float/str) metrics under `<mode>/`.

        Either `step` or `custom_step_key` must be provided. When a custom step
        key is used, `d` must contain that key: each metric is logged together
        with its value, which lets several asynchronous time axes coexist.
        """
        if mode not in {"train", "eval"}:
            raise ValueError(mode)
        if step is None and custom_step_key is None:
            raise ValueError("Either step or custom_step_key must be provided.")

        # NOTE: This is not simple. Wandb step must always monotonically increase and it
        # increases with each wandb.log call, but in the case of asynchronous RL for example,
        # multiple time steps is possible. For example, the interaction step with the environment,
        # the training step, the evaluation step, etc. So we need to define a custom step key
        # to log the correct step for each metric.
        if custom_step_key is not None:
            # Lazily create the set and register the metric once (hidden so it
            # does not clutter the wandb UI).
            if self._wandb_custom_step_key is None:
                self._wandb_custom_step_key = set()
            new_custom_key = f"{mode}/{custom_step_key}"
            if new_custom_key not in self._wandb_custom_step_key:
                self._wandb_custom_step_key.add(new_custom_key)
                self._wandb.define_metric(new_custom_key, hidden=True)

        for k, v in d.items():
            if not isinstance(v, (int | float | str)):
                logging.warning(
                    f'WandB logging of key "{k}" was ignored as its type "{type(v)}" is not handled by this wrapper.'
                )
                continue

            # Do not log the custom step key itself.
            # NOTE(review): `_wandb_custom_step_key` stores "<mode>/<key>" strings
            # while `k` is the raw dict key, so this membership test looks like it
            # can never match — confirm whether raw keys were intended here.
            if self._wandb_custom_step_key is not None and k in self._wandb_custom_step_key:
                continue

            if custom_step_key is not None:
                # Raises KeyError if `d` does not contain `custom_step_key`.
                value_custom_step = d[custom_step_key]
                data = {f"{mode}/{k}": v, f"{mode}/{custom_step_key}": value_custom_step}
                self._wandb.log(data)
                continue

            self._wandb.log(data={f"{mode}/{k}": v}, step=step)

    def log_video(self, video_path: str, step: int, mode: str = "train"):
        """Log an mp4 video file to wandb at the given step."""
        if mode not in {"train", "eval"}:
            raise ValueError(mode)

        wandb_video = self._wandb.Video(video_path, fps=self.env_fps, format="mp4")
        self._wandb.log({f"{mode}/video": wandb_video}, step=step)
lerobot/src/lerobot/robots/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from .config import RobotConfig
18
+ from .robot import Robot
19
+ from .utils import make_robot_from_config
lerobot/src/lerobot/robots/config.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import abc
16
+ from dataclasses import dataclass
17
+ from pathlib import Path
18
+
19
+ import draccus
20
+
21
+
22
@dataclass(kw_only=True)
class RobotConfig(draccus.ChoiceRegistry, abc.ABC):
    """Base configuration shared by every robot type.

    Concrete robot configs register themselves as draccus choices; `type`
    reports the registered choice name.
    """

    # Allows to distinguish between different robots of the same type
    id: str | None = None
    # Directory to store calibration file
    calibration_dir: Path | None = None

    def __post_init__(self):
        # When the config carries cameras, each one must be fully specified.
        cameras = getattr(self, "cameras", None)
        if not cameras:
            return
        for cam_config in cameras.values():
            for attr in ("width", "height", "fps"):
                if getattr(cam_config, attr) is None:
                    raise ValueError(
                        f"Specifying '{attr}' is required for the camera to be used in a robot"
                    )

    @property
    def type(self) -> str:
        """Registered draccus choice name of the concrete config class."""
        return self.get_choice_name(self.__class__)
lerobot/src/lerobot/robots/robot.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import abc
16
+ import builtins
17
+ from pathlib import Path
18
+
19
+ import draccus
20
+
21
+ from lerobot.motors import MotorCalibration
22
+ from lerobot.processor import RobotAction, RobotObservation
23
+ from lerobot.utils.constants import HF_LEROBOT_CALIBRATION, ROBOTS
24
+
25
+ from .config import RobotConfig
26
+
27
+
28
# TODO(aliberts): action/obs typing such as Generic[ObsType, ActType] similar to gym.Env ?
# https://github.com/Farama-Foundation/Gymnasium/blob/3287c869f9a48d99454306b0d4b4ec537f0f35e3/gymnasium/core.py#L23
class Robot(abc.ABC):
    """
    The base abstract class for all LeRobot-compatible robots.

    This class provides a standardized interface for interacting with physical robots.
    Subclasses must implement all abstract methods and properties to be usable.

    Attributes:
        config_class (RobotConfig): The expected configuration class for this robot.
        name (str): The unique robot name used to identify this robot type.
    """

    # Set these in ALL subclasses
    config_class: builtins.type[RobotConfig]
    name: str

    def __init__(self, config: RobotConfig):
        self.robot_type = self.name
        self.id = config.id
        # Fall back to the shared per-robot-type calibration location when the
        # config does not provide one.
        self.calibration_dir = (
            config.calibration_dir if config.calibration_dir else HF_LEROBOT_CALIBRATION / ROBOTS / self.name
        )
        # Side effect: creates the calibration directory on disk if missing.
        self.calibration_dir.mkdir(parents=True, exist_ok=True)
        # NOTE(review): when `config.id` is None this resolves to "None.json" —
        # confirm that an id is always set before calibration is persisted.
        self.calibration_fpath = self.calibration_dir / f"{self.id}.json"
        self.calibration: dict[str, MotorCalibration] = {}
        # Eagerly load any previously saved calibration for this robot id.
        if self.calibration_fpath.is_file():
            self._load_calibration()

    def __str__(self) -> str:
        """Human-readable identifier: '<id> <ClassName>'."""
        return f"{self.id} {self.__class__.__name__}"

    # TODO(aliberts): create a proper Feature class for this that links with datasets
    @property
    @abc.abstractmethod
    def observation_features(self) -> dict:
        """
        A dictionary describing the structure and types of the observations produced by the robot.
        Its structure (keys) should match the structure of what is returned by :pymeth:`get_observation`.
        Values for the dict should either be:
        - The type of the value if it's a simple value, e.g. `float` for single proprioceptive value (a joint's position/velocity)
        - A tuple representing the shape if it's an array-type value, e.g. `(height, width, channel)` for images

        Note: this property should be able to be called regardless of whether the robot is connected or not.
        """
        pass

    @property
    @abc.abstractmethod
    def action_features(self) -> dict:
        """
        A dictionary describing the structure and types of the actions expected by the robot. Its structure
        (keys) should match the structure of what is passed to :pymeth:`send_action`. Values for the dict
        should be the type of the value if it's a simple value, e.g. `float` for single proprioceptive value
        (a joint's goal position/velocity)

        Note: this property should be able to be called regardless of whether the robot is connected or not.
        """
        pass

    @property
    @abc.abstractmethod
    def is_connected(self) -> bool:
        """
        Whether the robot is currently connected or not. If `False`, calling :pymeth:`get_observation` or
        :pymeth:`send_action` should raise an error.
        """
        pass

    @abc.abstractmethod
    def connect(self, calibrate: bool = True) -> None:
        """
        Establish communication with the robot.

        Args:
            calibrate (bool): If True, automatically calibrate the robot after connecting if it's not
                calibrated or needs calibration (this is hardware-dependant).
        """
        pass

    @property
    @abc.abstractmethod
    def is_calibrated(self) -> bool:
        """Whether the robot is currently calibrated or not. Should be always `True` if not applicable"""
        pass

    @abc.abstractmethod
    def calibrate(self) -> None:
        """
        Calibrate the robot if applicable. If not, this should be a no-op.

        This method should collect any necessary data (e.g., motor offsets) and update the
        :pyattr:`calibration` dictionary accordingly.
        """
        pass

    def _load_calibration(self, fpath: Path | None = None) -> None:
        """
        Helper to load calibration data from the specified file.

        Args:
            fpath (Path | None): Optional path to the calibration file. Defaults to `self.calibration_fpath`.
        """
        fpath = self.calibration_fpath if fpath is None else fpath
        # draccus.config_type("json") selects draccus' JSON backend for deserialization.
        with open(fpath) as f, draccus.config_type("json"):
            self.calibration = draccus.load(dict[str, MotorCalibration], f)

    def _save_calibration(self, fpath: Path | None = None) -> None:
        """
        Helper to save calibration data to the specified file.

        Args:
            fpath (Path | None): Optional path to save the calibration file. Defaults to `self.calibration_fpath`.
        """
        fpath = self.calibration_fpath if fpath is None else fpath
        # Overwrites the file in place; draccus serializes to indented JSON.
        with open(fpath, "w") as f, draccus.config_type("json"):
            draccus.dump(self.calibration, f, indent=4)

    @abc.abstractmethod
    def configure(self) -> None:
        """
        Apply any one-time or runtime configuration to the robot.
        This may include setting motor parameters, control modes, or initial state.
        """
        pass

    @abc.abstractmethod
    def get_observation(self) -> RobotObservation:
        """
        Retrieve the current observation from the robot.

        Returns:
            RobotObservation: A flat dictionary representing the robot's current sensory state. Its structure
                should match :pymeth:`observation_features`.
        """

        pass

    @abc.abstractmethod
    def send_action(self, action: RobotAction) -> RobotAction:
        """
        Send an action command to the robot.

        Args:
            action (RobotAction): Dictionary representing the desired action. Its structure should match
                :pymeth:`action_features`.

        Returns:
            RobotAction: The action actually sent to the motors potentially clipped or modified, e.g. by
                safety limits on velocity.
        """
        pass

    @abc.abstractmethod
    def disconnect(self) -> None:
        """Disconnect from the robot and perform any necessary cleanup."""
        pass
lerobot/src/lerobot/robots/utils.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ from pprint import pformat
17
+ from typing import cast
18
+
19
+ from lerobot.utils.import_utils import make_device_from_device_class
20
+
21
+ from .config import RobotConfig
22
+ from .robot import Robot
23
+
24
+
25
+ def make_robot_from_config(config: RobotConfig) -> Robot:
26
+ # TODO(Steven): Consider just using the make_device_from_device_class for all types
27
+ if config.type == "koch_follower":
28
+ from .koch_follower import KochFollower
29
+
30
+ return KochFollower(config)
31
+ elif config.type == "omx_follower":
32
+ from .omx_follower import OmxFollower
33
+
34
+ return OmxFollower(config)
35
+ elif config.type == "so100_follower":
36
+ from .so_follower import SO100Follower
37
+
38
+ return SO100Follower(config)
39
+ elif config.type == "so101_follower":
40
+ from .so_follower import SO101Follower
41
+
42
+ return SO101Follower(config)
43
+ elif config.type == "lekiwi":
44
+ from .lekiwi import LeKiwi
45
+
46
+ return LeKiwi(config)
47
+ elif config.type == "hope_jr_hand":
48
+ from .hope_jr import HopeJrHand
49
+
50
+ return HopeJrHand(config)
51
+ elif config.type == "hope_jr_arm":
52
+ from .hope_jr import HopeJrArm
53
+
54
+ return HopeJrArm(config)
55
+ elif config.type == "bi_so_follower":
56
+ from .bi_so_follower import BiSOFollower
57
+
58
+ return BiSOFollower(config)
59
+ elif config.type == "reachy2":
60
+ from .reachy2 import Reachy2Robot
61
+
62
+ return Reachy2Robot(config)
63
+ elif config.type == "mock_robot":
64
+ from tests.mocks.mock_robot import MockRobot
65
+
66
+ return MockRobot(config)
67
+ else:
68
+ try:
69
+ return cast(Robot, make_device_from_device_class(config))
70
+ except Exception as e:
71
+ raise ValueError(f"Error creating robot with config {config}: {e}") from e
72
+
73
+
74
+ # TODO(pepijn): Move to pipeline step to make sure we don't have to do this in the robot code and send action to robot is clean for use in dataset
75
+ def ensure_safe_goal_position(
76
+ goal_present_pos: dict[str, tuple[float, float]], max_relative_target: float | dict[str, float]
77
+ ) -> dict[str, float]:
78
+ """Caps relative action target magnitude for safety."""
79
+
80
+ if isinstance(max_relative_target, float):
81
+ diff_cap = dict.fromkeys(goal_present_pos, max_relative_target)
82
+ elif isinstance(max_relative_target, dict):
83
+ if not set(goal_present_pos) == set(max_relative_target):
84
+ raise ValueError("max_relative_target keys must match those of goal_present_pos.")
85
+ diff_cap = max_relative_target
86
+ else:
87
+ raise TypeError(max_relative_target)
88
+
89
+ warnings_dict = {}
90
+ safe_goal_positions = {}
91
+ for key, (goal_pos, present_pos) in goal_present_pos.items():
92
+ diff = goal_pos - present_pos
93
+ max_diff = diff_cap[key]
94
+ safe_diff = min(diff, max_diff)
95
+ safe_diff = max(safe_diff, -max_diff)
96
+ safe_goal_pos = present_pos + safe_diff
97
+ safe_goal_positions[key] = safe_goal_pos
98
+ if abs(safe_goal_pos - goal_pos) > 1e-4:
99
+ warnings_dict[key] = {
100
+ "original goal_pos": goal_pos,
101
+ "safe goal_pos": safe_goal_pos,
102
+ }
103
+
104
+ if warnings_dict:
105
+ logging.warning(
106
+ "Relative goal position magnitude had to be clamped to be safe.\n"
107
+ f"{pformat(warnings_dict, indent=4)}"
108
+ )
109
+
110
+ return safe_goal_positions
lerobot/src/lerobot/scripts/lerobot_calibrate.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ Helper to recalibrate your device (robot or teleoperator).
17
+
18
+ Example:
19
+
20
+ ```shell
21
+ lerobot-calibrate \
22
+ --teleop.type=so100_leader \
23
+ --teleop.port=/dev/tty.usbmodem58760431551 \
24
+ --teleop.id=blue
25
+ ```
26
+ """
27
+
28
+ import logging
29
+ from dataclasses import asdict, dataclass
30
+ from pprint import pformat
31
+
32
+ import draccus
33
+
34
+ from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig # noqa: F401
35
+ from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig # noqa: F401
36
+ from lerobot.robots import ( # noqa: F401
37
+ Robot,
38
+ RobotConfig,
39
+ bi_so_follower,
40
+ hope_jr,
41
+ koch_follower,
42
+ lekiwi,
43
+ make_robot_from_config,
44
+ omx_follower,
45
+ so_follower,
46
+ )
47
+ from lerobot.teleoperators import ( # noqa: F401
48
+ Teleoperator,
49
+ TeleoperatorConfig,
50
+ bi_so_leader,
51
+ homunculus,
52
+ koch_leader,
53
+ make_teleoperator_from_config,
54
+ omx_leader,
55
+ so_leader,
56
+ )
57
+ from lerobot.utils.import_utils import register_third_party_plugins
58
+ from lerobot.utils.utils import init_logging
59
+
60
+
61
@dataclass
class CalibrateConfig:
    """CLI config for lerobot-calibrate: exactly one device must be given."""

    teleop: TeleoperatorConfig | None = None
    robot: RobotConfig | None = None

    def __post_init__(self):
        # Exactly one of the two devices must be configured (XOR).
        if bool(self.teleop) == bool(self.robot):
            raise ValueError("Choose either a teleop or a robot.")

        # One of the two is set; pick whichever it is (robot takes priority).
        self.device = self.robot or self.teleop
71
+
72
+
73
@draccus.wrap()
def calibrate(cfg: CalibrateConfig):
    """Run the calibration routine of the configured robot or teleoperator.

    Connects without auto-calibration, runs an explicit calibration pass,
    then disconnects.
    """
    init_logging()
    logging.info(pformat(asdict(cfg)))

    # cfg.device is guaranteed by CalibrateConfig.__post_init__ to be exactly
    # one of the two config types below, so `device` is always bound.
    if isinstance(cfg.device, RobotConfig):
        device = make_robot_from_config(cfg.device)
    elif isinstance(cfg.device, TeleoperatorConfig):
        device = make_teleoperator_from_config(cfg.device)

    device.connect(calibrate=False)
    device.calibrate()
    device.disconnect()
86
+
87
+
88
def main():
    """CLI entry point: discover third-party plugins, then run calibration."""
    register_third_party_plugins()
    calibrate()


if __name__ == "__main__":
    main()
lerobot/src/lerobot/scripts/lerobot_dataset_viz.py ADDED
@@ -0,0 +1,287 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Visualize data of **all** frames of any episode of a dataset of type LeRobotDataset.
17
+
18
+ Note: The last frame of the episode doesn't always correspond to a final state.
19
+ That's because our datasets are composed of transitions from one state to the next, up to
+ the antepenultimate state, which is associated with the ultimate action that leads to the final state.
21
+ However, there might not be a transition from a final state to another state.
22
+
23
+ Note: This script aims to visualize the data used to train the neural networks.
24
+ ~What you see is what you get~. When visualizing image modality, it is often expected to observe
25
+ lossy compression artifacts since these images have been decoded from compressed mp4 videos to
26
+ save disk space. The compression factor applied has been tuned to not affect success rate.
27
+
28
+ Examples:
29
+
30
+ - Visualize data stored on a local machine:
31
+ ```
32
+ local$ lerobot-dataset-viz \
33
+ --repo-id lerobot/pusht \
34
+ --episode-index 0
35
+ ```
36
+
37
+ - Visualize data stored on a distant machine with a local viewer:
38
+ ```
39
+ distant$ lerobot-dataset-viz \
40
+ --repo-id lerobot/pusht \
41
+ --episode-index 0 \
42
+ --save 1 \
43
+ --output-dir path/to/directory
44
+
45
+ local$ scp distant:path/to/directory/lerobot_pusht_episode_0.rrd .
46
+ local$ rerun lerobot_pusht_episode_0.rrd
47
+ ```
48
+
49
+ - Visualize data stored on a distant machine through streaming:
50
+ (You need to forward the websocket port to the distant machine, with
51
+ `ssh -L 9087:localhost:9087 username@remote-host`)
52
+ ```
53
+ distant$ lerobot-dataset-viz \
54
+ --repo-id lerobot/pusht \
55
+ --episode-index 0 \
56
+ --mode distant \
57
+ --ws-port 9087
58
+
59
+ local$ rerun ws://localhost:9087
60
+ ```
61
+
62
+ """
63
+
64
+ import argparse
65
+ import gc
66
+ import logging
67
+ import time
68
+ from pathlib import Path
69
+
70
+ import numpy as np
71
+ import rerun as rr
72
+ import torch
73
+ import torch.utils.data
74
+ import tqdm
75
+
76
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
77
+ from lerobot.utils.constants import ACTION, DONE, OBS_STATE, REWARD
78
+
79
+
80
def to_hwc_uint8_numpy(chw_float32_torch: torch.Tensor) -> np.ndarray:
    """Convert a float32 channel-first (C, H, W) tensor with values in [0, 1]
    into a uint8 channel-last (H, W, C) numpy array, scaling by 255."""
    assert chw_float32_torch.dtype == torch.float32
    assert chw_float32_torch.ndim == 3
    channels, height, width = chw_float32_torch.shape
    assert channels < height and channels < width, (
        f"expect channel first images, but instead {chw_float32_torch.shape}"
    )
    scaled = chw_float32_torch.mul(255).type(torch.uint8)
    return scaled.permute(1, 2, 0).numpy()
87
+
88
+
89
def visualize_dataset(
    dataset: LeRobotDataset,
    episode_index: int,
    batch_size: int = 32,
    num_workers: int = 0,
    mode: str = "local",
    web_port: int = 9090,
    ws_port: int = 9087,
    save: bool = False,
    output_dir: Path | None = None,
    display_compressed_images: bool = False,
) -> Path | None:
    """Stream one episode of a LeRobotDataset to Rerun.

    In "local" mode a viewer is spawned (unless `save` is set, in which case
    the stream is written to a .rrd file in `output_dir` and its path is
    returned). In "distant" mode a web viewer is served so a remote client
    can connect; the call then blocks until Ctrl-C.

    Args:
        dataset: Dataset to visualize; assumed to contain only the episode of
            interest (see `main`, which loads it with `episodes=[episode_index]`).
        episode_index: Episode index, used only to name the Rerun recording.
        batch_size: DataLoader batch size.
        num_workers: DataLoader worker processes.
        mode: "local" or "distant"; anything else raises ValueError.
        web_port: Web viewer port (distant mode only).
        ws_port: Websocket port (distant mode; NOTE(review): accepted but not
            passed to `rr.serve_web_viewer` below — confirm whether it is used).
        save: Write a .rrd file instead of spawning a viewer (local mode only).
        output_dir: Destination directory for the .rrd file; required if `save`.
        display_compressed_images: Log compressed images instead of raw ones.

    Returns:
        Path of the saved .rrd file when `save` is set in local mode, else None.
    """
    if save:
        assert output_dir is not None, (
            "Set an output directory where to write .rrd files with `--output-dir path/to/directory`."
        )

    repo_id = dataset.repo_id

    logging.info("Loading dataloader")
    dataloader = torch.utils.data.DataLoader(
        dataset,
        num_workers=num_workers,
        batch_size=batch_size,
    )

    logging.info("Starting Rerun")

    if mode not in ["local", "distant"]:
        raise ValueError(mode)

    # Only spawn a local viewer when we are not writing to a file.
    spawn_local_viewer = mode == "local" and not save
    rr.init(f"{repo_id}/episode_{episode_index}", spawn=spawn_local_viewer)

    # Manually call python garbage collector after `rr.init` to avoid hanging in a blocking flush
    # when iterating on a dataloader with `num_workers` > 0
    # TODO(rcadene): remove `gc.collect` when rerun version 0.16 is out, which includes a fix
    gc.collect()

    if mode == "distant":
        rr.serve_web_viewer(open_browser=False, web_port=web_port)

    logging.info("Logging to Rerun")

    for batch in tqdm.tqdm(dataloader, total=len(dataloader)):
        # iterate over the batch
        for i in range(len(batch["index"])):
            rr.set_time("frame_index", sequence=batch["frame_index"][i].item())
            rr.set_time("timestamp", timestamp=batch["timestamp"][i].item())

            # display each camera image
            for key in dataset.meta.camera_keys:
                img = to_hwc_uint8_numpy(batch[key][i])
                img_entity = rr.Image(img).compress() if display_compressed_images else rr.Image(img)
                rr.log(key, entity=img_entity)

            # display each dimension of action space (e.g. actuators command)
            if ACTION in batch:
                for dim_idx, val in enumerate(batch[ACTION][i]):
                    rr.log(f"{ACTION}/{dim_idx}", rr.Scalars(val.item()))

            # display each dimension of observed state space (e.g. agent position in joint space)
            if OBS_STATE in batch:
                for dim_idx, val in enumerate(batch[OBS_STATE][i]):
                    rr.log(f"state/{dim_idx}", rr.Scalars(val.item()))

            if DONE in batch:
                rr.log(DONE, rr.Scalars(batch[DONE][i].item()))

            if REWARD in batch:
                rr.log(REWARD, rr.Scalars(batch[REWARD][i].item()))

            if "next.success" in batch:
                rr.log("next.success", rr.Scalars(batch["next.success"][i].item()))

    if mode == "local" and save:
        # save .rrd locally
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)
        repo_id_str = repo_id.replace("/", "_")
        rrd_path = output_dir / f"{repo_id_str}_episode_{episode_index}.rrd"
        rr.save(rrd_path)
        return rrd_path

    elif mode == "distant":
        # stop the process from exiting since it is serving the websocket connection
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print("Ctrl-C received. Exiting.")
180
+
181
+
182
def main():
    """CLI entry point: parse arguments, load one episode of a dataset and visualize it.

    Bugfix: `--display-compressed-images` was declared with `type=bool,
    required=True, default=False`. With argparse, `type=bool` converts any
    non-empty string (including "False") to True, and `required=True`
    contradicts having a default. It now follows the same `type=int,
    default=0` pattern as `--save`.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo-id",
        type=str,
        required=True,
        help="Name of hugging face repository containing a LeRobotDataset dataset (e.g. `lerobot/pusht`).",
    )
    parser.add_argument(
        "--episode-index",
        type=int,
        required=True,
        help="Episode to visualize.",
    )
    parser.add_argument(
        "--root",
        type=Path,
        default=None,
        help="Root directory for the dataset stored locally (e.g. `--root data`). By default, the dataset will be loaded from hugging face cache folder, or downloaded from the hub if available.",
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=None,
        help="Directory path to write a .rrd file when `--save 1` is set.",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=32,
        help="Batch size loaded by DataLoader.",
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        default=4,
        help="Number of processes of Dataloader for loading the data.",
    )
    parser.add_argument(
        "--mode",
        type=str,
        default="local",
        help=(
            "Mode of viewing between 'local' or 'distant'. "
            "'local' requires data to be on a local machine. It spawns a viewer to visualize the data locally. "
            "'distant' creates a server on the distant machine where the data is stored. "
            "Visualize the data by connecting to the server with `rerun ws://localhost:PORT` on the local machine."
        ),
    )
    parser.add_argument(
        "--web-port",
        type=int,
        default=9090,
        help="Web port for rerun.io when `--mode distant` is set.",
    )
    parser.add_argument(
        "--ws-port",
        type=int,
        default=9087,
        help="Web socket port for rerun.io when `--mode distant` is set.",
    )
    parser.add_argument(
        "--save",
        type=int,
        default=0,
        help=(
            "Save a .rrd file in the directory provided by `--output-dir`. "
            "It also deactivates the spawning of a viewer. "
            "Visualize the data by running `rerun path/to/file.rrd` on your local machine."
        ),
    )

    parser.add_argument(
        "--tolerance-s",
        type=float,
        default=1e-4,
        help=(
            "Tolerance in seconds used to ensure data timestamps respect the dataset fps value"
            "This is argument passed to the constructor of LeRobotDataset and maps to its tolerance_s constructor argument"
            "If not given, defaults to 1e-4."
        ),
    )

    # Fixed: was `type=bool, required=True, default=False`; `type=bool` treats
    # any non-empty string as True, so use the 0/1 int convention like `--save`.
    parser.add_argument(
        "--display-compressed-images",
        type=int,
        default=0,
        help="Set to 1 to display compressed images in Rerun instead of uncompressed ones.",
    )

    args = parser.parse_args()
    # `vars(args)` aliases the namespace's own dict, so popping here also
    # removes these keys from what is forwarded to `visualize_dataset` below.
    kwargs = vars(args)
    repo_id = kwargs.pop("repo_id")
    root = kwargs.pop("root")
    tolerance_s = kwargs.pop("tolerance_s")

    logging.info("Loading dataset")
    dataset = LeRobotDataset(repo_id, episodes=[args.episode_index], root=root, tolerance_s=tolerance_s)

    visualize_dataset(dataset, **kwargs)


if __name__ == "__main__":
    main()
lerobot/src/lerobot/scripts/lerobot_edit_dataset.py ADDED
@@ -0,0 +1,736 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ Edit LeRobot datasets using various transformation tools.
19
+
20
+ This script allows you to delete episodes, split datasets, merge datasets,
21
+ remove features, and convert image datasets to video format.
22
+ When new_repo_id is specified, creates a new dataset.
23
+
24
+ Usage Examples:
25
+
26
+ Delete episodes 0, 2, and 5 from a dataset:
27
+ python -m lerobot.scripts.lerobot_edit_dataset \
28
+ --repo_id lerobot/pusht \
29
+ --operation.type delete_episodes \
30
+ --operation.episode_indices "[0, 2, 5]"
31
+
32
+ Delete episodes and save to a new dataset:
33
+ python -m lerobot.scripts.lerobot_edit_dataset \
34
+ --repo_id lerobot/pusht \
35
+ --new_repo_id lerobot/pusht_filtered \
36
+ --operation.type delete_episodes \
37
+ --operation.episode_indices "[0, 2, 5]"
38
+
39
+ Split dataset by fractions:
40
+ python -m lerobot.scripts.lerobot_edit_dataset \
41
+ --repo_id lerobot/pusht \
42
+ --operation.type split \
43
+ --operation.splits '{"train": 0.8, "val": 0.2}'
44
+
45
+ Split dataset by episode indices:
46
+ python -m lerobot.scripts.lerobot_edit_dataset \
47
+ --repo_id lerobot/pusht \
48
+ --operation.type split \
49
+ --operation.splits '{"train": [0, 1, 2, 3], "val": [4, 5]}'
50
+
51
+ Split into more than two splits:
52
+ python -m lerobot.scripts.lerobot_edit_dataset \
53
+ --repo_id lerobot/pusht \
54
+ --operation.type split \
55
+ --operation.splits '{"train": 0.6, "val": 0.2, "test": 0.2}'
56
+
57
+ Merge multiple datasets:
58
+ python -m lerobot.scripts.lerobot_edit_dataset \
59
+ --repo_id lerobot/pusht_merged \
60
+ --operation.type merge \
61
+ --operation.repo_ids "['lerobot/pusht_train', 'lerobot/pusht_val']"
62
+
63
+ Remove camera feature:
64
+ python -m lerobot.scripts.lerobot_edit_dataset \
65
+ --repo_id lerobot/pusht \
66
+ --operation.type remove_feature \
67
+ --operation.feature_names "['observation.images.top']"
68
+
69
+ Convert image dataset to video format (saves locally):
70
+ python -m lerobot.scripts.lerobot_edit_dataset \
71
+ --repo_id lerobot/pusht_image \
72
+ --operation.type convert_to_video \
73
+ --operation.output_dir /path/to/output/pusht_video
74
+
75
+ Convert image dataset and save with new repo_id:
76
+ python -m lerobot.scripts.lerobot_edit_dataset \
77
+ --repo_id lerobot/pusht_image \
78
+ --new_repo_id lerobot/pusht_video \
79
+ --operation.type convert_to_video
80
+
81
+ Convert and push to hub:
82
+ python -m lerobot.scripts.lerobot_edit_dataset \
83
+ --repo_id lerobot/pusht_image \
84
+ --new_repo_id lerobot/pusht_video \
85
+ --operation.type convert_to_video \
86
+ --push_to_hub true
87
+
88
+ Using JSON config file:
89
+ python -m lerobot.scripts.lerobot_edit_dataset \
90
+ --config_path path/to/edit_config.json
91
+ """
92
+
93
+ import logging
94
+ import shutil
95
+ from concurrent.futures import ThreadPoolExecutor, as_completed
96
+ from dataclasses import dataclass
97
+ from pathlib import Path
98
+
99
+ import pandas as pd
100
+ from tqdm import tqdm
101
+
102
+ from lerobot.configs import parser
103
+ from lerobot.datasets.dataset_tools import (
104
+ delete_episodes,
105
+ merge_datasets,
106
+ remove_feature,
107
+ split_dataset,
108
+ )
109
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
110
+ from lerobot.datasets.utils import write_stats, write_tasks
111
+ from lerobot.datasets.video_utils import encode_video_frames, get_video_info
112
+ from lerobot.utils.constants import HF_LEROBOT_HOME, OBS_IMAGE
113
+ from lerobot.utils.utils import init_logging
114
+
115
+
116
@dataclass
class DeleteEpisodesConfig:
    # Discriminator read by edit_dataset() to dispatch to handle_delete_episodes().
    type: str = "delete_episodes"
    # Indices of episodes to remove; must be set (validated in the handler).
    episode_indices: list[int] | None = None
120
+
121
+
122
@dataclass
class SplitConfig:
    # Discriminator read by edit_dataset() to dispatch to handle_split().
    type: str = "split"
    # Mapping of split name -> fraction (float) or explicit episode index list.
    splits: dict[str, float | list[int]] | None = None
126
+
127
+
128
@dataclass
class MergeConfig:
    # Discriminator read by edit_dataset() to dispatch to handle_merge().
    type: str = "merge"
    # Repo ids of the datasets to merge; the top-level cfg.repo_id is the output.
    repo_ids: list[str] | None = None
132
+
133
+
134
@dataclass
class RemoveFeatureConfig:
    # Discriminator read by edit_dataset() to dispatch to handle_remove_feature().
    type: str = "remove_feature"
    # Names of the features to drop from the dataset; must be set.
    feature_names: list[str] | None = None
138
+
139
+
140
@dataclass
class ConvertToVideoConfig:
    # Discriminator read by edit_dataset() to dispatch to handle_convert_to_video().
    type: str = "convert_to_video"
    # Optional local output directory (used when new_repo_id is not given).
    output_dir: str | None = None
    # Encoding parameters forwarded to encode_video_frames().
    vcodec: str = "libsvtav1"
    pix_fmt: str = "yuv420p"
    g: int = 2  # group-of-pictures size
    crf: int = 30  # constant rate factor (quality vs size)
    fast_decode: int = 0  # fast-decode tuning flag
    # Subset of episode indices to convert; None means all episodes.
    episode_indices: list[int] | None = None
    # Threads used when dumping frames to disk before encoding.
    num_workers: int = 4
151
+
152
+
153
@dataclass
class EditDatasetConfig:
    # Source dataset repo id (for merge: the output repo id).
    repo_id: str
    # One of the operation configs above; its `type` field selects the handler.
    operation: DeleteEpisodesConfig | SplitConfig | MergeConfig | RemoveFeatureConfig | ConvertToVideoConfig
    # Optional local root directory; when None the HF LeRobot cache is used.
    root: str | None = None
    # When set, write results to a new dataset instead of editing in place.
    new_repo_id: str | None = None
    # Push the resulting dataset(s) to the Hugging Face Hub after editing.
    push_to_hub: bool = False
160
+
161
+
162
+ def get_output_path(repo_id: str, new_repo_id: str | None, root: Path | None) -> tuple[str, Path]:
163
+ if new_repo_id:
164
+ output_repo_id = new_repo_id
165
+ output_dir = root / new_repo_id if root else HF_LEROBOT_HOME / new_repo_id
166
+ else:
167
+ output_repo_id = repo_id
168
+ dataset_path = root / repo_id if root else HF_LEROBOT_HOME / repo_id
169
+ old_path = Path(str(dataset_path) + "_old")
170
+
171
+ if dataset_path.exists():
172
+ if old_path.exists():
173
+ shutil.rmtree(old_path)
174
+ shutil.move(str(dataset_path), str(old_path))
175
+
176
+ output_dir = dataset_path
177
+
178
+ return output_repo_id, output_dir
179
+
180
+
181
def handle_delete_episodes(cfg: EditDatasetConfig) -> None:
    """Delete the configured episodes from a dataset and write the result.

    Without `new_repo_id` the edit is in place: get_output_path() moves the
    existing dataset directory to `<path>_old`, and `dataset.root` is then
    re-pointed at that backup so the source data is read from there. The
    statement order below (load, move, re-point) is therefore load-bearing.
    """
    if not isinstance(cfg.operation, DeleteEpisodesConfig):
        raise ValueError("Operation config must be DeleteEpisodesConfig")

    if not cfg.operation.episode_indices:
        raise ValueError("episode_indices must be specified for delete_episodes operation")

    dataset = LeRobotDataset(cfg.repo_id, root=cfg.root)
    output_repo_id, output_dir = get_output_path(
        cfg.repo_id, cfg.new_repo_id, Path(cfg.root) if cfg.root else None
    )

    # In-place edit: the original directory was just renamed to `<path>_old`,
    # so read the source from the backup location.
    if cfg.new_repo_id is None:
        dataset.root = Path(str(dataset.root) + "_old")

    logging.info(f"Deleting episodes {cfg.operation.episode_indices} from {cfg.repo_id}")
    new_dataset = delete_episodes(
        dataset,
        episode_indices=cfg.operation.episode_indices,
        output_dir=output_dir,
        repo_id=output_repo_id,
    )

    logging.info(f"Dataset saved to {output_dir}")
    logging.info(f"Episodes: {new_dataset.meta.total_episodes}, Frames: {new_dataset.meta.total_frames}")

    if cfg.push_to_hub:
        logging.info(f"Pushing to hub as {output_repo_id}")
        LeRobotDataset(output_repo_id, root=output_dir).push_to_hub()
210
+
211
+
212
def handle_split(cfg: EditDatasetConfig) -> None:
    """Split a dataset into named subsets and optionally push each to the hub."""
    if not isinstance(cfg.operation, SplitConfig):
        raise ValueError("Operation config must be SplitConfig")

    if not cfg.operation.splits:
        raise ValueError(
            "splits dict must be specified with split names as keys and fractions/episode lists as values"
        )

    source = LeRobotDataset(cfg.repo_id, root=cfg.root)

    logging.info(f"Splitting dataset {cfg.repo_id} with splits: {cfg.operation.splits}")
    results = split_dataset(source, splits=cfg.operation.splits)

    for name, subset in results.items():
        subset_repo_id = f"{cfg.repo_id}_{name}"
        logging.info(
            f"{name}: {subset.meta.total_episodes} episodes, {subset.meta.total_frames} frames"
        )

        if cfg.push_to_hub:
            logging.info(f"Pushing {name} split to hub as {subset_repo_id}")
            LeRobotDataset(subset.repo_id, root=subset.root).push_to_hub()
235
+
236
+
237
def handle_merge(cfg: EditDatasetConfig) -> None:
    """Merge several datasets into one and optionally push the result to the hub."""
    if not isinstance(cfg.operation, MergeConfig):
        raise ValueError("Operation config must be MergeConfig")

    if not cfg.operation.repo_ids:
        raise ValueError("repo_ids must be specified for merge operation")

    if not cfg.repo_id:
        raise ValueError("repo_id must be specified as the output repository for merged dataset")

    logging.info(f"Loading {len(cfg.operation.repo_ids)} datasets to merge")
    sources = [LeRobotDataset(rid, root=cfg.root) for rid in cfg.operation.repo_ids]

    # Output directory mirrors the convention used by the other handlers.
    base = Path(cfg.root) if cfg.root else HF_LEROBOT_HOME
    output_dir = base / cfg.repo_id

    logging.info(f"Merging datasets into {cfg.repo_id}")
    merged = merge_datasets(
        sources,
        output_repo_id=cfg.repo_id,
        output_dir=output_dir,
    )

    logging.info(f"Merged dataset saved to {output_dir}")
    logging.info(
        f"Episodes: {merged.meta.total_episodes}, Frames: {merged.meta.total_frames}"
    )

    if cfg.push_to_hub:
        logging.info(f"Pushing to hub as {cfg.repo_id}")
        LeRobotDataset(merged.repo_id, root=output_dir).push_to_hub()
267
+
268
+
269
def handle_remove_feature(cfg: EditDatasetConfig) -> None:
    """Remove the configured features from a dataset and write the result.

    Without `new_repo_id` the edit is in place: get_output_path() moves the
    existing dataset directory to `<path>_old`, and `dataset.root` is then
    re-pointed at that backup so the source data is read from there. The
    statement order below (load, move, re-point) is therefore load-bearing.
    """
    if not isinstance(cfg.operation, RemoveFeatureConfig):
        raise ValueError("Operation config must be RemoveFeatureConfig")

    if not cfg.operation.feature_names:
        raise ValueError("feature_names must be specified for remove_feature operation")

    dataset = LeRobotDataset(cfg.repo_id, root=cfg.root)
    output_repo_id, output_dir = get_output_path(
        cfg.repo_id, cfg.new_repo_id, Path(cfg.root) if cfg.root else None
    )

    # In-place edit: the original directory was just renamed to `<path>_old`,
    # so read the source from the backup location.
    if cfg.new_repo_id is None:
        dataset.root = Path(str(dataset.root) + "_old")

    logging.info(f"Removing features {cfg.operation.feature_names} from {cfg.repo_id}")
    new_dataset = remove_feature(
        dataset,
        feature_names=cfg.operation.feature_names,
        output_dir=output_dir,
        repo_id=output_repo_id,
    )

    logging.info(f"Dataset saved to {output_dir}")
    logging.info(f"Remaining features: {list(new_dataset.meta.features.keys())}")

    if cfg.push_to_hub:
        logging.info(f"Pushing to hub as {output_repo_id}")
        LeRobotDataset(output_repo_id, root=output_dir).push_to_hub()
298
+
299
+
300
def save_episode_images_for_video(
    dataset: LeRobotDataset,
    imgs_dir: Path,
    img_key: str,
    episode_index: int,
    num_workers: int = 4,
) -> None:
    """Dump one episode's frames for a single camera to disk for video encoding.

    Frames are written as `frame-XXXXXX.png` (the naming convention expected
    by `encode_video_frames`) into `imgs_dir`, using a thread pool to
    parallelize the writes.

    Args:
        dataset: The LeRobot dataset to extract images from
        imgs_dir: Directory to save images to
        img_key: The image key (camera) to extract
        episode_index: Index of the episode to save
        num_workers: Number of threads for parallel image saving
    """
    imgs_dir.mkdir(parents=True, exist_ok=True)

    # Strip torch formatting so frames come back as PIL images, and keep only
    # this camera's column.
    plain_dataset = dataset.hf_dataset.with_format(None).select_columns(img_key)

    # Episode boundaries inside the flat dataset.
    start = dataset.meta.episodes["dataset_from_index"][episode_index]
    stop = dataset.meta.episodes["dataset_to_index"][episode_index]
    episode_frames = plain_dataset.select(range(start, stop))

    def write_frame(indexed_item):
        frame_idx, item = indexed_item
        # Use frame-XXXXXX.png format to match encode_video_frames expectations
        item[img_key].save(str(imgs_dir / f"frame-{frame_idx:06d}.png"), quality=100)
        return frame_idx

    with ThreadPoolExecutor(max_workers=num_workers) as executor:
        # Draining the iterator with list(...) re-raises any write error here.
        list(executor.map(write_frame, enumerate(episode_frames)))
347
+
348
+
349
def encode_episode_videos(
    dataset: LeRobotDataset,
    new_meta: LeRobotDatasetMetadata,
    episode_index: int,
    vcodec: str,
    pix_fmt: str,
    g: int,
    crf: int,
    fast_decode: int,
    temp_dir: Path,
    num_image_workers: int = 4,
) -> dict[str, dict]:
    """Encode videos for a single episode and return video metadata.

    For each camera, frames are dumped to `temp_dir`, encoded into an MP4 in
    the new dataset's layout, and the temporary frames are removed again.

    Args:
        dataset: Source dataset with images
        new_meta: Metadata object for the new video dataset
        episode_index: Episode index to process
        vcodec: Video codec
        pix_fmt: Pixel format
        g: Group of pictures size
        crf: Constant rate factor
        fast_decode: Fast decode tuning
        temp_dir: Temporary directory for images
        num_image_workers: Number of workers for saving images

    Returns:
        Dictionary mapping video keys to their metadata (chunk_index, file_index, timestamps)
    """
    hf_dataset = dataset.hf_dataset.with_format(None)
    img_keys = [key for key in hf_dataset.features if key.startswith(OBS_IMAGE)]

    video_metadata = {}
    fps = int(dataset.fps)  # Convert to int for PyAV compatibility
    episode_length = dataset.meta.episodes["length"][episode_index]
    episode_duration = episode_length / dataset.fps  # Use original fps for duration calculation

    for img_key in img_keys:
        # Save images temporarily
        imgs_dir = temp_dir / f"episode_{episode_index:06d}" / img_key
        save_episode_images_for_video(dataset, imgs_dir, img_key, episode_index, num_image_workers)

        # Determine chunk and file indices
        # For simplicity, we'll put each episode in its own file
        # NOTE(review): this indexing assumes episode indices are dense from 0;
        # with a sparse subset, distinct episodes could map to colliding
        # chunk/file pairs — TODO confirm against the dataset layout spec.
        chunk_idx = episode_index // new_meta.chunks_size
        file_idx = episode_index % new_meta.chunks_size

        # Create video path in the new dataset structure
        video_path = new_meta.root / new_meta.video_path.format(
            video_key=img_key, chunk_index=chunk_idx, file_index=file_idx
        )
        video_path.parent.mkdir(parents=True, exist_ok=True)

        # Encode video
        encode_video_frames(
            imgs_dir=imgs_dir,
            video_path=video_path,
            fps=fps,
            vcodec=vcodec,
            pix_fmt=pix_fmt,
            g=g,
            crf=crf,
            fast_decode=fast_decode,
            overwrite=True,
        )

        # Clean up temporary images
        shutil.rmtree(imgs_dir)

        # Store video metadata: each episode spans the whole file it was
        # encoded into, so timestamps run from 0 to the episode duration.
        video_metadata[img_key] = {
            f"videos/{img_key}/chunk_index": chunk_idx,
            f"videos/{img_key}/file_index": file_idx,
            f"videos/{img_key}/from_timestamp": 0.0,
            f"videos/{img_key}/to_timestamp": episode_duration,
        }

    return video_metadata
427
+
428
+
429
def convert_dataset_to_videos(
    dataset: LeRobotDataset,
    output_dir: Path,
    repo_id: str | None = None,
    vcodec: str = "libsvtav1",
    pix_fmt: str = "yuv420p",
    g: int = 2,
    crf: int = 30,
    fast_decode: int = 0,
    episode_indices: list[int] | None = None,
    num_workers: int = 4,
) -> LeRobotDataset:
    """Convert image-based dataset to video-based dataset.

    Creates a new LeRobotDataset with videos instead of images, following the proper
    LeRobot dataset structure with videos stored in chunked MP4 files.

    Bugfix: `dataset_from_index`/`dataset_to_index` used to be computed as
    `ep_idx * length` / `(ep_idx + 1) * length`, which is wrong whenever
    episodes have unequal lengths or only a subset of episodes is converted.
    They are now derived from a running cumulative frame offset.

    Args:
        dataset: The source LeRobot dataset with images
        output_dir: Directory to save the new video dataset
        repo_id: Repository ID for the new dataset (default: original_id + "_video")
        vcodec: Video codec (default: libsvtav1)
        pix_fmt: Pixel format (default: yuv420p)
        g: Group of pictures size (default: 2)
        crf: Constant rate factor (default: 30)
        fast_decode: Fast decode tuning (default: 0)
        episode_indices: List of episode indices to convert (None = all episodes)
        num_workers: Number of threads for parallel processing (default: 4)

    Returns:
        New LeRobotDataset with videos

    Raises:
        ValueError: If the source dataset is already video-based or contains no
            image features.
    """
    # Check that it's an image dataset
    if len(dataset.meta.video_keys) > 0:
        raise ValueError(
            f"This operation is for image datasets only. Video dataset provided: {dataset.repo_id}"
        )

    # Get all image keys
    hf_dataset = dataset.hf_dataset.with_format(None)
    img_keys = [key for key in hf_dataset.features if key.startswith(OBS_IMAGE)]

    if len(img_keys) == 0:
        raise ValueError(f"No image keys found in dataset {dataset.repo_id}")

    # Determine which episodes to process
    if episode_indices is None:
        episode_indices = list(range(dataset.meta.total_episodes))

    if repo_id is None:
        repo_id = f"{dataset.repo_id}_video"

    logging.info(
        f"Converting {len(episode_indices)} episodes with {len(img_keys)} cameras from {dataset.repo_id}"
    )
    logging.info(f"Video codec: {vcodec}, pixel format: {pix_fmt}, GOP: {g}, CRF: {crf}")

    # Create new features dict, converting image features to video features
    new_features = {}
    for key, value in dataset.meta.features.items():
        if key not in img_keys:
            new_features[key] = value
        else:
            # Convert image key to video format
            new_features[key] = value.copy()
            new_features[key]["dtype"] = "video"  # Change dtype from "image" to "video"
            # Video info will be updated after episodes are encoded

    # Create new metadata for video dataset
    new_meta = LeRobotDatasetMetadata.create(
        repo_id=repo_id,
        fps=dataset.meta.fps,
        features=new_features,
        robot_type=dataset.meta.robot_type,
        root=output_dir,
        use_videos=True,
        chunks_size=dataset.meta.chunks_size,
        data_files_size_in_mb=dataset.meta.data_files_size_in_mb,
        video_files_size_in_mb=dataset.meta.video_files_size_in_mb,
    )

    # Create temporary directory for image extraction
    temp_dir = output_dir / "temp_images"
    temp_dir.mkdir(parents=True, exist_ok=True)

    # Process each episode
    all_episode_metadata = []

    try:
        # Running frame offset across the episodes actually converted; this is
        # what makes dataset_from_index/to_index correct for unequal episode
        # lengths and sparse episode subsets.
        frame_offset = 0
        for ep_idx in tqdm(episode_indices, desc="Converting episodes to videos"):
            # Get episode metadata from source
            src_episode = dataset.meta.episodes[ep_idx]

            # Encode videos for this episode
            video_metadata = encode_episode_videos(
                dataset=dataset,
                new_meta=new_meta,
                episode_index=ep_idx,
                vcodec=vcodec,
                pix_fmt=pix_fmt,
                g=g,
                crf=crf,
                fast_decode=fast_decode,
                temp_dir=temp_dir,
                num_image_workers=num_workers,
            )

            # Build episode metadata with cumulative frame offsets
            ep_length = src_episode["length"]
            episode_meta = {
                "episode_index": ep_idx,
                "length": ep_length,
                "dataset_from_index": frame_offset,
                "dataset_to_index": frame_offset + ep_length,
            }
            frame_offset += ep_length

            # Add video metadata
            for img_key in img_keys:
                episode_meta.update(video_metadata[img_key])

            # Add data chunk/file info (using same structure as source)
            if "data/chunk_index" in src_episode:
                episode_meta["data/chunk_index"] = src_episode["data/chunk_index"]
                episode_meta["data/file_index"] = src_episode["data/file_index"]

            all_episode_metadata.append(episode_meta)

        # Copy and transform data files (removing image columns)
        _copy_data_without_images(dataset, new_meta, episode_indices, img_keys)

        # Save episode metadata
        episodes_df = pd.DataFrame(all_episode_metadata)
        episodes_path = new_meta.root / "meta" / "episodes" / "chunk-000" / "file-000.parquet"
        episodes_path.parent.mkdir(parents=True, exist_ok=True)
        episodes_df.to_parquet(episodes_path, index=False)

        # Update metadata info
        new_meta.info["total_episodes"] = len(episode_indices)
        new_meta.info["total_frames"] = sum(ep["length"] for ep in all_episode_metadata)
        new_meta.info["total_tasks"] = dataset.meta.total_tasks
        new_meta.info["splits"] = {"train": f"0:{len(episode_indices)}"}

        # Update video info for all image keys (now videos)
        # We need to manually set video info since update_video_info() checks video_keys first
        for img_key in img_keys:
            if not new_meta.features[img_key].get("info", None):
                video_path = new_meta.root / new_meta.video_path.format(
                    video_key=img_key, chunk_index=0, file_index=0
                )
                new_meta.info["features"][img_key]["info"] = get_video_info(video_path)

        from lerobot.datasets.utils import write_info

        write_info(new_meta.info, new_meta.root)

        # Copy stats and tasks
        if dataset.meta.stats is not None:
            # Remove image stats
            new_stats = {k: v for k, v in dataset.meta.stats.items() if k not in img_keys}
            write_stats(new_stats, new_meta.root)

        if dataset.meta.tasks is not None:
            write_tasks(dataset.meta.tasks, new_meta.root)

    finally:
        # Clean up temporary directory
        if temp_dir.exists():
            shutil.rmtree(temp_dir)

    logging.info(f"✓ Completed converting {dataset.repo_id} to video format")
    logging.info(f"New dataset saved to: {output_dir}")

    # Return new dataset
    return LeRobotDataset(repo_id=repo_id, root=output_dir)
602
+
603
+
604
def _copy_data_without_images(
    src_dataset: LeRobotDataset,
    dst_meta: LeRobotDatasetMetadata,
    episode_indices: list[int],
    img_keys: list[str],
) -> None:
    """Copy the parquet data files of the selected episodes, dropping image columns.

    Args:
        src_dataset: Source dataset
        dst_meta: Destination metadata
        episode_indices: Episodes to include
        img_keys: Image keys to remove
    """
    from lerobot.datasets.utils import DATA_DIR

    data_dir = src_dataset.root / DATA_DIR
    parquet_files = sorted(data_dir.glob("*/*.parquet"))
    if not parquet_files:
        raise ValueError(f"No parquet files found in {data_dir}")

    wanted_episodes = set(episode_indices)

    for src_path in tqdm(parquet_files, desc="Processing data files"):
        frame = pd.read_parquet(src_path).reset_index(drop=True)

        # Keep only rows belonging to the selected episodes.
        frame = frame[frame["episode_index"].isin(wanted_episodes)].copy()
        if frame.empty:
            continue

        # Drop image columns; their content is replaced by encoded videos.
        drop_cols = [col for col in img_keys if col in frame.columns]
        if drop_cols:
            frame = frame.drop(columns=drop_cols)

        # Recover chunk/file indices from the source layout
        # (data/chunk-XXX/file-XXX.parquet).
        rel = src_path.relative_to(src_dataset.root)
        chunk_idx = int(rel.parts[1].split("-")[1])
        file_idx = int(rel.parts[2].split("-")[1].split(".")[0])

        # Write to destination without pandas index
        dst_path = dst_meta.root / f"data/chunk-{chunk_idx:03d}/file-{file_idx:03d}.parquet"
        dst_path.parent.mkdir(parents=True, exist_ok=True)
        frame.to_parquet(dst_path, index=False)
653
+
654
+
655
def handle_convert_to_video(cfg: EditDatasetConfig) -> None:
    """Convert an image dataset to video format and optionally push it to the hub."""
    # Note: Parser may create any config type with the right fields, so we access fields directly
    # instead of checking isinstance()
    dataset = LeRobotDataset(cfg.repo_id, root=cfg.root)

    base = Path(cfg.root) if cfg.root else HF_LEROBOT_HOME
    custom_output_dir = getattr(cfg.operation, "output_dir", None)

    # Output resolution priority: 1) new_repo_id, 2) operation.output_dir,
    # 3) auto-generated "<repo_id>_video" name.
    if cfg.new_repo_id:
        output_repo_id = cfg.new_repo_id
        output_dir = base / cfg.new_repo_id
        logging.info(f"Saving to new dataset: {cfg.new_repo_id}")
    elif custom_output_dir:
        output_dir = Path(custom_output_dir)
        # The directory name doubles as the repo id for local-only storage.
        output_repo_id = output_dir.name
        logging.info(f"Saving to local directory: {output_dir}")
    else:
        output_repo_id = f"{cfg.repo_id}_video"
        output_dir = base / output_repo_id
        logging.info(f"Saving to auto-generated location: {output_dir}")

    logging.info(f"Converting dataset {cfg.repo_id} to video format")

    new_dataset = convert_dataset_to_videos(
        dataset=dataset,
        output_dir=output_dir,
        repo_id=output_repo_id,
        vcodec=getattr(cfg.operation, "vcodec", "libsvtav1"),
        pix_fmt=getattr(cfg.operation, "pix_fmt", "yuv420p"),
        g=getattr(cfg.operation, "g", 2),
        crf=getattr(cfg.operation, "crf", 30),
        fast_decode=getattr(cfg.operation, "fast_decode", 0),
        episode_indices=getattr(cfg.operation, "episode_indices", None),
        num_workers=getattr(cfg.operation, "num_workers", 4),
    )

    logging.info("Video dataset created successfully!")
    logging.info(f"Location: {output_dir}")
    logging.info(f"Episodes: {new_dataset.meta.total_episodes}")
    logging.info(f"Frames: {new_dataset.meta.total_frames}")

    if cfg.push_to_hub:
        logging.info(f"Pushing to hub as {output_repo_id}...")
        new_dataset.push_to_hub()
        logging.info("✓ Successfully pushed to hub!")
    else:
        logging.info("Dataset saved locally (not pushed to hub)")
707
+
708
+
709
@parser.wrap()
def edit_dataset(cfg: EditDatasetConfig) -> None:
    """Dispatch to the handler selected by ``cfg.operation.type``."""
    handlers = {
        "delete_episodes": handle_delete_episodes,
        "split": handle_split,
        "merge": handle_merge,
        "remove_feature": handle_remove_feature,
        "convert_to_video": handle_convert_to_video,
    }

    operation_type = cfg.operation.type
    handler = handlers.get(operation_type)
    if handler is None:
        raise ValueError(
            f"Unknown operation type: {operation_type}\n"
            f"Available operations: delete_episodes, split, merge, remove_feature, convert_to_video"
        )
    handler(cfg)
728
+
729
+
730
def main() -> None:
    """Script entry point: set up logging, then run the parser-wrapped editor."""
    init_logging()
    edit_dataset()


if __name__ == "__main__":
    main()
lerobot/src/lerobot/scripts/lerobot_eval.py ADDED
@@ -0,0 +1,813 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Evaluate a policy on an environment by running rollouts and computing metrics.
17
+
18
+ Usage examples:
19
+
20
+ You want to evaluate a model from the hub (eg: https://huggingface.co/lerobot/diffusion_pusht)
21
+ for 10 episodes.
22
+
23
+ ```
24
+ lerobot-eval \
25
+ --policy.path=lerobot/diffusion_pusht \
26
+ --env.type=pusht \
27
+ --eval.batch_size=10 \
28
+ --eval.n_episodes=10 \
29
+ --policy.use_amp=false \
30
+ --policy.device=cuda
31
+ ```
32
+
33
+ OR, you want to evaluate a model checkpoint from the LeRobot training script for 10 episodes.
34
+ ```
35
+ lerobot-eval \
36
+ --policy.path=outputs/train/diffusion_pusht/checkpoints/005000/pretrained_model \
37
+ --env.type=pusht \
38
+ --eval.batch_size=10 \
39
+ --eval.n_episodes=10 \
40
+ --policy.use_amp=false \
41
+ --policy.device=cuda
42
+ ```
43
+
44
+ Note that in both examples, the repo/folder should contain at least `config.json` and `model.safetensors` files.
45
+
46
+ You can learn about the CLI options for this script in the `EvalPipelineConfig` in lerobot/configs/eval.py
47
+ """
48
+
49
+ import concurrent.futures as cf
50
+ import json
51
+ import logging
52
+ import threading
53
+ import time
54
+ from collections import defaultdict
55
+ from collections.abc import Callable
56
+ from contextlib import nullcontext
57
+ from copy import deepcopy
58
+ from dataclasses import asdict
59
+ from functools import partial
60
+ from pathlib import Path
61
+ from pprint import pformat
62
+ from typing import Any, TypedDict
63
+
64
+ import einops
65
+ import gymnasium as gym
66
+ import numpy as np
67
+ import torch
68
+ from termcolor import colored
69
+ from torch import Tensor, nn
70
+ from tqdm import trange
71
+
72
+ from lerobot.configs import parser
73
+ from lerobot.configs.eval import EvalPipelineConfig
74
+ from lerobot.envs.factory import make_env, make_env_pre_post_processors
75
+ from lerobot.envs.utils import (
76
+ add_envs_task,
77
+ check_env_attributes_and_types,
78
+ close_envs,
79
+ preprocess_observation,
80
+ )
81
+ from lerobot.policies.factory import make_policy, make_pre_post_processors
82
+ from lerobot.policies.pretrained import PreTrainedPolicy
83
+ from lerobot.processor import PolicyAction, PolicyProcessorPipeline
84
+ from lerobot.utils.constants import ACTION, DONE, OBS_STR, REWARD
85
+ from lerobot.utils.import_utils import register_third_party_plugins
86
+ from lerobot.utils.io_utils import write_video
87
+ from lerobot.utils.random_utils import set_seed
88
+ from lerobot.utils.utils import (
89
+ get_safe_torch_device,
90
+ init_logging,
91
+ inside_slurm,
92
+ )
93
+
94
+
95
def rollout(
    env: gym.vector.VectorEnv,
    policy: PreTrainedPolicy,
    env_preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    env_postprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction],
    seeds: list[int] | None = None,
    return_observations: bool = False,
    render_callback: Callable[[gym.vector.VectorEnv], None] | None = None,
) -> dict:
    """Run a batched policy rollout once through a batch of environments.

    Note that all environments in the batch are run until the last environment is done. This means some
    data will probably need to be discarded (for environments that aren't the first one to be done).

    The return dictionary contains:
        (optional) "observation": A dictionary of (batch, sequence + 1, *) tensors mapped to observation
            keys. NOTE that this has an extra sequence element relative to the other keys in the
            dictionary. This is because an extra observation is included for after the environment is
            terminated or truncated.
        "action": A (batch, sequence, action_dim) tensor of actions applied based on the observations (not
            including the last observations).
        "reward": A (batch, sequence) tensor of rewards received for applying the actions.
        "success": A (batch, sequence) tensor of success conditions (the only time this can be True is upon
            environment termination/truncation).
        "done": A (batch, sequence) tensor of **cumulative** done conditions. For any given batch element,
            the first True is followed by True's all the way till the end. This can be used for masking
            extraneous elements from the sequences above.

    Args:
        env: The batch of environments.
        policy: The policy. Must be a PyTorch nn module.
        env_preprocessor: Environment-specific observation pipeline applied before the policy pipeline.
        env_postprocessor: Environment-specific action pipeline applied after the policy pipeline.
        preprocessor: Policy observation pipeline applied just before `select_action`.
        postprocessor: Policy action pipeline applied to the raw policy output.
        seeds: The environments are seeded once at the start of the rollout. If provided, this argument
            specifies the seeds for each of the environments.
        return_observations: Whether to include all observations in the returned rollout data. Observations
            are returned optionally because they typically take more memory to cache. Defaults to False.
        render_callback: Optional rendering callback to be used after the environments are reset, and after
            every step.
    Returns:
        The dictionary described above.
    """
    assert isinstance(policy, nn.Module), "Policy must be a PyTorch nn module."

    # Reset the policy and environments.
    policy.reset()
    observation, info = env.reset(seed=seeds)
    if render_callback is not None:
        render_callback(env)

    # Per-step accumulators; each entry has a leading batch dimension.
    all_observations = []
    all_actions = []
    all_rewards = []
    all_successes = []
    all_dones = []

    step = 0
    # Keep track of which environments are done.
    done = np.array([False] * env.num_envs)
    # NOTE(review): assumes every sub-env exposes `_max_episode_steps` and that
    # they all share the same value (only index 0 is used) — confirm for new envs.
    max_steps = env.call("_max_episode_steps")[0]
    progbar = trange(
        max_steps,
        desc=f"Running rollout with at most {max_steps} steps",
        disable=inside_slurm(),  # we dont want progress bar when we use slurm, since it clutters the logs
        leave=False,
    )
    check_env_attributes_and_types(env)
    while not np.all(done) and step < max_steps:
        # Numpy array to tensor and changing dictionary keys to LeRobot policy format.
        observation = preprocess_observation(observation)
        if return_observations:
            all_observations.append(deepcopy(observation))

        # Infer "task" from attributes of environments.
        # TODO: works with SyncVectorEnv but not AsyncVectorEnv
        observation = add_envs_task(env, observation)

        # Apply environment-specific preprocessing (e.g., LiberoProcessorStep for LIBERO)
        observation = env_preprocessor(observation)

        observation = preprocessor(observation)
        with torch.inference_mode():
            action = policy.select_action(observation)
        action = postprocessor(action)

        # The env-level action pipeline operates on a transition dict keyed by ACTION.
        action_transition = {ACTION: action}
        action_transition = env_postprocessor(action_transition)
        action = action_transition[ACTION]

        # Convert to CPU / numpy.
        action_numpy: np.ndarray = action.to("cpu").numpy()
        assert action_numpy.ndim == 2, "Action dimensions should be (batch, action_dim)"

        # Apply the next action.
        observation, reward, terminated, truncated, info = env.step(action_numpy)
        if render_callback is not None:
            render_callback(env)

        # VectorEnv stores is_success in `info["final_info"][env_index]["is_success"]`. "final_info" isn't
        # available if none of the envs finished.
        if "final_info" in info:
            final_info = info["final_info"]
            if not isinstance(final_info, dict):
                raise RuntimeError(
                    "Unsupported `final_info` format: expected dict (Gymnasium >= 1.0). "
                    "You're likely using an older version of gymnasium (< 1.0). Please upgrade."
                )
            successes = final_info["is_success"].tolist()
        else:
            successes = [False] * env.num_envs

        # Keep track of which environments are done so far.
        # Mark the episode as done if we reach the maximum step limit.
        # This ensures that the rollout always terminates cleanly at `max_steps`,
        # and allows logging/saving (e.g., videos) to be triggered consistently.
        done = terminated | truncated | done
        if step + 1 == max_steps:
            done = np.ones_like(done, dtype=bool)

        all_actions.append(torch.from_numpy(action_numpy))
        all_rewards.append(torch.from_numpy(reward))
        all_dones.append(torch.from_numpy(done))
        all_successes.append(torch.tensor(successes))

        step += 1
        # An episode counts as successful if ANY step so far flagged success.
        running_success_rate = (
            einops.reduce(torch.stack(all_successes, dim=1), "b n -> b", "any").numpy().mean()
        )
        progbar.set_postfix({"running_success_rate": f"{running_success_rate.item() * 100:.1f}%"})
        progbar.update()

    # Track the final observation.
    if return_observations:
        observation = preprocess_observation(observation)
        all_observations.append(deepcopy(observation))

    # Stack the sequence along the first dimension so that we have (batch, sequence, *) tensors.
    ret = {
        ACTION: torch.stack(all_actions, dim=1),
        "reward": torch.stack(all_rewards, dim=1),
        "success": torch.stack(all_successes, dim=1),
        "done": torch.stack(all_dones, dim=1),
    }
    if return_observations:
        stacked_observations = {}
        for key in all_observations[0]:
            stacked_observations[key] = torch.stack([obs[key] for obs in all_observations], dim=1)
        ret[OBS_STR] = stacked_observations

    # Some policies swap in alternate modules for inference; restore originals if supported.
    if hasattr(policy, "use_original_modules"):
        policy.use_original_modules()

    return ret
248
+
249
+
250
def eval_policy(
    env: gym.vector.VectorEnv,
    policy: PreTrainedPolicy,
    env_preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    env_postprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction],
    n_episodes: int,
    max_episodes_rendered: int = 0,
    videos_dir: Path | None = None,
    return_episode_data: bool = False,
    start_seed: int | None = None,
) -> dict:
    """Evaluate a policy over repeated batched rollouts and aggregate metrics.

    Args:
        env: The batch of environments.
        policy: The policy.
        env_preprocessor: Environment-specific observation pipeline.
        env_postprocessor: Environment-specific action pipeline.
        preprocessor: Policy observation pipeline.
        postprocessor: Policy action pipeline.
        n_episodes: The number of episodes to evaluate.
        max_episodes_rendered: Maximum number of episodes to render into videos.
        videos_dir: Where to save rendered videos. Required if max_episodes_rendered > 0.
        return_episode_data: Whether to return episode data for online training. Incorporates the data into
            the "episodes" key of the returned dictionary.
        start_seed: The first seed to use for the first individual rollout. For all subsequent rollouts the
            seed is incremented by 1. If not provided, the environments are not manually seeded.
    Returns:
        Dictionary with "per_episode" and "aggregated" metrics, plus optional "episodes" and
        "video_paths" keys.
    Raises:
        ValueError: If `videos_dir` is missing while rendering is requested, or if `policy` is
            neither a PreTrainedPolicy nor a PEFT-wrapped model.
    """
    if max_episodes_rendered > 0 and not videos_dir:
        raise ValueError("If max_episodes_rendered > 0, videos_dir must be provided.")

    if not isinstance(policy, PreTrainedPolicy):
        exc = ValueError(
            f"Policy of type 'PreTrainedPolicy' is expected, but type '{type(policy)}' was provided."
        )
        # A PEFT-wrapped policy is also acceptable; anything else is rejected.
        try:
            from peft import PeftModel

            if not isinstance(policy, PeftModel):
                raise exc
        except ImportError:
            raise exc from None

    start = time.time()
    policy.eval()

    # Determine how many batched rollouts we need to get n_episodes. Note that if n_episodes is not evenly
    # divisible by env.num_envs we end up discarding some data in the last batch.
    n_batches = n_episodes // env.num_envs + int((n_episodes % env.num_envs) != 0)

    # Keep track of some metrics.
    sum_rewards = []
    max_rewards = []
    all_successes = []
    all_seeds = []
    threads = []  # for video saving threads
    n_episodes_rendered = 0  # for saving the correct number of videos

    # Callback for visualization.
    def render_frame(env: gym.vector.VectorEnv):
        # noqa: B023
        if n_episodes_rendered >= max_episodes_rendered:
            return
        n_to_render_now = min(max_episodes_rendered - n_episodes_rendered, env.num_envs)
        if isinstance(env, gym.vector.SyncVectorEnv):
            ep_frames.append(np.stack([env.envs[i].render() for i in range(n_to_render_now)]))  # noqa: B023
        elif isinstance(env, gym.vector.AsyncVectorEnv):
            # Here we must render all frames and discard any we don't need.
            ep_frames.append(np.stack(env.call("render")[:n_to_render_now]))

    if max_episodes_rendered > 0:
        video_paths: list[str] = []

    # Initialized unconditionally so later reads are always safe (fixes the old
    # FIXME where `episode_data` only existed when return_episode_data was set).
    episode_data: dict | None = None

    # we dont want progress bar when we use slurm, since it clutters the logs
    progbar = trange(n_batches, desc="Stepping through eval batches", disable=inside_slurm())
    for batch_ix in progbar:
        # Cache frames for rendering videos. Each item will be (b, h, w, c), and the list indexes the rollout
        # step.
        if max_episodes_rendered > 0:
            ep_frames: list[np.ndarray] = []

        if start_seed is None:
            seeds = None
        else:
            seeds = range(
                start_seed + (batch_ix * env.num_envs), start_seed + ((batch_ix + 1) * env.num_envs)
            )
        rollout_data = rollout(
            env=env,
            policy=policy,
            env_preprocessor=env_preprocessor,
            env_postprocessor=env_postprocessor,
            preprocessor=preprocessor,
            postprocessor=postprocessor,
            seeds=list(seeds) if seeds else None,
            return_observations=return_episode_data,
            render_callback=render_frame if max_episodes_rendered > 0 else None,
        )

        # Figure out where in each rollout sequence the first done condition was encountered (results after
        # this won't be included).
        n_steps = rollout_data["done"].shape[1]
        # Note: this relies on a property of argmax: that it returns the first occurrence as a tiebreaker.
        done_indices = torch.argmax(rollout_data["done"].to(int), dim=1)

        # Make a mask with shape (batch, n_steps) to mask out rollout data after the first done
        # (batch-element-wise). Note the `done_indices + 1` to make sure to keep the data from the done step.
        mask = (torch.arange(n_steps) <= einops.repeat(done_indices + 1, "b -> b s", s=n_steps)).int()
        # Extend metrics.
        batch_sum_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "sum")
        sum_rewards.extend(batch_sum_rewards.tolist())
        batch_max_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "max")
        max_rewards.extend(batch_max_rewards.tolist())
        batch_successes = einops.reduce((rollout_data["success"] * mask), "b n -> b", "any")
        all_successes.extend(batch_successes.tolist())
        if seeds:
            all_seeds.extend(seeds)
        else:
            # BUGFIX: append one placeholder per environment in the batch, not a single
            # None per batch. Otherwise `all_seeds` ends up shorter than the other
            # metric lists and the `zip(..., strict=True)` below raises ValueError
            # whenever env.num_envs > 1 and no start_seed is given.
            all_seeds.extend([None] * env.num_envs)

        if return_episode_data:
            this_episode_data = _compile_episode_data(
                rollout_data,
                done_indices,
                start_episode_index=batch_ix * env.num_envs,
                start_data_index=(0 if episode_data is None else (episode_data["index"][-1].item() + 1)),
                fps=env.unwrapped.metadata["render_fps"],
            )
            if episode_data is None:
                episode_data = this_episode_data
            else:
                # Some sanity checks to make sure we are correctly compiling the data.
                assert episode_data["episode_index"][-1] + 1 == this_episode_data["episode_index"][0]
                assert episode_data["index"][-1] + 1 == this_episode_data["index"][0]
                # Concatenate the episode data.
                episode_data = {k: torch.cat([episode_data[k], this_episode_data[k]]) for k in episode_data}

        # Maybe render video for visualization.
        if max_episodes_rendered > 0 and len(ep_frames) > 0:
            batch_stacked_frames = np.stack(ep_frames, axis=1)  # (b, t, *)
            for stacked_frames, done_index in zip(
                batch_stacked_frames, done_indices.flatten().tolist(), strict=False
            ):
                if n_episodes_rendered >= max_episodes_rendered:
                    break

                videos_dir.mkdir(parents=True, exist_ok=True)
                video_path = videos_dir / f"eval_episode_{n_episodes_rendered}.mp4"
                video_paths.append(str(video_path))
                # Write videos on background threads so evaluation isn't blocked on disk I/O.
                thread = threading.Thread(
                    target=write_video,
                    args=(
                        str(video_path),
                        stacked_frames[: done_index + 1],  # + 1 to capture the last observation
                        env.unwrapped.metadata["render_fps"],
                    ),
                )
                thread.start()
                threads.append(thread)
                n_episodes_rendered += 1

        progbar.set_postfix(
            {"running_success_rate": f"{np.mean(all_successes[:n_episodes]).item() * 100:.1f}%"}
        )

    # Wait till all video rendering threads are done.
    for thread in threads:
        thread.join()

    # Compile eval info.
    info = {
        "per_episode": [
            {
                "episode_ix": i,
                "sum_reward": sum_reward,
                "max_reward": max_reward,
                "success": success,
                "seed": seed,
            }
            for i, (sum_reward, max_reward, success, seed) in enumerate(
                zip(
                    sum_rewards[:n_episodes],
                    max_rewards[:n_episodes],
                    all_successes[:n_episodes],
                    all_seeds[:n_episodes],
                    strict=True,
                )
            )
        ],
        "aggregated": {
            "avg_sum_reward": float(np.nanmean(sum_rewards[:n_episodes])),
            "avg_max_reward": float(np.nanmean(max_rewards[:n_episodes])),
            "pc_success": float(np.nanmean(all_successes[:n_episodes]) * 100),
            "eval_s": time.time() - start,
            "eval_ep_s": (time.time() - start) / n_episodes,
        },
    }

    if return_episode_data:
        info["episodes"] = episode_data

    if max_episodes_rendered > 0:
        info["video_paths"] = video_paths

    return info
458
+
459
+
460
def _compile_episode_data(
    rollout_data: dict, done_indices: Tensor, start_episode_index: int, start_data_index: int, fps: float
) -> dict:
    """Convenience function for `eval_policy(return_episode_data=True)`.

    Flattens batched rollout tensors into a single dict of per-frame tensors,
    mirroring the layout used when datasets are pushed to the hub (`push_to_hub`).
    """
    episode_dicts: list[dict] = []
    total_frames = 0
    n_eps = rollout_data[ACTION].shape[0]

    for ep_ix in range(n_eps):
        # +2 keeps both the first "done" frame and the trailing observation frame.
        num_frames = done_indices[ep_ix].item() + 2
        total_frames += num_frames
        # Exclude the last observation frame for now; it is copy-padded below.
        end = num_frames - 1

        frame_ids = torch.arange(0, end, 1)
        ep_dict = {
            ACTION: rollout_data[ACTION][ep_ix, :end],
            "episode_index": torch.tensor([start_episode_index + ep_ix] * end),
            "frame_index": frame_ids,
            "timestamp": frame_ids / fps,
            DONE: rollout_data["done"][ep_ix, :end],
            "next.success": rollout_data["success"][ep_ix, :end],
            REWARD: rollout_data["reward"][ep_ix, :end].type(torch.float32),
        }

        # Copy-pad every key with its final value so the extra observation frame
        # has matching entries in all columns.
        ep_dict = {key: torch.cat([val, val[-1:]]) for key, val in ep_dict.items()}

        # Observations already include the trailing frame, so take num_frames directly.
        for key in rollout_data[OBS_STR]:
            ep_dict[key] = rollout_data[OBS_STR][key][ep_ix, :num_frames]

        episode_dicts.append(ep_dict)

    data_dict = {key: torch.cat([d[key] for d in episode_dicts]) for key in episode_dicts[0]}
    data_dict["index"] = torch.arange(start_data_index, start_data_index + total_frames, 1)

    return data_dict
503
+
504
+
505
@parser.wrap()
def eval_main(cfg: EvalPipelineConfig):
    """Build envs, policy, and processor pipelines from `cfg`, run evaluation, and save results.

    Writes rendered videos under `<output_dir>/videos` and the metrics dict to
    `<output_dir>/eval_info.json`, then closes all vectorized environments.
    """
    logging.info(pformat(asdict(cfg)))

    # Check device is available
    device = get_safe_torch_device(cfg.policy.device, log=True)

    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    set_seed(cfg.seed)

    logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {cfg.output_dir}")

    logging.info("Making environment.")
    envs = make_env(
        cfg.env,
        n_envs=cfg.eval.batch_size,
        use_async_envs=cfg.eval.use_async_envs,
        trust_remote_code=cfg.trust_remote_code,
    )

    logging.info("Making policy.")

    policy = make_policy(
        cfg=cfg.policy,
        env_cfg=cfg.env,
        rename_map=cfg.rename_map,
    )

    policy.eval()

    # The inference device is automatically set to match the detected hardware, overriding any previous device settings from training to ensure compatibility.
    preprocessor_overrides = {
        "device_processor": {"device": str(policy.config.device)},
        "rename_observations_processor": {"rename_map": cfg.rename_map},
    }

    preprocessor, postprocessor = make_pre_post_processors(
        policy_cfg=cfg.policy,
        pretrained_path=cfg.policy.pretrained_path,
        preprocessor_overrides=preprocessor_overrides,
    )

    # Create environment-specific preprocessor and postprocessor (e.g., for LIBERO environments)
    env_preprocessor, env_postprocessor = make_env_pre_post_processors(env_cfg=cfg.env, policy_cfg=cfg.policy)

    # autocast is only enabled when AMP is requested; otherwise a no-op context is used.
    with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.policy.use_amp else nullcontext():
        info = eval_policy_all(
            envs=envs,
            policy=policy,
            env_preprocessor=env_preprocessor,
            env_postprocessor=env_postprocessor,
            preprocessor=preprocessor,
            postprocessor=postprocessor,
            n_episodes=cfg.eval.n_episodes,
            max_episodes_rendered=10,
            videos_dir=Path(cfg.output_dir) / "videos",
            start_seed=cfg.seed,
            max_parallel_tasks=cfg.env.max_parallel_tasks,
        )
    print("Overall Aggregated Metrics:")
    print(info["overall"])

    # Print each top-level section of the result dict ("per_task", "per_group", "overall").
    # NOTE(review): the loop iterates info's top-level keys, not individual suites — confirm intent.
    for task_group, task_group_info in info.items():
        print(f"\nAggregated Metrics for {task_group}:")
        print(task_group_info)
    # Close all vec envs
    close_envs(envs)

    # Save info
    with open(Path(cfg.output_dir) / "eval_info.json", "w") as f:
        json.dump(info, f, indent=2)

    logging.info("End of eval")
580
+
581
+
582
# ---- typed payload returned by one task eval ----
class TaskMetrics(TypedDict):
    """Per-episode metrics produced by evaluating a single task's vec env."""

    sum_rewards: list[float]  # total reward per episode
    max_rewards: list[float]  # highest single-step reward per episode
    successes: list[bool]  # whether each episode ended in success
    video_paths: list[str]  # paths of any rendered episode videos


# Keys accumulated across tasks/groups by `eval_policy_all`; must match TaskMetrics fields.
ACC_KEYS = ("sum_rewards", "max_rewards", "successes", "video_paths")
591
+
592
+
593
def eval_one(
    env: gym.vector.VectorEnv,
    *,
    policy: PreTrainedPolicy,
    env_preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    env_postprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction],
    n_episodes: int,
    max_episodes_rendered: int,
    videos_dir: Path | None,
    return_episode_data: bool,
    start_seed: int | None,
) -> TaskMetrics:
    """Evaluates one task_id of one suite using the provided vec env."""

    result = eval_policy(
        env=env,
        policy=policy,
        env_preprocessor=env_preprocessor,
        env_postprocessor=env_postprocessor,
        preprocessor=preprocessor,
        postprocessor=postprocessor,
        n_episodes=n_episodes,
        max_episodes_rendered=max_episodes_rendered,
        videos_dir=videos_dir,
        return_episode_data=return_episode_data,
        start_seed=start_seed,
    )

    # Flatten the per-episode dicts into parallel lists for accumulation upstream.
    sum_rewards: list[float] = []
    max_rewards: list[float] = []
    successes: list[bool] = []
    for episode in result["per_episode"]:
        sum_rewards.append(episode["sum_reward"])
        max_rewards.append(episode["max_reward"])
        successes.append(episode["success"])

    return TaskMetrics(
        sum_rewards=sum_rewards,
        max_rewards=max_rewards,
        successes=successes,
        video_paths=result.get("video_paths", []),
    )
632
+
633
+
634
def run_one(
    task_group: str,
    task_id: int,
    env,
    *,
    policy,
    env_preprocessor,
    env_postprocessor,
    preprocessor,
    postprocessor,
    n_episodes: int,
    max_episodes_rendered: int,
    videos_dir: Path | None,
    return_episode_data: bool,
    start_seed: int | None,
):
    """
    Evaluate a single (task_group, task_id, env) triple.

    Returns a ``(task_group, task_id, task_metrics)`` tuple. Kept at module
    level so it is straightforward to exercise from tests.
    """
    if videos_dir is None:
        task_videos_dir = None
    else:
        # Each task gets its own video subdirectory, e.g. "<videos_dir>/<group>_<id>".
        task_videos_dir = videos_dir / f"{task_group}_{task_id}"
        task_videos_dir.mkdir(parents=True, exist_ok=True)

    task_metrics = eval_one(
        env,
        policy=policy,
        env_preprocessor=env_preprocessor,
        env_postprocessor=env_postprocessor,
        preprocessor=preprocessor,
        postprocessor=postprocessor,
        n_episodes=n_episodes,
        max_episodes_rendered=max_episodes_rendered,
        videos_dir=task_videos_dir,
        return_episode_data=return_episode_data,
        start_seed=start_seed,
    )

    # Guarantee a "video_paths" key whenever rendering was requested, so the
    # downstream accumulator never has to special-case its absence.
    if max_episodes_rendered > 0:
        task_metrics.setdefault("video_paths", [])

    return task_group, task_id, task_metrics
678
+
679
+
680
def eval_policy_all(
    envs: dict[str, dict[int, gym.vector.VectorEnv]],
    policy,
    env_preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    env_postprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
    postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction],
    n_episodes: int,
    *,
    max_episodes_rendered: int = 0,
    videos_dir: Path | None = None,
    return_episode_data: bool = False,
    start_seed: int | None = None,
    max_parallel_tasks: int = 1,
) -> dict:
    """
    Evaluate a nested `envs` dict: {task_group: {task_id: vec_env}}.
    This implementation flattens tasks, runs them sequentially or via ThreadPoolExecutor,
    accumulates per-group and overall statistics, and returns the same aggregate metrics
    schema as the single-env evaluator (avg_sum_reward / avg_max_reward / pc_success / timings)
    plus per-task infos.

    Returns a dict with keys "per_task" (list of raw task metrics), "per_group"
    (aggregates per task_group), and "overall" (aggregates over everything).

    NOTE(review): when max_parallel_tasks > 1, the single `policy` object is shared
    across worker threads — confirm `policy.select_action` / its internal state is
    safe under concurrent calls before raising the parallelism.
    """
    start_t = time.time()

    # Flatten envs into list of (task_group, task_id, env)
    tasks = [(tg, tid, vec) for tg, group in envs.items() for tid, vec in group.items()]

    # accumulators: track metrics at both per-group level and across all groups
    group_acc: dict[str, dict[str, list]] = defaultdict(lambda: {k: [] for k in ACC_KEYS})
    overall: dict[str, list] = {k: [] for k in ACC_KEYS}
    per_task_infos: list[dict] = []

    # small inline helper to accumulate one task's metrics into accumulators
    def _accumulate_to(group: str, metrics: dict):
        # metrics expected to contain 'sum_rewards', 'max_rewards', 'successes', optionally 'video_paths'
        # but eval_one may store per-episode lists; we assume metrics uses scalars averaged per task as before.
        # To be robust, accept scalars or lists.
        def _append(key, value):
            if value is None:
                return
            if isinstance(value, list):
                group_acc[group][key].extend(value)
                overall[key].extend(value)
            else:
                group_acc[group][key].append(value)
                overall[key].append(value)

        _append("sum_rewards", metrics.get("sum_rewards"))
        _append("max_rewards", metrics.get("max_rewards"))
        _append("successes", metrics.get("successes"))
        # video_paths is list-like
        paths = metrics.get("video_paths", [])
        if paths:
            group_acc[group]["video_paths"].extend(paths)
            overall["video_paths"].extend(paths)

    # Choose runner (sequential vs threaded)
    # All per-task arguments except (task_group, task_id, env) are bound once here.
    task_runner = partial(
        run_one,
        policy=policy,
        env_preprocessor=env_preprocessor,
        env_postprocessor=env_postprocessor,
        preprocessor=preprocessor,
        postprocessor=postprocessor,
        n_episodes=n_episodes,
        max_episodes_rendered=max_episodes_rendered,
        videos_dir=videos_dir,
        return_episode_data=return_episode_data,
        start_seed=start_seed,
    )

    if max_parallel_tasks <= 1:
        # sequential path (single accumulator path on the main thread)
        # NOTE: keeping a single-threaded accumulator avoids concurrent list appends or locks
        for task_group, task_id, env in tasks:
            tg, tid, metrics = task_runner(task_group, task_id, env)
            _accumulate_to(tg, metrics)
            per_task_infos.append({"task_group": tg, "task_id": tid, "metrics": metrics})
    else:
        # threaded path: submit all tasks, consume completions on main thread and accumulate there
        with cf.ThreadPoolExecutor(max_workers=max_parallel_tasks) as executor:
            fut2meta = {}
            for task_group, task_id, env in tasks:
                fut = executor.submit(task_runner, task_group, task_id, env)
                fut2meta[fut] = (task_group, task_id)
            # Accumulation happens only on the main thread as futures complete.
            for fut in cf.as_completed(fut2meta):
                tg, tid, metrics = fut.result()
                _accumulate_to(tg, metrics)
                per_task_infos.append({"task_group": tg, "task_id": tid, "metrics": metrics})

    # compute aggregated metrics helper (robust to lists/scalars)
    def _agg_from_list(xs):
        if not xs:
            return float("nan")
        arr = np.array(xs, dtype=float)
        return float(np.nanmean(arr))

    # compute per-group aggregates
    groups_aggregated = {}
    for group, acc in group_acc.items():
        groups_aggregated[group] = {
            "avg_sum_reward": _agg_from_list(acc["sum_rewards"]),
            "avg_max_reward": _agg_from_list(acc["max_rewards"]),
            "pc_success": _agg_from_list(acc["successes"]) * 100 if acc["successes"] else float("nan"),
            "n_episodes": len(acc["sum_rewards"]),
            "video_paths": list(acc["video_paths"]),
        }

    # overall aggregates
    overall_agg = {
        "avg_sum_reward": _agg_from_list(overall["sum_rewards"]),
        "avg_max_reward": _agg_from_list(overall["max_rewards"]),
        "pc_success": _agg_from_list(overall["successes"]) * 100 if overall["successes"] else float("nan"),
        "n_episodes": len(overall["sum_rewards"]),
        "eval_s": time.time() - start_t,
        "eval_ep_s": (time.time() - start_t) / max(1, len(overall["sum_rewards"])),
        "video_paths": list(overall["video_paths"]),
    }

    return {
        "per_task": per_task_infos,
        "per_group": groups_aggregated,
        "overall": overall_agg,
    }
804
+
805
+
806
def main():
    """CLI entry point: initialize logging and plugins, then run evaluation."""
    init_logging()
    # Third-party env/policy plugins must be registered before config parsing resolves types.
    register_third_party_plugins()
    # `eval_main` is decorated with `@parser.wrap()`, so CLI args populate its config.
    eval_main()


if __name__ == "__main__":
    main()
lerobot/src/lerobot/scripts/lerobot_find_cameras.py ADDED
@@ -0,0 +1,319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ Helper to find the camera devices available in your system.
19
+
20
+ Example:
21
+
22
+ ```shell
23
+ lerobot-find-cameras
24
+ ```
25
+ """
26
+
27
+ # NOTE(Steven): RealSense can also be identified/opened as OpenCV cameras. If you know the camera is a RealSense, use the `lerobot-find-cameras realsense` flag to avoid confusion.
28
+ # NOTE(Steven): macOS cameras sometimes report different FPS at init time, not an issue here as we don't specify FPS when opening the cameras, but the information displayed might not be truthful.
29
+
30
+ import argparse
31
+ import concurrent.futures
32
+ import logging
33
+ import time
34
+ from pathlib import Path
35
+ from typing import Any
36
+
37
+ import numpy as np
38
+ from PIL import Image
39
+
40
+ from lerobot.cameras.configs import ColorMode
41
+ from lerobot.cameras.opencv.camera_opencv import OpenCVCamera
42
+ from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
43
+ from lerobot.cameras.realsense.camera_realsense import RealSenseCamera
44
+ from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig
45
+
46
+ logger = logging.getLogger(__name__)
47
+
48
+
49
def find_all_opencv_cameras() -> list[dict[str, Any]]:
    """
    Finds all available OpenCV cameras plugged into the system.

    Returns:
        A list of all available OpenCV cameras with their metadata.
    """
    detected: list[dict[str, Any]] = []
    logger.info("Searching for OpenCV cameras...")
    try:
        found = OpenCVCamera.find_cameras()
        detected.extend(found)
        logger.info(f"Found {len(found)} OpenCV cameras.")
    except Exception as e:
        # Detection failure is non-fatal: report it and return whatever we have.
        logger.error(f"Error finding OpenCV cameras: {e}")

    return detected
67
+
68
+
69
def find_all_realsense_cameras() -> list[dict[str, Any]]:
    """
    Finds all available RealSense cameras plugged into the system.

    Returns:
        A list of all available RealSense cameras with their metadata.
    """
    detected: list[dict[str, Any]] = []
    logger.info("Searching for RealSense cameras...")
    try:
        found = RealSenseCamera.find_cameras()
        detected.extend(found)
        logger.info(f"Found {len(found)} RealSense cameras.")
    except ImportError:
        # pyrealsense2 is an optional dependency; absence is expected on some setups.
        logger.warning("Skipping RealSense camera search: pyrealsense2 library not found or not importable.")
    except Exception as e:
        logger.error(f"Error finding RealSense cameras: {e}")

    return detected
89
+
90
+
91
def find_and_print_cameras(camera_type_filter: str | None = None) -> list[dict[str, Any]]:
    """
    Finds available cameras based on an optional filter and prints their information.

    Args:
        camera_type_filter: Optional string to filter cameras ("realsense" or "opencv").
                            If None, lists all cameras.

    Returns:
        A list of all available cameras matching the filter, with their metadata.
    """
    # Normalize the filter once so comparisons below are case-insensitive.
    camera_type_filter = camera_type_filter.lower() if camera_type_filter else None

    all_cameras_info: list[dict[str, Any]] = []
    if camera_type_filter in (None, "opencv"):
        all_cameras_info.extend(find_all_opencv_cameras())
    if camera_type_filter in (None, "realsense"):
        all_cameras_info.extend(find_all_realsense_cameras())

    if not all_cameras_info:
        if camera_type_filter:
            logger.warning(f"No {camera_type_filter} cameras were detected.")
        else:
            logger.warning("No cameras (OpenCV or RealSense) were detected.")
        return all_cameras_info

    print("\n--- Detected Cameras ---")
    for idx, cam_info in enumerate(all_cameras_info):
        print(f"Camera #{idx}:")
        for key, value in cam_info.items():
            label = key.replace('_', ' ').capitalize()
            # Stream profiles are nested dicts; print them indented one level deeper.
            if key == "default_stream_profile" and isinstance(value, dict):
                print(f"  {label}:")
                for sub_key, sub_value in value.items():
                    print(f"    {sub_key.capitalize()}: {sub_value}")
            else:
                print(f"  {label}: {value}")
        print("-" * 20)
    return all_cameras_info
130
+
131
+
132
def save_image(
    img_array: np.ndarray,
    camera_identifier: str | int,
    images_dir: Path,
    camera_type: str,
):
    """
    Save a single RGB frame to ``images_dir`` as a PNG using Pillow.

    The frame is interpreted as-is in RGB order (no color conversion happens here
    despite the executor plumbing upstream). The filename is derived from the
    camera type and identifier, so repeated calls for the same camera overwrite
    the previous image — only the latest frame per camera is kept on disk.
    Failures are logged, never raised.

    Args:
        img_array: Image pixels; passed straight to ``Image.fromarray(..., mode="RGB")``,
            so it is expected to be an HxWx3 uint8 array — TODO confirm with callers.
        camera_identifier: Camera index, path, or serial used to build the filename.
        images_dir: Destination directory (created if missing).
        camera_type: e.g. "OpenCV" or "RealSense"; lower-cased into the filename.
    """
    try:
        img = Image.fromarray(img_array, mode="RGB")

        # Identifiers may be device paths (e.g. /dev/video0); make them filesystem-safe.
        safe_identifier = str(camera_identifier).replace("/", "_").replace("\\", "_")
        filename_prefix = f"{camera_type.lower()}_{safe_identifier}"
        filename = f"{filename_prefix}.png"

        path = images_dir / filename
        path.parent.mkdir(parents=True, exist_ok=True)
        img.save(str(path))
        logger.info(f"Saved image: {path}")
    except Exception as e:
        logger.error(f"Failed to save image for camera {camera_identifier} (type {camera_type}): {e}")
154
+
155
+
156
def create_camera_instance(cam_meta: dict[str, Any]) -> dict[str, Any] | None:
    """Create and connect to a camera instance based on metadata."""
    cam_type = cam_meta.get("type")
    cam_id = cam_meta.get("id")
    camera = None

    logger.info(f"Preparing {cam_type} ID {cam_id} with default profile")

    try:
        if cam_type == "OpenCV":
            camera = OpenCVCamera(
                OpenCVCameraConfig(index_or_path=cam_id, color_mode=ColorMode.RGB)
            )
        elif cam_type == "RealSense":
            camera = RealSenseCamera(
                RealSenseCameraConfig(serial_number_or_name=cam_id, color_mode=ColorMode.RGB)
            )
        else:
            logger.warning(f"Unknown camera type: {cam_type} for ID {cam_id}. Skipping.")
            return None

        logger.info(f"Connecting to {cam_type} camera: {cam_id}...")
        camera.connect(warmup=True)
        return {"instance": camera, "meta": cam_meta}
    except Exception as e:
        # Connection failures are logged and reported as None so the caller can skip this camera.
        logger.error(f"Failed to connect or configure {cam_type} camera {cam_id}: {e}")
        if camera and camera.is_connected:
            camera.disconnect()
        return None
190
+
191
+
192
def process_camera_image(
    cam_dict: dict[str, Any], output_dir: Path, current_time: float
) -> None:
    """
    Capture one frame from a single camera and save it to ``output_dir``.

    Fix: the original annotated the return type as ``concurrent.futures.Future | None``,
    but the function returns the result of ``save_image`` — which has no return
    statement — so it always returns ``None`` and runs synchronously in the
    caller's thread. The annotation now reflects that; runtime behavior is unchanged
    (callers truth-test the result, and ``None`` remains falsy).

    Args:
        cam_dict: Dict with keys ``"instance"`` (a connected camera) and ``"meta"``.
        output_dir: Directory where the captured image is written.
        current_time: Timestamp used only in the timeout log message.
    """
    cam = cam_dict["instance"]
    meta = cam_dict["meta"]
    cam_type_str = str(meta.get("type", "unknown"))
    cam_id_str = str(meta.get("id", "unknown"))

    try:
        image_data = cam.read()
        save_image(image_data, cam_id_str, output_dir, cam_type_str)
    except TimeoutError:
        logger.warning(
            f"Timeout reading from {cam_type_str} camera {cam_id_str} at time {current_time:.2f}s."
        )
    except Exception as e:
        logger.error(f"Error reading from {cam_type_str} camera {cam_id_str}: {e}")
    return None
217
+
218
+
219
def cleanup_cameras(cameras_to_use: list[dict[str, Any]]):
    """Disconnect every connected camera, logging (not raising) on failure."""
    logger.info(f"Disconnecting {len(cameras_to_use)} cameras...")
    for cam_dict in cameras_to_use:
        instance = cam_dict["instance"]
        try:
            if instance and instance.is_connected:
                instance.disconnect()
        except Exception as e:
            logger.error(f"Error disconnecting camera {cam_dict['meta'].get('id')}: {e}")
228
+
229
+
230
def save_images_from_all_cameras(
    output_dir: Path,
    record_time_s: float = 2.0,
    camera_type: str | None = None,
):
    """
    Connects to detected cameras (optionally filtered by type) and saves images from each.
    Uses default stream profiles for width, height, and FPS.

    Args:
        output_dir: Directory to save images.
        record_time_s: Duration in seconds to record images.
        camera_type: Optional string to filter cameras ("realsense" or "opencv").
            If None, uses all detected cameras.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    logger.info(f"Saving images to {output_dir}")
    all_camera_metadata = find_and_print_cameras(camera_type_filter=camera_type)

    if not all_camera_metadata:
        logger.warning("No cameras detected matching the criteria. Cannot save images.")
        return

    # Connect to every detected camera; connection failures are logged and skipped.
    cameras_to_use = []
    for cam_meta in all_camera_metadata:
        camera_instance = create_camera_instance(cam_meta)
        if camera_instance:
            cameras_to_use.append(camera_instance)

    if not cameras_to_use:
        logger.warning("No cameras could be connected. Aborting image save.")
        return

    logger.info(f"Starting image capture for {record_time_s} seconds from {len(cameras_to_use)} cameras.")
    start_time = time.perf_counter()

    # NOTE(review): `process_camera_image` runs synchronously and always returns
    # None (`save_image` has no return value), so `futures` below stays empty and
    # the executor never receives any work — captures happen sequentially in this
    # thread. Confirm whether parallel capture via `executor.submit` was intended.
    with concurrent.futures.ThreadPoolExecutor(max_workers=len(cameras_to_use) * 2) as executor:
        try:
            while time.perf_counter() - start_time < record_time_s:
                futures = []
                current_capture_time = time.perf_counter()

                for cam_dict in cameras_to_use:
                    future = process_camera_image(cam_dict, output_dir, current_capture_time)
                    if future:
                        futures.append(future)

                if futures:
                    concurrent.futures.wait(futures)

        except KeyboardInterrupt:
            logger.info("Capture interrupted by user.")
        finally:
            # Always shut down the pool and disconnect hardware, even on Ctrl+C.
            print("\nFinalizing image saving...")
            executor.shutdown(wait=True)
            cleanup_cameras(cameras_to_use)
            print(f"Image capture finished. Images saved to {output_dir}")
287
+
288
+
289
def main():
    """Parse CLI arguments and run camera discovery + image capture."""
    parser = argparse.ArgumentParser(
        description="Unified camera utility script for listing cameras and capturing images."
    )

    parser.add_argument(
        "camera_type",
        nargs="?",
        choices=["realsense", "opencv"],
        default=None,
        type=str,
        help="Specify camera type to capture from (e.g., 'realsense', 'opencv'). Captures from all if omitted.",
    )
    parser.add_argument(
        "--output-dir",
        default="outputs/captured_images",
        type=Path,
        help="Directory to save images. Default: outputs/captured_images",
    )
    parser.add_argument(
        "--record-time-s",
        default=6.0,
        type=float,
        help="Time duration to attempt capturing frames. Default: 6 seconds.",
    )
    args = parser.parse_args()
    save_images_from_all_cameras(
        output_dir=args.output_dir,
        record_time_s=args.record_time_s,
        camera_type=args.camera_type,
    )
316
+
317
+
318
+ if __name__ == "__main__":
319
+ main()
lerobot/src/lerobot/scripts/lerobot_find_joint_limits.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ Script to find joint limits and end-effector bounds via teleoperation.
19
+
20
+ Example:
21
+
22
+ ```shell
23
+ lerobot-find-joint-limits \
24
+ --robot.type=so100_follower \
25
+ --robot.port=/dev/tty.usbmodem58760432981 \
26
+ --robot.id=black \
27
+ --teleop.type=so100_leader \
28
+ --teleop.port=/dev/tty.usbmodem58760434471 \
29
+ --teleop.id=blue \
30
+ --urdf_path=<user>/SO-ARM100-main/Simulation/SO101/so101_new_calib.urdf \
31
+ --target_frame_name=gripper \
32
+ --teleop_time_s=30 \
33
+ --warmup_time_s=5 \
34
+ --control_loop_fps=30
35
+ ```
36
+ """
37
+
38
+ import time
39
+ from dataclasses import dataclass
40
+
41
+ import draccus
42
+ import numpy as np
43
+
44
+ from lerobot.model.kinematics import RobotKinematics
45
+ from lerobot.robots import ( # noqa: F401
46
+ RobotConfig,
47
+ bi_so_follower,
48
+ koch_follower,
49
+ make_robot_from_config,
50
+ omx_follower,
51
+ so_follower,
52
+ )
53
+ from lerobot.teleoperators import ( # noqa: F401
54
+ TeleoperatorConfig,
55
+ bi_so_leader,
56
+ gamepad,
57
+ koch_leader,
58
+ make_teleoperator_from_config,
59
+ omx_leader,
60
+ so_leader,
61
+ )
62
+ from lerobot.utils.robot_utils import precise_sleep
63
+
64
+
65
@dataclass
class FindJointLimitsConfig:
    # Teleoperator (leader) and robot (follower) hardware configurations.
    teleop: TeleoperatorConfig
    robot: RobotConfig

    # Path to URDF file for kinematics
    # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo:
    # https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
    urdf_path: str
    # Kinematic frame whose translation is tracked as the end effector.
    target_frame_name: str = "gripper"

    # Duration of the recording phase in seconds
    teleop_time_s: float = 30
    # Duration of the warmup phase in seconds
    warmup_time_s: float = 5
    # Control loop frequency
    control_loop_fps: int = 30
82
+
83
+
84
@draccus.wrap()
def find_joint_and_ee_bounds(cfg: FindJointLimitsConfig):
    """
    Teleoperate the robot and record the min/max joint positions and
    end-effector translation reached, printing them at the end.

    Runs a warmup phase (no recording) followed by a recording phase of
    ``cfg.teleop_time_s`` seconds. Ctrl+C stops early; any limits recorded so
    far are still printed. Devices are always disconnected on exit.
    """
    teleop = make_teleoperator_from_config(cfg.teleop)
    robot = make_robot_from_config(cfg.robot)

    print(f"Connecting to robot: {cfg.robot.type}...")
    teleop.connect()
    robot.connect()
    print("Devices connected.")

    # Initialize Kinematics
    try:
        kinematics = RobotKinematics(cfg.urdf_path, cfg.target_frame_name)
    except Exception as e:
        # Without kinematics we cannot compute EE bounds; bail out cleanly.
        print(f"Error initializing kinematics: {e}")
        print("Ensure URDF path and target frame name are correct.")
        robot.disconnect()
        teleop.disconnect()
        return

    # Limits stay None until recording starts; used below to detect "no data".
    max_pos = None
    min_pos = None
    max_ee = None
    min_ee = None

    start_t = time.perf_counter()
    warmup_done = False

    print("\n" + "=" * 40)
    print(f" WARMUP PHASE ({cfg.warmup_time_s}s)")
    print(" Move the robot freely to ensure control works.")
    print(" Data is NOT being recorded yet.")
    print("=" * 40 + "\n")

    try:
        while True:
            t0 = time.perf_counter()

            # 1. Teleoperation Control Loop
            action = teleop.get_action()
            robot.send_action(action)

            # 2. Read Observations
            observation = robot.get_observation()
            joint_positions = np.array([observation[f"{key}.pos"] for key in robot.bus.motors])

            # 3. Calculate Kinematics
            # Forward kinematics to get (x, y, z) translation
            ee_pos = kinematics.forward_kinematics(joint_positions)[:3, 3]

            current_time = time.perf_counter()
            elapsed = current_time - start_t

            # 4. Handle Phases
            if elapsed < cfg.warmup_time_s:
                # Still in warmup
                pass

            else:
                # Phase Transition: Warmup -> Recording
                if not warmup_done:
                    print("\n" + "=" * 40)
                    print(" RECORDING STARTED")
                    print(" Move robot to ALL joint limits.")
                    print(" Press Ctrl+C to stop early and save results.")
                    print("=" * 40 + "\n")

                    # Initialize limits with current position at start of recording
                    max_pos = joint_positions.copy()
                    min_pos = joint_positions.copy()
                    max_ee = ee_pos.copy()
                    min_ee = ee_pos.copy()
                    warmup_done = True

                # Update Limits (element-wise running min/max)
                max_ee = np.maximum(max_ee, ee_pos)
                min_ee = np.minimum(min_ee, ee_pos)
                max_pos = np.maximum(max_pos, joint_positions)
                min_pos = np.minimum(min_pos, joint_positions)

                # Time check
                recording_time = elapsed - cfg.warmup_time_s
                remaining = cfg.teleop_time_s - recording_time

                # Simple throttle for print statements (every ~1 sec)
                # NOTE(review): fires only when int(recording_time * 100) is an exact
                # multiple of 100; at typical loop rates some seconds may be skipped
                # or hit twice — harmless, but not exactly once per second.
                if int(recording_time * 100) % 100 == 0:
                    print(f"Time remaining: {remaining:.1f}s", end="\r")

                if recording_time > cfg.teleop_time_s:
                    print("\nTime limit reached.")
                    break

            # Sleep the remainder of the period to hold control_loop_fps.
            precise_sleep(max(1.0 / cfg.control_loop_fps - (time.perf_counter() - t0), 0.0))

    except KeyboardInterrupt:
        print("\n\nInterrupted by user. Stopping safely...")

    finally:
        # Safety: Disconnect devices
        print("\nDisconnecting devices...")
        robot.disconnect()
        teleop.disconnect()

    # Results Output
    if max_pos is not None:
        print("\n" + "=" * 40)
        print("FINAL RESULTS")
        print("=" * 40)

        # Rounding for readability
        r_max_ee = np.round(max_ee, 4).tolist()
        r_min_ee = np.round(min_ee, 4).tolist()
        r_max_pos = np.round(max_pos, 4).tolist()
        r_min_pos = np.round(min_pos, 4).tolist()

        print("\n# End Effector Bounds (x, y, z):")
        print(f"max_ee = {r_max_ee}")
        print(f"min_ee = {r_min_ee}")

        # NOTE(review): the label below says radians, but the values come straight
        # from `observation[f"{key}.pos"]`, whose unit depends on the robot driver —
        # confirm the unit before relying on this printout.
        print("\n# Joint Position Limits (radians):")
        print(f"max_pos = {r_max_pos}")
        print(f"min_pos = {r_min_pos}")

    else:
        print("No data recorded (exited during warmup).")
210
+
211
+
212
def main():
    """Console-script entry point; draccus parses CLI args inside the wrapped function."""
    find_joint_and_ee_bounds()
214
+
215
+
216
+ if __name__ == "__main__":
217
+ main()
lerobot/src/lerobot/scripts/lerobot_find_port.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ Helper to find the USB port associated with your MotorsBus.
17
+
18
+ Example:
19
+
20
+ ```shell
21
+ lerobot-find-port
22
+ ```
23
+ """
24
+
25
+ import platform
26
+ import time
27
+ from pathlib import Path
28
+
29
+
30
def find_available_ports():
    """
    Return a list of candidate serial-port device names on this system.

    On Windows, COM ports are listed via pyserial. On Linux/macOS, /dev/tty*
    device paths are globbed directly, so pyserial is not required there.

    Fix: the original imported ``serial.tools.list_ports`` unconditionally at the
    top of the function, so the call failed with ImportError on Linux/macOS when
    pyserial was not installed even though it is only needed on Windows. The
    import is now confined to the Windows branch.
    """
    if platform.system() == "Windows":
        from serial.tools import list_ports  # Part of pyserial library

        # List COM ports using pyserial
        ports = [port.device for port in list_ports.comports()]
    else:  # Linux/macOS
        # List /dev/tty* ports for Unix-based systems
        ports = [str(path) for path in Path("/dev").glob("tty*")]
    return ports
40
+
41
+
42
def find_port():
    """Interactively identify a MotorsBus serial port by diffing the port list before/after unplugging."""
    print("Finding all available ports for the MotorsBus.")
    ports_before = find_available_ports()
    print("Ports before disconnecting:", ports_before)

    print("Remove the USB cable from your MotorsBus and press Enter when done.")
    input()  # Wait for user to disconnect the device

    time.sleep(0.5)  # Allow some time for port to be released
    ports_after = find_available_ports()
    ports_diff = list(set(ports_before) - set(ports_after))

    # Exactly one port must have disappeared; anything else is ambiguous.
    if len(ports_diff) == 0:
        raise OSError(f"Could not detect the port. No difference was found ({ports_diff}).")
    if len(ports_diff) > 1:
        raise OSError(f"Could not detect the port. More than one port was found ({ports_diff}).")

    port = ports_diff[0]
    print(f"The port of this MotorsBus is '{port}'")
    print("Reconnect the USB cable.")
62
+
63
+
64
def main():
    """Console-script entry point for `lerobot-find-port`."""
    find_port()
66
+
67
+
68
+ if __name__ == "__main__":
69
+ main()
lerobot/src/lerobot/scripts/lerobot_imgtransform_viz.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Visualize effects of image transforms for a given configuration.
17
+
18
+ This script will generate examples of transformed images as they are output by LeRobot dataset.
19
+ Additionally, each individual transform can be visualized separately as well as examples of combined transforms
20
+
21
+ Example:
22
+ ```bash
23
+ lerobot-imgtransform-viz \
24
+ --repo_id=lerobot/pusht \
25
+ --episodes='[0]' \
26
+ --image_transforms.enable=True
27
+ ```
28
+ """
29
+
30
+ import logging
31
+ from copy import deepcopy
32
+ from dataclasses import replace
33
+ from pathlib import Path
34
+
35
+ import draccus
36
+ from torchvision.transforms import ToPILImage
37
+
38
+ from lerobot.configs.default import DatasetConfig
39
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
40
+ from lerobot.datasets.transforms import (
41
+ ImageTransforms,
42
+ ImageTransformsConfig,
43
+ make_transform_from_config,
44
+ )
45
+
46
+ OUTPUT_DIR = Path("outputs/image_transforms")
47
+ to_pil = ToPILImage()
48
+
49
+
50
def save_all_transforms(cfg: ImageTransformsConfig, original_frame, output_dir, n_examples):
    """Save `n_examples` frames produced by the full (combined) transform pipeline."""
    output_dir_all = output_dir / "all"
    output_dir_all.mkdir(parents=True, exist_ok=True)

    pipeline = ImageTransforms(cfg)
    for idx in range(1, n_examples + 1):
        frame = pipeline(original_frame)
        to_pil(frame).save(output_dir_all / f"{idx}.png", quality=100)

    print("Combined transforms examples saved to:")
    print(f"  {output_dir_all}")
62
+
63
def save_each_transform(cfg: ImageTransformsConfig, original_frame, output_dir, n_examples):
    """
    Save example outputs for each configured transform individually.

    For every transform in ``cfg.tfs``: writes ``n_examples`` randomly sampled
    applications, plus three deterministic ones (min.png / max.png / mean.png)
    obtained by collapsing each kwarg's (min, max) range to a single value.
    No-op (with a warning) when ``cfg.enable`` is False.
    """
    if not cfg.enable:
        logging.warning(
            "No single transforms will be saved, because `image_transforms.enable=False`. To enable, set `enable` to True in `ImageTransformsConfig` or in the command line with `--image_transforms.enable=True`."
        )
        return

    print("Individual transforms examples saved to:")
    for tf_name, tf_cfg in cfg.tfs.items():
        # Apply a few transformation with random value in min_max range
        output_dir_single = output_dir / tf_name
        output_dir_single.mkdir(parents=True, exist_ok=True)

        tf = make_transform_from_config(tf_cfg)
        for i in range(1, n_examples + 1):
            transformed_frame = tf(original_frame)
            to_pil(transformed_frame).save(output_dir_single / f"{i}.png", quality=100)

        # Apply min, max, average transformations.
        # Each kwarg value is unpacked as a (min, max) range below; a degenerate
        # [v, v] range makes the sampled transform deterministic at that value.
        tf_cfg_kwgs_min = deepcopy(tf_cfg.kwargs)
        tf_cfg_kwgs_max = deepcopy(tf_cfg.kwargs)
        tf_cfg_kwgs_avg = deepcopy(tf_cfg.kwargs)

        for key, (min_, max_) in tf_cfg.kwargs.items():
            avg = (min_ + max_) / 2
            tf_cfg_kwgs_min[key] = [min_, min_]
            tf_cfg_kwgs_max[key] = [max_, max_]
            tf_cfg_kwgs_avg[key] = [avg, avg]

        tf_min = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_min}))
        tf_max = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_max}))
        tf_avg = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_avg}))

        tf_frame_min = tf_min(original_frame)
        tf_frame_max = tf_max(original_frame)
        tf_frame_avg = tf_avg(original_frame)

        to_pil(tf_frame_min).save(output_dir_single / "min.png", quality=100)
        to_pil(tf_frame_max).save(output_dir_single / "max.png", quality=100)
        to_pil(tf_frame_avg).save(output_dir_single / "mean.png", quality=100)

        print(f"  {output_dir_single}")
105
+
106
+
107
@draccus.wrap()
def visualize_image_transforms(cfg: DatasetConfig, output_dir: Path = OUTPUT_DIR, n_examples: int = 5):
    """
    Save the first frame of a dataset plus examples of the configured image
    transforms — combined and per-transform — under ``output_dir/<repo name>``.
    """
    dataset = LeRobotDataset(
        repo_id=cfg.repo_id,
        episodes=cfg.episodes,
        revision=cfg.revision,
        video_backend=cfg.video_backend,
    )

    # Use the last path component of the repo id as the run's subdirectory.
    output_dir = output_dir / cfg.repo_id.split("/")[-1]
    output_dir.mkdir(parents=True, exist_ok=True)

    # Get 1st frame from 1st camera of 1st episode
    original_frame = dataset[0][dataset.meta.camera_keys[0]]
    to_pil(original_frame).save(output_dir / "original_frame.png", quality=100)
    print("\nOriginal frame saved to:")
    print(f"  {output_dir / 'original_frame.png'}.")

    save_all_transforms(cfg.image_transforms, original_frame, output_dir, n_examples)
    save_each_transform(cfg.image_transforms, original_frame, output_dir, n_examples)
127
+
128
+
129
def main():
    """Console-script entry point; draccus parses CLI args inside the wrapped function."""
    visualize_image_transforms()
131
+
132
+
133
+ if __name__ == "__main__":
134
+ main()
lerobot/src/lerobot/scripts/lerobot_info.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ Use this script to get a quick summary of your system config.
19
+ It should be able to run without any of LeRobot's dependencies or LeRobot itself installed.
20
+
21
+ Example:
22
+
23
+ ```shell
24
+ lerobot-info
25
+ ```
26
+ """
27
+
28
+ import importlib
29
+ import platform
30
+ import shutil
31
+ import subprocess
32
+ from importlib.metadata import PackageNotFoundError, distribution
33
+
34
+ PACKAGE_NAME = "lerobot"
35
+
36
+
37
def get_ffmpeg_version() -> str:
    """Get the ffmpeg version if installed, otherwise return 'N/A'."""
    ffmpeg_path = shutil.which("ffmpeg")
    if ffmpeg_path is None:
        return "N/A"
    try:
        proc = subprocess.run([ffmpeg_path, "-version"], capture_output=True, text=True, check=True)
        # First stdout line looks like "ffmpeg version X.Y ..." — take the third token.
        return proc.stdout.splitlines()[0].split(" ")[2]
    except (subprocess.SubprocessError, IndexError):
        return "Installed (version parsing failed)"
49
+
50
+
51
def get_package_version(package_name: str) -> str:
    """Get the version of a package if it exists, otherwise return 'N/A'."""
    try:
        mod = importlib.import_module(package_name)
    except ImportError:
        return "N/A"
    return getattr(mod, "__version__", "Installed (version not found)")
58
+
59
+
60
def get_sys_info() -> dict[str, str]:
    """Run this to get basic system info to help for tracking issues & bugs."""
    # General package versions
    info = {
        "LeRobot version": get_package_version(PACKAGE_NAME),
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Huggingface Hub version": get_package_version("huggingface_hub"),
        "Datasets version": get_package_version("datasets"),
        "Numpy version": get_package_version("numpy"),
        "FFmpeg version": get_ffmpeg_version(),
    }

    # PyTorch / GPU details; every field defaults to "N/A" when torch is absent.
    torch_version = "N/A"
    torch_cuda_available = "N/A"
    cuda_version = "N/A"
    gpu_model = "N/A"
    try:
        import torch

        torch_version = str(torch.__version__)
        torch_cuda_available = torch.cuda.is_available()
        if torch_cuda_available:
            cuda_version = str(torch.version.cuda)
            gpu_model = torch.cuda.get_device_name(0)  # first available GPU
    except ImportError:
        pass

    info["PyTorch version"] = torch_version
    info["Is PyTorch built with CUDA support?"] = str(torch_cuda_available)
    info["Cuda version"] = cuda_version
    info["GPU model"] = gpu_model
    info["Using GPU in script?"] = "<fill in>"

    # Installed console scripts exposed by the package, if it is installed.
    scripts = "N/A"
    try:
        dist = distribution(PACKAGE_NAME)
        scripts = [ep.name for ep in dist.entry_points if ep.group == "console_scripts"]
    except PackageNotFoundError:
        pass
    info[f"{PACKAGE_NAME} scripts"] = str(scripts)

    return info
110
+
111
+
112
def format_dict_for_markdown(d: dict[str, str]) -> str:
    """Formats a dictionary into a markdown-friendly bulleted list."""
    bullet_lines = [f"- {key}: {value}" for key, value in d.items()]
    return "\n".join(bullet_lines)
115
+
116
+
117
def main():
    """
    Main function to print system info in markdown format.
    """
    print(format_dict_for_markdown(get_sys_info()))
123
+
124
+
125
+ if __name__ == "__main__":
126
+ main()
lerobot/src/lerobot/scripts/lerobot_replay.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ Replays the actions of an episode from a dataset on a robot.
17
+
18
+ Examples:
19
+
20
+ ```shell
21
+ lerobot-replay \
22
+ --robot.type=so100_follower \
23
+ --robot.port=/dev/tty.usbmodem58760431541 \
24
+ --robot.id=black \
25
+ --dataset.repo_id=aliberts/record-test \
26
+ --dataset.episode=0
27
+ ```
28
+
29
+ Example replay with bimanual so100:
30
+ ```shell
31
+ lerobot-replay \
32
+ --robot.type=bi_so_follower \
33
+ --robot.left_arm_port=/dev/tty.usbmodem5A460851411 \
34
+ --robot.right_arm_port=/dev/tty.usbmodem5A460812391 \
35
+ --robot.id=bimanual_follower \
36
+ --dataset.repo_id=${HF_USER}/bimanual-so100-handover-cube \
37
+ --dataset.episode=0
38
+ ```
39
+
40
+ """
41
+
42
+ import logging
43
+ import time
44
+ from dataclasses import asdict, dataclass
45
+ from pathlib import Path
46
+ from pprint import pformat
47
+
48
+ from lerobot.configs import parser
49
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
50
+ from lerobot.processor import (
51
+ make_default_robot_action_processor,
52
+ )
53
+ from lerobot.robots import ( # noqa: F401
54
+ Robot,
55
+ RobotConfig,
56
+ bi_so_follower,
57
+ earthrover_mini_plus,
58
+ hope_jr,
59
+ koch_follower,
60
+ make_robot_from_config,
61
+ omx_follower,
62
+ reachy2,
63
+ so_follower,
64
+ unitree_g1,
65
+ )
66
+ from lerobot.utils.constants import ACTION
67
+ from lerobot.utils.import_utils import register_third_party_plugins
68
+ from lerobot.utils.robot_utils import precise_sleep
69
+ from lerobot.utils.utils import (
70
+ init_logging,
71
+ log_say,
72
+ )
73
+
74
+
75
@dataclass
class DatasetReplayConfig:
    """Selects which dataset episode to replay and its intended playback rate."""

    # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).
    repo_id: str
    # Episode to replay.
    episode: int
    # Root directory where the dataset will be stored (e.g. 'dataset/path').
    root: str | Path | None = None
    # Limit the frames per second. By default, uses the policy fps.
    # NOTE(review): the replay loop paces itself with `dataset.fps`, not this field —
    # it looks unused; confirm before relying on it.
    fps: int = 30
85
+
86
+
87
@dataclass
class ReplayConfig:
    """Top-level configuration for the `lerobot-replay` entry point."""

    # Robot to drive (instantiated via `make_robot_from_config`).
    robot: RobotConfig
    # Dataset/episode selection for the replay.
    dataset: DatasetReplayConfig
    # Use vocal synthesis to read events.
    play_sounds: bool = True
93
+
94
+
95
@parser.wrap()
def replay(cfg: ReplayConfig):
    """Replay the recorded actions of one dataset episode on a physical robot.

    Loads the requested episode, then sends each recorded action frame to the
    robot in sequence, sleeping between frames to match the dataset's fps.
    """
    init_logging()
    logging.info(pformat(asdict(cfg)))

    action_processor = make_default_robot_action_processor()
    robot = make_robot_from_config(cfg.robot)
    dataset = LeRobotDataset(cfg.dataset.repo_id, root=cfg.dataset.root, episodes=[cfg.dataset.episode])

    # Episodes are chunked in dataset v3.0, so keep only the requested episode's frames.
    frames = dataset.hf_dataset.filter(lambda row: row["episode_index"] == cfg.dataset.episode)
    action_column = frames.select_columns(ACTION)

    # Loop invariants, hoisted: joint/action names and the target frame period.
    # NOTE(review): pacing uses `dataset.fps`; `cfg.dataset.fps` is never read here.
    action_names = dataset.features[ACTION]["names"]
    period_s = 1 / dataset.fps

    robot.connect()

    log_say("Replaying episode", cfg.play_sounds, blocking=True)
    for frame_idx in range(len(frames)):
        frame_start_t = time.perf_counter()

        # Re-associate the flat action vector with its named components.
        raw_action = action_column[frame_idx][ACTION]
        action = {name: raw_action[i] for i, name in enumerate(action_names)}

        observation = robot.get_observation()
        processed_action = action_processor((action, observation))
        _ = robot.send_action(processed_action)

        # Sleep off whatever remains of this frame's time budget.
        elapsed_s = time.perf_counter() - frame_start_t
        precise_sleep(max(period_s - elapsed_s, 0.0))

    robot.disconnect()
130
+
131
+
132
def main():
    """CLI entry point: register third-party robot plugins, then run the replay."""
    register_third_party_plugins()
    replay()
135
+
136
+
137
+ if __name__ == "__main__":
138
+ main()