zero7101 committed
Commit 80a28ec · verified · 1 Parent(s): 1d24065

Add files using upload-large-folder tool

lerobot/src/lerobot/processor/__init__.py ADDED
@@ -0,0 +1,131 @@
+ #!/usr/bin/env python
+
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from .batch_processor import AddBatchDimensionProcessorStep
+ from .converters import (
+     batch_to_transition,
+     create_transition,
+     transition_to_batch,
+ )
+ from .core import (
+     EnvAction,
+     EnvTransition,
+     PolicyAction,
+     RobotAction,
+     RobotObservation,
+     TransitionKey,
+ )
+ from .delta_action_processor import MapDeltaActionToRobotActionStep, MapTensorToDeltaActionDictStep
+ from .device_processor import DeviceProcessorStep
+ from .factory import (
+     make_default_processors,
+     make_default_robot_action_processor,
+     make_default_robot_observation_processor,
+     make_default_teleop_action_processor,
+ )
+ from .gym_action_processor import (
+     Numpy2TorchActionProcessorStep,
+     Torch2NumpyActionProcessorStep,
+ )
+ from .hil_processor import (
+     AddTeleopActionAsComplimentaryDataStep,
+     AddTeleopEventsAsInfoStep,
+     GripperPenaltyProcessorStep,
+     ImageCropResizeProcessorStep,
+     InterventionActionProcessorStep,
+     RewardClassifierProcessorStep,
+     TimeLimitProcessorStep,
+ )
+ from .normalize_processor import NormalizerProcessorStep, UnnormalizerProcessorStep, hotswap_stats
+ from .observation_processor import VanillaObservationProcessorStep
+ from .pipeline import (
+     ActionProcessorStep,
+     ComplementaryDataProcessorStep,
+     DataProcessorPipeline,
+     DoneProcessorStep,
+     IdentityProcessorStep,
+     InfoProcessorStep,
+     ObservationProcessorStep,
+     PolicyActionProcessorStep,
+     PolicyProcessorPipeline,
+     ProcessorKwargs,
+     ProcessorStep,
+     ProcessorStepRegistry,
+     RewardProcessorStep,
+     RobotActionProcessorStep,
+     RobotProcessorPipeline,
+     TruncatedProcessorStep,
+ )
+ from .policy_robot_bridge import (
+     PolicyActionToRobotActionProcessorStep,
+     RobotActionToPolicyActionProcessorStep,
+ )
+ from .rename_processor import RenameObservationsProcessorStep
+ from .tokenizer_processor import ActionTokenizerProcessorStep, TokenizerProcessorStep
+
+ __all__ = [
+     "ActionProcessorStep",
+     "AddTeleopActionAsComplimentaryDataStep",
+     "AddTeleopEventsAsInfoStep",
+     "ComplementaryDataProcessorStep",
+     "batch_to_transition",
+     "create_transition",
+     "DeviceProcessorStep",
+     "DoneProcessorStep",
+     "EnvAction",
+     "EnvTransition",
+     "GripperPenaltyProcessorStep",
+     "hotswap_stats",
+     "IdentityProcessorStep",
+     "ImageCropResizeProcessorStep",
+     "InfoProcessorStep",
+     "InterventionActionProcessorStep",
+     "make_default_processors",
+     "make_default_teleop_action_processor",
+     "make_default_robot_action_processor",
+     "make_default_robot_observation_processor",
+     "MapDeltaActionToRobotActionStep",
+     "MapTensorToDeltaActionDictStep",
+     "NormalizerProcessorStep",
+     "Numpy2TorchActionProcessorStep",
+     "ObservationProcessorStep",
+     "PolicyAction",
+     "PolicyActionProcessorStep",
+     "PolicyProcessorPipeline",
+     "ProcessorKwargs",
+     "ProcessorStep",
+     "ProcessorStepRegistry",
+     "RobotAction",
+     "RobotActionProcessorStep",
+     "RobotObservation",
+     "RenameObservationsProcessorStep",
+     "RewardClassifierProcessorStep",
+     "RewardProcessorStep",
+     "DataProcessorPipeline",
+     "TimeLimitProcessorStep",
+     "AddBatchDimensionProcessorStep",
+     "RobotProcessorPipeline",
+     "TokenizerProcessorStep",
+     "ActionTokenizerProcessorStep",
+     "Torch2NumpyActionProcessorStep",
+     "RobotActionToPolicyActionProcessorStep",
+     "PolicyActionToRobotActionProcessorStep",
+     "transition_to_batch",
+     "TransitionKey",
+     "TruncatedProcessorStep",
+     "UnnormalizerProcessorStep",
+     "VanillaObservationProcessorStep",
+ ]
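The exports above form the public surface used by the RL scripts in this commit. A minimal sketch of the transition helpers, assuming `lerobot` is installed and that the exported `create_transition` mirrors the `observation`/`info` keyword call pattern used in `actor.py` below (the state key is hypothetical):

```python
import torch

from lerobot.processor import TransitionKey, create_transition

# Wrap a raw observation dict into a transition, as the actor does after env.reset().
obs = {"observation.state": torch.zeros(1, 6)}  # hypothetical feature key
transition = create_transition(observation=obs, info={})

# Transition fields are addressed through the TransitionKey enum.
print(transition[TransitionKey.OBSERVATION].keys())
```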
lerobot/src/lerobot/rl/actor.py ADDED
@@ -0,0 +1,738 @@
+ #!/usr/bin/env python
+
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Actor server runner for distributed HILSerl robot policy training.
+
+ This script implements the actor component of the distributed HILSerl architecture.
+ It executes the policy in the robot environment, collects experience,
+ and sends transitions to the learner server for policy updates.
+
+ Examples of usage:
+
+ - Start an actor server for real robot training with human-in-the-loop intervention:
+ ```bash
+ python -m lerobot.rl.actor --config_path src/lerobot/configs/train_config_hilserl_so100.json
+ ```
+
+ **NOTE**: The actor server requires a running learner server to connect to. Ensure the learner
+ server is started before launching the actor.
+
+ **NOTE**: Human intervention is key to HILSerl training. Press the upper right trigger button on the
+ gamepad to take control of the robot during training. Initially intervene frequently, then gradually
+ reduce interventions as the policy improves.
+
+ **WORKFLOW**:
+ 1. Determine robot workspace bounds using `lerobot-find-joint-limits`
+ 2. Record demonstrations with `gym_manipulator.py` in record mode
+ 3. Process the dataset and determine camera crops with `crop_dataset_roi.py`
+ 4. Start the learner server with the training configuration
+ 5. Start this actor server with the same configuration
+ 6. Use human interventions to guide policy learning
+
+ For more details on the complete HILSerl training workflow, see:
+ https://github.com/michel-aractingi/lerobot-hilserl-guide
+ """
+
+ import logging
+ import os
+ import time
+ from functools import lru_cache
+ from queue import Empty
+
+ import grpc
+ import torch
+ from torch import nn
+ from torch.multiprocessing import Event, Queue
+
+ from lerobot.cameras import opencv  # noqa: F401
+ from lerobot.configs import parser
+ from lerobot.configs.train import TrainRLServerPipelineConfig
+ from lerobot.policies.factory import make_policy
+ from lerobot.policies.sac.modeling_sac import SACPolicy
+ from lerobot.processor import TransitionKey
+ from lerobot.rl.process import ProcessSignalHandler
+ from lerobot.rl.queue import get_last_item_from_queue
+ from lerobot.robots import so_follower  # noqa: F401
+ from lerobot.teleoperators import gamepad, so_leader  # noqa: F401
+ from lerobot.teleoperators.utils import TeleopEvents
+ from lerobot.transport import services_pb2, services_pb2_grpc
+ from lerobot.transport.utils import (
+     bytes_to_state_dict,
+     grpc_channel_options,
+     python_object_to_bytes,
+     receive_bytes_in_chunks,
+     send_bytes_in_chunks,
+     transitions_to_bytes,
+ )
+ from lerobot.utils.random_utils import set_seed
+ from lerobot.utils.robot_utils import precise_sleep
+ from lerobot.utils.transition import (
+     Transition,
+     move_state_dict_to_device,
+     move_transition_to_device,
+ )
+ from lerobot.utils.utils import (
+     TimerManager,
+     get_safe_torch_device,
+     init_logging,
+ )
+
+ from .gym_manipulator import (
+     create_transition,
+     make_processors,
+     make_robot_env,
+     step_env_and_process_transition,
+ )
+
+ # Main entry point
+
+
+ @parser.wrap()
+ def actor_cli(cfg: TrainRLServerPipelineConfig):
+     cfg.validate()
+     display_pid = False
+     if not use_threads(cfg):
+         import torch.multiprocessing as mp
+
+         mp.set_start_method("spawn")
+         display_pid = True
+
+     # Create logs directory to ensure it exists
+     log_dir = os.path.join(cfg.output_dir, "logs")
+     os.makedirs(log_dir, exist_ok=True)
+     log_file = os.path.join(log_dir, f"actor_{cfg.job_name}.log")
+
+     # Initialize logging with explicit log file
+     init_logging(log_file=log_file, display_pid=display_pid)
+     logging.info(f"Actor logging initialized, writing to {log_file}")
+
+     is_threaded = use_threads(cfg)
+     shutdown_event = ProcessSignalHandler(is_threaded, display_pid=display_pid).shutdown_event
+
+     learner_client, grpc_channel = learner_service_client(
+         host=cfg.policy.actor_learner_config.learner_host,
+         port=cfg.policy.actor_learner_config.learner_port,
+     )
+
+     logging.info("[ACTOR] Establishing connection with Learner")
+     if not establish_learner_connection(learner_client, shutdown_event):
+         logging.error("[ACTOR] Failed to establish connection with Learner")
+         return
+
+     if not use_threads(cfg):
+         # The channel can only be reused when running with threads; child
+         # processes must open their own, so close this one here.
+         grpc_channel.close()
+         grpc_channel = None
+
+     logging.info("[ACTOR] Connection with Learner established")
+
+     parameters_queue = Queue()
+     transitions_queue = Queue()
+     interactions_queue = Queue()
+
+     concurrency_entity = None
+     if use_threads(cfg):
+         from threading import Thread
+
+         concurrency_entity = Thread
+     else:
+         from multiprocessing import Process
+
+         concurrency_entity = Process
+
+     receive_policy_process = concurrency_entity(
+         target=receive_policy,
+         args=(cfg, parameters_queue, shutdown_event, grpc_channel),
+         daemon=True,
+     )
+
+     transitions_process = concurrency_entity(
+         target=send_transitions,
+         args=(cfg, transitions_queue, shutdown_event, grpc_channel),
+         daemon=True,
+     )
+
+     interactions_process = concurrency_entity(
+         target=send_interactions,
+         args=(cfg, interactions_queue, shutdown_event, grpc_channel),
+         daemon=True,
+     )
+
+     transitions_process.start()
+     interactions_process.start()
+     receive_policy_process.start()
+
+     act_with_policy(
+         cfg=cfg,
+         shutdown_event=shutdown_event,
+         parameters_queue=parameters_queue,
+         transitions_queue=transitions_queue,
+         interactions_queue=interactions_queue,
+     )
+     logging.info("[ACTOR] Policy process joined")
+
+     logging.info("[ACTOR] Closing queues")
+     transitions_queue.close()
+     interactions_queue.close()
+     parameters_queue.close()
+
+     transitions_process.join()
+     logging.info("[ACTOR] Transitions process joined")
+     interactions_process.join()
+     logging.info("[ACTOR] Interactions process joined")
+     receive_policy_process.join()
+     logging.info("[ACTOR] Receive policy process joined")
+
+     logging.info("[ACTOR] join queues")
+     transitions_queue.cancel_join_thread()
+     interactions_queue.cancel_join_thread()
+     parameters_queue.cancel_join_thread()
+
+     logging.info("[ACTOR] queues closed")
+
+
+ # Core algorithm functions
+
+
+ def act_with_policy(
+     cfg: TrainRLServerPipelineConfig,
+     shutdown_event: any,  # Event
+     parameters_queue: Queue,
+     transitions_queue: Queue,
+     interactions_queue: Queue,
+ ):
+     """
+     Executes policy interaction within the environment.
+
+     This function rolls out the policy in the environment, collecting interaction data and pushing it
+     to a queue for streaming to the learner. Once an episode is completed, updated network parameters
+     received from the learner are retrieved from a queue and loaded into the network.
+
+     Args:
+         cfg: Configuration settings for the interaction process.
+         shutdown_event: Event to check if the process should shut down.
+         parameters_queue: Queue to receive updated network parameters from the learner.
+         transitions_queue: Queue to send transitions to the learner.
+         interactions_queue: Queue to send interactions to the learner.
+     """
+     # Initialize logging for multiprocessing
+     if not use_threads(cfg):
+         log_dir = os.path.join(cfg.output_dir, "logs")
+         os.makedirs(log_dir, exist_ok=True)
+         log_file = os.path.join(log_dir, f"actor_policy_{os.getpid()}.log")
+         init_logging(log_file=log_file, display_pid=True)
+         logging.info("Actor policy process logging initialized")
+
+     logging.info("make_env online")
+
+     online_env, teleop_device = make_robot_env(cfg=cfg.env)
+     env_processor, action_processor = make_processors(online_env, teleop_device, cfg.env, cfg.policy.device)
+
+     set_seed(cfg.seed)
+     device = get_safe_torch_device(cfg.policy.device, log=True)
+
+     torch.backends.cudnn.benchmark = True
+     torch.backends.cuda.matmul.allow_tf32 = True
+
+     logging.info("make_policy")
+
+     # Instantiate the policy in both the actor and learner processes.
+     # To avoid sending a SACPolicy object through the port, a policy instance is created
+     # on both sides; the learner sends updated parameters every n steps to refresh the actor's copy.
+     policy: SACPolicy = make_policy(
+         cfg=cfg.policy,
+         env_cfg=cfg.env,
+     )
+     policy = policy.eval()
+     assert isinstance(policy, nn.Module)
+
+     obs, info = online_env.reset()
+     env_processor.reset()
+     action_processor.reset()
+
+     # Process initial observation
+     transition = create_transition(observation=obs, info=info)
+     transition = env_processor(transition)
+
+     # NOTE: For the moment we only handle the case of a single environment
+     sum_reward_episode = 0
+     list_transition_to_send_to_learner = []
+     episode_intervention = False
+     # Counters for intervention rate calculation
+     episode_intervention_steps = 0
+     episode_total_steps = 0
+
+     policy_timer = TimerManager("Policy inference", log=False)
+
+     for interaction_step in range(cfg.policy.online_steps):
+         start_time = time.perf_counter()
+         if shutdown_event.is_set():
+             logging.info("[ACTOR] Shutting down act_with_policy")
+             return
+
+         observation = {
+             k: v for k, v in transition[TransitionKey.OBSERVATION].items() if k in cfg.policy.input_features
+         }
+
+         # Time policy inference and check if it meets the FPS requirement
+         with policy_timer:
+             # Extract observation from transition for the policy
+             action = policy.select_action(batch=observation)
+         policy_fps = policy_timer.fps_last
+
+         log_policy_frequency_issue(policy_fps=policy_fps, cfg=cfg, interaction_step=interaction_step)
+
+         # Step the environment and process the resulting transition
+         new_transition = step_env_and_process_transition(
+             env=online_env,
+             transition=transition,
+             action=action,
+             env_processor=env_processor,
+             action_processor=action_processor,
+         )
+
+         # Extract values from the processed transition
+         next_observation = {
+             k: v
+             for k, v in new_transition[TransitionKey.OBSERVATION].items()
+             if k in cfg.policy.input_features
+         }
+
+         # The teleop action is the action that was executed in the environment:
+         # either the action from the teleop device or the action from the policy.
+         executed_action = new_transition[TransitionKey.COMPLEMENTARY_DATA]["teleop_action"]
+
+         reward = new_transition[TransitionKey.REWARD]
+         done = new_transition.get(TransitionKey.DONE, False)
+         truncated = new_transition.get(TransitionKey.TRUNCATED, False)
+
+         sum_reward_episode += float(reward)
+         episode_total_steps += 1
+
+         # Check for intervention from transition info
+         intervention_info = new_transition[TransitionKey.INFO]
+         if intervention_info.get(TeleopEvents.IS_INTERVENTION, False):
+             episode_intervention = True
+             episode_intervention_steps += 1
+
+         complementary_info = {
+             "discrete_penalty": torch.tensor(
+                 [new_transition[TransitionKey.COMPLEMENTARY_DATA].get("discrete_penalty", 0.0)]
+             ),
+         }
+         # Create a transition for the learner (convert to the old format)
+         list_transition_to_send_to_learner.append(
+             Transition(
+                 state=observation,
+                 action=executed_action,
+                 reward=reward,
+                 next_state=next_observation,
+                 done=done,
+                 truncated=truncated,
+                 complementary_info=complementary_info,
+             )
+         )
+
+         # Update transition for the next iteration
+         transition = new_transition
+
+         if done or truncated:
+             logging.info(f"[ACTOR] Global step {interaction_step}: Episode reward: {sum_reward_episode}")
+
+             update_policy_parameters(policy=policy, parameters_queue=parameters_queue, device=device)
+
+             if len(list_transition_to_send_to_learner) > 0:
+                 push_transitions_to_transport_queue(
+                     transitions=list_transition_to_send_to_learner,
+                     transitions_queue=transitions_queue,
+                 )
+                 list_transition_to_send_to_learner = []
+
+             stats = get_frequency_stats(policy_timer)
+             policy_timer.reset()
+
+             # Calculate intervention rate
+             intervention_rate = 0.0
+             if episode_total_steps > 0:
+                 intervention_rate = episode_intervention_steps / episode_total_steps
+
+             # Send the episodic reward to the learner
+             interactions_queue.put(
+                 python_object_to_bytes(
+                     {
+                         "Episodic reward": sum_reward_episode,
+                         "Interaction step": interaction_step,
+                         "Episode intervention": int(episode_intervention),
+                         "Intervention rate": intervention_rate,
+                         **stats,
+                     }
+                 )
+             )
+
+             # Reset intervention counters and environment
+             sum_reward_episode = 0.0
+             episode_intervention = False
+             episode_intervention_steps = 0
+             episode_total_steps = 0
+
+             # Reset environment and processors
+             obs, info = online_env.reset()
+             env_processor.reset()
+             action_processor.reset()
+
+             # Process initial observation
+             transition = create_transition(observation=obs, info=info)
+             transition = env_processor(transition)
+
+         if cfg.env.fps is not None:
+             dt_time = time.perf_counter() - start_time
+             precise_sleep(max(1 / cfg.env.fps - dt_time, 0.0))
+
+
+ # Communication functions - all gRPC/messaging helpers grouped together
+
+
+ def establish_learner_connection(
+     stub: services_pb2_grpc.LearnerServiceStub,
+     shutdown_event: Event,  # type: ignore
+     attempts: int = 30,
+ ):
+     """Establish a connection with the learner.
+
+     Args:
+         stub (services_pb2_grpc.LearnerServiceStub): The stub to use for the connection.
+         shutdown_event (Event): Event signaling that the connection attempts should be aborted.
+         attempts (int): The number of attempts to establish the connection.
+
+     Returns:
+         bool: True if the connection is established, False otherwise.
+     """
+     for _ in range(attempts):
+         if shutdown_event.is_set():
+             logging.info("[ACTOR] Shutting down establish_learner_connection")
+             return False
+
+         # Force a connection attempt and check state
+         try:
+             logging.info("[ACTOR] Send ready message to Learner")
+             if stub.Ready(services_pb2.Empty()) == services_pb2.Empty():
+                 return True
+         except grpc.RpcError as e:
+             logging.error(f"[ACTOR] Waiting for Learner to be ready... {e}")
+             time.sleep(2)
+     return False
+
+
+ @lru_cache(maxsize=1)
+ def learner_service_client(
+     host: str = "127.0.0.1",
+     port: int = 50051,
+ ) -> tuple[services_pb2_grpc.LearnerServiceStub, grpc.Channel]:
+     """
+     Returns a client for the learner service.
+
+     gRPC uses HTTP/2, a binary protocol that multiplexes requests over a single connection,
+     so a single client is created and reused.
+     """
+
+     channel = grpc.insecure_channel(
+         f"{host}:{port}",
+         grpc_channel_options(),
+     )
+     stub = services_pb2_grpc.LearnerServiceStub(channel)
+     logging.info("[ACTOR] Learner service client created")
+     return stub, channel
+
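Because `learner_service_client` is wrapped in `@lru_cache(maxsize=1)`, repeated calls with the same host/port return the same stub and channel objects, which is what lets the helper processes above "recreate" the client cheaply. A short sketch of that behavior (creating an insecure channel is lazy and does not connect until an RPC is issued):

```python
from lerobot.rl.actor import learner_service_client  # module path per this commit

stub_a, channel_a = learner_service_client(host="127.0.0.1", port=50051)
stub_b, channel_b = learner_service_client(host="127.0.0.1", port=50051)

# lru_cache returns the cached tuple: both names point at the same objects.
assert stub_a is stub_b and channel_a is channel_b
```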
+
+ def receive_policy(
+     cfg: TrainRLServerPipelineConfig,
+     parameters_queue: Queue,
+     shutdown_event: Event,  # type: ignore
+     learner_client: services_pb2_grpc.LearnerServiceStub | None = None,
+     grpc_channel: grpc.Channel | None = None,
+ ):
+     """Receive parameters from the learner.
+
+     Args:
+         cfg (TrainRLServerPipelineConfig): The configuration for the actor.
+         parameters_queue (Queue): The queue to receive the parameters.
+         shutdown_event (Event): The event to check if the process should shut down.
+     """
+     logging.info("[ACTOR] Start receiving parameters from the Learner")
+     if not use_threads(cfg):
+         # Create a process-specific log file
+         log_dir = os.path.join(cfg.output_dir, "logs")
+         os.makedirs(log_dir, exist_ok=True)
+         log_file = os.path.join(log_dir, f"actor_receive_policy_{os.getpid()}.log")
+
+         # Initialize logging with explicit log file
+         init_logging(log_file=log_file, display_pid=True)
+         logging.info("Actor receive policy process logging initialized")
+
+         # Set up process handlers to handle the shutdown signal,
+         # but use the shutdown event from the main process
+         _ = ProcessSignalHandler(use_threads=False, display_pid=True)
+
+     if grpc_channel is None or learner_client is None:
+         learner_client, grpc_channel = learner_service_client(
+             host=cfg.policy.actor_learner_config.learner_host,
+             port=cfg.policy.actor_learner_config.learner_port,
+         )
+
+     try:
+         iterator = learner_client.StreamParameters(services_pb2.Empty())
+         receive_bytes_in_chunks(
+             iterator,
+             parameters_queue,
+             shutdown_event,
+             log_prefix="[ACTOR] parameters",
+         )
+     except grpc.RpcError as e:
+         logging.error(f"[ACTOR] gRPC error: {e}")
+
+     if not use_threads(cfg):
+         grpc_channel.close()
+     logging.info("[ACTOR] Receive policy loop stopped")
+
+
+ def send_transitions(
+     cfg: TrainRLServerPipelineConfig,
+     transitions_queue: Queue,
+     shutdown_event: any,  # Event
+     learner_client: services_pb2_grpc.LearnerServiceStub | None = None,
+     grpc_channel: grpc.Channel | None = None,
+ ) -> services_pb2.Empty:
+     """
+     Sends transitions to the learner.
+
+     This function continuously retrieves messages from the queue and processes them as follows:
+
+     - Transition Data:
+         - A batch of transitions (observation, action, reward, next observation) is collected.
+         - Transitions are moved to the CPU and serialized using PyTorch.
+         - The serialized data is wrapped in a `services_pb2.Transition` message and sent to the learner.
+     """
+
+     if not use_threads(cfg):
+         # Create a process-specific log file
+         log_dir = os.path.join(cfg.output_dir, "logs")
+         os.makedirs(log_dir, exist_ok=True)
+         log_file = os.path.join(log_dir, f"actor_transitions_{os.getpid()}.log")
+
+         # Initialize logging with explicit log file
+         init_logging(log_file=log_file, display_pid=True)
+         logging.info("Actor transitions process logging initialized")
+
+     if grpc_channel is None or learner_client is None:
+         learner_client, grpc_channel = learner_service_client(
+             host=cfg.policy.actor_learner_config.learner_host,
+             port=cfg.policy.actor_learner_config.learner_port,
+         )
+
+     try:
+         learner_client.SendTransitions(
+             transitions_stream(
+                 shutdown_event, transitions_queue, cfg.policy.actor_learner_config.queue_get_timeout
+             )
+         )
+     except grpc.RpcError as e:
+         logging.error(f"[ACTOR] gRPC error: {e}")
+
+     logging.info("[ACTOR] Finished streaming transitions")
+
+     if not use_threads(cfg):
+         grpc_channel.close()
+     logging.info("[ACTOR] Transitions process stopped")
+
+
+ def send_interactions(
+     cfg: TrainRLServerPipelineConfig,
+     interactions_queue: Queue,
+     shutdown_event: Event,  # type: ignore
+     learner_client: services_pb2_grpc.LearnerServiceStub | None = None,
+     grpc_channel: grpc.Channel | None = None,
+ ) -> services_pb2.Empty:
+     """
+     Sends interactions to the learner.
+
+     This function continuously retrieves messages from the queue and processes them as follows:
+
+     - Interaction Messages:
+         - Contain useful statistics about episodic rewards and policy timings.
+         - The message is serialized using `pickle` and sent to the learner.
+     """
+
+     if not use_threads(cfg):
+         # Create a process-specific log file
+         log_dir = os.path.join(cfg.output_dir, "logs")
+         os.makedirs(log_dir, exist_ok=True)
+         log_file = os.path.join(log_dir, f"actor_interactions_{os.getpid()}.log")
+
+         # Initialize logging with explicit log file
+         init_logging(log_file=log_file, display_pid=True)
+         logging.info("Actor interactions process logging initialized")
+
+         # Set up process handlers to handle the shutdown signal,
+         # but use the shutdown event from the main process
+         _ = ProcessSignalHandler(use_threads=False, display_pid=True)
+
+     if grpc_channel is None or learner_client is None:
+         learner_client, grpc_channel = learner_service_client(
+             host=cfg.policy.actor_learner_config.learner_host,
+             port=cfg.policy.actor_learner_config.learner_port,
+         )
+
+     try:
+         learner_client.SendInteractions(
+             interactions_stream(
+                 shutdown_event, interactions_queue, cfg.policy.actor_learner_config.queue_get_timeout
+             )
+         )
+     except grpc.RpcError as e:
+         logging.error(f"[ACTOR] gRPC error: {e}")
+
+     logging.info("[ACTOR] Finished streaming interactions")
+
+     if not use_threads(cfg):
+         grpc_channel.close()
+     logging.info("[ACTOR] Interactions process stopped")
+
+
+ def transitions_stream(shutdown_event: Event, transitions_queue: Queue, timeout: float) -> services_pb2.Empty:  # type: ignore
+     while not shutdown_event.is_set():
+         try:
+             message = transitions_queue.get(block=True, timeout=timeout)
+         except Empty:
+             logging.debug("[ACTOR] Transition queue is empty")
+             continue
+
+         yield from send_bytes_in_chunks(
+             message, services_pb2.Transition, log_prefix="[ACTOR] Send transitions"
+         )
+
+     return services_pb2.Empty()
+
+
+ def interactions_stream(
+     shutdown_event: Event,
+     interactions_queue: Queue,
+     timeout: float,  # type: ignore
+ ) -> services_pb2.Empty:
+     while not shutdown_event.is_set():
+         try:
+             message = interactions_queue.get(block=True, timeout=timeout)
+         except Empty:
+             logging.debug("[ACTOR] Interaction queue is empty")
+             continue
+
+         yield from send_bytes_in_chunks(
+             message,
+             services_pb2.InteractionMessage,
+             log_prefix="[ACTOR] Send interactions",
+         )
+
+     return services_pb2.Empty()
+
+
+ # Policy functions
+
+
+ def update_policy_parameters(policy: SACPolicy, parameters_queue: Queue, device):
+     bytes_state_dict = get_last_item_from_queue(parameters_queue, block=False)
+     if bytes_state_dict is not None:
+         logging.info("[ACTOR] Load new parameters from Learner.")
+         state_dicts = bytes_to_state_dict(bytes_state_dict)
+
+         # TODO: check possible encoder parameter synchronization issues:
+         # 1. When shared_encoder=True, stale encoder params are loaded from the actor's state_dict
+         #    instead of the updated encoder params from the critic (which is optimized separately).
+         # 2. When freeze_vision_encoder=True, bandwidth is wasted sending/loading frozen params.
+         # 3. Encoder params need to be handled correctly for both actor and discrete_critic.
+         # Potential fixes:
+         # - Send the critic's encoder state when shared_encoder=True
+         # - Skip encoder params entirely when freeze_vision_encoder=True
+         # - Ensure discrete_critic gets the correct encoder state (currently uses encoder_critic)
+
+         # Load actor state dict
+         actor_state_dict = move_state_dict_to_device(state_dicts["policy"], device=device)
+         policy.actor.load_state_dict(actor_state_dict)
+
+         # Load discrete critic if present
+         if hasattr(policy, "discrete_critic") and "discrete_critic" in state_dicts:
+             discrete_critic_state_dict = move_state_dict_to_device(
+                 state_dicts["discrete_critic"], device=device
+             )
+             policy.discrete_critic.load_state_dict(discrete_critic_state_dict)
+             logging.info("[ACTOR] Loaded discrete critic parameters from Learner.")
+
+
+ # Utility functions
+
+
+ def push_transitions_to_transport_queue(transitions: list, transitions_queue):
+     """Move transitions to the CPU, check them for NaN values, and queue them for the learner.
+
+     Args:
+         transitions: List of transitions to send.
+         transitions_queue: Queue used to transport the serialized transitions to the learner.
+     """
+     transition_to_send_to_learner = []
+     for transition in transitions:
+         tr = move_transition_to_device(transition=transition, device="cpu")
+         for key, value in tr["state"].items():
+             if torch.isnan(value).any():
+                 logging.warning(f"Found NaN values in transition {key}")
+
+         transition_to_send_to_learner.append(tr)
+
+     transitions_queue.put(transitions_to_bytes(transition_to_send_to_learner))
+
+
+ def get_frequency_stats(timer: TimerManager) -> dict[str, float]:
+     """Get the frequency statistics of the policy.
+
+     Args:
+         timer (TimerManager): The timer with collected metrics.
+
+     Returns:
+         dict[str, float]: The frequency statistics of the policy.
+     """
+     stats = {}
+     if timer.count > 1:
+         avg_fps = timer.fps_avg
+         p90_fps = timer.fps_percentile(90)
+         logging.debug(f"[ACTOR] Average policy frame rate: {avg_fps}")
+         logging.debug(f"[ACTOR] Policy frame rate 90th percentile: {p90_fps}")
+         stats = {
+             "Policy frequency [Hz]": avg_fps,
+             "Policy frequency 90th-p [Hz]": p90_fps,
+         }
+     return stats
+
+
+ def log_policy_frequency_issue(policy_fps: float, cfg: TrainRLServerPipelineConfig, interaction_step: int):
+     if policy_fps < cfg.env.fps:
+         logging.warning(
+             f"[ACTOR] Policy FPS {policy_fps:.1f} below required {cfg.env.fps} at step {interaction_step}"
+         )
+
+
+ def use_threads(cfg: TrainRLServerPipelineConfig) -> bool:
+     return cfg.policy.concurrency.actor == "threads"
+
+
+ if __name__ == "__main__":
+     actor_cli()
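For reference, the per-step payload the actor queues for the learner can be constructed standalone. A minimal sketch with dummy tensors, assuming `lerobot` is installed and that `Transition` accepts the keyword fields used in `act_with_policy` above (the feature key is hypothetical):

```python
import torch

from lerobot.utils.transition import Transition

# Dummy single-step transition mirroring the fields filled in act_with_policy;
# state/next_state are dicts of the policy's input features.
transition = Transition(
    state={"observation.state": torch.zeros(1, 6)},  # hypothetical feature key
    action=torch.zeros(1, 6),
    reward=0.0,
    next_state={"observation.state": torch.zeros(1, 6)},
    done=False,
    truncated=False,
    complementary_info={"discrete_penalty": torch.tensor([0.0])},
)
```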
lerobot/src/lerobot/rl/buffer.py ADDED
@@ -0,0 +1,834 @@
+ #!/usr/bin/env python
+
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import functools
+ from collections.abc import Callable, Sequence
+ from contextlib import suppress
+ from typing import TypedDict
+
+ import torch
+ import torch.nn.functional as F  # noqa: N812
+ from tqdm import tqdm
+
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
+ from lerobot.utils.constants import ACTION, DONE, OBS_IMAGE, REWARD
+ from lerobot.utils.transition import Transition
+
+
+ class BatchTransition(TypedDict):
+     state: dict[str, torch.Tensor]
+     action: torch.Tensor
+     reward: torch.Tensor
+     next_state: dict[str, torch.Tensor]
+     done: torch.Tensor
+     truncated: torch.Tensor
+     complementary_info: dict[str, torch.Tensor | float | int] | None = None
+
+
+ def random_crop_vectorized(images: torch.Tensor, output_size: tuple) -> torch.Tensor:
+     """
+     Perform a per-image random crop over a batch of images in a vectorized way:
+     each image in the batch receives its own random crop location.
+     """
+     B, C, H, W = images.shape  # noqa: N806
+     crop_h, crop_w = output_size
+
+     if crop_h > H or crop_w > W:
+         raise ValueError(
+             f"Requested crop size ({crop_h}, {crop_w}) is bigger than the image size ({H}, {W})."
+         )
+
+     tops = torch.randint(0, H - crop_h + 1, (B,), device=images.device)
+     lefts = torch.randint(0, W - crop_w + 1, (B,), device=images.device)
+
+     rows = torch.arange(crop_h, device=images.device).unsqueeze(0) + tops.unsqueeze(1)
+     cols = torch.arange(crop_w, device=images.device).unsqueeze(0) + lefts.unsqueeze(1)
+
+     rows = rows.unsqueeze(2).expand(-1, -1, crop_w)  # (B, crop_h, crop_w)
+     cols = cols.unsqueeze(1).expand(-1, crop_h, -1)  # (B, crop_h, crop_w)
+
+     images_hwcn = images.permute(0, 2, 3, 1)  # (B, H, W, C)
+
+     # Gather pixels
+     cropped_hwcn = images_hwcn[torch.arange(B, device=images.device).view(B, 1, 1), rows, cols, :]
+     # cropped_hwcn => (B, crop_h, crop_w, C)
+
+     cropped = cropped_hwcn.permute(0, 3, 1, 2)  # (B, C, crop_h, crop_w)
+     return cropped
+
+
+ def random_shift(images: torch.Tensor, pad: int = 4):
+     """Vectorized random shift, imgs: (B,C,H,W), pad: #pixels"""
+     _, _, h, w = images.shape
+     images = F.pad(input=images, pad=(pad, pad, pad, pad), mode="replicate")
+     return random_crop_vectorized(images=images, output_size=(h, w))
+
+
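A quick sanity check of the DrQ-style augmentation above: replicate-padding by `pad` pixels and re-cropping to the original size yields a per-image random shift of up to `pad` pixels with the shape unchanged. A minimal sketch, assuming `torch` is installed and the module is importable as `lerobot.rl.buffer`:

```python
import torch

from lerobot.rl.buffer import random_shift  # assumed import path

# Dummy batch of 8 RGB images, 84x84 each.
imgs = torch.rand(8, 3, 84, 84)

# Pad by 4 pixels (replicate) and take a per-image random 84x84 crop.
shifted = random_shift(imgs, pad=4)
assert shifted.shape == imgs.shape
```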
+ class ReplayBuffer:
+     def __init__(
+         self,
+         capacity: int,
+         device: str = "cuda:0",
+         state_keys: Sequence[str] | None = None,
+         image_augmentation_function: Callable | None = None,
+         use_drq: bool = True,
+         storage_device: str = "cpu",
+         optimize_memory: bool = False,
+     ):
+         """
+         Replay buffer for storing transitions.
+         Storage tensors are allocated on the configured storage device when the first transition is added.
+         NOTE: If you encounter memory issues, you can use the `optimize_memory` flag to save memory
+         and the `storage_device` flag to store the buffer on a different device.
+
+         Args:
+             capacity (int): Maximum number of transitions to store in the buffer.
+             device (str): The device where the tensors will be moved when sampling ("cuda:0" or "cpu").
+             state_keys (List[str]): The list of keys that appear in `state` and `next_state`.
+             image_augmentation_function (Optional[Callable]): A function that takes a batch of images
+                 and returns a batch of augmented images. If None, a default augmentation function is used.
+             use_drq (bool): Whether to use the default DrQ-style image augmentation when sampling from the buffer.
+             storage_device: The device (e.g. "cpu" or "cuda:0") where the data will be stored.
+                 Using "cpu" can help save GPU memory.
+             optimize_memory (bool): If True, optimizes memory by not storing duplicate next_states when
+                 they can be derived from states. This is useful for large datasets where next_state[i] = state[i+1].
+         """
+         if capacity <= 0:
+             raise ValueError("Capacity must be greater than 0.")
+
+         self.capacity = capacity
+         self.device = device
+         self.storage_device = storage_device
+         self.position = 0
+         self.size = 0
+         self.initialized = False
+         self.optimize_memory = optimize_memory
+
+         # Track episode boundaries for memory optimization
+         self.episode_ends = torch.zeros(capacity, dtype=torch.bool, device=storage_device)
+
+         # If no state_keys provided, default to an empty list
+         self.state_keys = state_keys if state_keys is not None else []
+
+         self.image_augmentation_function = image_augmentation_function
+
+         if image_augmentation_function is None:
+             base_function = functools.partial(random_shift, pad=4)
+             self.image_augmentation_function = torch.compile(base_function)
+         self.use_drq = use_drq
+
+     def _initialize_storage(
+         self,
+         state: dict[str, torch.Tensor],
+         action: torch.Tensor,
+         complementary_info: dict[str, torch.Tensor] | None = None,
+     ):
+         """Initialize the storage tensors based on the first transition."""
+         # Determine shapes from the first transition
+         state_shapes = {key: val.squeeze(0).shape for key, val in state.items()}
+         action_shape = action.squeeze(0).shape
+
+         # Pre-allocate tensors for storage
+         self.states = {
+             key: torch.empty((self.capacity, *shape), device=self.storage_device)
+             for key, shape in state_shapes.items()
+         }
+         self.actions = torch.empty((self.capacity, *action_shape), device=self.storage_device)
+         self.rewards = torch.empty((self.capacity,), device=self.storage_device)
+
+         if not self.optimize_memory:
+             # Standard approach: store states and next_states separately
+             self.next_states = {
+                 key: torch.empty((self.capacity, *shape), device=self.storage_device)
+                 for key, shape in state_shapes.items()
+             }
+         else:
+             # Memory-optimized approach: don't allocate a next_states buffer;
+             # just keep a reference to states for a consistent API
+             self.next_states = self.states  # Just a reference for API consistency
+
+         self.dones = torch.empty((self.capacity,), dtype=torch.bool, device=self.storage_device)
+         self.truncateds = torch.empty((self.capacity,), dtype=torch.bool, device=self.storage_device)
+
+         # Initialize storage for complementary_info
+         self.has_complementary_info = complementary_info is not None
+         self.complementary_info_keys = []
+         self.complementary_info = {}
+
+         if self.has_complementary_info:
+             self.complementary_info_keys = list(complementary_info.keys())
+             # Pre-allocate tensors for each key in complementary_info
+             for key, value in complementary_info.items():
+                 if isinstance(value, torch.Tensor):
+                     value_shape = value.squeeze(0).shape
+                     self.complementary_info[key] = torch.empty(
+                         (self.capacity, *value_shape), device=self.storage_device
+                     )
+                 elif isinstance(value, (int | float)):
+                     # Handle scalar values similar to reward
+                     self.complementary_info[key] = torch.empty((self.capacity,), device=self.storage_device)
+                 else:
+                     raise ValueError(f"Unsupported type {type(value)} for complementary_info[{key}]")
+
+         self.initialized = True
+
+     def __len__(self):
+         return self.size
+
+     def add(
+         self,
+         state: dict[str, torch.Tensor],
+         action: torch.Tensor,
+         reward: float,
+         next_state: dict[str, torch.Tensor],
+         done: bool,
+         truncated: bool,
+         complementary_info: dict[str, torch.Tensor] | None = None,
+     ):
+         """Saves a transition, ensuring tensors are stored on the designated storage device."""
+         # Initialize storage if this is the first transition
+         if not self.initialized:
+             self._initialize_storage(state=state, action=action, complementary_info=complementary_info)
+
+         # Store the transition in pre-allocated tensors
+         for key in self.states:
+             self.states[key][self.position].copy_(state[key].squeeze(dim=0))
+
+             if not self.optimize_memory:
+                 # Only store next_states if not optimizing memory
+                 self.next_states[key][self.position].copy_(next_state[key].squeeze(dim=0))
+
+         self.actions[self.position].copy_(action.squeeze(dim=0))
+         self.rewards[self.position] = reward
+         self.dones[self.position] = done
+         self.truncateds[self.position] = truncated
+
+         # Handle complementary_info if provided and storage is initialized
+         if complementary_info is not None and self.has_complementary_info:
+             # Store the complementary_info
+             for key in self.complementary_info_keys:
+                 if key in complementary_info:
+                     value = complementary_info[key]
+                     if isinstance(value, torch.Tensor):
+                         self.complementary_info[key][self.position].copy_(value.squeeze(dim=0))
+                     elif isinstance(value, (int | float)):
+                         self.complementary_info[key][self.position] = value
+
+         self.position = (self.position + 1) % self.capacity
+         self.size = min(self.size + 1, self.capacity)
+
+     def sample(self, batch_size: int) -> BatchTransition:
+         """Sample a random batch of transitions and collate them into batched tensors."""
+         if not self.initialized:
+             raise RuntimeError("Cannot sample from an empty buffer. Add transitions first.")
+
+         batch_size = min(batch_size, self.size)
+         high = max(0, self.size - 1) if self.optimize_memory and self.size < self.capacity else self.size
+
+         # Random indices for sampling - create on the same device as storage
+         idx = torch.randint(low=0, high=high, size=(batch_size,), device=self.storage_device)
+
+         # Identify image keys that need augmentation
+         image_keys = [k for k in self.states if k.startswith(OBS_IMAGE)] if self.use_drq else []
+
+         # Create batched state and next_state
+         batch_state = {}
+         batch_next_state = {}
+
+         # First pass: load all state tensors to the target device
+         for key in self.states:
+             batch_state[key] = self.states[key][idx].to(self.device)
+
+             if not self.optimize_memory:
+                 # Standard approach - load next_states directly
+                 batch_next_state[key] = self.next_states[key][idx].to(self.device)
+             else:
+                 # Memory-optimized approach - get next_state from the next index
+                 next_idx = (idx + 1) % self.capacity
+                 batch_next_state[key] = self.states[key][next_idx].to(self.device)
+
+         # Apply image augmentation in a batched way if needed
+         if self.use_drq and image_keys:
+             # Concatenate all images from state and next_state
+             all_images = []
+             for key in image_keys:
+                 all_images.append(batch_state[key])
+                 all_images.append(batch_next_state[key])
+
+             # Optimization: batch all images and apply the augmentation once
+             all_images_tensor = torch.cat(all_images, dim=0)
+             augmented_images = self.image_augmentation_function(all_images_tensor)
+
+             # Split the augmented images back to their sources
+             for i, key in enumerate(image_keys):
+                 # Calculate offsets for the current image key: for each key there are
+                 # 2*batch_size images (batch_size for states, batch_size for next_states).
+                 # States start at index i*2*batch_size and take up batch_size slots.
+                 batch_state[key] = augmented_images[i * 2 * batch_size : (i * 2 + 1) * batch_size]
+                 # Next states start after the states at index (i*2+1)*batch_size and also take up batch_size slots.
+                 batch_next_state[key] = augmented_images[(i * 2 + 1) * batch_size : (i + 1) * 2 * batch_size]
+
+         # Sample other tensors
+         batch_actions = self.actions[idx].to(self.device)
+         batch_rewards = self.rewards[idx].to(self.device)
+         batch_dones = self.dones[idx].to(self.device).float()
+         batch_truncateds = self.truncateds[idx].to(self.device).float()
+
+         # Sample complementary_info if available
+         batch_complementary_info = None
+         if self.has_complementary_info:
+             batch_complementary_info = {}
+             for key in self.complementary_info_keys:
+                 batch_complementary_info[key] = self.complementary_info[key][idx].to(self.device)
+
+         return BatchTransition(
+             state=batch_state,
+             action=batch_actions,
+             reward=batch_rewards,
+             next_state=batch_next_state,
+             done=batch_dones,
+             truncated=batch_truncateds,
+             complementary_info=batch_complementary_info,
+         )
+
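A minimal usage sketch for `ReplayBuffer.add` and `ReplayBuffer.sample` as defined above, assuming the same import path; the state key and shapes are illustrative, and `use_drq=False` sidesteps the image-key requirement:

```python
import torch

from lerobot.rl.buffer import ReplayBuffer  # assumed import path

buffer = ReplayBuffer(capacity=1000, device="cpu", storage_device="cpu", use_drq=False)

# Add a single dummy transition; storage tensors are allocated lazily on first add.
buffer.add(
    state={"observation.state": torch.zeros(1, 6)},  # hypothetical state key
    action=torch.zeros(1, 6),
    reward=1.0,
    next_state={"observation.state": torch.ones(1, 6)},
    done=False,
    truncated=False,
)

batch = buffer.sample(batch_size=32)  # capped at len(buffer), here 1
print(batch["action"].shape)
```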
+     def get_iterator(
+         self,
+         batch_size: int,
+         async_prefetch: bool = True,
+         queue_size: int = 2,
+     ):
+         """
+         Creates an infinite iterator that yields batches of transitions.
+         Will automatically restart when the internal iterator is exhausted.
+
+         Args:
+             batch_size (int): Size of batches to sample
+             async_prefetch (bool): Whether to use asynchronous prefetching with threads (default: True)
+             queue_size (int): Number of batches to prefetch (default: 2)
+
+         Yields:
+             BatchTransition: Batched transitions
+         """
+         while True:  # Create an infinite loop
+             if async_prefetch:
+                 # Use the asynchronous (threaded) iterator
+                 iterator = self._get_async_iterator(queue_size=queue_size, batch_size=batch_size)
+             else:
+                 iterator = self._get_naive_iterator(batch_size=batch_size, queue_size=queue_size)
+
+             # Yield all items from the iterator
+             with suppress(StopIteration):
+                 yield from iterator
+
+     def _get_async_iterator(self, batch_size: int, queue_size: int = 2):
+         """
+         Create an iterator that continuously yields prefetched batches in a
+         background thread. The design is intentionally simple and avoids busy
+         waiting / complex state management.
+
+         Args:
+             batch_size (int): Size of batches to sample.
+             queue_size (int): Maximum number of prefetched batches to keep in
+                 memory.
+
+         Yields:
+             BatchTransition: A batch sampled from the replay buffer.
+         """
+         import queue
+         import threading
+
+         data_queue: queue.Queue = queue.Queue(maxsize=queue_size)
+         shutdown_event = threading.Event()
+
+         def producer() -> None:
+             """Continuously put sampled batches into the queue until shutdown."""
+             while not shutdown_event.is_set():
+                 try:
+                     batch = self.sample(batch_size)
+                     # The timeout ensures the thread unblocks if the queue is full
+                     # and the shutdown event gets set meanwhile.
+                     data_queue.put(batch, block=True, timeout=0.5)
+                 except queue.Full:
+                     # Queue is full - loop again (will re-check shutdown_event)
+                     continue
+                 except Exception:
+                     # Surface any unexpected error and terminate the producer.
+                     shutdown_event.set()
+
+         producer_thread = threading.Thread(target=producer, daemon=True)
+         producer_thread.start()
+
+         try:
+             while not shutdown_event.is_set():
+                 try:
+                     yield data_queue.get(block=True)
+                 except Exception:
+                     # If the producer already set the shutdown flag we exit.
+                     if shutdown_event.is_set():
+                         break
+         finally:
+             shutdown_event.set()
+             # Drain the queue quickly to help the thread exit if it's blocked on `put`.
+             while not data_queue.empty():
+                 _ = data_queue.get_nowait()
+             # Give the producer thread a bit of time to finish.
+             producer_thread.join(timeout=1.0)
+
+     def _get_naive_iterator(self, batch_size: int, queue_size: int = 2):
+         """
+         Creates a simple non-threaded iterator that yields batches.
+
+         Args:
+             batch_size (int): Size of batches to sample
+             queue_size (int): Number of initial batches to prefetch
+
+         Yields:
+             BatchTransition: Batch transitions
+         """
+         import collections
+
+         queue = collections.deque()
+
+         def enqueue(n):
+             for _ in range(n):
+                 data = self.sample(batch_size)
+                 queue.append(data)
+
+         enqueue(queue_size)
+         while queue:
+             yield queue.popleft()
+             enqueue(1)
+
414
+ @classmethod
415
+ def from_lerobot_dataset(
416
+ cls,
417
+ lerobot_dataset: LeRobotDataset,
418
+ device: str = "cuda:0",
419
+ state_keys: Sequence[str] | None = None,
420
+ capacity: int | None = None,
421
+ image_augmentation_function: Callable | None = None,
422
+ use_drq: bool = True,
423
+ storage_device: str = "cpu",
424
+ optimize_memory: bool = False,
425
+ ) -> "ReplayBuffer":
426
+ """
427
+ Convert a LeRobotDataset into a ReplayBuffer.
428
+
429
+ Args:
430
+ lerobot_dataset (LeRobotDataset): The dataset to convert.
431
+ device (str): The device for sampling tensors. Defaults to "cuda:0".
432
+ state_keys (Sequence[str] | None): The list of keys that appear in `state` and `next_state`.
433
+ capacity (int | None): Buffer capacity. If None, uses dataset length.
434
+ action_mask (Sequence[int] | None): Indices of action dimensions to keep.
435
+ image_augmentation_function (Callable | None): Function for image augmentation.
436
+ If None, uses default random shift with pad=4.
437
+ use_drq (bool): Whether to use DrQ image augmentation when sampling.
438
+ storage_device (str): Device for storing tensor data. Using "cpu" saves GPU memory.
439
+ optimize_memory (bool): If True, reduces memory usage by not duplicating state data.
440
+
441
+ Returns:
442
+ ReplayBuffer: The replay buffer with dataset transitions.
443
+ """
444
+ if capacity is None:
445
+ capacity = len(lerobot_dataset)
446
+
447
+ if capacity < len(lerobot_dataset):
448
+ raise ValueError(
449
+ "The capacity of the ReplayBuffer must be greater than or equal to the length of the LeRobotDataset."
450
+ )
451
+
452
+ # Create replay buffer with image augmentation and DrQ settings
453
+ replay_buffer = cls(
454
+ capacity=capacity,
455
+ device=device,
456
+ state_keys=state_keys,
457
+ image_augmentation_function=image_augmentation_function,
458
+ use_drq=use_drq,
459
+ storage_device=storage_device,
460
+ optimize_memory=optimize_memory,
461
+ )
462
+
463
+ # Convert dataset to transitions
464
+ list_transition = cls._lerobotdataset_to_transitions(dataset=lerobot_dataset, state_keys=state_keys)
465
+
466
+ # Initialize the buffer with the first transition to set up storage tensors
467
+ if list_transition:
468
+ first_transition = list_transition[0]
469
+ first_state = {k: v.to(device) for k, v in first_transition["state"].items()}
470
+ first_action = first_transition[ACTION].to(device)
471
+
472
+ # Get complementary info if available
473
+ first_complementary_info = None
474
+ if (
475
+ "complementary_info" in first_transition
476
+ and first_transition["complementary_info"] is not None
477
+ ):
478
+ first_complementary_info = {
479
+ k: v.to(device) for k, v in first_transition["complementary_info"].items()
480
+ }
481
+
482
+ replay_buffer._initialize_storage(
483
+ state=first_state, action=first_action, complementary_info=first_complementary_info
484
+ )
485
+
486
+ # Fill the buffer with all transitions
487
+ for data in list_transition:
488
+ for k, v in data.items():
489
+ if isinstance(v, dict):
490
+ for key, tensor in v.items():
491
+ v[key] = tensor.to(storage_device)
492
+ elif isinstance(v, torch.Tensor):
493
+ data[k] = v.to(storage_device)
494
+
495
+ action = data[ACTION]
496
+
497
+ replay_buffer.add(
498
+ state=data["state"],
499
+ action=action,
500
+ reward=data["reward"],
501
+ next_state=data["next_state"],
502
+ done=data["done"],
503
+ truncated=False, # NOTE: Truncation are not supported yet in lerobot dataset
504
+ complementary_info=data.get("complementary_info", None),
505
+ )
506
+
507
+ return replay_buffer
508
+
509
+ def to_lerobot_dataset(
510
+ self,
511
+ repo_id: str,
512
+ fps=1,
513
+ root=None,
514
+ task_name="from_replay_buffer",
515
+ ) -> LeRobotDataset:
516
+ """
517
+ Converts all transitions in this ReplayBuffer into a single LeRobotDataset object.
518
+ """
519
+ if self.size == 0:
520
+ raise ValueError("The replay buffer is empty. Cannot convert to a dataset.")
521
+
522
+ # Create features dictionary for the dataset
523
+ features = {
524
+ "index": {"dtype": "int64", "shape": [1]}, # global index across episodes
525
+ "episode_index": {"dtype": "int64", "shape": [1]}, # which episode
526
+ "frame_index": {"dtype": "int64", "shape": [1]}, # index inside an episode
527
+ "timestamp": {"dtype": "float32", "shape": [1]}, # for now we store dummy
528
+ "task_index": {"dtype": "int64", "shape": [1]},
529
+ }
530
+
531
+ # Add "action"
532
+ sample_action = self.actions[0]
533
+ act_info = guess_feature_info(t=sample_action, name=ACTION)
534
+ features[ACTION] = act_info
535
+
536
+ # Add "reward" and "done"
537
+ features[REWARD] = {"dtype": "float32", "shape": (1,)}
538
+ features[DONE] = {"dtype": "bool", "shape": (1,)}
539
+
540
+ # Add state keys
541
+ for key in self.states:
542
+ sample_val = self.states[key][0]
543
+ f_info = guess_feature_info(t=sample_val, name=key)
544
+ features[key] = f_info
545
+
546
+ # Add complementary_info keys if available
547
+ if self.has_complementary_info:
548
+ for key in self.complementary_info_keys:
549
+ sample_val = self.complementary_info[key][0]
550
+ if isinstance(sample_val, torch.Tensor) and sample_val.ndim == 0:
551
+ sample_val = sample_val.unsqueeze(0)
552
+ f_info = guess_feature_info(t=sample_val, name=f"complementary_info.{key}")
553
+ features[f"complementary_info.{key}"] = f_info
554
+
555
+ # Create an empty LeRobotDataset
556
+ lerobot_dataset = LeRobotDataset.create(
557
+ repo_id=repo_id,
558
+ fps=fps,
559
+ root=root,
560
+ robot_type=None,
561
+ features=features,
562
+ use_videos=True,
563
+ )
564
+
565
+ # Start writing images if needed
566
+ lerobot_dataset.start_image_writer(num_processes=0, num_threads=3)
567
+
568
+ # Convert transitions into episodes and frames
569
+
570
+ for idx in range(self.size):
571
+ actual_idx = (self.position - self.size + idx) % self.capacity
572
+
573
+ frame_dict = {}
574
+
575
+ # Fill the data for state keys
576
+ for key in self.states:
577
+ frame_dict[key] = self.states[key][actual_idx].cpu()
578
+
579
+ # Fill action, reward, done
580
+ frame_dict[ACTION] = self.actions[actual_idx].cpu()
581
+ frame_dict[REWARD] = torch.tensor([self.rewards[actual_idx]], dtype=torch.float32).cpu()
582
+ frame_dict[DONE] = torch.tensor([self.dones[actual_idx]], dtype=torch.bool).cpu()
583
+ frame_dict["task"] = task_name
584
+
585
+ # Add complementary_info if available
586
+ if self.has_complementary_info:
587
+ for key in self.complementary_info_keys:
588
+ val = self.complementary_info[key][actual_idx]
589
+ # Convert tensors to CPU
590
+ if isinstance(val, torch.Tensor):
591
+ if val.ndim == 0:
592
+ val = val.unsqueeze(0)
593
+ frame_dict[f"complementary_info.{key}"] = val.cpu()
594
+ # Non-tensor values can be used directly
595
+ else:
596
+ frame_dict[f"complementary_info.{key}"] = val
597
+
598
+ # Add to the dataset's buffer
599
+ lerobot_dataset.add_frame(frame_dict)
600
+
601
+ # If we reached an episode boundary, save the episode
602
+ if self.dones[actual_idx] or self.truncateds[actual_idx]:
603
+ lerobot_dataset.save_episode()
604
+
605
+ # Save any remaining frames in the buffer
606
+ if lerobot_dataset.episode_buffer["size"] > 0:
607
+ lerobot_dataset.save_episode()
608
+
609
+ lerobot_dataset.stop_image_writer()
610
+ lerobot_dataset.finalize()
611
+
612
+ return lerobot_dataset
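+
+ # A minimal usage sketch (hypothetical repo id, assumes a populated buffer):
+ #
+ #     dataset = buffer.to_lerobot_dataset(repo_id="user/replay_export", fps=10)
+ #     print(len(dataset))  # == buffer.size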
613
+
614
+ @staticmethod
615
+ def _lerobotdataset_to_transitions(
616
+ dataset: LeRobotDataset,
617
+ state_keys: Sequence[str] | None = None,
618
+ ) -> list[Transition]:
619
+ """
620
+ Convert a LeRobotDataset into a list of RL (s, a, r, s', done) transitions.
621
+
622
+ Args:
623
+ dataset (LeRobotDataset):
624
+ The dataset to convert. Each item in the dataset is expected to have
625
+ at least the following keys:
626
+ {
627
+ "action": ...
628
+ "next.reward": ...
629
+ "next.done": ...
630
+ "episode_index": ...
631
+ }
632
+ plus whatever your 'state_keys' specify.
633
+
634
+ state_keys (Sequence[str] | None):
635
+ The dataset keys to include in 'state' and 'next_state'. Their names
636
+ will be kept as-is in the output transitions. E.g.
637
+ ["observation.state", "observation.environment_state"].
638
+ If None, a ValueError is raised.
639
+
640
+ Returns:
641
+ transitions (List[Transition]):
642
+ A list of Transition dictionaries with the same length as `dataset`.
643
+ """
644
+ if state_keys is None:
645
+ raise ValueError("State keys must be provided when converting LeRobotDataset to Transitions.")
646
+
647
+ transitions = []
648
+ num_frames = len(dataset)
649
+
650
+ # Check if the dataset has "next.done" key
651
+ sample = dataset[0]
652
+ has_done_key = DONE in sample
653
+
654
+ # Check for complementary_info keys
655
+ complementary_info_keys = [key for key in sample if key.startswith("complementary_info.")]
656
+ has_complementary_info = len(complementary_info_keys) > 0
657
+
658
+ # If not, we need to infer it from episode boundaries
659
+ if not has_done_key:
660
+ print("'next.done' key not found in dataset. Inferring from episode boundaries...")
661
+
662
+ for i in tqdm(range(num_frames)):
663
+ current_sample = dataset[i]
664
+
665
+ # ----- 1) Current state -----
666
+ current_state: dict[str, torch.Tensor] = {}
667
+ for key in state_keys:
668
+ val = current_sample[key]
669
+ current_state[key] = val.unsqueeze(0) # Add batch dimension
670
+
671
+ # ----- 2) Action -----
672
+ action = current_sample[ACTION].unsqueeze(0) # Add batch dimension
673
+
674
+ # ----- 3) Reward and done -----
675
+ reward = float(current_sample[REWARD].item()) # ensure float
676
+
677
+ # Determine done flag - use next.done if available, otherwise infer from episode boundaries
678
+ if has_done_key:
679
+ done = bool(current_sample[DONE].item()) # ensure bool
680
+ else:
681
+ # If this is the last frame or if next frame is in a different episode, mark as done
682
+ done = False
683
+ if i == num_frames - 1:
684
+ done = True
685
+ elif i < num_frames - 1:
686
+ next_sample = dataset[i + 1]
687
+ if next_sample["episode_index"] != current_sample["episode_index"]:
688
+ done = True
689
+
690
+ # TODO: (azouitine) Handle truncation (using the same value as done for now)
691
+ truncated = done
692
+
693
+ # ----- 4) Next state -----
694
+ # If not done and the next sample is in the same episode, we pull the next sample's state.
695
+ # Otherwise (done=True or next sample crosses to a new episode), next_state = current_state.
696
+ next_state = current_state # default
697
+ if not done and (i < num_frames - 1):
698
+ next_sample = dataset[i + 1]
699
+ if next_sample["episode_index"] == current_sample["episode_index"]:
700
+ # Build next_state from the same keys
701
+ next_state_data: dict[str, torch.Tensor] = {}
702
+ for key in state_keys:
703
+ val = next_sample[key]
704
+ next_state_data[key] = val.unsqueeze(0) # Add batch dimension
705
+ next_state = next_state_data
706
+
707
+ # ----- 5) Complementary info (if available) -----
708
+ complementary_info = None
709
+ if has_complementary_info:
710
+ complementary_info = {}
711
+ for key in complementary_info_keys:
712
+ # Strip the "complementary_info." prefix to get the actual key
713
+ clean_key = key[len("complementary_info.") :]
714
+ val = current_sample[key]
715
+ # Handle tensor and non-tensor values differently
716
+ if isinstance(val, torch.Tensor):
717
+ complementary_info[clean_key] = val.unsqueeze(0) # Add batch dimension
718
+ else:
719
+ # TODO: (azouitine) Check if it's necessary to convert to tensor
720
+ # For non-tensor values, use directly
721
+ complementary_info[clean_key] = val
722
+
723
+ # ----- Construct the Transition -----
724
+ transition = Transition(
725
+ state=current_state,
726
+ action=action,
727
+ reward=reward,
728
+ next_state=next_state,
729
+ done=done,
730
+ truncated=truncated,
731
+ complementary_info=complementary_info,
732
+ )
733
+ transitions.append(transition)
734
+
735
+ return transitions
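+
+ # Illustrative shape of one returned Transition (D = state dim, A = action dim):
+ #
+ #     {
+ #         "state": {"observation.state": Tensor[1, D]},
+ #         "action": Tensor[1, A],
+ #         "reward": float,
+ #         "next_state": {"observation.state": Tensor[1, D]},
+ #         "done": bool,
+ #         "truncated": bool,
+ #         "complementary_info": dict | None,
+ #     }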
736
+
737
+
738
+ # Utility function to guess shapes/dtypes from a tensor
739
+ def guess_feature_info(t, name: str):
740
+ """
741
+ Return a dictionary with the 'dtype' and 'shape' for a given tensor or scalar value.
742
+ If the value has a 3-D (C, H, W) shape, it is treated as an 'image'.
743
+ Otherwise the dtype defaults to 'float32' for numeric data.
744
+ """
745
+
746
+ shape = tuple(t.shape)
747
+ # Basic guess: if we have exactly 3 dims and shape[0] in {1, 3}, guess 'image'
748
+ if len(shape) == 3 and shape[0] in [1, 3]:
749
+ return {
750
+ "dtype": "image",
751
+ "shape": shape,
752
+ }
753
+ else:
754
+ # Otherwise treat as numeric
755
+ return {
756
+ "dtype": "float32",
757
+ "shape": shape,
758
+ }
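+
+ # For example (illustrative): a (3, 128, 128) tensor yields
+ # {"dtype": "image", "shape": (3, 128, 128)}, while a (7,) tensor
+ # yields {"dtype": "float32", "shape": (7,)}.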
759
+
760
+
761
+ def concatenate_batch_transitions(
762
+ left_batch_transitions: BatchTransition, right_batch_transition: BatchTransition
763
+ ) -> BatchTransition:
764
+ """
765
+ Concatenates two BatchTransition objects into one.
766
+
767
+ This function merges the right BatchTransition into the left one by concatenating
768
+ all corresponding tensors along dimension 0. The operation modifies the left_batch_transitions
769
+ in place and also returns it.
770
+
771
+ Args:
772
+ left_batch_transitions (BatchTransition): The first batch to concatenate and the one
773
+ that will be modified in place.
774
+ right_batch_transition (BatchTransition): The second batch to append to the first one.
775
+
776
+ Returns:
777
+ BatchTransition: The concatenated batch (same object as left_batch_transitions).
778
+
779
+ Warning:
780
+ This function modifies the left_batch_transitions object in place.
781
+ """
782
+ # Concatenate state fields
783
+ left_batch_transitions["state"] = {
784
+ key: torch.cat(
785
+ [left_batch_transitions["state"][key], right_batch_transition["state"][key]],
786
+ dim=0,
787
+ )
788
+ for key in left_batch_transitions["state"]
789
+ }
790
+
791
+ # Concatenate basic fields
792
+ left_batch_transitions[ACTION] = torch.cat(
793
+ [left_batch_transitions[ACTION], right_batch_transition[ACTION]], dim=0
794
+ )
795
+ left_batch_transitions["reward"] = torch.cat(
796
+ [left_batch_transitions["reward"], right_batch_transition["reward"]], dim=0
797
+ )
798
+
799
+ # Concatenate next_state fields
800
+ left_batch_transitions["next_state"] = {
801
+ key: torch.cat(
802
+ [left_batch_transitions["next_state"][key], right_batch_transition["next_state"][key]],
803
+ dim=0,
804
+ )
805
+ for key in left_batch_transitions["next_state"]
806
+ }
807
+
808
+ # Concatenate done and truncated fields
809
+ left_batch_transitions["done"] = torch.cat(
810
+ [left_batch_transitions["done"], right_batch_transition["done"]], dim=0
811
+ )
812
+ left_batch_transitions["truncated"] = torch.cat(
813
+ [left_batch_transitions["truncated"], right_batch_transition["truncated"]],
814
+ dim=0,
815
+ )
816
+
817
+ # Handle complementary_info
818
+ left_info = left_batch_transitions.get("complementary_info")
819
+ right_info = right_batch_transition.get("complementary_info")
820
+
821
+ # Only process if right_info exists
822
+ if right_info is not None:
823
+ # Initialize left complementary_info if needed
824
+ if left_info is None:
825
+ left_batch_transitions["complementary_info"] = right_info
826
+ else:
827
+ # Concatenate each field
828
+ for key in right_info:
829
+ if key in left_info:
830
+ left_info[key] = torch.cat([left_info[key], right_info[key]], dim=0)
831
+ else:
832
+ left_info[key] = right_info[key]
833
+
834
+ return left_batch_transitions
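+
+ # A minimal usage sketch (assumes two sampled batches with matching keys):
+ #
+ #     merged = concatenate_batch_transitions(batch_a, batch_b)
+ #     assert merged["action"].shape[0] == batch_a["action"].shape[0] + batch_b["action"].shape[0]
+ #     # Note: `merged` is the same object as `batch_a` (modified in place).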
lerobot/src/lerobot/rl/crop_dataset_roi.py ADDED
@@ -0,0 +1,326 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import json
19
+ from copy import deepcopy
20
+ from pathlib import Path
21
+
22
+ import cv2
23
+ import torch
24
+ import torchvision.transforms.functional as F # type: ignore # noqa: N812
25
+ from tqdm import tqdm # type: ignore
26
+
27
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
28
+ from lerobot.utils.constants import DONE, REWARD
29
+
30
+
31
+ def select_rect_roi(img):
32
+ """
33
+ Allows the user to draw a rectangular ROI on the image.
34
+
35
+ The user must click and drag to draw the rectangle.
36
+ - While dragging, the rectangle is dynamically drawn.
37
+ - On mouse button release, the rectangle is fixed.
38
+ - Press 'c' to confirm the selection.
39
+ - Press 'r' to reset the selection.
40
+ - Press ESC to cancel.
41
+
42
+ Returns:
43
+ A tuple (top, left, height, width) representing the rectangular ROI,
44
+ or None if no valid ROI is selected.
45
+ """
46
+ # Create a working copy of the image
47
+ clone = img.copy()
48
+ working_img = clone.copy()
49
+
50
+ roi = None # Will store the final ROI as (top, left, height, width)
51
+ drawing = False
52
+ index_x, index_y = -1, -1 # Initial click coordinates
53
+
54
+ def mouse_callback(event, x, y, flags, param):
55
+ nonlocal index_x, index_y, drawing, roi, working_img
56
+
57
+ if event == cv2.EVENT_LBUTTONDOWN:
58
+ # Start drawing: record starting coordinates
59
+ drawing = True
60
+ index_x, index_y = x, y
61
+
62
+ elif event == cv2.EVENT_MOUSEMOVE:
63
+ if drawing:
64
+ # Compute the top-left and bottom-right corners regardless of drag direction
65
+ top = min(index_y, y)
66
+ left = min(index_x, x)
67
+ bottom = max(index_y, y)
68
+ right = max(index_x, x)
69
+ # Show a temporary image with the current rectangle drawn
70
+ temp = working_img.copy()
71
+ cv2.rectangle(temp, (left, top), (right, bottom), (0, 255, 0), 2)
72
+ cv2.imshow("Select ROI", temp)
73
+
74
+ elif event == cv2.EVENT_LBUTTONUP:
75
+ # Finish drawing
76
+ drawing = False
77
+ top = min(index_y, y)
78
+ left = min(index_x, x)
79
+ bottom = max(index_y, y)
80
+ right = max(index_x, x)
81
+ height = bottom - top
82
+ width = right - left
83
+ roi = (top, left, height, width) # (top, left, height, width)
84
+ # Draw the final rectangle on the working image and display it
85
+ working_img = clone.copy()
86
+ cv2.rectangle(working_img, (left, top), (right, bottom), (0, 255, 0), 2)
87
+ cv2.imshow("Select ROI", working_img)
88
+
89
+ # Create the window and set the callback
90
+ cv2.namedWindow("Select ROI")
91
+ cv2.setMouseCallback("Select ROI", mouse_callback)
92
+ cv2.imshow("Select ROI", working_img)
93
+
94
+ print("Instructions for ROI selection:")
95
+ print(" - Click and drag to draw a rectangular ROI.")
96
+ print(" - Press 'c' to confirm the selection.")
97
+ print(" - Press 'r' to reset and draw again.")
98
+ print(" - Press ESC to cancel the selection.")
99
+
100
+ # Wait until the user confirms with 'c', resets with 'r', or cancels with ESC
101
+ while True:
102
+ key = cv2.waitKey(1) & 0xFF
103
+ # Confirm ROI if one has been drawn
104
+ if key == ord("c") and roi is not None:
105
+ break
106
+ # Reset: clear the ROI and restore the original image
107
+ elif key == ord("r"):
108
+ working_img = clone.copy()
109
+ roi = None
110
+ cv2.imshow("Select ROI", working_img)
111
+ # Cancel selection for this image
112
+ elif key == 27: # ESC key
113
+ roi = None
114
+ break
115
+
116
+ cv2.destroyWindow("Select ROI")
117
+ return roi
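+
+ # Example (illustrative): dragging from (40, 20) to (140, 220) and pressing 'c'
+ # returns (top=20, left=40, height=200, width=100).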
118
+
119
+
120
+ def select_rect_roi_for_images(images: dict) -> dict:
121
+ """
122
+ For each image in the provided dictionary, open a window to allow the user
123
+ to select a rectangular ROI. Returns a dictionary mapping each key to a tuple
124
+ (top, left, height, width) representing the ROI.
125
+
126
+ Parameters:
127
+ images (dict): Dictionary where keys are identifiers and values are OpenCV images.
128
+
129
+ Returns:
130
+ dict: Mapping of image keys to the selected rectangular ROI.
131
+ """
132
+ selected_rois = {}
133
+
134
+ for key, img in images.items():
135
+ if img is None:
136
+ print(f"Image for key '{key}' is None, skipping.")
137
+ continue
138
+
139
+ print(f"\nSelect rectangular ROI for image with key: '{key}'")
140
+ roi = select_rect_roi(img)
141
+
142
+ if roi is None:
143
+ print(f"No valid ROI selected for '{key}'.")
144
+ else:
145
+ selected_rois[key] = roi
146
+ print(f"ROI for '{key}': {roi}")
147
+
148
+ return selected_rois
149
+
150
+
151
+ def get_image_from_lerobot_dataset(dataset: LeRobotDataset):
152
+ """
153
+ Fetch the first row of the dataset and extract its images, to be used for ROI selection.
154
+ """
155
+ row = dataset[0]
156
+ image_dict = {}
157
+ for k in row:
158
+ if "image" in k:
159
+ image_dict[k] = deepcopy(row[k])
160
+ return image_dict
161
+
162
+
163
+ def convert_lerobot_dataset_to_cropped_lerobot_dataset(
164
+ original_dataset: LeRobotDataset,
165
+ crop_params_dict: dict[str, tuple[int, int, int, int]],
166
+ new_repo_id: str,
167
+ new_dataset_root: str,
168
+ resize_size: tuple[int, int] = (128, 128),
169
+ push_to_hub: bool = False,
170
+ task: str = "",
171
+ ) -> LeRobotDataset:
172
+ """
173
+ Converts an existing LeRobotDataset by iterating over its episodes and frames,
174
+ applying cropping and resizing to image observations, and saving a new dataset
175
+ with the transformed data.
176
+
177
+ Args:
178
+ original_dataset (LeRobotDataset): The source dataset.
179
+ crop_params_dict (Dict[str, Tuple[int, int, int, int]]):
180
+ A dictionary mapping observation keys to crop parameters (top, left, height, width).
181
+ new_repo_id (str): Repository id for the new dataset.
182
+ new_dataset_root (str): The root directory where the new dataset will be written.
183
+ resize_size (Tuple[int, int], optional): The target size (height, width) after cropping.
184
+ Defaults to (128, 128).
185
+
186
+ Returns:
187
+ LeRobotDataset: A new LeRobotDataset where the specified image observations have been cropped
188
+ and resized.
189
+ """
190
+ # 1. Create a new (empty) LeRobotDataset for writing.
191
+ new_dataset = LeRobotDataset.create(
192
+ repo_id=new_repo_id,
193
+ fps=int(original_dataset.fps),
194
+ root=new_dataset_root,
195
+ robot_type=original_dataset.meta.robot_type,
196
+ features=original_dataset.meta.info["features"],
197
+ use_videos=len(original_dataset.meta.video_keys) > 0,
198
+ )
199
+
200
+ # Update the metadata for every image key that will be cropped:
201
+ # (Here we simply set the shape to be the final resize_size.)
202
+ for key in crop_params_dict:
203
+ if key in new_dataset.meta.info["features"]:
204
+ new_dataset.meta.info["features"][key]["shape"] = [3] + list(resize_size)
205
+
206
+ # TODO: Directly modify the mp4 video + meta info features, instead of recreating a dataset
207
+ prev_episode_index = 0
208
+ for frame_idx in tqdm(range(len(original_dataset))):
209
+ frame = original_dataset[frame_idx]
210
+
211
+ # Create a copy of the frame to add to the new dataset
212
+ new_frame = {}
213
+ for key, value in frame.items():
214
+ if key in ("task_index", "timestamp", "episode_index", "frame_index", "index", "task"):
215
+ continue
216
+ if key in (DONE, REWARD):
218
+ value = value.unsqueeze(0)
219
+
220
+ if key in crop_params_dict:
221
+ top, left, height, width = crop_params_dict[key]
222
+ # Apply crop then resize.
223
+ cropped = F.crop(value, top, left, height, width)
224
+ value = F.resize(cropped, resize_size)
225
+ value = value.clamp(0, 1)
226
+ if key.startswith("complementary_info") and isinstance(value, torch.Tensor) and value.dim() == 0:
227
+ value = value.unsqueeze(0)
228
+ new_frame[key] = value
229
+
230
+ new_frame["task"] = task
231
+ new_dataset.add_frame(new_frame)
232
+
233
+ if frame["episode_index"].item() != prev_episode_index:
234
+ # Save the episode
235
+ new_dataset.save_episode()
236
+ prev_episode_index = frame["episode_index"].item()
237
+
238
+ # Save the last episode
239
+ new_dataset.save_episode()
240
+
241
+ if push_to_hub:
242
+ new_dataset.push_to_hub()
243
+
244
+ return new_dataset
245
+
246
+
247
+ if __name__ == "__main__":
248
+ parser = argparse.ArgumentParser(description="Crop rectangular ROIs from a LeRobot dataset.")
249
+ parser.add_argument(
250
+ "--repo-id",
251
+ type=str,
252
+ default="lerobot",
253
+ help="The repository id of the LeRobot dataset to process.",
254
+ )
255
+ parser.add_argument(
256
+ "--root",
257
+ type=str,
258
+ default=None,
259
+ help="The root directory of the LeRobot dataset.",
260
+ )
261
+ parser.add_argument(
262
+ "--crop-params-path",
263
+ type=str,
264
+ default=None,
265
+ help="The path to the JSON file containing the ROIs.",
266
+ )
267
+ parser.add_argument(
268
+ "--push-to-hub",
269
+ action="store_true",
270
+ help="Whether to push the new dataset to the hub.",
271
+ )
272
+ parser.add_argument(
273
+ "--task",
274
+ type=str,
275
+ default="",
276
+ help="The natural language task to describe the dataset.",
277
+ )
278
+ parser.add_argument(
279
+ "--new-repo-id",
280
+ type=str,
281
+ default=None,
282
+ help="The repository id for the new cropped and resized dataset. If not provided, it defaults to `repo_id` + '_cropped_resized'.",
283
+ )
284
+ args = parser.parse_args()
285
+
286
+ dataset = LeRobotDataset(repo_id=args.repo_id, root=args.root)
287
+
288
+ images = get_image_from_lerobot_dataset(dataset)
289
+ images = {k: v.cpu().permute(1, 2, 0).numpy() for k, v in images.items()}
290
+ images = {k: (v * 255).astype("uint8") for k, v in images.items()}
291
+
292
+ if args.crop_params_path is None:
293
+ rois = select_rect_roi_for_images(images)
294
+ else:
295
+ with open(args.crop_params_path) as f:
296
+ rois = json.load(f)
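+
+ # The JSON file is expected to map image keys to [top, left, height, width]
+ # lists, e.g. (hypothetical camera key):
+ #
+ #     {"observation.images.front": [10, 30, 200, 250]}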
297
+
298
+ # Print the selected rectangular ROIs
299
+ print("\nSelected Rectangular Regions of Interest (top, left, height, width):")
300
+ for key, roi in rois.items():
301
+ print(f"{key}: {roi}")
302
+
303
+ new_repo_id = args.new_repo_id if args.new_repo_id else args.repo_id + "_cropped_resized"
304
+
305
+ if args.new_repo_id:
306
+ new_dataset_name = args.new_repo_id.split("/")[-1]
307
+ # Parent 1: HF user, Parent 2: HF LeRobot Home
308
+ new_dataset_root = dataset.root.parent.parent / new_dataset_name
309
+ else:
310
+ new_dataset_root = Path(str(dataset.root) + "_cropped_resized")
311
+
312
+ cropped_resized_dataset = convert_lerobot_dataset_to_cropped_lerobot_dataset(
313
+ original_dataset=dataset,
314
+ crop_params_dict=rois,
315
+ new_repo_id=new_repo_id,
316
+ new_dataset_root=new_dataset_root,
317
+ resize_size=(128, 128),
318
+ push_to_hub=args.push_to_hub,
319
+ task=args.task,
320
+ )
321
+
322
+ meta_dir = new_dataset_root / "meta"
323
+ meta_dir.mkdir(exist_ok=True)
324
+
325
+ with open(meta_dir / "crop_params.json", "w") as f:
326
+ json.dump(rois, f, indent=4)
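+
+ # Example invocation (hypothetical repo id):
+ #
+ #     python -m lerobot.rl.crop_dataset_roi --repo-id user/my_dataset --push-to-hub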
lerobot/src/lerobot/rl/eval_policy.py ADDED
@@ -0,0 +1,75 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import logging
17
+
18
+ from lerobot.cameras import opencv # noqa: F401
19
+ from lerobot.configs import parser
20
+ from lerobot.configs.train import TrainRLServerPipelineConfig
21
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
22
+ from lerobot.policies.factory import make_policy
23
+ from lerobot.robots import ( # noqa: F401
24
+ RobotConfig,
25
+ make_robot_from_config,
26
+ so_follower,
27
+ )
28
+ from lerobot.teleoperators import (
29
+ gamepad, # noqa: F401
30
+ so_leader, # noqa: F401
31
+ )
32
+
33
+ from .gym_manipulator import make_robot_env
34
+
35
+ logging.basicConfig(level=logging.INFO)
36
+
37
+
38
+ def eval_policy(env, policy, n_episodes):
39
+ sum_reward_episode = []
40
+ for _ in range(n_episodes):
41
+ obs, _ = env.reset()
42
+ episode_reward = 0.0
43
+ while True:
44
+ action = policy.select_action(obs)
45
+ obs, reward, terminated, truncated, _ = env.step(action)
46
+ episode_reward += reward
47
+ if terminated or truncated:
48
+ break
49
+ sum_reward_episode.append(episode_reward)
50
+
51
+ logging.info(f"Success after 20 steps {sum_reward_episode}")
52
+ logging.info(f"success rate {sum(sum_reward_episode) / len(sum_reward_episode)}")
53
+
54
+
55
+ @parser.wrap()
56
+ def main(cfg: TrainRLServerPipelineConfig):
57
+ env_cfg = cfg.env
58
+ env = make_robot_env(env_cfg)
59
+ dataset_cfg = cfg.dataset
60
+ dataset = LeRobotDataset(repo_id=dataset_cfg.repo_id)
61
+ dataset_meta = dataset.meta
62
+
63
+ policy = make_policy(
64
+ cfg=cfg.policy,
65
+ # env_cfg=cfg.env,
66
+ ds_meta=dataset_meta,
67
+ )
68
+ policy = policy.from_pretrained(env_cfg.pretrained_policy_name_or_path)
69
+ policy.eval()
70
+
71
+ eval_policy(env, policy=policy, n_episodes=10)
72
+
73
+
74
+ if __name__ == "__main__":
75
+ main()
lerobot/src/lerobot/rl/gym_manipulator.py ADDED
@@ -0,0 +1,771 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import logging
18
+ import time
19
+ from dataclasses import dataclass
20
+ from typing import Any
21
+
22
+ import gymnasium as gym
23
+ import numpy as np
24
+ import torch
25
+
26
+ from lerobot.cameras import opencv # noqa: F401
27
+ from lerobot.configs import parser
28
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
29
+ from lerobot.envs.configs import HILSerlRobotEnvConfig
30
+ from lerobot.model.kinematics import RobotKinematics
31
+ from lerobot.processor import (
32
+ AddBatchDimensionProcessorStep,
33
+ AddTeleopActionAsComplimentaryDataStep,
34
+ AddTeleopEventsAsInfoStep,
35
+ DataProcessorPipeline,
36
+ DeviceProcessorStep,
37
+ EnvTransition,
38
+ GripperPenaltyProcessorStep,
39
+ ImageCropResizeProcessorStep,
40
+ InterventionActionProcessorStep,
41
+ MapDeltaActionToRobotActionStep,
42
+ MapTensorToDeltaActionDictStep,
43
+ Numpy2TorchActionProcessorStep,
44
+ RewardClassifierProcessorStep,
45
+ RobotActionToPolicyActionProcessorStep,
46
+ RobotObservation,
47
+ TimeLimitProcessorStep,
48
+ Torch2NumpyActionProcessorStep,
49
+ TransitionKey,
50
+ VanillaObservationProcessorStep,
51
+ create_transition,
52
+ )
53
+ from lerobot.processor.converters import identity_transition
54
+ from lerobot.robots import ( # noqa: F401
55
+ RobotConfig,
56
+ make_robot_from_config,
57
+ so_follower,
58
+ )
59
+ from lerobot.robots.robot import Robot
60
+ from lerobot.robots.so_follower.robot_kinematic_processor import (
61
+ EEBoundsAndSafety,
62
+ EEReferenceAndDelta,
63
+ ForwardKinematicsJointsToEEObservation,
64
+ GripperVelocityToJoint,
65
+ InverseKinematicsRLStep,
66
+ )
67
+ from lerobot.teleoperators import (
68
+ gamepad, # noqa: F401
69
+ keyboard, # noqa: F401
70
+ make_teleoperator_from_config,
71
+ so_leader, # noqa: F401
72
+ )
73
+ from lerobot.teleoperators.teleoperator import Teleoperator
74
+ from lerobot.teleoperators.utils import TeleopEvents
75
+ from lerobot.utils.constants import ACTION, DONE, OBS_IMAGES, OBS_STATE, REWARD
76
+ from lerobot.utils.robot_utils import precise_sleep
77
+ from lerobot.utils.utils import log_say
78
+
79
+ from .joint_observations_processor import JointVelocityProcessorStep, MotorCurrentProcessorStep
80
+
81
+ logging.basicConfig(level=logging.INFO)
82
+
83
+
84
+ @dataclass
85
+ class DatasetConfig:
86
+ """Configuration for dataset creation and management."""
87
+
88
+ repo_id: str
89
+ task: str
90
+ root: str | None = None
91
+ num_episodes_to_record: int = 5
92
+ replay_episode: int | None = None
93
+ push_to_hub: bool = False
94
+
95
+
96
+ @dataclass
97
+ class GymManipulatorConfig:
98
+ """Main configuration for gym manipulator environment."""
99
+
100
+ env: HILSerlRobotEnvConfig
101
+ dataset: DatasetConfig
102
+ mode: str | None = None # Either "record", "replay", None
103
+ device: str = "cpu"
104
+
105
+
106
+ def reset_follower_position(robot_arm: Robot, target_position: np.ndarray) -> None:
107
+ """Reset robot arm to target position using smooth trajectory."""
108
+ current_position_dict = robot_arm.bus.sync_read("Present_Position")
109
+ current_position = np.array(
110
+ [current_position_dict[name] for name in current_position_dict], dtype=np.float32
111
+ )
112
+ trajectory = torch.from_numpy(
113
+ np.linspace(current_position, target_position, 50)
114
+ ) # NOTE: 50 interpolation steps is an arbitrary choice
115
+ for pose in trajectory:
116
+ action_dict = dict(zip(current_position_dict, pose, strict=False))
117
+ robot_arm.bus.sync_write("Goal_Position", action_dict)
118
+ precise_sleep(0.015)
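+
+ # Illustrative timing: 50 interpolation steps at ~0.015 s each yields a reset
+ # trajectory of roughly 0.75 s.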
119
+
120
+
121
+ class RobotEnv(gym.Env):
122
+ """Gym environment for robotic control with human intervention support."""
123
+
124
+ def __init__(
125
+ self,
126
+ robot,
127
+ use_gripper: bool = False,
128
+ display_cameras: bool = False,
129
+ reset_pose: list[float] | None = None,
130
+ reset_time_s: float = 5.0,
131
+ ) -> None:
132
+ """Initialize robot environment with configuration options.
133
+
134
+ Args:
135
+ robot: Robot interface for hardware communication.
136
+ use_gripper: Whether to include gripper in action space.
137
+ display_cameras: Whether to show camera feeds during execution.
138
+ reset_pose: Joint positions for environment reset.
139
+ reset_time_s: Time to wait during reset.
140
+ """
141
+ super().__init__()
142
+
143
+ self.robot = robot
144
+ self.display_cameras = display_cameras
145
+
146
+ # Connect to the robot if not already connected.
147
+ if not self.robot.is_connected:
148
+ self.robot.connect()
149
+
150
+ # Episode tracking.
151
+ self.current_step = 0
152
+ self.episode_data = None
153
+
155
+ self._image_keys = self.robot.cameras.keys()
156
+
157
+ self.reset_pose = reset_pose
158
+ self.reset_time_s = reset_time_s
159
+
160
+ self.use_gripper = use_gripper
161
+
162
+ self._joint_names = list(self.robot.bus.motors.keys())
163
+ self._raw_joint_positions = None
164
+
165
+ self._setup_spaces()
166
+
167
+ def _get_observation(self) -> RobotObservation:
168
+ """Get current robot observation including joint positions and camera images."""
169
+ obs_dict = self.robot.get_observation()
170
+ raw_joint_positions = {f"{name}.pos": obs_dict[f"{name}.pos"] for name in self._joint_names}
171
+ joint_positions = np.array([raw_joint_positions[f"{name}.pos"] for name in self._joint_names])
172
+
173
+ images = {key: obs_dict[key] for key in self._image_keys}
174
+
175
+ return {"agent_pos": joint_positions, "pixels": images, **raw_joint_positions}
176
+
177
+ def _setup_spaces(self) -> None:
178
+ """Configure observation and action spaces based on robot capabilities."""
179
+ current_observation = self._get_observation()
180
+
181
+ observation_spaces = {}
182
+
183
+ # Define observation spaces for images and other states.
184
+ if current_observation is not None and "pixels" in current_observation:
185
+ prefix = OBS_IMAGES
186
+ observation_spaces = {
187
+ f"{prefix}.{key}": gym.spaces.Box(
188
+ low=0, high=255, shape=current_observation["pixels"][key].shape, dtype=np.uint8
189
+ )
190
+ for key in current_observation["pixels"]
191
+ }
192
+
193
+ if current_observation is not None:
194
+ agent_pos = current_observation["agent_pos"]
195
+ observation_spaces[OBS_STATE] = gym.spaces.Box(
196
+ low=0,
197
+ high=10,
198
+ shape=agent_pos.shape,
199
+ dtype=np.float32,
200
+ )
201
+
202
+ self.observation_space = gym.spaces.Dict(observation_spaces)
203
+
204
+ # Define the action space for joint positions along with setting an intervention flag.
205
+ action_dim = 3
206
+ bounds = {}
207
+ bounds["min"] = -np.ones(action_dim)
208
+ bounds["max"] = np.ones(action_dim)
209
+
210
+ if self.use_gripper:
211
+ action_dim += 1
212
+ bounds["min"] = np.concatenate([bounds["min"], [0]])
213
+ bounds["max"] = np.concatenate([bounds["max"], [2]])
214
+
215
+ self.action_space = gym.spaces.Box(
216
+ low=bounds["min"],
217
+ high=bounds["max"],
218
+ shape=(action_dim,),
219
+ dtype=np.float32,
220
+ )
221
+
222
+ def reset(
223
+ self, *, seed: int | None = None, options: dict[str, Any] | None = None
224
+ ) -> tuple[RobotObservation, dict[str, Any]]:
225
+ """Reset environment to initial state.
226
+
227
+ Args:
228
+ seed: Random seed for reproducibility.
229
+ options: Additional reset options.
230
+
231
+ Returns:
232
+ Tuple of (observation, info) dictionaries.
233
+ """
234
+ # Reset the robot
235
+ # self.robot.reset()
236
+ start_time = time.perf_counter()
237
+ if self.reset_pose is not None:
238
+ log_say("Reset the environment.", play_sounds=True)
239
+ reset_follower_position(self.robot, np.array(self.reset_pose))
240
+ log_say("Reset the environment done.", play_sounds=True)
241
+
242
+ precise_sleep(max(self.reset_time_s - (time.perf_counter() - start_time), 0.0))
243
+
244
+ super().reset(seed=seed, options=options)
245
+
246
+ # Reset episode tracking variables.
247
+ self.current_step = 0
248
+ self.episode_data = None
249
+ obs = self._get_observation()
250
+ self._raw_joint_positions = {f"{key}.pos": obs[f"{key}.pos"] for key in self._joint_names}
251
+ return obs, {TeleopEvents.IS_INTERVENTION: False}
252
+
253
+ def step(self, action) -> tuple[RobotObservation, float, bool, bool, dict[str, Any]]:
254
+ """Execute one environment step with given action."""
255
+ joint_targets_dict = {f"{key}.pos": action[i] for i, key in enumerate(self.robot.bus.motors.keys())}
256
+
257
+ self.robot.send_action(joint_targets_dict)
258
+
259
+ obs = self._get_observation()
260
+
261
+ self._raw_joint_positions = {f"{key}.pos": obs[f"{key}.pos"] for key in self._joint_names}
262
+
263
+ if self.display_cameras:
264
+ self.render()
265
+
266
+ self.current_step += 1
267
+
268
+ reward = 0.0
269
+ terminated = False
270
+ truncated = False
271
+
272
+ return (
273
+ obs,
274
+ reward,
275
+ terminated,
276
+ truncated,
277
+ {TeleopEvents.IS_INTERVENTION: False},
278
+ )
279
+
280
+ def render(self) -> None:
281
+ """Display robot camera feeds."""
282
+ import cv2
283
+
284
+ current_observation = self._get_observation()
285
+ if current_observation is not None:
286
+ image_keys = [key for key in current_observation if "image" in key]
287
+
288
+ for key in image_keys:
289
+ cv2.imshow(key, cv2.cvtColor(current_observation[key].numpy(), cv2.COLOR_RGB2BGR))
290
+ cv2.waitKey(1)
291
+
292
+ def close(self) -> None:
293
+ """Close environment and disconnect robot."""
294
+ if self.robot.is_connected:
295
+ self.robot.disconnect()
296
+
297
+ def get_raw_joint_positions(self) -> dict[str, float]:
298
+ """Get raw joint positions."""
299
+ return self._raw_joint_positions
300
+
301
+
302
+ def make_robot_env(cfg: HILSerlRobotEnvConfig) -> tuple[gym.Env, Any]:
303
+ """Create robot environment from configuration.
304
+
305
+ Args:
306
+ cfg: Environment configuration.
307
+
308
+ Returns:
309
+ Tuple of (gym environment, teleoperator device).
310
+ """
311
+ # Check if this is a GymHIL simulation environment
312
+ if cfg.name == "gym_hil":
313
+ assert cfg.robot is None and cfg.teleop is None, "GymHIL environment does not support robot or teleop"
314
+ import gym_hil # noqa: F401
315
+
316
+ # Extract gripper settings with defaults
317
+ use_gripper = cfg.processor.gripper.use_gripper if cfg.processor.gripper is not None else True
318
+ gripper_penalty = cfg.processor.gripper.gripper_penalty if cfg.processor.gripper is not None else 0.0
319
+
320
+ env = gym.make(
321
+ f"gym_hil/{cfg.task}",
322
+ image_obs=True,
323
+ render_mode="human",
324
+ use_gripper=use_gripper,
325
+ gripper_penalty=gripper_penalty,
326
+ )
327
+
328
+ return env, None
329
+
330
+ # Real robot environment
331
+ assert cfg.robot is not None, "Robot config must be provided for real robot environment"
332
+ assert cfg.teleop is not None, "Teleop config must be provided for real robot environment"
333
+
334
+ robot = make_robot_from_config(cfg.robot)
335
+ teleop_device = make_teleoperator_from_config(cfg.teleop)
336
+ teleop_device.connect()
337
+
338
+ # Create base environment with safe defaults
339
+ use_gripper = cfg.processor.gripper.use_gripper if cfg.processor.gripper is not None else True
340
+ display_cameras = (
341
+ cfg.processor.observation.display_cameras if cfg.processor.observation is not None else False
342
+ )
343
+ reset_pose = cfg.processor.reset.fixed_reset_joint_positions if cfg.processor.reset is not None else None
344
+
345
+ env = RobotEnv(
346
+ robot=robot,
347
+ use_gripper=use_gripper,
348
+ display_cameras=display_cameras,
349
+ reset_pose=reset_pose,
350
+ )
351
+
352
+ return env, teleop_device
353
+
354
+
355
+ def make_processors(
356
+ env: gym.Env, teleop_device: Teleoperator | None, cfg: HILSerlRobotEnvConfig, device: str = "cpu"
357
+ ) -> tuple[
358
+ DataProcessorPipeline[EnvTransition, EnvTransition], DataProcessorPipeline[EnvTransition, EnvTransition]
359
+ ]:
360
+ """Create environment and action processors.
361
+
362
+ Args:
363
+ env: Robot environment instance.
364
+ teleop_device: Teleoperator device for intervention.
365
+ cfg: Processor configuration.
366
+ device: Target device for computations.
367
+
368
+ Returns:
369
+ Tuple of (environment processor, action processor).
370
+ """
371
+ terminate_on_success = (
372
+ cfg.processor.reset.terminate_on_success if cfg.processor.reset is not None else True
373
+ )
374
+
375
+ if cfg.name == "gym_hil":
376
+ action_pipeline_steps = [
377
+ InterventionActionProcessorStep(terminate_on_success=terminate_on_success),
378
+ Torch2NumpyActionProcessorStep(),
379
+ ]
380
+
381
+ env_pipeline_steps = [
382
+ Numpy2TorchActionProcessorStep(),
383
+ VanillaObservationProcessorStep(),
384
+ AddBatchDimensionProcessorStep(),
385
+ DeviceProcessorStep(device=device),
386
+ ]
387
+
388
+ return DataProcessorPipeline(
389
+ steps=env_pipeline_steps, to_transition=identity_transition, to_output=identity_transition
390
+ ), DataProcessorPipeline(
391
+ steps=action_pipeline_steps, to_transition=identity_transition, to_output=identity_transition
392
+ )
393
+
394
+ # Full processor pipeline for real robot environment
395
+ # Get robot and motor information for kinematics
396
+ motor_names = list(env.robot.bus.motors.keys())
397
+
398
+ # Set up kinematics solver if inverse kinematics is configured
399
+ kinematics_solver = None
400
+ if cfg.processor.inverse_kinematics is not None:
401
+ kinematics_solver = RobotKinematics(
402
+ urdf_path=cfg.processor.inverse_kinematics.urdf_path,
403
+ target_frame_name=cfg.processor.inverse_kinematics.target_frame_name,
404
+ joint_names=motor_names,
405
+ )
406
+
407
+ env_pipeline_steps = [VanillaObservationProcessorStep()]
408
+
409
+ if cfg.processor.observation is not None:
410
+ if cfg.processor.observation.add_joint_velocity_to_observation:
411
+ env_pipeline_steps.append(JointVelocityProcessorStep(dt=1.0 / cfg.fps))
412
+ if cfg.processor.observation.add_current_to_observation:
413
+ env_pipeline_steps.append(MotorCurrentProcessorStep(robot=env.robot))
414
+
415
+ if kinematics_solver is not None:
416
+ env_pipeline_steps.append(
417
+ ForwardKinematicsJointsToEEObservation(
418
+ kinematics=kinematics_solver,
419
+ motor_names=motor_names,
420
+ )
421
+ )
422
+
423
+ if cfg.processor.image_preprocessing is not None:
424
+ env_pipeline_steps.append(
425
+ ImageCropResizeProcessorStep(
426
+ crop_params_dict=cfg.processor.image_preprocessing.crop_params_dict,
427
+ resize_size=cfg.processor.image_preprocessing.resize_size,
428
+ )
429
+ )
430
+
431
+ # Add time limit processor if reset config exists
432
+ if cfg.processor.reset is not None:
433
+ env_pipeline_steps.append(
434
+ TimeLimitProcessorStep(max_episode_steps=int(cfg.processor.reset.control_time_s * cfg.fps))
435
+ )
436
+
437
+ # Add gripper penalty processor if gripper config exists and enabled
438
+ if cfg.processor.gripper is not None and cfg.processor.gripper.use_gripper:
439
+ env_pipeline_steps.append(
440
+ GripperPenaltyProcessorStep(
441
+ penalty=cfg.processor.gripper.gripper_penalty,
442
+ max_gripper_pos=cfg.processor.max_gripper_pos,
443
+ )
444
+ )
445
+
446
+ if (
447
+ cfg.processor.reward_classifier is not None
448
+ and cfg.processor.reward_classifier.pretrained_path is not None
449
+ ):
450
+ env_pipeline_steps.append(
451
+ RewardClassifierProcessorStep(
452
+ pretrained_path=cfg.processor.reward_classifier.pretrained_path,
453
+ device=device,
454
+ success_threshold=cfg.processor.reward_classifier.success_threshold,
455
+ success_reward=cfg.processor.reward_classifier.success_reward,
456
+ terminate_on_success=terminate_on_success,
457
+ )
458
+ )
459
+
460
+ env_pipeline_steps.append(AddBatchDimensionProcessorStep())
461
+ env_pipeline_steps.append(DeviceProcessorStep(device=device))
462
+
463
+ action_pipeline_steps = [
464
+ AddTeleopActionAsComplimentaryDataStep(teleop_device=teleop_device),
465
+ AddTeleopEventsAsInfoStep(teleop_device=teleop_device),
466
+ InterventionActionProcessorStep(
467
+ use_gripper=cfg.processor.gripper.use_gripper if cfg.processor.gripper is not None else False,
468
+ terminate_on_success=terminate_on_success,
469
+ ),
470
+ ]
471
+
472
+ # Replace InverseKinematicsProcessor with new kinematic processors
473
+ if cfg.processor.inverse_kinematics is not None and kinematics_solver is not None:
474
+ # Add EE bounds and safety processor
475
+ inverse_kinematics_steps = [
476
+ MapTensorToDeltaActionDictStep(
477
+ use_gripper=cfg.processor.gripper.use_gripper if cfg.processor.gripper is not None else False
478
+ ),
479
+ MapDeltaActionToRobotActionStep(),
480
+ EEReferenceAndDelta(
481
+ kinematics=kinematics_solver,
482
+ end_effector_step_sizes=cfg.processor.inverse_kinematics.end_effector_step_sizes,
483
+ motor_names=motor_names,
484
+ use_latched_reference=False,
485
+ use_ik_solution=True,
486
+ ),
487
+ EEBoundsAndSafety(
488
+ end_effector_bounds=cfg.processor.inverse_kinematics.end_effector_bounds,
489
+ ),
490
+ GripperVelocityToJoint(
491
+ clip_max=cfg.processor.max_gripper_pos,
492
+ speed_factor=1.0,
493
+ discrete_gripper=True,
494
+ ),
495
+ InverseKinematicsRLStep(
496
+ kinematics=kinematics_solver, motor_names=motor_names, initial_guess_current_joints=False
497
+ ),
498
+ ]
499
+ action_pipeline_steps.extend(inverse_kinematics_steps)
500
+ action_pipeline_steps.append(RobotActionToPolicyActionProcessorStep(motor_names=motor_names))
501
+
502
+ return DataProcessorPipeline(
503
+ steps=env_pipeline_steps, to_transition=identity_transition, to_output=identity_transition
504
+ ), DataProcessorPipeline(
505
+ steps=action_pipeline_steps, to_transition=identity_transition, to_output=identity_transition
506
+ )
507
+
508
+
509
+ def step_env_and_process_transition(
510
+ env: gym.Env,
511
+ transition: EnvTransition,
512
+ action: torch.Tensor,
513
+ env_processor: DataProcessorPipeline[EnvTransition, EnvTransition],
514
+ action_processor: DataProcessorPipeline[EnvTransition, EnvTransition],
515
+ ) -> EnvTransition:
516
+ """
517
+ Execute one step with processor pipeline.
518
+
519
+ Args:
520
+ env: The robot environment
521
+ transition: Current transition state
522
+ action: Action to execute
523
+ env_processor: Environment processor
524
+ action_processor: Action processor
525
+
526
+ Returns:
527
+ Processed transition with updated state.
528
+ """
529
+
530
+ # Create action transition
531
+ transition[TransitionKey.ACTION] = action
532
+ transition[TransitionKey.OBSERVATION] = (
533
+ env.get_raw_joint_positions() if hasattr(env, "get_raw_joint_positions") else {}
534
+ )
535
+ processed_action_transition = action_processor(transition)
536
+ processed_action = processed_action_transition[TransitionKey.ACTION]
537
+
538
+ obs, reward, terminated, truncated, info = env.step(processed_action)
539
+
540
+ reward = reward + processed_action_transition[TransitionKey.REWARD]
541
+ terminated = terminated or processed_action_transition[TransitionKey.DONE]
542
+ truncated = truncated or processed_action_transition[TransitionKey.TRUNCATED]
543
+ complementary_data = processed_action_transition[TransitionKey.COMPLEMENTARY_DATA].copy()
544
+ new_info = processed_action_transition[TransitionKey.INFO].copy()
545
+ new_info.update(info)
546
+
547
+ new_transition = create_transition(
548
+ observation=obs,
549
+ action=processed_action,
550
+ reward=reward,
551
+ done=terminated,
552
+ truncated=truncated,
553
+ info=new_info,
554
+ complementary_data=complementary_data,
555
+ )
556
+ new_transition = env_processor(new_transition)
557
+
558
+ return new_transition
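+
+ # A minimal usage sketch (assumes env, processors and a prior transition exist):
+ #
+ #     neutral = torch.tensor([0.0, 0.0, 0.0, 1.0])  # x/y/z deltas + gripper "stay"
+ #     transition = step_env_and_process_transition(
+ #         env=env, transition=transition, action=neutral,
+ #         env_processor=env_processor, action_processor=action_processor,
+ #     )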
559
+
560
+
561
+ def control_loop(
562
+ env: gym.Env,
563
+ env_processor: DataProcessorPipeline[EnvTransition, EnvTransition],
564
+ action_processor: DataProcessorPipeline[EnvTransition, EnvTransition],
565
+ teleop_device: Teleoperator,
566
+ cfg: GymManipulatorConfig,
567
+ ) -> None:
568
+ """Main control loop for robot environment interaction.
569
+ if cfg.mode == "record": then a dataset will be created and recorded
570
+
571
+ Args:
572
+ env: The robot environment
573
+ env_processor: Environment processor
574
+ action_processor: Action processor
575
+ teleop_device: Teleoperator device
576
+ cfg: gym_manipulator configuration
577
+ """
578
+ dt = 1.0 / cfg.env.fps
579
+
580
+ print(f"Starting control loop at {cfg.env.fps} FPS")
581
+ print("Controls:")
582
+ print("- Use gamepad/teleop device for intervention")
583
+ print("- When not intervening, robot will stay still")
584
+ print("- Press Ctrl+C to exit")
585
+
586
+ # Reset environment and processors
587
+ obs, info = env.reset()
588
+ complementary_data = (
589
+ {"raw_joint_positions": info.pop("raw_joint_positions")} if "raw_joint_positions" in info else {}
590
+ )
591
+ env_processor.reset()
592
+ action_processor.reset()
593
+
594
+ # Process initial observation
595
+ transition = create_transition(observation=obs, info=info, complementary_data=complementary_data)
596
+ transition = env_processor(data=transition)
597
+
598
+ # Determine if gripper is used
599
+ use_gripper = cfg.env.processor.gripper.use_gripper if cfg.env.processor.gripper is not None else True
600
+
601
+ dataset = None
602
+ if cfg.mode == "record":
603
+ action_features = teleop_device.action_features
604
+ features = {
605
+ ACTION: action_features,
606
+ REWARD: {"dtype": "float32", "shape": (1,), "names": None},
607
+ DONE: {"dtype": "bool", "shape": (1,), "names": None},
608
+ }
609
+ if use_gripper:
610
+ features["complementary_info.discrete_penalty"] = {
611
+ "dtype": "float32",
612
+ "shape": (1,),
613
+ "names": ["discrete_penalty"],
614
+ }
615
+
616
+ for key, value in transition[TransitionKey.OBSERVATION].items():
617
+ if key == OBS_STATE:
618
+ features[key] = {
619
+ "dtype": "float32",
620
+ "shape": value.squeeze(0).shape,
621
+ "names": None,
622
+ }
623
+ if "image" in key:
624
+ features[key] = {
625
+ "dtype": "video",
626
+ "shape": value.squeeze(0).shape,
627
+ "names": ["channels", "height", "width"],
628
+ }
629
+
630
+ # Create dataset
631
+ dataset = LeRobotDataset.create(
632
+ cfg.dataset.repo_id,
633
+ cfg.env.fps,
634
+ root=cfg.dataset.root,
635
+ use_videos=True,
636
+ image_writer_threads=4,
637
+ image_writer_processes=0,
638
+ features=features,
639
+ )
640
+
641
+ episode_idx = 0
642
+ episode_step = 0
643
+ episode_start_time = time.perf_counter()
644
+
645
+ while episode_idx < cfg.dataset.num_episodes_to_record:
646
+ step_start_time = time.perf_counter()
647
+
648
+ # Create a neutral action (no movement)
649
+ neutral_action = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float32)
650
+ if use_gripper:
651
+ neutral_action = torch.cat([neutral_action, torch.tensor([1.0])]) # Gripper "stay" command
652
+
653
+ # Use the new step function
654
+ transition = step_env_and_process_transition(
655
+ env=env,
656
+ transition=transition,
657
+ action=neutral_action,
658
+ env_processor=env_processor,
659
+ action_processor=action_processor,
660
+ )
661
+ terminated = transition.get(TransitionKey.DONE, False)
662
+ truncated = transition.get(TransitionKey.TRUNCATED, False)
663
+
664
+ if cfg.mode == "record":
665
+ observations = {
666
+ k: v.squeeze(0).cpu()
667
+ for k, v in transition[TransitionKey.OBSERVATION].items()
668
+ if isinstance(v, torch.Tensor)
669
+ }
670
+ # Use teleop_action if available, otherwise use the action from the transition
671
+ action_to_record = transition[TransitionKey.COMPLEMENTARY_DATA].get(
672
+ "teleop_action", transition[TransitionKey.ACTION]
673
+ )
674
+ frame = {
675
+ **observations,
676
+ ACTION: action_to_record.cpu(),
677
+ REWARD: np.array([transition[TransitionKey.REWARD]], dtype=np.float32),
678
+ DONE: np.array([terminated or truncated], dtype=bool),
679
+ }
680
+ if use_gripper:
681
+ discrete_penalty = transition[TransitionKey.COMPLEMENTARY_DATA].get("discrete_penalty", 0.0)
682
+ frame["complementary_info.discrete_penalty"] = np.array([discrete_penalty], dtype=np.float32)
683
+
684
+ if dataset is not None:
685
+ frame["task"] = cfg.dataset.task
686
+ dataset.add_frame(frame)
687
+
688
+ episode_step += 1
689
+
690
+ # Handle episode termination
691
+ if terminated or truncated:
692
+ episode_time = time.perf_counter() - episode_start_time
693
+ logging.info(
694
+ f"Episode ended after {episode_step} steps in {episode_time:.1f}s with reward {transition[TransitionKey.REWARD]}"
695
+ )
696
+ episode_step = 0
697
+ episode_idx += 1
698
+
699
+ if dataset is not None:
700
+ if transition[TransitionKey.INFO].get(TeleopEvents.RERECORD_EPISODE, False):
701
+ logging.info(f"Re-recording episode {episode_idx}")
702
+ dataset.clear_episode_buffer()
703
+ episode_idx -= 1
704
+ else:
705
+ logging.info(f"Saving episode {episode_idx}")
706
+ dataset.save_episode()
707
+
708
+ # Reset for new episode
709
+ obs, info = env.reset()
710
+ env_processor.reset()
711
+ action_processor.reset()
712
+
713
+ transition = create_transition(observation=obs, info=info)
714
+ transition = env_processor(transition)
715
+
716
+ # Maintain fps timing
717
+ precise_sleep(max(dt - (time.perf_counter() - step_start_time), 0.0))
718
+
719
+ if dataset is not None and cfg.dataset.push_to_hub:
720
+ logging.info("Pushing dataset to hub")
721
+ dataset.push_to_hub()
722
+
723
+
724
+ def replay_trajectory(
725
+ env: gym.Env, action_processor: DataProcessorPipeline, cfg: GymManipulatorConfig
726
+ ) -> None:
727
+ """Replay recorded trajectory on robot environment."""
728
+ assert cfg.dataset.replay_episode is not None, "Replay episode must be provided for replay"
729
+
730
+ dataset = LeRobotDataset(
731
+ cfg.dataset.repo_id,
732
+ root=cfg.dataset.root,
733
+ episodes=[cfg.dataset.replay_episode],
734
+ download_videos=False,
735
+ )
736
+ episode_frames = dataset.hf_dataset.filter(lambda x: x["episode_index"] == cfg.dataset.replay_episode)
737
+ actions = episode_frames.select_columns(ACTION)
738
+
739
+ _, info = env.reset()
740
+
741
+ for action_data in actions:
742
+ start_time = time.perf_counter()
743
+ transition = create_transition(
744
+ observation=env.get_raw_joint_positions() if hasattr(env, "get_raw_joint_positions") else {},
745
+ action=action_data[ACTION],
746
+ )
747
+ transition = action_processor(transition)
748
+ env.step(transition[TransitionKey.ACTION])
749
+ precise_sleep(max(1 / cfg.env.fps - (time.perf_counter() - start_time), 0.0))
750
+
751
+
752
+ @parser.wrap()
753
+ def main(cfg: GymManipulatorConfig) -> None:
754
+ """Main entry point for gym manipulator script."""
755
+ env, teleop_device = make_robot_env(cfg.env)
756
+ env_processor, action_processor = make_processors(env, teleop_device, cfg.env, cfg.device)
757
+
758
+ print("Environment observation space:", env.observation_space)
759
+ print("Environment action space:", env.action_space)
760
+ print("Environment processor:", env_processor)
761
+ print("Action processor:", action_processor)
762
+
763
+ if cfg.mode == "replay":
764
+ replay_trajectory(env, action_processor, cfg)
765
+ return
766
+
767
+ control_loop(env, env_processor, action_processor, teleop_device, cfg)
768
+
769
+
770
+ if __name__ == "__main__":
771
+ main()
lerobot/src/lerobot/robots/earthrover_mini_plus/robot_earthrover_mini_plus.py ADDED
@@ -0,0 +1,469 @@
+ #!/usr/bin/env python
+
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """EarthRover Mini Plus robot using Frodobots SDK."""
+
+ import base64
+ import logging
+ from functools import cached_property
+
+ import cv2
+ import numpy as np
+ import requests
+
+ from lerobot.processor import RobotAction, RobotObservation
+ from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
+ from lerobot.utils.errors import DeviceNotConnectedError
+
+ from ..robot import Robot
+ from .config_earthrover_mini_plus import EarthRoverMiniPlusConfig
+
+ logger = logging.getLogger(__name__)
+
+ # Action feature keys
+ ACTION_LINEAR_VEL = "linear.vel"
+ ACTION_ANGULAR_VEL = "angular.vel"
+
+ # Observation feature keys
+ OBS_FRONT = "front"
+ OBS_REAR = "rear"
+ OBS_LINEAR_VEL = "linear.vel"
+ OBS_BATTERY_LEVEL = "battery.level"
+ OBS_ORIENTATION_DEG = "orientation.deg"
+ OBS_GPS_LATITUDE = "gps.latitude"
+ OBS_GPS_LONGITUDE = "gps.longitude"
+ OBS_GPS_SIGNAL = "gps.signal"
+ OBS_SIGNAL_LEVEL = "signal.level"
+ OBS_VIBRATION = "vibration"
+ OBS_LAMP_STATE = "lamp.state"
+
+
+ class EarthRoverMiniPlus(Robot):
+     """
+     EarthRover Mini Plus robot controlled via Frodobots SDK HTTP API.
+
+     This robot uses cloud-based control through the Frodobots SDK instead of direct
+     hardware connection. Cameras stream via WebRTC through Agora cloud, and control
+     commands are sent via HTTP POST requests.
+
+     The robot supports:
+     - Dual cameras (front and rear) accessed via SDK HTTP endpoints
+     - Linear and angular velocity control
+     - Battery and orientation telemetry
+
+     Attributes:
+         config: Robot configuration
+         sdk_base_url: URL of the Frodobots SDK server (default: http://localhost:8000)
+     """
+
+     config_class = EarthRoverMiniPlusConfig
+     name = "earthrover_mini_plus"
+
+     def __init__(self, config: EarthRoverMiniPlusConfig):
+         """Initialize EarthRover Mini Plus robot.
+
+         Args:
+             config: Robot configuration including SDK URL
+         """
+         super().__init__(config)
+         self.config = config
+         self.sdk_base_url = "http://localhost:8000"
+
+         # Empty cameras dict for compatibility with recording script
+         # Cameras are accessed directly via SDK, not through Camera objects
+         self.cameras = {}
+         self._is_connected = False
+
+         # Cache for camera frames (fallback when requests fail)
+         self._last_front_frame = None
+         self._last_rear_frame = None
+
+         # Cache for robot telemetry data (fallback when requests fail)
+         self._last_robot_data = None
+
+         logger.info(f"Initialized {self.name} with SDK at {self.sdk_base_url}")
+
+     @property
+     def is_connected(self) -> bool:
+         """Check if robot is connected to SDK."""
+         return self._is_connected
+
+     @check_if_already_connected
+     def connect(self, calibrate: bool = True) -> None:
+         """Connect to robot via Frodobots SDK.
+
+         Args:
+             calibrate: Not used for SDK-based robot (kept for API compatibility)
+
+         Raises:
+             DeviceAlreadyConnectedError: If robot is already connected
+             DeviceNotConnectedError: If cannot connect to SDK server
+         """
+
+         # Verify SDK is running and accessible
+         try:
+             response = requests.get(f"{self.sdk_base_url}/data", timeout=10.0)
+             if response.status_code != 200:
+                 raise DeviceNotConnectedError(
+                     f"Cannot connect to SDK at {self.sdk_base_url}. "
+                     "Make sure it's running: hypercorn main:app --reload"
+                 )
+         except requests.RequestException as e:
+             raise DeviceNotConnectedError(f"Cannot connect to SDK at {self.sdk_base_url}: {e}") from e
+
+         self._is_connected = True
+         logger.info(f"{self.name} connected to SDK")
+
+         if calibrate:
+             self.calibrate()
+
+     def calibrate(self) -> None:
+         """Calibration not needed for SDK-based robot."""
+         logger.info("Calibration not required for SDK-based robot")
+
+     @property
+     def is_calibrated(self) -> bool:
+         """SDK robot doesn't require calibration.
+
+         Returns:
+             bool: Always True for SDK-based robots
+         """
+         return True
+
+     def configure(self) -> None:
+         """Configure robot (no-op for SDK-based robot)."""
+         pass
+
+     @cached_property
+     def observation_features(self) -> dict[str, type | tuple]:
+         """Define the observation space for dataset recording.
+
+         Returns:
+             dict: Observation features with types/shapes:
+                 - front: (480, 640, 3) - Front camera RGB image
+                 - rear: (480, 640, 3) - Rear camera RGB image
+                 - linear.vel: float - Current speed (0-1, SDK reports only positive speeds)
+                 - battery.level: float - Battery level (0-1, normalized from 0-100)
+                 - orientation.deg: float - Robot orientation (0-1, normalized from raw value)
+                 - gps.latitude: float - GPS latitude coordinate
+                 - gps.longitude: float - GPS longitude coordinate
+                 - gps.signal: float - GPS signal strength (0-1, normalized from percentage)
+                 - signal.level: float - Network signal level (0-1, normalized from 0-5)
+                 - vibration: float - Vibration sensor reading
+                 - lamp.state: float - Lamp state (0=off, 1=on)
+         """
+         return {
+             # Cameras (height, width, channels)
+             OBS_FRONT: (480, 640, 3),
+             OBS_REAR: (480, 640, 3),
+             # Motion state
+             OBS_LINEAR_VEL: float,
+             # Robot state
+             OBS_BATTERY_LEVEL: float,
+             OBS_ORIENTATION_DEG: float,
+             # GPS
+             OBS_GPS_LATITUDE: float,
+             OBS_GPS_LONGITUDE: float,
+             OBS_GPS_SIGNAL: float,
+             # Sensors
+             OBS_SIGNAL_LEVEL: float,
+             OBS_VIBRATION: float,
+             OBS_LAMP_STATE: float,
+         }
+
+     @cached_property
+     def action_features(self) -> dict[str, type]:
+         """Define the action space.
+
+         Returns:
+             dict: Action features with types:
+                 - linear.vel: float - Target linear velocity
+                 - angular.vel: float - Target angular velocity
+         """
+         return {
+             ACTION_LINEAR_VEL: float,
+             ACTION_ANGULAR_VEL: float,
+         }
+
+     @check_if_not_connected
+     def get_observation(self) -> RobotObservation:
+         """Get current robot observation from SDK.
+
+         Returns:
+             RobotObservation: Observation containing:
+                 - front: Front camera image (480, 640, 3) in RGB format
+                 - rear: Rear camera image (480, 640, 3) in RGB format
+                 - linear.vel: Current speed (0-1, SDK reports only positive speeds)
+                 - battery.level: Battery level (0-1, normalized from 0-100)
+                 - orientation.deg: Robot orientation (0-1, normalized from raw value)
+                 - gps.latitude: GPS latitude coordinate
+                 - gps.longitude: GPS longitude coordinate
+                 - gps.signal: GPS signal strength (0-1, normalized from percentage)
+                 - signal.level: Network signal level (0-1, normalized from 0-5)
+                 - vibration: Vibration sensor reading
+                 - lamp.state: Lamp state (0=off, 1=on)
+
+         Raises:
+             DeviceNotConnectedError: If robot is not connected
+
+         Note:
+             Camera frames are retrieved from SDK endpoints /v2/front and /v2/rear.
+             Frames are decoded from base64 and converted from BGR to RGB format.
+             Robot telemetry is retrieved from /data endpoint.
+             All SDK values are normalized to appropriate ranges for dataset recording.
+         """
+
+         observation = {}
+
+         # Get camera images from SDK
+         frames = self._get_camera_frames()
+         observation[OBS_FRONT] = frames["front"]
+         observation[OBS_REAR] = frames["rear"]
+
+         # Get robot state from SDK
+         robot_data = self._get_robot_data()
+
+         # Motion state
+         observation[OBS_LINEAR_VEL] = robot_data["speed"] / 100.0  # Normalize 0-100 to 0-1
+
+         # Robot state
+         observation[OBS_BATTERY_LEVEL] = robot_data["battery"] / 100.0  # Normalize 0-100 to 0-1
+         observation[OBS_ORIENTATION_DEG] = robot_data["orientation"] / 360.0  # Normalize to 0-1
+
+         # GPS data
+         observation[OBS_GPS_LATITUDE] = robot_data["latitude"]
+         observation[OBS_GPS_LONGITUDE] = robot_data["longitude"]
+         observation[OBS_GPS_SIGNAL] = robot_data["gps_signal"] / 100.0  # Normalize percentage to 0-1
+
+         # Sensors
+         observation[OBS_SIGNAL_LEVEL] = robot_data["signal_level"] / 5.0  # Normalize 0-5 to 0-1
+         observation[OBS_VIBRATION] = robot_data["vibration"]
+         observation[OBS_LAMP_STATE] = float(robot_data["lamp"])  # 0 or 1
+
+         return observation
+
+     @check_if_not_connected
+     def send_action(self, action: RobotAction) -> RobotAction:
+         """Send action to robot via SDK.
+
+         Args:
+             action: Action dict with keys:
+                 - linear.vel: Target linear velocity (-1 to 1)
+                 - angular.vel: Target angular velocity (-1 to 1)
+
+         Returns:
+             RobotAction: The action that was sent (matches action_features keys)
+
+         Raises:
+             DeviceNotConnectedError: If robot is not connected
+
+         Note:
+             Actions are sent to SDK via POST /control endpoint.
+             SDK expects commands in range [-1, 1].
+         """
+
+         # Extract action values and convert to float
+         linear = float(action.get(ACTION_LINEAR_VEL, 0.0))
+         angular = float(action.get(ACTION_ANGULAR_VEL, 0.0))
+
+         # Send command to SDK
+         try:
+             self._send_command_to_sdk(linear, angular)
+         except Exception as e:
+             logger.error(f"Error sending action: {e}")
+
+         # Return action in format matching action_features
+         return {
+             ACTION_LINEAR_VEL: linear,
+             ACTION_ANGULAR_VEL: angular,
+         }
+
+     @check_if_not_connected
+     def disconnect(self) -> None:
+         """Disconnect from robot.
+
+         Stops the robot and closes connection to SDK.
+
+         Raises:
+             DeviceNotConnectedError: If robot is not connected
+         """
+
+         # Stop the robot before disconnecting
+         try:
+             self._send_command_to_sdk(0.0, 0.0)
+         except Exception as e:
+             logger.warning(f"Failed to stop robot during disconnect: {e}")
+
+         self._is_connected = False
+         logger.info(f"{self.name} disconnected")
+
+     # Private helper methods for SDK communication
+
+     def _get_camera_frames(self) -> dict[str, np.ndarray]:
+         """Get camera frames from SDK using v2 endpoints with caching fallback.
+
+         Returns:
+             dict: Dictionary with 'front' and 'rear' keys containing:
+                 - Current frame (if request succeeds)
+                 - Cached frame (if request fails but cache exists)
+                 - Zero array (if request fails and no cache exists yet)
+
+         Note:
+             Uses /v2/front and /v2/rear endpoints which are 15x faster than /screenshot.
+             Images are base64 encoded, resized to 640x480, and converted from BGR to RGB.
+             If request fails, returns the last successfully retrieved frame (cached).
+         """
+         frames = {}
+
+         # Get front camera
+         try:
+             response = requests.get(f"{self.sdk_base_url}/v2/front", timeout=2.0)
+             if response.status_code == 200:
+                 data = response.json()
+                 if "front_frame" in data and data["front_frame"]:
+                     front_img = self._decode_base64_image(data["front_frame"])
+                     if front_img is not None:
+                         # Resize and convert BGR to RGB
+                         front_img = cv2.resize(front_img, (640, 480))
+                         front_rgb = cv2.cvtColor(front_img, cv2.COLOR_BGR2RGB)
+                         frames["front"] = front_rgb
+                         # Cache the successful frame
+                         self._last_front_frame = front_rgb
+         except Exception as e:
+             logger.warning(f"Error fetching front camera: {e}")
+
+         # Fallback: use cache or zero array
+         if "front" not in frames:
+             if self._last_front_frame is not None:
+                 frames["front"] = self._last_front_frame
+             else:
+                 frames["front"] = np.zeros((480, 640, 3), dtype=np.uint8)
+
+         # Get rear camera
+         try:
+             response = requests.get(f"{self.sdk_base_url}/v2/rear", timeout=2.0)
+             if response.status_code == 200:
+                 data = response.json()
+                 if "rear_frame" in data and data["rear_frame"]:
+                     rear_img = self._decode_base64_image(data["rear_frame"])
+                     if rear_img is not None:
+                         # Resize and convert BGR to RGB
+                         rear_img = cv2.resize(rear_img, (640, 480))
+                         rear_rgb = cv2.cvtColor(rear_img, cv2.COLOR_BGR2RGB)
+                         frames["rear"] = rear_rgb
+                         # Cache the successful frame
+                         self._last_rear_frame = rear_rgb
+         except Exception as e:
+             logger.warning(f"Error fetching rear camera: {e}")
+
+         # Fallback: use cache or zero array
+         if "rear" not in frames:
+             if self._last_rear_frame is not None:
+                 frames["rear"] = self._last_rear_frame
+             else:
+                 frames["rear"] = np.zeros((480, 640, 3), dtype=np.uint8)
+
+         return frames
+
+     def _decode_base64_image(self, base64_string: str) -> np.ndarray | None:
+         """Decode base64 string to image.
+
+         Args:
+             base64_string: Base64 encoded image string
+
+         Returns:
+             np.ndarray: Decoded image in BGR format (OpenCV default), or None if decoding fails
+         """
+         try:
+             img_bytes = base64.b64decode(base64_string)
+             nparr = np.frombuffer(img_bytes, np.uint8)
+             img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+             return img  # Return in BGR format (OpenCV default)
+         except Exception as e:
+             logger.error(f"Error decoding image: {e}")
+             return None
+
+     def _get_robot_data(self) -> dict:
+         """Get robot telemetry data from SDK.
+
+         Returns:
+             dict: Robot telemetry data including battery, speed, orientation, GPS, etc:
+                 - Current data (if request succeeds)
+                 - Cached data (if request fails but cache exists)
+                 - Default values (if request fails and no cache exists yet)
+
+         Note:
+             Uses /data endpoint which provides comprehensive robot state.
+             If request fails, returns the last successfully retrieved data (cached).
+         """
+         try:
+             response = requests.get(f"{self.sdk_base_url}/data", timeout=2.0)
+             if response.status_code == 200:
+                 data = response.json()
+                 # Cache the successful data
+                 self._last_robot_data = data
+                 return data
+         except Exception as e:
+             logger.warning(f"Error fetching robot data: {e}")
+
+         # Fallback: use cache or default values
+         if self._last_robot_data is not None:
+             return self._last_robot_data
+         else:
+             # Return dict with default values (used only on first failure before any cache exists)
+             return {
+                 "speed": 0,
+                 "battery": 0,
+                 "orientation": 0,
+                 "latitude": 0.0,
+                 "longitude": 0.0,
+                 "gps_signal": 0,
+                 "signal_level": 0,
+                 "vibration": 0.0,
+                 "lamp": 0,
+             }
+
+     def _send_command_to_sdk(self, linear: float, angular: float, lamp: int = 0) -> bool:
+         """Send control command to SDK.
+
+         Args:
+             linear: Linear velocity command (-1 to 1)
+             angular: Angular velocity command (-1 to 1)
+             lamp: Lamp control (0=off, 1=on)
+
+         Returns:
+             bool: True if command sent successfully, False otherwise
+
+         Note:
+             Uses POST /control endpoint. Commands are sent as JSON payload.
+         """
+         try:
+             payload = {
+                 "command": {
+                     "linear": linear,
+                     "angular": angular,
+                     "lamp": lamp,
+                 }
+             }
+
+             response = requests.post(
+                 f"{self.sdk_base_url}/control",
+                 json=payload,
+                 timeout=1.0,
+             )
+
+             return response.status_code == 200
+         except Exception as e:
+             logger.error(f"Error sending command: {e}")
+             return False
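A hypothetical usage sketch for the class above. It assumes the Frodobots SDK server is already running on localhost:8000 and that `EarthRoverMiniPlusConfig` can be constructed with its default fields (not verified against the actual config signature):

```python
from lerobot.robots.earthrover_mini_plus.config_earthrover_mini_plus import EarthRoverMiniPlusConfig
from lerobot.robots.earthrover_mini_plus.robot_earthrover_mini_plus import EarthRoverMiniPlus

robot = EarthRoverMiniPlus(EarthRoverMiniPlusConfig())
robot.connect()  # verifies the SDK /data endpoint responds

obs = robot.get_observation()
print(obs["battery.level"])  # e.g. 0.87 for a battery reported at 87/100

robot.send_action({"linear.vel": 0.2, "angular.vel": 0.0})  # creep forward
robot.disconnect()  # sends a zero-velocity command before closing
```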
lerobot/src/lerobot/robots/hope_jr/config_hope_jr.py ADDED
@@ -0,0 +1,51 @@
+ #!/usr/bin/env python
+
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from dataclasses import dataclass, field
+
+ from lerobot.cameras import CameraConfig
+
+ from ..config import RobotConfig
+
+
+ @RobotConfig.register_subclass("hope_jr_hand")
+ @dataclass
+ class HopeJrHandConfig(RobotConfig):
+     port: str  # Port to connect to the hand
+     side: str  # "left" / "right"
+
+     disable_torque_on_disconnect: bool = True
+
+     cameras: dict[str, CameraConfig] = field(default_factory=dict)
+
+     def __post_init__(self):
+         super().__post_init__()
+         if self.side not in ["right", "left"]:
+             raise ValueError(self.side)
+
+
+ @RobotConfig.register_subclass("hope_jr_arm")
+ @dataclass
+ class HopeJrArmConfig(RobotConfig):
+     port: str  # Port to connect to the arm
+     disable_torque_on_disconnect: bool = True
+
+     # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
+     # Set this to a positive scalar to have the same value for all motors, or a dictionary that maps motor
+     # names to the max_relative_target value for that motor.
+     max_relative_target: float | dict[str, float] | None = None
+
+     cameras: dict[str, CameraConfig] = field(default_factory=dict)
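A minimal sketch of how the hand config validates its `side` field, assuming the inherited `RobotConfig` fields all have defaults; the port value is a placeholder:

```python
from lerobot.robots.hope_jr.config_hope_jr import HopeJrHandConfig

cfg = HopeJrHandConfig(port="/dev/tty.usbmodem0000", side="left")  # accepted

try:
    HopeJrHandConfig(port="/dev/tty.usbmodem0000", side="upside_down")
except ValueError as err:
    print(f"rejected side: {err}")  # __post_init__ only allows "left" / "right"
```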
lerobot/src/lerobot/robots/hope_jr/hope_jr.mdx ADDED
@@ -0,0 +1 @@
+ ../../../../docs/source/hope_jr.mdx
lerobot/src/lerobot/scripts/lerobot_record.py ADDED
@@ -0,0 +1,570 @@
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Records a dataset. Actions for the robot can be either generated by teleoperation or by a policy.
+
+ Example:
+
+ ```shell
+ lerobot-record \
+     --robot.type=so100_follower \
+     --robot.port=/dev/tty.usbmodem58760431541 \
+     --robot.cameras="{laptop: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \
+     --robot.id=black \
+     --dataset.repo_id=<my_username>/<my_dataset_name> \
+     --dataset.num_episodes=2 \
+     --dataset.single_task="Grab the cube" \
+     --display_data=true
+     # <- Optional: specify video codec (h264, hevc, libsvtav1). Default is libsvtav1. \
+     # --dataset.vcodec=h264 \
+     # <- Teleop optional if you want to teleoperate to record or in between episodes with a policy \
+     # --teleop.type=so100_leader \
+     # --teleop.port=/dev/tty.usbmodem58760431551 \
+     # --teleop.id=blue \
+     # <- Policy optional if you want to record with a policy \
+     # --policy.path=${HF_USER}/my_policy \
+ ```
+
+ Example recording with bimanual so100:
+ ```shell
+ lerobot-record \
+     --robot.type=bi_so_follower \
+     --robot.left_arm_config.port=/dev/tty.usbmodem5A460822851 \
+     --robot.right_arm_config.port=/dev/tty.usbmodem5A460814411 \
+     --robot.id=bimanual_follower \
+     --robot.left_arm_config.cameras='{
+         wrist: {"type": "opencv", "index_or_path": 1, "width": 640, "height": 480, "fps": 30},
+         top: {"type": "opencv", "index_or_path": 3, "width": 640, "height": 480, "fps": 30},
+     }' --robot.right_arm_config.cameras='{
+         wrist: {"type": "opencv", "index_or_path": 2, "width": 640, "height": 480, "fps": 30},
+         front: {"type": "opencv", "index_or_path": 4, "width": 640, "height": 480, "fps": 30},
+     }' \
+     --teleop.type=bi_so_leader \
+     --teleop.left_arm_config.port=/dev/tty.usbmodem5A460852721 \
+     --teleop.right_arm_config.port=/dev/tty.usbmodem5A460819811 \
+     --teleop.id=bimanual_leader \
+     --display_data=true \
+     --dataset.repo_id=${HF_USER}/bimanual-so-handover-cube \
+     --dataset.num_episodes=25 \
+     --dataset.single_task="Grab and handover the red cube to the other arm"
+ ```
+ """
+
+ import logging
+ import time
+ from dataclasses import asdict, dataclass, field
+ from pathlib import Path
+ from pprint import pformat
+ from typing import Any
+
+ from lerobot.cameras import (  # noqa: F401
+     CameraConfig,  # noqa: F401
+ )
+ from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig  # noqa: F401
+ from lerobot.cameras.reachy2_camera.configuration_reachy2_camera import Reachy2CameraConfig  # noqa: F401
+ from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig  # noqa: F401
+ from lerobot.cameras.zmq.configuration_zmq import ZMQCameraConfig  # noqa: F401
+ from lerobot.configs import parser
+ from lerobot.configs.policies import PreTrainedConfig
+ from lerobot.datasets.image_writer import safe_stop_image_writer
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
+ from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
+ from lerobot.datasets.utils import build_dataset_frame, combine_feature_dicts
+ from lerobot.datasets.video_utils import VideoEncodingManager
+ from lerobot.policies.factory import make_policy, make_pre_post_processors
+ from lerobot.policies.pretrained import PreTrainedPolicy
+ from lerobot.policies.utils import make_robot_action
+ from lerobot.processor import (
+     PolicyAction,
+     PolicyProcessorPipeline,
+     RobotAction,
+     RobotObservation,
+     RobotProcessorPipeline,
+     make_default_processors,
+ )
+ from lerobot.processor.rename_processor import rename_stats
+ from lerobot.robots import (  # noqa: F401
+     Robot,
+     RobotConfig,
+     bi_so_follower,
+     earthrover_mini_plus,
+     hope_jr,
+     koch_follower,
+     make_robot_from_config,
+     omx_follower,
+     reachy2,
+     so_follower,
+     unitree_g1,
+ )
+ from lerobot.teleoperators import (  # noqa: F401
+     Teleoperator,
+     TeleoperatorConfig,
+     bi_so_leader,
+     homunculus,
+     koch_leader,
+     make_teleoperator_from_config,
+     omx_leader,
+     reachy2_teleoperator,
+     so_leader,
+ )
+ from lerobot.teleoperators.keyboard.teleop_keyboard import KeyboardTeleop
+ from lerobot.utils.constants import ACTION, OBS_STR
+ from lerobot.utils.control_utils import (
+     init_keyboard_listener,
+     is_headless,
+     predict_action,
+     sanity_check_dataset_name,
+     sanity_check_dataset_robot_compatibility,
+ )
+ from lerobot.utils.import_utils import register_third_party_plugins
+ from lerobot.utils.robot_utils import precise_sleep
+ from lerobot.utils.utils import (
+     get_safe_torch_device,
+     init_logging,
+     log_say,
+ )
+ from lerobot.utils.visualization_utils import init_rerun, log_rerun_data
+
+
+ @dataclass
+ class DatasetRecordConfig:
+     # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).
+     repo_id: str
+     # A short but accurate description of the task performed during the recording (e.g. "Pick the Lego block and drop it in the box on the right.")
+     single_task: str
+     # Root directory where the dataset will be stored (e.g. 'dataset/path').
+     root: str | Path | None = None
+     # Limit the frames per second.
+     fps: int = 30
+     # Number of seconds for data recording for each episode.
+     episode_time_s: int | float = 60
+     # Number of seconds for resetting the environment after each episode.
+     reset_time_s: int | float = 60
+     # Number of episodes to record.
+     num_episodes: int = 50
+     # Encode frames in the dataset into video
+     video: bool = True
+     # Upload dataset to Hugging Face hub.
+     push_to_hub: bool = True
+     # Upload on private repository on the Hugging Face hub.
+     private: bool = False
+     # Add tags to your dataset on the hub.
+     tags: list[str] | None = None
+     # Number of subprocesses handling the saving of frames as PNG. Set to 0 to use threads only;
+     # set to ≥1 to use subprocesses, each using threads to write images. The best number of processes
+     # and threads depends on your system. We recommend 4 threads per camera with 0 processes.
+     # If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses.
+     num_image_writer_processes: int = 0
+     # Number of threads writing the frames as png images on disk, per camera.
+     # Too many threads might cause unstable teleoperation fps due to main thread being blocked.
+     # Not enough threads might cause low camera fps.
+     num_image_writer_threads_per_camera: int = 4
+     # Number of episodes to record before batch encoding videos.
+     # Set to 1 for immediate encoding (default behavior), or higher for batched encoding.
+     video_encoding_batch_size: int = 1
+     # Video codec for encoding videos. Options: 'h264', 'hevc', 'libsvtav1'.
+     # Use 'h264' for faster encoding on systems where AV1 encoding is CPU-heavy.
+     vcodec: str = "libsvtav1"
+     # Rename map for the observation to override the image and state keys
+     rename_map: dict[str, str] = field(default_factory=dict)
+
+     def __post_init__(self):
+         if self.single_task is None:
+             raise ValueError("You need to provide a task as argument in `single_task`.")
+
+
+ @dataclass
+ class RecordConfig:
+     robot: RobotConfig
+     dataset: DatasetRecordConfig
+     # Whether to control the robot with a teleoperator
+     teleop: TeleoperatorConfig | None = None
+     # Whether to control the robot with a policy
+     policy: PreTrainedConfig | None = None
+     # Display all cameras on screen
+     display_data: bool = False
+     # Display data on a remote Rerun server
+     display_ip: str | None = None
+     # Port of the remote Rerun server
+     display_port: int | None = None
+     # Whether to display compressed images in Rerun
+     display_compressed_images: bool = False
+     # Use vocal synthesis to read events.
+     play_sounds: bool = True
+     # Resume recording on an existing dataset.
+     resume: bool = False
+
+     def __post_init__(self):
+         # HACK: We parse the cli args again here to get the pretrained path if there was one.
+         policy_path = parser.get_path_arg("policy")
+         if policy_path:
+             cli_overrides = parser.get_cli_overrides("policy")
+             self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides)
+             self.policy.pretrained_path = policy_path
+
+         if self.teleop is None and self.policy is None:
+             raise ValueError("Choose a policy, a teleoperator or both to control the robot")
+
+     @classmethod
+     def __get_path_fields__(cls) -> list[str]:
+         """This enables the parser to load config from the policy using `--policy.path=local/dir`"""
+         return ["policy"]
+
+
+ """ --------------- record_loop() data flow --------------------------
+                           [ Robot ]
+                               V
+                [ robot.get_observation() ] ---> raw_obs
+                               V
+               [ robot_observation_processor ] ---> processed_obs
+                               V
+            .------------( ACTION LOGIC )-------------.
+            V                                         V
+   [ From Teleoperator ]                       [ From Policy ]
+            |                                         |
+   [teleop.get_action] -> raw_action           [predict_action]
+            |                                         |
+            V                                         V
+   [teleop_action_processor]                          |
+            |                                         |
+   '---> processed_teleop_action       '---> processed_policy_action
+            |                                         |
+            '--------------------.--------------------'
+                                 V
+                 [ robot_action_processor ] --> robot_action_to_send
+                                 V
+                 [ robot.send_action() ] -- (Robot Executes)
+                                 V
+                        ( Save to Dataset )
+                                 V
+                     ( Rerun Log / Loop Wait )
+ """
+
+
+ @safe_stop_image_writer
+ def record_loop(
+     robot: Robot,
+     events: dict,
+     fps: int,
+     teleop_action_processor: RobotProcessorPipeline[
+         tuple[RobotAction, RobotObservation], RobotAction
+     ],  # runs after teleop
+     robot_action_processor: RobotProcessorPipeline[
+         tuple[RobotAction, RobotObservation], RobotAction
+     ],  # runs before robot
+     robot_observation_processor: RobotProcessorPipeline[
+         RobotObservation, RobotObservation
+     ],  # runs after robot
+     dataset: LeRobotDataset | None = None,
+     teleop: Teleoperator | list[Teleoperator] | None = None,
+     policy: PreTrainedPolicy | None = None,
+     preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]] | None = None,
+     postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction] | None = None,
+     control_time_s: int | None = None,
+     single_task: str | None = None,
+     display_data: bool = False,
+     display_compressed_images: bool = False,
+ ):
+     if dataset is not None and dataset.fps != fps:
+         raise ValueError(f"The dataset fps should be equal to requested fps ({dataset.fps} != {fps}).")
+
+     teleop_arm = teleop_keyboard = None
+     if isinstance(teleop, list):
+         teleop_keyboard = next((t for t in teleop if isinstance(t, KeyboardTeleop)), None)
+         teleop_arm = next(
+             (
+                 t
+                 for t in teleop
+                 if isinstance(
+                     t,
+                     (
+                         so_leader.SO100Leader
+                         | so_leader.SO101Leader
+                         | koch_leader.KochLeader
+                         | omx_leader.OmxLeader
+                     ),
+                 )
+             ),
+             None,
+         )
+
+         if not (teleop_arm and teleop_keyboard and len(teleop) == 2 and robot.name == "lekiwi_client"):
+             raise ValueError(
+                 "For multi-teleop, the list must contain exactly one KeyboardTeleop and one arm teleoperator. Currently only supported for LeKiwi robot."
+             )
+
+     # Reset policy and processors if they are provided
+     if policy is not None and preprocessor is not None and postprocessor is not None:
+         policy.reset()
+         preprocessor.reset()
+         postprocessor.reset()
+
+     timestamp = 0
+     start_episode_t = time.perf_counter()
+     while timestamp < control_time_s:
+         start_loop_t = time.perf_counter()
+
+         if events["exit_early"]:
+             events["exit_early"] = False
+             break
+
+         # Get robot observation
+         obs = robot.get_observation()
+
+         # Applies a pipeline to the raw robot observation; the default is IdentityProcessor
+         obs_processed = robot_observation_processor(obs)
+
+         if policy is not None or dataset is not None:
+             observation_frame = build_dataset_frame(dataset.features, obs_processed, prefix=OBS_STR)
+
+         # Get action from either policy or teleop
+         if policy is not None and preprocessor is not None and postprocessor is not None:
+             action_values = predict_action(
+                 observation=observation_frame,
+                 policy=policy,
+                 device=get_safe_torch_device(policy.config.device),
+                 preprocessor=preprocessor,
+                 postprocessor=postprocessor,
+                 use_amp=policy.config.use_amp,
+                 task=single_task,
+                 robot_type=robot.robot_type,
+             )
+
+             act_processed_policy: RobotAction = make_robot_action(action_values, dataset.features)
+
+         elif policy is None and isinstance(teleop, Teleoperator):
+             act = teleop.get_action()
+
+             # Applies a pipeline to the raw teleop action; the default is IdentityProcessor
+             act_processed_teleop = teleop_action_processor((act, obs))
+
+         elif policy is None and isinstance(teleop, list):
+             arm_action = teleop_arm.get_action()
+             arm_action = {f"arm_{k}": v for k, v in arm_action.items()}
+             keyboard_action = teleop_keyboard.get_action()
+             base_action = robot._from_keyboard_to_base_action(keyboard_action)
+             act = {**arm_action, **base_action} if len(base_action) > 0 else arm_action
+             act_processed_teleop = teleop_action_processor((act, obs))
+         else:
+             logging.info(
+                 "No policy or teleoperator provided, skipping action generation. "
+                 "This is likely to happen when resetting the environment without a teleop device. "
+                 "The robot won't be at its rest position at the start of the next episode."
+             )
+             continue
+
+         # Applies a pipeline to the action; the default is IdentityProcessor
+         if policy is not None and act_processed_policy is not None:
+             action_values = act_processed_policy
+             robot_action_to_send = robot_action_processor((act_processed_policy, obs))
+         else:
+             action_values = act_processed_teleop
+             robot_action_to_send = robot_action_processor((act_processed_teleop, obs))
+
+         # Send action to robot.
+         # The action can eventually be clipped using `max_relative_target`,
+         # so the action actually sent is what gets saved in the dataset.
+         # TODO(steven, pepijn, adil): we should use a pipeline step to clip the action, so the sent action is the action that we input to the robot.
+         _sent_action = robot.send_action(robot_action_to_send)
+
+         # Write to dataset
+         if dataset is not None:
+             action_frame = build_dataset_frame(dataset.features, action_values, prefix=ACTION)
+             frame = {**observation_frame, **action_frame, "task": single_task}
+             dataset.add_frame(frame)
+
+         if display_data:
+             log_rerun_data(
+                 observation=obs_processed, action=action_values, compress_images=display_compressed_images
+             )
+
+         dt_s = time.perf_counter() - start_loop_t
+         precise_sleep(max(1 / fps - dt_s, 0.0))
+
+         timestamp = time.perf_counter() - start_episode_t
+
+
+ @parser.wrap()
+ def record(cfg: RecordConfig) -> LeRobotDataset:
+     init_logging()
+     logging.info(pformat(asdict(cfg)))
+     if cfg.display_data:
+         init_rerun(session_name="recording", ip=cfg.display_ip, port=cfg.display_port)
+     display_compressed_images = (
+         True
+         if (cfg.display_data and cfg.display_ip is not None and cfg.display_port is not None)
+         else cfg.display_compressed_images
+     )
+
+     robot = make_robot_from_config(cfg.robot)
+     teleop = make_teleoperator_from_config(cfg.teleop) if cfg.teleop is not None else None
+
+     teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors()
+
+     dataset_features = combine_feature_dicts(
+         aggregate_pipeline_dataset_features(
+             pipeline=teleop_action_processor,
+             initial_features=create_initial_features(
+                 action=robot.action_features
+             ),  # TODO(steven, pepijn): in the future this should come from teleop or policy
+             use_videos=cfg.dataset.video,
+         ),
+         aggregate_pipeline_dataset_features(
+             pipeline=robot_observation_processor,
+             initial_features=create_initial_features(observation=robot.observation_features),
+             use_videos=cfg.dataset.video,
+         ),
+     )
+
+     dataset = None
+     listener = None
+
+     try:
+         if cfg.resume:
+             dataset = LeRobotDataset(
+                 cfg.dataset.repo_id,
+                 root=cfg.dataset.root,
+                 batch_encoding_size=cfg.dataset.video_encoding_batch_size,
+                 vcodec=cfg.dataset.vcodec,
+             )
+
+             if hasattr(robot, "cameras") and len(robot.cameras) > 0:
+                 dataset.start_image_writer(
+                     num_processes=cfg.dataset.num_image_writer_processes,
+                     num_threads=cfg.dataset.num_image_writer_threads_per_camera * len(robot.cameras),
+                 )
+             sanity_check_dataset_robot_compatibility(dataset, robot, cfg.dataset.fps, dataset_features)
+         else:
+             # Create empty dataset or load existing saved episodes
+             sanity_check_dataset_name(cfg.dataset.repo_id, cfg.policy)
+             dataset = LeRobotDataset.create(
+                 cfg.dataset.repo_id,
+                 cfg.dataset.fps,
+                 root=cfg.dataset.root,
+                 robot_type=robot.name,
+                 features=dataset_features,
+                 use_videos=cfg.dataset.video,
+                 image_writer_processes=cfg.dataset.num_image_writer_processes,
+                 image_writer_threads=cfg.dataset.num_image_writer_threads_per_camera * len(robot.cameras),
+                 batch_encoding_size=cfg.dataset.video_encoding_batch_size,
+                 vcodec=cfg.dataset.vcodec,
+             )
+
+         # Load pretrained policy
+         policy = None if cfg.policy is None else make_policy(cfg.policy, ds_meta=dataset.meta)
+         preprocessor = None
+         postprocessor = None
+         if cfg.policy is not None:
+             preprocessor, postprocessor = make_pre_post_processors(
+                 policy_cfg=cfg.policy,
+                 pretrained_path=cfg.policy.pretrained_path,
+                 dataset_stats=rename_stats(dataset.meta.stats, cfg.dataset.rename_map),
+                 preprocessor_overrides={
+                     "device_processor": {"device": cfg.policy.device},
+                     "rename_observations_processor": {"rename_map": cfg.dataset.rename_map},
+                 },
+             )
+
+         robot.connect()
+         if teleop is not None:
+             teleop.connect()
+
+         listener, events = init_keyboard_listener()
+
+         with VideoEncodingManager(dataset):
+             recorded_episodes = 0
+             while recorded_episodes < cfg.dataset.num_episodes and not events["stop_recording"]:
+                 log_say(f"Recording episode {dataset.num_episodes}", cfg.play_sounds)
+                 record_loop(
+                     robot=robot,
+                     events=events,
+                     fps=cfg.dataset.fps,
+                     teleop_action_processor=teleop_action_processor,
+                     robot_action_processor=robot_action_processor,
+                     robot_observation_processor=robot_observation_processor,
+                     teleop=teleop,
+                     policy=policy,
+                     preprocessor=preprocessor,
+                     postprocessor=postprocessor,
+                     dataset=dataset,
+                     control_time_s=cfg.dataset.episode_time_s,
+                     single_task=cfg.dataset.single_task,
+                     display_data=cfg.display_data,
+                     display_compressed_images=display_compressed_images,
+                 )
+
+                 # Execute a few seconds without recording to give time to manually reset the environment.
+                 # Skip reset for the last episode to be recorded.
+                 if not events["stop_recording"] and (
+                     (recorded_episodes < cfg.dataset.num_episodes - 1) or events["rerecord_episode"]
+                 ):
+                     log_say("Reset the environment", cfg.play_sounds)
+
+                     # reset g1 robot
+                     if robot.name == "unitree_g1":
+                         robot.reset()
+
+                     record_loop(
+                         robot=robot,
+                         events=events,
+                         fps=cfg.dataset.fps,
+                         teleop_action_processor=teleop_action_processor,
+                         robot_action_processor=robot_action_processor,
+                         robot_observation_processor=robot_observation_processor,
+                         teleop=teleop,
+                         control_time_s=cfg.dataset.reset_time_s,
+                         single_task=cfg.dataset.single_task,
+                         display_data=cfg.display_data,
+                     )
+
+                 if events["rerecord_episode"]:
+                     log_say("Re-record episode", cfg.play_sounds)
+                     events["rerecord_episode"] = False
+                     events["exit_early"] = False
+                     dataset.clear_episode_buffer()
+                     continue
+
+                 dataset.save_episode()
+                 recorded_episodes += 1
+     finally:
+         log_say("Stop recording", cfg.play_sounds, blocking=True)
+
+         if dataset:
+             dataset.finalize()
+
+         if robot.is_connected:
+             robot.disconnect()
+         if teleop and teleop.is_connected:
+             teleop.disconnect()
+
+         if not is_headless() and listener:
+             listener.stop()
+
+     if cfg.dataset.push_to_hub:
+         dataset.push_to_hub(tags=cfg.dataset.tags, private=cfg.dataset.private)
+
+     log_say("Exiting", cfg.play_sounds)
+     return dataset
+
+
+ def main():
+     register_third_party_plugins()
+     record()
+
+
+ if __name__ == "__main__":
+     main()
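The outer loop in `record()` is driven by a small `events` dict produced by `init_keyboard_listener()`: `exit_early` ends the current episode, `rerecord_episode` throws the buffered frames away, and `stop_recording` ends the whole session. A schematic, stdlib-only reduction of that flow (the episode body and the dataset calls are elided; the real flags are toggled by keyboard input):

```python
# Flags normally toggled by the keyboard listener while recording.
events = {"exit_early": False, "rerecord_episode": False, "stop_recording": False}

def record_episode(num_steps: int) -> None:
    for _ in range(num_steps):
        if events["exit_early"]:      # end this episode early
            events["exit_early"] = False
            break
        ...  # observe / act / dataset.add_frame(...), as in record_loop()

recorded = 0
while recorded < 3 and not events["stop_recording"]:
    record_episode(100)
    if events["rerecord_episode"]:    # discard and redo the episode
        events["rerecord_episode"] = False
        events["exit_early"] = False
        continue                      # record() calls dataset.clear_episode_buffer() here
    recorded += 1                     # record() calls dataset.save_episode() here
```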
lerobot/src/lerobot/scripts/lerobot_setup_motors.py ADDED
@@ -0,0 +1,92 @@
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Helper to set motor ids and baudrate.
+
+ Example:
+
+ ```shell
+ lerobot-setup-motors \
+     --teleop.type=so100_leader \
+     --teleop.port=/dev/tty.usbmodem575E0031751
+ ```
+ """
+
+ from dataclasses import dataclass
+
+ import draccus
+
+ from lerobot.robots import (  # noqa: F401
+     RobotConfig,
+     bi_so_follower,
+     koch_follower,
+     lekiwi,
+     make_robot_from_config,
+     omx_follower,
+     so_follower,
+ )
+ from lerobot.teleoperators import (  # noqa: F401
+     TeleoperatorConfig,
+     bi_so_leader,
+     koch_leader,
+     make_teleoperator_from_config,
+     omx_leader,
+     so_leader,
+ )
+
+ COMPATIBLE_DEVICES = [
+     "koch_follower",
+     "koch_leader",
+     "omx_follower",
+     "omx_leader",
+     "so100_follower",
+     "so100_leader",
+     "so101_follower",
+     "so101_leader",
+     "lekiwi",
+ ]
+
+
+ @dataclass
+ class SetupConfig:
+     teleop: TeleoperatorConfig | None = None
+     robot: RobotConfig | None = None
+
+     def __post_init__(self):
+         if bool(self.teleop) == bool(self.robot):
+             raise ValueError("Choose either a teleop or a robot.")
+
+         self.device = self.robot if self.robot else self.teleop
+
+
+ @draccus.wrap()
+ def setup_motors(cfg: SetupConfig):
+     if cfg.device.type not in COMPATIBLE_DEVICES:
+         raise NotImplementedError
+
+     if isinstance(cfg.device, RobotConfig):
+         device = make_robot_from_config(cfg.device)
+     else:
+         device = make_teleoperator_from_config(cfg.device)
+
+     device.setup_motors()
+
+
+ def main():
+     setup_motors()
+
+
+ if __name__ == "__main__":
+     main()
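`SetupConfig.__post_init__` uses `bool(self.teleop) == bool(self.robot)` as an exactly-one-of-two check: the equality holds when both fields are set or when neither is, and both cases are rejected. The same pattern in isolation, with placeholder field types:

```python
from dataclasses import dataclass

@dataclass
class EitherOr:
    teleop: str | None = None
    robot: str | None = None

    def __post_init__(self):
        # True when both are set or neither is set -> reject both cases.
        if bool(self.teleop) == bool(self.robot):
            raise ValueError("Choose either a teleop or a robot.")

EitherOr(teleop="so100_leader")      # ok
# EitherOr()                         # raises: neither is set
# EitherOr(teleop="x", robot="y")    # raises: both are set
```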
lerobot/src/lerobot/scripts/lerobot_teleoperate.py ADDED
@@ -0,0 +1,244 @@
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Simple script to control a robot from teleoperation.
+
+ Example:
+
+ ```shell
+ lerobot-teleoperate \
+     --robot.type=so101_follower \
+     --robot.port=/dev/tty.usbmodem58760431541 \
+     --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \
+     --robot.id=black \
+     --teleop.type=so101_leader \
+     --teleop.port=/dev/tty.usbmodem58760431551 \
+     --teleop.id=blue \
+     --display_data=true
+ ```
+
+ Example teleoperation with bimanual so100:
+
+ ```shell
+ lerobot-teleoperate \
+     --robot.type=bi_so_follower \
+     --robot.left_arm_config.port=/dev/tty.usbmodem5A460822851 \
+     --robot.right_arm_config.port=/dev/tty.usbmodem5A460814411 \
+     --robot.id=bimanual_follower \
+     --robot.left_arm_config.cameras='{
+         wrist: {"type": "opencv", "index_or_path": 1, "width": 640, "height": 480, "fps": 30},
+     }' --robot.right_arm_config.cameras='{
+         wrist: {"type": "opencv", "index_or_path": 2, "width": 640, "height": 480, "fps": 30},
+     }' \
+     --teleop.type=bi_so_leader \
+     --teleop.left_arm_config.port=/dev/tty.usbmodem5A460852721 \
+     --teleop.right_arm_config.port=/dev/tty.usbmodem5A460819811 \
+     --teleop.id=bimanual_leader \
+     --display_data=true
+ ```
+ """
+
+ import logging
+ import time
+ from dataclasses import asdict, dataclass
+ from pprint import pformat
+
+ import rerun as rr
+
+ from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig  # noqa: F401
+ from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig  # noqa: F401
+ from lerobot.configs import parser
+ from lerobot.processor import (
+     RobotAction,
+     RobotObservation,
+     RobotProcessorPipeline,
+     make_default_processors,
+ )
+ from lerobot.robots import (  # noqa: F401
+     Robot,
+     RobotConfig,
+     bi_so_follower,
+     earthrover_mini_plus,
+     hope_jr,
+     koch_follower,
+     make_robot_from_config,
+     omx_follower,
+     reachy2,
+     so_follower,
+ )
+ from lerobot.teleoperators import (  # noqa: F401
+     Teleoperator,
+     TeleoperatorConfig,
+     bi_so_leader,
+     gamepad,
+     homunculus,
+     keyboard,
+     koch_leader,
+     make_teleoperator_from_config,
+     omx_leader,
+     reachy2_teleoperator,
+     so_leader,
+ )
+ from lerobot.utils.import_utils import register_third_party_plugins
+ from lerobot.utils.robot_utils import precise_sleep
+ from lerobot.utils.utils import init_logging, move_cursor_up
+ from lerobot.utils.visualization_utils import init_rerun, log_rerun_data
+
+
+ @dataclass
+ class TeleoperateConfig:
+     # TODO(pepijn, steven): if more robots require multiple teleoperators (like lekiwi), it's good to make this possible in teleop.py and record.py with List[Teleoperator]
+     teleop: TeleoperatorConfig
+     robot: RobotConfig
+     # Limit the maximum frames per second.
+     fps: int = 60
+     teleop_time_s: float | None = None
+     # Display all cameras on screen
+     display_data: bool = False
+     # Display data on a remote Rerun server
+     display_ip: str | None = None
+     # Port of the remote Rerun server
+     display_port: int | None = None
+     # Whether to display compressed images in Rerun
+     display_compressed_images: bool = False
+
+
+ def teleop_loop(
+     teleop: Teleoperator,
+     robot: Robot,
+     fps: int,
+     teleop_action_processor: RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction],
+     robot_action_processor: RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction],
+     robot_observation_processor: RobotProcessorPipeline[RobotObservation, RobotObservation],
+     display_data: bool = False,
+     duration: float | None = None,
+     display_compressed_images: bool = False,
+ ):
+     """
+     This function continuously reads actions from a teleoperation device, processes them through optional
+     pipelines, sends them to a robot, and optionally displays the robot's state. The loop runs at a
+     specified frequency until a set duration is reached or it is manually interrupted.
+
+     Args:
+         teleop: The teleoperator device instance providing control actions.
+         robot: The robot instance being controlled.
+         fps: The target frequency for the control loop in frames per second.
+         display_data: If True, fetches robot observations and displays them in the console and Rerun.
+         display_compressed_images: If True, compresses images before sending them to Rerun for display.
+         duration: The maximum duration of the teleoperation loop in seconds. If None, the loop runs indefinitely.
+         teleop_action_processor: An optional pipeline to process raw actions from the teleoperator.
+         robot_action_processor: An optional pipeline to process actions before they are sent to the robot.
+         robot_observation_processor: An optional pipeline to process raw observations from the robot.
+     """
+
+     display_len = max(len(key) for key in robot.action_features)
+     start = time.perf_counter()
+
+     while True:
+         loop_start = time.perf_counter()
+
+         # Get robot observation.
+         # Not really needed for now other than for visualization:
+         # teleop_action_processor can take None as an observation,
+         # given that it is the identity processor by default.
+         obs = robot.get_observation()
+
+         # Get teleop action
+         raw_action = teleop.get_action()
+
+         # Process teleop action through pipeline
+         teleop_action = teleop_action_processor((raw_action, obs))
+
+         # Process action for robot through pipeline
+         robot_action_to_send = robot_action_processor((teleop_action, obs))
+
+         # Send processed action to robot (robot_action_processor.to_output should return RobotAction)
+         _ = robot.send_action(robot_action_to_send)
+
+         if display_data:
+             # Process robot observation through pipeline
+             obs_transition = robot_observation_processor(obs)
+
+             log_rerun_data(
+                 observation=obs_transition,
+                 action=teleop_action,
+                 compress_images=display_compressed_images,
+             )
+
+             print("\n" + "-" * (display_len + 10))
+             print(f"{'NAME':<{display_len}} | {'NORM':>7}")
+             # Display the final robot action that was sent
+             for motor, value in robot_action_to_send.items():
+                 print(f"{motor:<{display_len}} | {value:>7.2f}")
+             move_cursor_up(len(robot_action_to_send) + 3)
+
+         dt_s = time.perf_counter() - loop_start
+         precise_sleep(max(1 / fps - dt_s, 0.0))
+         loop_s = time.perf_counter() - loop_start
+         print(f"Teleop loop time: {loop_s * 1e3:.2f}ms ({1 / loop_s:.0f} Hz)")
+         move_cursor_up(1)
+
+         if duration is not None and time.perf_counter() - start >= duration:
+             return
+
+
+ @parser.wrap()
+ def teleoperate(cfg: TeleoperateConfig):
+     init_logging()
+     logging.info(pformat(asdict(cfg)))
+     if cfg.display_data:
+         init_rerun(session_name="teleoperation", ip=cfg.display_ip, port=cfg.display_port)
+     display_compressed_images = (
+         True
+         if (cfg.display_data and cfg.display_ip is not None and cfg.display_port is not None)
+         else cfg.display_compressed_images
+     )
+
+     teleop = make_teleoperator_from_config(cfg.teleop)
+     robot = make_robot_from_config(cfg.robot)
+     teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors()
+
+     teleop.connect()
+     robot.connect()
+
+     try:
+         teleop_loop(
+             teleop=teleop,
+             robot=robot,
+             fps=cfg.fps,
+             display_data=cfg.display_data,
+             duration=cfg.teleop_time_s,
+             teleop_action_processor=teleop_action_processor,
+             robot_action_processor=robot_action_processor,
+             robot_observation_processor=robot_observation_processor,
+             display_compressed_images=display_compressed_images,
+         )
+     except KeyboardInterrupt:
+         pass
+     finally:
+         if cfg.display_data:
+             rr.rerun_shutdown()
+         teleop.disconnect()
+         robot.disconnect()
+
+
+ def main():
+     register_third_party_plugins()
+     teleoperate()
+
+
+ if __name__ == "__main__":
+     main()
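Note that `teleop_loop` measures two different times: `dt_s` is the work portion of an iteration, while `loop_s` also includes the padding sleep, so the printed Hz reflects the actual loop rate rather than the raw compute time. A stdlib sketch of that accounting (`time.sleep` stands in for lerobot's `precise_sleep`, and the 4 ms sleep stands in for the real work):

```python
import time

fps = 60
for _ in range(3):
    loop_start = time.perf_counter()
    time.sleep(0.004)                      # stand-in for observe / process / send_action
    dt_s = time.perf_counter() - loop_start   # work time only
    time.sleep(max(1 / fps - dt_s, 0.0))      # pad out the 1/fps budget
    loop_s = time.perf_counter() - loop_start  # work + sleep
    print(f"Teleop loop time: {loop_s * 1e3:.2f}ms ({1 / loop_s:.0f} Hz)")
```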
lerobot/src/lerobot/scripts/lerobot_train.py ADDED
@@ -0,0 +1,537 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import dataclasses
17
+ import logging
18
+ import time
19
+ from contextlib import nullcontext
20
+ from pprint import pformat
21
+ from typing import Any
22
+
23
+ import torch
24
+ from accelerate import Accelerator
25
+ from termcolor import colored
26
+ from torch.optim import Optimizer
27
+
28
+ from lerobot.configs import parser
29
+ from lerobot.configs.train import TrainPipelineConfig
30
+ from lerobot.datasets.factory import make_dataset
31
+ from lerobot.datasets.sampler import EpisodeAwareSampler
32
+ from lerobot.datasets.utils import cycle
33
+ from lerobot.envs.factory import make_env, make_env_pre_post_processors
34
+ from lerobot.envs.utils import close_envs
35
+ from lerobot.optim.factory import make_optimizer_and_scheduler
36
+ from lerobot.policies.factory import make_policy, make_pre_post_processors
37
+ from lerobot.policies.pretrained import PreTrainedPolicy
38
+ from lerobot.rl.wandb_utils import WandBLogger
39
+ from lerobot.scripts.lerobot_eval import eval_policy_all
40
+ from lerobot.utils.import_utils import register_third_party_plugins
41
+ from lerobot.utils.logging_utils import AverageMeter, MetricsTracker
42
+ from lerobot.utils.random_utils import set_seed
43
+ from lerobot.utils.train_utils import (
44
+ get_step_checkpoint_dir,
45
+ get_step_identifier,
46
+ load_training_state,
47
+ save_checkpoint,
48
+ update_last_checkpoint,
49
+ )
50
+ from lerobot.utils.utils import (
51
+ format_big_number,
52
+ has_method,
53
+ init_logging,
54
+ )
55
+
56
+
57
+ def update_policy(
58
+ train_metrics: MetricsTracker,
59
+ policy: PreTrainedPolicy,
60
+ batch: Any,
61
+ optimizer: Optimizer,
62
+ grad_clip_norm: float,
63
+ accelerator: Accelerator,
64
+ lr_scheduler=None,
65
+ lock=None,
66
+ rabc_weights_provider=None,
67
+ ) -> tuple[MetricsTracker, dict]:
68
+ """
69
+ Performs a single training step to update the policy's weights.
70
+
71
+ This function executes the forward and backward passes, clips gradients, and steps the optimizer and
72
+ learning rate scheduler. Accelerator handles mixed-precision training automatically.
73
+
74
+ Args:
75
+ train_metrics: A MetricsTracker instance to record training statistics.
76
+ policy: The policy model to be trained.
77
+ batch: A batch of training data.
78
+ optimizer: The optimizer used to update the policy's parameters.
79
+ grad_clip_norm: The maximum norm for gradient clipping.
80
+ accelerator: The Accelerator instance for distributed training and mixed precision.
81
+ lr_scheduler: An optional learning rate scheduler.
82
+ lock: An optional lock for thread-safe optimizer updates.
83
+ rabc_weights_provider: Optional RABCWeights instance for sample weighting.
84
+
85
+ Returns:
86
+ A tuple containing:
87
+ - The updated MetricsTracker with new statistics for this step.
88
+ - A dictionary of outputs from the policy's forward pass, for logging purposes.
89
+ """
90
+ start_time = time.perf_counter()
91
+ policy.train()
92
+
93
+ # Get RA-BC weights if enabled
94
+ rabc_batch_weights = None
95
+ rabc_batch_stats = None
96
+ if rabc_weights_provider is not None:
97
+ rabc_batch_weights, rabc_batch_stats = rabc_weights_provider.compute_batch_weights(batch)
98
+
99
+ # Let accelerator handle mixed precision
100
+ with accelerator.autocast():
101
+ # Use per-sample loss when RA-BC is enabled for proper weighting
102
+ if rabc_batch_weights is not None:
103
+ # Get per-sample losses
104
+ per_sample_loss, output_dict = policy.forward(batch, reduction="none")
105
+
106
+ # Apply RA-BC weights: L_RA-BC = Σ(w_i * l_i) / (Σw_i + ε)
107
+ # rabc_batch_weights is already normalized to sum to batch_size
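+ # Worked example (hypothetical numbers): for per-sample losses l = [1.0, 2.0, 3.0, 4.0]
+ # and normalized weights w = [0.0, 1.0, 1.0, 2.0] (sum = batch_size = 4), the weighted
+ # loss is (0 + 2 + 3 + 8) / (4 + 1e-6) ≈ 3.25, versus 2.5 for the unweighted mean.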
108
+ epsilon = 1e-6
109
+ loss = (per_sample_loss * rabc_batch_weights).sum() / (rabc_batch_weights.sum() + epsilon)
110
+ # Log raw mean weight (before normalization) - this is the meaningful metric
111
+ output_dict["rabc_mean_weight"] = rabc_batch_stats["raw_mean_weight"]
112
+ output_dict["rabc_num_zero_weight"] = rabc_batch_stats["num_zero_weight"]
113
+ output_dict["rabc_num_full_weight"] = rabc_batch_stats["num_full_weight"]
114
+ else:
115
+ loss, output_dict = policy.forward(batch)
116
+
117
+ # TODO(rcadene): policy.unnormalize_outputs(out_dict)
118
+
119
+ # Use accelerator's backward method
120
+ accelerator.backward(loss)
121
+
122
+ # Clip gradients if specified
123
+ if grad_clip_norm > 0:
124
+ grad_norm = accelerator.clip_grad_norm_(policy.parameters(), grad_clip_norm)
125
+ else:
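+ # No clipping requested: an infinite max-norm leaves gradients untouched and is used
+ # here purely to measure the gradient norm for logging.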
126
+ grad_norm = torch.nn.utils.clip_grad_norm_(
127
+ policy.parameters(), float("inf"), error_if_nonfinite=False
128
+ )
129
+
130
+ # Optimizer step
131
+ with lock if lock is not None else nullcontext():
132
+ optimizer.step()
133
+
134
+ optimizer.zero_grad()
135
+
136
+ # Step the PyTorch scheduler every batch instead of every epoch
137
+ if lr_scheduler is not None:
138
+ lr_scheduler.step()
139
+
140
+ # Update internal buffers if policy has update method
141
+ if has_method(accelerator.unwrap_model(policy, keep_fp32_wrapper=True), "update"):
142
+ accelerator.unwrap_model(policy, keep_fp32_wrapper=True).update()
143
+
144
+ train_metrics.loss = loss.item()
145
+ train_metrics.grad_norm = grad_norm.item()
146
+ train_metrics.lr = optimizer.param_groups[0]["lr"]
147
+ train_metrics.update_s = time.perf_counter() - start_time
148
+ return train_metrics, output_dict
149
+
150
+
151
+ @parser.wrap()
152
+ def train(cfg: TrainPipelineConfig, accelerator: Accelerator | None = None):
153
+ """
154
+ Main function to train a policy.
155
+
156
+ This function orchestrates the entire training pipeline, including:
157
+ - Setting up logging, seeding, and device configuration.
158
+ - Creating the dataset, evaluation environment (if applicable), policy, and optimizer.
159
+ - Handling resumption from a checkpoint.
160
+ - Running the main training loop, which involves fetching data batches and calling `update_policy`.
161
+ - Periodically logging metrics, saving model checkpoints, and evaluating the policy.
162
+ - Pushing the final trained model to the Hugging Face Hub if configured.
163
+
164
+ Args:
165
+ cfg: A `TrainPipelineConfig` object containing all training configurations.
166
+ accelerator: Optional Accelerator instance. If None, one will be created automatically.
167
+ """
168
+ cfg.validate()
169
+
170
+ # Create Accelerator if not provided
171
+ # It will automatically detect if running in distributed mode or single-process mode
172
+ # We set step_scheduler_with_optimizer=False to prevent accelerate from adjusting the lr_scheduler steps based on the num_processes
173
+ # We set find_unused_parameters=True to handle models with conditional computation
174
+ if accelerator is None:
175
+ from accelerate.utils import DistributedDataParallelKwargs
176
+
177
+ ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
178
+ # Accelerate auto-detects the device based on the available hardware and ignores the policy.device setting.
179
+ # Force the device to be CPU when policy.device is set to CPU.
180
+ force_cpu = cfg.policy.device == "cpu"
181
+ accelerator = Accelerator(
182
+ step_scheduler_with_optimizer=False,
183
+ kwargs_handlers=[ddp_kwargs],
184
+ cpu=force_cpu,
185
+ )
186
+
187
+ init_logging(accelerator=accelerator)
188
+
189
+ # Determine if this is the main process (for logging and checkpointing)
190
+ # When using accelerate, only the main process should log to avoid duplicate outputs
191
+ is_main_process = accelerator.is_main_process
192
+
193
+ # Only log on main process
194
+ if is_main_process:
195
+ logging.info(pformat(cfg.to_dict()))
196
+
197
+ # Initialize wandb only on main process
198
+ if cfg.wandb.enable and cfg.wandb.project and is_main_process:
199
+ wandb_logger = WandBLogger(cfg)
200
+ else:
201
+ wandb_logger = None
202
+ if is_main_process:
203
+ logging.info(colored("Logs will be saved locally.", "yellow", attrs=["bold"]))
204
+
205
+ if cfg.seed is not None:
206
+ set_seed(cfg.seed, accelerator=accelerator)
207
+
208
+ # Use accelerator's device
209
+ device = accelerator.device
210
+ torch.backends.cudnn.benchmark = True
211
+ torch.backends.cuda.matmul.allow_tf32 = True
212
+
213
+ # Dataset loading synchronization: main process downloads first to avoid race conditions
214
+ if is_main_process:
215
+ logging.info("Creating dataset")
216
+ dataset = make_dataset(cfg)
217
+
218
+ accelerator.wait_for_everyone()
219
+
220
+ # Now all other processes can safely load the dataset
221
+ if not is_main_process:
222
+ dataset = make_dataset(cfg)
223
+
224
+ # Create environment used for evaluating checkpoints during training on simulation data.
225
+ # On real-world data, there is no need to create an environment, as evaluations are done outside train.py,
226
+ # using eval.py instead, with the gym_dora environment and dora-rs.
227
+ eval_env = None
228
+ if cfg.eval_freq > 0 and cfg.env is not None:
229
+ if is_main_process:
230
+ logging.info("Creating env")
231
+ eval_env = make_env(cfg.env, n_envs=cfg.eval.batch_size, use_async_envs=cfg.eval.use_async_envs)
232
+
233
+ if is_main_process:
234
+ logging.info("Creating policy")
235
+ policy = make_policy(
236
+ cfg=cfg.policy,
237
+ ds_meta=dataset.meta,
238
+ rename_map=cfg.rename_map,
239
+ )
240
+
241
+ if cfg.peft is not None:
242
+ logging.info("Using PEFT! Wrapping model.")
243
+ # Convert CLI peft config to dict for overrides
244
+ peft_cli_overrides = dataclasses.asdict(cfg.peft)
245
+ policy = policy.wrap_with_peft(peft_cli_overrides=peft_cli_overrides)
246
+
247
+ # Wait for all processes to finish policy creation before continuing
248
+ accelerator.wait_for_everyone()
249
+
250
+ # Create processors - only provide dataset_stats if not resuming from saved processors
251
+ processor_kwargs = {}
252
+ postprocessor_kwargs = {}
253
+ if (cfg.policy.pretrained_path and not cfg.resume) or not cfg.policy.pretrained_path:
254
+ # Only provide dataset_stats when not resuming from saved processor state
255
+ processor_kwargs["dataset_stats"] = dataset.meta.stats
256
+
257
+ # For SARM, always provide dataset_meta for progress normalization
258
+ if cfg.policy.type == "sarm":
259
+ processor_kwargs["dataset_meta"] = dataset.meta
260
+
261
+ if cfg.policy.pretrained_path is not None:
262
+ processor_kwargs["preprocessor_overrides"] = {
263
+ "device_processor": {"device": device.type},
264
+ "normalizer_processor": {
265
+ "stats": dataset.meta.stats,
266
+ "features": {**policy.config.input_features, **policy.config.output_features},
267
+ "norm_map": policy.config.normalization_mapping,
268
+ },
269
+ }
270
+ processor_kwargs["preprocessor_overrides"]["rename_observations_processor"] = {
271
+ "rename_map": cfg.rename_map
272
+ }
273
+ postprocessor_kwargs["postprocessor_overrides"] = {
274
+ "unnormalizer_processor": {
275
+ "stats": dataset.meta.stats,
276
+ "features": policy.config.output_features,
277
+ "norm_map": policy.config.normalization_mapping,
278
+ },
279
+ }
280
+
281
+ preprocessor, postprocessor = make_pre_post_processors(
282
+ policy_cfg=cfg.policy,
283
+ pretrained_path=cfg.policy.pretrained_path,
284
+ **processor_kwargs,
285
+ **postprocessor_kwargs,
286
+ )
287
+
288
+ if is_main_process:
289
+ logging.info("Creating optimizer and scheduler")
290
+ optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy)
291
+
292
+ # Load precomputed SARM progress for RA-BC if enabled
293
+ # Generate progress using: src/lerobot/policies/sarm/compute_rabc_weights.py
294
+ rabc_weights = None
295
+ if cfg.use_rabc:
296
+ from lerobot.utils.rabc import RABCWeights
297
+
298
+ # Get chunk_size from policy config
299
+ chunk_size = getattr(policy.config, "chunk_size", None)
300
+ if chunk_size is None:
301
+ raise ValueError("Chunk size is not found in policy config")
302
+
303
+ head_mode = getattr(cfg, "rabc_head_mode", "sparse")
304
+ logging.info(f"Loading SARM progress for RA-BC from {cfg.rabc_progress_path}")
305
+ logging.info(f"Using chunk_size={chunk_size} from policy config, head_mode={head_mode}")
306
+ rabc_weights = RABCWeights(
307
+ progress_path=cfg.rabc_progress_path,
308
+ chunk_size=chunk_size,
309
+ head_mode=head_mode,
310
+ kappa=getattr(cfg, "rabc_kappa", 0.01),
311
+ epsilon=getattr(cfg, "rabc_epsilon", 1e-6),
312
+ device=device,
313
+ )
314
+
315
+ step = 0 # number of policy updates (forward + backward + optim)
316
+
317
+ if cfg.resume:
318
+ step, optimizer, lr_scheduler = load_training_state(cfg.checkpoint_path, optimizer, lr_scheduler)
319
+
320
+ num_learnable_params = sum(p.numel() for p in policy.parameters() if p.requires_grad)
321
+ num_total_params = sum(p.numel() for p in policy.parameters())
322
+
323
+ if is_main_process:
324
+ logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {cfg.output_dir}")
325
+ if cfg.env is not None:
326
+ logging.info(f"{cfg.env.task=}")
327
+ logging.info("Creating environment processors")
328
+ env_preprocessor, env_postprocessor = make_env_pre_post_processors(
329
+ env_cfg=cfg.env, policy_cfg=cfg.policy
330
+ )
331
+ logging.info(f"{cfg.steps=} ({format_big_number(cfg.steps)})")
332
+ logging.info(f"{dataset.num_frames=} ({format_big_number(dataset.num_frames)})")
333
+ logging.info(f"{dataset.num_episodes=}")
334
+ num_processes = accelerator.num_processes
335
+ effective_bs = cfg.batch_size * num_processes
336
+ logging.info(f"Effective batch size: {cfg.batch_size} x {num_processes} = {effective_bs}")
337
+ logging.info(f"{num_learnable_params=} ({format_big_number(num_learnable_params)})")
338
+ logging.info(f"{num_total_params=} ({format_big_number(num_total_params)})")
339
+
340
+ # create dataloader for offline training
341
+ if hasattr(cfg.policy, "drop_n_last_frames"):
342
+ shuffle = False
343
+ sampler = EpisodeAwareSampler(
344
+ dataset.meta.episodes["dataset_from_index"],
345
+ dataset.meta.episodes["dataset_to_index"],
346
+ episode_indices_to_use=dataset.episodes,
347
+ drop_n_last_frames=cfg.policy.drop_n_last_frames,
348
+ shuffle=True,
349
+ )
350
+ else:
351
+ shuffle = True
352
+ sampler = None
353
+
354
+ dataloader = torch.utils.data.DataLoader(
355
+ dataset,
356
+ num_workers=cfg.num_workers,
357
+ batch_size=cfg.batch_size,
358
+ shuffle=shuffle and not cfg.dataset.streaming,
359
+ sampler=sampler,
360
+ pin_memory=device.type == "cuda",
361
+ drop_last=False,
362
+ prefetch_factor=2 if cfg.num_workers > 0 else None,
363
+ )
364
+
365
+ # Prepare everything with accelerator
366
+ accelerator.wait_for_everyone()
367
+ policy, optimizer, dataloader, lr_scheduler = accelerator.prepare(
368
+ policy, optimizer, dataloader, lr_scheduler
369
+ )
370
+ dl_iter = cycle(dataloader)
371
+
372
+ policy.train()
373
+
374
+ train_metrics = {
375
+ "loss": AverageMeter("loss", ":.3f"),
376
+ "grad_norm": AverageMeter("grdn", ":.3f"),
377
+ "lr": AverageMeter("lr", ":0.1e"),
378
+ "update_s": AverageMeter("updt_s", ":.3f"),
379
+ "dataloading_s": AverageMeter("data_s", ":.3f"),
380
+ }
381
+
382
+ # Use effective batch size for proper epoch calculation in distributed training
383
+ effective_batch_size = cfg.batch_size * accelerator.num_processes
384
+ train_tracker = MetricsTracker(
385
+ effective_batch_size,
386
+ dataset.num_frames,
387
+ dataset.num_episodes,
388
+ train_metrics,
389
+ initial_step=step,
390
+ accelerator=accelerator,
391
+ )
392
+
393
+ if is_main_process:
394
+ logging.info(
395
+ f"Start offline training on a fixed dataset, with effective batch size: {effective_batch_size}"
396
+ )
397
+
398
+ for _ in range(step, cfg.steps):
399
+ start_time = time.perf_counter()
400
+ batch = next(dl_iter)
401
+ batch = preprocessor(batch)
402
+ train_tracker.dataloading_s = time.perf_counter() - start_time
403
+
404
+ train_tracker, output_dict = update_policy(
405
+ train_tracker,
406
+ policy,
407
+ batch,
408
+ optimizer,
409
+ cfg.optimizer.grad_clip_norm,
410
+ accelerator=accelerator,
411
+ lr_scheduler=lr_scheduler,
412
+ rabc_weights_provider=rabc_weights,
413
+ )
414
+
415
+ # Note: eval and checkpoint happen *after* the `step`th training update has completed, so we
416
+ # increment `step` here.
417
+ step += 1
418
+ train_tracker.step()
419
+ is_log_step = cfg.log_freq > 0 and step % cfg.log_freq == 0 and is_main_process
420
+ is_saving_step = step % cfg.save_freq == 0 or step == cfg.steps
421
+ is_eval_step = cfg.eval_freq > 0 and step % cfg.eval_freq == 0
422
+
423
+ if is_log_step:
424
+ logging.info(train_tracker)
425
+ if wandb_logger:
426
+ wandb_log_dict = train_tracker.to_dict()
427
+ if output_dict:
428
+ wandb_log_dict.update(output_dict)
429
+ # Log RA-BC statistics if enabled
430
+ if rabc_weights is not None:
431
+ rabc_stats = rabc_weights.get_stats()
432
+ wandb_log_dict.update(
433
+ {
434
+ "rabc_delta_mean": rabc_stats["delta_mean"],
435
+ "rabc_delta_std": rabc_stats["delta_std"],
436
+ "rabc_num_frames": rabc_stats["num_frames"],
437
+ }
438
+ )
439
+ wandb_logger.log_dict(wandb_log_dict, step)
440
+ train_tracker.reset_averages()
441
+
442
+ if cfg.save_checkpoint and is_saving_step:
443
+ if is_main_process:
444
+ logging.info(f"Checkpoint policy after step {step}")
445
+ checkpoint_dir = get_step_checkpoint_dir(cfg.output_dir, cfg.steps, step)
446
+ save_checkpoint(
447
+ checkpoint_dir=checkpoint_dir,
448
+ step=step,
449
+ cfg=cfg,
450
+ policy=accelerator.unwrap_model(policy),
451
+ optimizer=optimizer,
452
+ scheduler=lr_scheduler,
453
+ preprocessor=preprocessor,
454
+ postprocessor=postprocessor,
455
+ )
456
+ update_last_checkpoint(checkpoint_dir)
457
+ if wandb_logger:
458
+ wandb_logger.log_policy(checkpoint_dir)
459
+
460
+ accelerator.wait_for_everyone()
461
+
462
+ if cfg.env and is_eval_step:
463
+ if is_main_process:
464
+ step_id = get_step_identifier(step, cfg.steps)
465
+ logging.info(f"Eval policy at step {step}")
466
+ with torch.no_grad(), accelerator.autocast():
467
+ eval_info = eval_policy_all(
468
+ envs=eval_env, # dict[suite][task_id] -> vec_env
469
+ policy=accelerator.unwrap_model(policy),
470
+ env_preprocessor=env_preprocessor,
471
+ env_postprocessor=env_postprocessor,
472
+ preprocessor=preprocessor,
473
+ postprocessor=postprocessor,
474
+ n_episodes=cfg.eval.n_episodes,
475
+ videos_dir=cfg.output_dir / "eval" / f"videos_step_{step_id}",
476
+ max_episodes_rendered=4,
477
+ start_seed=cfg.seed,
478
+ max_parallel_tasks=cfg.env.max_parallel_tasks,
479
+ )
480
+ # overall metrics (suite-agnostic)
481
+ aggregated = eval_info["overall"]
482
+
483
+ # optional: per-suite logging
484
+ for suite, suite_info in eval_info.items():
485
+ logging.info("Suite %s aggregated: %s", suite, suite_info)
486
+
487
+ # meters/tracker
488
+ eval_metrics = {
489
+ "avg_sum_reward": AverageMeter("∑rwrd", ":.3f"),
490
+ "pc_success": AverageMeter("success", ":.1f"),
491
+ "eval_s": AverageMeter("eval_s", ":.3f"),
492
+ }
493
+ eval_tracker = MetricsTracker(
494
+ cfg.batch_size,
495
+ dataset.num_frames,
496
+ dataset.num_episodes,
497
+ eval_metrics,
498
+ initial_step=step,
499
+ accelerator=accelerator,
500
+ )
501
+ eval_tracker.eval_s = aggregated.pop("eval_s")
502
+ eval_tracker.avg_sum_reward = aggregated.pop("avg_sum_reward")
503
+ eval_tracker.pc_success = aggregated.pop("pc_success")
504
+ if wandb_logger:
505
+ wandb_log_dict = {**eval_tracker.to_dict(), **eval_info}
506
+ wandb_logger.log_dict(wandb_log_dict, step, mode="eval")
507
+ wandb_logger.log_video(eval_info["overall"]["video_paths"][0], step, mode="eval")
508
+
509
+ accelerator.wait_for_everyone()
510
+
511
+ if eval_env:
512
+ close_envs(eval_env)
513
+
514
+ if is_main_process:
515
+ logging.info("End of training")
516
+
517
+ if cfg.policy.push_to_hub:
518
+ unwrapped_policy = accelerator.unwrap_model(policy)
519
+ if cfg.policy.use_peft:
520
+ unwrapped_policy.push_model_to_hub(cfg, peft_model=unwrapped_policy)
521
+ else:
522
+ unwrapped_policy.push_model_to_hub(cfg)
523
+ preprocessor.push_to_hub(cfg.policy.repo_id)
524
+ postprocessor.push_to_hub(cfg.policy.repo_id)
525
+
526
+ # Properly clean up the distributed process group
527
+ accelerator.wait_for_everyone()
528
+ accelerator.end_training()
529
+
530
+
531
+ def main():
532
+ register_third_party_plugins()
533
+ train()
534
+
535
+
536
+ if __name__ == "__main__":
537
+ main()
lerobot/src/lerobot/scripts/lerobot_train_tokenizer.py ADDED
@@ -0,0 +1,604 @@
1
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Train FAST tokenizer for action encoding.
15
+
16
+ This script:
17
+ 1. Loads action chunks from LeRobotDataset (with episode sampling)
18
+ 2. Optionally applies delta transforms (relative vs absolute actions)
19
+ 3. Extracts specified action dimensions for encoding
20
+ 4. Applies normalization (MEAN_STD, MIN_MAX, QUANTILES, or other modes)
21
+ 5. Trains FAST tokenizer (BPE on DCT coefficients) on the action chunks
22
+ 6. Saves tokenizer to output directory
23
+ 7. Optionally pushes tokenizer to Hugging Face Hub
24
+ 8. Reports compression statistics
25
+
26
+ Example:
27
+
28
+ ```shell
29
+ lerobot-train-tokenizer \
30
+ --repo_id=user/dataset_name \
31
+ --action_horizon=10 \
32
+ --max_episodes=100 \
33
+ --sample_fraction=0.1 \
34
+ --encoded_dims="0:6" \
35
+ --delta_dims="0,1,2,3,4,5" \
36
+ --use_delta_transform=true \
37
+ --state_key="observation.state" \
38
+ --normalization_mode="QUANTILES" \
39
+ --vocab_size=1024 \
40
+ --scale=10.0 \
41
+ --output_dir="./fast_tokenizer_dataset_name" \
42
+ --push_to_hub=true \
43
+ --hub_repo_id="user/fast_tokenizer_dataset_name" \
44
+ --hub_private=false
45
+ """
46
+
47
+ import json
48
+ from dataclasses import dataclass
49
+ from pathlib import Path
50
+ from typing import TYPE_CHECKING
51
+
52
+ import numpy as np
53
+ import torch
54
+ from huggingface_hub import HfApi
55
+
56
+ from lerobot.utils.import_utils import _transformers_available
57
+
58
+ if TYPE_CHECKING or _transformers_available:
59
+ from transformers import AutoProcessor
60
+ else:
61
+ AutoProcessor = None
62
+
63
+ from lerobot.configs import parser
64
+ from lerobot.configs.types import NormalizationMode
65
+ from lerobot.datasets.lerobot_dataset import LeRobotDataset
66
+ from lerobot.utils.constants import ACTION, OBS_STATE
67
+
68
+
69
+ @dataclass
70
+ class TokenizerTrainingConfig:
71
+ """Configuration for training FAST tokenizer."""
72
+
73
+ # LeRobot dataset repository ID
74
+ repo_id: str
75
+ # Root directory for dataset (default: ~/.cache/huggingface/lerobot)
76
+ root: str | None = None
77
+ # Number of future actions in each chunk
78
+ action_horizon: int = 10
79
+ # Max episodes to use (None = all episodes in dataset)
80
+ max_episodes: int | None = None
81
+ # Fraction of chunks to sample per episode
82
+ sample_fraction: float = 0.1
83
+ # Comma-separated dimension ranges to encode (e.g., "0:6,7:23")
84
+ encoded_dims: str = "0:6,7:23"
85
+ # Comma-separated dimension indices for delta transform (e.g., "0,1,2,3,4,5")
86
+ delta_dims: str | None = None
87
+ # Whether to apply delta transform (relative actions vs absolute actions)
88
+ use_delta_transform: bool = False
89
+ # Dataset key for state observations (default: "observation.state")
90
+ state_key: str = OBS_STATE
91
+ # Normalization mode (MEAN_STD, MIN_MAX, QUANTILES, QUANTILE10, IDENTITY)
92
+ normalization_mode: str = "QUANTILES"
93
+ # FAST vocabulary size (BPE vocab size)
94
+ vocab_size: int = 1024
95
+ # DCT scaling factor (default: 10.0)
96
+ scale: float = 10.0
97
+ # Directory to save tokenizer (default: ./fast_tokenizer_{repo_id})
98
+ output_dir: str | None = None
99
+ # Whether to push the tokenizer to Hugging Face Hub
100
+ push_to_hub: bool = False
101
+ # Hub repository ID (e.g., "username/tokenizer-name"). If None, uses output_dir name
102
+ hub_repo_id: str | None = None
103
+ # Whether to create a private repository on the Hub
104
+ hub_private: bool = False
105
+
106
+
107
+ def apply_delta_transform(state: np.ndarray, actions: np.ndarray, delta_dims: list[int] | None) -> np.ndarray:
108
+ """Apply delta transform to specified dimensions.
109
+
110
+ Args:
111
+ state: Current state [D]
112
+ actions: Future actions [D]
113
+ delta_dims: List of dimension indices to apply delta transform to
114
+
115
+ Returns:
116
+ Transformed actions [D]
117
+ """
118
+ if delta_dims is None or len(delta_dims) == 0:
119
+ return actions
120
+
121
+ delta_actions = actions.copy()
122
+ for dim in delta_dims:
123
+ delta_actions[dim] = actions[dim] - state[dim]
124
+
125
+ return delta_actions
126
+
127
+
128
+ def apply_normalization(
129
+ data: np.ndarray,
130
+ stats: dict[str, np.ndarray],
131
+ mode: NormalizationMode,
132
+ eps: float = 1e-8,
133
+ ) -> np.ndarray:
134
+ """Apply normalization to data based on the specified mode.
135
+
136
+ Args:
137
+ data: Data to normalize [N, H, D] or [D]
138
+ stats: Dictionary of statistics (mean, std, min, max, q01, q99, q10, q90)
139
+ mode: Normalization mode to apply
140
+ eps: Small epsilon for numerical stability
141
+
142
+ Returns:
143
+ Normalized data with the same shape as input
144
+ """
145
+ if mode == NormalizationMode.IDENTITY:
146
+ return data
147
+
148
+ if mode == NormalizationMode.MEAN_STD:
149
+ mean = stats.get("mean")
150
+ std = stats.get("std")
151
+ if mean is None or std is None:
152
+ raise ValueError("MEAN_STD mode requires 'mean' and 'std' in stats")
153
+ return (data - mean) / np.maximum(std, eps)
154
+
155
+ if mode == NormalizationMode.MIN_MAX:
156
+ min_val = stats.get("min")
157
+ max_val = stats.get("max")
158
+ if min_val is None or max_val is None:
159
+ raise ValueError("MIN_MAX mode requires 'min' and 'max' in stats")
160
+ denom = np.maximum(max_val - min_val, eps)
161
+ return 2.0 * (data - min_val) / denom - 1.0
162
+
163
+ if mode == NormalizationMode.QUANTILES:
164
+ q01 = stats.get("q01")
165
+ q99 = stats.get("q99")
166
+ if q01 is None or q99 is None:
167
+ raise ValueError("QUANTILES mode requires 'q01' and 'q99' in stats")
168
+ denom = np.maximum(q99 - q01, eps)
169
+ # Clip to quantile range then normalize to [-1, 1]
170
+ clipped = np.clip(data, q01, q99)
171
+ return 2.0 * (clipped - q01) / denom - 1.0
172
+
173
+ if mode == NormalizationMode.QUANTILE10:
174
+ q10 = stats.get("q10")
175
+ q90 = stats.get("q90")
176
+ if q10 is None or q90 is None:
177
+ raise ValueError("QUANTILE10 mode requires 'q10' and 'q90' in stats")
178
+ denom = np.maximum(q90 - q10, eps)
179
+ # Clip to quantile range then normalize to [-1, 1]
180
+ clipped = np.clip(data, q10, q90)
181
+ return 2.0 * (clipped - q10) / denom - 1.0
182
+
183
+ raise ValueError(f"Unsupported normalization mode: {mode}")
184
+
185
+
186
+ def process_episode(args):
187
+ """Process single episode and return action chunks."""
188
+ dataset, ep_idx, action_horizon, delta_dims, sample_fraction, state_key, use_delta_transform = args
189
+
190
+ try:
191
+ # get episode info
192
+ ep_info = dataset.meta.episodes[ep_idx]
193
+ from_idx = ep_info["dataset_from_index"]
194
+ to_idx = ep_info["dataset_to_index"]
195
+ ep_length = to_idx - from_idx
196
+
197
+ if ep_length < action_horizon:
198
+ return None
199
+
200
+ # load all frames in episode
201
+ # if dataset has episode filtering, we need to use the mapping
202
+ states = []
203
+ actions = []
204
+
205
+ for abs_idx in range(from_idx, to_idx):
206
+ # map absolute index to relative index if needed
207
+ if dataset._absolute_to_relative_idx is not None:
208
+ if abs_idx not in dataset._absolute_to_relative_idx:
209
+ # this episode's frames aren't in the filtered dataset
210
+ return None
211
+ rel_idx = dataset._absolute_to_relative_idx[abs_idx]
212
+ else:
213
+ rel_idx = abs_idx
214
+
215
+ frame = dataset.hf_dataset[rel_idx]
216
+
217
+ # get state (could be from observation.state or other state key)
218
+ if state_key in frame:
219
+ state = (
220
+ frame[state_key].numpy()
221
+ if torch.is_tensor(frame[state_key])
222
+ else np.array(frame[state_key])
223
+ )
224
+ else:
225
+ # if no state key, use zeros (no delta transform)
226
+ state = np.zeros_like(
227
+ frame[ACTION].numpy() if torch.is_tensor(frame[ACTION]) else np.array(frame[ACTION])
228
+ )
229
+
230
+ action = frame[ACTION].numpy() if torch.is_tensor(frame[ACTION]) else np.array(frame[ACTION])
231
+
232
+ states.append(state)
233
+ actions.append(action)
234
+
235
+ states = np.array(states)
236
+ actions = np.array(actions)
237
+
238
+ # create action chunks (sliding window)
239
+ # all actions in a chunk are relative to the FIRST state in that chunk
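+ # e.g., an episode with 12 frames and action_horizon=10 yields 12 - 10 + 1 = 3 chunks,
+ # starting at t = 0, 1, 2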
240
+ action_chunks = []
241
+
242
+ for i in range(len(states) - action_horizon + 1):
243
+ current_state = states[i] # First state in chunk
244
+ future_absolute_actions = actions[i : i + action_horizon]
245
+
246
+ if use_delta_transform:
247
+ # relative actions
248
+ delta_chunk = np.zeros_like(future_absolute_actions)
249
+ for t in range(action_horizon):
250
+ delta_chunk[t] = apply_delta_transform(
251
+ current_state,
252
+ future_absolute_actions[t],
253
+ delta_dims,
254
+ )
255
+ action_chunks.append(delta_chunk)
256
+ else:
257
+ # absolute actions (no delta)
258
+ action_chunks.append(future_absolute_actions)
259
+
260
+ if len(action_chunks) == 0:
261
+ return None
262
+
263
+ action_chunks = np.array(action_chunks)
264
+
265
+ # sample chunks
266
+ if sample_fraction < 1.0:
267
+ n_chunks = len(action_chunks)
268
+ n_samples = max(1, int(n_chunks * sample_fraction))
269
+ episode_seed = hash(ep_idx) % (2**31)
270
+ rng = np.random.RandomState(episode_seed)
271
+ indices = rng.choice(n_chunks, size=n_samples, replace=False)
272
+ action_chunks = action_chunks[indices]
273
+
274
+ return action_chunks
275
+
276
+ except Exception as e:
277
+ print(f"Error processing episode {ep_idx}: {e}")
278
+ import traceback
279
+
280
+ traceback.print_exc()
281
+ return None
282
+
283
+
284
+ def train_fast_tokenizer(
285
+ action_chunks: np.ndarray,
286
+ vocab_size: int = 1024,
287
+ scale: float = 10.0,
288
+ ) -> AutoProcessor:
289
+ """
290
+ Train FAST tokenizer (BPE on DCT coefficients) on action chunks.
291
+
292
+ Uses the .fit() method to train a new tokenizer on the provided data.
293
+
294
+ Args:
295
+ action_chunks: Array of action chunks [N, H, D] where N=num_chunks, H=horizon, D=action_dim
296
+ vocab_size: BPE vocabulary size
297
+ scale: DCT scaling factor for quantization
298
+
299
+ Returns:
300
+ Trained FAST tokenizer
301
+ """
302
+ print(f"Training FAST tokenizer on {len(action_chunks)} action chunks...")
303
+ print(f"Action chunk shape: {action_chunks.shape}")
304
+ print(f"Vocab size: {vocab_size}")
305
+ print(f"DCT scale: {scale}")
306
+
307
+ # download the tokenizer source code (not pretrained weights)
308
+ # we'll train a new tokenizer on our own data
309
+ base_tokenizer = AutoProcessor.from_pretrained("physical-intelligence/fast", trust_remote_code=True)
310
+
311
+ # convert action_chunks array to list of arrays (expected by .fit())
312
+ action_data_list = [action_chunks[i] for i in range(len(action_chunks))]
313
+
314
+ # train the new tokenizer on our action data using .fit()
315
+ # this trains the BPE tokenizer on DCT coefficients
316
+ print("Training new tokenizer (this may take a few minutes)...")
317
+ tokenizer = base_tokenizer.fit(
318
+ action_data_list,
319
+ scale=scale,
320
+ vocab_size=vocab_size,
321
+ time_horizon=action_chunks.shape[1], # action_horizon
322
+ action_dim=action_chunks.shape[2], # encoded dimensions
323
+ )
324
+ print("✓ Tokenizer training complete!")
325
+
326
+ # validate it works
327
+ sample_chunk = action_chunks[0]
328
+ encoded = tokenizer(sample_chunk[None])[0]
329
+ if isinstance(encoded, list):
330
+ encoded = np.array(encoded)
331
+ print(f"Sample encoding: {len(encoded)} tokens for chunk shape {sample_chunk.shape}")
332
+
333
+ return tokenizer
334
+
335
+
336
+ def compute_compression_stats(tokenizer, action_chunks: np.ndarray):
337
+ """Compute compression statistics."""
338
+ print("\nComputing compression statistics...")
339
+
340
+ # sample for stats (use max 1000 chunks for speed)
341
+ sample_size = min(1000, len(action_chunks))
342
+ sample_indices = np.random.RandomState(42).choice(len(action_chunks), size=sample_size, replace=False)
343
+ sample_chunks = action_chunks[sample_indices]
344
+
345
+ token_lengths = []
346
+ for chunk in sample_chunks:
347
+ encoded = tokenizer(chunk[None])[0]
348
+ if isinstance(encoded, list):
349
+ token_lengths.append(len(encoded))
350
+ else:
351
+ token_lengths.append(encoded.shape[0] if hasattr(encoded, "shape") else len(encoded))
352
+
353
+ token_lengths = np.array(token_lengths)
354
+
355
+ # compression ratio: (H * D) / avg_tokens
356
+ input_size = action_chunks.shape[1] * action_chunks.shape[2]
357
+ avg_tokens = np.mean(token_lengths)
358
+ compression_ratio = input_size / avg_tokens
359
+
360
+ stats = {
361
+ "compression_ratio": float(compression_ratio),
362
+ "mean_token_length": float(np.mean(token_lengths)),
363
+ "p99_token_length": float(np.percentile(token_lengths, 99)),
364
+ "min_token_length": float(np.min(token_lengths)),
365
+ "max_token_length": float(np.max(token_lengths)),
366
+ }
367
+
368
+ print("Compression Statistics:")
369
+ print(f" Average compression ratio: {stats['compression_ratio']:.2f}x")
370
+ print(f" Mean token length: {stats['mean_token_length']:.1f}")
371
+ print(f" P99 token length: {stats['p99_token_length']:.0f}")
372
+ print(f" Min token length: {stats['min_token_length']:.0f}")
373
+ print(f" Max token length: {stats['max_token_length']:.0f}")
374
+
375
+ return stats
376
+
377
+
378
+ @parser.wrap()
379
+ def train_tokenizer(cfg: TokenizerTrainingConfig):
380
+ """
381
+ Train FAST tokenizer for action encoding.
382
+
383
+ Args:
384
+ cfg: TokenizerTrainingConfig dataclass with all configuration parameters
385
+ """
386
+ # load dataset
387
+ print(f"Loading dataset: {cfg.repo_id}")
388
+ dataset = LeRobotDataset(repo_id=cfg.repo_id, root=cfg.root)
389
+ print(f"Dataset loaded: {dataset.num_episodes} episodes, {dataset.num_frames} frames")
390
+
391
+ # parse normalization mode
392
+ try:
393
+ norm_mode = NormalizationMode(cfg.normalization_mode)
394
+ except ValueError as err:
395
+ raise ValueError(
396
+ f"Invalid normalization_mode: {cfg.normalization_mode}. "
397
+ f"Must be one of: {', '.join([m.value for m in NormalizationMode])}"
398
+ ) from err
399
+ print(f"Normalization mode: {norm_mode.value}")
400
+
401
+ # parse encoded dimensions
402
+ encoded_dim_ranges = []
403
+ for range_str in cfg.encoded_dims.split(","):
404
+ start, end = map(int, range_str.strip().split(":"))
405
+ encoded_dim_ranges.append((start, end))
406
+
407
+ total_encoded_dims = sum(end - start for start, end in encoded_dim_ranges)
408
+ print(f"Encoding {total_encoded_dims} dimensions: {cfg.encoded_dims}")
409
+
410
+ # parse delta dimensions
411
+ delta_dim_list = None
412
+ if cfg.delta_dims is not None and cfg.delta_dims.strip():
413
+ delta_dim_list = [int(d.strip()) for d in cfg.delta_dims.split(",")]
414
+ print(f"Delta dimensions: {delta_dim_list}")
415
+ else:
416
+ print("No delta dimensions specified")
417
+
418
+ print(f"Use delta transform: {cfg.use_delta_transform}")
419
+ if cfg.use_delta_transform and (delta_dim_list is None or len(delta_dim_list) == 0):
420
+ print("Warning: use_delta_transform=True but no delta_dims specified. No delta will be applied.")
421
+
422
+ print(f"Action horizon: {cfg.action_horizon}")
423
+ print(f"State key: {cfg.state_key}")
424
+
425
+ # determine episodes to process
426
+ num_episodes = dataset.num_episodes
427
+ if cfg.max_episodes is not None:
428
+ num_episodes = min(cfg.max_episodes, num_episodes)
429
+
430
+ print(f"Processing {num_episodes} episodes...")
431
+
432
+ # process episodes sequentially (to avoid pickling issues with dataset)
433
+ all_chunks = []
434
+ for ep_idx in range(num_episodes):
435
+ if ep_idx % 10 == 0:
436
+ print(f" Processing episode {ep_idx}/{num_episodes}...")
437
+
438
+ chunks = process_episode(
439
+ (
440
+ dataset,
441
+ ep_idx,
442
+ cfg.action_horizon,
443
+ delta_dim_list,
444
+ cfg.sample_fraction,
445
+ cfg.state_key,
446
+ cfg.use_delta_transform,
447
+ )
448
+ )
449
+ if chunks is not None:
450
+ all_chunks.append(chunks)
451
+
452
+ # concatenate all chunks
453
+ all_chunks = np.concatenate(all_chunks, axis=0)
454
+ print(f"Collected {len(all_chunks)} action chunks")
455
+
456
+ # extract only encoded dimensions FIRST (before normalization)
457
+ encoded_chunks = []
458
+ for start, end in encoded_dim_ranges:
459
+ encoded_chunks.append(all_chunks[:, :, start:end])
460
+ encoded_chunks = np.concatenate(encoded_chunks, axis=-1) # [N, H, D_encoded]
461
+ print(f"Extracted {encoded_chunks.shape[-1]} encoded dimensions")
462
+
463
+ # apply normalization to encoded dimensions
464
+ print("\nBefore normalization - overall stats:")
465
+ print(f" Min: {np.min(encoded_chunks):.4f}, Max: {np.max(encoded_chunks):.4f}")
466
+ print(f" Mean: {np.mean(encoded_chunks):.4f}, Std: {np.std(encoded_chunks):.4f}")
467
+
468
+ # get normalization stats from dataset
469
+ norm_stats = dataset.meta.stats
470
+ if norm_stats is not None and ACTION in norm_stats:
471
+ action_stats = norm_stats[ACTION]
472
+
473
+ # build encoded dimension indices
474
+ encoded_dim_indices = []
475
+ for start, end in encoded_dim_ranges:
476
+ encoded_dim_indices.extend(range(start, end))
477
+ encoded_dim_indices = np.array(encoded_dim_indices)
478
+
479
+ # extract stats for encoded dimensions only
480
+ encoded_stats = {}
481
+ for stat_name, stat_values in action_stats.items():
482
+ if isinstance(stat_values, (list, np.ndarray)):
483
+ stat_array = np.array(stat_values)
484
+ if len(stat_array) > max(encoded_dim_indices):
485
+ encoded_stats[stat_name] = stat_array[encoded_dim_indices]
486
+
487
+ if encoded_stats:
488
+ print(f"\nNormalization stats for encoded dimensions (mode: {norm_mode.value}):")
489
+ for stat_name, stat_values in encoded_stats.items():
490
+ print(
491
+ f" {stat_name}: shape={stat_values.shape}, "
492
+ f"range=[{np.min(stat_values):.4f}, {np.max(stat_values):.4f}]"
493
+ )
494
+
495
+ # apply normalization based on mode
496
+ try:
497
+ encoded_chunks = apply_normalization(encoded_chunks, encoded_stats, norm_mode, eps=1e-8)
498
+ print(f"\nApplied {norm_mode.value} normalization")
499
+ except ValueError as e:
500
+ print(f"Warning: {e}. Using raw actions without normalization.")
501
+
502
+ print("\nAfter normalization - overall stats:")
503
+ print(f" Min: {np.min(encoded_chunks):.4f}, Max: {np.max(encoded_chunks):.4f}")
504
+ print(f" Mean: {np.mean(encoded_chunks):.4f}, Std: {np.std(encoded_chunks):.4f}")
505
+
506
+ print("\nPer-dimension stats (after normalization):")
507
+ for d in range(encoded_chunks.shape[-1]):
508
+ dim_data = encoded_chunks[:, :, d]
509
+ print(
510
+ f" Dim {d}: min={np.min(dim_data):7.4f}, max={np.max(dim_data):7.4f}, "
511
+ f"mean={np.mean(dim_data):7.4f}, std={np.std(dim_data):7.4f}"
512
+ )
513
+ else:
514
+ print("Warning: Could not extract stats for encoded dimensions, using raw actions")
515
+ else:
516
+ print("Warning: No normalization stats found in dataset, using raw actions")
517
+
518
+ print(f"Encoded chunks shape: {encoded_chunks.shape}")
519
+
520
+ # train FAST tokenizer
521
+ tokenizer = train_fast_tokenizer(
522
+ encoded_chunks,
523
+ vocab_size=cfg.vocab_size,
524
+ scale=cfg.scale,
525
+ )
526
+
527
+ # compute compression statistics
528
+ compression_stats = compute_compression_stats(tokenizer, encoded_chunks)
529
+
530
+ # save tokenizer
531
+ output_dir = cfg.output_dir
532
+ if output_dir is None:
533
+ output_dir = f"fast_tokenizer_{cfg.repo_id.replace('/', '_')}"
534
+ output_path = Path(output_dir)
535
+ output_path.mkdir(parents=True, exist_ok=True)
536
+
537
+ tokenizer.save_pretrained(output_path)
538
+
539
+ # save metadata
540
+ metadata = {
541
+ "repo_id": cfg.repo_id,
542
+ "vocab_size": cfg.vocab_size,
543
+ "scale": cfg.scale,
544
+ "encoded_dims": cfg.encoded_dims,
545
+ "encoded_dim_ranges": encoded_dim_ranges,
546
+ "total_encoded_dims": total_encoded_dims,
547
+ "delta_dims": cfg.delta_dims,
548
+ "delta_dim_list": delta_dim_list,
549
+ "use_delta_transform": cfg.use_delta_transform,
550
+ "state_key": cfg.state_key,
551
+ "normalization_mode": norm_mode.value,
552
+ "action_horizon": cfg.action_horizon,
553
+ "num_training_chunks": len(encoded_chunks),
554
+ "compression_stats": compression_stats,
555
+ }
556
+
557
+ with open(output_path / "metadata.json", "w") as f:
558
+ json.dump(metadata, f, indent=2)
559
+
560
+ print(f"\nSaved FAST tokenizer to {output_path}")
561
+ print(f"Metadata: {json.dumps(metadata, indent=2)}")
562
+
563
+ # push to Hugging Face Hub if requested
564
+ if cfg.push_to_hub:
565
+ # determine the hub repository ID
566
+ hub_repo_id = cfg.hub_repo_id
567
+ if hub_repo_id is None:
568
+ hub_repo_id = output_path.name
569
+ print(f"\nNo hub_repo_id provided, using: {hub_repo_id}")
570
+
571
+ print(f"\nPushing tokenizer to Hugging Face Hub: {hub_repo_id}")
572
+ print(f" Private: {cfg.hub_private}")
573
+
574
+ try:
575
+ # use the tokenizer's push_to_hub method
576
+ tokenizer.push_to_hub(
577
+ repo_id=hub_repo_id,
578
+ private=cfg.hub_private,
579
+ commit_message=f"Upload FAST tokenizer trained on {cfg.repo_id}",
580
+ )
581
+
582
+ # also upload the metadata.json file separately
583
+ api = HfApi()
584
+ api.upload_file(
585
+ path_or_fileobj=str(output_path / "metadata.json"),
586
+ path_in_repo="metadata.json",
587
+ repo_id=hub_repo_id,
588
+ repo_type="model",
589
+ commit_message="Upload tokenizer metadata",
590
+ )
591
+
592
+ print(f"Successfully pushed tokenizer to: https://huggingface.co/{hub_repo_id}")
593
+ except Exception as e:
594
+ print(f"Error pushing to hub: {e}")
595
+ print(" Make sure you're logged in with `huggingface-cli login`")
596
+
597
+
598
+ def main():
599
+ """CLI entry point that parses arguments and runs the tokenizer training."""
600
+ train_tokenizer()
601
+
602
+
603
+ if __name__ == "__main__":
604
+ main()
lerobot/src/lerobot/teleoperators/config.py ADDED
@@ -0,0 +1,31 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import abc
16
+ from dataclasses import dataclass
17
+ from pathlib import Path
18
+
19
+ import draccus
20
+
21
+
22
+ @dataclass(kw_only=True)
23
+ class TeleoperatorConfig(draccus.ChoiceRegistry, abc.ABC):
24
+ # Allows distinguishing between different teleoperators of the same type
25
+ id: str | None = None
26
+ # Directory to store calibration file
27
+ calibration_dir: Path | None = None
28
+
29
+ @property
30
+ def type(self) -> str:
31
+ return self.get_choice_name(self.__class__)
lerobot/src/lerobot/teleoperators/reachy2_teleoperator/__init__.py ADDED
@@ -0,0 +1,25 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from .config_reachy2_teleoperator import Reachy2TeleoperatorConfig
18
+ from .reachy2_teleoperator import (
19
+ REACHY2_ANTENNAS_JOINTS,
20
+ REACHY2_L_ARM_JOINTS,
21
+ REACHY2_NECK_JOINTS,
22
+ REACHY2_R_ARM_JOINTS,
23
+ REACHY2_VEL,
24
+ Reachy2Teleoperator,
25
+ )
lerobot/src/lerobot/teleoperators/reachy2_teleoperator/reachy2_teleoperator.py ADDED
@@ -0,0 +1,176 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ from __future__ import annotations
17
+
18
+ import logging
19
+ import time
20
+ from typing import TYPE_CHECKING
21
+
22
+ from lerobot.utils.import_utils import _reachy2_sdk_available
23
+
24
+ if TYPE_CHECKING or _reachy2_sdk_available:
25
+ from reachy2_sdk import ReachySDK
26
+ else:
27
+ ReachySDK = None
28
+
29
+ from lerobot.utils.decorators import check_if_already_connected, check_if_not_connected
30
+ from lerobot.utils.errors import DeviceNotConnectedError
31
+
32
+ from ..teleoperator import Teleoperator
33
+ from .config_reachy2_teleoperator import Reachy2TeleoperatorConfig
34
+
35
+ logger = logging.getLogger(__name__)
36
+
37
+ # {lerobot_keys: reachy2_sdk_keys}
38
+ REACHY2_NECK_JOINTS = {
39
+ "neck_yaw.pos": "head.neck.yaw",
40
+ "neck_pitch.pos": "head.neck.pitch",
41
+ "neck_roll.pos": "head.neck.roll",
42
+ }
43
+
44
+ REACHY2_ANTENNAS_JOINTS = {
45
+ "l_antenna.pos": "head.l_antenna",
46
+ "r_antenna.pos": "head.r_antenna",
47
+ }
48
+
49
+ REACHY2_R_ARM_JOINTS = {
50
+ "r_shoulder_pitch.pos": "r_arm.shoulder.pitch",
51
+ "r_shoulder_roll.pos": "r_arm.shoulder.roll",
52
+ "r_elbow_yaw.pos": "r_arm.elbow.yaw",
53
+ "r_elbow_pitch.pos": "r_arm.elbow.pitch",
54
+ "r_wrist_roll.pos": "r_arm.wrist.roll",
55
+ "r_wrist_pitch.pos": "r_arm.wrist.pitch",
56
+ "r_wrist_yaw.pos": "r_arm.wrist.yaw",
57
+ "r_gripper.pos": "r_arm.gripper",
58
+ }
59
+
60
+ REACHY2_L_ARM_JOINTS = {
61
+ "l_shoulder_pitch.pos": "l_arm.shoulder.pitch",
62
+ "l_shoulder_roll.pos": "l_arm.shoulder.roll",
63
+ "l_elbow_yaw.pos": "l_arm.elbow.yaw",
64
+ "l_elbow_pitch.pos": "l_arm.elbow.pitch",
65
+ "l_wrist_roll.pos": "l_arm.wrist.roll",
66
+ "l_wrist_pitch.pos": "l_arm.wrist.pitch",
67
+ "l_wrist_yaw.pos": "l_arm.wrist.yaw",
68
+ "l_gripper.pos": "l_arm.gripper",
69
+ }
70
+
71
+ REACHY2_VEL = {
72
+ "mobile_base.vx": "vx",
73
+ "mobile_base.vy": "vy",
74
+ "mobile_base.vtheta": "vtheta",
75
+ }
76
+
77
+
78
+ class Reachy2Teleoperator(Teleoperator):
79
+ """
80
+ [Reachy 2](https://www.pollen-robotics.com/reachy/), by Pollen Robotics.
81
+ """
82
+
83
+ config_class = Reachy2TeleoperatorConfig
84
+ name = "reachy2_specific"
85
+
86
+ def __init__(self, config: Reachy2TeleoperatorConfig):
87
+ super().__init__(config)
88
+
89
+ self.config = config
90
+ self.reachy: None | ReachySDK = None
91
+
92
+ self.joints_dict: dict[str, str] = self._generate_joints_dict()
93
+
94
+ def _generate_joints_dict(self) -> dict[str, str]:
95
+ joints = {}
96
+ if self.config.with_neck:
97
+ joints.update(REACHY2_NECK_JOINTS)
98
+ if self.config.with_l_arm:
99
+ joints.update(REACHY2_L_ARM_JOINTS)
100
+ if self.config.with_r_arm:
101
+ joints.update(REACHY2_R_ARM_JOINTS)
102
+ if self.config.with_antennas:
103
+ joints.update(REACHY2_ANTENNAS_JOINTS)
104
+ return joints
105
+
106
+ @property
107
+ def action_features(self) -> dict[str, type]:
108
+ if self.config.with_mobile_base:
109
+ return {
110
+ **dict.fromkeys(
111
+ self.joints_dict.keys(),
112
+ float,
113
+ ),
114
+ **dict.fromkeys(
115
+ REACHY2_VEL.keys(),
116
+ float,
117
+ ),
118
+ }
119
+ else:
120
+ return dict.fromkeys(self.joints_dict.keys(), float)
121
+
122
+ @property
123
+ def feedback_features(self) -> dict[str, type]:
124
+ return {}
125
+
126
+ @property
127
+ def is_connected(self) -> bool:
128
+ return self.reachy.is_connected() if self.reachy is not None else False
129
+
130
+ @check_if_already_connected
131
+ def connect(self, calibrate: bool = True) -> None:
132
+ self.reachy = ReachySDK(self.config.ip_address)
133
+
134
+ if not self.is_connected:
135
+ raise DeviceNotConnectedError(f"{self} could not connect to Reachy 2 at {self.config.ip_address}.")
136
+ logger.info(f"{self} connected.")
137
+
138
+ @property
139
+ def is_calibrated(self) -> bool:
140
+ return True
141
+
142
+ def calibrate(self) -> None:
143
+ pass
144
+
145
+ def configure(self) -> None:
146
+ pass
147
+
148
+ @check_if_not_connected
149
+ def get_action(self) -> dict[str, float]:
150
+ start = time.perf_counter()
151
+
152
+ joint_action: dict[str, float] = {}
153
+ vel_action: dict[str, float] = {}
154
+
155
+ if self.config.use_present_position:
156
+ joint_action = {k: self.reachy.joints[v].present_position for k, v in self.joints_dict.items()}
157
+ else:
158
+ joint_action = {k: self.reachy.joints[v].goal_position for k, v in self.joints_dict.items()}
159
+ if not self.config.with_mobile_base:
160
+ dt_ms = (time.perf_counter() - start) * 1e3
161
+ logger.debug(f"{self} read action: {dt_ms:.1f}ms")
162
+ return joint_action
163
+ if self.config.use_present_position:
164
+ vel_action = {k: self.reachy.mobile_base.odometry[v] for k, v in REACHY2_VEL.items()}
165
+ else:
166
+ vel_action = {k: self.reachy.mobile_base.last_cmd_vel[v] for k, v in REACHY2_VEL.items()}
167
+ dt_ms = (time.perf_counter() - start) * 1e3
168
+ logger.debug(f"{self} read action: {dt_ms:.1f}ms")
169
+ return {**joint_action, **vel_action}
170
+
171
+ def send_feedback(self, feedback: dict[str, float]) -> None:
172
+ raise NotImplementedError
173
+
174
+ def disconnect(self) -> None:
175
+ if self.is_connected:
176
+ self.reachy.disconnect()
lerobot/src/lerobot/teleoperators/so_leader/config_so_leader.py ADDED
@@ -0,0 +1,42 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from dataclasses import dataclass
18
+ from typing import TypeAlias
19
+
20
+ from ..config import TeleoperatorConfig
21
+
22
+
23
+ @dataclass
24
+ class SOLeaderConfig:
25
+ """Base configuration class for SO Leader teleoperators."""
26
+
27
+ # Port to connect to the arm
28
+ port: str
29
+
30
+ # Whether to use degrees for angles
31
+ use_degrees: bool = False
32
+
33
+
34
+ @TeleoperatorConfig.register_subclass("so101_leader")
35
+ @TeleoperatorConfig.register_subclass("so100_leader")
36
+ @dataclass
37
+ class SOLeaderTeleopConfig(TeleoperatorConfig, SOLeaderConfig):
38
+ pass
39
+
40
+
41
+ SO100LeaderConfig: TypeAlias = SOLeaderTeleopConfig
42
+ SO101LeaderConfig: TypeAlias = SOLeaderTeleopConfig
lerobot/src/lerobot/teleoperators/teleoperator.py ADDED
@@ -0,0 +1,182 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import abc
16
+ import builtins
17
+ from pathlib import Path
18
+ from typing import Any
19
+
20
+ import draccus
21
+
22
+ from lerobot.motors.motors_bus import MotorCalibration
23
+ from lerobot.processor import RobotAction
24
+ from lerobot.utils.constants import HF_LEROBOT_CALIBRATION, TELEOPERATORS
25
+
26
+ from .config import TeleoperatorConfig
27
+
28
+
29
+ class Teleoperator(abc.ABC):
30
+ """
31
+ The base abstract class for all LeRobot-compatible teleoperation devices.
32
+
33
+ This class provides a standardized interface for interacting with physical teleoperators.
34
+ Subclasses must implement all abstract methods and properties to be usable.
35
+
36
+ Attributes:
37
+ config_class (RobotConfig): The expected configuration class for this teleoperator.
38
+ name (str): The unique name used to identify this teleoperator type.
39
+ """
40
+
41
+ # Set these in ALL subclasses
42
+ config_class: builtins.type[TeleoperatorConfig]
43
+ name: str
44
+
45
+ def __init__(self, config: TeleoperatorConfig):
46
+ self.id = config.id
47
+ self.calibration_dir = (
48
+ config.calibration_dir
49
+ if config.calibration_dir
50
+ else HF_LEROBOT_CALIBRATION / TELEOPERATORS / self.name
51
+ )
52
+ self.calibration_dir.mkdir(parents=True, exist_ok=True)
53
+ self.calibration_fpath = self.calibration_dir / f"{self.id}.json"
54
+ self.calibration: dict[str, MotorCalibration] = {}
55
+ if self.calibration_fpath.is_file():
56
+ self._load_calibration()
57
+
58
+ def __str__(self) -> str:
59
+ return f"{self.id} {self.__class__.__name__}"
60
+
61
+ @property
62
+ @abc.abstractmethod
63
+ def action_features(self) -> dict:
64
+ """
65
+ A dictionary describing the structure and types of the actions produced by the teleoperator. Its
66
+ structure (keys) should match the structure of what is returned by :pymeth:`get_action`. Values for
67
+ the dict should be the type of the value if it's a simple value, e.g. `float` for a single
68
+ proprioceptive value (a joint's goal position/velocity).
69
+
70
+ Note: this property should be callable regardless of whether the teleoperator is connected.
71
+ """
72
+ pass
73
+
74
+ @property
75
+ @abc.abstractmethod
76
+ def feedback_features(self) -> dict:
77
+ """
78
+ A dictionary describing the structure and types of the feedback actions expected by the teleoperator. Its
79
+ structure (keys) should match the structure of what is passed to :pymeth:`send_feedback`. Values for
80
+ the dict should be the type of the value if it's a simple value, e.g. `float` for a single
81
+ proprioceptive value (a joint's goal position/velocity).
82
+
83
+ Note: this property should be callable regardless of whether the teleoperator is connected.
84
+ """
85
+ pass
86
+
87
+ @property
88
+ @abc.abstractmethod
89
+ def is_connected(self) -> bool:
90
+ """
91
+ Whether the teleoperator is currently connected or not. If `False`, calling :pymeth:`get_action`
92
+ or :pymeth:`send_feedback` should raise an error.
93
+ """
94
+ pass
95
+
96
+ @abc.abstractmethod
97
+ def connect(self, calibrate: bool = True) -> None:
98
+ """
99
+ Establish communication with the teleoperator.
100
+
101
+ Args:
102
+ calibrate (bool): If True, automatically calibrate the teleoperator after connecting if it's not
103
+ calibrated or needs calibration (this is hardware-dependent).
104
+ """
105
+ pass
106
+
107
+ @property
108
+ @abc.abstractmethod
109
+ def is_calibrated(self) -> bool:
110
+ """Whether the teleoperator is currently calibrated or not. Should be always `True` if not applicable"""
111
+ pass
112
+
113
+ @abc.abstractmethod
114
+ def calibrate(self) -> None:
115
+ """
116
+ Calibrate the teleoperator if applicable. If not, this should be a no-op.
117
+
118
+ This method should collect any necessary data (e.g., motor offsets) and update the
119
+ :pyattr:`calibration` dictionary accordingly.
120
+ """
121
+ pass
122
+
123
+ def _load_calibration(self, fpath: Path | None = None) -> None:
124
+ """
125
+ Helper to load calibration data from the specified file.
126
+
127
+ Args:
128
+ fpath (Path | None): Optional path to the calibration file. Defaults to `self.calibration_fpath`.
129
+ """
130
+ fpath = self.calibration_fpath if fpath is None else fpath
131
+ with open(fpath) as f, draccus.config_type("json"):
132
+ self.calibration = draccus.load(dict[str, MotorCalibration], f)
133
+
134
+ def _save_calibration(self, fpath: Path | None = None) -> None:
135
+ """
136
+ Helper to save calibration data to the specified file.
137
+
138
+ Args:
139
+ fpath (Path | None): Optional path to save the calibration file. Defaults to `self.calibration_fpath`.
140
+ """
141
+ fpath = self.calibration_fpath if fpath is None else fpath
142
+ with open(fpath, "w") as f, draccus.config_type("json"):
143
+ draccus.dump(self.calibration, f, indent=4)
144
+
145
+ @abc.abstractmethod
146
+ def configure(self) -> None:
147
+ """
148
+ Apply any one-time or runtime configuration to the teleoperator.
149
+ This may include setting motor parameters, control modes, or initial state.
150
+ """
151
+ pass
152
+
153
+ @abc.abstractmethod
154
+ def get_action(self) -> RobotAction:
155
+ """
156
+ Retrieve the current action from the teleoperator.
157
+
158
+ Returns:
159
+ RobotAction: A flat dictionary representing the teleoperator's current actions. Its
160
+ structure should match :pymeth:`observation_features`.
161
+ """
162
+ pass
163
+
164
+ @abc.abstractmethod
165
+ def send_feedback(self, feedback: dict[str, Any]) -> None:
166
+ """
167
+ Send a feedback action command to the teleoperator.
168
+
169
+ Args:
170
+ feedback (dict[str, Any]): Dictionary representing the desired feedback. Its structure should match
171
+ :pymeth:`feedback_features`.
172
+
173
+ Returns:
174
+ dict[str, Any]: The action actually sent to the motors potentially clipped or modified, e.g. by
175
+ safety limits on velocity.
176
+ """
177
+ pass
178
+
179
+ @abc.abstractmethod
180
+ def disconnect(self) -> None:
181
+ """Disconnect from the teleoperator and perform any necessary cleanup."""
182
+ pass
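
To make the abstract contract concrete, here is a minimal sketch of a subclass that satisfies every abstract member. `DummyTeleop`, `DummyTeleopConfig`, and the single `joint_0.pos` key are illustrative names, not part of this commit, and subclass registration for CLI parsing is omitted; a real device would talk to hardware in `connect` and `get_action`.

```python
from dataclasses import dataclass

from lerobot.processor import RobotAction
from lerobot.teleoperators.config import TeleoperatorConfig
from lerobot.teleoperators.teleoperator import Teleoperator


@dataclass
class DummyTeleopConfig(TeleoperatorConfig):
    """Hypothetical config; relies on the base class's `id` and `calibration_dir` fields."""


class DummyTeleop(Teleoperator):
    config_class = DummyTeleopConfig
    name = "dummy_teleop"

    def __init__(self, config: DummyTeleopConfig):
        super().__init__(config)
        self._connected = False

    @property
    def action_features(self) -> dict:
        # Keys mirror what get_action() returns.
        return {"joint_0.pos": float}

    @property
    def feedback_features(self) -> dict:
        return {}  # this toy device accepts no feedback

    @property
    def is_connected(self) -> bool:
        return self._connected

    def connect(self, calibrate: bool = True) -> None:
        self._connected = True  # a real device would open a serial port here

    @property
    def is_calibrated(self) -> bool:
        return True  # calibration not applicable, so always True per the contract

    def calibrate(self) -> None:
        pass  # no-op, since calibration is not applicable

    def configure(self) -> None:
        pass

    def get_action(self) -> RobotAction:
        return {"joint_0.pos": 0.0}  # a real device would read its sensors

    def send_feedback(self, feedback: dict) -> None:
        pass

    def disconnect(self) -> None:
        self._connected = False
```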
lerobot/src/lerobot/teleoperators/utils.py ADDED
@@ -0,0 +1,88 @@
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from enum import Enum
+ from typing import cast
+
+ from lerobot.utils.import_utils import make_device_from_device_class
+
+ from .config import TeleoperatorConfig
+ from .teleoperator import Teleoperator
+
+
+ class TeleopEvents(Enum):
+     """Shared constants for teleoperation events across teleoperators."""
+
+     SUCCESS = "success"
+     FAILURE = "failure"
+     RERECORD_EPISODE = "rerecord_episode"
+     IS_INTERVENTION = "is_intervention"
+     TERMINATE_EPISODE = "terminate_episode"
+
+
+ def make_teleoperator_from_config(config: TeleoperatorConfig) -> Teleoperator:
+     # TODO(Steven): Consider just using make_device_from_device_class for all types
+     if config.type == "keyboard":
+         from .keyboard import KeyboardTeleop
+
+         return KeyboardTeleop(config)
+     elif config.type == "koch_leader":
+         from .koch_leader import KochLeader
+
+         return KochLeader(config)
+     elif config.type == "omx_leader":
+         from .omx_leader import OmxLeader
+
+         return OmxLeader(config)
+     elif config.type == "so100_leader":
+         from .so_leader import SO100Leader
+
+         return SO100Leader(config)
+     elif config.type == "so101_leader":
+         from .so_leader import SO101Leader
+
+         return SO101Leader(config)
+     elif config.type == "mock_teleop":
+         from tests.mocks.mock_teleop import MockTeleop
+
+         return MockTeleop(config)
+     elif config.type == "gamepad":
+         from .gamepad.teleop_gamepad import GamepadTeleop
+
+         return GamepadTeleop(config)
+     elif config.type == "keyboard_ee":
+         from .keyboard.teleop_keyboard import KeyboardEndEffectorTeleop
+
+         return KeyboardEndEffectorTeleop(config)
+     elif config.type == "homunculus_glove":
+         from .homunculus import HomunculusGlove
+
+         return HomunculusGlove(config)
+     elif config.type == "homunculus_arm":
+         from .homunculus import HomunculusArm
+
+         return HomunculusArm(config)
+     elif config.type == "bi_so_leader":
+         from .bi_so_leader import BiSOLeader
+
+         return BiSOLeader(config)
+     elif config.type == "reachy2_teleoperator":
+         from .reachy2_teleoperator import Reachy2Teleoperator
+
+         return Reachy2Teleoperator(config)
+     else:
+         try:
+             return cast(Teleoperator, make_device_from_device_class(config))
+         except Exception as e:
+             raise ValueError(f"Error creating teleoperator with config {config}: {e}") from e
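
For reference, a hypothetical usage sketch of the factory above: dispatch happens on `config.type`, with unknown types falling back to `make_device_from_device_class`. The `SO101LeaderConfig` name, its import path, and the `id`/`port` fields are assumptions for illustration, so check the actual config module for the fields your device needs.

```python
from lerobot.teleoperators.so_leader import SO101LeaderConfig  # assumed import path
from lerobot.teleoperators.utils import make_teleoperator_from_config

# Field names on the config are assumptions for illustration.
config = SO101LeaderConfig(id="my_leader", port="/dev/ttyACM0")

teleop = make_teleoperator_from_config(config)  # dispatches on config.type == "so101_leader"
teleop.connect()                 # may calibrate on first use, depending on hardware
action = teleop.get_action()     # flat dict matching teleop.action_features
teleop.disconnect()
```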