savoji commited on
Commit
d4bed85
·
verified ·
1 Parent(s): c139af0

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. project/ManiSkill3/src/maniskill3_environment/docker/10_nvidia.json +6 -0
  2. project/ManiSkill3/src/maniskill3_environment/docker/Dockerfile +49 -0
  3. project/ManiSkill3/src/maniskill3_environment/docker/compose.yml +60 -0
  4. project/ManiSkill3/src/maniskill3_environment/docker/nvidia_icd.json +7 -0
  5. project/ManiSkill3/src/maniskill3_environment/docker/nvidia_layers.json +21 -0
  6. project/ManiSkill3/src/maniskill3_environment/lerobot/.cache/calibration/aloha_default/left_follower.json +68 -0
  7. project/ManiSkill3/src/maniskill3_environment/lerobot/.cache/calibration/aloha_default/left_leader.json +68 -0
  8. project/ManiSkill3/src/maniskill3_environment/lerobot/.cache/calibration/aloha_default/right_follower.json +68 -0
  9. project/ManiSkill3/src/maniskill3_environment/lerobot/.cache/calibration/aloha_default/right_leader.json +68 -0
  10. project/ManiSkill3/src/maniskill3_environment/lerobot/benchmarks/video/README.md +271 -0
  11. project/ManiSkill3/src/maniskill3_environment/lerobot/benchmarks/video/capture_camera_feed.py +102 -0
  12. project/ManiSkill3/src/maniskill3_environment/lerobot/benchmarks/video/run_video_benchmark.py +490 -0
  13. project/ManiSkill3/src/maniskill3_environment/lerobot/docker/lerobot-cpu/Dockerfile +29 -0
  14. project/ManiSkill3/src/maniskill3_environment/lerobot/docker/lerobot-gpu-dev/Dockerfile +68 -0
  15. project/ManiSkill3/src/maniskill3_environment/lerobot/docker/lerobot-gpu/Dockerfile +24 -0
  16. project/ManiSkill3/src/maniskill3_environment/lerobot/examples/11_use_lekiwi.md +597 -0
  17. project/ManiSkill3/src/maniskill3_environment/lerobot/examples/11_use_moss.md +337 -0
  18. project/ManiSkill3/src/maniskill3_environment/lerobot/examples/1_load_lerobot_dataset.py +148 -0
  19. project/ManiSkill3/src/maniskill3_environment/lerobot/examples/7_get_started_with_real_robot.md +1003 -0
  20. project/ManiSkill3/src/maniskill3_environment/lerobot/examples/8_use_stretch.md +161 -0
  21. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/backward_compatibility.py +68 -0
  22. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/card_template.md +27 -0
  23. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/compute_stats.py +176 -0
  24. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/factory.py +118 -0
  25. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/image_writer.py +178 -0
  26. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/lerobot_dataset.py +1217 -0
  27. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/online_buffer.py +384 -0
  28. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/push_dataset_to_hub/utils.py +131 -0
  29. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/sampler.py +61 -0
  30. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/transforms.py +249 -0
  31. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/utils.py +813 -0
  32. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py +884 -0
  33. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py +664 -0
  34. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/v21/_remove_language_instruction.py +87 -0
  35. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/v21/batch_convert_dataset_v20_to_v21.py +54 -0
  36. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/v21/convert_dataset_v20_to_v21.py +114 -0
  37. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/v21/convert_stats.py +99 -0
  38. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/video_utils.py +432 -0
  39. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/envs/__init__.py +15 -0
  40. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/envs/configs.py +156 -0
  41. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/envs/factory.py +69 -0
  42. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/envs/utils.py +127 -0
  43. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/optim/__init__.py +15 -0
  44. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/optim/factory.py +40 -0
  45. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/optim/optimizers.py +118 -0
  46. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/optim/schedulers.py +122 -0
  47. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/policies/__init__.py +19 -0
  48. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/policies/act/configuration_act.py +186 -0
  49. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/policies/act/modeling_act.py +765 -0
  50. project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/policies/diffusion/configuration_diffusion.py +237 -0
project/ManiSkill3/src/maniskill3_environment/docker/10_nvidia.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "file_format_version" : "1.0.0",
3
+ "ICD" : {
4
+ "library_path" : "libEGL_nvidia.so.0"
5
+ }
6
+ }
project/ManiSkill3/src/maniskill3_environment/docker/Dockerfile ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM nvidia/cudagl:11.3.1-devel-ubuntu20.04
2
+ ENV NVIDIA_DRIVER_CAPABILITIES=all
3
+ ARG PYTHON_VERSION=3.9
4
+
5
+ # Install os-level packages
6
+ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
7
+ bash-completion \
8
+ build-essential \
9
+ ca-certificates \
10
+ cmake \
11
+ curl \
12
+ git \
13
+ htop \
14
+ libegl1 \
15
+ libxext6 \
16
+ libjpeg-dev \
17
+ libpng-dev \
18
+ libvulkan1 \
19
+ rsync \
20
+ tmux \
21
+ unzip \
22
+ vim \
23
+ vulkan-utils \
24
+ wget \
25
+ xvfb \
26
+ && rm -rf /var/lib/apt/lists/*
27
+
28
+ # Install (mini) conda
29
+ RUN curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
30
+ chmod +x ~/miniconda.sh && \
31
+ ~/miniconda.sh -b -p /opt/conda && \
32
+ rm ~/miniconda.sh && \
33
+ /opt/conda/bin/conda init && \
34
+ /opt/conda/bin/conda install -y python="$PYTHON_VERSION" && \
35
+ /opt/conda/bin/conda clean -ya
36
+
37
+ ENV PATH=/opt/conda/bin:$PATH
38
+ SHELL ["/bin/bash", "-c"]
39
+
40
+ # https://github.com/haosulab/ManiSkill/issues/9
41
+ COPY nvidia_icd.json /usr/share/vulkan/icd.d/nvidia_icd.json
42
+ COPY nvidia_layers.json /etc/vulkan/implicit_layer.d/nvidia_layers.json
43
+
44
+ # install dependencies
45
+ RUN pip install --upgrade mani-skill==3.0.0.b18 && pip cache purge
46
+ RUN pip install torch
47
+
48
+ # download physx GPU binary via sapien
49
+ RUN python -c "exec('import sapien.physx as physx;\ntry:\n physx.enable_gpu()\nexcept:\n pass;')"
project/ManiSkill3/src/maniskill3_environment/docker/compose.yml ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ services:
2
+ maniskill:
3
+ shm_size: '12gb'
4
+ image: maniskill/base:latest
5
+ build:
6
+ network: host
7
+ args:
8
+ NO_PROXY: '127.0.0.1,10.218.163.63,localhost,.huawei.com,.kyber.team'
9
+ FTP_PROXY: 'http://localhost:3128'
10
+ HTTPS_PROXY: 'http://localhost:3128'
11
+ HTTP_PROXY: 'http://localhost:3128'
12
+ context: ..
13
+ dockerfile: ./docker/Dockerfile
14
+ stdin_open: true
15
+ tty: true
16
+ ipc: host
17
+ pid: host
18
+ network_mode: host
19
+ privileged: true
20
+ cap_add:
21
+ - SYS_PTRACE
22
+ security_opt:
23
+ - seccomp:unconfined
24
+ environment:
25
+ runtime: nvidia
26
+ NVIDIA_DRIVER_CAPABILITIES: all
27
+ DISPLAY: $DISPLAY
28
+ NVIDIA_DISABLE_REQUIRE: "1"
29
+ NO_PROXY: '127.0.0.1,10.218.163.63,localhost,.huawei.com,.kyber.team'
30
+ FTP_PROXY: 'http://localhost:3128'
31
+ HTTPS_PROXY: 'http://localhost:3128'
32
+ HTTP_PROXY: 'http://localhost:3128'
33
+ # GIT_INDEX_FILE: /path/to/git_index # Specify the actual path
34
+ # ports:
35
+ # - "7007:7007"
36
+ volumes:
37
+ - ../:/workspace/
38
+ - /home:/home
39
+ - /tmp:/tmp
40
+ - /mnt:/mnt
41
+ - /tmp/.X11-unix:/tmp/.X11-unix
42
+ - /home/${USER}/.ssh:/home/${USER}/.ssh
43
+ - /data/datasets:/datasets
44
+ - /home/${USER}/weights:/workspace/weights
45
+ - /home/${USER}/weights:/weights
46
+ - /home/${USER}/datasets:/workspace/datasets
47
+ - /data:/data
48
+ - /media:/media
49
+ - /media/kyber/sda/maniskill/data:/root/.maniskill/data
50
+ - $PWD:/host
51
+ - /usr/share/nvidia:/usr/share/nvidia
52
+ working_dir: /workspace/
53
+ command: /bin/bash
54
+ deploy:
55
+ resources:
56
+ reservations:
57
+ devices:
58
+ - driver: nvidia
59
+ count: all
60
+ capabilities: [ gpu ]
project/ManiSkill3/src/maniskill3_environment/docker/nvidia_icd.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "file_format_version" : "1.0.0",
3
+ "ICD": {
4
+ "library_path": "libGLX_nvidia.so.0",
5
+ "api_version" : "1.2.155"
6
+ }
7
+ }
project/ManiSkill3/src/maniskill3_environment/docker/nvidia_layers.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "file_format_version" : "1.0.0",
3
+ "layer": {
4
+ "name": "VK_LAYER_NV_optimus",
5
+ "type": "INSTANCE",
6
+ "library_path": "libGLX_nvidia.so.0",
7
+ "api_version" : "1.2.155",
8
+ "implementation_version" : "1",
9
+ "description" : "NVIDIA Optimus layer",
10
+ "functions": {
11
+ "vkGetInstanceProcAddr": "vk_optimusGetInstanceProcAddr",
12
+ "vkGetDeviceProcAddr": "vk_optimusGetDeviceProcAddr"
13
+ },
14
+ "enable_environment": {
15
+ "__NV_PRIME_RENDER_OFFLOAD": "1"
16
+ },
17
+ "disable_environment": {
18
+ "DISABLE_LAYER_NV_OPTIMUS_1": ""
19
+ }
20
+ }
21
+ }
project/ManiSkill3/src/maniskill3_environment/lerobot/.cache/calibration/aloha_default/left_follower.json ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "homing_offset": [
3
+ 2048,
4
+ 3072,
5
+ 3072,
6
+ -1024,
7
+ -1024,
8
+ 2048,
9
+ -2048,
10
+ 2048,
11
+ -2048
12
+ ],
13
+ "drive_mode": [
14
+ 1,
15
+ 1,
16
+ 1,
17
+ 0,
18
+ 0,
19
+ 1,
20
+ 0,
21
+ 1,
22
+ 0
23
+ ],
24
+ "start_pos": [
25
+ 2015,
26
+ 3058,
27
+ 3061,
28
+ 1071,
29
+ 1071,
30
+ 2035,
31
+ 2152,
32
+ 2029,
33
+ 2499
34
+ ],
35
+ "end_pos": [
36
+ -1008,
37
+ -1963,
38
+ -1966,
39
+ 2141,
40
+ 2143,
41
+ -971,
42
+ 3043,
43
+ -1077,
44
+ 3144
45
+ ],
46
+ "calib_mode": [
47
+ "DEGREE",
48
+ "DEGREE",
49
+ "DEGREE",
50
+ "DEGREE",
51
+ "DEGREE",
52
+ "DEGREE",
53
+ "DEGREE",
54
+ "DEGREE",
55
+ "LINEAR"
56
+ ],
57
+ "motor_names": [
58
+ "waist",
59
+ "shoulder",
60
+ "shoulder_shadow",
61
+ "elbow",
62
+ "elbow_shadow",
63
+ "forearm_roll",
64
+ "wrist_angle",
65
+ "wrist_rotate",
66
+ "gripper"
67
+ ]
68
+ }
project/ManiSkill3/src/maniskill3_environment/lerobot/.cache/calibration/aloha_default/left_leader.json ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "homing_offset": [
3
+ 2048,
4
+ 3072,
5
+ 3072,
6
+ -1024,
7
+ -1024,
8
+ 2048,
9
+ -2048,
10
+ 2048,
11
+ -1024
12
+ ],
13
+ "drive_mode": [
14
+ 1,
15
+ 1,
16
+ 1,
17
+ 0,
18
+ 0,
19
+ 1,
20
+ 0,
21
+ 1,
22
+ 0
23
+ ],
24
+ "start_pos": [
25
+ 2035,
26
+ 3024,
27
+ 3019,
28
+ 979,
29
+ 981,
30
+ 1982,
31
+ 2166,
32
+ 2124,
33
+ 1968
34
+ ],
35
+ "end_pos": [
36
+ -990,
37
+ -2017,
38
+ -2015,
39
+ 2078,
40
+ 2076,
41
+ -1030,
42
+ 3117,
43
+ -1016,
44
+ 2556
45
+ ],
46
+ "calib_mode": [
47
+ "DEGREE",
48
+ "DEGREE",
49
+ "DEGREE",
50
+ "DEGREE",
51
+ "DEGREE",
52
+ "DEGREE",
53
+ "DEGREE",
54
+ "DEGREE",
55
+ "LINEAR"
56
+ ],
57
+ "motor_names": [
58
+ "waist",
59
+ "shoulder",
60
+ "shoulder_shadow",
61
+ "elbow",
62
+ "elbow_shadow",
63
+ "forearm_roll",
64
+ "wrist_angle",
65
+ "wrist_rotate",
66
+ "gripper"
67
+ ]
68
+ }
project/ManiSkill3/src/maniskill3_environment/lerobot/.cache/calibration/aloha_default/right_follower.json ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "homing_offset": [
3
+ 2048,
4
+ 3072,
5
+ 3072,
6
+ -1024,
7
+ -1024,
8
+ 2048,
9
+ -2048,
10
+ 2048,
11
+ -2048
12
+ ],
13
+ "drive_mode": [
14
+ 1,
15
+ 1,
16
+ 1,
17
+ 0,
18
+ 0,
19
+ 1,
20
+ 0,
21
+ 1,
22
+ 0
23
+ ],
24
+ "start_pos": [
25
+ 2056,
26
+ 2895,
27
+ 2896,
28
+ 1191,
29
+ 1190,
30
+ 2018,
31
+ 2051,
32
+ 2056,
33
+ 2509
34
+ ],
35
+ "end_pos": [
36
+ -1040,
37
+ -2004,
38
+ -2006,
39
+ 2126,
40
+ 2127,
41
+ -1010,
42
+ 3050,
43
+ -1117,
44
+ 3143
45
+ ],
46
+ "calib_mode": [
47
+ "DEGREE",
48
+ "DEGREE",
49
+ "DEGREE",
50
+ "DEGREE",
51
+ "DEGREE",
52
+ "DEGREE",
53
+ "DEGREE",
54
+ "DEGREE",
55
+ "LINEAR"
56
+ ],
57
+ "motor_names": [
58
+ "waist",
59
+ "shoulder",
60
+ "shoulder_shadow",
61
+ "elbow",
62
+ "elbow_shadow",
63
+ "forearm_roll",
64
+ "wrist_angle",
65
+ "wrist_rotate",
66
+ "gripper"
67
+ ]
68
+ }
project/ManiSkill3/src/maniskill3_environment/lerobot/.cache/calibration/aloha_default/right_leader.json ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "homing_offset": [
3
+ 2048,
4
+ 3072,
5
+ 3072,
6
+ -1024,
7
+ -1024,
8
+ 2048,
9
+ -2048,
10
+ 2048,
11
+ -2048
12
+ ],
13
+ "drive_mode": [
14
+ 1,
15
+ 1,
16
+ 1,
17
+ 0,
18
+ 0,
19
+ 1,
20
+ 0,
21
+ 1,
22
+ 0
23
+ ],
24
+ "start_pos": [
25
+ 2068,
26
+ 3034,
27
+ 3030,
28
+ 1038,
29
+ 1041,
30
+ 1991,
31
+ 1948,
32
+ 2090,
33
+ 1985
34
+ ],
35
+ "end_pos": [
36
+ -1025,
37
+ -2014,
38
+ -2015,
39
+ 2058,
40
+ 2060,
41
+ -955,
42
+ 3091,
43
+ -940,
44
+ 2576
45
+ ],
46
+ "calib_mode": [
47
+ "DEGREE",
48
+ "DEGREE",
49
+ "DEGREE",
50
+ "DEGREE",
51
+ "DEGREE",
52
+ "DEGREE",
53
+ "DEGREE",
54
+ "DEGREE",
55
+ "LINEAR"
56
+ ],
57
+ "motor_names": [
58
+ "waist",
59
+ "shoulder",
60
+ "shoulder_shadow",
61
+ "elbow",
62
+ "elbow_shadow",
63
+ "forearm_roll",
64
+ "wrist_angle",
65
+ "wrist_rotate",
66
+ "gripper"
67
+ ]
68
+ }
project/ManiSkill3/src/maniskill3_environment/lerobot/benchmarks/video/README.md ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Video benchmark
2
+
3
+
4
+ ## Questions
5
+ What is the optimal trade-off between:
6
+ - minimizing loading time with random access,
7
+ - minimizing memory space on disk,
8
+ - maximizing success rate of policies,
9
+ - compatibility across devices/platforms for decoding videos (e.g. video players, web browsers).
10
+
11
+ How to encode videos?
12
+ - Which video codec (`-vcodec`) to use? h264, h265, AV1?
13
+ - What pixel format to use (`-pix_fmt`)? `yuv444p` or `yuv420p`?
14
+ - How much compression (`-crf`)? No compression with `0`, intermediate compression with `25` or extreme with `50+`?
15
+ - Which frequency to choose for key frames (`-g`)? A key frame every `10` frames?
16
+
17
+ How to decode videos?
18
+ - Which `decoder`? `torchvision`, `torchaudio`, `ffmpegio`, `decord`, or `nvc`?
19
+ - What scenarios to use for requesting timestamps during the benchmark? (`timestamps_mode`)
20
+
21
+
22
+ ## Variables
23
+ **Image content & size**
24
+ We don't expect the same optimal settings for a dataset of images from a simulation, or from real-world in an apartment, or in a factory, or outdoor, or with lots of moving objects in the scene, etc. Similarly, loading times might not vary linearly with the image size (resolution).
25
+ For these reasons, we run this benchmark on four representative datasets:
26
+ - `lerobot/pusht_image`: (96 x 96 pixels) simulation with simple geometric shapes, fixed camera.
27
+ - `aliberts/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera.
28
+ - `aliberts/paris_street`: (720 x 1280 pixels) real-world outdoor, moving camera.
29
+ - `aliberts/kitchen`: (1080 x 1920 pixels) real-world indoor, fixed camera.
30
+
31
+ Note: The datasets used for this benchmark need to be image datasets, not video datasets.
32
+
33
+ **Data augmentations**
34
+ We might revisit this benchmark and find better settings if we train our policies with various data augmentations to make them more robust (e.g. robust to color changes, compression, etc.).
35
+
36
+ ### Encoding parameters
37
+ | parameter | values |
38
+ |-------------|--------------------------------------------------------------|
39
+ | **vcodec** | `libx264`, `libx265`, `libsvtav1` |
40
+ | **pix_fmt** | `yuv444p`, `yuv420p` |
41
+ | **g** | `1`, `2`, `3`, `4`, `5`, `6`, `10`, `15`, `20`, `40`, `None` |
42
+ | **crf** | `0`, `5`, `10`, `15`, `20`, `25`, `30`, `40`, `50`, `None` |
43
+
44
+ Note that `crf` value might be interpreted differently by various video codecs. In other words, the same value used with one codec doesn't necessarily translate into the same compression level with another codec. In fact, the default value (`None`) isn't the same amongst the different video codecs. Importantly, it is also the case for many other ffmpeg arguments like `g` which specifies the frequency of the key frames.
45
+
46
+ For a comprehensive list and documentation of these parameters, see the ffmpeg documentation depending on the video codec used:
47
+ - h264: https://trac.ffmpeg.org/wiki/Encode/H.264
48
+ - h265: https://trac.ffmpeg.org/wiki/Encode/H.265
49
+ - AV1: https://trac.ffmpeg.org/wiki/Encode/AV1
50
+
51
+ ### Decoding parameters
52
+ **Decoder**
53
+ We tested two video decoding backends from torchvision:
54
+ - `pyav`
55
+ - `video_reader` (requires to build torchvision from source)
56
+
57
+ **Requested timestamps**
58
+ Given the way video decoding works, once a keyframe has been loaded, the decoding of subsequent frames is fast.
59
+ This of course is affected by the `-g` parameter during encoding, which specifies the frequency of the keyframes. Given our typical use cases in robotics policies which might request a few timestamps in different random places, we want to replicate these use cases with the following scenarios:
60
+ - `1_frame`: 1 frame,
61
+ - `2_frames`: 2 consecutive frames (e.g. `[t, t + 1 / fps]`),
62
+ - `6_frames`: 6 consecutive frames (e.g. `[t + i / fps for i in range(6)]`)
63
+
64
+ Note that this differs significantly from a typical use case like watching a movie, in which every frame is loaded sequentially from the beginning to the end and it's acceptable to have big values for `-g`.
65
+
66
+ Additionally, because some policies might request single timestamps that are a few frames apart, we also have the following scenario:
67
+ - `2_frames_4_space`: 2 frames with 4 consecutive frames of spacing in between (e.g `[t, t + 5 / fps]`),
68
+
69
+ However, due to how video decoding is implemented with `pyav`, we don't have access to an accurate seek so in practice this scenario is essentially the same as `6_frames` since all 6 frames between `t` and `t + 5 / fps` will be decoded.
70
+
71
+
72
+ ## Metrics
73
+ **Data compression ratio (lower is better)**
74
+ `video_images_size_ratio` is the ratio of the memory space on disk taken by the encoded video over the memory space taken by the original images. For instance, `video_images_size_ratio=25%` means that the video takes 4 times less memory space on disk compared to the original images.
75
+
76
+ **Loading time ratio (lower is better)**
77
+ `video_images_load_time_ratio` is the ratio of the time it takes to decode frames from the video at a given timestamps over the time it takes to load the exact same original images. Lower is better. For instance, `video_images_load_time_ratio=200%` means that decoding from video is 2 times slower than loading the original images.
78
+
79
+ **Average Mean Square Error (lower is better)**
80
+ `avg_mse` is the average mean square error between each decoded frame and its corresponding original image over all requested timestamps, and also divided by the number of pixels in the image to be comparable when switching to different image sizes.
81
+
82
+ **Average Peak Signal to Noise Ratio (higher is better)**
83
+ `avg_psnr` measures the ratio between the maximum possible power of a signal and the power of corrupting noise that affects the fidelity of its representation. Higher PSNR indicates better quality.
84
+
85
+ **Average Structural Similarity Index Measure (higher is better)**
86
+ `avg_ssim` evaluates the perceived quality of images by comparing luminance, contrast, and structure. SSIM values range from -1 to 1, where 1 indicates perfect similarity.
87
+
88
+ One aspect that can't be measured here with those metrics is the compatibility of the encoding across platforms, in particular on web browser, for visualization purposes.
89
+ h264, h265 and AV1 are all commonly used codecs and should not pose an issue. However, the chroma subsampling (`pix_fmt`) format might affect compatibility:
90
+ - `yuv420p` is more widely supported across various platforms, including web browsers.
91
+ - `yuv444p` offers higher color fidelity but might not be supported as broadly.
92
+
93
+
94
+ <!-- **Loss of a pretrained policy (higher is better)** (not available)
95
+ `loss_pretrained` is the result of evaluating with the selected encoding/decoding settings a policy pretrained on original images. It is easier to understand than `avg_l2_error`.
96
+
97
+ **Success rate after retraining (higher is better)** (not available)
98
+ `success_rate` is the result of training and evaluating a policy with the selected encoding/decoding settings. It is the most difficult metric to get but also the very best. -->
99
+
100
+
101
+ ## How the benchmark works
102
+ The benchmark evaluates both encoding and decoding of video frames on the first episode of each dataset.
103
+
104
+ **Encoding:** for each `vcodec` and `pix_fmt` pair, we use a default value for `g` and `crf` upon which we change a single value (either `g` or `crf`) to one of the specified values (we don't test every combination of those as this would be computationally too heavy).
105
+ This gives a unique set of encoding parameters which is used to encode the episode.
106
+
107
+ **Decoding:** Then, for each of those unique encodings, we iterate through every combination of the decoding parameters `backend` and `timestamps_mode`. For each of them, we record the metrics of a number of samples (given by `--num-samples`). This is parallelized for efficiency and the number of processes can be controlled with `--num-workers`. Ideally, it's best to have a `--num-samples` that is divisible by `--num-workers`.
108
+
109
+ Intermediate results are saved for each `vcodec` and `pix_fmt` combination in CSV tables.
110
+ These are then all concatenated to a single table ready for analysis.
111
+
112
+ ## Caveats
113
+ We tried to measure the most impactful parameters for both encoding and decoding. However, for computational reasons we can't test out every combination.
114
+
115
+ Additional encoding parameters exist that are not included in this benchmark. In particular:
116
+ - `-preset` which allows for selecting encoding presets. This represents a collection of options that will provide a certain encoding speed to compression ratio. By leaving this parameter unspecified, it is considered to be `medium` for libx264 and libx265 and `8` for libsvtav1.
117
+ - `-tune` which allows to optimize the encoding for certain aspects (e.g. film quality, fast decoding, etc.).
118
+
119
+ See the documentation mentioned above for more detailed info on these settings and for a more comprehensive list of other parameters.
120
+
121
+ Similarly on the decoding side, other decoders exist but are not implemented in our current benchmark. To name a few:
122
+ - `torchaudio`
123
+ - `ffmpegio`
124
+ - `decord`
125
+ - `nvc`
126
+
127
+ Note as well that since we are mostly interested in the performance at decoding time (also because encoding is done only once before uploading a dataset), we did not measure encoding times nor have any metrics regarding encoding.
128
+ However, besides the necessity to build ffmpeg from source, encoding did not pose any issue and it didn't take a significant amount of time during this benchmark.
129
+
130
+
131
+ ## Install
132
+ Building ffmpeg from source is required to include libx265 and libaom/libsvtav1 (av1) video codecs ([compilation guide](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu)).
133
+
134
+ **Note:** While you still need to build torchvision with a conda-installed `ffmpeg<4.3` to use the `video_reader` decoder (as described in [#220](https://github.com/huggingface/lerobot/pull/220)), you also need another version which is custom-built with all the video codecs for encoding. For the script to then use that version, you can prepend the command above with `PATH="$HOME/bin:$PATH"`, which is where ffmpeg should be built.
135
+
136
+
137
+ ## Adding a video decoder
138
+ Right now, we're only benchmarking the two video decoders available with torchvision: `pyav` and `video_reader`.
139
+ You can easily add a new decoder to benchmark by adding it to this function in the script:
140
+ ```diff
141
+ def decode_video_frames(
142
+ video_path: str,
143
+ timestamps: list[float],
144
+ tolerance_s: float,
145
+ backend: str,
146
+ ) -> torch.Tensor:
147
+ if backend in ["pyav", "video_reader"]:
148
+ return decode_video_frames_torchvision(
149
+ video_path, timestamps, tolerance_s, backend
150
+ )
151
+ + elif backend == "your_decoder":
152
+ + return your_decoder_function(
153
+ + video_path, timestamps, tolerance_s, backend
154
+ + )
155
+ else:
156
+ raise NotImplementedError(backend)
157
+ ```
158
+
159
+
160
+ ## Example
161
+ For a quick run, you can try these parameters:
162
+ ```bash
163
+ python benchmark/video/run_video_benchmark.py \
164
+ --output-dir outputs/video_benchmark \
165
+ --repo-ids \
166
+ lerobot/pusht_image \
167
+ aliberts/aloha_mobile_shrimp_image \
168
+ --vcodec libx264 libx265 \
169
+ --pix-fmt yuv444p yuv420p \
170
+ --g 2 20 None \
171
+ --crf 10 40 None \
172
+ --timestamps-modes 1_frame 2_frames \
173
+ --backends pyav video_reader \
174
+ --num-samples 5 \
175
+ --num-workers 5 \
176
+ --save-frames 0
177
+ ```
178
+
179
+
180
+ ## Results
181
+
182
+ ### Reproduce
183
+ We ran the benchmark with the following parameters:
184
+ ```bash
185
+ # h264 and h265 encodings
186
+ python benchmark/video/run_video_benchmark.py \
187
+ --output-dir outputs/video_benchmark \
188
+ --repo-ids \
189
+ lerobot/pusht_image \
190
+ aliberts/aloha_mobile_shrimp_image \
191
+ aliberts/paris_street \
192
+ aliberts/kitchen \
193
+ --vcodec libx264 libx265 \
194
+ --pix-fmt yuv444p yuv420p \
195
+ --g 1 2 3 4 5 6 10 15 20 40 None \
196
+ --crf 0 5 10 15 20 25 30 40 50 None \
197
+ --timestamps-modes 1_frame 2_frames 6_frames \
198
+ --backends pyav video_reader \
199
+ --num-samples 50 \
200
+ --num-workers 5 \
201
+ --save-frames 1
202
+
203
+ # av1 encoding (only compatible with yuv420p and pyav decoder)
204
+ python benchmark/video/run_video_benchmark.py \
205
+ --output-dir outputs/video_benchmark \
206
+ --repo-ids \
207
+ lerobot/pusht_image \
208
+ aliberts/aloha_mobile_shrimp_image \
209
+ aliberts/paris_street \
210
+ aliberts/kitchen \
211
+ --vcodec libsvtav1 \
212
+ --pix-fmt yuv420p \
213
+ --g 1 2 3 4 5 6 10 15 20 40 None \
214
+ --crf 0 5 10 15 20 25 30 40 50 None \
215
+ --timestamps-modes 1_frame 2_frames 6_frames \
216
+ --backends pyav \
217
+ --num-samples 50 \
218
+ --num-workers 5 \
219
+ --save-frames 1
220
+ ```
221
+
222
+ The full results are available [here](https://docs.google.com/spreadsheets/d/1OYJB43Qu8fC26k_OyoMFgGBBKfQRCi4BIuYitQnq3sw/edit?usp=sharing)
223
+
224
+
225
+ ### Parameters selected for LeRobotDataset
226
+ Considering these results, we chose what we think is the best set of encoding parameters:
227
+ - vcodec: `libsvtav1`
228
+ - pix-fmt: `yuv420p`
229
+ - g: `2`
230
+ - crf: `30`
231
+
232
+ Since we're using av1 encoding, we're choosing the `pyav` decoder as `video_reader` does not support it (and `pyav` doesn't require a custom build of `torchvision`).
233
+
234
+ ### Summary
235
+
236
+ These tables show the results for `g=2` and `crf=30`, using `timestamps-modes=6_frames` and `backend=pyav`
237
+
238
+ | video_images_size_ratio | vcodec | pix_fmt | | | |
239
+ |------------------------------------|------------|---------|-----------|-----------|-----------|
240
+ | | libx264 | | libx265 | | libsvtav1 |
241
+ | repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
242
+ | lerobot/pusht_image | **16.97%** | 17.58% | 18.57% | 18.86% | 22.06% |
243
+ | aliberts/aloha_mobile_shrimp_image | 2.14% | 2.11% | 1.38% | **1.37%** | 5.59% |
244
+ | aliberts/paris_street | 2.12% | 2.13% | **1.54%** | **1.54%** | 4.43% |
245
+ | aliberts/kitchen | 1.40% | 1.39% | **1.00%** | **1.00%** | 2.52% |
246
+
247
+ | video_images_load_time_ratio | vcodec | pix_fmt | | | |
248
+ |------------------------------------|---------|---------|----------|---------|-----------|
249
+ | | libx264 | | libx265 | | libsvtav1 |
250
+ | repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
251
+ | lerobot/pusht_image | 6.45 | 5.19 | **1.90** | 2.12 | 2.47 |
252
+ | aliberts/aloha_mobile_shrimp_image | 11.80 | 7.92 | 0.71 | 0.85 | **0.48** |
253
+ | aliberts/paris_street | 2.21 | 2.05 | 0.36 | 0.49 | **0.30** |
254
+ | aliberts/kitchen | 1.46 | 1.46 | 0.28 | 0.51 | **0.26** |
255
+
256
+ | | | vcodec | pix_fmt | | | |
257
+ |------------------------------------|----------|----------|--------------|----------|-----------|--------------|
258
+ | | | libx264 | | libx265 | | libsvtav1 |
259
+ | repo_id | metric | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
260
+ | lerobot/pusht_image | avg_mse | 2.90E-04 | **2.03E-04** | 3.13E-04 | 2.29E-04 | 2.19E-04 |
261
+ | | avg_psnr | 35.44 | 37.07 | 35.49 | **37.30** | 37.20 |
262
+ | | avg_ssim | 98.28% | **98.85%** | 98.31% | 98.84% | 98.72% |
263
+ | aliberts/aloha_mobile_shrimp_image | avg_mse | 2.76E-04 | 2.59E-04 | 3.17E-04 | 3.06E-04 | **1.30E-04** |
264
+ | | avg_psnr | 35.91 | 36.21 | 35.88 | 36.09 | **40.17** |
265
+ | | avg_ssim | 95.19% | 95.18% | 95.00% | 95.05% | **97.73%** |
266
+ | aliberts/paris_street | avg_mse | 6.89E-04 | 6.70E-04 | 4.03E-03 | 4.02E-03 | **3.09E-04** |
267
+ | | avg_psnr | 33.48 | 33.68 | 32.05 | 32.15 | **35.40** |
268
+ | | avg_ssim | 93.76% | 93.75% | 89.46% | 89.46% | **95.46%** |
269
+ | aliberts/kitchen | avg_mse | 2.50E-04 | 2.24E-04 | 4.28E-04 | 4.18E-04 | **1.53E-04** |
270
+ | | avg_psnr | 36.73 | 37.33 | 36.56 | 36.75 | **39.12** |
271
+ | | avg_ssim | 95.47% | 95.58% | 95.52% | 95.53% | **96.82%** |
project/ManiSkill3/src/maniskill3_environment/lerobot/benchmarks/video/capture_camera_feed.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Capture video feed from a camera as raw images."""
17
+
18
+ import argparse
19
+ import datetime as dt
20
+ import os
21
+ import time
22
+ from pathlib import Path
23
+
24
+ import cv2
25
+ import rerun as rr
26
+
27
+ # see https://rerun.io/docs/howto/visualization/limit-ram
28
+ RERUN_MEMORY_LIMIT = os.getenv("LEROBOT_RERUN_MEMORY_LIMIT", "5%")
29
+
30
+
31
def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height: int, duration: int):
    """Stream the default webcam to a Rerun viewer and dump every frame to disk.

    Frames are written as PNGs under ``output_dir/<date>/<time>/``. Capture stops
    after ``duration`` seconds, or earlier if a frame cannot be read.
    """
    rr.init("lerobot_capture_camera_feed")
    rr.spawn(memory_limit=RERUN_MEMORY_LIMIT)

    now = dt.datetime.now()
    capture_dir = output_dir / f"{now:%Y-%m-%d}" / f"{now:%H-%M-%S}"
    capture_dir.mkdir(parents=True, exist_ok=True)

    # Opens the default webcam
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Error: Could not open video stream.")
        return

    # These are requests; the driver may pick the closest supported values.
    cap.set(cv2.CAP_PROP_FPS, fps)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    frame_index = 0
    start_time = time.time()
    while time.time() - start_time < duration:
        ret, frame = cap.read()

        if not ret:
            print("Error: Could not read frame.")
            break
        # BUG FIX: `cap.read()` returns a numpy array, which has no `.numpy()`
        # method — the previous `rr.Image(frame.numpy())` raised AttributeError
        # on the very first frame. Log the array directly.
        # NOTE(review): OpenCV frames are BGR while Rerun interprets arrays as
        # RGB, so the live preview colors will look swapped — confirm whether a
        # cvtColor is wanted for the preview only (the saved PNGs must stay BGR
        # for cv2.imwrite).
        rr.log("video/stream", rr.Image(frame), static=True)
        cv2.imwrite(str(capture_dir / f"frame_{frame_index:06d}.png"), frame)
        frame_index += 1

    # Release the capture
    cap.release()

    # TODO(Steven): Add a graceful shutdown via a close() method for the Viewer context, though not currently supported in the Rerun API.
66
+
67
+
68
if __name__ == "__main__":
    # Command-line entry point: parse capture settings and run the stream.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=Path("outputs/cam_capture/"),
        help="Directory where the capture images are written. A subfolder named with the current date & time will be created inside it for each capture.",
    )
    # The remaining flags are all simple integers; declare them table-driven.
    int_flags = (
        ("--fps", 30, "Frames Per Second of the capture."),
        ("--width", 1280, "Width of the captured images."),
        ("--height", 720, "Height of the captured images."),
        ("--duration", 20, "Duration in seconds for which the video stream should be captured."),
    )
    for flag, default_value, help_text in int_flags:
        parser.add_argument(flag, type=int, default=default_value, help=help_text)
    cli_args = parser.parse_args()
    display_and_save_video_stream(**vars(cli_args))
project/ManiSkill3/src/maniskill3_environment/lerobot/benchmarks/video/run_video_benchmark.py ADDED
@@ -0,0 +1,490 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Assess the performance of video decoding in various configurations.
17
+
18
+ This script will benchmark different video encoding and decoding parameters.
19
+ See the provided README.md or run `python benchmark/video/run_video_benchmark.py --help` for usage info.
20
+ """
21
+
22
+ import argparse
23
+ import datetime as dt
24
+ import random
25
+ import shutil
26
+ from collections import OrderedDict
27
+ from concurrent.futures import ThreadPoolExecutor, as_completed
28
+ from pathlib import Path
29
+
30
+ import einops
31
+ import numpy as np
32
+ import pandas as pd
33
+ import PIL
34
+ import torch
35
+ from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity
36
+ from tqdm import tqdm
37
+
38
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
39
+ from lerobot.common.datasets.video_utils import (
40
+ decode_video_frames_torchvision,
41
+ encode_video_frames,
42
+ )
43
+ from lerobot.common.utils.benchmark import TimeBenchmark
44
+
45
# Default encoding parameters; `main` varies one key at a time around these
# values. Insertion order matters: it drives the output directory names
# ("_".join of the values) and the CSV header order, hence the OrderedDict.
BASE_ENCODING = OrderedDict(
    [
        ("vcodec", "libx264"),
        ("pix_fmt", "yuv444p"),
        ("g", 2),
        ("crf", None),
        # TODO(aliberts): Add fastdecode
        # ("fastdecode", 0),
    ]
)
55
+
56
+
57
+ # TODO(rcadene, aliberts): move to `utils.py` folder when we want to refactor
58
+ def parse_int_or_none(value) -> int | None:
59
+ if value.lower() == "none":
60
+ return None
61
+ try:
62
+ return int(value)
63
+ except ValueError as e:
64
+ raise argparse.ArgumentTypeError(f"Invalid int or None: {value}") from e
65
+
66
+
67
def check_datasets_formats(repo_ids: list) -> None:
    """Reject any dataset that stores videos; the benchmark needs raw images."""
    for repo_id in repo_ids:
        ds = LeRobotDataset(repo_id)
        if ds.meta.video_keys:
            raise ValueError(
                f"Use only image dataset for running this benchmark. Video dataset provided: {repo_id}"
            )
74
+
75
+
76
def get_directory_size(directory: Path) -> int:
    """Return the total size in bytes of all regular files under `directory`, recursively."""
    return sum(entry.stat().st_size for entry in directory.rglob("*") if entry.is_file())
82
+
83
+
84
def load_original_frames(imgs_dir: Path, timestamps: list[float], fps: int) -> torch.Tensor:
    """Load the PNGs matching `timestamps` as a float32 CHW batch scaled to [0, 1]."""
    loaded = []
    for ts in timestamps:
        frame_idx = int(ts * fps)
        img = PIL.Image.open(imgs_dir / f"frame_{frame_idx:06d}.png")
        tensor = torch.from_numpy(np.array(img)).type(torch.float32) / 255
        # HWC -> CHW (equivalent to einops.rearrange "h w c -> c h w")
        loaded.append(tensor.permute(2, 0, 1))
    return torch.stack(loaded)
94
+
95
+
96
def save_decoded_frames(
    imgs_dir: Path, save_dir: Path, frames: torch.Tensor, timestamps: list[float], fps: int
) -> None:
    """Save decoded frames next to their originals for visual comparison.

    For each timestamp, writes ``frame_XXXXXX_decoded.png`` (from `frames`) and
    copies the matching ``frame_XXXXXX.png`` from `imgs_dir` as
    ``frame_XXXXXX_original.png``. Skips the work when the decoded files are
    already present.
    """
    # BUG FIX: the skip check previously globbed "frame_*.png", which also
    # matches the "_original" copies written below — the count was twice
    # len(timestamps), so the early return could never trigger and the files
    # were rewritten every run. Count only the decoded files.
    if save_dir.exists() and len(list(save_dir.glob("frame_*_decoded.png"))) == len(timestamps):
        return

    save_dir.mkdir(parents=True, exist_ok=True)
    for i, ts in enumerate(timestamps):
        idx = int(ts * fps)
        # CHW float in [0, 1] -> HWC uint8 for PIL.
        frame_hwc = (frames[i].permute((1, 2, 0)) * 255).type(torch.uint8).cpu().numpy()
        PIL.Image.fromarray(frame_hwc).save(save_dir / f"frame_{idx:06d}_decoded.png")
        shutil.copyfile(imgs_dir / f"frame_{idx:06d}.png", save_dir / f"frame_{idx:06d}_original.png")
108
+
109
+
110
def save_first_episode(imgs_dir: Path, dataset: LeRobotDataset) -> None:
    """Export every frame of the dataset's first episode (first camera only) as PNGs."""
    ep_num_images = dataset.episode_data_index["to"][0].item()
    if imgs_dir.exists() and len(list(imgs_dir.glob("frame_*.png"))) == ep_num_images:
        return  # already exported

    imgs_dir.mkdir(parents=True, exist_ok=True)
    hf_dataset = dataset.hf_dataset.with_format(None)

    # We only save images from the first camera
    image_keys = [k for k in hf_dataset.features if k.startswith("observation.image")]
    first_key = image_keys[0]
    imgs_dataset = hf_dataset.select_columns(first_key)

    progress = tqdm(imgs_dataset, desc=f"saving {dataset.repo_id} first episode images", leave=False)
    for i, item in enumerate(progress):
        item[first_key].save(str(imgs_dir / f"frame_{i:06d}.png"), quality=100)

        if i >= ep_num_images - 1:
            break
130
+
131
+
132
def sample_timestamps(timestamps_mode: str, ep_num_images: int, fps: int) -> list[float]:
    """Draw a random anchor frame and return the timestamps implied by `timestamps_mode`."""
    # Anchor at >= 5 so that modes reaching up to 5 frames back stay in range
    # (2_frames_4_space and 6_frames).
    anchor = random.randint(5, ep_num_images - 1)
    offsets_by_mode = {
        "1_frame": (0,),
        "2_frames": (1, 0),
        "2_frames_4_space": (5, 0),
        "6_frames": (5, 4, 3, 2, 1, 0),
    }
    if timestamps_mode not in offsets_by_mode:
        raise ValueError(timestamps_mode)
    return [(anchor - off) / fps for off in offsets_by_mode[timestamps_mode]]
148
+
149
+
150
def decode_video_frames(
    video_path: str,
    timestamps: list[float],
    tolerance_s: float,
    backend: str,
) -> torch.Tensor:
    """Dispatch frame decoding to the requested torchvision backend."""
    if backend not in ("pyav", "video_reader"):
        raise NotImplementedError(backend)
    return decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend)
160
+
161
+
162
def benchmark_decoding(
    imgs_dir: Path,
    video_path: Path,
    timestamps_mode: str,
    backend: str,
    ep_num_images: int,
    fps: int,
    num_samples: int = 50,
    num_workers: int = 4,
    save_frames: bool = False,
) -> dict:
    """Benchmark one decoding configuration against the original PNG frames.

    For `num_samples` random timestamp sets (shape given by `timestamps_mode`),
    decode the video with `backend`, reload the matching original images, and
    record per-frame load times plus MSE/PSNR/SSIM quality metrics. Returns a
    dict of metrics averaged over all samples.
    """

    def process_sample(sample: int):
        # Runs in a worker thread: decode one timestamp set and score it.
        time_benchmark = TimeBenchmark()
        timestamps = sample_timestamps(timestamps_mode, ep_num_images, fps)
        num_frames = len(timestamps)
        result = {
            "psnr_values": [],
            "ssim_values": [],
            "mse_values": [],
        }

        with time_benchmark:
            frames = decode_video_frames(video_path, timestamps=timestamps, tolerance_s=5e-1, backend=backend)
        # Normalize to per-frame time so different timestamps_modes are comparable.
        result["load_time_video_ms"] = time_benchmark.result_ms / num_frames

        with time_benchmark:
            original_frames = load_original_frames(imgs_dir, timestamps, fps)
        result["load_time_images_ms"] = time_benchmark.result_ms / num_frames

        frames_np, original_frames_np = frames.numpy(), original_frames.numpy()
        for i in range(num_frames):
            result["mse_values"].append(mean_squared_error(original_frames_np[i], frames_np[i]))
            result["psnr_values"].append(
                peak_signal_noise_ratio(original_frames_np[i], frames_np[i], data_range=1.0)
            )
            result["ssim_values"].append(
                structural_similarity(original_frames_np[i], frames_np[i], data_range=1.0, channel_axis=0)
            )

        # Only the first sample's frames are dumped to disk, to keep I/O bounded.
        if save_frames and sample == 0:
            save_dir = video_path.with_suffix("") / f"{timestamps_mode}_{backend}"
            save_decoded_frames(imgs_dir, save_dir, frames, timestamps, fps)

        return result

    load_times_video_ms = []
    load_times_images_ms = []
    mse_values = []
    psnr_values = []
    ssim_values = []

    # A sample is a single set of decoded frames specified by timestamps_mode (e.g. a single frame, 2 frames, etc.).
    # For each sample, we record metrics (loading time and quality metrics) which are then averaged over all samples.
    # As these samples are independent, we run them in parallel threads to speed up the benchmark.
    with ThreadPoolExecutor(max_workers=num_workers) as executor:
        futures = [executor.submit(process_sample, i) for i in range(num_samples)]
        for future in tqdm(as_completed(futures), total=num_samples, desc="samples", leave=False):
            result = future.result()
            load_times_video_ms.append(result["load_time_video_ms"])
            load_times_images_ms.append(result["load_time_images_ms"])
            psnr_values.extend(result["psnr_values"])
            ssim_values.extend(result["ssim_values"])
            mse_values.extend(result["mse_values"])

    avg_load_time_video_ms = float(np.array(load_times_video_ms).mean())
    avg_load_time_images_ms = float(np.array(load_times_images_ms).mean())
    # Ratio < 1 means decoding from video is faster than reading the PNGs.
    video_images_load_time_ratio = avg_load_time_video_ms / avg_load_time_images_ms

    return {
        "avg_load_time_video_ms": avg_load_time_video_ms,
        "avg_load_time_images_ms": avg_load_time_images_ms,
        "video_images_load_time_ratio": video_images_load_time_ratio,
        "avg_mse": float(np.mean(mse_values)),
        "avg_psnr": float(np.mean(psnr_values)),
        "avg_ssim": float(np.mean(ssim_values)),
    }
238
+
239
+
240
def benchmark_encoding_decoding(
    dataset: LeRobotDataset,
    video_path: Path,
    imgs_dir: Path,
    encoding_cfg: dict,
    decoding_cfg: dict,
    num_samples: int,
    num_workers: int,
    save_frames: bool,
    overwrite: bool = False,
    seed: int = 1337,
) -> list[dict]:
    """Encode the first episode with `encoding_cfg`, then benchmark every
    decoding configuration in `decoding_cfg` on the resulting video.

    Returns one result row (dict) per (timestamps_mode, backend) combination.
    """
    fps = dataset.fps

    # Re-encode only when forced or when the video does not exist yet.
    if overwrite or not video_path.is_file():
        tqdm.write(f"encoding {video_path}")
        encode_video_frames(
            imgs_dir=imgs_dir,
            video_path=video_path,
            fps=fps,
            vcodec=encoding_cfg["vcodec"],
            pix_fmt=encoding_cfg["pix_fmt"],
            g=encoding_cfg.get("g"),
            crf=encoding_cfg.get("crf"),
            # fast_decode=encoding_cfg.get("fastdecode"),
            overwrite=True,
        )

    ep_num_images = dataset.episode_data_index["to"][0].item()
    # NOTE(review): shape[-2:] of a CHW image tensor is (height, width) — the
    # names here look swapped; confirm against the dataset's image layout.
    width, height = tuple(dataset[0][dataset.meta.camera_keys[0]].shape[-2:])
    num_pixels = width * height
    video_size_bytes = video_path.stat().st_size
    images_size_bytes = get_directory_size(imgs_dir)
    video_images_size_ratio = video_size_bytes / images_size_bytes

    # Seed the RNG once so timestamp sampling is reproducible across runs.
    random.seed(seed)
    benchmark_table = []
    for timestamps_mode in tqdm(
        decoding_cfg["timestamps_modes"], desc="decodings (timestamps_modes)", leave=False
    ):
        for backend in tqdm(decoding_cfg["backends"], desc="decodings (backends)", leave=False):
            benchmark_row = benchmark_decoding(
                imgs_dir,
                video_path,
                timestamps_mode,
                backend,
                ep_num_images,
                fps,
                num_samples,
                num_workers,
                save_frames,
            )
            # Attach the run's identifying metadata to the metrics row.
            benchmark_row.update(
                **{
                    "repo_id": dataset.repo_id,
                    "resolution": f"{width} x {height}",
                    "num_pixels": num_pixels,
                    "video_size_bytes": video_size_bytes,
                    "images_size_bytes": images_size_bytes,
                    "video_images_size_ratio": video_images_size_ratio,
                    "timestamps_mode": timestamps_mode,
                    "backend": backend,
                },
                **encoding_cfg,
            )
            benchmark_table.append(benchmark_row)

    return benchmark_table
308
+
309
+
310
def main(
    output_dir: Path,
    repo_ids: list[str],
    vcodec: list[str],
    pix_fmt: list[str],
    g: list[int],
    crf: list[int],
    # fastdecode: list[int],
    timestamps_modes: list[str],
    backends: list[str],
    num_samples: int,
    num_workers: int,
    save_frames: bool,
):
    """Run the full benchmark grid.

    For every (vcodec, pix_fmt) pair, vary `g` and `crf` one at a time around
    BASE_ENCODING, benchmark each dataset's first episode, write one CSV per
    pair, then concatenate all CSVs into a single results file.
    """
    check_datasets_formats(repo_ids)
    # Each key is varied independently while the other parameters keep their
    # BASE_ENCODING defaults.
    encoding_benchmarks = {
        "g": g,
        "crf": crf,
        # "fastdecode": fastdecode,
    }
    decoding_benchmarks = {
        "timestamps_modes": timestamps_modes,
        "backends": backends,
    }
    headers = ["repo_id", "resolution", "num_pixels"]
    headers += list(BASE_ENCODING.keys())
    headers += [
        "timestamps_mode",
        "backend",
        "video_size_bytes",
        "images_size_bytes",
        "video_images_size_ratio",
        "avg_load_time_video_ms",
        "avg_load_time_images_ms",
        "video_images_load_time_ratio",
        "avg_mse",
        "avg_psnr",
        "avg_ssim",
    ]
    file_paths = []
    for video_codec in tqdm(vcodec, desc="encodings (vcodec)"):
        for pixel_format in tqdm(pix_fmt, desc="encodings (pix_fmt)", leave=False):
            benchmark_table = []
            for repo_id in tqdm(repo_ids, desc="encodings (datasets)", leave=False):
                dataset = LeRobotDataset(repo_id)
                imgs_dir = output_dir / "images" / dataset.repo_id.replace("/", "_")
                # We only use the first episode
                save_first_episode(imgs_dir, dataset)
                for key, values in tqdm(encoding_benchmarks.items(), desc="encodings (g, crf)", leave=False):
                    for value in tqdm(values, desc=f"encodings ({key})", leave=False):
                        encoding_cfg = BASE_ENCODING.copy()
                        encoding_cfg["vcodec"] = video_codec
                        encoding_cfg["pix_fmt"] = pixel_format
                        encoding_cfg[key] = value
                        # Videos land in a directory named after the full config,
                        # e.g. "libx264_yuv444p_2_None".
                        args_path = Path("_".join(str(value) for value in encoding_cfg.values()))
                        video_path = output_dir / "videos" / args_path / f"{repo_id.replace('/', '_')}.mp4"
                        benchmark_table += benchmark_encoding_decoding(
                            dataset,
                            video_path,
                            imgs_dir,
                            encoding_cfg,
                            decoding_benchmarks,
                            num_samples,
                            num_workers,
                            save_frames,
                        )

            # Save intermediate results
            benchmark_df = pd.DataFrame(benchmark_table, columns=headers)
            now = dt.datetime.now()
            csv_path = (
                output_dir
                / f"{now:%Y-%m-%d}_{now:%H-%M-%S}_{video_codec}_{pixel_format}_{num_samples}-samples.csv"
            )
            benchmark_df.to_csv(csv_path, header=True, index=False)
            file_paths.append(csv_path)
            del benchmark_df

    # Concatenate all results
    # NOTE(review): `now` is bound inside the loop above; with an empty
    # --vcodec or --pix-fmt list this raises NameError — confirm intended.
    df_list = [pd.read_csv(csv_path) for csv_path in file_paths]
    concatenated_df = pd.concat(df_list, ignore_index=True)
    concatenated_path = output_dir / f"{now:%Y-%m-%d}_{now:%H-%M-%S}_all_{num_samples}-samples.csv"
    concatenated_df.to_csv(concatenated_path, header=True, index=False)
393
+
394
+
395
if __name__ == "__main__":
    # CLI entry point. Every list-valued flag expands the benchmark grid; the
    # literal string "none" is accepted for --g/--crf and mapped to None.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=Path("outputs/video_benchmark"),
        help="Directory where the video benchmark outputs are written.",
    )
    parser.add_argument(
        "--repo-ids",
        type=str,
        nargs="*",
        default=[
            "lerobot/pusht_image",
            "aliberts/aloha_mobile_shrimp_image",
            "aliberts/paris_street",
            "aliberts/kitchen",
        ],
        help="Datasets repo-ids to test against. First episodes only are used. Must be images.",
    )
    parser.add_argument(
        "--vcodec",
        type=str,
        nargs="*",
        default=["libx264", "libx265", "libsvtav1"],
        help="Video codecs to be tested",
    )
    parser.add_argument(
        "--pix-fmt",
        type=str,
        nargs="*",
        default=["yuv444p", "yuv420p"],
        help="Pixel formats (chroma subsampling) to be tested",
    )
    parser.add_argument(
        "--g",
        type=parse_int_or_none,
        nargs="*",
        default=[1, 2, 3, 4, 5, 6, 10, 15, 20, 40, 100, None],
        help="Group of pictures sizes to be tested.",
    )
    parser.add_argument(
        "--crf",
        type=parse_int_or_none,
        nargs="*",
        default=[0, 5, 10, 15, 20, 25, 30, 40, 50, None],
        help="Constant rate factors to be tested.",
    )
    # parser.add_argument(
    #     "--fastdecode",
    #     type=int,
    #     nargs="*",
    #     default=[0, 1],
    #     help="Use the fastdecode tuning option. 0 disables it. "
    #     "For libx264 and libx265, only 1 is possible. "
    #     "For libsvtav1, 1, 2 or 3 are possible values with a higher number meaning a faster decoding optimization",
    # )
    parser.add_argument(
        "--timestamps-modes",
        type=str,
        nargs="*",
        default=[
            "1_frame",
            "2_frames",
            "2_frames_4_space",
            "6_frames",
        ],
        help="Timestamps scenarios to be tested.",
    )
    parser.add_argument(
        "--backends",
        type=str,
        nargs="*",
        default=["pyav", "video_reader"],
        help="Torchvision decoding backend to be tested.",
    )
    parser.add_argument(
        "--num-samples",
        type=int,
        default=50,
        help="Number of samples for each encoding x decoding config.",
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        default=10,
        help="Number of processes for parallelized sample processing.",
    )
    parser.add_argument(
        "--save-frames",
        type=int,
        default=0,
        help="Whether to save decoded frames or not. Enter a non-zero number for true.",
    )
    args = parser.parse_args()
    main(**vars(args))
project/ManiSkill3/src/maniskill3_environment/lerobot/docker/lerobot-cpu/Dockerfile ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# CPU-only LeRobot image: Debian slim base, with PyTorch pulled from the CPU
# wheel index so no CUDA runtime is required.

# Configure image
ARG PYTHON_VERSION=3.10
FROM python:${PYTHON_VERSION}-slim

# Configure environment variables
# ARG is redeclared because args declared before FROM go out of scope after it.
ARG PYTHON_VERSION
ENV DEBIAN_FRONTEND=noninteractive
ENV MUJOCO_GL="egl"
ENV PATH="/opt/venv/bin:$PATH"

# Install dependencies and set up Python in a single layer
# NOTE(review): the python:*-slim base ships Python at /usr/local/bin/python and
# has no /usr/bin/python${PYTHON_VERSION}, so this `ln -s` creates a dangling
# symlink — confirm whether it is still needed.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential cmake git \
    libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
    speech-dispatcher libgeos-dev \
    && ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python \
    && python -m venv /opt/venv \
    && apt-get clean && rm -rf /var/lib/apt/lists/* \
    && echo "source /opt/venv/bin/activate" >> /root/.bashrc

# Clone repository and install LeRobot in a single layer
COPY . /lerobot
WORKDIR /lerobot
RUN /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
    && /opt/venv/bin/pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]" \
    --extra-index-url https://download.pytorch.org/whl/cpu

# Execute in bash shell rather than python
CMD ["/bin/bash"]
project/ManiSkill3/src/maniskill3_environment/lerobot/docker/lerobot-gpu-dev/Dockerfile ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Development image: CUDA devel base with the build toolchain, ffmpeg build
# dependencies, the GitHub CLI and Poetry. LeRobot itself is not installed
# here; it is expected to be mounted or cloned in and installed with Poetry.

FROM nvidia/cuda:12.2.2-devel-ubuntu22.04

# Configure image
ARG PYTHON_VERSION=3.10
ARG DEBIAN_FRONTEND=noninteractive

# Install apt dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential cmake \
    git git-lfs openssh-client \
    nano vim less util-linux tree \
    htop atop nvtop \
    sed gawk grep curl wget zip unzip \
    tcpdump sysstat screen tmux \
    libglib2.0-0 libgl1-mesa-glx libegl1-mesa \
    speech-dispatcher portaudio19-dev libgeos-dev \
    python${PYTHON_VERSION} python${PYTHON_VERSION}-venv \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Install ffmpeg build dependencies. See:
# https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu
# TODO(aliberts): create image to build dependencies from source instead
# NOTE(review): this layer does not clean /var/lib/apt/lists, unlike the one
# above — confirm whether that is intentional (it inflates the image).
RUN apt-get update && apt-get install -y --no-install-recommends \
    autoconf automake yasm \
    libass-dev \
    libfreetype6-dev \
    libgnutls28-dev \
    libunistring-dev \
    libmp3lame-dev \
    libtool \
    libvorbis-dev \
    meson \
    ninja-build \
    pkg-config \
    texinfo \
    yasm \
    zlib1g-dev \
    nasm \
    libx264-dev \
    libx265-dev libnuma-dev \
    libvpx-dev \
    libfdk-aac-dev \
    libopus-dev \
    libsvtav1-dev libsvtav1enc-dev libsvtav1dec-dev \
    libdav1d-dev

# Install gh cli tool
RUN (type -p wget >/dev/null || (apt update && apt-get install wget -y)) \
    && mkdir -p -m 755 /etc/apt/keyrings \
    && wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg | tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \
    && chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \
    && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
    && apt update \
    && apt install gh -y \
    && apt clean && rm -rf /var/lib/apt/lists/*

# Setup `python`
RUN ln -s /usr/bin/python3 /usr/bin/python

# Install poetry
RUN curl -sSL https://install.python-poetry.org | python -
ENV PATH="/root/.local/bin:$PATH"
RUN echo 'if [ "$HOME" != "/root" ]; then ln -sf /root/.local/bin/poetry $HOME/.local/bin/poetry; fi' >> /root/.bashrc
# NOTE(review): `virtualenvs.create false` makes `virtualenvs.in-project true`
# moot (no venv is created at all) — confirm which behavior is intended.
RUN poetry config virtualenvs.create false
RUN poetry config virtualenvs.in-project true

# Set EGL as the rendering backend for MuJoCo
ENV MUJOCO_GL="egl"
project/ManiSkill3/src/maniskill3_environment/lerobot/docker/lerobot-gpu/Dockerfile ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# GPU runtime image: CUDA base image with LeRobot installed into a virtualenv.
# Default (CUDA-enabled) PyTorch wheels are used, unlike the CPU Dockerfile.

FROM nvidia/cuda:12.4.1-base-ubuntu22.04

# Configure environment variables
ARG PYTHON_VERSION=3.10
ENV DEBIAN_FRONTEND=noninteractive
ENV MUJOCO_GL="egl"
ENV PATH="/opt/venv/bin:$PATH"

# Install dependencies and set up Python in a single layer
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential cmake git \
    libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
    speech-dispatcher libgeos-dev \
    python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
    && ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python \
    && python -m venv /opt/venv \
    && apt-get clean && rm -rf /var/lib/apt/lists/* \
    && echo "source /opt/venv/bin/activate" >> /root/.bashrc

# Clone repository and install LeRobot in a single layer
COPY . /lerobot
WORKDIR /lerobot
# NOTE(review): unlike the CPU image, no CMD is set here, so the base image's
# default command is used — confirm this is intentional.
RUN /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
    && /opt/venv/bin/pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]"
project/ManiSkill3/src/maniskill3_environment/lerobot/examples/11_use_lekiwi.md ADDED
@@ -0,0 +1,597 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Using the [LeKiwi](https://github.com/SIGRobotics-UIUC/LeKiwi) Robot with LeRobot
2
+
3
+ ## Table of Contents
4
+
5
+ - [A. Source the parts](#a-source-the-parts)
6
+ - [B. Install software Pi](#b-install-software-on-pi)
7
+ - [C. Setup LeRobot laptop/pc](#c-install-lerobot-on-laptop)
8
+ - [D. Assemble the arms](#d-assembly)
9
+ - [E. Calibrate](#e-calibration)
10
+ - [F. Teleoperate](#f-teleoperate)
11
+ - [G. Record a dataset](#g-record-a-dataset)
12
+ - [H. Visualize a dataset](#h-visualize-a-dataset)
13
+ - [I. Replay an episode](#i-replay-an-episode)
14
+ - [J. Train a policy](#j-train-a-policy)
15
+ - [K. Evaluate your policy](#k-evaluate-your-policy)
16
+
17
+ > [!TIP]
18
+ > If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb) in the channel [`#mobile-so-100-arm`](https://discord.com/channels/1216765309076115607/1318390825528332371).
19
+
20
+ ## A. Source the parts
21
+
22
+ Follow this [README](https://github.com/SIGRobotics-UIUC/LeKiwi). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts, and advice if it's your first time printing or if you don't own a 3D printer.
23
+
24
+ Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
25
+
26
+ ### Wired version
27
+ If you have the **wired** LeKiwi version you can skip the installation of the Raspberry Pi and setting up SSH. You can also run all commands directly on your PC for both the LeKiwi scripts and the leader arm scripts for teleoperating.
28
+
29
+ ## B. Install software on Pi
30
+ Now we have to setup the remote PC that will run on the LeKiwi Robot. This is normally a Raspberry Pi, but can be any PC that can run on 5V and has enough usb ports (2 or more) for the cameras and motor control board.
31
+
32
+ ### Install OS
33
+ For setting up the Raspberry Pi and its SD-card see: [Setup PI](https://www.raspberrypi.com/documentation/computers/getting-started.html). Here is explained how to download the [Imager](https://www.raspberrypi.com/software/) to install Raspberry Pi OS or Ubuntu.
34
+
35
+ ### Setup SSH
36
+ After setting up your Pi, you should enable and set up [SSH](https://www.raspberrypi.com/news/coding-on-raspberry-pi-remotely-with-visual-studio-code/) (Secure Shell Protocol) so you can log in to the Pi from your laptop without needing a screen, keyboard and mouse connected to the Pi. A great tutorial on how to do this can be found [here](https://www.raspberrypi.com/documentation/computers/remote-access.html#ssh). Logging into your Pi can be done in your Command Prompt (cmd) or if you use VSCode you can use [this](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-ssh) extension.
37
+
38
+ ### Install LeRobot
39
+
40
+ On your Raspberry Pi:
41
+
42
+ #### 1. [Install Miniconda](https://docs.anaconda.com/miniconda/install/#quick-command-line-install):
43
+
44
+ #### 2. Restart shell
45
+ Copy paste in your shell: `source ~/.bashrc` or for Mac: `source ~/.bash_profile` or `source ~/.zshrc` if you're using zshell
46
+
47
+ #### 3. Create and activate a fresh conda environment for lerobot
48
+
49
+ <details>
50
+ <summary><strong>Video install instructions</strong></summary>
51
+
52
+ <video src="https://github.com/user-attachments/assets/17172d3b-3b64-4b80-9cf1-b2b7c5cbd236"></video>
53
+
54
+ </details>
55
+
56
+ ```bash
57
+ conda create -y -n lerobot python=3.10
58
+ ```
59
+
60
+ Then activate your conda environment (do this each time you open a shell to use lerobot!):
61
+ ```bash
62
+ conda activate lerobot
63
+ ```
64
+
65
+ #### 4. Clone LeRobot:
66
+ ```bash
67
+ git clone https://github.com/huggingface/lerobot.git ~/lerobot
68
+ ```
69
+
70
+ #### 5. Install ffmpeg in your environment:
71
+ When using `miniconda`, install `ffmpeg` in your environment:
72
+ ```bash
73
+ conda install ffmpeg -c conda-forge
74
+ ```
75
+
76
+ #### 6. Install LeRobot with dependencies for the feetech motors:
77
+ ```bash
78
+ cd ~/lerobot && pip install -e ".[feetech]"
79
+ ```
80
+
81
+ ## C. Install LeRobot on laptop
82
+ If you have already installed LeRobot on your laptop you can skip this step, otherwise please follow along as we do the same steps we did on the Pi.
83
+
84
+ > [!TIP]
85
+ > We use the Command Prompt (cmd) quite a lot. If you are not comfortable using the cmd or want to brush up using the command line you can have a look here: [Command line crash course](https://developer.mozilla.org/en-US/docs/Learn_web_development/Getting_started/Environment_setup/Command_line)
86
+
87
+ On your computer:
88
+
89
+ #### 1. [Install Miniconda](https://docs.anaconda.com/miniconda/install/#quick-command-line-install):
90
+
91
+ #### 2. Restart shell
92
+ Copy paste in your shell: `source ~/.bashrc` or for Mac: `source ~/.bash_profile` or `source ~/.zshrc` if you're using zshell
93
+
94
+ #### 3. Create and activate a fresh conda environment for lerobot
95
+
96
+ <details>
97
+ <summary><strong>Video install instructions</strong></summary>
98
+
99
+ <video src="https://github.com/user-attachments/assets/17172d3b-3b64-4b80-9cf1-b2b7c5cbd236"></video>
100
+
101
+ </details>
102
+
103
+ ```bash
104
+ conda create -y -n lerobot python=3.10
105
+ ```
106
+
107
+ Then activate your conda environment (do this each time you open a shell to use lerobot!):
108
+ ```bash
109
+ conda activate lerobot
110
+ ```
111
+
112
+ #### 4. Clone LeRobot:
113
+ ```bash
114
+ git clone https://github.com/huggingface/lerobot.git ~/lerobot
115
+ ```
116
+
117
+ #### 5. Install ffmpeg in your environment:
118
+ When using `miniconda`, install `ffmpeg` in your environment:
119
+ ```bash
120
+ conda install ffmpeg -c conda-forge
121
+ ```
122
+
123
+ #### 6. Install LeRobot with dependencies for the feetech motors:
124
+ ```bash
125
+ cd ~/lerobot && pip install -e ".[feetech]"
126
+ ```
127
+
128
+ Great :hugs:! You are now done installing LeRobot and we can begin assembling the SO100 arms and Mobile base :robot:.
129
+ Every time you now want to use LeRobot you can go to the `~/lerobot` folder where we installed LeRobot and run one of the commands.
130
+
131
+ # D. Assembly
132
+
133
+ First we will assemble the two SO100 arms. One to attach to the mobile base and one for teleoperation. Then we will assemble the mobile base.
134
+
135
+ ## SO100 Arms
136
+ ### Configure motors
137
+ The instructions for configuring the motors can be found [Here](https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md#c-configure-the-motors) in step C of the SO100 tutorial. Besides the ID's for the arm motors we also need to set the motor ID's for the mobile base. These need to be in a specific order to work. Below an image of the motor ID's and motor mounting positions for the mobile base. Note that we only use one Motor Control board on LeKiwi. This means the motor ID's for the wheels are 7, 8 and 9.
138
+
139
+ <img src="../media/lekiwi/motor_ids.webp?raw=true" alt="Motor ID's for mobile robot" title="Motor ID's for mobile robot" width="60%">
140
+
141
+ ### Assemble arms
142
+ [Assemble arms instruction](https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md#d-assemble-the-arms)
143
+
144
+ ## Mobile base (LeKiwi)
145
+ [Assemble LeKiwi](https://github.com/SIGRobotics-UIUC/LeKiwi)
146
+
147
+ ### Update config
148
+ Both config files on the LeKiwi LeRobot and on the laptop should be the same. First we should find the IP address of the Raspberry Pi of the mobile manipulator. This is the same IP address used in SSH. We also need the USB port of the control board of the leader arm on the laptop and the port of the control board on LeKiwi. We can find these ports with the following script.
149
+
150
+ #### a. Run the script to find port
151
+
152
+ <details>
153
+ <summary><strong>Video finding port</strong></summary>
154
+ <video src="https://github.com/user-attachments/assets/4a21a14d-2046-4805-93c4-ee97a30ba33f"></video>
155
+ <video src="https://github.com/user-attachments/assets/1cc3aecf-c16d-4ff9-aec7-8c175afbbce2"></video>
156
+ </details>
157
+
158
+ To find the port for each bus servo adapter, run the utility script:
159
+ ```bash
160
+ python lerobot/scripts/find_motors_bus_port.py
161
+ ```
162
+
163
+ #### b. Example outputs
164
+
165
+ Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
166
+ ```
167
+ Finding all available ports for the MotorBus.
168
+ ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
169
+ Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
170
+
171
+ [...Disconnect leader arm and press Enter...]
172
+
173
+ The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
174
+ Reconnect the usb cable.
175
+ ```
176
+ Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
177
+ ```
178
+ Finding all available ports for the MotorBus.
179
+ ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
180
+ Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
181
+
182
+ [...Disconnect follower arm and press Enter...]
183
+
184
+ The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
185
+ Reconnect the usb cable.
186
+ ```
187
+
188
+ #### c. Troubleshooting
189
+ On Linux, you might need to give access to the USB ports by running:
190
+ ```bash
191
+ sudo chmod 666 /dev/ttyACM0
192
+ sudo chmod 666 /dev/ttyACM1
193
+ ```
194
+
195
+ #### d. Update config file
196
+
197
+ IMPORTANTLY: Now that you have your ports of leader and follower arm and ip address of the mobile-so100, update the **ip** in Network configuration, **port** in leader_arms and **port** in lekiwi. In the [`LeKiwiRobotConfig`](../lerobot/common/robot_devices/robots/configs.py) file. Where you will find something like:
198
+ ```python
199
+ @RobotConfig.register_subclass("lekiwi")
200
+ @dataclass
201
+ class LeKiwiRobotConfig(RobotConfig):
202
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
203
+ # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
204
+ # the number of motors in your follower arms.
205
+ max_relative_target: int | None = None
206
+
207
+ # Network Configuration
208
+ ip: str = "172.17.133.91"
209
+ port: int = 5555
210
+ video_port: int = 5556
211
+
212
+ cameras: dict[str, CameraConfig] = field(
213
+ default_factory=lambda: {
214
+ "mobile": OpenCVCameraConfig(camera_index="/dev/video0", fps=30, width=640, height=480),
215
+ "mobile2": OpenCVCameraConfig(camera_index="/dev/video2", fps=30, width=640, height=480),
216
+ }
217
+ )
218
+
219
+ calibration_dir: str = ".cache/calibration/lekiwi"
220
+
221
+ leader_arms: dict[str, MotorsBusConfig] = field(
222
+ default_factory=lambda: {
223
+ "main": FeetechMotorsBusConfig(
224
+ port="/dev/tty.usbmodem585A0077581",
225
+ motors={
226
+ # name: (index, model)
227
+ "shoulder_pan": [1, "sts3215"],
228
+ "shoulder_lift": [2, "sts3215"],
229
+ "elbow_flex": [3, "sts3215"],
230
+ "wrist_flex": [4, "sts3215"],
231
+ "wrist_roll": [5, "sts3215"],
232
+ "gripper": [6, "sts3215"],
233
+ },
234
+ ),
235
+ }
236
+ )
237
+
238
+ follower_arms: dict[str, MotorsBusConfig] = field(
239
+ default_factory=lambda: {
240
+ "main": FeetechMotorsBusConfig(
241
+ port="/dev/ttyACM0",
242
+ motors={
243
+ # name: (index, model)
244
+ "shoulder_pan": [1, "sts3215"],
245
+ "shoulder_lift": [2, "sts3215"],
246
+ "elbow_flex": [3, "sts3215"],
247
+ "wrist_flex": [4, "sts3215"],
248
+ "wrist_roll": [5, "sts3215"],
249
+ "gripper": [6, "sts3215"],
250
+ "left_wheel": (7, "sts3215"),
251
+ "back_wheel": (8, "sts3215"),
252
+ "right_wheel": (9, "sts3215"),
253
+ },
254
+ ),
255
+ }
256
+ )
257
+
258
+ teleop_keys: dict[str, str] = field(
259
+ default_factory=lambda: {
260
+ # Movement
261
+ "forward": "w",
262
+ "backward": "s",
263
+ "left": "a",
264
+ "right": "d",
265
+ "rotate_left": "z",
266
+ "rotate_right": "x",
267
+ # Speed control
268
+ "speed_up": "r",
269
+ "speed_down": "f",
270
+ # quit teleop
271
+ "quit": "q",
272
+ }
273
+ )
274
+
275
+ mock: bool = False
276
+ ```
277
+
278
+ ## Wired version
279
+
280
+ For the wired LeKiwi version your configured IP address should refer to your own laptop (127.0.0.1), because the leader arm and LeKiwi are in this case connected to your own laptop. Below is an example configuration for this wired setup:
281
+ ```python
282
+ @RobotConfig.register_subclass("lekiwi")
283
+ @dataclass
284
+ class LeKiwiRobotConfig(RobotConfig):
285
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
286
+ # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
287
+ # the number of motors in your follower arms.
288
+ max_relative_target: int | None = None
289
+
290
+ # Network Configuration
291
+ ip: str = "127.0.0.1"
292
+ port: int = 5555
293
+ video_port: int = 5556
294
+
295
+ cameras: dict[str, CameraConfig] = field(
296
+ default_factory=lambda: {
297
+ "front": OpenCVCameraConfig(
298
+ camera_index=0, fps=30, width=640, height=480, rotation=90
299
+ ),
300
+ "wrist": OpenCVCameraConfig(
301
+ camera_index=1, fps=30, width=640, height=480, rotation=180
302
+ ),
303
+ }
304
+ )
305
+
306
+ calibration_dir: str = ".cache/calibration/lekiwi"
307
+
308
+ leader_arms: dict[str, MotorsBusConfig] = field(
309
+ default_factory=lambda: {
310
+ "main": FeetechMotorsBusConfig(
311
+ port="/dev/tty.usbmodem585A0077581",
312
+ motors={
313
+ # name: (index, model)
314
+ "shoulder_pan": [1, "sts3215"],
315
+ "shoulder_lift": [2, "sts3215"],
316
+ "elbow_flex": [3, "sts3215"],
317
+ "wrist_flex": [4, "sts3215"],
318
+ "wrist_roll": [5, "sts3215"],
319
+ "gripper": [6, "sts3215"],
320
+ },
321
+ ),
322
+ }
323
+ )
324
+
325
+ follower_arms: dict[str, MotorsBusConfig] = field(
326
+ default_factory=lambda: {
327
+ "main": FeetechMotorsBusConfig(
328
+ port="/dev/tty.usbmodem58760431061",
329
+ motors={
330
+ # name: (index, model)
331
+ "shoulder_pan": [1, "sts3215"],
332
+ "shoulder_lift": [2, "sts3215"],
333
+ "elbow_flex": [3, "sts3215"],
334
+ "wrist_flex": [4, "sts3215"],
335
+ "wrist_roll": [5, "sts3215"],
336
+ "gripper": [6, "sts3215"],
337
+ "left_wheel": (7, "sts3215"),
338
+ "back_wheel": (8, "sts3215"),
339
+ "right_wheel": (9, "sts3215"),
340
+ },
341
+ ),
342
+ }
343
+ )
344
+
345
+ teleop_keys: dict[str, str] = field(
346
+ default_factory=lambda: {
347
+ # Movement
348
+ "forward": "w",
349
+ "backward": "s",
350
+ "left": "a",
351
+ "right": "d",
352
+ "rotate_left": "z",
353
+ "rotate_right": "x",
354
+ # Speed control
355
+ "speed_up": "r",
356
+ "speed_down": "f",
357
+ # quit teleop
358
+ "quit": "q",
359
+ }
360
+ )
361
+
362
+ mock: bool = False
363
+ ```
364
+
365
+ # E. Calibration
366
+ Now we have to calibrate the leader arm and the follower arm. The wheel motors don't have to be calibrated.
367
+
368
+
369
+ ### Calibrate follower arm (on mobile base)
370
+ > [!IMPORTANT]
371
+ > Contrarily to step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which illustrates the auto calibration, we will actually do manual calibration of follower for now.
372
+
373
+ You will need to move the follower arm to these positions sequentially:
374
+
375
+ | 1. Zero position | 2. Rotated position | 3. Rest position |
376
+ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
377
+ | <img src="../media/lekiwi/mobile_calib_zero.webp?raw=true" alt="SO-100 follower arm zero position" title="SO-100 follower arm zero position" style="width:100%;"> | <img src="../media/lekiwi/mobile_calib_rotated.webp?raw=true" alt="SO-100 follower arm rotated position" title="SO-100 follower arm rotated position" style="width:100%;"> | <img src="../media/lekiwi/mobile_calib_rest.webp?raw=true" alt="SO-100 follower arm rest position" title="SO-100 follower arm rest position" style="width:100%;"> |
378
+
379
+ Make sure the arm is connected to the Raspberry Pi and run this script (on the Raspberry Pi) to launch manual calibration:
380
+ ```bash
381
+ python lerobot/scripts/control_robot.py \
382
+ --robot.type=lekiwi \
383
+ --robot.cameras='{}' \
384
+ --control.type=calibrate \
385
+ --control.arms='["main_follower"]'
386
+ ```
387
+
388
+ ### Wired version
389
+ If you have the **wired** LeKiwi version please run all commands including this calibration command on your laptop.
390
+
391
+ ### Calibrate leader arm
392
+ Then to calibrate the leader arm (which is attached to the laptop/pc). You will need to move the leader arm to these positions sequentially:
393
+
394
+ | 1. Zero position | 2. Rotated position | 3. Rest position |
395
+ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
396
+ | <img src="../media/so100/leader_zero.webp?raw=true" alt="SO-100 leader arm zero position" title="SO-100 leader arm zero position" style="width:100%;"> | <img src="../media/so100/leader_rotated.webp?raw=true" alt="SO-100 leader arm rotated position" title="SO-100 leader arm rotated position" style="width:100%;"> | <img src="../media/so100/leader_rest.webp?raw=true" alt="SO-100 leader arm rest position" title="SO-100 leader arm rest position" style="width:100%;"> |
397
+
398
+ Run this script (on your laptop/pc) to launch manual calibration:
399
+ ```bash
400
+ python lerobot/scripts/control_robot.py \
401
+ --robot.type=lekiwi \
402
+ --robot.cameras='{}' \
403
+ --control.type=calibrate \
404
+ --control.arms='["main_leader"]'
405
+ ```
406
+
407
+ # F. Teleoperate
408
+
409
+ > [!TIP]
410
+ > If you're using a Mac, you might need to give Terminal permission to access your keyboard. Go to System Preferences > Security & Privacy > Input Monitoring and check the box for Terminal.
411
+
412
+ To teleoperate SSH into your Raspberry Pi, and run `conda activate lerobot` and this script:
413
+ ```bash
414
+ python lerobot/scripts/control_robot.py \
415
+ --robot.type=lekiwi \
416
+ --control.type=remote_robot
417
+ ```
418
+
419
+ Then on your laptop, also run `conda activate lerobot` and this script:
420
+ ```bash
421
+ python lerobot/scripts/control_robot.py \
422
+ --robot.type=lekiwi \
423
+ --control.type=teleoperate \
424
+ --control.fps=30
425
+ ```
426
+
427
+ > **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`. For the `--control.type=remote_robot` you will also need to set `--control.viewer_ip` and `--control.viewer_port`
428
+
429
+ You should see on your laptop something like this: ```[INFO] Connected to remote robot at tcp://172.17.133.91:5555 and video stream at tcp://172.17.133.91:5556.``` Now you can move the leader arm and use the keyboard (w,a,s,d) to drive forward, left, backwards, right. And use (z,x) to turn left or turn right. You can use (r,f) to increase and decrease the speed of the mobile robot. There are three speed modes, see the table below:
430
+ | Speed Mode | Linear Speed (m/s) | Rotation Speed (deg/s) |
431
+ | ---------- | ------------------ | ---------------------- |
432
+ | Fast | 0.4 | 90 |
433
+ | Medium | 0.25 | 60 |
434
+ | Slow | 0.1 | 30 |
435
+
436
+
437
+ | Key | Action |
438
+ | --- | -------------- |
439
+ | W | Move forward |
440
+ | A | Move left |
441
+ | S | Move backward |
442
+ | D | Move right |
443
+ | Z | Turn left |
444
+ | X | Turn right |
445
+ | R | Increase speed |
446
+ | F | Decrease speed |
447
+
448
+ > [!TIP]
449
+ > If you use a different keyboard you can change the keys for each command in the [`LeKiwiRobotConfig`](../lerobot/common/robot_devices/robots/configs.py).
450
+
451
+ ### Wired version
452
+ If you have the **wired** LeKiwi version please run all commands including both these teleoperation commands on your laptop.
453
+
454
+ ## Troubleshoot communication
455
+
456
+ If you are having trouble connecting to the Mobile SO100, follow these steps to diagnose and resolve the issue.
457
+
458
+ ### 1. Verify IP Address Configuration
459
+ Make sure that the correct IP for the Pi is set in the configuration file. To check the Raspberry Pi's IP address, run (on the Pi command line):
460
+ ```bash
461
+ hostname -I
462
+ ```
463
+
464
+ ### 2. Check if Pi is reachable from laptop/pc
465
+ Try pinging the Raspberry Pi from your laptop:
466
+ ```bash
467
+ ping <your_pi_ip_address>
468
+ ```
469
+
470
+ If the ping fails:
471
+ - Ensure the Pi is powered on and connected to the same network.
472
+ - Check if SSH is enabled on the Pi.
473
+
474
+ ### 3. Try SSH connection
475
+ If you can't SSH into the Pi, it might not be properly connected. Use:
476
+ ```bash
477
+ ssh <your_pi_user_name>@<your_pi_ip_address>
478
+ ```
479
+ If you get a connection error:
480
+ - Ensure SSH is enabled on the Pi by running:
481
+ ```bash
482
+ sudo raspi-config
483
+ ```
484
+ Then navigate to: **Interfacing Options -> SSH** and enable it.
485
+
486
+ ### 4. Same config file
487
+ Make sure the configuration file on both your laptop/pc and the Raspberry Pi is the same.
488
+
489
+ # G. Record a dataset
490
+ Once you're familiar with teleoperation, you can record your first dataset with LeKiwi.
491
+
492
+ To start the program on LeKiwi, SSH into your Raspberry Pi, and run `conda activate lerobot` and this script:
493
+ ```bash
494
+ python lerobot/scripts/control_robot.py \
495
+ --robot.type=lekiwi \
496
+ --control.type=remote_robot
497
+ ```
498
+
499
+ If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
500
+ ```bash
501
+ huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
502
+ ```
503
+
504
+ Store your Hugging Face repository name in a variable to run these commands:
505
+ ```bash
506
+ HF_USER=$(huggingface-cli whoami | head -n 1)
507
+ echo $HF_USER
508
+ ```
509
+ On your laptop then run this command to record 2 episodes and upload your dataset to the hub:
510
+ ```bash
511
+ python lerobot/scripts/control_robot.py \
512
+ --robot.type=lekiwi \
513
+ --control.type=record \
514
+ --control.fps=30 \
515
+ --control.single_task="Grasp a lego block and put it in the bin." \
516
+ --control.repo_id=${HF_USER}/lekiwi_test \
517
+ --control.tags='["tutorial"]' \
518
+ --control.warmup_time_s=5 \
519
+ --control.episode_time_s=30 \
520
+ --control.reset_time_s=30 \
521
+ --control.num_episodes=2 \
522
+ --control.push_to_hub=true
523
+ ```
524
+
525
+ Note: You can resume recording by adding `--control.resume=true`.
526
+
527
+ ### Wired version
528
+ If you have the **wired** LeKiwi version please run all commands including both these record dataset commands on your laptop.
529
+
530
+ # H. Visualize a dataset
531
+
532
+ If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
533
+ ```bash
534
+ echo ${HF_USER}/lekiwi_test
535
+ ```
536
+
537
+ If you didn't upload with `--control.push_to_hub=true`, you can also visualize it locally with (a window can be opened in the browser `http://127.0.0.1:9090` with the visualization tool):
538
+ ```bash
539
+ python lerobot/scripts/visualize_dataset_html.py \
540
+ --repo-id ${HF_USER}/lekiwi_test \
541
+ --local-files-only 1
542
+ ```
543
+
544
+ # I. Replay an episode
545
+ Now try to replay the first episode on your robot:
546
+ ```bash
547
+ python lerobot/scripts/control_robot.py \
548
+ --robot.type=lekiwi \
549
+ --control.type=replay \
550
+ --control.fps=30 \
551
+ --control.repo_id=${HF_USER}/lekiwi_test \
552
+ --control.episode=0
553
+ ```
554
+
555
+ ## J. Train a policy
556
+
557
+ To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
558
+ ```bash
559
+ python lerobot/scripts/train.py \
560
+ --dataset.repo_id=${HF_USER}/lekiwi_test \
561
+ --policy.type=act \
562
+ --output_dir=outputs/train/act_lekiwi_test \
563
+ --job_name=act_lekiwi_test \
564
+ --policy.device=cuda \
565
+ --wandb.enable=true
566
+ ```
567
+
568
+ Let's explain it:
569
+ 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/lekiwi_test`.
570
+ 2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
571
+ 3. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
572
+ 4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
573
+
574
+ Training should take several hours. You will find checkpoints in `outputs/train/act_lekiwi_test/checkpoints`.
575
+
576
+ ## K. Evaluate your policy
577
+
578
+ You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
579
+ ```bash
580
+ python lerobot/scripts/control_robot.py \
581
+ --robot.type=lekiwi \
582
+ --control.type=record \
583
+ --control.fps=30 \
584
+ --control.single_task="Drive to the red block and pick it up" \
585
+ --control.repo_id=${HF_USER}/eval_act_lekiwi_test \
586
+ --control.tags='["tutorial"]' \
587
+ --control.warmup_time_s=5 \
588
+ --control.episode_time_s=30 \
589
+ --control.reset_time_s=30 \
590
+ --control.num_episodes=10 \
591
+ --control.push_to_hub=true \
592
+ --control.policy.path=outputs/train/act_lekiwi_test/checkpoints/last/pretrained_model
593
+ ```
594
+
595
+ As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
596
+ 1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_act_lekiwi_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_lekiwi_test`).
597
+ 2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_lekiwi_test`).
project/ManiSkill3/src/maniskill3_environment/lerobot/examples/11_use_moss.md ADDED
@@ -0,0 +1,337 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This tutorial explains how to use [Moss v1](https://github.com/jess-moss/moss-robot-arms) with LeRobot.
2
+
3
+ ## Source the parts
4
+
5
+ Follow this [README](https://github.com/jess-moss/moss-robot-arms). It contains the bill of materials with link to source the parts, as well as the instructions to 3D print the parts and advice if it's your first time printing or if you don't own a 3D printer already.
6
+
7
+ **Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
8
+
9
+ ## Install LeRobot
10
+
11
+ On your computer:
12
+
13
+ 1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
14
+ ```bash
15
+ mkdir -p ~/miniconda3
16
+ wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
17
+ bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
18
+ rm ~/miniconda3/miniconda.sh
19
+ ~/miniconda3/bin/conda init bash
20
+ ```
21
+
22
+ 2. Restart shell or `source ~/.bashrc`
23
+
24
+ 3. Create and activate a fresh conda environment for lerobot
25
+ ```bash
26
+ conda create -y -n lerobot python=3.10 && conda activate lerobot
27
+ ```
28
+
29
+ 4. Clone LeRobot:
30
+ ```bash
31
+ git clone https://github.com/huggingface/lerobot.git ~/lerobot
32
+ ```
33
+
34
+ 5. Install ffmpeg in your environment:
35
+ When using `miniconda`, install `ffmpeg` in your environment:
36
+ ```bash
37
+ conda install ffmpeg -c conda-forge
38
+ ```
39
+
40
+ 6. Install LeRobot with dependencies for the feetech motors:
41
+ ```bash
42
+ cd ~/lerobot && pip install -e ".[feetech]"
43
+ ```
44
+
45
+ ## Configure the motors
46
+
47
+ Follow steps 1 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the use of our scripts below.
48
+
49
+ **Find USB ports associated to your arms**
50
+ To find the correct ports for each arm, run the utility script twice:
51
+ ```bash
52
+ python lerobot/scripts/find_motors_bus_port.py
53
+ ```
54
+
55
+ Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
56
+ ```
57
+ Finding all available ports for the MotorBus.
58
+ ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
59
+ Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
60
+
61
+ [...Disconnect leader arm and press Enter...]
62
+
63
+ The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
64
+ Reconnect the usb cable.
65
+ ```
66
+
67
+ Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
68
+ ```
69
+ Finding all available ports for the MotorBus.
70
+ ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
71
+ Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
72
+
73
+ [...Disconnect follower arm and press Enter...]
74
+
75
+ The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
76
+ Reconnect the usb cable.
77
+ ```
78
+
79
+ Troubleshooting: On Linux, you might need to give access to the USB ports by running:
80
+ ```bash
81
+ sudo chmod 666 /dev/ttyACM0
82
+ sudo chmod 666 /dev/ttyACM1
83
+ ```
84
+
85
+ #### Update config file
86
+
87
+ IMPORTANTLY: Now that you have your ports, update the **port** default values of [`MossRobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like:
88
+ ```python
89
+ @RobotConfig.register_subclass("moss")
90
+ @dataclass
91
+ class MossRobotConfig(ManipulatorRobotConfig):
92
+ calibration_dir: str = ".cache/calibration/moss"
93
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
94
+ # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
95
+ # the number of motors in your follower arms.
96
+ max_relative_target: int | None = None
97
+
98
+ leader_arms: dict[str, MotorsBusConfig] = field(
99
+ default_factory=lambda: {
100
+ "main": FeetechMotorsBusConfig(
101
+ port="/dev/tty.usbmodem58760431091", <-- UPDATE HERE
102
+ motors={
103
+ # name: (index, model)
104
+ "shoulder_pan": [1, "sts3215"],
105
+ "shoulder_lift": [2, "sts3215"],
106
+ "elbow_flex": [3, "sts3215"],
107
+ "wrist_flex": [4, "sts3215"],
108
+ "wrist_roll": [5, "sts3215"],
109
+ "gripper": [6, "sts3215"],
110
+ },
111
+ ),
112
+ }
113
+ )
114
+
115
+ follower_arms: dict[str, MotorsBusConfig] = field(
116
+ default_factory=lambda: {
117
+ "main": FeetechMotorsBusConfig(
118
+ port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE
119
+ motors={
120
+ # name: (index, model)
121
+ "shoulder_pan": [1, "sts3215"],
122
+ "shoulder_lift": [2, "sts3215"],
123
+ "elbow_flex": [3, "sts3215"],
124
+ "wrist_flex": [4, "sts3215"],
125
+ "wrist_roll": [5, "sts3215"],
126
+ "gripper": [6, "sts3215"],
127
+ },
128
+ ),
129
+ }
130
+ )
131
+ ```
132
+
133
+ **Configure your motors**
134
+ Plug your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate:
135
+ ```bash
136
+ python lerobot/scripts/configure_motor.py \
137
+ --port /dev/tty.usbmodem58760432961 \
138
+ --brand feetech \
139
+ --model sts3215 \
140
+ --baudrate 1000000 \
141
+ --ID 1
142
+ ```
143
+
144
+ Note: These motors are currently limited. They can take values between 0 and 4096 only, which corresponds to a full turn. They can't turn more than that. 2048 is at the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) and reach the maximum range, or take +2048 steps (180 degrees clockwise) and reach the maximum range. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift up to ± 2048 steps (± 180 degrees).
145
+
146
+ Then unplug your motor and plug the second motor and set its ID to 2.
147
+ ```bash
148
+ python lerobot/scripts/configure_motor.py \
149
+ --port /dev/tty.usbmodem58760432961 \
150
+ --brand feetech \
151
+ --model sts3215 \
152
+ --baudrate 1000000 \
153
+ --ID 2
154
+ ```
155
+
156
+ Redo the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm.
157
+
158
+ **Remove the gears of the 6 leader motors**
159
+ Follow step 2 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm.
160
+
161
+ **Add motor horn to the motors**
162
+ Follow step 3 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). For Moss v1, you need to align the holes on the motor horn to the motor spline to be approximately 3, 6, 9 and 12 o'clock.
163
+ Try to avoid rotating the motor while doing so to keep position 2048 set during configuration. It is especially tricky for the leader motors as it is more sensitive without the gears, but it's ok if it's a bit rotated.
164
+
165
+ ## Assemble the arms
166
+
167
+ Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can do it under 1 hour for the second arm.
168
+
169
+ ## Calibrate
170
+
171
+ Next, you'll need to calibrate your Moss v1 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Moss v1 robot to work on another.
172
+
173
+ **Manual calibration of follower arm**
174
+ /!\ Contrarily to step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the auto calibration, we will actually do manual calibration of follower for now.
175
+
176
+ You will need to move the follower arm to these positions sequentially:
177
+
178
+ | 1. Zero position | 2. Rotated position | 3. Rest position |
179
+ | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
180
+ | <img src="../media/moss/follower_zero.webp?raw=true" alt="Moss v1 follower arm zero position" title="Moss v1 follower arm zero position" style="width:100%;"> | <img src="../media/moss/follower_rotated.webp?raw=true" alt="Moss v1 follower arm rotated position" title="Moss v1 follower arm rotated position" style="width:100%;"> | <img src="../media/moss/follower_rest.webp?raw=true" alt="Moss v1 follower arm rest position" title="Moss v1 follower arm rest position" style="width:100%;"> |
181
+
182
+ Make sure both arms are connected and run this script to launch manual calibration:
183
+ ```bash
184
+ python lerobot/scripts/control_robot.py \
185
+ --robot.type=moss \
186
+ --robot.cameras='{}' \
187
+ --control.type=calibrate \
188
+ --control.arms='["main_follower"]'
189
+ ```
190
+
191
+ **Manual calibration of leader arm**
192
+ Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
193
+
194
+ | 1. Zero position | 2. Rotated position | 3. Rest position |
195
+ | ------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- |
196
+ | <img src="../media/moss/leader_zero.webp?raw=true" alt="Moss v1 leader arm zero position" title="Moss v1 leader arm zero position" style="width:100%;"> | <img src="../media/moss/leader_rotated.webp?raw=true" alt="Moss v1 leader arm rotated position" title="Moss v1 leader arm rotated position" style="width:100%;"> | <img src="../media/moss/leader_rest.webp?raw=true" alt="Moss v1 leader arm rest position" title="Moss v1 leader arm rest position" style="width:100%;"> |
197
+
198
+ Run this script to launch manual calibration:
199
+ ```bash
200
+ python lerobot/scripts/control_robot.py \
201
+ --robot.type=moss \
202
+ --robot.cameras='{}' \
203
+ --control.type=calibrate \
204
+ --control.arms='["main_leader"]'
205
+ ```
206
+
207
+ ## Teleoperate
208
+
209
+ **Simple teleop**
210
+ Then you are ready to teleoperate your robot! Run this simple script (it won't connect and display the cameras):
211
+ ```bash
212
+ python lerobot/scripts/control_robot.py \
213
+ --robot.type=moss \
214
+ --robot.cameras='{}' \
215
+ --control.type=teleoperate
216
+ ```
217
+
218
+
219
+ **Teleop with displaying cameras**
220
+ Follow [this guide to setup your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset.
221
+
222
+ > **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`.
223
+
224
+ ```bash
225
+ python lerobot/scripts/control_robot.py \
226
+ --robot.type=moss \
227
+ --control.type=teleoperate
228
+ ```
229
+
230
+ ## Record a dataset
231
+
232
+ Once you're familiar with teleoperation, you can record your first dataset with Moss v1.
233
+
234
+ If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
235
+ ```bash
236
+ huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
237
+ ```
238
+
239
+ Store your Hugging Face repository name in a variable to run these commands:
240
+ ```bash
241
+ HF_USER=$(huggingface-cli whoami | head -n 1)
242
+ echo $HF_USER
243
+ ```
244
+
245
+ Record 2 episodes and upload your dataset to the hub:
246
+ ```bash
247
+ python lerobot/scripts/control_robot.py \
248
+ --robot.type=moss \
249
+ --control.type=record \
250
+ --control.fps=30 \
251
+ --control.single_task="Grasp a lego block and put it in the bin." \
252
+ --control.repo_id=${HF_USER}/moss_test \
253
+ --control.tags='["moss","tutorial"]' \
254
+ --control.warmup_time_s=5 \
255
+ --control.episode_time_s=30 \
256
+ --control.reset_time_s=30 \
257
+ --control.num_episodes=2 \
258
+ --control.push_to_hub=true
259
+ ```
260
+
261
+ Note: You can resume recording by adding `--control.resume=true`.
262
+
263
+ ## Visualize a dataset
264
+
265
+ If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
266
+ ```bash
267
+ echo ${HF_USER}/moss_test
268
+ ```
269
+
270
+ If you didn't upload it (i.e. you used `--control.push_to_hub=false`), you can also visualize it locally with:
271
+ ```bash
272
+ python lerobot/scripts/visualize_dataset_html.py \
273
+ --repo-id ${HF_USER}/moss_test \
274
+ --local-files-only 1
275
+ ```
276
+
277
+ ## Replay an episode
278
+
279
+ Now try to replay the first episode on your robot:
280
+ ```bash
281
+ python lerobot/scripts/control_robot.py \
282
+ --robot.type=moss \
283
+ --control.type=replay \
284
+ --control.fps=30 \
285
+ --control.repo_id=${HF_USER}/moss_test \
286
+ --control.episode=0
287
+ ```
288
+
289
+ ## Train a policy
290
+
291
+ To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
292
+ ```bash
293
+ python lerobot/scripts/train.py \
294
+ --dataset.repo_id=${HF_USER}/moss_test \
295
+ --policy.type=act \
296
+ --output_dir=outputs/train/act_moss_test \
297
+ --job_name=act_moss_test \
298
+ --policy.device=cuda \
299
+ --wandb.enable=true
300
+ ```
301
+
302
+ Let's explain it:
303
+ 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/moss_test`.
304
+ 2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
305
+ 3. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
306
+ 4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
307
+
308
+ Training should take several hours. You will find checkpoints in `outputs/train/act_moss_test/checkpoints`.
309
+
310
+ ## Evaluate your policy
311
+
312
+ You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
313
+ ```bash
314
+ python lerobot/scripts/control_robot.py \
315
+ --robot.type=moss \
316
+ --control.type=record \
317
+ --control.fps=30 \
318
+ --control.single_task="Grasp a lego block and put it in the bin." \
319
+ --control.repo_id=${HF_USER}/eval_act_moss_test \
320
+ --control.tags='["tutorial"]' \
321
+ --control.warmup_time_s=5 \
322
+ --control.episode_time_s=30 \
323
+ --control.reset_time_s=30 \
324
+ --control.num_episodes=10 \
325
+ --control.push_to_hub=true \
326
+ --control.policy.path=outputs/train/act_moss_test/checkpoints/last/pretrained_model
327
+ ```
328
+
329
+ As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
330
+ 1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/act_moss_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_moss_test`).
331
+ 2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_moss_test`).
332
+
333
+ ## More
334
+
335
+ Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth tutorial on controlling real robots with LeRobot.
336
+
337
+ If you have any question or need help, please reach out on Discord in the channel [`#moss-arm`](https://discord.com/channels/1216765309076115607/1275374638985252925).
project/ManiSkill3/src/maniskill3_environment/lerobot/examples/1_load_lerobot_dataset.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ This script demonstrates the use of `LeRobotDataset` class for handling and processing robotic datasets from Hugging Face.
17
+ It illustrates how to load datasets, manipulate them, and apply transformations suitable for machine learning tasks in PyTorch.
18
+
19
+ Features included in this script:
20
+ - Viewing a dataset's metadata and exploring its properties.
21
+ - Loading an existing dataset from the hub or a subset of it.
22
+ - Accessing frames by episode number.
23
+ - Using advanced dataset features like timestamp-based frame selection.
24
+ - Demonstrating compatibility with PyTorch DataLoader for batch processing.
25
+
26
+ The script ends with examples of how to batch process data using PyTorch's DataLoader.
27
+ """
28
+
29
+ from pprint import pprint
30
+
31
+ import torch
32
+ from huggingface_hub import HfApi
33
+
34
+ import lerobot
35
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
36
+
37
+ # We ported a number of existing datasets ourselves, use this to see the list:
38
+ print("List of available datasets:")
39
+ pprint(lerobot.available_datasets)
40
+
41
+ # You can also browse through the datasets created/ported by the community on the hub using the hub api:
42
+ hub_api = HfApi()
43
+ repo_ids = [info.id for info in hub_api.list_datasets(task_categories="robotics", tags=["LeRobot"])]
44
+ pprint(repo_ids)
45
+
46
+ # Or simply explore them in your web browser directly at:
47
+ # https://huggingface.co/datasets?other=LeRobot
48
+
49
+ # Let's take this one for this example
50
+ repo_id = "lerobot/aloha_mobile_cabinet"
51
+ # We can have a look and fetch its metadata to know more about it:
52
+ ds_meta = LeRobotDatasetMetadata(repo_id)
53
+
54
+ # By instantiating just this class, you can quickly access useful information about the content and the
55
+ # structure of the dataset without downloading the actual data yet (only metadata files — which are
56
+ # lightweight).
57
+ print(f"Total number of episodes: {ds_meta.total_episodes}")
58
+ print(f"Average number of frames per episode: {ds_meta.total_frames / ds_meta.total_episodes:.3f}")
59
+ print(f"Frames per second used during data collection: {ds_meta.fps}")
60
+ print(f"Robot type: {ds_meta.robot_type}")
61
+ print(f"keys to access images from cameras: {ds_meta.camera_keys=}\n")
62
+
63
+ print("Tasks:")
64
+ print(ds_meta.tasks)
65
+ print("Features:")
66
+ pprint(ds_meta.features)
67
+
68
+ # You can also get a short summary by simply printing the object:
69
+ print(ds_meta)
70
+
71
+ # You can then load the actual dataset from the hub.
72
+ # Either load any subset of episodes:
73
+ dataset = LeRobotDataset(repo_id, episodes=[0, 10, 11, 23])
74
+
75
+ # And see how many frames you have:
76
+ print(f"Selected episodes: {dataset.episodes}")
77
+ print(f"Number of episodes selected: {dataset.num_episodes}")
78
+ print(f"Number of frames selected: {dataset.num_frames}")
79
+
80
+ # Or simply load the entire dataset:
81
+ dataset = LeRobotDataset(repo_id)
82
+ print(f"Number of episodes selected: {dataset.num_episodes}")
83
+ print(f"Number of frames selected: {dataset.num_frames}")
84
+
85
+ # The previous metadata class is contained in the 'meta' attribute of the dataset:
86
+ print(dataset.meta)
87
+
88
+ # LeRobotDataset actually wraps an underlying Hugging Face dataset
89
+ # (see https://huggingface.co/docs/datasets for more information).
90
+ print(dataset.hf_dataset)
91
+
92
+ # LeRobot datasets also subclasses PyTorch datasets so you can do everything you know and love from working
93
+ # with the latter, like iterating through the dataset.
94
+ # The __getitem__ iterates over the frames of the dataset. Since our datasets are also structured by
95
+ # episodes, you can access the frame indices of any episode using the episode_data_index. Here, we access
96
+ # frame indices associated to the first episode:
97
+ episode_index = 0
98
+ from_idx = dataset.episode_data_index["from"][episode_index].item()
99
+ to_idx = dataset.episode_data_index["to"][episode_index].item()
100
+
101
+ # Then we grab all the image frames from the first camera:
102
+ camera_key = dataset.meta.camera_keys[0]
103
+ frames = [dataset[idx][camera_key] for idx in range(from_idx, to_idx)]
104
+
105
+ # The objects returned by the dataset are all torch.Tensors
106
+ print(type(frames[0]))
107
+ print(frames[0].shape)
108
+
109
+ # Since we're using pytorch, the shape is in pytorch, channel-first convention (c, h, w).
110
+ # We can compare this shape with the information available for that feature
111
+ pprint(dataset.features[camera_key])
112
+ # In particular:
113
+ print(dataset.features[camera_key]["shape"])
114
+ # The shape is in (h, w, c) which is a more universal format.
115
+
116
+ # For many machine learning applications we need to load the history of past observations or trajectories of
117
+ # future actions. Our datasets can load previous and future frames for each key/modality, using timestamps
118
+ # differences with the current loaded frame. For instance:
119
+ delta_timestamps = {
120
+ # loads 4 images: 1 second before current frame, 500 ms before, 200 ms before, and current frame
121
+ camera_key: [-1, -0.5, -0.20, 0],
122
+ # loads 6 state vectors: 1.5 seconds before, 1 second before, ... 200 ms, 100 ms, and current frame
123
+ "observation.state": [-1.5, -1, -0.5, -0.20, -0.10, 0],
124
+ # loads 64 action vectors: current frame, 1 frame in the future, 2 frames, ... 63 frames in the future
125
+ "action": [t / dataset.fps for t in range(64)],
126
+ }
127
+ # Note that in any case, these delta_timestamps values need to be multiples of (1/fps) so that added to any
128
+ # timestamp, you still get a valid timestamp.
129
+
130
+ dataset = LeRobotDataset(repo_id, delta_timestamps=delta_timestamps)
131
+ print(f"\n{dataset[0][camera_key].shape=}") # (4, c, h, w)
132
+ print(f"{dataset[0]['observation.state'].shape=}") # (6, c)
133
+ print(f"{dataset[0]['action'].shape=}\n") # (64, c)
134
+
135
+ # Finally, our datasets are fully compatible with PyTorch dataloaders and samplers because they are just
136
+ # PyTorch datasets.
137
+ dataloader = torch.utils.data.DataLoader(
138
+ dataset,
139
+ num_workers=0,
140
+ batch_size=32,
141
+ shuffle=True,
142
+ )
143
+
144
+ for batch in dataloader:
145
+ print(f"{batch[camera_key].shape=}") # (32, 4, c, h, w)
146
+ print(f"{batch['observation.state'].shape=}") # (32, 6, c)
147
+ print(f"{batch['action'].shape=}") # (32, 64, c)
148
+ break
project/ManiSkill3/src/maniskill3_environment/lerobot/examples/7_get_started_with_real_robot.md ADDED
@@ -0,0 +1,1003 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Getting Started with Real-World Robots
2
+
3
+ This tutorial will guide you through the process of setting up and training a neural network to autonomously control a real robot.
4
+
5
+ **What You'll Learn:**
6
+ 1. How to order and assemble your robot.
7
+ 2. How to connect, configure, and calibrate your robot.
8
+ 3. How to record and visualize your dataset.
9
+ 4. How to train a policy using your data and prepare it for evaluation.
10
+ 5. How to evaluate your policy and visualize the results.
11
+
12
+ By following these steps, you'll be able to replicate tasks like picking up a Lego block and placing it in a bin with a high success rate, as demonstrated in [this video](https://x.com/RemiCadene/status/1814680760592572934).
13
+
14
+ This tutorial is specifically made for the affordable [Koch v1.1](https://github.com/jess-moss/koch-v1-1) robot, but it contains additional information to be easily adapted to various types of robots like [Aloha bimanual robot](https://aloha-2.github.io) by changing some configurations. The Koch v1.1 consists of a leader arm and a follower arm, each with 6 motors. It can work with one or several cameras to record the scene, which serve as visual sensors for the robot.
15
+
16
+ During the data collection phase, you will control the follower arm by moving the leader arm. This process is known as "teleoperation." This technique is used to collect robot trajectories. Afterward, you'll train a neural network to imitate these trajectories and deploy the network to enable your robot to operate autonomously.
17
+
18
+ If you encounter any issues at any step of the tutorial, feel free to seek help on [Discord](https://discord.com/invite/s3KuuzsPFb) or don't hesitate to iterate with us on the tutorial by creating issues or pull requests. Thanks!
19
+
20
+ ## 1. Order and Assemble your Koch v1.1
21
+
22
+ Follow the sourcing and assembling instructions provided on the [Koch v1.1 Github page](https://github.com/jess-moss/koch-v1-1). This will guide you through setting up both the follower and leader arms, as shown in the image below.
23
+
24
+ <div style="text-align:center;">
25
+ <img src="../media/tutorial/koch_v1_1_leader_follower.webp?raw=true" alt="Koch v1.1 leader and follower arms" title="Koch v1.1 leader and follower arms" width="50%">
26
+ </div>
27
+
28
+ For a visual walkthrough of the assembly process, you can refer to [this video tutorial](https://youtu.be/8nQIg9BwwTk).
29
+
30
+ ## 2. Configure motors, calibrate arms, teleoperate your Koch v1.1
31
+
32
+ First, install the additional dependencies required for robots built with dynamixel motors like Koch v1.1 by running one of the following commands (make sure gcc is installed).
33
+
34
+ Using `pip`:
35
+ ```bash
36
+ pip install -e ".[dynamixel]"
37
+ ```
38
+
39
+ Using `poetry`:
40
+ ```bash
41
+ poetry sync --extras "dynamixel"
42
+ ```
43
+
44
+ Using `uv`:
45
+ ```bash
46
+ uv sync --extra "dynamixel"
47
+ ```
48
+
49
+ You are now ready to plug the 5V power supply to the motor bus of the leader arm (the smaller one) since all its motors only require 5V.
50
+
51
+ Then plug the 12V power supply to the motor bus of the follower arm. It has two motors that need 12V, and the rest will be powered with 5V through the voltage converter.
52
+
53
+ Finally, connect both arms to your computer via USB. Note that the USB doesn't provide any power, and both arms need to be plugged in with their associated power supply to be detected by your computer.
54
+
55
+ Now you are ready to configure your motors for the first time, as detailed in the sections below. In the upcoming sections, you'll learn about our classes and functions by running some python code in an interactive session, or by copy-pasting it in a python file.
56
+
57
+ If you have already configured your motors the first time, you can streamline the process by directly running the teleoperate script (which is detailed further in the tutorial):
58
+
59
+ > **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`.
60
+
61
+ ```bash
62
+ python lerobot/scripts/control_robot.py \
63
+ --robot.type=koch \
64
+ --control.type=teleoperate
65
+ ```
66
+
67
+ It will automatically:
68
+ 1. Identify any missing calibrations and initiate the calibration procedure.
69
+ 2. Connect the robot and start teleoperation.
70
+
71
+ ### a. Control your motors with DynamixelMotorsBus
72
+
73
+ You can use the [`DynamixelMotorsBus`](../lerobot/common/robot_devices/motors/dynamixel.py) to communicate with the motors connected as a chain to the corresponding USB bus. This class leverages the Python [Dynamixel SDK](https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20) to facilitate reading from and writing to the motors.
74
+
75
+ **First Configuration of your motors**
76
+
77
+ You will need to unplug each motor in turn and run a command to identify the motor. The motor will save its own identification, so you only need to do this once. Start by unplugging all of the motors.
78
+
79
+ Do the Leader arm first, as all of its motors are of the same type. Plug in your first motor on your leader arm and run this script to set its ID to 1.
80
+ ```bash
81
+ python lerobot/scripts/configure_motor.py \
82
+ --port /dev/tty.usbmodem58760432961 \
83
+ --brand dynamixel \
84
+ --model xl330-m288 \
85
+ --baudrate 1000000 \
86
+ --ID 1
87
+ ```
88
+
89
+ Then unplug your first motor and plug the second motor and set its ID to 2.
90
+ ```bash
91
+ python lerobot/scripts/configure_motor.py \
92
+ --port /dev/tty.usbmodem58760432961 \
93
+ --brand dynamixel \
94
+ --model xl330-m288 \
95
+ --baudrate 1000000 \
96
+ --ID 2
97
+ ```
98
+
99
+ Redo the process for all your motors until ID 6.
100
+
101
+ The process for the follower arm is almost the same, but the follower arm has two types of motors. For the first two motors, make sure you set the model to `xl430-w250`. _Important: configuring follower motors requires plugging and unplugging power. Make sure you use the 5V power for the XL330s and the 12V power for the XL430s!_
102
+
103
+ After all of your motors are configured properly, you're ready to plug them all together in a daisy-chain as shown in the original video.
104
+
105
+ **Instantiate the DynamixelMotorsBus**
106
+
107
+ To begin, create two instances of the [`DynamixelMotorsBus`](../lerobot/common/robot_devices/motors/dynamixel.py), one for each arm, using their corresponding USB ports (e.g. `DynamixelMotorsBus(port="/dev/tty.usbmodem575E0031751")`).
108
+
109
+ To find the correct ports for each arm, run the utility script twice:
110
+ ```bash
111
+ python lerobot/scripts/find_motors_bus_port.py
112
+ ```
113
+
114
+ Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
115
+ ```
116
+ Finding all available ports for the MotorBus.
117
+ ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
118
+ Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
119
+
120
+ [...Disconnect leader arm and press Enter...]
121
+
122
+ The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
123
+ Reconnect the usb cable.
124
+ ```
125
+
126
+ Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
127
+ ```
128
+ Finding all available ports for the MotorBus.
129
+ ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
130
+ Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
131
+
132
+ [...Disconnect follower arm and press Enter...]
133
+
134
+ The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
135
+ Reconnect the usb cable.
136
+ ```
137
+
138
+ Troubleshooting: On Linux, you might need to give access to the USB ports by running this command with your ports:
139
+ ```bash
140
+ sudo chmod 666 /dev/tty.usbmodem575E0032081
141
+ sudo chmod 666 /dev/tty.usbmodem575E0031751
142
+ ```
143
+
144
+ *Listing and Configuring Motors*
145
+
146
+ Next, you'll need to list the motors for each arm, including their name, index, and model. Initially, each motor is assigned the factory default index `1`. Since each motor requires a unique index to function correctly when connected in a chain on a common bus, you'll need to assign different indices. It's recommended to use an ascending index order, starting from `1` (e.g., `1, 2, 3, 4, 5, 6`). These indices will be saved in the persistent memory of each motor during the first connection.
147
+
148
+ To assign indices to the motors, run this code in an interactive Python session. Replace the `port` values with the ones you identified earlier:
149
+ ```python
150
+ from lerobot.common.robot_devices.motors.configs import DynamixelMotorsBusConfig
151
+ from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus
152
+
153
+ leader_config = DynamixelMotorsBusConfig(
154
+ port="/dev/tty.usbmodem575E0031751",
155
+ motors={
156
+ # name: (index, model)
157
+ "shoulder_pan": (1, "xl330-m077"),
158
+ "shoulder_lift": (2, "xl330-m077"),
159
+ "elbow_flex": (3, "xl330-m077"),
160
+ "wrist_flex": (4, "xl330-m077"),
161
+ "wrist_roll": (5, "xl330-m077"),
162
+ "gripper": (6, "xl330-m077"),
163
+ },
164
+ )
165
+
166
+ follower_config = DynamixelMotorsBusConfig(
167
+ port="/dev/tty.usbmodem575E0032081",
168
+ motors={
169
+ # name: (index, model)
170
+ "shoulder_pan": (1, "xl430-w250"),
171
+ "shoulder_lift": (2, "xl430-w250"),
172
+ "elbow_flex": (3, "xl330-m288"),
173
+ "wrist_flex": (4, "xl330-m288"),
174
+ "wrist_roll": (5, "xl330-m288"),
175
+ "gripper": (6, "xl330-m288"),
176
+ },
177
+ )
178
+
179
+ leader_arm = DynamixelMotorsBus(leader_config)
180
+ follower_arm = DynamixelMotorsBus(follower_config)
181
+ ```
182
+
183
+ IMPORTANTLY: Now that you have your ports, update [`KochRobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like:
184
+ ```python
185
+ @RobotConfig.register_subclass("koch")
186
+ @dataclass
187
+ class KochRobotConfig(ManipulatorRobotConfig):
188
+ calibration_dir: str = ".cache/calibration/koch"
189
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
190
+ # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
191
+ # the number of motors in your follower arms.
192
+ max_relative_target: int | None = None
193
+
194
+ leader_arms: dict[str, MotorsBusConfig] = field(
195
+ default_factory=lambda: {
196
+ "main": DynamixelMotorsBusConfig(
197
+ port="/dev/tty.usbmodem585A0085511", <-- UPDATE HERE
198
+ motors={
199
+ # name: (index, model)
200
+ "shoulder_pan": [1, "xl330-m077"],
201
+ "shoulder_lift": [2, "xl330-m077"],
202
+ "elbow_flex": [3, "xl330-m077"],
203
+ "wrist_flex": [4, "xl330-m077"],
204
+ "wrist_roll": [5, "xl330-m077"],
205
+ "gripper": [6, "xl330-m077"],
206
+ },
207
+ ),
208
+ }
209
+ )
210
+
211
+ follower_arms: dict[str, MotorsBusConfig] = field(
212
+ default_factory=lambda: {
213
+ "main": DynamixelMotorsBusConfig(
214
+ port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE
215
+ motors={
216
+ # name: (index, model)
217
+ "shoulder_pan": [1, "xl430-w250"],
218
+ "shoulder_lift": [2, "xl430-w250"],
219
+ "elbow_flex": [3, "xl330-m288"],
220
+ "wrist_flex": [4, "xl330-m288"],
221
+ "wrist_roll": [5, "xl330-m288"],
222
+ "gripper": [6, "xl330-m288"],
223
+ },
224
+ ),
225
+ }
226
+ )
227
+ ```
228
+
229
+ **Connect and Configure your Motors**
230
+
231
+ Before you can start using your motors, you'll need to configure them to ensure proper communication. When you first connect the motors, the [`DynamixelMotorsBus`](../lerobot/common/robot_devices/motors/dynamixel.py) automatically detects any mismatch between the current motor indices (factory set to `1`) and the specified indices (e.g., `1, 2, 3, 4, 5, 6`). This triggers a configuration procedure that requires you to unplug the power cord and motors, then reconnect each motor sequentially, starting from the one closest to the bus.
232
+
233
+ For a visual guide, refer to the [video tutorial of the configuration procedure](https://youtu.be/U78QQ9wCdpY).
234
+
235
+ To connect and configure the leader arm, run the following code in the same Python interactive session as earlier in the tutorial:
236
+ ```python
237
+ leader_arm.connect()
238
+ ```
239
+
240
+ When you connect the leader arm for the first time, you might see an output similar to this:
241
+ ```
242
+ Read failed due to communication error on port /dev/tty.usbmodem575E0032081 for group_key ID_shoulder_pan_shoulder_lift_elbow_flex_wrist_flex_wrist_roll_gripper: [TxRxResult] There is no status packet!
243
+
244
+ /!\ A configuration issue has been detected with your motors:
245
+ If this is the first time you are using these motors, press enter to configure your motors... but before verify that all the cables are connected the proper way. If you find an issue, before making a modification, kill the python process, unplug the power cord to not damage the motors, rewire correctly, then plug the power again and relaunch the script.
246
+
247
+ Motor indices detected: {9600: [1]}
248
+
249
+ 1. Unplug the power cord
250
+ 2. Plug/unplug minimal number of cables to only have the first 1 motor(s) (['shoulder_pan']) connected.
251
+ 3. Re-plug the power cord
252
+ Press Enter to continue...
253
+
254
+ *Follow the procedure*
255
+
256
+ Setting expected motor indices: [1, 2, 3, 4, 5, 6]
257
+ ```
258
+
259
+ Once the leader arm is configured, repeat the process for the follower arm by running:
260
+ ```python
261
+ follower_arm.connect()
262
+ ```
263
+
264
+ Congratulations! Both arms are now properly configured and connected. You won't need to go through the configuration procedure again in the future.
265
+
266
+ **Troubleshooting**:
267
+
268
+ If the configuration process fails, you may need to do the configuration process via the Dynamixel Wizard.
269
+
270
+ Known failure modes:
271
+ - Calling `arm.connect()` raises `OSError: No motor found, but one new motor expected. Verify power cord is plugged in and retry` on Ubuntu 22.
272
+
273
+ Steps:
274
+ 1. Visit https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_wizard2/#connect-dynamixel.
275
+ 2. Follow the software installation instructions in section 3 of the web page.
276
+ 3. Launch the software.
277
+ 4. Configure the device scanning options in the menu under `Tools` > `Options` > `Scan`. Check only Protocol 2.0, select only the USB port identifier of interest, select all baudrates, set the ID range to `[0, 10]`. _While this step was not strictly necessary, it greatly speeds up scanning_.
278
+ 5. For each motor in turn:
279
+ - Disconnect the power to the driver board.
280
+ - Connect **only** the motor of interest to the driver board, making sure to disconnect it from any other motors.
281
+ - Reconnect the power to the driver board.
282
+ - From the software menu select `Device` > `Scan` and let the scan run. A device should appear.
283
+ - If the device has an asterisk (*) near it, it means the firmware is indeed outdated. From the software menu, select `Tools` > `Firmware Update`. Follow the prompts.
284
+ - The main panel should have a table with various parameters of the device (refer to the web page, section 5). Select the row with `ID`, and then set the desired ID on the bottom right panel by selecting and clicking `Save`.
285
+ - Just like you did with the ID, also set the `Baud Rate` to 1 Mbps.
286
+ 6. Check everything has been done right:
287
+ - Rewire the arms in their final configuration and power both of them.
288
+ - Scan for devices. All 12 motors should appear.
289
+ - Select the motors one by one and move the arm. Check that the graphical indicator near the top right shows the movement.
290
+
291
+ **Note**: There is a common issue with the Dynamixel XL430-W250 motors where the motors become undiscoverable after upgrading their firmware from Mac and Windows Dynamixel Wizard2 applications. When this occurs, it is required to do a firmware recovery (Select `DYNAMIXEL Firmware Recovery` and follow the prompts). There are two known workarounds to conduct this firmware reset:
292
+ 1) Install the Dynamixel Wizard on a linux machine and complete the firmware recovery
293
+ 2) Use the Dynamixel U2D2 in order to perform the reset with Windows or Mac. This U2D2 can be purchased [here](https://www.robotis.us/u2d2/).
294
+ For either solution, open DYNAMIXEL Wizard 2.0 and select the appropriate port. You will likely be unable to see the motor in the GUI at this time. Select `Firmware Recovery`, carefully choose the correct model, and wait for the process to complete. Finally, re-scan to confirm the firmware recovery was successful.
295
+
296
+ **Read and Write with DynamixelMotorsBus**
297
+
298
+ To get familiar with how `DynamixelMotorsBus` communicates with the motors, you can start by reading data from them. Copy-paste this code into the same interactive Python session:
299
+ ```python
300
+ leader_pos = leader_arm.read("Present_Position")
301
+ follower_pos = follower_arm.read("Present_Position")
302
+ print(leader_pos)
303
+ print(follower_pos)
304
+ ```
305
+
306
+ Expected output might look like:
307
+ ```
308
+ array([2054, 523, 3071, 1831, 3049, 2441], dtype=int32)
309
+ array([2003, 1601, 56, 2152, 3101, 2283], dtype=int32)
310
+ ```
311
+
312
+ Try moving the arms to various positions and observe how the values change.
313
+
314
+ Now let's try to enable torque in the follower arm by copy-pasting this code:
315
+ ```python
316
+ from lerobot.common.robot_devices.motors.dynamixel import TorqueMode
317
+
318
+ follower_arm.write("Torque_Enable", TorqueMode.ENABLED.value)
319
+ ```
320
+
321
+ With torque enabled, the follower arm will be locked in its current position. Do not attempt to manually move the arm while torque is enabled, as this could damage the motors.
322
+
323
+ Now, to get more familiar with reading and writing, let's move the arm programmatically by copy-pasting the following example code:
324
+ ```python
325
+ # Get the current position
326
+ position = follower_arm.read("Present_Position")
327
+
328
+ # Update first motor (shoulder_pan) position by +10 steps
329
+ position[0] += 10
330
+ follower_arm.write("Goal_Position", position)
331
+
332
+ # Update all motors position by -30 steps
333
+ position -= 30
334
+ follower_arm.write("Goal_Position", position)
335
+
336
+ # Update gripper by +30 steps
337
+ position[-1] += 30
338
+ follower_arm.write("Goal_Position", position[-1], "gripper")
339
+ ```
340
+
341
+ When you're done playing, you can try to disable the torque, but make sure you hold your robot so that it doesn't fall:
342
+ ```python
343
+ follower_arm.write("Torque_Enable", TorqueMode.DISABLED.value)
344
+ ```
345
+
346
+ Finally, disconnect the arms:
347
+ ```python
348
+ leader_arm.disconnect()
349
+ follower_arm.disconnect()
350
+ ```
351
+
352
+ Alternatively, you can unplug the power cord, which will automatically disable torque and disconnect the motors.
353
+
354
+ */!\ Warning*: These motors tend to overheat, especially under torque or if left plugged in for too long. Unplug after use.
355
+
356
+ ### b. Teleoperate your Koch v1.1 with ManipulatorRobot
357
+
358
+ **Instantiate the ManipulatorRobot**
359
+
360
+ Before you can teleoperate your robot, you need to instantiate the [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) using the previously defined `leader_config` and `follower_config`.
361
+
362
+ For the Koch v1.1 robot, we only have one leader, so we refer to it as `"main"` and define it as `leader_arms={"main": leader_config}`. We do the same for the follower arm. For other robots (like the Aloha), which may have two pairs of leader and follower arms, you would define them like this: `leader_arms={"left": left_leader_config, "right": right_leader_config},`. Same thing for the follower arms.
363
+
364
+
365
+ Run the following code to instantiate your manipulator robot:
366
+ ```python
367
+ from lerobot.common.robot_devices.robots.configs import KochRobotConfig
368
+ from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot
369
+
370
+ robot_config = KochRobotConfig(
371
+ leader_arms={"main": leader_config},
372
+ follower_arms={"main": follower_config},
373
+ cameras={}, # We don't use any camera for now
374
+ )
375
+ robot = ManipulatorRobot(robot_config)
376
+ ```
377
+
378
+ The `KochRobotConfig` is used to set the associated settings and calibration process. For instance, we activate the torque of the gripper of the leader Koch v1.1 arm and position it at a 40 degree angle to use it as a trigger.
379
+
380
+ For the [Aloha bimanual robot](https://aloha-2.github.io), we would use `AlohaRobotConfig` to set different settings such as a secondary ID for shadow joints (shoulder, elbow). Specific to Aloha, LeRobot comes with default calibration files stored in `.cache/calibration/aloha_default`. Assuming the motors have been properly assembled, no manual calibration step is expected for Aloha.
381
+
382
+ **Calibrate and Connect the ManipulatorRobot**
383
+
384
+ Next, you'll need to calibrate your Koch robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Koch robot to work on another.
385
+
386
+ When you connect your robot for the first time, the [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) will detect if the calibration file is missing and trigger the calibration procedure. During this process, you will be guided to move each arm to three different positions.
387
+
388
+ Here are the positions you'll move the follower arm to:
389
+
390
+ | 1. Zero position | 2. Rotated position | 3. Rest position |
391
+ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
392
+ | <img src="../media/koch/follower_zero.webp?raw=true" alt="Koch v1.1 follower arm zero position" title="Koch v1.1 follower arm zero position" style="width:100%;"> | <img src="../media/koch/follower_rotated.webp?raw=true" alt="Koch v1.1 follower arm rotated position" title="Koch v1.1 follower arm rotated position" style="width:100%;"> | <img src="../media/koch/follower_rest.webp?raw=true" alt="Koch v1.1 follower arm rest position" title="Koch v1.1 follower arm rest position" style="width:100%;"> |
393
+
394
+ And here are the corresponding positions for the leader arm:
395
+
396
+ | 1. Zero position | 2. Rotated position | 3. Rest position |
397
+ | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- |
398
+ | <img src="../media/koch/leader_zero.webp?raw=true" alt="Koch v1.1 leader arm zero position" title="Koch v1.1 leader arm zero position" style="width:100%;"> | <img src="../media/koch/leader_rotated.webp?raw=true" alt="Koch v1.1 leader arm rotated position" title="Koch v1.1 leader arm rotated position" style="width:100%;"> | <img src="../media/koch/leader_rest.webp?raw=true" alt="Koch v1.1 leader arm rest position" title="Koch v1.1 leader arm rest position" style="width:100%;"> |
399
+
400
+ You can watch a [video tutorial of the calibration procedure](https://youtu.be/8drnU9uRY24) for more details.
401
+
402
+ During calibration, we count the number of full 360-degree rotations your motors have made since they were first used. That's why we ask you to move to this arbitrary "zero" position. We don't actually "set" the zero position, so you don't need to be accurate. After calculating these "offsets" to shift the motor values around 0, we need to assess the rotation direction of each motor, which might differ. That's why we ask you to rotate all motors to roughly 90 degrees, to measure if the values changed negatively or positively.
403
+
404
+ Finally, the rest position ensures that the follower and leader arms are roughly aligned after calibration, preventing sudden movements that could damage the motors when starting teleoperation.
405
+
406
+ Importantly, once calibrated, all Koch robots will move to the same positions (e.g. zero and rotated position) when commanded.
407
+
408
+ Run the following code to calibrate and connect your robot:
409
+ ```python
410
+ robot.connect()
411
+ ```
412
+
413
+ The output will look like this:
414
+ ```
415
+ Connecting main follower arm
416
+ Connecting main leader arm
417
+
418
+ Missing calibration file '.cache/calibration/koch/main_follower.json'
419
+ Running calibration of koch main follower...
420
+ Move arm to zero position
421
+ [...]
422
+ Move arm to rotated position
423
+ [...]
424
+ Move arm to rest position
425
+ [...]
426
+ Calibration is done! Saving calibration file '.cache/calibration/koch/main_follower.json'
427
+
428
+ Missing calibration file '.cache/calibration/koch/main_leader.json'
429
+ Running calibration of koch main leader...
430
+ Move arm to zero position
431
+ [...]
432
+ Move arm to rotated position
433
+ [...]
434
+ Move arm to rest position
435
+ [...]
436
+ Calibration is done! Saving calibration file '.cache/calibration/koch/main_leader.json'
437
+ ```
438
+
439
+ *Verifying Calibration*
440
+
441
+ Once calibration is complete, you can check the positions of the leader and follower arms to ensure they match. If the calibration was successful, the positions should be very similar.
442
+
443
+ Run this code to get the positions in degrees:
444
+ ```python
445
+ leader_pos = robot.leader_arms["main"].read("Present_Position")
446
+ follower_pos = robot.follower_arms["main"].read("Present_Position")
447
+
448
+ print(leader_pos)
449
+ print(follower_pos)
450
+ ```
451
+
452
+ Example output:
453
+ ```
454
+ array([-0.43945312, 133.94531, 179.82422, -18.984375, -1.9335938, 34.541016], dtype=float32)
455
+ array([-0.58723712, 131.72314, 174.98743, -16.872612, 0.786213, 35.271973], dtype=float32)
456
+ ```
457
+
458
+ These values are in degrees, which makes them easier to interpret and debug. The zero position used during calibration should roughly correspond to 0 degrees for each motor, and the rotated position should roughly correspond to 90 degrees for each motor.
459
+
460
+ **Teleoperate your Koch v1.1**
461
+
462
+ You can easily teleoperate your robot by reading the positions from the leader arm and sending them as goal positions to the follower arm.
463
+
464
+ To teleoperate your robot for 30 seconds at a frequency of approximately 200Hz, run the following code:
465
+ ```python
466
+ import tqdm
467
+ seconds = 30
468
+ frequency = 200
469
+ for _ in tqdm.tqdm(range(seconds*frequency)):
470
+ leader_pos = robot.leader_arms["main"].read("Present_Position")
471
+ robot.follower_arms["main"].write("Goal_Position", leader_pos)
472
+ ```
473
+
474
+ *Using `teleop_step` for Teleoperation*
475
+
476
+ Alternatively, you can teleoperate the robot using the `teleop_step` method from [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py).
477
+
478
+ Run this code to teleoperate:
479
+ ```python
480
+ for _ in tqdm.tqdm(range(seconds*frequency)):
481
+ robot.teleop_step()
482
+ ```
483
+
484
+ *Recording data during Teleoperation*
485
+
486
+ Teleoperation is particularly useful for recording data. You can use `teleop_step(record_data=True)` to return both the follower arm's position as `"observation.state"` and the leader arm's position as `"action"`. This function also converts the numpy arrays into PyTorch tensors. If you're working with a robot that has two leader and two follower arms (like the Aloha), the positions are concatenated.
487
+
488
+ Run the following code to see how slowly moving the leader arm affects the observation and action:
489
+ ```python
490
+ leader_pos = robot.leader_arms["main"].read("Present_Position")
491
+ follower_pos = robot.follower_arms["main"].read("Present_Position")
492
+ observation, action = robot.teleop_step(record_data=True)
493
+
494
+ print(follower_pos)
495
+ print(observation)
496
+ print(leader_pos)
497
+ print(action)
498
+ ```
499
+
500
+ Expected output:
501
+ ```
502
+ array([7.8223, 131.1328, 165.5859, -23.4668, -0.9668, 32.4316], dtype=float32)
503
+ {'observation.state': tensor([7.8223, 131.1328, 165.5859, -23.4668, -0.9668, 32.4316])}
504
+ array([3.4277, 134.1211, 179.8242, -18.5449, -1.5820, 34.7168], dtype=float32)
505
+ {'action': tensor([3.4277, 134.1211, 179.8242, -18.5449, -1.5820, 34.7168])}
506
+ ```
507
+
508
+ *Asynchronous Frame Recording*
509
+
510
+ Additionally, `teleop_step` can asynchronously record frames from multiple cameras and include them in the observation dictionary as `"observation.images.CAMERA_NAME"`. This feature will be covered in more detail in the next section.
511
+
512
+ *Disconnecting the Robot*
513
+
514
+ When you're finished, make sure to disconnect your robot by running:
515
+ ```python
516
+ robot.disconnect()
517
+ ```
518
+
519
+ Alternatively, you can unplug the power cord, which will also disable torque.
520
+
521
+ */!\ Warning*: These motors tend to overheat, especially under torque or if left plugged in for too long. Unplug after use.
522
+
523
+ ### c. Add your cameras with OpenCVCamera
524
+
525
+ **(Optional) Use your phone as camera on Linux**
526
+
527
+ If you want to use your phone as a camera on Linux, follow these steps to set up a virtual camera
528
+
529
+ 1. *Install `v4l2loopback-dkms` and `v4l-utils`*. Those packages are required to create virtual camera devices (`v4l2loopback`) and verify their settings with the `v4l2-ctl` utility from `v4l-utils`. Install them using:
530
+ ```python
531
+ sudo apt install v4l2loopback-dkms v4l-utils
532
+ ```
533
+ 2. *Install [DroidCam](https://droidcam.app) on your phone*. This app is available for both iOS and Android.
534
+ 3. *Install [OBS Studio](https://obsproject.com)*. This software will help you manage the camera feed. Install it using [Flatpak](https://flatpak.org):
535
+ ```python
536
+ flatpak install flathub com.obsproject.Studio
537
+ ```
538
+ 4. *Install the DroidCam OBS plugin*. This plugin integrates DroidCam with OBS Studio. Install it with:
539
+ ```python
540
+ flatpak install flathub com.obsproject.Studio.Plugin.DroidCam
541
+ ```
542
+ 5. *Start OBS Studio*. Launch with:
543
+ ```python
544
+ flatpak run com.obsproject.Studio
545
+ ```
546
+ 6. *Add your phone as a source*. Follow the instructions [here](https://droidcam.app/obs/usage). Be sure to set the resolution to `640x480`.
547
+ 7. *Adjust resolution settings*. In OBS Studio, go to `File > Settings > Video`. Change the `Base(Canvas) Resolution` and the `Output(Scaled) Resolution` to `640x480` by manually typing it in.
548
+ 8. *Start virtual camera*. In OBS Studio, follow the instructions [here](https://obsproject.com/kb/virtual-camera-guide).
549
+ 9. *Verify the virtual camera setup*. Use `v4l2-ctl` to list the devices:
550
+ ```python
551
+ v4l2-ctl --list-devices
552
+ ```
553
+ You should see an entry like:
554
+ ```
555
+ VirtualCam (platform:v4l2loopback-000):
556
+ /dev/video1
557
+ ```
558
+ 10. *Check the camera resolution*. Use `v4l2-ctl` to ensure that the virtual camera output resolution is `640x480`. Change `/dev/video1` to the port of your virtual camera from the output of `v4l2-ctl --list-devices`.
559
+ ```python
560
+ v4l2-ctl -d /dev/video1 --get-fmt-video
561
+ ```
562
+ You should see an entry like:
563
+ ```
564
+ >>> Format Video Capture:
565
+ >>> Width/Height : 640/480
566
+ >>> Pixel Format : 'YUYV' (YUYV 4:2:2)
567
+ ```
568
+
569
+ Troubleshooting: If the resolution is not correct you will have to delete the Virtual Camera port and try again as it cannot be changed.
570
+
571
+ If everything is set up correctly, you can proceed with the rest of the tutorial.
572
+
573
+ **(Optional) Use your iPhone as a camera on MacOS**
574
+
575
+ To use your iPhone as a camera on macOS, enable the Continuity Camera feature:
576
+ - Ensure your Mac is running macOS 13 or later, and your iPhone is on iOS 16 or later.
577
+ - Sign in both devices with the same Apple ID.
578
+ - Connect your devices with a USB cable or turn on Wi-Fi and Bluetooth for a wireless connection.
579
+
580
+ For more details, visit [Apple support](https://support.apple.com/en-gb/guide/mac-help/mchl77879b8a/mac).
581
+
582
+ Your iPhone should be detected automatically when running the camera setup script in the next section.
583
+
584
+ **Instantiate an OpenCVCamera**
585
+
586
+ The [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py) class allows you to efficiently record frames from most cameras using the [`opencv2`](https://docs.opencv.org) library. For more details on compatibility, see [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).
587
+
588
+ To instantiate an [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py), you need a camera index (e.g. `OpenCVCamera(camera_index=0)`). When you only have one camera like a webcam of a laptop, the camera index is usually `0` but it might differ, and the camera index might change if you reboot your computer or re-plug your camera. This behavior depends on your operating system.
589
+
590
+ To find the camera indices, run the following utility script, which will save a few frames from each detected camera:
591
+ ```bash
592
+ python lerobot/common/robot_devices/cameras/opencv.py \
593
+ --images-dir outputs/images_from_opencv_cameras
594
+ ```
595
+
596
+ The output will look something like this if you have two cameras connected:
597
+ ```
598
+ Mac or Windows detected. Finding available camera indices through scanning all indices from 0 to 60
599
+ [...]
600
+ Camera found at index 0
601
+ Camera found at index 1
602
+ [...]
603
+ Connecting cameras
604
+ OpenCVCamera(0, fps=30.0, width=1920.0, height=1080.0, color_mode=rgb)
605
+ OpenCVCamera(1, fps=24.0, width=1920.0, height=1080.0, color_mode=rgb)
606
+ Saving images to outputs/images_from_opencv_cameras
607
+ Frame: 0000 Latency (ms): 39.52
608
+ [...]
609
+ Frame: 0046 Latency (ms): 40.07
610
+ Images have been saved to outputs/images_from_opencv_cameras
611
+ ```
612
+
613
+ Check the saved images in `outputs/images_from_opencv_cameras` to identify which camera index corresponds to which physical camera (e.g. `0` for `camera_00` or `1` for `camera_01`):
614
+ ```
615
+ camera_00_frame_000000.png
616
+ [...]
617
+ camera_00_frame_000047.png
618
+ camera_01_frame_000000.png
619
+ [...]
620
+ camera_01_frame_000047.png
621
+ ```
622
+
623
+ Note: Some cameras may take a few seconds to warm up, and the first frame might be black or green.
624
+
625
+ Finally, run this code to instantiate and connect your camera:
626
+ ```python
627
+ from lerobot.common.robot_devices.cameras.configs import OpenCVCameraConfig
628
+ from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera
629
+
630
+ config = OpenCVCameraConfig(camera_index=0)
631
+ camera = OpenCVCamera(config)
632
+ camera.connect()
633
+ color_image = camera.read()
634
+
635
+ print(color_image.shape)
636
+ print(color_image.dtype)
637
+ ```
638
+
639
+ Expected output for a laptop camera on MacBookPro:
640
+ ```
641
+ (1080, 1920, 3)
642
+ uint8
643
+ ```
644
+
645
+ Or like this if you followed our tutorial to set a virtual camera:
646
+ ```
647
+ (480, 640, 3)
648
+ uint8
649
+ ```
650
+
651
+ With certain cameras, you can also specify additional parameters like frame rate, resolution, and color mode during instantiation. For instance:
652
+ ```python
653
+ config = OpenCVCameraConfig(camera_index=0, fps=30, width=640, height=480)
654
+ ```
655
+
656
+ If the provided arguments are not compatible with the camera, an exception will be raised.
657
+
658
+ *Disconnecting the camera*
659
+
660
+ When you're done using the camera, disconnect it by running:
661
+ ```python
662
+ camera.disconnect()
663
+ ```
664
+
665
+ **Instantiate your robot with cameras**
666
+
667
+ Additionally, you can set up your robot to work with your cameras.
668
+
669
+ Modify the following Python code with the appropriate camera names and configurations:
670
+ ```python
671
+ robot = ManipulatorRobot(
672
+ KochRobotConfig(
673
+ leader_arms={"main": leader_arm},
674
+ follower_arms={"main": follower_arm},
675
+ calibration_dir=".cache/calibration/koch",
676
+ cameras={
677
+ "laptop": OpenCVCameraConfig(0, fps=30, width=640, height=480),
678
+ "phone": OpenCVCameraConfig(1, fps=30, width=640, height=480),
679
+ },
680
+ )
681
+ )
682
+ robot.connect()
683
+ ```
684
+
685
+ As a result, `teleop_step(record_data=True)` will return a frame for each camera following the pytorch "channel first" convention but we keep images in `uint8` with pixels in range [0,255] to easily save them.
686
+
687
+ Modify this code with the names of your cameras and run it:
688
+ ```python
689
+ observation, action = robot.teleop_step(record_data=True)
690
+ print(observation["observation.images.laptop"].shape)
691
+ print(observation["observation.images.phone"].shape)
692
+ print(observation["observation.images.laptop"].min().item())
693
+ print(observation["observation.images.laptop"].max().item())
694
+ ```
695
+
696
+ The output should look like this:
697
+ ```
698
+ torch.Size([3, 480, 640])
699
+ torch.Size([3, 480, 640])
700
+ 0
701
+ 255
702
+ ```
703
+
704
+ ### d. Use `control_robot.py` and our `teleoperate` function
705
+
706
+ Instead of manually running the python code in a terminal window, you can use [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) to instantiate your robot by providing the robot configurations via command line and control your robot with various modes as explained next.
707
+
708
+ Try running this code to teleoperate your robot (if you don't have a camera, keep reading):
709
+ ```bash
710
+ python lerobot/scripts/control_robot.py \
711
+ --robot.type=koch \
712
+ --control.type=teleoperate
713
+ ```
714
+
715
+ You will see a lot of lines appearing like this one:
716
+ ```
717
+ INFO 2024-08-10 11:15:03 ol_robot.py:209 dt: 5.12 (195.1hz) dtRlead: 4.93 (203.0hz) dtWfoll: 0.19 (5239.0hz)
718
+ ```
719
+
720
+ It contains
721
+ - `2024-08-10 11:15:03` which is the date and time of the call to the print function.
722
+ - `ol_robot.py:209` which is the end of the file name and the line number where the print function is called (`lerobot/scripts/control_robot.py` line `209`).
723
+ - `dt: 5.12 (195.1hz)` which is the "delta time" or the number of milliseconds spent between the previous call to `robot.teleop_step()` and the current one, associated with the frequency (5.12 ms equals 195.1 Hz) ; note that you can control the maximum frequency by adding fps as argument such as `--fps 30`.
724
+ - `dtRlead: 4.93 (203.0hz)` which is the number of milliseconds it took to read the position of the leader arm using `leader_arm.read("Present_Position")`.
725
+ - `dtWfoll: 0.19 (5239.0hz)` which is the number of milliseconds it took to set a new goal position for the follower arm using `follower_arm.write("Goal_position", leader_pos)` ; note that writing is done asynchronously so it takes less time than reading.
726
+
727
+ Importantly: If you don't have any camera, you can remove them dynamically with this [draccus](https://github.com/dlwh/draccus) syntax `--robot.cameras='{}'`:
728
+ ```bash
729
+ python lerobot/scripts/control_robot.py \
730
+ --robot.type=koch \
731
+ --robot.cameras='{}' \
732
+ --control.type=teleoperate
733
+ ```
734
+
735
+ We advise creating a new yaml file when the command becomes too long.
736
+
737
+ ## 3. Record your Dataset and Visualize it
738
+
739
+ Using what you've learned previously, you can now easily record a dataset of states and actions for one episode. You can use `busy_wait` to control the speed of teleoperation and record at a fixed `fps` (frame per seconds).
740
+
741
+ Try this code to record 30 seconds at 60 fps:
742
+ ```python
743
+ import time
744
+ from lerobot.scripts.control_robot import busy_wait
745
+
746
+ record_time_s = 30
747
+ fps = 60
748
+
749
+ states = []
750
+ actions = []
751
+ for _ in range(record_time_s * fps):
752
+ start_time = time.perf_counter()
753
+ observation, action = robot.teleop_step(record_data=True)
754
+
755
+ states.append(observation["observation.state"])
756
+ actions.append(action["action"])
757
+
758
+ dt_s = time.perf_counter() - start_time
759
+ busy_wait(1 / fps - dt_s)
760
+
761
+ # Note that observation and action are available in RAM, but
762
+ # you could potentially store them on disk with pickle/hdf5 or
763
+ # our optimized format `LeRobotDataset`. More on this next.
764
+ ```
765
+
766
+ Importantly, many utilities are still missing. For instance, if you have cameras, you will need to save the images on disk to not go out of RAM, and to do so in threads to not slow down communication with your robot. Also, you will need to store your data in a format optimized for training and web sharing like [`LeRobotDataset`](../lerobot/common/datasets/lerobot_dataset.py). More on this in the next section.
767
+
768
+ ### a. Use the `record` function
769
+
770
+ You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) to achieve efficient data recording. It encompasses many recording utilities:
771
+ 1. Frames from cameras are saved on disk in threads, and encoded into videos at the end of each episode recording.
772
+ 2. Video streams from cameras are displayed in window so that you can verify them.
773
+ 3. Data is stored with [`LeRobotDataset`](../lerobot/common/datasets/lerobot_dataset.py) format which is pushed to your Hugging Face page (unless `--control.push_to_hub=false` is provided).
774
+ 4. Checkpoints are done during recording, so if any issue occurs, you can resume recording by re-running the same command again with `--control.resume=true`. You will need to manually delete the dataset directory if you want to start recording from scratch.
775
+ 5. Set the flow of data recording using command line arguments:
776
+ - `--control.warmup_time_s=10` defines the number of seconds before starting data collection. It allows the robot devices to warmup and synchronize (10 seconds by default).
777
+ - `--control.episode_time_s=60` defines the number of seconds for data recording for each episode (60 seconds by default).
778
+ - `--control.reset_time_s=60` defines the number of seconds for resetting the environment after each episode (60 seconds by default).
779
+ - `--control.num_episodes=50` defines the number of episodes to record (50 by default).
780
+ 6. Control the flow during data recording using keyboard keys:
781
+ - Press right arrow `->` at any time during episode recording to early stop and go to resetting. Same during resetting, to early stop and to go to the next episode recording.
782
+ - Press left arrow `<-` at any time during episode recording or resetting to early stop, cancel the current episode, and re-record it.
783
+ - Press escape `ESC` at any time during episode recording to end the session early and go straight to video encoding and dataset uploading.
784
+ 7. Similarly to `teleoperate`, you can also use the command line to override anything.
785
+
786
+ Before trying `record`, if you want to push your dataset to the hub, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
787
+ ```bash
788
+ huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
789
+ ```
790
+ Also, store your Hugging Face repository name in a variable (e.g. `cadene` or `lerobot`). For instance, run this to use your Hugging Face user name as repository:
791
+ ```bash
792
+ HF_USER=$(huggingface-cli whoami | head -n 1)
793
+ echo $HF_USER
794
+ ```
795
+ If you don't want to push to hub, use `--control.push_to_hub=false`.
796
+
797
+ Now run this to record 2 episodes:
798
+ ```bash
799
+ python lerobot/scripts/control_robot.py \
800
+ --robot.type=koch \
801
+ --control.type=record \
802
+ --control.single_task="Grasp a lego block and put it in the bin." \
803
+ --control.fps=30 \
804
+ --control.repo_id=${HF_USER}/koch_test \
805
+ --control.tags='["tutorial"]' \
806
+ --control.warmup_time_s=5 \
807
+ --control.episode_time_s=30 \
808
+ --control.reset_time_s=30 \
809
+ --control.num_episodes=2 \
810
+ --control.push_to_hub=true
811
+ ```
812
+
813
+
814
+ This will write your dataset locally to `~/.cache/huggingface/lerobot/{repo-id}` (e.g. `data/cadene/koch_test`) and push it on the hub at `https://huggingface.co/datasets/{HF_USER}/{repo-id}`. Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).
815
+
816
+ You can look for other LeRobot datasets on the hub by searching for `LeRobot` tags: https://huggingface.co/datasets?other=LeRobot
817
+
818
+ You will see a lot of lines appearing like this one:
819
+ ```
820
+ INFO 2024-08-10 15:02:58 ol_robot.py:219 dt:33.34 (30.0hz) dtRlead: 5.06 (197.5hz) dtWfoll: 0.25 (3963.7hz) dtRfoll: 6.22 (160.7hz) dtRlaptop: 32.57 (30.7hz) dtRphone: 33.84 (29.5hz)
821
+ ```
822
+ It contains:
823
+ - `2024-08-10 15:02:58` which is the date and time of the call to the print function,
824
+ - `ol_robot.py:219` which is the end of the file name and the line number where the print function is called (`lerobot/scripts/control_robot.py` line `219`).
825
+ - `dt:33.34 (30.0hz)` which is the "delta time" or the number of milliseconds spent between the previous call to `robot.teleop_step(record_data=True)` and the current one, associated with the frequency (33.34 ms equals 30.0 Hz) ; note that we use `--fps 30` so we expect 30.0 Hz ; when a step takes more time, the line appears in yellow.
826
+ - `dtRlead: 5.06 (197.5hz)` which is the delta time of reading the present position of the leader arm.
827
+ - `dtWfoll: 0.25 (3963.7hz)` which is the delta time of writing the goal position on the follower arm ; writing is asynchronous so it takes less time than reading.
828
+ - `dtRfoll: 6.22 (160.7hz)` which is the delta time of reading the present position on the follower arm.
829
+ - `dtRlaptop:32.57 (30.7hz) ` which is the delta time of capturing an image from the laptop camera in the thread running asynchronously.
830
+ - `dtRphone:33.84 (29.5hz)` which is the delta time of capturing an image from the phone camera in the thread running asynchronously.
831
+
832
+ Troubleshooting:
833
+ - On Linux, if you encounter any issue during video encoding with `ffmpeg: unknown encoder libsvtav1`, you can:
834
+ - install with conda-forge by running `conda install -c conda-forge ffmpeg` (it should be compiled with `libsvtav1`),
835
+ > **NOTE:** This usually installs `ffmpeg 7.X` for your platform (check the version installed with `ffmpeg -encoders | grep libsvtav1`). If it isn't `ffmpeg 7.X` or lacks `libsvtav1` support, you can explicitly install `ffmpeg 7.X` using: `conda install ffmpeg=7.1.1 -c conda-forge`
836
+ - or, install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1),
837
+ - and, make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`.
838
+ - On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
839
+
840
+ At the end of data recording, your dataset will be uploaded on your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/koch_test) that you can obtain by running:
841
+ ```bash
842
+ echo https://huggingface.co/datasets/${HF_USER}/koch_test
843
+ ```
844
+
845
+ ### b. Advice for recording dataset
846
+
847
+ Once you're comfortable with data recording, it's time to create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings.
848
+
849
+ In the following sections, you’ll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions.
850
+
851
+ Avoid adding too much variation too quickly, as it may hinder your results.
852
+
853
+ In the coming months, we plan to release a foundational model for robotics. We anticipate that fine-tuning this model will enhance generalization, reducing the need for strict consistency during data collection.
854
+
855
+ ### c. Visualize all episodes
856
+
857
+ You can visualize your dataset by running:
858
+ ```bash
859
+ python lerobot/scripts/visualize_dataset_html.py \
860
+ --repo-id ${HF_USER}/koch_test
861
+ ```
862
+
863
+ Note: You might need to add `--local-files-only 1` if your dataset was not uploaded to hugging face hub.
864
+
865
+ This will launch a local web server that looks like this:
866
+ <div style="text-align:center;">
867
+ <img src="../media/tutorial/visualize_dataset_html.webp?raw=true" alt="Koch v1.1 leader and follower arms" title="Koch v1.1 leader and follower arms" width="100%">
868
+ </div>
869
+
870
+ ### d. Replay episode on your robot with the `replay` function
871
+
872
+ A useful feature of [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) is the `replay` function, which allows to replay on your robot any episode that you've recorded or episodes from any dataset out there. This function helps you test the repeatability of your robot's actions and assess transferability across robots of the same model.
873
+
874
+ To replay the first episode of the dataset you just recorded, run the following command:
875
+ ```bash
876
+ python lerobot/scripts/control_robot.py \
877
+ --robot.type=koch \
878
+ --control.type=replay \
879
+ --control.fps=30 \
880
+ --control.repo_id=${HF_USER}/koch_test \
881
+ --control.episode=0
882
+ ```
883
+
884
+ Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on an Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com).
885
+
886
+ ## 4. Train a policy on your data
887
+
888
+ ### a. Use the `train` script
889
+
890
+ To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
891
+ ```bash
892
+ python lerobot/scripts/train.py \
893
+ --dataset.repo_id=${HF_USER}/koch_test \
894
+ --policy.type=act \
895
+ --output_dir=outputs/train/act_koch_test \
896
+ --job_name=act_koch_test \
897
+ --policy.device=cuda \
898
+ --wandb.enable=true
899
+ ```
900
+
901
+ Let's explain it:
902
+ 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/koch_test`.
903
+ 2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
904
+ 3. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
905
+ 4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
906
+
907
+ For more information on the `train` script see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md)
908
+
909
+ ### b. (Optional) Upload policy checkpoints to the hub
910
+
911
+ Once training is done, upload the latest checkpoint with:
912
+ ```bash
913
+ huggingface-cli upload ${HF_USER}/act_koch_test \
914
+ outputs/train/act_koch_test/checkpoints/last/pretrained_model
915
+ ```
916
+
917
+ You can also upload intermediate checkpoints with:
918
+ ```bash
919
+ CKPT=010000
920
+ huggingface-cli upload ${HF_USER}/act_koch_test_${CKPT} \
921
+ outputs/train/act_koch_test/checkpoints/${CKPT}/pretrained_model
922
+ ```
923
+
924
+ ## 5. Evaluate your policy
925
+
926
+ Now that you have a policy checkpoint, you can easily control your robot with it using methods from [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) and the policy.
927
+
928
+ Try this code for running inference for 60 seconds at 30 fps:
929
+ ```python
930
+ from lerobot.common.policies.act.modeling_act import ACTPolicy
931
+
932
+ inference_time_s = 60
933
+ fps = 30
934
+ device = "cuda" # TODO: On Mac, use "mps" or "cpu"
935
+
936
+ ckpt_path = "outputs/train/act_koch_test/checkpoints/last/pretrained_model"
937
+ policy = ACTPolicy.from_pretrained(ckpt_path)
938
+ policy.to(device)
939
+
940
+ for _ in range(inference_time_s * fps):
941
+ start_time = time.perf_counter()
942
+
943
+ # Read the follower state and access the frames from the cameras
944
+ observation = robot.capture_observation()
945
+
946
+ # Convert to pytorch format: channel first and float32 in [0,1]
947
+ # with batch dimension
948
+ for name in observation:
949
+ if "image" in name:
950
+ observation[name] = observation[name].type(torch.float32) / 255
951
+ observation[name] = observation[name].permute(2, 0, 1).contiguous()
952
+ observation[name] = observation[name].unsqueeze(0)
953
+ observation[name] = observation[name].to(device)
954
+
955
+ # Compute the next action with the policy
956
+ # based on the current observation
957
+ action = policy.select_action(observation)
958
+ # Remove batch dimension
959
+ action = action.squeeze(0)
960
+ # Move to cpu, if not already the case
961
+ action = action.to("cpu")
962
+ # Order the robot to move
963
+ robot.send_action(action)
964
+
965
+ dt_s = time.perf_counter() - start_time
966
+ busy_wait(1 / fps - dt_s)
967
+ ```
968
+
969
+ ### a. Use our `record` function
970
+
971
+ Ideally, when controlling your robot with your neural network, you would want to record evaluation episodes and to be able to visualize them later on, or even train on them like in Reinforcement Learning. This pretty much corresponds to recording a new dataset but with a neural network providing the actions instead of teleoperation.
972
+
973
+ To this end, you can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
974
+ ```bash
975
+ python lerobot/scripts/control_robot.py \
976
+ --robot.type=koch \
977
+ --control.type=record \
978
+ --control.fps=30 \
979
+ --control.repo_id=${HF_USER}/eval_act_koch_test \
980
+ --control.tags='["tutorial"]' \
981
+ --control.warmup_time_s=5 \
982
+ --control.episode_time_s=30 \
983
+ --control.reset_time_s=30 \
984
+ --control.num_episodes=10 \
985
+ --control.push_to_hub=true \
986
+ --control.policy.path=outputs/train/act_koch_test/checkpoints/last/pretrained_model
987
+ ```
988
+
989
+ As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
990
+ 1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_koch_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_koch_test`).
991
+ 2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_koch_test`).
992
+
993
+ ### b. Visualize evaluation afterwards
994
+
995
+ You can then visualize your evaluation dataset by running the same command as before but with the new inference dataset as argument:
996
+ ```bash
997
+ python lerobot/scripts/visualize_dataset.py \
998
+ --repo-id ${HF_USER}/eval_act_koch_test
999
+ ```
1000
+
1001
+ ## 6. Next step
1002
+
1003
+ Join our [Discord](https://discord.com/invite/s3KuuzsPFb) to collaborate on data collection and help us train a fully open-source foundational model for robotics!
project/ManiSkill3/src/maniskill3_environment/lerobot/examples/8_use_stretch.md ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This tutorial explains how to use [Stretch 3](https://hello-robot.com/stretch-3-product) with LeRobot.
2
+
3
+ ## Setup
4
+
5
+ Familiarize yourself with Stretch by following its [tutorials](https://docs.hello-robot.com/0.3/getting_started/hello_robot/) (recommended).
6
+
7
+ To use LeRobot on Stretch, 3 options are available:
8
+ - [tethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup)
9
+ - [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup)
10
+ - ssh directly into Stretch (you will first need to install and configure openssh-server on stretch using one of the two above setups)
11
+
12
+
13
+ ## Install LeRobot
14
+
15
+ On Stretch's CLI, follow these steps:
16
+
17
+ 1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
18
+ ```bash
19
+ mkdir -p ~/miniconda3
20
+ wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
21
+ bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
22
+ rm ~/miniconda3/miniconda.sh
23
+ ~/miniconda3/bin/conda init bash
24
+ ```
25
+
26
+ 2. Comment out these lines in `~/.profile` (this can mess up paths used by conda and ~/.local/bin should already be in your PATH)
27
+ ```
28
+ # set PATH so it includes user's private bin if it exists
29
+ if [ -d "$HOME/.local/bin" ] ; then
30
+ PATH="$HOME/.local/bin:$PATH"
31
+ fi
32
+ ```
33
+
34
+ 3. Restart shell or `source ~/.bashrc`
35
+
36
+ 4. Create and activate a fresh conda environment for lerobot
37
+ ```bash
38
+ conda create -y -n lerobot python=3.10 && conda activate lerobot
39
+ ```
40
+
41
+ 5. Clone LeRobot:
42
+ ```bash
43
+ git clone https://github.com/huggingface/lerobot.git ~/lerobot
44
+ ```
45
+
46
+ 6. When using `miniconda`, install `ffmpeg` in your environment:
47
+ ```bash
48
+ conda install ffmpeg -c conda-forge
49
+ ```
50
+
51
+ 7. Install LeRobot with stretch dependencies:
52
+ ```bash
53
+ cd ~/lerobot && pip install -e ".[stretch]"
54
+ ```
55
+
56
+ > **Note:** If you get this message, you can ignore it: `ERROR: pip's dependency resolver does not currently take into account all the packages that are installed.`
57
+
58
+ 8. Run a [system check](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#system-check) to make sure your robot is ready:
59
+ ```bash
60
+ stretch_system_check.py
61
+ ```
62
+
63
+ > **Note:** You may need to free the "robot process" after booting Stretch by running `stretch_free_robot_process.py`. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#turning-off-gamepad-teleoperation).
64
+
65
+ You should get something like this:
66
+ ```bash
67
+ For use with S T R E T C H (R) from Hello Robot Inc.
68
+ ---------------------------------------------------------------------
69
+
70
+ Model = Stretch 3
71
+ Tool = DexWrist 3 w/ Gripper
72
+ Serial Number = stretch-se3-3054
73
+
74
+ ---- Checking Hardware ----
75
+ [Pass] Comms are ready
76
+ [Pass] Actuators are ready
77
+ [Warn] Sensors not ready (IMU AZ = -10.19 out of range -10.1 to -9.5)
78
+ [Pass] Battery voltage is 13.6 V
79
+
80
+ ---- Checking Software ----
81
+ [Pass] Ubuntu 22.04 is ready
82
+ [Pass] All APT pkgs are setup correctly
83
+ [Pass] Firmware is up-to-date
84
+ [Pass] Python pkgs are up-to-date
85
+ [Pass] ROS2 Humble is ready
86
+ ```
87
+
88
+ ## Teleoperate, record a dataset and run a policy
89
+
90
+ **Calibrate (Optional)**
91
+ Before operating Stretch, you need to [home](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#homing) it first. Be mindful about giving Stretch some space as this procedure will move the robot's arm and gripper. Now run this command:
92
+ ```bash
93
+ python lerobot/scripts/control_robot.py \
94
+ --robot.type=stretch \
95
+ --control.type=calibrate
96
+ ```
97
+ This is equivalent to running `stretch_robot_home.py`
98
+
99
+ > **Note:** If you run any of the LeRobot scripts below and Stretch is not properly homed, it will automatically home/calibrate first.
100
+
101
+ **Teleoperate**
102
+ Before trying teleoperation, you need to activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation).
103
+
104
+ Now try out teleoperation (see above documentation to learn about the gamepad controls):
105
+
106
+ > **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`.
107
+ ```bash
108
+ python lerobot/scripts/control_robot.py \
109
+ --robot.type=stretch \
110
+ --control.type=teleoperate
111
+ ```
112
+ This is essentially the same as running `stretch_gamepad_teleop.py`
113
+
114
+ **Record a dataset**
115
+ Once you're familiar with the gamepad controls and after a bit of practice, you can try to record your first dataset with Stretch.
116
+
117
+ If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
118
+ ```bash
119
+ huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
120
+ ```
121
+
122
+ Store your Hugging Face repository name in a variable to run these commands:
123
+ ```bash
124
+ HF_USER=$(huggingface-cli whoami | head -n 1)
125
+ echo $HF_USER
126
+ ```
127
+
128
+ Record one episode:
129
+ ```bash
130
+ python lerobot/scripts/control_robot.py \
131
+ --robot.type=stretch \
132
+ --control.type=record \
133
+ --control.fps=30 \
134
+ --control.single_task="Grasp a lego block and put it in the bin." \
135
+ --control.repo_id=${HF_USER}/stretch_test \
136
+ --control.tags='["tutorial"]' \
137
+ --control.warmup_time_s=5 \
138
+ --control.episode_time_s=30 \
139
+ --control.reset_time_s=30 \
140
+ --control.num_episodes=2 \
141
+ --control.push_to_hub=true
142
+ ```
143
+
144
+ > **Note:** If you're using ssh to connect to Stretch and run this script, you won't be able to visualize its cameras feed (though they will still be recording). To see the cameras stream, use [tethered](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup) or [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup).
145
+
146
+ **Replay an episode**
147
+ Now try to replay this episode (make sure the robot's initial position is the same):
148
+ ```bash
149
+ python lerobot/scripts/control_robot.py \
150
+ --robot.type=stretch \
151
+ --control.type=replay \
152
+ --control.fps=30 \
153
+ --control.repo_id=${HF_USER}/stretch_test \
154
+ --control.episode=0
155
+ ```
156
+
157
+ Follow [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) to train a policy on your data and run inference on your robot. You will need to adapt the code for Stretch.
158
+
159
+ > TODO(rcadene, aliberts): Add already setup environment and policy yaml configuration files
160
+
161
+ If you need help, please reach out on Discord in the channel `#stretch3-mobile-arm`.
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/backward_compatibility.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import packaging.version
16
+
17
+ V2_MESSAGE = """
18
+ The dataset you requested ({repo_id}) is in {version} format.
19
+
20
+ We introduced a new format since v2.0 which is not backward compatible with v1.x.
21
+ Please, use our conversion script. Modify the following command with your own task description:
22
+ ```
23
+ python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \\
24
+ --repo-id {repo_id} \\
25
+ --single-task "TASK DESCRIPTION." # <---- /!\\ Replace TASK DESCRIPTION /!\\
26
+ ```
27
+
28
+ A few examples to replace TASK DESCRIPTION: "Pick up the blue cube and place it into the bin.", "Insert the
29
+ peg into the socket.", "Slide open the ziploc bag.", "Take the elevator to the 1st floor.", "Open the top
30
+ cabinet, store the pot inside it then close the cabinet.", "Push the T-shaped block onto the T-shaped
31
+ target.", "Grab the spray paint on the shelf and place it in the bin on top of the robot dog.", "Fold the
32
+ sweatshirt.", ...
33
+
34
+ If you encounter a problem, contact LeRobot maintainers on [Discord](https://discord.com/invite/s3KuuzsPFb)
35
+ or open an [issue on GitHub](https://github.com/huggingface/lerobot/issues/new/choose).
36
+ """
37
+
38
+ V21_MESSAGE = """
39
+ The dataset you requested ({repo_id}) is in {version} format.
40
+ While current version of LeRobot is backward-compatible with it, the version of your dataset still uses global
41
+ stats instead of per-episode stats. Update your dataset stats to the new format using this command:
42
+ ```
43
+ python lerobot/common/datasets/v21/convert_dataset_v20_to_v21.py --repo-id={repo_id}
44
+ ```
45
+
46
+ If you encounter a problem, contact LeRobot maintainers on [Discord](https://discord.com/invite/s3KuuzsPFb)
47
+ or open an [issue on GitHub](https://github.com/huggingface/lerobot/issues/new/choose).
48
+ """
49
+
50
+ FUTURE_MESSAGE = """
51
+ The dataset you requested ({repo_id}) is only available in {version} format.
52
+ As we cannot ensure forward compatibility with it, please update your current version of lerobot.
53
+ """
54
+
55
+
56
class CompatibilityError(Exception):
    """Base class for dataset codebase-version compatibility errors."""

    ...
57
+
58
+
59
class BackwardCompatibilityError(CompatibilityError):
    """Raised when a dataset uses an old (v1.x) format this codebase cannot read."""

    def __init__(self, repo_id: str, version: packaging.version.Version):
        # V2_MESSAGE walks the user through the v1 -> v2 conversion script.
        super().__init__(V2_MESSAGE.format(repo_id=repo_id, version=version))
63
+
64
+
65
class ForwardCompatibilityError(CompatibilityError):
    """Raised when a dataset uses a format newer than this version of lerobot."""

    def __init__(self, repo_id: str, version: packaging.version.Version):
        # Forward compatibility is not guaranteed; the message asks the user to upgrade.
        super().__init__(FUTURE_MESSAGE.format(repo_id=repo_id, version=version))
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/card_template.md ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ # For reference on dataset card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1
3
+ # Doc / guide: https://huggingface.co/docs/hub/datasets-cards
4
+ {{ card_data }}
5
+ ---
6
+
7
+ This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
8
+
9
+ ## Dataset Description
10
+
11
+ {{ dataset_description | default("", true) }}
12
+
13
+ - **Homepage:** {{ url | default("[More Information Needed]", true)}}
14
+ - **Paper:** {{ paper | default("[More Information Needed]", true)}}
15
+ - **License:** {{ license | default("[More Information Needed]", true)}}
16
+
17
+ ## Dataset Structure
18
+
19
+ {{ dataset_structure | default("[More Information Needed]", true)}}
20
+
21
+ ## Citation
22
+
23
+ **BibTeX:**
24
+
25
+ ```bibtex
26
+ {{ citation_bibtex | default("[More Information Needed]", true)}}
27
+ ```
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/compute_stats.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import numpy as np
17
+
18
+ from lerobot.common.datasets.utils import load_image_as_numpy
19
+
20
+
21
+ def estimate_num_samples(
22
+ dataset_len: int, min_num_samples: int = 100, max_num_samples: int = 10_000, power: float = 0.75
23
+ ) -> int:
24
+ """Heuristic to estimate the number of samples based on dataset size.
25
+ The power controls the sample growth relative to dataset size.
26
+ Lower the power for less number of samples.
27
+
28
+ For default arguments, we have:
29
+ - from 1 to ~500, num_samples=100
30
+ - at 1000, num_samples=177
31
+ - at 2000, num_samples=299
32
+ - at 5000, num_samples=594
33
+ - at 10000, num_samples=1000
34
+ - at 20000, num_samples=1681
35
+ """
36
+ if dataset_len < min_num_samples:
37
+ min_num_samples = dataset_len
38
+ return max(min_num_samples, min(int(dataset_len**power), max_num_samples))
39
+
40
+
41
def sample_indices(data_len: int) -> list[int]:
    """Return evenly spaced integer indices covering ``[0, data_len - 1]``.

    The number of indices is chosen by the ``estimate_num_samples`` heuristic.
    """
    count = estimate_num_samples(data_len)
    positions = np.linspace(0, data_len - 1, count)
    return np.round(positions).astype(int).tolist()
44
+
45
+
46
def auto_downsample_height_width(img: np.ndarray, target_size: int = 150, max_size_threshold: int = 300):
    """Downsample a channel-first image by strided slicing when it is large.

    Images whose largest spatial side is below ``max_size_threshold`` are
    returned untouched. Otherwise both spatial axes are subsampled by the
    integer factor that brings the largest side down to roughly ``target_size``.
    """
    _, height, width = img.shape
    largest_side = max(height, width)

    if largest_side < max_size_threshold:
        # Small enough already: no downsampling needed.
        return img

    # Stride derived from the dominant spatial dimension.
    stride = int(largest_side / target_size)
    return img[:, ::stride, ::stride]
55
+
56
+
57
def sample_images(image_paths: list[str]) -> np.ndarray:
    """Load a representative subset of images as a single uint8 array.

    Indices are chosen by ``sample_indices``; each image is loaded channel
    first as uint8 (to keep memory usage low) and downsampled when large.
    The output buffer is allocated lazily, once the first loaded image reveals
    the (possibly downsampled) per-image shape.
    """
    chosen = sample_indices(len(image_paths))

    batch = None
    for slot, idx in enumerate(chosen):
        # uint8 loading keeps the memory footprint small.
        img = load_image_as_numpy(image_paths[idx], dtype=np.uint8, channel_first=True)
        img = auto_downsample_height_width(img)

        if batch is None:
            # Allocate once the per-image shape after downsampling is known.
            batch = np.empty((len(chosen), *img.shape), dtype=np.uint8)

        batch[slot] = img

    return batch
73
+
74
+
75
def get_feature_stats(array: np.ndarray, axis: tuple, keepdims: bool) -> dict[str, np.ndarray]:
    """Compute min/max/mean/std of ``array`` over ``axis``, plus the sample count.

    ``count`` is the length of the first axis, wrapped in a 1-element array so
    that every returned statistic is a numpy array.
    """
    return {
        "min": array.min(axis=axis, keepdims=keepdims),
        "max": array.max(axis=axis, keepdims=keepdims),
        "mean": array.mean(axis=axis, keepdims=keepdims),
        "std": array.std(axis=axis, keepdims=keepdims),
        "count": np.array([len(array)]),
    }
83
+
84
+
85
def compute_episode_stats(episode_data: dict[str, list[str] | np.ndarray], features: dict) -> dict:
    """Compute per-feature statistics for a single episode.

    String features are skipped. Visual features ("image"/"video") are sampled
    from their file paths, reduced over batch/height/width (keeping the channel
    dimension), then rescaled from uint8 to [0, 1] with the batch dim squeezed.
    All other features are numpy arrays reduced over their first axis.
    """
    stats = {}
    for key, data in episode_data.items():
        dtype = features[key]["dtype"]
        if dtype == "string":
            # HACK: we should receive np.arrays of strings
            continue

        is_visual = dtype in ("image", "video")
        if is_visual:
            # data is a list of image paths
            values = sample_images(data)
            reduce_axes = (0, 2, 3)  # keep channel dim
            keep_dims = True
        else:
            # data is already a np.ndarray
            values = data
            reduce_axes = 0  # compute stats over the first axis
            keep_dims = data.ndim == 1  # keep as np.array

        feature_stats = get_feature_stats(values, axis=reduce_axes, keepdims=keep_dims)

        if is_visual:
            # Normalize uint8 stats to [0, 1] and drop the batch dim ("count" stays untouched).
            feature_stats = {
                name: value if name == "count" else np.squeeze(value / 255.0, axis=0)
                for name, value in feature_stats.items()
            }

        stats[key] = feature_stats

    return stats
108
+
109
+
110
+ def _assert_type_and_shape(stats_list: list[dict[str, dict]]):
111
+ for i in range(len(stats_list)):
112
+ for fkey in stats_list[i]:
113
+ for k, v in stats_list[i][fkey].items():
114
+ if not isinstance(v, np.ndarray):
115
+ raise ValueError(
116
+ f"Stats must be composed of numpy array, but key '{k}' of feature '{fkey}' is of type '{type(v)}' instead."
117
+ )
118
+ if v.ndim == 0:
119
+ raise ValueError("Number of dimensions must be at least 1, and is 0 instead.")
120
+ if k == "count" and v.shape != (1,):
121
+ raise ValueError(f"Shape of 'count' must be (1), but is {v.shape} instead.")
122
+ if "image" in fkey and k != "count" and v.shape != (3, 1, 1):
123
+ raise ValueError(f"Shape of '{k}' must be (3,1,1), but is {v.shape} instead.")
124
+
125
+
126
def aggregate_feature_stats(stats_ft_list: list[dict[str, dict]]) -> dict[str, dict[str, np.ndarray]]:
    """Merge per-episode stats of a single feature into one set of stats.

    Means are combined as a count-weighted average; variances are merged with
    the parallel variance algorithm (shifting each group's variance by its
    offset from the global mean); min/max are taken element-wise.
    """
    mins = np.stack([s["min"] for s in stats_ft_list])
    maxes = np.stack([s["max"] for s in stats_ft_list])
    means = np.stack([s["mean"] for s in stats_ft_list])
    variances = np.stack([s["std"] ** 2 for s in stats_ft_list])
    counts = np.stack([s["count"] for s in stats_ft_list])

    total_count = counts.sum(axis=0)

    # Broadcast counts against the stat arrays for weighting.
    while counts.ndim < means.ndim:
        counts = np.expand_dims(counts, axis=-1)

    # Count-weighted global mean.
    total_mean = (means * counts).sum(axis=0) / total_count

    # Parallel variance: per-group variance plus squared offset from global mean.
    delta = means - total_mean
    total_variance = ((variances + delta**2) * counts).sum(axis=0) / total_count

    return {
        "min": mins.min(axis=0),
        "max": maxes.max(axis=0),
        "mean": total_mean,
        "std": np.sqrt(total_variance),
        "count": total_count,
    }
153
+
154
+
155
def aggregate_stats(stats_list: list[dict[str, dict]]) -> dict[str, dict[str, np.ndarray]]:
    """Aggregate stats from multiple compute_stats outputs into a single set of stats.

    The final stats will have the union of all data keys from each of the stats dicts.

    For instance:
    - new_min = min(min_dataset_0, min_dataset_1, ...)
    - new_max = max(max_dataset_0, max_dataset_1, ...)
    - new_mean = (mean of all data, weighted by counts)
    - new_std = (std of all data)
    """
    _assert_type_and_shape(stats_list)

    # Union of every feature key present in any of the stats dicts.
    all_keys = {k for stats in stats_list for k in stats}

    # Aggregate each feature over only the dicts that actually contain it.
    return {
        key: aggregate_feature_stats([stats[key] for stats in stats_list if key in stats])
        for key in all_keys
    }
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/factory.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import logging
17
+ from pprint import pformat
18
+
19
+ import torch
20
+
21
+ from lerobot.common.datasets.lerobot_dataset import (
22
+ LeRobotDataset,
23
+ LeRobotDatasetMetadata,
24
+ MultiLeRobotDataset,
25
+ )
26
+ from lerobot.common.datasets.transforms import ImageTransforms
27
+ from lerobot.configs.policies import PreTrainedConfig
28
+ from lerobot.configs.train import TrainPipelineConfig
29
+
30
+ IMAGENET_STATS = {
31
+ "mean": [[[0.485]], [[0.456]], [[0.406]]], # (c,1,1)
32
+ "std": [[[0.229]], [[0.224]], [[0.225]]], # (c,1,1)
33
+ }
34
+
35
+
36
+ def resolve_delta_timestamps(
37
+ cfg: PreTrainedConfig, ds_meta: LeRobotDatasetMetadata
38
+ ) -> dict[str, list] | None:
39
+ """Resolves delta_timestamps by reading from the 'delta_indices' properties of the PreTrainedConfig.
40
+
41
+ Args:
42
+ cfg (PreTrainedConfig): The PreTrainedConfig to read delta_indices from.
43
+ ds_meta (LeRobotDatasetMetadata): The dataset from which features and fps are used to build
44
+ delta_timestamps against.
45
+
46
+ Returns:
47
+ dict[str, list] | None: A dictionary of delta_timestamps, e.g.:
48
+ {
49
+ "observation.state": [-0.04, -0.02, 0]
50
+ "observation.action": [-0.02, 0, 0.02]
51
+ }
52
+ returns `None` if the the resulting dict is empty.
53
+ """
54
+ delta_timestamps = {}
55
+ for key in ds_meta.features:
56
+ if key == "next.reward" and cfg.reward_delta_indices is not None:
57
+ delta_timestamps[key] = [i / ds_meta.fps for i in cfg.reward_delta_indices]
58
+ if key == "action" and cfg.action_delta_indices is not None:
59
+ delta_timestamps[key] = [i / ds_meta.fps for i in cfg.action_delta_indices]
60
+ if key.startswith("observation.") and cfg.observation_delta_indices is not None:
61
+ delta_timestamps[key] = [i / ds_meta.fps for i in cfg.observation_delta_indices]
62
+
63
+ if len(delta_timestamps) == 0:
64
+ delta_timestamps = None
65
+
66
+ return delta_timestamps
67
+
68
+
69
def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDataset:
    """Handles the logic of setting up delta timestamps and image transforms before creating a dataset.

    Args:
        cfg (TrainPipelineConfig): A TrainPipelineConfig config which contains a DatasetConfig and a PreTrainedConfig.

    Raises:
        NotImplementedError: If `cfg.dataset.repo_id` is not a single repo-id string
            (the MultiLeRobotDataset is currently deactivated).

    Returns:
        LeRobotDataset | MultiLeRobotDataset
    """
    image_transforms = (
        ImageTransforms(cfg.dataset.image_transforms) if cfg.dataset.image_transforms.enable else None
    )

    if not isinstance(cfg.dataset.repo_id, str):
        # TODO(aliberts): add proper support for multi dataset. The previous
        # MultiLeRobotDataset construction was unreachable dead code after this raise.
        raise NotImplementedError("The MultiLeRobotDataset isn't supported for now.")

    ds_meta = LeRobotDatasetMetadata(
        cfg.dataset.repo_id, root=cfg.dataset.root, revision=cfg.dataset.revision
    )
    delta_timestamps = resolve_delta_timestamps(cfg.policy, ds_meta)
    dataset = LeRobotDataset(
        cfg.dataset.repo_id,
        root=cfg.dataset.root,
        episodes=cfg.dataset.episodes,
        delta_timestamps=delta_timestamps,
        image_transforms=image_transforms,
        revision=cfg.dataset.revision,
        video_backend=cfg.dataset.video_backend,
    )

    if cfg.dataset.use_imagenet_stats:
        # Override the per-dataset camera stats with ImageNet stats, commonly
        # expected by pretrained vision backbones.
        for key in dataset.meta.camera_keys:
            for stats_type, stats in IMAGENET_STATS.items():
                dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32)

    return dataset
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/image_writer.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import multiprocessing
17
+ import queue
18
+ import threading
19
+ from pathlib import Path
20
+
21
+ import numpy as np
22
+ import PIL.Image
23
+ import torch
24
+
25
+
26
def safe_stop_image_writer(func):
    """Decorator that stops a dataset's async image writer before re-raising an error.

    If the wrapped call raises, this looks for a `dataset` keyword argument exposing an
    `image_writer` attribute and calls its `stop()` so queued images are flushed and
    worker threads/processes are joined before the exception propagates.
    """
    import functools

    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            dataset = kwargs.get("dataset")
            image_writer = getattr(dataset, "image_writer", None) if dataset else None
            if image_writer is not None:
                print("Waiting for image writer to terminate...")
                image_writer.stop()
            # Bare raise keeps the original traceback intact.
            raise

    return wrapper
39
+
40
+
41
def image_array_to_pil_image(image_array: np.ndarray, range_check: bool = True) -> PIL.Image.Image:
    """Convert an RGB array in (H, W, C) or (C, H, W) layout into a PIL image.

    Float arrays are expected to hold values in [0.0, 1.0] and are rescaled to uint8.
    """
    # TODO(aliberts): handle 1 channel and 4 for depth images
    if image_array.ndim != 3:
        raise ValueError(f"The array has {image_array.ndim} dimensions, but 3 is expected for an image.")

    if image_array.shape[0] == 3:
        # Channel-first (pytorch) layout: move channels last.
        image_array = image_array.transpose(1, 2, 0)
    elif image_array.shape[-1] != 3:
        raise NotImplementedError(
            f"The image has {image_array.shape[-1]} channels, but 3 is required for now."
        )

    if image_array.dtype != np.uint8:
        if range_check:
            min_ = image_array.min().item()
            max_ = image_array.max().item()
            if min_ < 0.0 or max_ > 1.0:
                raise ValueError(
                    "The image data type is float, which requires values in the range [0.0, 1.0]. "
                    f"However, the provided range is [{min_}, {max_}]. Please adjust the range or "
                    "provide a uint8 image with values in the range [0, 255]."
                )
        image_array = (image_array * 255).astype(np.uint8)

    return PIL.Image.fromarray(image_array)
69
+
70
+
71
+ def write_image(image: np.ndarray | PIL.Image.Image, fpath: Path):
72
+ try:
73
+ if isinstance(image, np.ndarray):
74
+ img = image_array_to_pil_image(image)
75
+ elif isinstance(image, PIL.Image.Image):
76
+ img = image
77
+ else:
78
+ raise TypeError(f"Unsupported image type: {type(image)}")
79
+ img.save(fpath)
80
+ except Exception as e:
81
+ print(f"Error writing image {fpath}: {e}")
82
+
83
+
84
def worker_thread_loop(queue: queue.Queue):
    """Consume (image, path) items from the queue and write them to disk.

    A `None` item is the shutdown sentinel: acknowledge it and return.
    """
    while True:
        item = queue.get()
        if item is None:
            queue.task_done()
            return
        image_array, fpath = item
        write_image(image_array, fpath)
        queue.task_done()
93
+
94
+
95
def worker_process(queue: queue.Queue, num_threads: int):
    """Entry point of a writer subprocess: run `num_threads` writer threads and wait for them."""
    workers = []
    for _ in range(num_threads):
        thread = threading.Thread(target=worker_thread_loop, args=(queue,))
        thread.daemon = True
        thread.start()
        workers.append(thread)
    for thread in workers:
        thread.join()
104
+
105
+
106
class AsyncImageWriter:
    """
    This class abstracts away the initialisation of processes or/and threads to
    save images on disk asynchronously, which is critical to control a robot and record data
    at a high frame rate.

    When `num_processes=0`, it creates a threads pool of size `num_threads`.
    When `num_processes>0`, it creates a processes pool of size `num_processes`, where each subprocess
    starts its own threads pool of size `num_threads`.

    The optimal number of processes and threads depends on your computer capabilities.
    We advise to use 4 threads per camera with 0 processes. If the fps is not stable, try to increase or lower
    the number of threads. If it is still not stable, try to use 1 subprocess, or more.
    """

    def __init__(self, num_processes: int = 0, num_threads: int = 1):
        self.num_processes = num_processes
        self.num_threads = num_threads
        self.queue = None
        self.threads = []
        self.processes = []
        self._stopped = False

        # NOTE(review): this only rejects the case where BOTH are <= 0; num_processes > 0 with
        # num_threads <= 0 passes validation but each subprocess would start zero workers — confirm intended.
        if num_threads <= 0 and num_processes <= 0:
            raise ValueError("Number of threads and processes must be greater than zero.")

        if self.num_processes == 0:
            # Use threading: in-process daemon threads draining a thread-safe queue.
            self.queue = queue.Queue()
            for _ in range(self.num_threads):
                t = threading.Thread(target=worker_thread_loop, args=(self.queue,))
                t.daemon = True
                t.start()
                self.threads.append(t)
        else:
            # Use multiprocessing: each subprocess spawns its own pool of writer threads.
            self.queue = multiprocessing.JoinableQueue()
            for _ in range(self.num_processes):
                p = multiprocessing.Process(target=worker_process, args=(self.queue, self.num_threads))
                p.daemon = True
                p.start()
                self.processes.append(p)

    def save_image(self, image: torch.Tensor | np.ndarray | PIL.Image.Image, fpath: Path):
        """Enqueue an image for asynchronous writing to `fpath` (returns immediately)."""
        if isinstance(image, torch.Tensor):
            # Convert tensor to numpy array to minimize main process time
            image = image.cpu().numpy()
        self.queue.put((image, fpath))

    def wait_until_done(self):
        """Block until every queued image has been written (queue fully acknowledged)."""
        self.queue.join()

    def stop(self):
        """Shut down workers by sending one None sentinel per worker, then join them. Idempotent."""
        if self._stopped:
            return

        if self.num_processes == 0:
            for _ in self.threads:
                self.queue.put(None)
            for t in self.threads:
                t.join()
        else:
            # One sentinel per writer thread across all subprocesses.
            num_nones = self.num_processes * self.num_threads
            for _ in range(num_nones):
                self.queue.put(None)
            for p in self.processes:
                p.join()
                # NOTE(review): join() above has no timeout, so is_alive() should always be
                # False here — this terminate() looks unreachable as written; confirm intent.
                if p.is_alive():
                    p.terminate()
            self.queue.close()
            self.queue.join_thread()

        self._stopped = True
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/lerobot_dataset.py ADDED
@@ -0,0 +1,1217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import contextlib
17
+ import logging
18
+ import shutil
19
+ from pathlib import Path
20
+ from typing import Callable
21
+
22
+ import datasets
23
+ import numpy as np
24
+ import packaging.version
25
+ import PIL.Image
26
+ import torch
27
+ import torch.utils
28
+ from datasets import concatenate_datasets, load_dataset
29
+ from huggingface_hub import HfApi, snapshot_download
30
+ from huggingface_hub.constants import REPOCARD_NAME
31
+ from huggingface_hub.errors import RevisionNotFoundError
32
+
33
+ from lerobot.common.constants import HF_LEROBOT_HOME
34
+ from lerobot.common.datasets.compute_stats import aggregate_stats, compute_episode_stats
35
+ from lerobot.common.datasets.image_writer import AsyncImageWriter, write_image
36
+ from lerobot.common.datasets.utils import (
37
+ DEFAULT_FEATURES,
38
+ DEFAULT_IMAGE_PATH,
39
+ INFO_PATH,
40
+ TASKS_PATH,
41
+ append_jsonlines,
42
+ backward_compatible_episodes_stats,
43
+ check_delta_timestamps,
44
+ check_timestamps_sync,
45
+ check_version_compatibility,
46
+ create_empty_dataset_info,
47
+ create_lerobot_dataset_card,
48
+ embed_images,
49
+ get_delta_indices,
50
+ get_episode_data_index,
51
+ get_features_from_robot,
52
+ get_hf_features_from_features,
53
+ get_safe_version,
54
+ hf_transform_to_torch,
55
+ is_valid_version,
56
+ load_episodes,
57
+ load_episodes_stats,
58
+ load_info,
59
+ load_stats,
60
+ load_tasks,
61
+ validate_episode_buffer,
62
+ validate_frame,
63
+ write_episode,
64
+ write_episode_stats,
65
+ write_info,
66
+ write_json,
67
+ )
68
+ from lerobot.common.datasets.video_utils import (
69
+ VideoFrame,
70
+ decode_video_frames,
71
+ encode_video_frames,
72
+ get_safe_default_codec,
73
+ get_video_info,
74
+ )
75
+ from lerobot.common.robot_devices.robots.utils import Robot
76
+
77
+ CODEBASE_VERSION = "v2.1"
78
+
79
+
80
class LeRobotDatasetMetadata:
    """Loads and manages a LeRobotDataset's metadata (info, tasks, episodes, stats).

    Metadata lives under `root/meta/`. On init it is loaded from the local cache when
    present; otherwise the `meta/` folder is pulled from the Hugging Face Hub at `revision`.
    """

    def __init__(
        self,
        repo_id: str,
        root: str | Path | None = None,
        revision: str | None = None,
        force_cache_sync: bool = False,
    ):
        self.repo_id = repo_id
        # Default to the codebase version tag when no explicit revision is given.
        self.revision = revision if revision else CODEBASE_VERSION
        self.root = Path(root) if root is not None else HF_LEROBOT_HOME / repo_id

        try:
            if force_cache_sync:
                # Bypass the local cache on purpose to force a fresh pull from the hub.
                raise FileNotFoundError
            self.load_metadata()
        except (FileNotFoundError, NotADirectoryError):
            if is_valid_version(self.revision):
                self.revision = get_safe_version(self.repo_id, self.revision)

            (self.root / "meta").mkdir(exist_ok=True, parents=True)
            self.pull_from_repo(allow_patterns="meta/")
            self.load_metadata()

    def load_metadata(self):
        """Populate info/tasks/episodes/stats attributes from files under `root/meta`."""
        self.info = load_info(self.root)
        check_version_compatibility(self.repo_id, self._version, CODEBASE_VERSION)
        self.tasks, self.task_to_task_index = load_tasks(self.root)
        self.episodes = load_episodes(self.root)
        if self._version < packaging.version.parse("v2.1"):
            # Pre-v2.1 datasets only ship global stats; derive per-episode stats from them.
            self.stats = load_stats(self.root)
            self.episodes_stats = backward_compatible_episodes_stats(self.stats, self.episodes)
        else:
            self.episodes_stats = load_episodes_stats(self.root)
            self.stats = aggregate_stats(list(self.episodes_stats.values()))

    def pull_from_repo(
        self,
        allow_patterns: list[str] | str | None = None,
        ignore_patterns: list[str] | str | None = None,
    ) -> None:
        """Download (a filtered subset of) the dataset repo from the Hub into `self.root`."""
        snapshot_download(
            self.repo_id,
            repo_type="dataset",
            revision=self.revision,
            local_dir=self.root,
            allow_patterns=allow_patterns,
            ignore_patterns=ignore_patterns,
        )

    @property
    def _version(self) -> packaging.version.Version:
        """Codebase version used to create this dataset."""
        return packaging.version.parse(self.info["codebase_version"])

    def get_data_file_path(self, ep_index: int) -> Path:
        """Relative path of the parquet file holding episode `ep_index`."""
        ep_chunk = self.get_episode_chunk(ep_index)
        fpath = self.data_path.format(episode_chunk=ep_chunk, episode_index=ep_index)
        return Path(fpath)

    def get_video_file_path(self, ep_index: int, vid_key: str) -> Path:
        """Relative path of the mp4 file for camera `vid_key` of episode `ep_index`."""
        ep_chunk = self.get_episode_chunk(ep_index)
        fpath = self.video_path.format(episode_chunk=ep_chunk, video_key=vid_key, episode_index=ep_index)
        return Path(fpath)

    def get_episode_chunk(self, ep_index: int) -> int:
        """Chunk index (group of episodes) that episode `ep_index` belongs to."""
        return ep_index // self.chunks_size

    @property
    def data_path(self) -> str:
        """Formattable string for the parquet files."""
        return self.info["data_path"]

    @property
    def video_path(self) -> str | None:
        """Formattable string for the video files."""
        return self.info["video_path"]

    @property
    def robot_type(self) -> str | None:
        """Robot type used in recording this dataset."""
        return self.info["robot_type"]

    @property
    def fps(self) -> int:
        """Frames per second used during data collection."""
        return self.info["fps"]

    @property
    def features(self) -> dict[str, dict]:
        """All features contained in the dataset."""
        return self.info["features"]

    @property
    def image_keys(self) -> list[str]:
        """Keys to access visual modalities stored as images."""
        return [key for key, ft in self.features.items() if ft["dtype"] == "image"]

    @property
    def video_keys(self) -> list[str]:
        """Keys to access visual modalities stored as videos."""
        return [key for key, ft in self.features.items() if ft["dtype"] == "video"]

    @property
    def camera_keys(self) -> list[str]:
        """Keys to access visual modalities (regardless of their storage method)."""
        return [key for key, ft in self.features.items() if ft["dtype"] in ["video", "image"]]

    @property
    def names(self) -> dict[str, list | dict]:
        """Names of the various dimensions of vector modalities."""
        return {key: ft["names"] for key, ft in self.features.items()}

    @property
    def shapes(self) -> dict:
        """Shapes for the different features."""
        return {key: tuple(ft["shape"]) for key, ft in self.features.items()}

    @property
    def total_episodes(self) -> int:
        """Total number of episodes available."""
        return self.info["total_episodes"]

    @property
    def total_frames(self) -> int:
        """Total number of frames saved in this dataset."""
        return self.info["total_frames"]

    @property
    def total_tasks(self) -> int:
        """Total number of different tasks performed in this dataset."""
        return self.info["total_tasks"]

    @property
    def total_chunks(self) -> int:
        """Total number of chunks (groups of episodes)."""
        return self.info["total_chunks"]

    @property
    def chunks_size(self) -> int:
        """Max number of episodes per chunk."""
        return self.info["chunks_size"]

    def get_task_index(self, task: str) -> int | None:
        """
        Given a task in natural language, returns its task_index if the task already exists in the dataset,
        otherwise return None.
        """
        return self.task_to_task_index.get(task, None)

    def add_task(self, task: str):
        """
        Given a task in natural language, add it to the dictionary of tasks.

        Raises:
            ValueError: If the task already exists.
        """
        if task in self.task_to_task_index:
            raise ValueError(f"The task '{task}' already exists and can't be added twice.")

        # New tasks get the next sequential index.
        task_index = self.info["total_tasks"]
        self.task_to_task_index[task] = task_index
        self.tasks[task_index] = task
        self.info["total_tasks"] += 1

        task_dict = {
            "task_index": task_index,
            "task": task,
        }
        append_jsonlines(task_dict, self.root / TASKS_PATH)

    def save_episode(
        self,
        episode_index: int,
        episode_length: int,
        episode_tasks: list[str],
        episode_stats: dict[str, dict],
    ) -> None:
        """Record a newly written episode in the metadata files (info, episodes, stats)."""
        self.info["total_episodes"] += 1
        self.info["total_frames"] += episode_length

        # Open a new chunk when the episode index spills past the current last chunk.
        chunk = self.get_episode_chunk(episode_index)
        if chunk >= self.total_chunks:
            self.info["total_chunks"] += 1

        self.info["splits"] = {"train": f"0:{self.info['total_episodes']}"}
        self.info["total_videos"] += len(self.video_keys)
        if len(self.video_keys) > 0:
            self.update_video_info()

        write_info(self.info, self.root)

        episode_dict = {
            "episode_index": episode_index,
            "tasks": episode_tasks,
            "length": episode_length,
        }
        self.episodes[episode_index] = episode_dict
        write_episode(episode_dict, self.root)

        self.episodes_stats[episode_index] = episode_stats
        # Fold the new episode's stats into the running aggregate (or seed it on first episode).
        self.stats = aggregate_stats([self.stats, episode_stats]) if self.stats else episode_stats
        write_episode_stats(episode_index, episode_stats, self.root)

    def update_video_info(self) -> None:
        """
        Warning: this function writes info from first episode videos, implicitly assuming that all videos have
        been encoded the same way. Also, this means it assumes the first episode exists.
        """
        for key in self.video_keys:
            if not self.features[key].get("info", None):
                video_path = self.root / self.get_video_file_path(ep_index=0, vid_key=key)
                self.info["features"][key]["info"] = get_video_info(video_path)

    def __repr__(self):
        feature_keys = list(self.features)
        return (
            f"{self.__class__.__name__}({{\n"
            f"    Repository ID: '{self.repo_id}',\n"
            f"    Total episodes: '{self.total_episodes}',\n"
            f"    Total frames: '{self.total_frames}',\n"
            f"    Features: '{feature_keys}',\n"
            "})',\n"
        )

    @classmethod
    def create(
        cls,
        repo_id: str,
        fps: int,
        root: str | Path | None = None,
        robot: Robot | None = None,
        robot_type: str | None = None,
        features: dict | None = None,
        use_videos: bool = True,
    ) -> "LeRobotDatasetMetadata":
        """Creates metadata for a LeRobotDataset."""
        # __new__ bypasses __init__ on purpose: a fresh dataset has no metadata to load/pull yet.
        obj = cls.__new__(cls)
        obj.repo_id = repo_id
        obj.root = Path(root) if root is not None else HF_LEROBOT_HOME / repo_id

        # Fail fast if the dataset directory already exists.
        obj.root.mkdir(parents=True, exist_ok=False)

        if robot is not None:
            # Derive features directly from the robot's sensors/actuators.
            features = get_features_from_robot(robot, use_videos)
            robot_type = robot.robot_type
            if not all(cam.fps == fps for cam in robot.cameras.values()):
                logging.warning(
                    f"Some cameras in your {robot.robot_type} robot don't have an fps matching the fps of your dataset."
                    "In this case, frames from lower fps cameras will be repeated to fill in the blanks."
                )
        elif features is None:
            raise ValueError(
                "Dataset features must either come from a Robot or explicitly passed upon creation."
            )
        else:
            # TODO(aliberts, rcadene): implement sanity check for features
            features = {**features, **DEFAULT_FEATURES}

        # check if none of the features contains a "/" in their names,
        # as this would break the dict flattening in the stats computation, which uses '/' as separator
        for key in features:
            if "/" in key:
                raise ValueError(f"Feature names should not contain '/'. Found '/' in feature '{key}'.")

        # NOTE(review): redundant with the merge in the `else` branch above (harmless, but
        # one of the two could be dropped) — confirm which one is intended to stay.
        features = {**features, **DEFAULT_FEATURES}

        obj.tasks, obj.task_to_task_index = {}, {}
        obj.episodes_stats, obj.stats, obj.episodes = {}, {}, {}
        obj.info = create_empty_dataset_info(CODEBASE_VERSION, fps, robot_type, features, use_videos)
        if len(obj.video_keys) > 0 and not use_videos:
            # NOTE(review): bare ValueError with no message — should explain that video
            # features were declared while use_videos=False.
            raise ValueError()
        write_json(obj.info, obj.root / INFO_PATH)
        # No revision yet: this dataset only exists locally until pushed to the hub.
        obj.revision = None
        return obj
352
+
353
+
354
+ class LeRobotDataset(torch.utils.data.Dataset):
355
+ def __init__(
356
+ self,
357
+ repo_id: str,
358
+ root: str | Path | None = None,
359
+ episodes: list[int] | None = None,
360
+ image_transforms: Callable | None = None,
361
+ delta_timestamps: dict[list[float]] | None = None,
362
+ tolerance_s: float = 1e-4,
363
+ revision: str | None = None,
364
+ force_cache_sync: bool = False,
365
+ download_videos: bool = True,
366
+ video_backend: str | None = None,
367
+ ):
368
+ """
369
+ 2 modes are available for instantiating this class, depending on 2 different use cases:
370
+
371
+ 1. Your dataset already exists:
372
+ - On your local disk in the 'root' folder. This is typically the case when you recorded your
373
+ dataset locally and you may or may not have pushed it to the hub yet. Instantiating this class
374
+ with 'root' will load your dataset directly from disk. This can happen while you're offline (no
375
+ internet connection).
376
+
377
+ - On the Hugging Face Hub at the address https://huggingface.co/datasets/{repo_id} and not on
378
+ your local disk in the 'root' folder. Instantiating this class with this 'repo_id' will download
379
+ the dataset from that address and load it, pending your dataset is compliant with
380
+ codebase_version v2.0. If your dataset has been created before this new format, you will be
381
+ prompted to convert it using our conversion script from v1.6 to v2.0, which you can find at
382
+ lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py.
383
+
384
+
385
+ 2. Your dataset doesn't already exists (either on local disk or on the Hub): you can create an empty
386
+ LeRobotDataset with the 'create' classmethod. This can be used for recording a dataset or port an
387
+ existing dataset to the LeRobotDataset format.
388
+
389
+
390
+ In terms of files, LeRobotDataset encapsulates 3 main things:
391
+ - metadata:
392
+ - info contains various information about the dataset like shapes, keys, fps etc.
393
+ - stats stores the dataset statistics of the different modalities for normalization
394
+ - tasks contains the prompts for each task of the dataset, which can be used for
395
+ task-conditioned training.
396
+ - hf_dataset (from datasets.Dataset), which will read any values from parquet files.
397
+ - videos (optional) from which frames are loaded to be synchronous with data from parquet files.
398
+
399
+ A typical LeRobotDataset looks like this from its root path:
400
+ .
401
+ ├── data
402
+ │ ├── chunk-000
403
+ │ │ ├── episode_000000.parquet
404
+ │ │ ├── episode_000001.parquet
405
+ │ │ ├── episode_000002.parquet
406
+ │ │ └── ...
407
+ │ ├── chunk-001
408
+ │ │ ├── episode_001000.parquet
409
+ │ │ ├── episode_001001.parquet
410
+ │ │ ├── episode_001002.parquet
411
+ │ │ └── ...
412
+ │ └── ...
413
+ ├── meta
414
+ │ ├── episodes.jsonl
415
+ │ ├── info.json
416
+ │ ├── stats.json
417
+ │ └── tasks.jsonl
418
+ └── videos
419
+ ├── chunk-000
420
+ │ ├── observation.images.laptop
421
+ │ │ ├── episode_000000.mp4
422
+ │ │ ├── episode_000001.mp4
423
+ │ │ ├── episode_000002.mp4
424
+ │ │ └── ...
425
+ │ ├── observation.images.phone
426
+ │ │ ├── episode_000000.mp4
427
+ │ │ ├── episode_000001.mp4
428
+ │ │ ├── episode_000002.mp4
429
+ │ │ └── ...
430
+ ├── chunk-001
431
+ └── ...
432
+
433
+ Note that this file-based structure is designed to be as versatile as possible. The files are split by
434
+ episodes which allows a more granular control over which episodes one wants to use and download. The
435
+ structure of the dataset is entirely described in the info.json file, which can be easily downloaded
436
+ or viewed directly on the hub before downloading any actual data. The type of files used are very
437
+ simple and do not need complex tools to be read, it only uses .parquet, .json and .mp4 files (and .md
438
+ for the README).
439
+
440
+ Args:
441
+ repo_id (str): This is the repo id that will be used to fetch the dataset. Locally, the dataset
442
+ will be stored under root/repo_id.
443
+ root (Path | None, optional): Local directory to use for downloading/writing files. You can also
444
+ set the LEROBOT_HOME environment variable to point to a different location. Defaults to
445
+ '~/.cache/huggingface/lerobot'.
446
+ episodes (list[int] | None, optional): If specified, this will only load episodes specified by
447
+ their episode_index in this list. Defaults to None.
448
+ image_transforms (Callable | None, optional): You can pass standard v2 image transforms from
449
+ torchvision.transforms.v2 here which will be applied to visual modalities (whether they come
450
+ from videos or images). Defaults to None.
451
+ delta_timestamps (dict[list[float]] | None, optional): _description_. Defaults to None.
452
+ tolerance_s (float, optional): Tolerance in seconds used to ensure data timestamps are actually in
453
+ sync with the fps value. It is used at the init of the dataset to make sure that each
454
+ timestamps is separated to the next by 1/fps +/- tolerance_s. This also applies to frames
455
+ decoded from video files. It is also used to check that `delta_timestamps` (when provided) are
456
+ multiples of 1/fps. Defaults to 1e-4.
457
+ revision (str, optional): An optional Git revision id which can be a branch name, a tag, or a
458
+ commit hash. Defaults to current codebase version tag.
459
+ sync_cache_first (bool, optional): Flag to sync and refresh local files first. If True and files
460
+ are already present in the local cache, this will be faster. However, files loaded might not
461
+ be in sync with the version on the hub, especially if you specified 'revision'. Defaults to
462
+ False.
463
+ download_videos (bool, optional): Flag to download the videos. Note that when set to True but the
464
+ video files are already present on local disk, they won't be downloaded again. Defaults to
465
+ True.
466
+ video_backend (str | None, optional): Video backend to use for decoding videos. Defaults to torchcodec when available in the platform; otherwise, defaults to 'pyav'.
467
+ You can also use the 'pyav' decoder used by Torchvision, which used to be the default option, or 'video_reader' which is another decoder of Torchvision.
468
+ """
469
+ super().__init__()
470
+ self.repo_id = repo_id
471
+ self.root = Path(root) if root else HF_LEROBOT_HOME / repo_id
472
+ self.image_transforms = image_transforms
473
+ self.delta_timestamps = delta_timestamps
474
+ self.episodes = episodes
475
+ self.tolerance_s = tolerance_s
476
+ self.revision = revision if revision else CODEBASE_VERSION
477
+ self.video_backend = video_backend if video_backend else get_safe_default_codec()
478
+ self.delta_indices = None
479
+
480
+ # Unused attributes
481
+ self.image_writer = None
482
+ self.episode_buffer = None
483
+
484
+ self.root.mkdir(exist_ok=True, parents=True)
485
+
486
+ # Load metadata
487
+ self.meta = LeRobotDatasetMetadata(
488
+ self.repo_id, self.root, self.revision, force_cache_sync=force_cache_sync
489
+ )
490
+ if self.episodes is not None and self.meta._version >= packaging.version.parse("v2.1"):
491
+ episodes_stats = [self.meta.episodes_stats[ep_idx] for ep_idx in self.episodes]
492
+ self.stats = aggregate_stats(episodes_stats)
493
+
494
+ # Load actual data
495
+ try:
496
+ if force_cache_sync:
497
+ raise FileNotFoundError
498
+ assert all((self.root / fpath).is_file() for fpath in self.get_episodes_file_paths())
499
+ self.hf_dataset = self.load_hf_dataset()
500
+ except (AssertionError, FileNotFoundError, NotADirectoryError):
501
+ self.revision = get_safe_version(self.repo_id, self.revision)
502
+ self.download_episodes(download_videos)
503
+ self.hf_dataset = self.load_hf_dataset()
504
+
505
+ self.episode_data_index = get_episode_data_index(self.meta.episodes, self.episodes)
506
+
507
+ # Check timestamps
508
+ timestamps = torch.stack(self.hf_dataset["timestamp"]).numpy()
509
+ episode_indices = torch.stack(self.hf_dataset["episode_index"]).numpy()
510
+ ep_data_index_np = {k: t.numpy() for k, t in self.episode_data_index.items()}
511
+ check_timestamps_sync(timestamps, episode_indices, ep_data_index_np, self.fps, self.tolerance_s)
512
+
513
+ # Setup delta_indices
514
+ if self.delta_timestamps is not None:
515
+ check_delta_timestamps(self.delta_timestamps, self.fps, self.tolerance_s)
516
+ self.delta_indices = get_delta_indices(self.delta_timestamps, self.fps)
517
+
518
def push_to_hub(
    self,
    branch: str | None = None,
    tags: list | None = None,
    license: str | None = "apache-2.0",
    tag_version: bool = True,
    push_videos: bool = True,
    private: bool = False,
    allow_patterns: list[str] | str | None = None,
    upload_large_folder: bool = False,
    **card_kwargs,
) -> None:
    """Upload the local dataset directory (self.root) to the Hugging Face Hub.

    Args:
        branch: Hub branch to push to; created from `self.revision` if missing.
        tags: Tags written to the dataset card (only when no card exists yet).
        license: License identifier written to the dataset card.
        tag_version: If True, (re)points the `CODEBASE_VERSION` git tag at the pushed revision.
        push_videos: If False, the "videos/" folder is excluded from the upload.
        private: If True, the repo is created private when it does not exist yet.
        allow_patterns: Optional allow-list of file patterns forwarded to the upload call.
        upload_large_folder: If True, use `HfApi.upload_large_folder` instead of
            `HfApi.upload_folder` (resumable, suited to very large datasets).
        **card_kwargs: Extra keyword arguments forwarded to `create_lerobot_dataset_card`.
    """
    # Raw per-frame images are an intermediate artifact; never upload them.
    ignore_patterns = ["images/"]
    if not push_videos:
        ignore_patterns.append("videos/")

    hub_api = HfApi()
    hub_api.create_repo(
        repo_id=self.repo_id,
        private=private,
        repo_type="dataset",
        exist_ok=True,
    )
    if branch:
        hub_api.create_branch(
            repo_id=self.repo_id,
            branch=branch,
            revision=self.revision,
            repo_type="dataset",
            exist_ok=True,
        )

    upload_kwargs = {
        "repo_id": self.repo_id,
        "folder_path": self.root,
        "repo_type": "dataset",
        "revision": branch,
        "allow_patterns": allow_patterns,
        "ignore_patterns": ignore_patterns,
    }
    if upload_large_folder:
        hub_api.upload_large_folder(**upload_kwargs)
    else:
        hub_api.upload_folder(**upload_kwargs)

    # Only create a dataset card if the repo does not already have one.
    if not hub_api.file_exists(self.repo_id, REPOCARD_NAME, repo_type="dataset", revision=branch):
        card = create_lerobot_dataset_card(
            tags=tags, dataset_info=self.meta.info, license=license, **card_kwargs
        )
        card.push_to_hub(repo_id=self.repo_id, repo_type="dataset", revision=branch)

    if tag_version:
        # Move the codebase-version tag: delete it if present (ignore if absent), then recreate
        # it pointing at the freshly pushed revision.
        with contextlib.suppress(RevisionNotFoundError):
            hub_api.delete_tag(self.repo_id, tag=CODEBASE_VERSION, repo_type="dataset")
        hub_api.create_tag(self.repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset")
573
+
574
def pull_from_repo(
    self,
    allow_patterns: list[str] | str | None = None,
    ignore_patterns: list[str] | str | None = None,
) -> None:
    """Download a (possibly filtered) snapshot of the hub dataset into self.root."""
    download_args = {
        "repo_type": "dataset",
        "revision": self.revision,
        "local_dir": self.root,
        "allow_patterns": allow_patterns,
        "ignore_patterns": ignore_patterns,
    }
    snapshot_download(self.repo_id, **download_args)
587
+
588
def download_episodes(self, download_videos: bool = True) -> None:
    """Fetch the dataset files for the selected episodes from the hub.

    If `self.episodes` is None the whole dataset is pulled; otherwise only the data
    (and video) files of the selected episodes are requested. `snapshot_download`
    skips files that are already present locally, so repeated calls are cheap.
    """
    # TODO(rcadene, aliberts): implement faster transfer
    # https://huggingface.co/docs/huggingface_hub/en/guides/download#faster-downloads
    ignore_patterns = "videos/" if not download_videos else None
    files = self.get_episodes_file_paths() if self.episodes is not None else None
    self.pull_from_repo(allow_patterns=files, ignore_patterns=ignore_patterns)
602
+
603
def get_episodes_file_paths(self) -> list[Path]:
    """Return the relative paths (as strings) of the parquet and video files
    belonging to the selected episodes (all episodes when no selection was given).
    """
    if self.episodes is not None:
        episode_indices = self.episodes
    else:
        episode_indices = list(range(self.meta.total_episodes))

    fpaths = [str(self.meta.get_data_file_path(ep_idx)) for ep_idx in episode_indices]
    # One mp4 per (video key, episode), grouped by video key like the data files above.
    for vid_key in self.meta.video_keys:
        fpaths.extend(
            str(self.meta.get_video_file_path(ep_idx, vid_key)) for ep_idx in episode_indices
        )
    return fpaths
615
+
616
def load_hf_dataset(self) -> datasets.Dataset:
    """Load the parquet files of the selected episodes as a single `datasets.Dataset`.

    The returned dataset holds all low-dimensional columns (states, actions, rewards,
    timestamps, indices, ...). Video frames are decoded separately at access time.
    """
    if self.episodes is not None:
        data_files = [str(self.root / self.meta.get_data_file_path(ep)) for ep in self.episodes]
        hf_dataset = load_dataset("parquet", data_files=data_files, split="train")
    else:
        hf_dataset = load_dataset("parquet", data_dir=str(self.root / "data"), split="train")

    # TODO(aliberts): hf_dataset.set_format("torch")
    hf_dataset.set_transform(hf_transform_to_torch)
    return hf_dataset
628
+
629
def create_hf_dataset(self) -> datasets.Dataset:
    """Create an empty `datasets.Dataset` carrying this dataset's feature schema."""
    hf_features = get_hf_features_from_features(self.features)
    empty_columns = {column: [] for column in hf_features}
    hf_dataset = datasets.Dataset.from_dict(empty_columns, features=hf_features, split="train")

    # TODO(aliberts): hf_dataset.set_format("torch")
    hf_dataset.set_transform(hf_transform_to_torch)
    return hf_dataset
637
+
638
@property
def fps(self) -> int:
    """Frames per second used during data collection (read from the dataset metadata)."""
    return self.meta.fps
642
+
643
@property
def num_frames(self) -> int:
    """Number of frames in the selected episodes.

    Falls back to the metadata total when the hf_dataset has not been loaded.
    """
    if self.hf_dataset is None:
        return self.meta.total_frames
    return len(self.hf_dataset)
647
+
648
@property
def num_episodes(self) -> int:
    """Number of selected episodes (all episodes when no selection was given)."""
    if self.episodes is None:
        return self.meta.total_episodes
    return len(self.episodes)
652
+
653
@property
def features(self) -> dict[str, dict]:
    """Feature schema of the dataset (feature name -> descriptor dict), from metadata."""
    return self.meta.features
656
+
657
@property
def hf_features(self) -> datasets.Features:
    """Features of the hf_dataset, derived from metadata when it is not loaded yet."""
    if self.hf_dataset is None:
        return get_hf_features_from_features(self.features)
    return self.hf_dataset.features
664
+
665
def _get_query_indices(self, idx: int, ep_idx: int) -> tuple[dict[str, list[int | bool]]]:
    """Map per-key frame deltas to absolute dataset indices, clamped to the episode.

    Returns a (query_indices, padding) pair: `query_indices[key]` lists the absolute
    row indices to fetch for `key`, clamped to [episode start, episode end - 1];
    `padding[f"{key}_is_pad"]` flags which requested deltas fell outside the episode
    (and were therefore clamped to a boundary frame).
    """
    first = self.episode_data_index["from"][ep_idx].item()
    last_excl = self.episode_data_index["to"][ep_idx].item()

    query_indices = {}
    padding = {}
    for key, deltas in self.delta_indices.items():
        targets = [idx + delta for delta in deltas]
        query_indices[key] = [max(first, min(last_excl - 1, t)) for t in targets]
        padding[f"{key}_is_pad"] = torch.BoolTensor(
            [t < first or t >= last_excl for t in targets]
        )
    return query_indices, padding
679
+
680
def _get_query_timestamps(
    self,
    current_ts: float,
    query_indices: dict[str, list[int]] | None = None,
) -> dict[str, list[float]]:
    """Collect, per video key, the timestamps whose frames must be decoded.

    Video keys present in `query_indices` use the timestamps of those dataset rows
    (delta_timestamps case); every other video key only needs the current frame.
    """
    query_timestamps = {}
    for key in self.meta.video_keys:
        has_deltas = query_indices is not None and key in query_indices
        if not has_deltas:
            query_timestamps[key] = [current_ts]
        else:
            rows = self.hf_dataset.select(query_indices[key])
            query_timestamps[key] = torch.stack(rows["timestamp"]).tolist()

    return query_timestamps
694
+
695
def _query_hf_dataset(self, query_indices: dict[str, list[int]]) -> dict:
    """Fetch and stack the non-video columns listed in `query_indices`."""
    result = {}
    for key, row_indices in query_indices.items():
        if key in self.meta.video_keys:
            # Video frames are decoded from mp4 files elsewhere, not read from hf_dataset.
            continue
        result[key] = torch.stack(self.hf_dataset.select(row_indices)[key])
    return result
701
+
702
def _query_videos(self, query_timestamps: dict[str, list[float]], ep_idx: int) -> dict[str, torch.Tensor]:
    """Decode, for each video key, the frames closest to the requested timestamps.

    Note: When using data workers (e.g. DataLoader with num_workers>0), do not call this function
    in the main process (e.g. by using a second Dataloader with num_workers=0). It will result in a
    Segmentation Fault. This probably happens because a memory reference to the video loader is created in
    the main process and a subprocess fails to access it.
    """
    item = {}
    for vid_key, query_ts in query_timestamps.items():
        video_path = self.root / self.meta.get_video_file_path(ep_idx, vid_key)
        frames = decode_video_frames(video_path, query_ts, self.tolerance_s, self.video_backend)
        # Drop the leading stack dimension when a single timestamp was requested; for
        # multi-timestamp queries squeeze(0) is a no-op and the stack dim is kept.
        item[vid_key] = frames.squeeze(0)

    return item
715
+
716
def _add_padding_keys(self, item: dict, padding: dict[str, list[bool]]) -> dict:
    """Copy the `*_is_pad` masks into `item` (in place) as boolean tensors; return `item`."""
    for pad_key in padding:
        item[pad_key] = torch.BoolTensor(padding[pad_key])
    return item
720
+
721
def __len__(self):
    """Total number of frames available through __getitem__."""
    return self.num_frames
723
+
724
def __getitem__(self, idx) -> dict:
    """Return the frame at `idx` as a dict of tensors plus the task string.

    When `delta_timestamps` was provided, temporally-shifted values and their
    `*_is_pad` masks are added, and video frames are decoded on the fly from mp4 files.
    """
    item = self.hf_dataset[idx]
    ep_idx = item["episode_index"].item()

    query_indices = None
    if self.delta_indices is not None:
        # Gather the neighboring rows requested via delta_timestamps (clamped to the
        # episode boundaries) together with the corresponding padding masks.
        query_indices, padding = self._get_query_indices(idx, ep_idx)
        query_result = self._query_hf_dataset(query_indices)
        item = {**item, **padding}
        for key, val in query_result.items():
            item[key] = val

    if len(self.meta.video_keys) > 0:
        # Video frames are not stored in hf_dataset; decode them from the mp4 files.
        current_ts = item["timestamp"].item()
        query_timestamps = self._get_query_timestamps(current_ts, query_indices)
        video_frames = self._query_videos(query_timestamps, ep_idx)
        # `item` comes second so its (non-video) keys win on collision.
        item = {**video_frames, **item}

    if self.image_transforms is not None:
        image_keys = self.meta.camera_keys
        for cam in image_keys:
            item[cam] = self.image_transforms(item[cam])

    # Add task as a string
    task_idx = item["task_index"].item()
    item["task"] = self.meta.tasks[task_idx]

    return item
752
+
753
def __repr__(self):
    """Human-readable summary of the dataset (repo id, selection sizes, feature keys)."""
    feature_keys = list(self.features)
    # Fix: the closing literal used to be "})',\n", leaving a stray trailing `',` in
    # the printed representation.
    return (
        f"{self.__class__.__name__}({{\n"
        f"    Repository ID: '{self.repo_id}',\n"
        f"    Number of selected episodes: '{self.num_episodes}',\n"
        f"    Number of selected samples: '{self.num_frames}',\n"
        f"    Features: '{feature_keys}',\n"
        "})"
    )
763
+
764
+ def create_episode_buffer(self, episode_index: int | None = None) -> dict:
765
+ current_ep_idx = self.meta.total_episodes if episode_index is None else episode_index
766
+ ep_buffer = {}
767
+ # size and task are special cases that are not in self.features
768
+ ep_buffer["size"] = 0
769
+ ep_buffer["task"] = []
770
+ for key in self.features:
771
+ ep_buffer[key] = current_ep_idx if key == "episode_index" else []
772
+ return ep_buffer
773
+
774
def _get_image_file_path(self, episode_index: int, image_key: str, frame_index: int) -> Path:
    """Path (under self.root) of the temporary png for one camera frame."""
    relative_path = DEFAULT_IMAGE_PATH.format(
        image_key=image_key, episode_index=episode_index, frame_index=frame_index
    )
    return self.root / relative_path
779
+
780
def _save_image(self, image: torch.Tensor | np.ndarray | PIL.Image.Image, fpath: Path) -> None:
    """Write one image to disk: asynchronously via the image writer when attached,
    synchronously otherwise (tensors are converted to numpy for the sync path)."""
    if self.image_writer is not None:
        self.image_writer.save_image(image=image, fpath=fpath)
        return
    if isinstance(image, torch.Tensor):
        image = image.cpu().numpy()
    write_image(image, fpath)
787
+
788
def add_frame(self, frame: dict) -> None:
    """
    This function only adds the frame to the episode_buffer. Apart from images — which are written in a
    temporary directory — nothing is written to disk. To save those frames, the 'save_episode()' method
    then needs to be called.

    Note: `frame` is mutated in place (tensors converted to numpy, "timestamp" popped).
    """
    # Convert torch to numpy if needed
    for name in frame:
        if isinstance(frame[name], torch.Tensor):
            frame[name] = frame[name].numpy()

    validate_frame(frame, self.features)

    # Lazily create the buffer on the first frame of a recording session.
    if self.episode_buffer is None:
        self.episode_buffer = self.create_episode_buffer()

    # Automatically add frame_index and timestamp to episode buffer
    # (when no timestamp is provided, assume a constant self.fps frame rate).
    frame_index = self.episode_buffer["size"]
    timestamp = frame.pop("timestamp") if "timestamp" in frame else frame_index / self.fps
    self.episode_buffer["frame_index"].append(frame_index)
    self.episode_buffer["timestamp"].append(timestamp)

    # Add frame features to episode_buffer
    for key in frame:
        if key == "task":
            # Note: we associate the task in natural language to its task index during `save_episode`
            self.episode_buffer["task"].append(frame["task"])
            continue

        if key not in self.features:
            raise ValueError(
                f"An element of the frame is not in the features. '{key}' not in '{self.features.keys()}'."
            )

        if self.features[key]["dtype"] in ["image", "video"]:
            # Visual modalities are written to a temporary png file; the buffer only
            # stores the file path, and videos are encoded later in `save_episode`.
            img_path = self._get_image_file_path(
                episode_index=self.episode_buffer["episode_index"], image_key=key, frame_index=frame_index
            )
            if frame_index == 0:
                img_path.parent.mkdir(parents=True, exist_ok=True)
            self._save_image(frame[key], img_path)
            self.episode_buffer[key].append(str(img_path))
        else:
            self.episode_buffer[key].append(frame[key])

    self.episode_buffer["size"] += 1
834
+
835
def save_episode(self, episode_data: dict | None = None) -> None:
    """
    This will save to disk the current episode in self.episode_buffer.

    Args:
        episode_data (dict | None, optional): Dict containing the episode data to save. If None, this will
            save the current episode in self.episode_buffer, which is filled with 'add_frame'. Defaults to
            None.
    """
    # Fix: previously `episode_buffer` was only assigned when `episode_data` was falsy,
    # so passing the documented `episode_data` argument raised NameError below.
    episode_buffer = episode_data if episode_data else self.episode_buffer

    validate_episode_buffer(episode_buffer, self.meta.total_episodes, self.features)

    # size and task are special cases that won't be added to hf_dataset
    episode_length = episode_buffer.pop("size")
    tasks = episode_buffer.pop("task")
    episode_tasks = list(set(tasks))
    episode_index = episode_buffer["episode_index"]

    episode_buffer["index"] = np.arange(self.meta.total_frames, self.meta.total_frames + episode_length)
    episode_buffer["episode_index"] = np.full((episode_length,), episode_index)

    # Add new tasks to the tasks dictionary
    for task in episode_tasks:
        task_index = self.meta.get_task_index(task)
        if task_index is None:
            self.meta.add_task(task)

    # Given tasks in natural language, find their corresponding task indices
    episode_buffer["task_index"] = np.array([self.meta.get_task_index(task) for task in tasks])

    for key, ft in self.features.items():
        # index, episode_index, task_index are already processed above, and image and video
        # are processed separately by storing image path and frame info as meta data
        if key in ["index", "episode_index", "task_index"] or ft["dtype"] in ["image", "video"]:
            continue
        episode_buffer[key] = np.stack(episode_buffer[key])

    # Make sure all temporary pngs have been flushed before they are read back for encoding.
    self._wait_image_writer()
    self._save_episode_table(episode_buffer, episode_index)
    ep_stats = compute_episode_stats(episode_buffer, self.features)

    if len(self.meta.video_keys) > 0:
        video_paths = self.encode_episode_videos(episode_index)
        for key in self.meta.video_keys:
            episode_buffer[key] = video_paths[key]

    # `meta.save_episode` must be executed after encoding the videos
    self.meta.save_episode(episode_index, episode_length, episode_tasks, ep_stats)

    # Sanity-check that the saved episode's timestamps respect the dataset fps.
    ep_data_index = get_episode_data_index(self.meta.episodes, [episode_index])
    ep_data_index_np = {k: t.numpy() for k, t in ep_data_index.items()}
    check_timestamps_sync(
        episode_buffer["timestamp"],
        episode_buffer["episode_index"],
        ep_data_index_np,
        self.fps,
        self.tolerance_s,
    )

    # Sanity-check that the on-disk file counts match the bookkeeping.
    video_files = list(self.root.rglob("*.mp4"))
    assert len(video_files) == self.num_episodes * len(self.meta.video_keys)

    parquet_files = list(self.root.rglob("*.parquet"))
    assert len(parquet_files) == self.num_episodes

    # Delete the temporary per-frame images now that videos/parquet are written.
    img_dir = self.root / "images"
    if img_dir.is_dir():
        shutil.rmtree(img_dir)

    if not episode_data:  # Reset the buffer
        self.episode_buffer = self.create_episode_buffer()
909
+
910
def _save_episode_table(self, episode_buffer: dict, episode_index: int) -> None:
    """Append the episode's rows to the in-memory hf_dataset and write them to a parquet file."""
    episode_dict = {key: episode_buffer[key] for key in self.hf_features}
    ep_dataset = datasets.Dataset.from_dict(episode_dict, features=self.hf_features, split="train")
    ep_dataset = embed_images(ep_dataset)
    self.hf_dataset = concatenate_datasets([self.hf_dataset, ep_dataset])
    self.hf_dataset.set_transform(hf_transform_to_torch)
    ep_data_path = self.root / self.meta.get_data_file_path(ep_index=episode_index)
    ep_data_path.parent.mkdir(parents=True, exist_ok=True)
    ep_dataset.to_parquet(ep_data_path)
919
+
920
def clear_episode_buffer(self) -> None:
    """Discard the in-progress episode: remove its temporary images and reset the buffer."""
    ep_index = self.episode_buffer["episode_index"]
    if self.image_writer is not None:
        for cam_key in self.meta.camera_keys:
            cam_dir = self._get_image_file_path(
                episode_index=ep_index, image_key=cam_key, frame_index=0
            ).parent
            if cam_dir.is_dir():
                shutil.rmtree(cam_dir)

    # Reset the buffer
    self.episode_buffer = self.create_episode_buffer()
932
+
933
def start_image_writer(self, num_processes: int = 0, num_threads: int = 4) -> None:
    """Attach an AsyncImageWriter that persists camera frames off the main thread."""
    if isinstance(self.image_writer, AsyncImageWriter):
        logging.warning(
            "You are starting a new AsyncImageWriter that is replacing an already existing one in the dataset."
        )

    self.image_writer = AsyncImageWriter(num_processes=num_processes, num_threads=num_threads)
943
+
944
def stop_image_writer(self) -> None:
    """
    Stop and detach the asynchronous image writer, if any.

    Whenever wrapping this dataset inside a parallelized DataLoader, this needs to be called first to
    remove the image_writer in order for the LeRobotDataset object to be pickleable and parallelized.
    """
    writer = self.image_writer
    if writer is None:
        return
    writer.stop()
    self.image_writer = None
952
+
953
def _wait_image_writer(self) -> None:
    """Block until the asynchronous image writer has flushed all pending images."""
    writer = self.image_writer
    if writer is not None:
        writer.wait_until_done()
957
+
958
def encode_videos(self) -> None:
    """
    Use ffmpeg to convert frames stored as png into mp4 videos, for every episode.
    Note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speedup encoding,
    since video encoding with ffmpeg is already using multithreading.
    """
    for episode_index in range(self.meta.total_episodes):
        self.encode_episode_videos(episode_index)
966
+
967
def encode_episode_videos(self, episode_index: int) -> dict:
    """
    Use ffmpeg to convert frames stored as png into mp4 videos.
    Note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speedup encoding,
    since video encoding with ffmpeg is already using multithreading.

    Returns:
        dict: Mapping of video key -> mp4 path (as str) for this episode.
    """
    video_paths = {}
    for key in self.meta.video_keys:
        video_path = self.root / self.meta.get_video_file_path(episode_index, key)
        video_paths[key] = str(video_path)
        if video_path.is_file():
            # Skip if video is already encoded. Could be the case when resuming data recording.
            continue
        # The pngs of this (episode, camera) all live in the same directory; derive it
        # from the path of frame 0.
        img_dir = self._get_image_file_path(
            episode_index=episode_index, image_key=key, frame_index=0
        ).parent
        encode_video_frames(img_dir, video_path, self.fps, overwrite=True)

    return video_paths
986
+
987
@classmethod
def create(
    cls,
    repo_id: str,
    fps: int,
    root: str | Path | None = None,
    robot: Robot | None = None,
    robot_type: str | None = None,
    features: dict | None = None,
    use_videos: bool = True,
    tolerance_s: float = 1e-4,
    image_writer_processes: int = 0,
    image_writer_threads: int = 0,
    video_backend: str | None = None,
) -> "LeRobotDataset":
    """Create a LeRobot Dataset from scratch in order to record data."""
    # Bypass __init__ (which expects an existing dataset locally or on the hub) and
    # initialize only the attributes needed for recording.
    obj = cls.__new__(cls)
    obj.meta = LeRobotDatasetMetadata.create(
        repo_id=repo_id,
        fps=fps,
        root=root,
        robot=robot,
        robot_type=robot_type,
        features=features,
        use_videos=use_videos,
    )
    obj.repo_id = obj.meta.repo_id
    obj.root = obj.meta.root
    obj.revision = None
    obj.tolerance_s = tolerance_s
    obj.image_writer = None

    if image_writer_processes or image_writer_threads:
        obj.start_image_writer(image_writer_processes, image_writer_threads)

    # TODO(aliberts, rcadene, alexander-soare): Merge this with OnlineBuffer/DataBuffer
    obj.episode_buffer = obj.create_episode_buffer()

    # A freshly created dataset has no episode selection, no transforms and no
    # delta_timestamps; the hf_dataset starts empty with the declared feature schema.
    obj.episodes = None
    obj.hf_dataset = obj.create_hf_dataset()
    obj.image_transforms = None
    obj.delta_timestamps = None
    obj.delta_indices = None
    obj.episode_data_index = None
    obj.video_backend = video_backend if video_backend is not None else get_safe_default_codec()
    return obj
1033
+
1034
+
1035
class MultiLeRobotDataset(torch.utils.data.Dataset):
    """A dataset consisting of multiple underlying `LeRobotDataset`s.

    The underlying `LeRobotDataset`s are effectively concatenated, and this class adopts much of the API
    structure of `LeRobotDataset`.
    """

    def __init__(
        self,
        repo_ids: list[str],
        root: str | Path | None = None,
        episodes: dict | None = None,
        image_transforms: Callable | None = None,
        delta_timestamps: dict[list[float]] | None = None,
        tolerances_s: dict | None = None,
        download_videos: bool = True,
        video_backend: str | None = None,
    ):
        super().__init__()
        self.repo_ids = repo_ids
        self.root = Path(root) if root else HF_LEROBOT_HOME
        self.tolerances_s = tolerances_s if tolerances_s else dict.fromkeys(repo_ids, 0.0001)
        # Construct the underlying datasets passing everything but `transform` and `delta_timestamps` which
        # are handled by this class.
        self._datasets = [
            LeRobotDataset(
                repo_id,
                root=self.root / repo_id,
                episodes=episodes[repo_id] if episodes else None,
                image_transforms=image_transforms,
                delta_timestamps=delta_timestamps,
                tolerance_s=self.tolerances_s[repo_id],
                download_videos=download_videos,
                video_backend=video_backend,
            )
            for repo_id in repo_ids
        ]

        # Disable any data keys that are not common across all of the datasets. Note: we may relax this
        # restriction in future iterations of this class. For now, this is necessary at least for being able
        # to use PyTorch's default DataLoader collate function.
        self.disabled_features = set()
        intersection_features = set(self._datasets[0].features)
        for ds in self._datasets:
            intersection_features.intersection_update(ds.features)
        if len(intersection_features) == 0:
            raise RuntimeError(
                "Multiple datasets were provided but they had no keys common to all of them. "
                "The multi-dataset functionality currently only keeps common keys."
            )
        for repo_id, ds in zip(self.repo_ids, self._datasets, strict=True):
            extra_keys = set(ds.features).difference(intersection_features)
            if extra_keys:
                # Fix: only warn when a dataset actually loses keys (previously a
                # confusing "keys set() ... were disabled" warning was logged for
                # every dataset, including ones with no extra keys).
                logging.warning(
                    f"keys {extra_keys} of {repo_id} were disabled as they are not contained in all the "
                    "other datasets."
                )
            self.disabled_features.update(extra_keys)

        self.image_transforms = image_transforms
        self.delta_timestamps = delta_timestamps
        # TODO(rcadene, aliberts): We should not perform this aggregation for datasets
        # with multiple robots of different ranges. Instead we should have one normalization
        # per robot.
        self.stats = aggregate_stats([dataset.meta.stats for dataset in self._datasets])

    @property
    def repo_id_to_index(self):
        """Return a mapping from dataset repo_id to a dataset index automatically created by this class.

        This index is incorporated as a data key in the dictionary returned by `__getitem__`.
        """
        return {repo_id: i for i, repo_id in enumerate(self.repo_ids)}

    @property
    def repo_index_to_id(self):
        """Return the inverse mapping of repo_id_to_index."""
        # Fix: iterating a dict yields only its keys, so the previous
        # `for k, v in self.repo_id_to_index` raised at runtime; `.items()` is required.
        return {v: k for k, v in self.repo_id_to_index.items()}

    @property
    def fps(self) -> int:
        """Frames per second used during data collection.

        NOTE: For now, this relies on a check in __init__ to make sure all sub-datasets have the same info.
        """
        return self._datasets[0].meta.info["fps"]

    @property
    def video(self) -> bool:
        """Returns True if this dataset loads video frames from mp4 files.

        Returns False if it only loads images from png files.

        NOTE: For now, this relies on a check in __init__ to make sure all sub-datasets have the same info.
        """
        return self._datasets[0].meta.info.get("video", False)

    @property
    def features(self) -> datasets.Features:
        """Union of the sub-datasets' hf features, minus the disabled (non-common) keys."""
        features = {}
        for dataset in self._datasets:
            features.update({k: v for k, v in dataset.hf_features.items() if k not in self.disabled_features})
        return features

    @property
    def camera_keys(self) -> list[str]:
        """Keys to access image and video stream from cameras."""
        keys = []
        for key, feats in self.features.items():
            if isinstance(feats, (datasets.Image, VideoFrame)):
                keys.append(key)
        return keys

    @property
    def video_frame_keys(self) -> list[str]:
        """Keys to access video frames that requires to be decoded into images.

        Note: It is empty if the dataset contains images only,
        or equal to `self.cameras` if the dataset contains videos only,
        or can even be a subset of `self.cameras` in a case of a mixed image/video dataset.
        """
        video_frame_keys = []
        for key, feats in self.features.items():
            if isinstance(feats, VideoFrame):
                video_frame_keys.append(key)
        return video_frame_keys

    @property
    def num_frames(self) -> int:
        """Number of samples/frames."""
        return sum(d.num_frames for d in self._datasets)

    @property
    def num_episodes(self) -> int:
        """Number of episodes."""
        return sum(d.num_episodes for d in self._datasets)

    @property
    def tolerance_s(self) -> float:
        """Tolerance in seconds used to discard loaded frames when their timestamps
        are not close enough from the requested frames. It is only used when `delta_timestamps`
        is provided or when loading video frames from mp4 files.
        """
        # 1e-4 to account for possible numerical error
        return 1 / self.fps - 1e-4

    def __len__(self):
        return self.num_frames

    def __getitem__(self, idx: int) -> dict[str, torch.Tensor]:
        if idx >= len(self):
            raise IndexError(f"Index {idx} out of bounds.")
        # Determine which dataset to get an item from based on the index.
        start_idx = 0
        dataset_idx = 0
        for dataset in self._datasets:
            if idx >= start_idx + dataset.num_frames:
                start_idx += dataset.num_frames
                dataset_idx += 1
                continue
            break
        else:
            raise AssertionError("We expect the loop to break out as long as the index is within bounds.")
        item = self._datasets[dataset_idx][idx - start_idx]
        item["dataset_index"] = torch.tensor(dataset_idx)
        for data_key in self.disabled_features:
            if data_key in item:
                del item[data_key]

        return item

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(\n"
            f"  Repository IDs: '{self.repo_ids}',\n"
            f"  Number of Samples: {self.num_frames},\n"
            f"  Number of Episodes: {self.num_episodes},\n"
            f"  Type: {'video (.mp4)' if self.video else 'image (.png)'},\n"
            f"  Recorded Frames per Second: {self.fps},\n"
            f"  Camera Keys: {self.camera_keys},\n"
            f"  Video Frame Keys: {self.video_frame_keys if self.video else 'N/A'},\n"
            f"  Transformations: {self.image_transforms},\n"
            f")"
        )
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/online_buffer.py ADDED
@@ -0,0 +1,384 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """An online buffer for the online training loop in train.py
17
+
18
+ Note to maintainers: This duplicates some logic from LeRobotDataset and EpisodeAwareSampler. We should
19
+ consider converging to one approach. Here we have opted to use numpy.memmap to back the data buffer. It's much
20
+ faster than using HuggingFace Datasets as there's no conversion to an intermediate non-python object. Also it
21
+ supports in-place slicing and mutation which is very handy for a dynamic buffer.
22
+ """
23
+
24
+ import os
25
+ from pathlib import Path
26
+ from typing import Any
27
+
28
+ import numpy as np
29
+ import torch
30
+
31
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
32
+
33
+
34
+ def _make_memmap_safe(**kwargs) -> np.memmap:
35
+ """Make a numpy memmap with checks on available disk space first.
36
+
37
+ Expected kwargs are: "filename", "dtype" (must by np.dtype), "mode" and "shape"
38
+
39
+ For information on dtypes:
40
+ https://numpy.org/doc/stable/reference/arrays.dtypes.html#arrays-dtypes-constructing
41
+ """
42
+ if kwargs["mode"].startswith("w"):
43
+ required_space = kwargs["dtype"].itemsize * np.prod(kwargs["shape"]) # bytes
44
+ stats = os.statvfs(Path(kwargs["filename"]).parent)
45
+ available_space = stats.f_bavail * stats.f_frsize # bytes
46
+ if required_space >= available_space * 0.8:
47
+ raise RuntimeError(
48
+ f"You're about to take up {required_space} of {available_space} bytes available."
49
+ )
50
+ return np.memmap(**kwargs)
51
+
52
+
53
class OnlineBuffer(torch.utils.data.Dataset):
    """FIFO data buffer for the online training loop in train.py.

    Follows the protocol of LeRobotDataset as much as is required to have it be used by the online training
    loop in the same way that a LeRobotDataset would be used.

    The underlying data structure will have data inserted in a circular fashion. Always insert after the
    last index, and when you reach the end, wrap around to the start.

    The data is stored in a numpy memmap.
    """

    # Internal bookkeeping keys. The leading "_" is what excludes them from returned items
    # (see `__getitem__`) and from `data_keys`.
    NEXT_INDEX_KEY = "_next_index"
    OCCUPANCY_MASK_KEY = "_occupancy_mask"
    # Keys managed by this class itself; user data specs must not redefine them
    # (enforced in `_make_data_spec`).
    INDEX_KEY = "index"
    FRAME_INDEX_KEY = "frame_index"
    EPISODE_INDEX_KEY = "episode_index"
    TIMESTAMP_KEY = "timestamp"
    IS_PAD_POSTFIX = "_is_pad"

    def __init__(
        self,
        write_dir: str | Path,
        data_spec: dict[str, Any] | None,
        buffer_capacity: int | None,
        fps: float | None = None,
        delta_timestamps: dict[str, list[float]] | dict[str, np.ndarray] | None = None,
    ):
        """
        The online buffer can be provided from scratch or you can load an existing online buffer by passing
        a `write_dir` associated with an existing buffer.

        Args:
            write_dir: Where to keep the numpy memmap files. One memmap file will be stored for each data key.
                Note that if the files already exist, they are opened in read-write mode (used for training
                resumption.)
            data_spec: A mapping from data key to data specification, like {data_key: {"shape": tuple[int],
                "dtype": np.dtype}}. This should include all the data that you wish to record into the buffer,
                but note that "index", "frame_index" and "episode_index" are already accounted for by this
                class, so you don't need to include them.
            buffer_capacity: How many frames should be stored in the buffer as a maximum. Be aware of your
                system's available disk space when choosing this.
            fps: Same as the fps concept in LeRobot dataset. Here it needs to be provided for the
                delta_timestamps logic. You can pass None if you are not using delta_timestamps.
            delta_timestamps: Same as the delta_timestamps concept in LeRobotDataset. This is internally
                converted to dict[str, np.ndarray] for optimization purposes.

        """
        self.set_delta_timestamps(delta_timestamps)
        self._fps = fps
        # Tolerance in seconds used to discard loaded frames when their timestamps are not close enough from
        # the requested frames. It is only used when `delta_timestamps` is provided.
        # minus 1e-4 to account for possible numerical error
        self.tolerance_s = 1 / self.fps - 1e-4 if fps is not None else None
        self._buffer_capacity = buffer_capacity
        data_spec = self._make_data_spec(data_spec, buffer_capacity)
        Path(write_dir).mkdir(parents=True, exist_ok=True)
        self._data = {}
        for k, v in data_spec.items():
            # Existing files are reopened read-write ("r+") for resumption; otherwise "w+"
            # creates them zero-filled.
            self._data[k] = _make_memmap_safe(
                filename=Path(write_dir) / k,
                dtype=v["dtype"] if v is not None else None,
                mode="r+" if (Path(write_dir) / k).exists() else "w+",
                shape=tuple(v["shape"]) if v is not None else None,
            )

    @property
    def delta_timestamps(self) -> dict[str, np.ndarray] | None:
        return self._delta_timestamps

    def set_delta_timestamps(self, value: dict[str, list[float]] | None):
        """Set delta_timestamps converting the values to numpy arrays.

        The conversion is for an optimization in the __getitem__. The loop is much slower if the arrays
        need to be converted into numpy arrays.
        """
        if value is not None:
            self._delta_timestamps = {k: np.array(v) for k, v in value.items()}
        else:
            self._delta_timestamps = None

    def _make_data_spec(self, data_spec: dict[str, Any], buffer_capacity: int) -> dict[str, dict[str, Any]]:
        """Makes the data spec for np.memmap.

        Validates the user-provided spec (no reserved or underscore-prefixed keys), then
        prepends the internal bookkeeping entries and prefixes every user shape with the
        buffer capacity as the leading (frame) dimension.
        """
        if any(k.startswith("_") for k in data_spec):
            raise ValueError(
                "data_spec keys should not start with '_'. This prefix is reserved for internal logic."
            )
        preset_keys = {
            OnlineBuffer.INDEX_KEY,
            OnlineBuffer.FRAME_INDEX_KEY,
            OnlineBuffer.EPISODE_INDEX_KEY,
            OnlineBuffer.TIMESTAMP_KEY,
        }
        if len(intersection := set(data_spec).intersection(preset_keys)) > 0:
            raise ValueError(
                f"data_spec should not contain any of {preset_keys} as these are handled internally. "
                f"The provided data_spec has {intersection}."
            )
        complete_data_spec = {
            # _next_index will be a pointer to the next index that we should start filling from when we add
            # more data.
            OnlineBuffer.NEXT_INDEX_KEY: {"dtype": np.dtype("int64"), "shape": ()},
            # Since the memmap is initialized with all-zeros, this keeps track of which indices are occupied
            # with real data rather than the dummy initialization.
            OnlineBuffer.OCCUPANCY_MASK_KEY: {"dtype": np.dtype("?"), "shape": (buffer_capacity,)},
            OnlineBuffer.INDEX_KEY: {"dtype": np.dtype("int64"), "shape": (buffer_capacity,)},
            OnlineBuffer.FRAME_INDEX_KEY: {"dtype": np.dtype("int64"), "shape": (buffer_capacity,)},
            OnlineBuffer.EPISODE_INDEX_KEY: {"dtype": np.dtype("int64"), "shape": (buffer_capacity,)},
            OnlineBuffer.TIMESTAMP_KEY: {"dtype": np.dtype("float64"), "shape": (buffer_capacity,)},
        }
        for k, v in data_spec.items():
            complete_data_spec[k] = {"dtype": v["dtype"], "shape": (buffer_capacity, *v["shape"])}
        return complete_data_spec

    def add_data(self, data: dict[str, np.ndarray]):
        """Add new data to the buffer, which could potentially mean shifting old data out.

        The new data should contain all the frames (in order) of any number of episodes. The indices should
        start from 0 (note to the developer: this can easily be generalized). See the `rollout` and
        `eval_policy` functions in `eval.py` for more information on how the data is constructed.

        Shift the incoming data index and episode_index to continue on from the last frame. Note that this
        will be done in place!
        """
        if len(missing_keys := (set(self.data_keys).difference(set(data)))) > 0:
            raise ValueError(f"Missing data keys: {missing_keys}")
        new_data_length = len(data[self.data_keys[0]])
        if not all(len(data[k]) == new_data_length for k in self.data_keys):
            raise ValueError("All data items should have the same length")

        next_index = self._data[OnlineBuffer.NEXT_INDEX_KEY]

        # Sanity check to make sure that the new data indices start from 0.
        assert data[OnlineBuffer.EPISODE_INDEX_KEY][0].item() == 0
        assert data[OnlineBuffer.INDEX_KEY][0].item() == 0

        # Shift the incoming indices if necessary.
        if self.num_frames > 0:
            last_episode_index = self._data[OnlineBuffer.EPISODE_INDEX_KEY][next_index - 1]
            last_data_index = self._data[OnlineBuffer.INDEX_KEY][next_index - 1]
            data[OnlineBuffer.EPISODE_INDEX_KEY] += last_episode_index + 1
            data[OnlineBuffer.INDEX_KEY] += last_data_index + 1

        # Insert the new data starting from next_index. It may be necessary to wrap around to the start.
        n_surplus = max(0, new_data_length - (self._buffer_capacity - next_index))
        for k in self.data_keys:
            if n_surplus == 0:
                slc = slice(next_index, next_index + new_data_length)
                self._data[k][slc] = data[k]
                self._data[OnlineBuffer.OCCUPANCY_MASK_KEY][slc] = True
            else:
                # Wrap-around: fill to the end of the buffer, then continue from the start.
                # NOTE(review): the occupancy mask for [:n_surplus] is not set here — presumably
                # those slots were already marked occupied by an earlier pass (the buffer fills
                # sequentially before it ever wraps). Verify for the first-wrap case.
                self._data[k][next_index:] = data[k][:-n_surplus]
                self._data[OnlineBuffer.OCCUPANCY_MASK_KEY][next_index:] = True
                self._data[k][:n_surplus] = data[k][-n_surplus:]
        # NOTE(review): these assignments rebind the dict entry from the ()-shaped memmap to a
        # plain integer, so the updated pointer is not written back to the memmap file on disk —
        # confirm whether this matters for training resumption via `write_dir`.
        if n_surplus == 0:
            self._data[OnlineBuffer.NEXT_INDEX_KEY] = next_index + new_data_length
        else:
            self._data[OnlineBuffer.NEXT_INDEX_KEY] = n_surplus

    @property
    def data_keys(self) -> list[str]:
        # All user-facing data keys (internal bookkeeping keys removed), sorted for determinism.
        keys = set(self._data)
        keys.remove(OnlineBuffer.OCCUPANCY_MASK_KEY)
        keys.remove(OnlineBuffer.NEXT_INDEX_KEY)
        return sorted(keys)

    @property
    def fps(self) -> float | None:
        return self._fps

    @property
    def num_episodes(self) -> int:
        # Count distinct episode indices among the occupied slots only.
        return len(
            np.unique(self._data[OnlineBuffer.EPISODE_INDEX_KEY][self._data[OnlineBuffer.OCCUPANCY_MASK_KEY]])
        )

    @property
    def num_frames(self) -> int:
        # Number of occupied slots in the circular buffer.
        return np.count_nonzero(self._data[OnlineBuffer.OCCUPANCY_MASK_KEY])

    def __len__(self):
        return self.num_frames

    def _item_to_tensors(self, item: dict) -> dict:
        # Convert every value in `item` to a torch.Tensor (passing through existing tensors).
        item_ = {}
        for k, v in item.items():
            if isinstance(v, torch.Tensor):
                item_[k] = v
            elif isinstance(v, np.ndarray):
                item_[k] = torch.from_numpy(v)
            else:
                item_[k] = torch.tensor(v)
        return item_

    def __getitem__(self, idx: int) -> dict[str, torch.Tensor]:
        """Return the frame at `idx`, plus (when `delta_timestamps` is set) time-shifted
        frames for each configured key and matching `*_is_pad` flags."""
        if idx >= len(self) or idx < -len(self):
            raise IndexError

        # Internal keys (leading "_") are excluded from the returned item.
        item = {k: v[idx] for k, v in self._data.items() if not k.startswith("_")}

        if self.delta_timestamps is None:
            return self._item_to_tensors(item)

        episode_index = item[OnlineBuffer.EPISODE_INDEX_KEY]
        current_ts = item[OnlineBuffer.TIMESTAMP_KEY]
        episode_data_indices = np.where(
            np.bitwise_and(
                self._data[OnlineBuffer.EPISODE_INDEX_KEY] == episode_index,
                self._data[OnlineBuffer.OCCUPANCY_MASK_KEY],
            )
        )[0]
        episode_timestamps = self._data[OnlineBuffer.TIMESTAMP_KEY][episode_data_indices]

        for data_key in self.delta_timestamps:
            # Note: The logic in this loop is copied from `load_previous_and_future_frames`.
            # Get timestamps used as query to retrieve data of previous/future frames.
            query_ts = current_ts + self.delta_timestamps[data_key]

            # Compute distances between each query timestamp and all timestamps of all the frames belonging to
            # the episode.
            dist = np.abs(query_ts[:, None] - episode_timestamps[None, :])
            argmin_ = np.argmin(dist, axis=1)
            min_ = dist[np.arange(dist.shape[0]), argmin_]

            # A query is padding if its nearest available frame is further away than the tolerance.
            is_pad = min_ > self.tolerance_s

            # Check violated query timestamps are all outside the episode range.
            assert (
                (query_ts[is_pad] < episode_timestamps[0]) | (episode_timestamps[-1] < query_ts[is_pad])
            ).all(), (
                f"One or several timestamps unexpectedly violate the tolerance ({min_} > {self.tolerance_s=}"
                ") inside the episode range."
            )

            # Load frames for this data key.
            item[data_key] = self._data[data_key][episode_data_indices[argmin_]]

            item[f"{data_key}{OnlineBuffer.IS_PAD_POSTFIX}"] = is_pad

        return self._item_to_tensors(item)

    def get_data_by_key(self, key: str) -> torch.Tensor:
        """Returns all data for a given data key as a Tensor."""
        return torch.from_numpy(self._data[key][self._data[OnlineBuffer.OCCUPANCY_MASK_KEY]])
297
+
298
+
299
+ def compute_sampler_weights(
300
+ offline_dataset: LeRobotDataset,
301
+ offline_drop_n_last_frames: int = 0,
302
+ online_dataset: OnlineBuffer | None = None,
303
+ online_sampling_ratio: float | None = None,
304
+ online_drop_n_last_frames: int = 0,
305
+ ) -> torch.Tensor:
306
+ """Compute the sampling weights for the online training dataloader in train.py.
307
+
308
+ Args:
309
+ offline_dataset: The LeRobotDataset used for offline pre-training.
310
+ online_drop_n_last_frames: Number of frames to drop from the end of each offline dataset episode.
311
+ online_dataset: The OnlineBuffer used in online training.
312
+ online_sampling_ratio: The proportion of data that should be sampled from the online dataset. If an
313
+ online dataset is provided, this value must also be provided.
314
+ online_drop_n_first_frames: See `offline_drop_n_last_frames`. This is the same, but for the online
315
+ dataset.
316
+ Returns:
317
+ Tensor of weights for [offline_dataset; online_dataset], normalized to 1.
318
+
319
+ Notes to maintainers:
320
+ - This duplicates some logic from EpisodeAwareSampler. We should consider converging to one approach.
321
+ - When used with `torch.utils.data.WeightedRandomSampler`, it could completely replace
322
+ `EpisodeAwareSampler` as the online dataset related arguments are optional. The only missing feature
323
+ is the ability to turn shuffling off.
324
+ - Options `drop_first_n_frames` and `episode_indices_to_use` can be added easily. They were not
325
+ included here to avoid adding complexity.
326
+ """
327
+ if len(offline_dataset) == 0 and (online_dataset is None or len(online_dataset) == 0):
328
+ raise ValueError("At least one of `offline_dataset` or `online_dataset` should be contain data.")
329
+ if (online_dataset is None) ^ (online_sampling_ratio is None):
330
+ raise ValueError(
331
+ "`online_dataset` and `online_sampling_ratio` must be provided together or not at all."
332
+ )
333
+ offline_sampling_ratio = 0 if online_sampling_ratio is None else 1 - online_sampling_ratio
334
+
335
+ weights = []
336
+
337
+ if len(offline_dataset) > 0:
338
+ offline_data_mask_indices = []
339
+ for start_index, end_index in zip(
340
+ offline_dataset.episode_data_index["from"],
341
+ offline_dataset.episode_data_index["to"],
342
+ strict=True,
343
+ ):
344
+ offline_data_mask_indices.extend(
345
+ range(start_index.item(), end_index.item() - offline_drop_n_last_frames)
346
+ )
347
+ offline_data_mask = torch.zeros(len(offline_dataset), dtype=torch.bool)
348
+ offline_data_mask[torch.tensor(offline_data_mask_indices)] = True
349
+ weights.append(
350
+ torch.full(
351
+ size=(len(offline_dataset),),
352
+ fill_value=offline_sampling_ratio / offline_data_mask.sum(),
353
+ )
354
+ * offline_data_mask
355
+ )
356
+
357
+ if online_dataset is not None and len(online_dataset) > 0:
358
+ online_data_mask_indices = []
359
+ episode_indices = online_dataset.get_data_by_key("episode_index")
360
+ for episode_idx in torch.unique(episode_indices):
361
+ where_episode = torch.where(episode_indices == episode_idx)
362
+ start_index = where_episode[0][0]
363
+ end_index = where_episode[0][-1] + 1
364
+ online_data_mask_indices.extend(
365
+ range(start_index.item(), end_index.item() - online_drop_n_last_frames)
366
+ )
367
+ online_data_mask = torch.zeros(len(online_dataset), dtype=torch.bool)
368
+ online_data_mask[torch.tensor(online_data_mask_indices)] = True
369
+ weights.append(
370
+ torch.full(
371
+ size=(len(online_dataset),),
372
+ fill_value=online_sampling_ratio / online_data_mask.sum(),
373
+ )
374
+ * online_data_mask
375
+ )
376
+
377
+ weights = torch.cat(weights)
378
+
379
+ if weights.sum() == 0:
380
+ weights += 1 / len(weights)
381
+ else:
382
+ weights /= weights.sum()
383
+
384
+ return weights
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/push_dataset_to_hub/utils.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import inspect
17
+ from concurrent.futures import ThreadPoolExecutor
18
+ from pathlib import Path
19
+ from typing import Dict
20
+
21
+ import datasets
22
+ import numpy
23
+ import PIL
24
+ import torch
25
+
26
+ from lerobot.common.datasets.video_utils import encode_video_frames
27
+
28
+
29
def concatenate_episodes(ep_dicts):
    """Merge a list of per-episode dicts into one dict spanning all frames.

    Tensor-valued entries are concatenated along the first dimension; all other
    entries are flattened into a single plain list. A fresh global "index" column
    (0..N-1) is added, with N taken from the concatenated "frame_index" column.
    """
    merged = {}
    for key in ep_dicts[0].keys():
        if torch.is_tensor(ep_dicts[0][key][0]):
            merged[key] = torch.cat([ep[key] for ep in ep_dicts])
        else:
            merged[key] = [element for ep in ep_dicts for element in ep[key]]

    # Re-derive a global frame index across the concatenated episodes.
    total_frames = merged["frame_index"].shape[0]
    merged["index"] = torch.arange(0, total_frames, 1)
    return merged
46
+
47
+
48
def save_images_concurrently(imgs_array: numpy.ndarray, out_dir: Path, max_workers: int = 4):
    """Save a stack of frames as PNG files in `out_dir` using a thread pool.

    Args:
        imgs_array: Sequence/array of frames; element `i` is written to `frame_{i:06d}.png`
            via `PIL.Image.fromarray`.
        out_dir: Destination directory (created if it does not exist).
        max_workers: Number of worker threads used for the writes.
    """
    out_dir = Path(out_dir)
    out_dir.mkdir(parents=True, exist_ok=True)

    def save_image(img_array, i, out_dir):
        img = PIL.Image.fromarray(img_array)
        img.save(str(out_dir / f"frame_{i:06d}.png"), quality=100)

    num_images = len(imgs_array)
    # NOTE(review): the submitted futures are never awaited, so any exception raised inside
    # `save_image` is silently dropped — confirm this best-effort behavior is intended.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        [executor.submit(save_image, imgs_array[i], i, out_dir) for i in range(num_images)]
59
+
60
+
61
def get_default_encoding() -> dict:
    """Returns the default ffmpeg encoding parameters used by `encode_video_frames`."""
    wanted = ("vcodec", "pix_fmt", "g", "crf")
    params = inspect.signature(encode_video_frames).parameters
    # Keep only the encoding-related parameters that actually define a default value.
    return {
        name: param.default
        for name, param in params.items()
        if name in wanted and param.default is not inspect.Parameter.empty
    }
69
+
70
+
71
def check_repo_id(repo_id: str) -> None:
    """Validate that `repo_id` has the two-part form '<user_or_org>/<dataset_name>'.

    Raises:
        ValueError: if `repo_id` does not split into exactly two parts on '/'.
    """
    if len(repo_id.split("/")) != 2:
        raise ValueError(
            f"""`repo_id` is expected to contain a community or user id `/` the name of the dataset
            (e.g. 'lerobot/pusht'), but contains '{repo_id}'."""
        )
77
+
78
+
79
+ # TODO(aliberts): remove
80
def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> Dict[str, torch.Tensor]:
    """
    Calculate episode data index for the provided HuggingFace Dataset. Relies on episode_index column of hf_dataset.

    The episode_index column holds one integer per example, e.g.
    [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]. For that example this function returns
    {"from": tensor([0, 3, 7]), "to": tensor([3, 7, 12])}.

    Parameters:
    - hf_dataset (datasets.Dataset): A HuggingFace dataset containing the episode index.

    Returns:
    - episode_data_index: A dictionary containing the data index for each episode. The dictionary has two keys:
        - "from": A tensor containing the starting index of each episode.
        - "to": A tensor containing the ending index (exclusive) of each episode.
    """
    if len(hf_dataset) == 0:
        return {
            "from": torch.tensor([]),
            "to": torch.tensor([]),
        }

    starts = []
    ends = []
    previous_episode = None
    for idx, episode_idx in enumerate(hf_dataset["episode_index"]):
        if episode_idx != previous_episode:
            # A new episode begins here; the previous one (if any) ends here.
            starts.append(idx)
            if previous_episode is not None:
                ends.append(idx)
            previous_episode = episode_idx
    # Close off the final episode at the end of the dataset.
    ends.append(idx + 1)

    return {
        "from": torch.tensor(starts),
        "to": torch.tensor(ends),
    }
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/sampler.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ from typing import Iterator, Union
17
+
18
+ import torch
19
+
20
+
21
+ class EpisodeAwareSampler:
22
+ def __init__(
23
+ self,
24
+ episode_data_index: dict,
25
+ episode_indices_to_use: Union[list, None] = None,
26
+ drop_n_first_frames: int = 0,
27
+ drop_n_last_frames: int = 0,
28
+ shuffle: bool = False,
29
+ ):
30
+ """Sampler that optionally incorporates episode boundary information.
31
+
32
+ Args:
33
+ episode_data_index: Dictionary with keys 'from' and 'to' containing the start and end indices of each episode.
34
+ episode_indices_to_use: List of episode indices to use. If None, all episodes are used.
35
+ Assumes that episodes are indexed from 0 to N-1.
36
+ drop_n_first_frames: Number of frames to drop from the start of each episode.
37
+ drop_n_last_frames: Number of frames to drop from the end of each episode.
38
+ shuffle: Whether to shuffle the indices.
39
+ """
40
+ indices = []
41
+ for episode_idx, (start_index, end_index) in enumerate(
42
+ zip(episode_data_index["from"], episode_data_index["to"], strict=True)
43
+ ):
44
+ if episode_indices_to_use is None or episode_idx in episode_indices_to_use:
45
+ indices.extend(
46
+ range(start_index.item() + drop_n_first_frames, end_index.item() - drop_n_last_frames)
47
+ )
48
+
49
+ self.indices = indices
50
+ self.shuffle = shuffle
51
+
52
+ def __iter__(self) -> Iterator[int]:
53
+ if self.shuffle:
54
+ for i in torch.randperm(len(self.indices)):
55
+ yield self.indices[i]
56
+ else:
57
+ for i in self.indices:
58
+ yield i
59
+
60
+ def __len__(self) -> int:
61
+ return len(self.indices)
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/transforms.py ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import collections
17
+ from dataclasses import dataclass, field
18
+ from typing import Any, Callable, Sequence
19
+
20
+ import torch
21
+ from torchvision.transforms import v2
22
+ from torchvision.transforms.v2 import Transform
23
+ from torchvision.transforms.v2 import functional as F # noqa: N812
24
+
25
+
26
class RandomSubsetApply(Transform):
    """Apply a random subset of N transformations from a list of transformations.

    Args:
        transforms: list of transformations.
        p: represents the multinomial probabilities (with no replacement) used for sampling the transform.
            If the sum of the weights is not 1, they will be normalized. If ``None`` (default), all transforms
            have the same probability.
        n_subset: number of transformations to apply. If ``None``, all transforms are applied.
            Must be in [1, len(transforms)].
        random_order: apply transformations in a random order.
    """

    def __init__(
        self,
        transforms: Sequence[Callable],
        p: list[float] | None = None,
        n_subset: int | None = None,
        random_order: bool = False,
    ) -> None:
        super().__init__()
        if not isinstance(transforms, Sequence):
            raise TypeError("Argument transforms should be a sequence of callables")
        # No probabilities given: sample every transform with equal weight.
        if p is None:
            p = [1] * len(transforms)
        elif len(p) != len(transforms):
            raise ValueError(
                f"Length of p doesn't match the number of transforms: {len(p)} != {len(transforms)}"
            )

        if n_subset is None:
            n_subset = len(transforms)
        elif not isinstance(n_subset, int):
            raise TypeError("n_subset should be an int or None")
        elif not (1 <= n_subset <= len(transforms)):
            raise ValueError(f"n_subset should be in the interval [1, {len(transforms)}]")

        self.transforms = transforms
        # Normalize the weights so they sum to 1.
        total = sum(p)
        self.p = [prob / total for prob in p]
        self.n_subset = n_subset
        self.random_order = random_order

        # Populated on each forward pass; kept for introspection/debugging of what was applied.
        self.selected_transforms = None

    def forward(self, *inputs: Any) -> Any:
        """Sample `n_subset` transforms (without replacement) and apply them in sequence."""
        needs_unpacking = len(inputs) > 1

        selected_indices = torch.multinomial(torch.tensor(self.p), self.n_subset)
        if not self.random_order:
            # Preserve the order in which the transforms were declared.
            selected_indices = selected_indices.sort().values

        self.selected_transforms = [self.transforms[i] for i in selected_indices]

        # Chain the selected transforms, feeding each one's output into the next.
        for transform in self.selected_transforms:
            outputs = transform(*inputs)
            inputs = outputs if needs_unpacking else (outputs,)

        return outputs

    def extra_repr(self) -> str:
        return (
            f"transforms={self.transforms}, "
            f"p={self.p}, "
            f"n_subset={self.n_subset}, "
            f"random_order={self.random_order}"
        )
93
+
94
+
95
class SharpnessJitter(Transform):
    """Randomly change the sharpness of an image or video.

    Similar to a v2.RandomAdjustSharpness with p=1 and a sharpness_factor sampled randomly.
    While v2.RandomAdjustSharpness applies — with a given probability — a fixed sharpness_factor to an image,
    SharpnessJitter applies a random sharpness_factor each time. This is to have a more diverse set of
    augmentations as a result.

    A sharpness_factor of 0 gives a blurred image, 1 gives the original image while 2 increases the sharpness
    by a factor of 2.

    If the input is a :class:`torch.Tensor`,
    it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        sharpness: How much to jitter sharpness. sharpness_factor is chosen uniformly from
            [max(0, 1 - sharpness), 1 + sharpness] or the given
            [min, max]. Should be non negative numbers.
    """

    def __init__(self, sharpness: float | Sequence[float]) -> None:
        super().__init__()
        self.sharpness = self._check_input(sharpness)

    def _check_input(self, sharpness):
        """Normalize `sharpness` into a validated (min, max) float pair."""
        if isinstance(sharpness, (int, float)):
            if sharpness < 0:
                raise ValueError("If sharpness is a single number, it must be non negative.")
            # A scalar s becomes the symmetric interval [1 - s, 1 + s], clamped at 0.
            sharpness = [1.0 - sharpness, 1.0 + sharpness]
            sharpness[0] = max(sharpness[0], 0.0)
        elif isinstance(sharpness, collections.abc.Sequence) and len(sharpness) == 2:
            sharpness = [float(v) for v in sharpness]
        else:
            raise TypeError(f"{sharpness=} should be a single number or a sequence with length 2.")

        if not 0.0 <= sharpness[0] <= sharpness[1]:
            raise ValueError(f"sharpnesss values should be between (0., inf), but got {sharpness}.")

        return float(sharpness[0]), float(sharpness[1])

    def make_params(self, flat_inputs: list[Any]) -> dict[str, Any]:
        # Draw one factor uniformly from [min, max] per call.
        sharpness_factor = torch.empty(1).uniform_(self.sharpness[0], self.sharpness[1]).item()
        return {"sharpness_factor": sharpness_factor}

    def transform(self, inpt: Any, params: dict[str, Any]) -> Any:
        sharpness_factor = params["sharpness_factor"]
        return self._call_kernel(F.adjust_sharpness, inpt, sharpness_factor=sharpness_factor)
142
+
143
+
144
@dataclass
class ImageTransformConfig:
    """Configuration for a single image transform.

    For each transform, the following parameters are available:
    weight: This represents the multinomial probability (with no replacement)
        used for sampling the transform. If the sum of the weights is not 1,
        they will be normalized.
    type: The name of the class used. This is either a class available under torchvision.transforms.v2 or a
        custom transform defined here.
    kwargs: Lower & upper bound respectively used for sampling the transform's parameter
        (following uniform distribution) when it's applied.
    """

    # Relative sampling weight; transforms with weight <= 0 are dropped by ImageTransforms.
    weight: float = 1.0
    # Class name resolved by `make_transform_from_config` ("Identity", "ColorJitter", "SharpnessJitter").
    type: str = "Identity"
    # Keyword arguments forwarded verbatim to the transform's constructor.
    kwargs: dict[str, Any] = field(default_factory=dict)
160
+
161
+
162
@dataclass
class ImageTransformsConfig:
    """
    These transforms are all using standard torchvision.transforms.v2
    You can find out how these transformations affect images here:
    https://pytorch.org/vision/0.18/auto_examples/transforms/plot_transforms_illustrations.html
    We use a custom RandomSubsetApply container to sample them.
    """

    # Set this flag to `true` to enable transforms during training
    enable: bool = False
    # This is the maximum number of transforms (sampled from these below) that will be applied to each frame.
    # It's an integer in the interval [1, number_of_available_transforms].
    max_num_transforms: int = 3
    # By default, transforms are applied in Torchvision's suggested order (shown below).
    # Set this to True to apply them in a random order.
    random_order: bool = False
    # Default pool of transforms; each entry's `kwargs` gives the sampling range of its parameter.
    tfs: dict[str, ImageTransformConfig] = field(
        default_factory=lambda: {
            "brightness": ImageTransformConfig(
                weight=1.0,
                type="ColorJitter",
                kwargs={"brightness": (0.8, 1.2)},
            ),
            "contrast": ImageTransformConfig(
                weight=1.0,
                type="ColorJitter",
                kwargs={"contrast": (0.8, 1.2)},
            ),
            "saturation": ImageTransformConfig(
                weight=1.0,
                type="ColorJitter",
                kwargs={"saturation": (0.5, 1.5)},
            ),
            "hue": ImageTransformConfig(
                weight=1.0,
                type="ColorJitter",
                kwargs={"hue": (-0.05, 0.05)},
            ),
            "sharpness": ImageTransformConfig(
                weight=1.0,
                type="SharpnessJitter",
                kwargs={"sharpness": (0.5, 1.5)},
            ),
        }
    )
208
+
209
+
210
def make_transform_from_config(cfg: ImageTransformConfig):
    """Instantiate the transform class named by `cfg.type`, passing `cfg.kwargs` through.

    Raises:
        ValueError: if `cfg.type` is not one of the supported transform names.
    """
    transform_classes = {
        "Identity": v2.Identity,
        "ColorJitter": v2.ColorJitter,
        "SharpnessJitter": SharpnessJitter,
    }
    if cfg.type not in transform_classes:
        raise ValueError(f"Transform '{cfg.type}' is not valid.")
    return transform_classes[cfg.type](**cfg.kwargs)
219
+
220
+
221
class ImageTransforms(Transform):
    """Compose image transforms from configuration and randomly apply a subset of them.

    Falls back to an identity transform when augmentation is disabled or when no
    configured transform has a positive weight.
    """

    def __init__(self, cfg: ImageTransformsConfig) -> None:
        super().__init__()
        self._cfg = cfg

        self.weights = []
        self.transforms = {}
        for tf_name, tf_cfg in cfg.tfs.items():
            # A non-positive weight means the transform can never be sampled: skip it.
            if tf_cfg.weight <= 0.0:
                continue
            self.transforms[tf_name] = make_transform_from_config(tf_cfg)
            self.weights.append(tf_cfg.weight)

        n_subset = min(len(self.transforms), cfg.max_num_transforms)
        if not cfg.enable or n_subset == 0:
            self.tf = v2.Identity()
        else:
            self.tf = RandomSubsetApply(
                transforms=list(self.transforms.values()),
                p=self.weights,
                n_subset=n_subset,
                random_order=cfg.random_order,
            )

    def forward(self, *inputs: Any) -> Any:
        return self.tf(*inputs)
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/utils.py ADDED
@@ -0,0 +1,813 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import contextlib
17
+ import importlib.resources
18
+ import json
19
+ import logging
20
+ from collections.abc import Iterator
21
+ from itertools import accumulate
22
+ from pathlib import Path
23
+ from pprint import pformat
24
+ from types import SimpleNamespace
25
+ from typing import Any
26
+
27
+ import datasets
28
+ import jsonlines
29
+ import numpy as np
30
+ import packaging.version
31
+ import torch
32
+ from datasets.table import embed_table_storage
33
+ from huggingface_hub import DatasetCard, DatasetCardData, HfApi
34
+ from huggingface_hub.errors import RevisionNotFoundError
35
+ from PIL import Image as PILImage
36
+ from torchvision import transforms
37
+
38
+ from lerobot.common.datasets.backward_compatibility import (
39
+ V21_MESSAGE,
40
+ BackwardCompatibilityError,
41
+ ForwardCompatibilityError,
42
+ )
43
+ from lerobot.common.robot_devices.robots.utils import Robot
44
+ from lerobot.common.utils.utils import is_valid_numpy_dtype_string
45
+ from lerobot.configs.types import DictLike, FeatureType, PolicyFeature
46
+
47
DEFAULT_CHUNK_SIZE = 1000  # Max number of episodes per chunk

# Metadata file locations, relative to the dataset root.
INFO_PATH = "meta/info.json"
EPISODES_PATH = "meta/episodes.jsonl"
STATS_PATH = "meta/stats.json"
EPISODES_STATS_PATH = "meta/episodes_stats.jsonl"
TASKS_PATH = "meta/tasks.jsonl"

# Templates for per-episode data/video/image file locations inside the dataset tree.
DEFAULT_VIDEO_PATH = "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4"
DEFAULT_PARQUET_PATH = "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet"
DEFAULT_IMAGE_PATH = "images/{image_key}/episode_{episode_index:06d}/frame_{frame_index:06d}.png"

# Minimal README template; the `{}` placeholder is filled with a section body by the caller.
DATASET_CARD_TEMPLATE = """
---
# Metadata will go there
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).

## {}

"""

# Features automatically recorded for every frame, on top of robot/camera features.
DEFAULT_FEATURES = {
    "timestamp": {"dtype": "float32", "shape": (1,), "names": None},
    "frame_index": {"dtype": "int64", "shape": (1,), "names": None},
    "episode_index": {"dtype": "int64", "shape": (1,), "names": None},
    "index": {"dtype": "int64", "shape": (1,), "names": None},
    "task_index": {"dtype": "int64", "shape": (1,), "names": None},
}
76
+
77
+
78
def flatten_dict(d: dict, parent_key: str = "", sep: str = "/") -> dict:
    """Flatten a nested dictionary structure by collapsing nested keys into one key with a separator.

    For example:
    ```
    >>> dct = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}`
    >>> print(flatten_dict(dct))
    {"a/b": 1, "a/c/d": 2, "e": 3}
    """
    flat = {}
    for key, value in d.items():
        full_key = f"{parent_key}{sep}{key}" if parent_key else key
        if isinstance(value, dict):
            # Recurse into sub-dicts, prefixing their keys with the current path.
            flat.update(flatten_dict(value, full_key, sep=sep))
        else:
            flat[full_key] = value
    return flat
95
+
96
+
97
def unflatten_dict(d: dict, sep: str = "/") -> dict:
    """Rebuild a nested dictionary from a flat one whose keys are `sep`-joined paths.

    Inverse of `flatten_dict`.
    """
    nested = {}
    for flat_key, value in d.items():
        *parents, leaf = flat_key.split(sep)
        cursor = nested
        for part in parents:
            # Walk down, creating intermediate dicts as needed.
            cursor = cursor.setdefault(part, {})
        cursor[leaf] = value
    return nested
108
+
109
+
110
def get_nested_item(obj: DictLike, flattened_key: str, sep: str = "/") -> Any:
    """Fetch `obj[k1][k2]...` for a flattened key "k1/k2/..." (single keys work too)."""
    value = obj
    for key in flattened_key.split(sep):
        value = value[key]
    return value
120
+
121
+
122
def serialize_dict(stats: dict[str, torch.Tensor | np.ndarray | dict]) -> dict:
    """Convert a (possibly nested) stats dict of tensors/arrays/scalars into plain JSON-serializable values.

    Raises:
        NotImplementedError: for any leaf value that is not a tensor, array, numpy scalar, or number.
    """

    def to_jsonable(value):
        # One leaf at a time: tensors/arrays become (nested) lists, numpy scalars
        # become Python scalars, plain numbers pass through.
        if isinstance(value, (torch.Tensor, np.ndarray)):
            return value.tolist()
        if isinstance(value, np.generic):
            return value.item()
        if isinstance(value, (int, float)):
            return value
        raise NotImplementedError(f"The value '{value}' of type '{type(value)}' is not supported.")

    return unflatten_dict({key: to_jsonable(value) for key, value in flatten_dict(stats).items()})
134
+
135
+
136
def embed_images(dataset: datasets.Dataset) -> datasets.Dataset:
    """Embed image bytes into the table before saving to parquet, then restore the original format."""
    # Remember the caller's format so it can be restored after the arrow round-trip.
    original_format = dataset.format
    arrow_dataset = dataset.with_format("arrow").map(embed_table_storage, batched=False)
    return arrow_dataset.with_format(**original_format)
143
+
144
+
145
def load_json(fpath: Path) -> Any:
    """Parse the JSON file at `fpath` and return its content."""
    return json.loads(Path(fpath).read_text())
148
+
149
+
150
def write_json(data: dict, fpath: Path) -> None:
    """Serialize `data` as pretty-printed JSON at `fpath`, creating parent directories as needed."""
    fpath.parent.mkdir(exist_ok=True, parents=True)
    serialized = json.dumps(data, indent=4, ensure_ascii=False)
    fpath.write_text(serialized)
154
+
155
+
156
def load_jsonlines(fpath: Path) -> list[Any]:
    """Read a JSON Lines file and return all its records as a list."""
    with jsonlines.open(fpath, "r") as reader:
        return list(reader)
159
+
160
+
161
def write_jsonlines(data: list[Any], fpath: Path) -> None:
    """Write an iterable of records to `fpath` in JSON Lines format, creating parent dirs.

    Note: `writer.write_all` expects an iterable of records, hence the `list[Any]` annotation.
    """
    fpath.parent.mkdir(exist_ok=True, parents=True)
    with jsonlines.open(fpath, "w") as writer:
        writer.write_all(data)
165
+
166
+
167
def append_jsonlines(data: dict, fpath: Path) -> None:
    """Append a single record to a JSON Lines file, creating parent dirs if needed."""
    fpath.parent.mkdir(exist_ok=True, parents=True)
    with jsonlines.open(fpath, "a") as writer:
        writer.write(data)
171
+
172
+
173
def write_info(info: dict, local_dir: Path):
    """Persist the dataset `info` dict to meta/info.json under `local_dir`."""
    write_json(info, local_dir / INFO_PATH)
175
+
176
+
177
def load_info(local_dir: Path) -> dict:
    """Load meta/info.json, normalizing every feature's JSON-list shape back to a tuple."""
    info = load_json(local_dir / INFO_PATH)
    for ft in info["features"].values():
        # JSON has no tuple type; shapes round-trip as lists and are restored here.
        ft["shape"] = tuple(ft["shape"])
    return info
182
+
183
+
184
def write_stats(stats: dict, local_dir: Path):
    """Serialize tensor/array stats to plain JSON values and write meta/stats.json."""
    serialized_stats = serialize_dict(stats)
    write_json(serialized_stats, local_dir / STATS_PATH)
187
+
188
+
189
def cast_stats_to_numpy(stats: dict) -> dict[str, dict[str, np.ndarray]]:
    """Convert every leaf of a (nested) stats dict into a numpy array."""
    stats = {key: np.array(value) for key, value in flatten_dict(stats).items()}
    return unflatten_dict(stats)
192
+
193
+
194
def load_stats(local_dir: Path) -> dict[str, dict[str, np.ndarray]]:
    """Load meta/stats.json as numpy arrays; returns None when the file does not exist."""
    if not (local_dir / STATS_PATH).exists():
        return None
    stats = load_json(local_dir / STATS_PATH)
    return cast_stats_to_numpy(stats)
199
+
200
+
201
def write_task(task_index: int, task: dict, local_dir: Path):
    """Append a {task_index, task} record to meta/tasks.jsonl."""
    task_dict = {
        "task_index": task_index,
        "task": task,
    }
    append_jsonlines(task_dict, local_dir / TASKS_PATH)
207
+
208
+
209
def load_tasks(local_dir: Path) -> tuple[dict, dict]:
    """Load meta/tasks.jsonl and return (task_index -> task, task -> task_index) mappings,
    with the first mapping ordered by task_index."""
    entries = sorted(load_jsonlines(local_dir / TASKS_PATH), key=lambda entry: entry["task_index"])
    tasks = {entry["task_index"]: entry["task"] for entry in entries}
    task_to_task_index = {task: task_index for task_index, task in tasks.items()}
    return tasks, task_to_task_index
214
+
215
+
216
def write_episode(episode: dict, local_dir: Path):
    """Append one episode record to meta/episodes.jsonl."""
    append_jsonlines(episode, local_dir / EPISODES_PATH)
218
+
219
+
220
def load_episodes(local_dir: Path) -> dict:
    """Load meta/episodes.jsonl as an episode_index -> episode-record dict, ordered by index."""
    episodes = load_jsonlines(local_dir / EPISODES_PATH)
    return {item["episode_index"]: item for item in sorted(episodes, key=lambda x: x["episode_index"])}
223
+
224
+
225
def write_episode_stats(episode_index: int, episode_stats: dict, local_dir: Path):
    """Append one episode's serialized stats to meta/episodes_stats.jsonl."""
    # We wrap episode_stats in a dictionary since `episode_stats["episode_index"]`
    # is a dictionary of stats and not an integer.
    episode_stats = {"episode_index": episode_index, "stats": serialize_dict(episode_stats)}
    append_jsonlines(episode_stats, local_dir / EPISODES_STATS_PATH)
230
+
231
+
232
def load_episodes_stats(local_dir: Path) -> dict:
    """Load meta/episodes_stats.jsonl as an episode_index -> numpy-stats dict, ordered by index."""
    episodes_stats = load_jsonlines(local_dir / EPISODES_STATS_PATH)
    return {
        item["episode_index"]: cast_stats_to_numpy(item["stats"])
        for item in sorted(episodes_stats, key=lambda x: x["episode_index"])
    }
238
+
239
+
240
def backward_compatible_episodes_stats(
    stats: dict[str, dict[str, np.ndarray]], episodes: list[int]
) -> dict[str, dict[str, np.ndarray]]:
    """Replicate a single dataset-level stats dict for each episode index.

    Used for older datasets that shipped one global stats blob instead of
    per-episode stats. Note every episode maps to the SAME stats object.
    """
    return {ep_idx: stats for ep_idx in episodes}
244
+
245
+
246
def load_image_as_numpy(
    fpath: str | Path, dtype: np.dtype = np.float32, channel_first: bool = True
) -> np.ndarray:
    """Load an image file as an RGB numpy array.

    Args:
        fpath: Path to the image file.
        dtype: Output dtype; floating dtypes are additionally rescaled from [0, 255] to [0, 1].
        channel_first: If True, return (C, H, W) instead of PIL's native (H, W, C).
    """
    img = PILImage.open(fpath).convert("RGB")
    img_array = np.array(img, dtype=dtype)
    if channel_first:  # (H, W, C) -> (C, H, W)
        img_array = np.transpose(img_array, (2, 0, 1))
    if np.issubdtype(dtype, np.floating):
        # Normalize to [0, 1] only for float outputs; integer outputs stay in [0, 255].
        img_array /= 255.0
    return img_array
256
+
257
+
258
def hf_transform_to_torch(items_dict: dict[torch.Tensor | None]):
    """Get a transform function that convert items from Hugging Face dataset (pyarrow)
    to torch tensors. Importantly, images are converted from PIL, which corresponds to
    a channel last representation (h w c) of uint8 type, to a torch image representation
    with channel first (c h w) of float32 type in range [0,1].

    Strings pass through unchanged, `None` columns are left as-is, and every other
    value goes through `torch.tensor`. Mutates and returns `items_dict`.
    """
    # Hoisted out of the loop: ToTensor is stateless, so one instance serves all keys.
    to_tensor = transforms.ToTensor()
    for key in items_dict:
        first_item = items_dict[key][0]
        if isinstance(first_item, PILImage.Image):
            items_dict[key] = [to_tensor(img) for img in items_dict[key]]
        elif first_item is None:
            pass
        else:
            items_dict[key] = [x if isinstance(x, str) else torch.tensor(x) for x in items_dict[key]]
    return items_dict
274
+
275
+
276
def is_valid_version(version: str) -> bool:
    """Return True when `version` parses as a valid PEP 440 version string."""
    try:
        packaging.version.parse(version)
    except packaging.version.InvalidVersion:
        return False
    return True
282
+
283
+
284
def check_version_compatibility(
    repo_id: str,
    version_to_check: str | packaging.version.Version,
    current_version: str | packaging.version.Version,
    enforce_breaking_major: bool = True,
) -> None:
    """Compare a dataset's codebase version against the current one.

    Raises BackwardCompatibilityError when the checked major version is older
    than the current one (unless `enforce_breaking_major` is False); logs a
    warning when only the minor version is older.

    NOTE(review): the `elif` warns whenever `minor` is lower, even if `major` is
    HIGHER than current (e.g. check 3.0 vs current 2.1) — confirm this is intended;
    forward-version handling appears to live in `get_safe_version` instead.
    """
    v_check = (
        packaging.version.parse(version_to_check)
        if not isinstance(version_to_check, packaging.version.Version)
        else version_to_check
    )
    v_current = (
        packaging.version.parse(current_version)
        if not isinstance(current_version, packaging.version.Version)
        else current_version
    )
    if v_check.major < v_current.major and enforce_breaking_major:
        raise BackwardCompatibilityError(repo_id, v_check)
    elif v_check.minor < v_current.minor:
        logging.warning(V21_MESSAGE.format(repo_id=repo_id, version=v_check))
304
+
305
+
306
def get_repo_versions(repo_id: str) -> list[packaging.version.Version]:
    """Returns available valid versions (branches and tags) on given repo."""
    api = HfApi()
    repo_refs = api.list_repo_refs(repo_id, repo_type="dataset")
    repo_refs = [b.name for b in repo_refs.branches + repo_refs.tags]
    repo_versions = []
    for ref in repo_refs:
        # Refs that are not version-like (e.g. "main") are silently skipped.
        with contextlib.suppress(packaging.version.InvalidVersion):
            repo_versions.append(packaging.version.parse(ref))

    return repo_versions
317
+
318
+
319
def get_safe_version(repo_id: str, version: str | packaging.version.Version) -> str:
    """
    Returns the version if available on repo or the latest compatible one.
    Otherwise, will throw a `CompatibilityError`.

    Resolution order: exact match -> newest same-major, not-newer-minor version
    (with a warning) -> BackwardCompatibilityError if only older majors exist ->
    ForwardCompatibilityError if only newer versions exist.
    """
    target_version = (
        packaging.version.parse(version) if not isinstance(version, packaging.version.Version) else version
    )
    hub_versions = get_repo_versions(repo_id)

    if not hub_versions:
        raise RevisionNotFoundError(
            f"""Your dataset must be tagged with a codebase version.
            Assuming _version_ is the codebase_version value in the info.json, you can run this:
            ```python
            from huggingface_hub import HfApi

            hub_api = HfApi()
            hub_api.create_tag("{repo_id}", tag="_version_", repo_type="dataset")
            ```
            """
        )

    if target_version in hub_versions:
        return f"v{target_version}"

    compatibles = [
        v for v in hub_versions if v.major == target_version.major and v.minor <= target_version.minor
    ]
    if compatibles:
        return_version = max(compatibles)
        if return_version < target_version:
            logging.warning(f"Revision {version} for {repo_id} not found, using version v{return_version}")
        return f"v{return_version}"

    lower_major = [v for v in hub_versions if v.major < target_version.major]
    if lower_major:
        raise BackwardCompatibilityError(repo_id, max(lower_major))

    upper_versions = [v for v in hub_versions if v > target_version]
    # At this point every hub version must be strictly newer than the target.
    assert len(upper_versions) > 0
    raise ForwardCompatibilityError(repo_id, min(upper_versions))
361
+
362
+
363
def get_hf_features_from_features(features: dict) -> datasets.Features:
    """Build a Hugging Face `datasets.Features` schema from LeRobot feature specs.

    Videos are stored outside the table and therefore skipped; images map to
    `datasets.Image`; scalars and 1D-to-5D arrays map to the matching HF types.

    Raises:
        ValueError: for shapes with more than 5 dimensions.
    """
    hf_features = {}
    for key, ft in features.items():
        if ft["dtype"] == "video":
            continue
        elif ft["dtype"] == "image":
            hf_features[key] = datasets.Image()
        elif ft["shape"] == (1,):
            hf_features[key] = datasets.Value(dtype=ft["dtype"])
        elif len(ft["shape"]) == 1:
            hf_features[key] = datasets.Sequence(
                length=ft["shape"][0], feature=datasets.Value(dtype=ft["dtype"])
            )
        elif len(ft["shape"]) == 2:
            hf_features[key] = datasets.Array2D(shape=ft["shape"], dtype=ft["dtype"])
        elif len(ft["shape"]) == 3:
            hf_features[key] = datasets.Array3D(shape=ft["shape"], dtype=ft["dtype"])
        elif len(ft["shape"]) == 4:
            hf_features[key] = datasets.Array4D(shape=ft["shape"], dtype=ft["dtype"])
        elif len(ft["shape"]) == 5:
            hf_features[key] = datasets.Array5D(shape=ft["shape"], dtype=ft["dtype"])
        else:
            raise ValueError(f"Corresponding feature is not valid: {ft}")

    return datasets.Features(hf_features)
388
+
389
+
390
def get_features_from_robot(robot: Robot, use_videos: bool = True) -> dict:
    """Assemble the full feature spec for a robot: motors + cameras + default frame features.

    Camera features get dtype "video" or "image" depending on `use_videos`.
    """
    camera_ft = {}
    if robot.cameras:
        camera_ft = {
            key: {"dtype": "video" if use_videos else "image", **ft}
            for key, ft in robot.camera_features.items()
        }
    return {**robot.motor_features, **camera_ft, **DEFAULT_FEATURES}
398
+
399
+
400
def dataset_to_policy_features(features: dict[str, dict]) -> dict[str, PolicyFeature]:
    """Map dataset feature specs to `PolicyFeature`s keyed by feature name.

    Visual features must be 3D; channel-last shapes are normalized to channel-first.
    Keys that match no known category (visual / env / state / action) are skipped.

    Raises:
        ValueError: when an image/video feature does not have exactly 3 dims.
    """
    # TODO(aliberts): Implement "type" in dataset features and simplify this
    policy_features = {}
    for key, ft in features.items():
        shape = ft["shape"]
        # Renamed from `type` to avoid shadowing the builtin.
        feature_type = None
        if ft["dtype"] in ["image", "video"]:
            feature_type = FeatureType.VISUAL
            if len(shape) != 3:
                raise ValueError(f"Number of dimensions of {key} != 3 (shape={shape})")

            names = ft["names"]
            # Backward compatibility for "channel" which is an error introduced in LeRobotDataset v2.0 for ported datasets.
            if names[2] in ["channel", "channels"]:  # (h, w, c) -> (c, h, w)
                shape = (shape[2], shape[0], shape[1])
        elif key == "observation.environment_state":
            feature_type = FeatureType.ENV
        elif key.startswith("observation"):
            feature_type = FeatureType.STATE
        elif key == "action":
            feature_type = FeatureType.ACTION
        else:
            continue

        policy_features[key] = PolicyFeature(
            type=feature_type,
            shape=shape,
        )

    return policy_features
429
+
430
+
431
def create_empty_dataset_info(
    codebase_version: str,
    fps: int,
    robot_type: str,
    features: dict,
    use_videos: bool,
) -> dict:
    """Build the initial meta/info.json payload for a brand-new dataset (all counters at 0)."""
    return {
        "codebase_version": codebase_version,
        "robot_type": robot_type,
        "total_episodes": 0,
        "total_frames": 0,
        "total_tasks": 0,
        "total_videos": 0,
        "total_chunks": 0,
        "chunks_size": DEFAULT_CHUNK_SIZE,
        "fps": fps,
        "splits": {},
        "data_path": DEFAULT_PARQUET_PATH,
        # Video path template is only meaningful when frames are stored as videos.
        "video_path": DEFAULT_VIDEO_PATH if use_videos else None,
        "features": features,
    }
453
+
454
+
455
+ def get_episode_data_index(
456
+ episode_dicts: dict[dict], episodes: list[int] | None = None
457
+ ) -> dict[str, torch.Tensor]:
458
+ episode_lengths = {ep_idx: ep_dict["length"] for ep_idx, ep_dict in episode_dicts.items()}
459
+ if episodes is not None:
460
+ episode_lengths = {ep_idx: episode_lengths[ep_idx] for ep_idx in episodes}
461
+
462
+ cumulative_lengths = list(accumulate(episode_lengths.values()))
463
+ return {
464
+ "from": torch.LongTensor([0] + cumulative_lengths[:-1]),
465
+ "to": torch.LongTensor(cumulative_lengths),
466
+ }
467
+
468
+
469
def check_timestamps_sync(
    timestamps: np.ndarray,
    episode_indices: np.ndarray,
    episode_data_index: dict[str, np.ndarray],
    fps: int,
    tolerance_s: float,
    raise_value_error: bool = True,
) -> bool:
    """
    This check is to make sure that each timestamp is separated from the next by (1/fps) +/- tolerance
    to account for possible numerical error.

    Args:
        timestamps (np.ndarray): Array of timestamps in seconds.
        episode_indices (np.ndarray): Array indicating the episode index for each timestamp.
        episode_data_index (dict[str, np.ndarray]): A dictionary that includes 'to',
            which identifies indices for the end of each episode.
        fps (int): Frames per second. Used to check the expected difference between consecutive timestamps.
        tolerance_s (float): Allowed deviation from the expected (1/fps) difference.
        raise_value_error (bool): Whether to raise a ValueError if the check fails.

    Returns:
        bool: True if all checked timestamp differences lie within tolerance, False otherwise.

    Raises:
        ValueError: If the check fails and `raise_value_error` is True.
    """
    if timestamps.shape != episode_indices.shape:
        raise ValueError(
            "timestamps and episode_indices should have the same shape. "
            f"Found {timestamps.shape=} and {episode_indices.shape=}."
        )

    # Consecutive differences: diffs[i] spans timestamps[i] -> timestamps[i+1].
    diffs = np.diff(timestamps)
    within_tolerance = np.abs(diffs - (1.0 / fps)) <= tolerance_s

    # Mask to ignore differences at the boundaries between episodes:
    # the diff starting at the last frame of each episode crosses into the next
    # episode and is not expected to be ~1/fps.
    mask = np.ones(len(diffs), dtype=bool)
    ignored_diffs = episode_data_index["to"][:-1] - 1  # indices at the end of each episode
    mask[ignored_diffs] = False
    filtered_within_tolerance = within_tolerance[mask]

    # Check if all remaining diffs are within tolerance
    if not np.all(filtered_within_tolerance):
        # Track original indices before masking so the error message reports the
        # offending pairs in the unmasked frame numbering.
        original_indices = np.arange(len(diffs))
        filtered_indices = original_indices[mask]
        outside_tolerance_filtered_indices = np.nonzero(~filtered_within_tolerance)[0]
        outside_tolerance_indices = filtered_indices[outside_tolerance_filtered_indices]

        outside_tolerances = []
        for idx in outside_tolerance_indices:
            entry = {
                "timestamps": [timestamps[idx], timestamps[idx + 1]],
                "diff": diffs[idx],
                # numpy scalars carry .item(); plain Python ints do not.
                "episode_index": episode_indices[idx].item()
                if hasattr(episode_indices[idx], "item")
                else episode_indices[idx],
            }
            outside_tolerances.append(entry)

        if raise_value_error:
            raise ValueError(
                f"""One or several timestamps unexpectedly violate the tolerance inside episode range.
            This might be due to synchronization issues during data collection.
            \n{pformat(outside_tolerances)}"""
            )
        return False

    return True
540
+
541
+
542
def check_delta_timestamps(
    delta_timestamps: dict[str, list[float]], fps: int, tolerance_s: float, raise_value_error: bool = True
) -> bool:
    """This will check if all the values in delta_timestamps are multiples of 1/fps +/- tolerance.
    This is to ensure that these delta_timestamps added to any timestamp from a dataset will themselves be
    actual timestamps from the dataset.
    """
    outside_tolerance = {}
    for key, delta_ts in delta_timestamps.items():
        # Distance (in seconds) of each delta to the nearest exact frame multiple.
        offenders = [ts for ts in delta_ts if abs(ts * fps - round(ts * fps)) / fps > tolerance_s]
        if offenders:
            outside_tolerance[key] = offenders

    if not outside_tolerance:
        return True

    if raise_value_error:
        raise ValueError(
            f"""
            The following delta_timestamps are found outside of tolerance range.
            Please make sure they are multiples of 1/{fps} +/- tolerance and adjust
            their values accordingly.
            \n{pformat(outside_tolerance)}
            """
        )
    return False
570
+
571
+
572
def get_delta_indices(delta_timestamps: dict[str, list[float]], fps: int) -> dict[str, list[int]]:
    """Convert per-key delta timestamps (seconds) into frame-index offsets at the given fps."""
    return {
        key: [round(delta * fps) for delta in delta_ts] for key, delta_ts in delta_timestamps.items()
    }
578
+
579
+
580
def cycle(iterable):
    """The equivalent of itertools.cycle, but safe for Pytorch dataloaders.

    Re-iterates `iterable` from scratch each time it is exhausted instead of
    caching items. See https://github.com/pytorch/pytorch/issues/23900 for
    information on why itertools.cycle is not safe.
    """
    while True:
        yield from iterable
591
+
592
+
593
def create_branch(repo_id, *, branch: str, repo_type: str | None = None) -> None:
    """Create a branch on a existing Hugging Face repo. Delete the branch if it already
    exists before creating it.
    """
    api = HfApi()

    branches = api.list_repo_refs(repo_id, repo_type=repo_type).branches
    refs = [branch.ref for branch in branches]
    ref = f"refs/heads/{branch}"
    if ref in refs:
        # Recreate from scratch: drop the stale branch first.
        api.delete_branch(repo_id, repo_type=repo_type, branch=branch)

    api.create_branch(repo_id, repo_type=repo_type, branch=branch)
606
+
607
+
608
def create_lerobot_dataset_card(
    tags: list | None = None,
    dataset_info: dict | None = None,
    **kwargs,
) -> DatasetCard:
    """
    Keyword arguments will be used to replace values in ./lerobot/common/datasets/card_template.md.
    Note: If specified, license must be one of https://huggingface.co/docs/hub/repositories-licenses.
    """
    card_tags = ["LeRobot"]

    if tags:
        card_tags += tags
    if dataset_info:
        # Render info.json into the card so the structure is browsable on the Hub.
        dataset_structure = "[meta/info.json](meta/info.json):\n"
        dataset_structure += f"```json\n{json.dumps(dataset_info, indent=4)}\n```\n"
        kwargs = {**kwargs, "dataset_structure": dataset_structure}
    card_data = DatasetCardData(
        license=kwargs.get("license"),
        tags=card_tags,
        task_categories=["robotics"],
        configs=[
            {
                "config_name": "default",
                "data_files": "data/*/*.parquet",
            }
        ],
    )

    card_template = (importlib.resources.files("lerobot.common.datasets") / "card_template.md").read_text()

    return DatasetCard.from_template(
        card_data=card_data,
        template_str=card_template,
        **kwargs,
    )
644
+
645
+
646
class IterableNamespace(SimpleNamespace):
    """
    A namespace object that supports both dictionary-like iteration and dot notation access.
    Automatically converts nested dictionaries into IterableNamespaces.

    This class extends SimpleNamespace to provide:
    - Dictionary-style iteration over keys
    - Access to items via both dot notation (obj.key) and brackets (obj["key"])
    - Dictionary-like methods: items(), keys(), values()
    - Recursive conversion of nested dictionaries

    Args:
        dictionary: Optional dictionary to initialize the namespace
        **kwargs: Additional keyword arguments passed to SimpleNamespace

    Examples:
        >>> data = {"name": "Alice", "details": {"age": 25}}
        >>> ns = IterableNamespace(data)
        >>> ns.name
        'Alice'
        >>> ns.details.age
        25
        >>> list(ns.keys())
        ['name', 'details']
        >>> for key, value in ns.items():
        ...     print(f"{key}: {value}")
        name: Alice
        details: IterableNamespace(age=25)
    """

    def __init__(self, dictionary: dict[str, Any] = None, **kwargs):
        super().__init__(**kwargs)
        for key, value in (dictionary or {}).items():
            # Sub-dicts become namespaces too, recursively.
            converted = IterableNamespace(value) if isinstance(value, dict) else value
            setattr(self, key, converted)

    def __iter__(self) -> Iterator[str]:
        yield from self.__dict__

    def __getitem__(self, key: str) -> Any:
        return self.__dict__[key]

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def keys(self):
        return self.__dict__.keys()
699
+
700
+
701
def validate_frame(frame: dict, features: dict):
    """Validate one frame dict against the dataset feature specs.

    Collects presence, task-string, and per-feature dtype/shape errors into a
    single message and raises a ValueError when anything is wrong.
    """
    optional_features = {"timestamp"}
    expected_features = (set(features) - set(DEFAULT_FEATURES.keys())) | {"task"}
    actual_features = set(frame.keys())

    errors = [validate_features_presence(actual_features, expected_features, optional_features)]

    if "task" in frame:
        errors.append(validate_feature_string("task", frame["task"]))

    # Only validate features that are both provided and known; "task" is handled above.
    for name in (actual_features & (expected_features | optional_features)) - {"task"}:
        errors.append(validate_feature_dtype_and_shape(name, features[name], frame[name]))

    error_message = "".join(errors)
    if error_message:
        raise ValueError(error_message)
717
+
718
+
719
def validate_features_presence(
    actual_features: set[str], expected_features: set[str], optional_features: set[str]
):
    """Return an error message ("" when fine) describing missing/extra features in a frame."""
    missing_features = expected_features - actual_features
    extra_features = actual_features - (expected_features | optional_features)

    if not missing_features and not extra_features:
        return ""

    error_message = "Feature mismatch in `frame` dictionary:\n"
    if missing_features:
        error_message += f"Missing features: {missing_features}\n"
    if extra_features:
        error_message += f"Extra features: {extra_features}\n"
    return error_message
734
+
735
+
736
def validate_feature_dtype_and_shape(name: str, feature: dict, value: np.ndarray | PILImage.Image | str):
    """Dispatch validation of one feature value based on its declared dtype.

    Returns an error string ("" when valid). Raises NotImplementedError for a
    dtype declaration that no validator handles.
    """
    declared_dtype = feature["dtype"]
    declared_shape = feature["shape"]
    if is_valid_numpy_dtype_string(declared_dtype):
        return validate_feature_numpy_array(name, declared_dtype, declared_shape, value)
    if declared_dtype in ("image", "video"):
        return validate_feature_image_or_video(name, declared_shape, value)
    if declared_dtype == "string":
        return validate_feature_string(name, value)
    raise NotImplementedError(f"The feature dtype '{declared_dtype}' is not implemented yet.")
747
+
748
+
749
def validate_feature_numpy_array(
    name: str, expected_dtype: str, expected_shape: list[int], value: np.ndarray
):
    """Check that `value` is an np.ndarray with the declared dtype and shape.

    Args:
        name: Feature name, used in error messages.
        expected_dtype: Numpy dtype string declared for the feature.
        expected_shape: Declared shape (a list, as loaded from dataset metadata).
        value: The candidate value to validate.

    Returns:
        An empty string when valid, otherwise a message describing each problem.
    """
    error_message = ""
    if isinstance(value, np.ndarray):
        actual_dtype = value.dtype
        actual_shape = value.shape

        if actual_dtype != np.dtype(expected_dtype):
            error_message += f"The feature '{name}' of dtype '{actual_dtype}' is not of the expected dtype '{expected_dtype}'.\n"

        # Bug fix: `value.shape` is a tuple while the declared shape is a list,
        # and in Python a tuple never compares equal to a list — so every array
        # was reported as having the wrong shape. Normalize before comparing.
        if actual_shape != tuple(expected_shape):
            error_message += f"The feature '{name}' of shape '{actual_shape}' does not have the expected shape '{expected_shape}'.\n"
    else:
        error_message += f"The feature '{name}' is not a 'np.ndarray'. Expected type is '{expected_dtype}', but type '{type(value)}' provided instead.\n"

    return error_message
766
+
767
+
768
def validate_feature_image_or_video(name: str, expected_shape: list[str], value: np.ndarray | PILImage.Image):
    """Check an image/video frame is a PIL image or a 3D array in CHW or HWC layout.

    Note: pixel-range checks ([0, 1] for float, [0, 255] for uint8) are done by
    the image writer threads, not here.
    """
    if isinstance(value, PILImage.Image):
        # PIL images are accepted as-is; only raw arrays get a shape check.
        return ""
    if isinstance(value, np.ndarray):
        c, h, w = expected_shape
        actual = value.shape
        if len(actual) == 3 and actual in ((c, h, w), (h, w, c)):
            return ""
        return f"The feature '{name}' of shape '{actual}' does not have the expected shape '{(c, h, w)}' or '{(h, w, c)}'.\n"
    return f"The feature '{name}' is expected to be of type 'PIL.Image' or 'np.ndarray' channel first or channel last, but type '{type(value)}' provided instead.\n"
782
+
783
+
784
def validate_feature_string(name: str, value: str):
    """Return "" when `value` is a str, else a one-line error description."""
    if isinstance(value, str):
        return ""
    return f"The feature '{name}' is expected to be of type 'str', but type '{type(value)}' provided instead.\n"
788
+
789
+
790
def validate_episode_buffer(episode_buffer: dict, total_episodes: int, features: dict):
    """Sanity-check an episode buffer before it is written to the dataset.

    Args:
        episode_buffer: Accumulated frame data plus bookkeeping keys
            ("size", "task", "episode_index").
        total_episodes: Number of episodes already in the dataset; the buffer's
            episode_index must continue this numbering.
        features: The dataset's feature spec the buffer keys must match.

    Raises:
        ValueError: If required keys are missing, the buffer holds no frames,
            or its feature keys don't match `features`.
        NotImplementedError: If episode_index doesn't equal `total_episodes`.
    """
    if "size" not in episode_buffer:
        raise ValueError("size key not found in episode_buffer")

    if "task" not in episode_buffer:
        raise ValueError("task key not found in episode_buffer")

    if episode_buffer["episode_index"] != total_episodes:
        # TODO(aliberts): Add option to use existing episode_index
        raise NotImplementedError(
            "You might have manually provided the episode_buffer with an episode_index that doesn't "
            "match the total number of episodes already in the dataset. This is not supported for now."
        )

    if episode_buffer["size"] == 0:
        raise ValueError("You must add one or several frames with `add_frame` before calling `add_episode`.")

    buffer_keys = set(episode_buffer.keys()) - {"task", "size"}
    if buffer_keys != set(features):
        # Bug fix: the message pieces were adjacent f-strings with no separator,
        # producing one unreadable run-on line. Join them with newlines.
        raise ValueError(
            "Features from `episode_buffer` don't match the ones in `features`.\n"
            f"In episode_buffer not in features: {buffer_keys - set(features)}\n"
            f"In features not in episode_buffer: {set(features) - buffer_keys}"
        )
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py ADDED
@@ -0,0 +1,884 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ This script is for internal use to convert all datasets under the 'lerobot' hub user account to v2.
19
+
20
+ Note: Since the original Aloha datasets don't use shadow motors, you need to comment those out in
21
+ lerobot/configs/robot/aloha.yaml before running this script.
22
+ """
23
+
24
+ import traceback
25
+ from pathlib import Path
26
+ from textwrap import dedent
27
+
28
+ from lerobot import available_datasets
29
+ from lerobot.common.datasets.v2.convert_dataset_v1_to_v2 import convert_dataset
30
+ from lerobot.common.robot_devices.robots.configs import AlohaRobotConfig
31
+
32
LOCAL_DIR = Path("data/")

# spellchecker:off
# Shared metadata templates; each DATASETS entry merges one of these with its
# own task description.
ALOHA_MOBILE_INFO = {
    "robot_config": AlohaRobotConfig(),
    "license": "mit",
    "url": "https://mobile-aloha.github.io/",
    "paper": "https://arxiv.org/abs/2401.02117",
    "citation_bibtex": dedent(r"""
        @inproceedings{fu2024mobile,
        author = {Fu, Zipeng and Zhao, Tony Z. and Finn, Chelsea},
        title = {Mobile ALOHA: Learning Bimanual Mobile Manipulation with Low-Cost Whole-Body Teleoperation},
        booktitle = {arXiv},
        year = {2024},
        }""").lstrip(),
}
ALOHA_STATIC_INFO = {
    "robot_config": AlohaRobotConfig(),
    "license": "mit",
    "url": "https://tonyzhaozh.github.io/aloha/",
    "paper": "https://arxiv.org/abs/2304.13705",
    "citation_bibtex": dedent(r"""
        @article{Zhao2023LearningFB,
        title={Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware},
        author={Tony Zhao and Vikash Kumar and Sergey Levine and Chelsea Finn},
        journal={RSS},
        year={2023},
        volume={abs/2304.13705},
        url={https://arxiv.org/abs/2304.13705}
        }""").lstrip(),
}
PUSHT_INFO = {
    "license": "mit",
    "url": "https://diffusion-policy.cs.columbia.edu/",
    "paper": "https://arxiv.org/abs/2303.04137v5",
    "citation_bibtex": dedent(r"""
        @article{chi2024diffusionpolicy,
        author = {Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song},
        title ={Diffusion Policy: Visuomotor Policy Learning via Action Diffusion},
        journal = {The International Journal of Robotics Research},
        year = {2024},
        }""").lstrip(),
}
XARM_INFO = {
    "license": "mit",
    "url": "https://www.nicklashansen.com/td-mpc/",
    "paper": "https://arxiv.org/abs/2203.04955",
    # Consistency fix: this entry was the only one not using the
    # `}""").lstrip()` pattern, which left a leading and trailing newline in
    # the bibtex string unlike every other *_INFO constant.
    "citation_bibtex": dedent(r"""
        @inproceedings{Hansen2022tdmpc,
        title={Temporal Difference Learning for Model Predictive Control},
        author={Nicklas Hansen and Xiaolong Wang and Hao Su},
        booktitle={ICML},
        year={2022}
        }""").lstrip(),
}
UNITREEH_INFO = {
    "license": "apache-2.0",
}
91
+
92
+ DATASETS = {
93
+ "aloha_mobile_cabinet": {
94
+ "single_task": "Open the top cabinet, store the pot inside it then close the cabinet.",
95
+ **ALOHA_MOBILE_INFO,
96
+ },
97
+ "aloha_mobile_chair": {
98
+ "single_task": "Push the chairs in front of the desk to place them against it.",
99
+ **ALOHA_MOBILE_INFO,
100
+ },
101
+ "aloha_mobile_elevator": {
102
+ "single_task": "Take the elevator to the 1st floor.",
103
+ **ALOHA_MOBILE_INFO,
104
+ },
105
+ "aloha_mobile_shrimp": {
106
+ "single_task": "Sauté the raw shrimp on both sides, then serve it in the bowl.",
107
+ **ALOHA_MOBILE_INFO,
108
+ },
109
+ "aloha_mobile_wash_pan": {
110
+ "single_task": "Pick up the pan, rinse it in the sink and then place it in the drying rack.",
111
+ **ALOHA_MOBILE_INFO,
112
+ },
113
+ "aloha_mobile_wipe_wine": {
114
+ "single_task": "Pick up the wet cloth on the faucet and use it to clean the spilled wine on the table and underneath the glass.",
115
+ **ALOHA_MOBILE_INFO,
116
+ },
117
+ "aloha_static_battery": {
118
+ "single_task": "Place the battery into the slot of the remote controller.",
119
+ **ALOHA_STATIC_INFO,
120
+ },
121
+ "aloha_static_candy": {"single_task": "Pick up the candy and unwrap it.", **ALOHA_STATIC_INFO},
122
+ "aloha_static_coffee": {
123
+ "single_task": "Place the coffee capsule inside the capsule container, then place the cup onto the center of the cup tray, then push the 'Hot Water' and 'Travel Mug' buttons.",
124
+ **ALOHA_STATIC_INFO,
125
+ },
126
+ "aloha_static_coffee_new": {
127
+ "single_task": "Place the coffee capsule inside the capsule container, then place the cup onto the center of the cup tray.",
128
+ **ALOHA_STATIC_INFO,
129
+ },
130
+ "aloha_static_cups_open": {
131
+ "single_task": "Pick up the plastic cup and open its lid.",
132
+ **ALOHA_STATIC_INFO,
133
+ },
134
+ "aloha_static_fork_pick_up": {
135
+ "single_task": "Pick up the fork and place it on the plate.",
136
+ **ALOHA_STATIC_INFO,
137
+ },
138
+ "aloha_static_pingpong_test": {
139
+ "single_task": "Transfer one of the two balls in the right glass into the left glass, then transfer it back to the right glass.",
140
+ **ALOHA_STATIC_INFO,
141
+ },
142
+ "aloha_static_pro_pencil": {
143
+ "single_task": "Pick up the pencil with the right arm, hand it over to the left arm then place it back onto the table.",
144
+ **ALOHA_STATIC_INFO,
145
+ },
146
+ "aloha_static_screw_driver": {
147
+ "single_task": "Pick up the screwdriver with the right arm, hand it over to the left arm then place it into the cup.",
148
+ **ALOHA_STATIC_INFO,
149
+ },
150
+ "aloha_static_tape": {
151
+ "single_task": "Cut a small piece of tape from the tape dispenser then place it on the cardboard box's edge.",
152
+ **ALOHA_STATIC_INFO,
153
+ },
154
+ "aloha_static_thread_velcro": {
155
+ "single_task": "Pick up the velcro cable tie with the left arm, then insert the end of the velcro tie into the other end's loop with the right arm.",
156
+ **ALOHA_STATIC_INFO,
157
+ },
158
+ "aloha_static_towel": {
159
+ "single_task": "Pick up a piece of paper towel and place it on the spilled liquid.",
160
+ **ALOHA_STATIC_INFO,
161
+ },
162
+ "aloha_static_vinh_cup": {
163
+ "single_task": "Pick up the plastic cup with the right arm, then pop its lid open with the left arm.",
164
+ **ALOHA_STATIC_INFO,
165
+ },
166
+ "aloha_static_vinh_cup_left": {
167
+ "single_task": "Pick up the plastic cup with the left arm, then pop its lid open with the right arm.",
168
+ **ALOHA_STATIC_INFO,
169
+ },
170
+ "aloha_static_ziploc_slide": {"single_task": "Slide open the ziploc bag.", **ALOHA_STATIC_INFO},
171
+ "aloha_sim_insertion_scripted": {"single_task": "Insert the peg into the socket.", **ALOHA_STATIC_INFO},
172
+ "aloha_sim_insertion_scripted_image": {
173
+ "single_task": "Insert the peg into the socket.",
174
+ **ALOHA_STATIC_INFO,
175
+ },
176
+ "aloha_sim_insertion_human": {"single_task": "Insert the peg into the socket.", **ALOHA_STATIC_INFO},
177
+ "aloha_sim_insertion_human_image": {
178
+ "single_task": "Insert the peg into the socket.",
179
+ **ALOHA_STATIC_INFO,
180
+ },
181
+ "aloha_sim_transfer_cube_scripted": {
182
+ "single_task": "Pick up the cube with the right arm and transfer it to the left arm.",
183
+ **ALOHA_STATIC_INFO,
184
+ },
185
+ "aloha_sim_transfer_cube_scripted_image": {
186
+ "single_task": "Pick up the cube with the right arm and transfer it to the left arm.",
187
+ **ALOHA_STATIC_INFO,
188
+ },
189
+ "aloha_sim_transfer_cube_human": {
190
+ "single_task": "Pick up the cube with the right arm and transfer it to the left arm.",
191
+ **ALOHA_STATIC_INFO,
192
+ },
193
+ "aloha_sim_transfer_cube_human_image": {
194
+ "single_task": "Pick up the cube with the right arm and transfer it to the left arm.",
195
+ **ALOHA_STATIC_INFO,
196
+ },
197
+ "pusht": {"single_task": "Push the T-shaped block onto the T-shaped target.", **PUSHT_INFO},
198
+ "pusht_image": {"single_task": "Push the T-shaped block onto the T-shaped target.", **PUSHT_INFO},
199
+ "unitreeh1_fold_clothes": {"single_task": "Fold the sweatshirt.", **UNITREEH_INFO},
200
+ "unitreeh1_rearrange_objects": {"single_task": "Put the object into the bin.", **UNITREEH_INFO},
201
+ "unitreeh1_two_robot_greeting": {
202
+ "single_task": "Greet the other robot with a high five.",
203
+ **UNITREEH_INFO,
204
+ },
205
+ "unitreeh1_warehouse": {
206
+ "single_task": "Grab the spray paint on the shelf and place it in the bin on top of the robot dog.",
207
+ **UNITREEH_INFO,
208
+ },
209
+ "xarm_lift_medium": {"single_task": "Pick up the cube and lift it.", **XARM_INFO},
210
+ "xarm_lift_medium_image": {"single_task": "Pick up the cube and lift it.", **XARM_INFO},
211
+ "xarm_lift_medium_replay": {"single_task": "Pick up the cube and lift it.", **XARM_INFO},
212
+ "xarm_lift_medium_replay_image": {"single_task": "Pick up the cube and lift it.", **XARM_INFO},
213
+ "xarm_push_medium": {"single_task": "Push the cube onto the target.", **XARM_INFO},
214
+ "xarm_push_medium_image": {"single_task": "Push the cube onto the target.", **XARM_INFO},
215
+ "xarm_push_medium_replay": {"single_task": "Push the cube onto the target.", **XARM_INFO},
216
+ "xarm_push_medium_replay_image": {"single_task": "Push the cube onto the target.", **XARM_INFO},
217
+ "umi_cup_in_the_wild": {
218
+ "single_task": "Put the cup on the plate.",
219
+ "license": "apache-2.0",
220
+ },
221
+ "asu_table_top": {
222
+ "tasks_col": "language_instruction",
223
+ "license": "mit",
224
+ "paper": "https://link.springer.com/article/10.1007/s10514-023-10129-1",
225
+ "citation_bibtex": dedent(r"""
226
+ @inproceedings{zhou2023modularity,
227
+ title={Modularity through Attention: Efficient Training and Transfer of Language-Conditioned Policies for Robot Manipulation},
228
+ author={Zhou, Yifan and Sonawani, Shubham and Phielipp, Mariano and Stepputtis, Simon and Amor, Heni},
229
+ booktitle={Conference on Robot Learning},
230
+ pages={1684--1695},
231
+ year={2023},
232
+ organization={PMLR}
233
+ }
234
+ @article{zhou2023learning,
235
+ title={Learning modular language-conditioned robot policies through attention},
236
+ author={Zhou, Yifan and Sonawani, Shubham and Phielipp, Mariano and Ben Amor, Heni and Stepputtis, Simon},
237
+ journal={Autonomous Robots},
238
+ pages={1--21},
239
+ year={2023},
240
+ publisher={Springer}
241
+ }""").lstrip(),
242
+ },
243
+ "austin_buds_dataset": {
244
+ "tasks_col": "language_instruction",
245
+ "license": "mit",
246
+ "url": "https://ut-austin-rpl.github.io/BUDS-website/",
247
+ "paper": "https://arxiv.org/abs/2109.13841",
248
+ "citation_bibtex": dedent(r"""
249
+ @article{zhu2022bottom,
250
+ title={Bottom-Up Skill Discovery From Unsegmented Demonstrations for Long-Horizon Robot Manipulation},
251
+ author={Zhu, Yifeng and Stone, Peter and Zhu, Yuke},
252
+ journal={IEEE Robotics and Automation Letters},
253
+ volume={7},
254
+ number={2},
255
+ pages={4126--4133},
256
+ year={2022},
257
+ publisher={IEEE}
258
+ }""").lstrip(),
259
+ },
260
+ "austin_sailor_dataset": {
261
+ "tasks_col": "language_instruction",
262
+ "license": "mit",
263
+ "url": "https://ut-austin-rpl.github.io/sailor/",
264
+ "paper": "https://arxiv.org/abs/2210.11435",
265
+ "citation_bibtex": dedent(r"""
266
+ @inproceedings{nasiriany2022sailor,
267
+ title={Learning and Retrieval from Prior Data for Skill-based Imitation Learning},
268
+ author={Soroush Nasiriany and Tian Gao and Ajay Mandlekar and Yuke Zhu},
269
+ booktitle={Conference on Robot Learning (CoRL)},
270
+ year={2022}
271
+ }""").lstrip(),
272
+ },
273
+ "austin_sirius_dataset": {
274
+ "tasks_col": "language_instruction",
275
+ "license": "mit",
276
+ "url": "https://ut-austin-rpl.github.io/sirius/",
277
+ "paper": "https://arxiv.org/abs/2211.08416",
278
+ "citation_bibtex": dedent(r"""
279
+ @inproceedings{liu2022robot,
280
+ title = {Robot Learning on the Job: Human-in-the-Loop Autonomy and Learning During Deployment},
281
+ author = {Huihan Liu and Soroush Nasiriany and Lance Zhang and Zhiyao Bao and Yuke Zhu},
282
+ booktitle = {Robotics: Science and Systems (RSS)},
283
+ year = {2023}
284
+ }""").lstrip(),
285
+ },
286
+ "berkeley_autolab_ur5": {
287
+ "tasks_col": "language_instruction",
288
+ "license": "cc-by-4.0",
289
+ "url": "https://sites.google.com/view/berkeley-ur5/home",
290
+ "citation_bibtex": dedent(r"""
291
+ @misc{BerkeleyUR5Website,
292
+ title = {Berkeley {UR5} Demonstration Dataset},
293
+ author = {Lawrence Yunliang Chen and Simeon Adebola and Ken Goldberg},
294
+ howpublished = {https://sites.google.com/view/berkeley-ur5/home},
295
+ }""").lstrip(),
296
+ },
297
+ "berkeley_cable_routing": {
298
+ "tasks_col": "language_instruction",
299
+ "license": "cc-by-4.0",
300
+ "url": "https://sites.google.com/view/cablerouting/home",
301
+ "paper": "https://arxiv.org/abs/2307.08927",
302
+ "citation_bibtex": dedent(r"""
303
+ @article{luo2023multistage,
304
+ author = {Jianlan Luo and Charles Xu and Xinyang Geng and Gilbert Feng and Kuan Fang and Liam Tan and Stefan Schaal and Sergey Levine},
305
+ title = {Multi-Stage Cable Routing through Hierarchical Imitation Learning},
306
+ journal = {arXiv pre-print},
307
+ year = {2023},
308
+ url = {https://arxiv.org/abs/2307.08927},
309
+ }""").lstrip(),
310
+ },
311
+ "berkeley_fanuc_manipulation": {
312
+ "tasks_col": "language_instruction",
313
+ "license": "mit",
314
+ "url": "https://sites.google.com/berkeley.edu/fanuc-manipulation",
315
+ "citation_bibtex": dedent(r"""
316
+ @article{fanuc_manipulation2023,
317
+ title={Fanuc Manipulation: A Dataset for Learning-based Manipulation with FANUC Mate 200iD Robot},
318
+ author={Zhu, Xinghao and Tian, Ran and Xu, Chenfeng and Ding, Mingyu and Zhan, Wei and Tomizuka, Masayoshi},
319
+ year={2023},
320
+ }""").lstrip(),
321
+ },
322
+ "berkeley_gnm_cory_hall": {
323
+ "tasks_col": "language_instruction",
324
+ "license": "mit",
325
+ "paper": "https://arxiv.org/abs/1709.10489",
326
+ "citation_bibtex": dedent(r"""
327
+ @inproceedings{kahn2018self,
328
+ title={Self-supervised deep reinforcement learning with generalized computation graphs for robot navigation},
329
+ author={Kahn, Gregory and Villaflor, Adam and Ding, Bosen and Abbeel, Pieter and Levine, Sergey},
330
+ booktitle={2018 IEEE international conference on robotics and automation (ICRA)},
331
+ pages={5129--5136},
332
+ year={2018},
333
+ organization={IEEE}
334
+ }""").lstrip(),
335
+ },
336
+ "berkeley_gnm_recon": {
337
+ "tasks_col": "language_instruction",
338
+ "license": "mit",
339
+ "url": "https://sites.google.com/view/recon-robot",
340
+ "paper": "https://arxiv.org/abs/2104.05859",
341
+ "citation_bibtex": dedent(r"""
342
+ @inproceedings{shah2021rapid,
343
+ title={Rapid Exploration for Open-World Navigation with Latent Goal Models},
344
+ author={Dhruv Shah and Benjamin Eysenbach and Nicholas Rhinehart and Sergey Levine},
345
+ booktitle={5th Annual Conference on Robot Learning },
346
+ year={2021},
347
+ url={https://openreview.net/forum?id=d_SWJhyKfVw}
348
+ }""").lstrip(),
349
+ },
350
+ "berkeley_gnm_sac_son": {
351
+ "tasks_col": "language_instruction",
352
+ "license": "mit",
353
+ "url": "https://sites.google.com/view/SACSoN-review",
354
+ "paper": "https://arxiv.org/abs/2306.01874",
355
+ "citation_bibtex": dedent(r"""
356
+ @article{hirose2023sacson,
357
+ title={SACSoN: Scalable Autonomous Data Collection for Social Navigation},
358
+ author={Hirose, Noriaki and Shah, Dhruv and Sridhar, Ajay and Levine, Sergey},
359
+ journal={arXiv preprint arXiv:2306.01874},
360
+ year={2023}
361
+ }""").lstrip(),
362
+ },
363
+ "berkeley_mvp": {
364
+ "tasks_col": "language_instruction",
365
+ "license": "mit",
366
+ "paper": "https://arxiv.org/abs/2203.06173",
367
+ "citation_bibtex": dedent(r"""
368
+ @InProceedings{Radosavovic2022,
369
+ title = {Real-World Robot Learning with Masked Visual Pre-training},
370
+ author = {Ilija Radosavovic and Tete Xiao and Stephen James and Pieter Abbeel and Jitendra Malik and Trevor Darrell},
371
+ booktitle = {CoRL},
372
+ year = {2022}
373
+ }""").lstrip(),
374
+ },
375
+ "berkeley_rpt": {
376
+ "tasks_col": "language_instruction",
377
+ "license": "mit",
378
+ "paper": "https://arxiv.org/abs/2306.10007",
379
+ "citation_bibtex": dedent(r"""
380
+ @article{Radosavovic2023,
381
+ title={Robot Learning with Sensorimotor Pre-training},
382
+ author={Ilija Radosavovic and Baifeng Shi and Letian Fu and Ken Goldberg and Trevor Darrell and Jitendra Malik},
383
+ year={2023},
384
+ journal={arXiv:2306.10007}
385
+ }""").lstrip(),
386
+ },
387
+ "cmu_franka_exploration_dataset": {
388
+ "tasks_col": "language_instruction",
389
+ "license": "mit",
390
+ "url": "https://human-world-model.github.io/",
391
+ "paper": "https://arxiv.org/abs/2308.10901",
392
+ "citation_bibtex": dedent(r"""
393
+ @inproceedings{mendonca2023structured,
394
+ title={Structured World Models from Human Videos},
395
+ author={Mendonca, Russell and Bahl, Shikhar and Pathak, Deepak},
396
+ journal={RSS},
397
+ year={2023}
398
+ }""").lstrip(),
399
+ },
400
+ "cmu_play_fusion": {
401
+ "tasks_col": "language_instruction",
402
+ "license": "mit",
403
+ "url": "https://play-fusion.github.io/",
404
+ "paper": "https://arxiv.org/abs/2312.04549",
405
+ "citation_bibtex": dedent(r"""
406
+ @inproceedings{chen2023playfusion,
407
+ title={PlayFusion: Skill Acquisition via Diffusion from Language-Annotated Play},
408
+ author={Chen, Lili and Bahl, Shikhar and Pathak, Deepak},
409
+ booktitle={CoRL},
410
+ year={2023}
411
+ }""").lstrip(),
412
+ },
413
+ "cmu_stretch": {
414
+ "tasks_col": "language_instruction",
415
+ "license": "mit",
416
+ "url": "https://robo-affordances.github.io/",
417
+ "paper": "https://arxiv.org/abs/2304.08488",
418
+ "citation_bibtex": dedent(r"""
419
+ @inproceedings{bahl2023affordances,
420
+ title={Affordances from Human Videos as a Versatile Representation for Robotics},
421
+ author={Bahl, Shikhar and Mendonca, Russell and Chen, Lili and Jain, Unnat and Pathak, Deepak},
422
+ booktitle={CVPR},
423
+ year={2023}
424
+ }
425
+ @article{mendonca2023structured,
426
+ title={Structured World Models from Human Videos},
427
+ author={Mendonca, Russell and Bahl, Shikhar and Pathak, Deepak},
428
+ journal={CoRL},
429
+ year={2023}
430
+ }""").lstrip(),
431
+ },
432
+ "columbia_cairlab_pusht_real": {
433
+ "tasks_col": "language_instruction",
434
+ "license": "mit",
435
+ "url": "https://diffusion-policy.cs.columbia.edu/",
436
+ "paper": "https://arxiv.org/abs/2303.04137v5",
437
+ "citation_bibtex": dedent(r"""
438
+ @inproceedings{chi2023diffusionpolicy,
439
+ title={Diffusion Policy: Visuomotor Policy Learning via Action Diffusion},
440
+ author={Chi, Cheng and Feng, Siyuan and Du, Yilun and Xu, Zhenjia and Cousineau, Eric and Burchfiel, Benjamin and Song, Shuran},
441
+ booktitle={Proceedings of Robotics: Science and Systems (RSS)},
442
+ year={2023}
443
+ }""").lstrip(),
444
+ },
445
+ "conq_hose_manipulation": {
446
+ "tasks_col": "language_instruction",
447
+ "license": "mit",
448
+ "url": "https://sites.google.com/view/conq-hose-manipulation-dataset/home",
449
+ "citation_bibtex": dedent(r"""
450
+ @misc{ConqHoseManipData,
451
+ author={Peter Mitrano and Dmitry Berenson},
452
+ title={Conq Hose Manipulation Dataset, v1.15.0},
453
+ year={2024},
454
+ howpublished={https://sites.google.com/view/conq-hose-manipulation-dataset}
455
+ }""").lstrip(),
456
+ },
457
+ "dlr_edan_shared_control": {
458
+ "tasks_col": "language_instruction",
459
+ "license": "mit",
460
+ "paper": "https://ieeexplore.ieee.org/document/9341156",
461
+ "citation_bibtex": dedent(r"""
462
+ @inproceedings{vogel_edan_2020,
463
+ title = {EDAN - an EMG-Controlled Daily Assistant to Help People with Physical Disabilities},
464
+ language = {en},
465
+ booktitle = {2020 {IEEE}/{RSJ} {International} {Conference} on {Intelligent} {Robots} and {Systems} ({IROS})},
466
+ author = {Vogel, Jörn and Hagengruber, Annette and Iskandar, Maged and Quere, Gabriel and Leipscher, Ulrike and Bustamante, Samuel and Dietrich, Alexander and Hoeppner, Hannes and Leidner, Daniel and Albu-Schäffer, Alin},
467
+ year = {2020}
468
+ }
469
+ @inproceedings{quere_shared_2020,
470
+ address = {Paris, France},
471
+ title = {Shared {Control} {Templates} for {Assistive} {Robotics}},
472
+ language = {en},
473
+ booktitle = {2020 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
474
+ author = {Quere, Gabriel and Hagengruber, Annette and Iskandar, Maged and Bustamante, Samuel and Leidner, Daniel and Stulp, Freek and Vogel, Joern},
475
+ year = {2020},
476
+ pages = {7},
477
+ }""").lstrip(),
478
+ },
479
+ "dlr_sara_grid_clamp": {
480
+ "tasks_col": "language_instruction",
481
+ "license": "mit",
482
+ "paper": "https://www.researchsquare.com/article/rs-3289569/v1",
483
+ "citation_bibtex": dedent(r"""
484
+ @article{padalkar2023guided,
485
+ title={A guided reinforcement learning approach using shared control templates for learning manipulation skills in the real world},
486
+ author={Padalkar, Abhishek and Quere, Gabriel and Raffin, Antonin and Silv{\'e}rio, Jo{\~a}o and Stulp, Freek},
487
+ journal={Research square preprint rs-3289569/v1},
488
+ year={2023}
489
+ }""").lstrip(),
490
+ },
491
+ "dlr_sara_pour": {
492
+ "tasks_col": "language_instruction",
493
+ "license": "mit",
494
+ "paper": "https://elib.dlr.de/193739/1/padalkar2023rlsct.pdf",
495
+ "citation_bibtex": dedent(r"""
496
+ @inproceedings{padalkar2023guiding,
497
+ title={Guiding Reinforcement Learning with Shared Control Templates},
498
+ author={Padalkar, Abhishek and Quere, Gabriel and Steinmetz, Franz and Raffin, Antonin and Nieuwenhuisen, Matthias and Silv{\'e}rio, Jo{\~a}o and Stulp, Freek},
499
+ booktitle={40th IEEE International Conference on Robotics and Automation, ICRA 2023},
500
+ year={2023},
501
+ organization={IEEE}
502
+ }""").lstrip(),
503
+ },
504
+ "droid_100": {
505
+ "tasks_col": "language_instruction",
506
+ "license": "mit",
507
+ "url": "https://droid-dataset.github.io/",
508
+ "paper": "https://arxiv.org/abs/2403.12945",
509
+ "citation_bibtex": dedent(r"""
510
+ @article{khazatsky2024droid,
511
+ title = {DROID: A Large-Scale In-The-Wild Robot Manipulation Dataset},
512
+ author = {Alexander Khazatsky and Karl Pertsch and Suraj Nair and Ashwin Balakrishna and Sudeep Dasari and Siddharth Karamcheti and Soroush Nasiriany and Mohan Kumar Srirama and Lawrence Yunliang Chen and Kirsty Ellis and Peter David Fagan and Joey Hejna and Masha Itkina and Marion Lepert and Yecheng Jason Ma and Patrick Tree Miller and Jimmy Wu and Suneel Belkhale and Shivin Dass and Huy Ha and Arhan Jain and Abraham Lee and Youngwoon Lee and Marius Memmel and Sungjae Park and Ilija Radosavovic and Kaiyuan Wang and Albert Zhan and Kevin Black and Cheng Chi and Kyle Beltran Hatch and Shan Lin and Jingpei Lu and Jean Mercat and Abdul Rehman and Pannag R Sanketi and Archit Sharma and Cody Simpson and Quan Vuong and Homer Rich Walke and Blake Wulfe and Ted Xiao and Jonathan Heewon Yang and Arefeh Yavary and Tony Z. Zhao and Christopher Agia and Rohan Baijal and Mateo Guaman Castro and Daphne Chen and Qiuyu Chen and Trinity Chung and Jaimyn Drake and Ethan Paul Foster and Jensen Gao and David Antonio Herrera and Minho Heo and Kyle Hsu and Jiaheng Hu and Donovon Jackson and Charlotte Le and Yunshuang Li and Kevin Lin and Roy Lin and Zehan Ma and Abhiram Maddukuri and Suvir Mirchandani and Daniel Morton and Tony Nguyen and Abigail O'Neill and Rosario Scalise and Derick Seale and Victor Son and Stephen Tian and Emi Tran and Andrew E. Wang and Yilin Wu and Annie Xie and Jingyun Yang and Patrick Yin and Yunchu Zhang and Osbert Bastani and Glen Berseth and Jeannette Bohg and Ken Goldberg and Abhinav Gupta and Abhishek Gupta and Dinesh Jayaraman and Joseph J Lim and Jitendra Malik and Roberto Martín-Martín and Subramanian Ramamoorthy and Dorsa Sadigh and Shuran Song and Jiajun Wu and Michael C. Yip and Yuke Zhu and Thomas Kollar and Sergey Levine and Chelsea Finn},
513
+ year = {2024},
514
+ }""").lstrip(),
515
+ },
516
+ "fmb": {
517
+ "tasks_col": "language_instruction",
518
+ "license": "cc-by-4.0",
519
+ "url": "https://functional-manipulation-benchmark.github.io/",
520
+ "paper": "https://arxiv.org/abs/2401.08553",
521
+ "citation_bibtex": dedent(r"""
522
+ @article{luo2024fmb,
523
+ title={FMB: a Functional Manipulation Benchmark for Generalizable Robotic Learning},
524
+ author={Luo, Jianlan and Xu, Charles and Liu, Fangchen and Tan, Liam and Lin, Zipeng and Wu, Jeffrey and Abbeel, Pieter and Levine, Sergey},
525
+ journal={arXiv preprint arXiv:2401.08553},
526
+ year={2024}
527
+ }""").lstrip(),
528
+ },
529
+ "iamlab_cmu_pickup_insert": {
530
+ "tasks_col": "language_instruction",
531
+ "license": "mit",
532
+ "url": "https://openreview.net/forum?id=WuBv9-IGDUA",
533
+ "paper": "https://arxiv.org/abs/2401.14502",
534
+ "citation_bibtex": dedent(r"""
535
+ @inproceedings{saxena2023multiresolution,
536
+ title={Multi-Resolution Sensing for Real-Time Control with Vision-Language Models},
537
+ author={Saumya Saxena and Mohit Sharma and Oliver Kroemer},
538
+ booktitle={7th Annual Conference on Robot Learning},
539
+ year={2023},
540
+ url={https://openreview.net/forum?id=WuBv9-IGDUA}
541
+ }""").lstrip(),
542
+ },
543
+ "imperialcollege_sawyer_wrist_cam": {
544
+ "tasks_col": "language_instruction",
545
+ "license": "mit",
546
+ },
547
+ "jaco_play": {
548
+ "tasks_col": "language_instruction",
549
+ "license": "cc-by-4.0",
550
+ "url": "https://github.com/clvrai/clvr_jaco_play_dataset",
551
+ "citation_bibtex": dedent(r"""
552
+ @software{dass2023jacoplay,
553
+ author = {Dass, Shivin and Yapeter, Jullian and Zhang, Jesse and Zhang, Jiahui
554
+ and Pertsch, Karl and Nikolaidis, Stefanos and Lim, Joseph J.},
555
+ title = {CLVR Jaco Play Dataset},
556
+ url = {https://github.com/clvrai/clvr_jaco_play_dataset},
557
+ version = {1.0.0},
558
+ year = {2023}
559
+ }""").lstrip(),
560
+ },
561
+ "kaist_nonprehensile": {
562
+ "tasks_col": "language_instruction",
563
+ "license": "cc-by-4.0",
564
+ "url": "https://github.com/JaeHyung-Kim/rlds_dataset_builder",
565
+ "citation_bibtex": dedent(r"""
566
+ @article{kimpre,
567
+ title={Pre-and post-contact policy decomposition for non-prehensile manipulation with zero-shot sim-to-real transfer},
568
+ author={Kim, Minchan and Han, Junhyek and Kim, Jaehyung and Kim, Beomjoon},
569
+ booktitle={2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
570
+ year={2023},
571
+ organization={IEEE}
572
+ }""").lstrip(),
573
+ },
574
+ "nyu_door_opening_surprising_effectiveness": {
575
+ "tasks_col": "language_instruction",
576
+ "license": "mit",
577
+ "url": "https://jyopari.github.io/VINN/",
578
+ "paper": "https://arxiv.org/abs/2112.01511",
579
+ "citation_bibtex": dedent(r"""
580
+ @misc{pari2021surprising,
581
+ title={The Surprising Effectiveness of Representation Learning for Visual Imitation},
582
+ author={Jyothish Pari and Nur Muhammad Shafiullah and Sridhar Pandian Arunachalam and Lerrel Pinto},
583
+ year={2021},
584
+ eprint={2112.01511},
585
+ archivePrefix={arXiv},
586
+ primaryClass={cs.RO}
587
+ }""").lstrip(),
588
+ },
589
+ "nyu_franka_play_dataset": {
590
+ "tasks_col": "language_instruction",
591
+ "license": "mit",
592
+ "url": "https://play-to-policy.github.io/",
593
+ "paper": "https://arxiv.org/abs/2210.10047",
594
+ "citation_bibtex": dedent(r"""
595
+ @article{cui2022play,
596
+ title = {From Play to Policy: Conditional Behavior Generation from Uncurated Robot Data},
597
+ author = {Cui, Zichen Jeff and Wang, Yibin and Shafiullah, Nur Muhammad Mahi and Pinto, Lerrel},
598
+ journal = {arXiv preprint arXiv:2210.10047},
599
+ year = {2022}
600
+ }""").lstrip(),
601
+ },
602
+ "nyu_rot_dataset": {
603
+ "tasks_col": "language_instruction",
604
+ "license": "mit",
605
+ "url": "https://rot-robot.github.io/",
606
+ "paper": "https://arxiv.org/abs/2206.15469",
607
+ "citation_bibtex": dedent(r"""
608
+ @inproceedings{haldar2023watch,
609
+ title={Watch and match: Supercharging imitation with regularized optimal transport},
610
+ author={Haldar, Siddhant and Mathur, Vaibhav and Yarats, Denis and Pinto, Lerrel},
611
+ booktitle={Conference on Robot Learning},
612
+ pages={32--43},
613
+ year={2023},
614
+ organization={PMLR}
615
+ }""").lstrip(),
616
+ },
617
+ "roboturk": {
618
+ "tasks_col": "language_instruction",
619
+ "license": "mit",
620
+ "url": "https://roboturk.stanford.edu/dataset_real.html",
621
+ "paper": "PAPER",
622
+ "citation_bibtex": dedent(r"""
623
+ @inproceedings{mandlekar2019scaling,
624
+ title={Scaling robot supervision to hundreds of hours with roboturk: Robotic manipulation dataset through human reasoning and dexterity},
625
+ author={Mandlekar, Ajay and Booher, Jonathan and Spero, Max and Tung, Albert and Gupta, Anchit and Zhu, Yuke and Garg, Animesh and Savarese, Silvio and Fei-Fei, Li},
626
+ booktitle={2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
627
+ pages={1048--1055},
628
+ year={2019},
629
+ organization={IEEE}
630
+ }""").lstrip(),
631
+ },
632
+ "stanford_hydra_dataset": {
633
+ "tasks_col": "language_instruction",
634
+ "license": "mit",
635
+ "url": "https://sites.google.com/view/hydra-il-2023",
636
+ "paper": "https://arxiv.org/abs/2306.17237",
637
+ "citation_bibtex": dedent(r"""
638
+ @article{belkhale2023hydra,
639
+ title={HYDRA: Hybrid Robot Actions for Imitation Learning},
640
+ author={Belkhale, Suneel and Cui, Yuchen and Sadigh, Dorsa},
641
+ journal={arxiv},
642
+ year={2023}
643
+ }""").lstrip(),
644
+ },
645
+ "stanford_kuka_multimodal_dataset": {
646
+ "tasks_col": "language_instruction",
647
+ "license": "mit",
648
+ "url": "https://sites.google.com/view/visionandtouch",
649
+ "paper": "https://arxiv.org/abs/1810.10191",
650
+ "citation_bibtex": dedent(r"""
651
+ @inproceedings{lee2019icra,
652
+ title={Making sense of vision and touch: Self-supervised learning of multimodal representations for contact-rich tasks},
653
+ author={Lee, Michelle A and Zhu, Yuke and Srinivasan, Krishnan and Shah, Parth and Savarese, Silvio and Fei-Fei, Li and Garg, Animesh and Bohg, Jeannette},
654
+ booktitle={2019 IEEE International Conference on Robotics and Automation (ICRA)},
655
+ year={2019},
656
+ url={https://arxiv.org/abs/1810.10191}
657
+ }""").lstrip(),
658
+ },
659
+ "stanford_robocook": {
660
+ "tasks_col": "language_instruction",
661
+ "license": "mit",
662
+ "url": "https://hshi74.github.io/robocook/",
663
+ "paper": "https://arxiv.org/abs/2306.14447",
664
+ "citation_bibtex": dedent(r"""
665
+ @article{shi2023robocook,
666
+ title={RoboCook: Long-Horizon Elasto-Plastic Object Manipulation with Diverse Tools},
667
+ author={Shi, Haochen and Xu, Huazhe and Clarke, Samuel and Li, Yunzhu and Wu, Jiajun},
668
+ journal={arXiv preprint arXiv:2306.14447},
669
+ year={2023}
670
+ }""").lstrip(),
671
+ },
672
+ "taco_play": {
673
+ "tasks_col": "language_instruction",
674
+ "license": "cc-by-4.0",
675
+ "url": "https://www.kaggle.com/datasets/oiermees/taco-robot",
676
+ "paper": "https://arxiv.org/abs/2209.08959, https://arxiv.org/abs/2210.01911",
677
+ "citation_bibtex": dedent(r"""
678
+ @inproceedings{rosete2022tacorl,
679
+ author = {Erick Rosete-Beas and Oier Mees and Gabriel Kalweit and Joschka Boedecker and Wolfram Burgard},
680
+ title = {Latent Plans for Task Agnostic Offline Reinforcement Learning},
681
+ journal = {Proceedings of the 6th Conference on Robot Learning (CoRL)},
682
+ year = {2022}
683
+ }
684
+ @inproceedings{mees23hulc2,
685
+ title={Grounding Language with Visual Affordances over Unstructured Data},
686
+ author={Oier Mees and Jessica Borja-Diaz and Wolfram Burgard},
687
+ booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA)},
688
+ year={2023},
689
+ address = {London, UK}
690
+ }""").lstrip(),
691
+ },
692
+ "tokyo_u_lsmo": {
693
+ "tasks_col": "language_instruction",
694
+ "license": "mit",
695
+ "url": "URL",
696
+ "paper": "https://arxiv.org/abs/2107.05842",
697
+ "citation_bibtex": dedent(r"""
698
+ @Article{Osa22,
699
+ author = {Takayuki Osa},
700
+ journal = {The International Journal of Robotics Research},
701
+ title = {Motion Planning by Learning the Solution Manifold in Trajectory Optimization},
702
+ year = {2022},
703
+ number = {3},
704
+ pages = {291--311},
705
+ volume = {41},
706
+ }""").lstrip(),
707
+ },
708
+ "toto": {
709
+ "tasks_col": "language_instruction",
710
+ "license": "mit",
711
+ "url": "https://toto-benchmark.org/",
712
+ "paper": "https://arxiv.org/abs/2306.00942",
713
+ "citation_bibtex": dedent(r"""
714
+ @inproceedings{zhou2023train,
715
+ author={Zhou, Gaoyue and Dean, Victoria and Srirama, Mohan Kumar and Rajeswaran, Aravind and Pari, Jyothish and Hatch, Kyle and Jain, Aryan and Yu, Tianhe and Abbeel, Pieter and Pinto, Lerrel and Finn, Chelsea and Gupta, Abhinav},
716
+ booktitle={2023 IEEE International Conference on Robotics and Automation (ICRA)},
717
+ title={Train Offline, Test Online: A Real Robot Learning Benchmark},
718
+ year={2023},
719
+ }""").lstrip(),
720
+ },
721
+ "ucsd_kitchen_dataset": {
722
+ "tasks_col": "language_instruction",
723
+ "license": "mit",
724
+ "citation_bibtex": dedent(r"""
725
+ @ARTICLE{ucsd_kitchens,
726
+ author = {Ge Yan, Kris Wu, and Xiaolong Wang},
727
+ title = {{ucsd kitchens Dataset}},
728
+ year = {2023},
729
+ month = {August}
730
+ }""").lstrip(),
731
+ },
732
+ "ucsd_pick_and_place_dataset": {
733
+ "tasks_col": "language_instruction",
734
+ "license": "mit",
735
+ "url": "https://owmcorl.github.io/#",
736
+ "paper": "https://arxiv.org/abs/2310.16029",
737
+ "citation_bibtex": dedent(r"""
738
+ @preprint{Feng2023Finetuning,
739
+ title={Finetuning Offline World Models in the Real World},
740
+ author={Yunhai Feng, Nicklas Hansen, Ziyan Xiong, Chandramouli Rajagopalan, Xiaolong Wang},
741
+ year={2023}
742
+ }""").lstrip(),
743
+ },
744
+ "uiuc_d3field": {
745
+ "tasks_col": "language_instruction",
746
+ "license": "mit",
747
+ "url": "https://robopil.github.io/d3fields/",
748
+ "paper": "https://arxiv.org/abs/2309.16118",
749
+ "citation_bibtex": dedent(r"""
750
+ @article{wang2023d3field,
751
+ title={D^3Field: Dynamic 3D Descriptor Fields for Generalizable Robotic Manipulation},
752
+ author={Wang, Yixuan and Li, Zhuoran and Zhang, Mingtong and Driggs-Campbell, Katherine and Wu, Jiajun and Fei-Fei, Li and Li, Yunzhu},
753
+ journal={arXiv preprint arXiv:},
754
+ year={2023},
755
+ }""").lstrip(),
756
+ },
757
+ "usc_cloth_sim": {
758
+ "tasks_col": "language_instruction",
759
+ "license": "mit",
760
+ "url": "https://uscresl.github.io/dmfd/",
761
+ "paper": "https://arxiv.org/abs/2207.10148",
762
+ "citation_bibtex": dedent(r"""
763
+ @article{salhotra2022dmfd,
764
+ author={Salhotra, Gautam and Liu, I-Chun Arthur and Dominguez-Kuhne, Marcus and Sukhatme, Gaurav S.},
765
+ journal={IEEE Robotics and Automation Letters},
766
+ title={Learning Deformable Object Manipulation From Expert Demonstrations},
767
+ year={2022},
768
+ volume={7},
769
+ number={4},
770
+ pages={8775-8782},
771
+ doi={10.1109/LRA.2022.3187843}
772
+ }""").lstrip(),
773
+ },
774
+ "utaustin_mutex": {
775
+ "tasks_col": "language_instruction",
776
+ "license": "mit",
777
+ "url": "https://ut-austin-rpl.github.io/MUTEX/",
778
+ "paper": "https://arxiv.org/abs/2309.14320",
779
+ "citation_bibtex": dedent(r"""
780
+ @inproceedings{shah2023mutex,
781
+ title={{MUTEX}: Learning Unified Policies from Multimodal Task Specifications},
782
+ author={Rutav Shah and Roberto Mart{\'\i}n-Mart{\'\i}n and Yuke Zhu},
783
+ booktitle={7th Annual Conference on Robot Learning},
784
+ year={2023},
785
+ url={https://openreview.net/forum?id=PwqiqaaEzJ}
786
+ }""").lstrip(),
787
+ },
788
+ "utokyo_pr2_opening_fridge": {
789
+ "tasks_col": "language_instruction",
790
+ "license": "mit",
791
+ "citation_bibtex": dedent(r"""
792
+ @misc{oh2023pr2utokyodatasets,
793
+ author={Jihoon Oh and Naoaki Kanazawa and Kento Kawaharazuka},
794
+ title={X-Embodiment U-Tokyo PR2 Datasets},
795
+ year={2023},
796
+ url={https://github.com/ojh6404/rlds_dataset_builder},
797
+ }""").lstrip(),
798
+ },
799
+ "utokyo_pr2_tabletop_manipulation": {
800
+ "tasks_col": "language_instruction",
801
+ "license": "mit",
802
+ "citation_bibtex": dedent(r"""
803
+ @misc{oh2023pr2utokyodatasets,
804
+ author={Jihoon Oh and Naoaki Kanazawa and Kento Kawaharazuka},
805
+ title={X-Embodiment U-Tokyo PR2 Datasets},
806
+ year={2023},
807
+ url={https://github.com/ojh6404/rlds_dataset_builder},
808
+ }""").lstrip(),
809
+ },
810
+ "utokyo_saytap": {
811
+ "tasks_col": "language_instruction",
812
+ "license": "mit",
813
+ "url": "https://saytap.github.io/",
814
+ "paper": "https://arxiv.org/abs/2306.07580",
815
+ "citation_bibtex": dedent(r"""
816
+ @article{saytap2023,
817
+ author = {Yujin Tang and Wenhao Yu and Jie Tan and Heiga Zen and Aleksandra Faust and
818
+ Tatsuya Harada},
819
+ title = {SayTap: Language to Quadrupedal Locomotion},
820
+ eprint = {arXiv:2306.07580},
821
+ url = {https://saytap.github.io},
822
+ note = {https://saytap.github.io},
823
+ year = {2023}
824
+ }""").lstrip(),
825
+ },
826
+ "utokyo_xarm_bimanual": {
827
+ "tasks_col": "language_instruction",
828
+ "license": "cc-by-4.0",
829
+ "citation_bibtex": dedent(r"""
830
+ @misc{matsushima2023weblab,
831
+ title={Weblab xArm Dataset},
832
+ author={Tatsuya Matsushima and Hiroki Furuta and Yusuke Iwasawa and Yutaka Matsuo},
833
+ year={2023},
834
+ }""").lstrip(),
835
+ },
836
+ "utokyo_xarm_pick_and_place": {
837
+ "tasks_col": "language_instruction",
838
+ "license": "cc-by-4.0",
839
+ "citation_bibtex": dedent(r"""
840
+ @misc{matsushima2023weblab,
841
+ title={Weblab xArm Dataset},
842
+ author={Tatsuya Matsushima and Hiroki Furuta and Yusuke Iwasawa and Yutaka Matsuo},
843
+ year={2023},
844
+ }""").lstrip(),
845
+ },
846
+ "viola": {
847
+ "tasks_col": "language_instruction",
848
+ "license": "mit",
849
+ "url": "https://ut-austin-rpl.github.io/VIOLA/",
850
+ "paper": "https://arxiv.org/abs/2210.11339",
851
+ "citation_bibtex": dedent(r"""
852
+ @article{zhu2022viola,
853
+ title={VIOLA: Imitation Learning for Vision-Based Manipulation with Object Proposal Priors},
854
+ author={Zhu, Yifeng and Joshi, Abhishek and Stone, Peter and Zhu, Yuke},
855
+ journal={6th Annual Conference on Robot Learning (CoRL)},
856
+ year={2022}
857
+ }""").lstrip(),
858
+ },
859
+ }
860
+ # spellchecker:on
861
+
862
+
863
+ def batch_convert():
864
+ status = {}
865
+ logfile = LOCAL_DIR / "conversion_log.txt"
866
+ assert set(DATASETS) == {id_.split("/")[1] for id_ in available_datasets}
867
+ for num, (name, kwargs) in enumerate(DATASETS.items()):
868
+ repo_id = f"lerobot/{name}"
869
+ print(f"\nConverting {repo_id} ({num}/{len(DATASETS)})")
870
+ print("---------------------------------------------------------")
871
+ try:
872
+ convert_dataset(repo_id, LOCAL_DIR, **kwargs)
873
+ status = f"{repo_id}: success."
874
+ with open(logfile, "a") as file:
875
+ file.write(status + "\n")
876
+ except Exception:
877
+ status = f"{repo_id}: failed\n {traceback.format_exc()}"
878
+ with open(logfile, "a") as file:
879
+ file.write(status + "\n")
880
+ continue
881
+
882
+
883
+ if __name__ == "__main__":
884
+ batch_convert()
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py ADDED
@@ -0,0 +1,664 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 1.6 to
19
+ 2.0. You will be required to provide the 'tasks', which is a short but accurate description in plain English
20
+ for each of the task performed in the dataset. This will allow to easily train models with task-conditioning.
21
+
22
+ We support 3 different scenarios for these tasks (see instructions below):
23
+ 1. Single task dataset: all episodes of your dataset have the same single task.
24
+ 2. Single task episodes: the episodes of your dataset each contain a single task but they can differ from
25
+ one episode to the next.
26
+ 3. Multi task episodes: episodes of your dataset may each contain several different tasks.
27
+
28
+
29
+ Can you can also provide a robot config .yaml file (not mandatory) to this script via the option
30
+ '--robot-config' so that it writes information about the robot (robot type, motors names) this dataset was
31
+ recorded with. For now, only Aloha/Koch type robots are supported with this option.
32
+
33
+
34
+ # 1. Single task dataset
35
+ If your dataset contains a single task, you can simply provide it directly via the CLI with the
36
+ '--single-task' option.
37
+
38
+ Examples:
39
+
40
+ ```bash
41
+ python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
42
+ --repo-id lerobot/aloha_sim_insertion_human_image \
43
+ --single-task "Insert the peg into the socket." \
44
+ --robot-config lerobot/configs/robot/aloha.yaml \
45
+ --local-dir data
46
+ ```
47
+
48
+ ```bash
49
+ python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
50
+ --repo-id aliberts/koch_tutorial \
51
+ --single-task "Pick the Lego block and drop it in the box on the right." \
52
+ --robot-config lerobot/configs/robot/koch.yaml \
53
+ --local-dir data
54
+ ```
55
+
56
+
57
+ # 2. Single task episodes
58
+ If your dataset is a multi-task dataset, you have two options to provide the tasks to this script:
59
+
60
+ - If your dataset already contains a language instruction column in its parquet file, you can simply provide
61
+ this column's name with the '--tasks-col' arg.
62
+
63
+ Example:
64
+
65
+ ```bash
66
+ python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
67
+ --repo-id lerobot/stanford_kuka_multimodal_dataset \
68
+ --tasks-col "language_instruction" \
69
+ --local-dir data
70
+ ```
71
+
72
+ - If your dataset doesn't contain a language instruction, you should provide the path to a .json file with the
73
+ '--tasks-path' arg. This file should have the following structure where keys correspond to each
74
+ episode_index in the dataset, and values are the language instruction for that episode.
75
+
76
+ Example:
77
+
78
+ ```json
79
+ {
80
+ "0": "Do something",
81
+ "1": "Do something else",
82
+ "2": "Do something",
83
+ "3": "Go there",
84
+ ...
85
+ }
86
+ ```
87
+
88
+ # 3. Multi task episodes
89
+ If you have multiple tasks per episodes, your dataset should contain a language instruction column in its
90
+ parquet file, and you must provide this column's name with the '--tasks-col' arg.
91
+
92
+ Example:
93
+
94
+ ```bash
95
+ python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
96
+ --repo-id lerobot/stanford_kuka_multimodal_dataset \
97
+ --tasks-col "language_instruction" \
98
+ --local-dir data
99
+ ```
100
+ """
101
+
102
+ import argparse
103
+ import contextlib
104
+ import filecmp
105
+ import json
106
+ import logging
107
+ import math
108
+ import shutil
109
+ import subprocess
110
+ import tempfile
111
+ from pathlib import Path
112
+
113
+ import datasets
114
+ import pyarrow.compute as pc
115
+ import pyarrow.parquet as pq
116
+ import torch
117
+ from datasets import Dataset
118
+ from huggingface_hub import HfApi
119
+ from huggingface_hub.errors import EntryNotFoundError, HfHubHTTPError
120
+ from safetensors.torch import load_file
121
+
122
+ from lerobot.common.datasets.utils import (
123
+ DEFAULT_CHUNK_SIZE,
124
+ DEFAULT_PARQUET_PATH,
125
+ DEFAULT_VIDEO_PATH,
126
+ EPISODES_PATH,
127
+ INFO_PATH,
128
+ STATS_PATH,
129
+ TASKS_PATH,
130
+ create_branch,
131
+ create_lerobot_dataset_card,
132
+ flatten_dict,
133
+ get_safe_version,
134
+ load_json,
135
+ unflatten_dict,
136
+ write_json,
137
+ write_jsonlines,
138
+ )
139
+ from lerobot.common.datasets.video_utils import (
140
+ VideoFrame, # noqa: F401
141
+ get_image_pixel_channels,
142
+ get_video_info,
143
+ )
144
+ from lerobot.common.robot_devices.robots.configs import RobotConfig
145
+ from lerobot.common.robot_devices.robots.utils import make_robot_config
146
+
147
+ V16 = "v1.6"
148
+ V20 = "v2.0"
149
+
150
+ GITATTRIBUTES_REF = "aliberts/gitattributes_reference"
151
+ V1_VIDEO_FILE = "{video_key}_episode_{episode_index:06d}.mp4"
152
+ V1_INFO_PATH = "meta_data/info.json"
153
+ V1_STATS_PATH = "meta_data/stats.safetensors"
154
+
155
+
156
+ def parse_robot_config(robot_cfg: RobotConfig) -> tuple[str, dict]:
157
+ if robot_cfg.type in ["aloha", "koch"]:
158
+ state_names = [
159
+ f"{arm}_{motor}" if len(robot_cfg.follower_arms) > 1 else motor
160
+ for arm in robot_cfg.follower_arms
161
+ for motor in robot_cfg.follower_arms[arm].motors
162
+ ]
163
+ action_names = [
164
+ # f"{arm}_{motor}" for arm in ["left", "right"] for motor in robot_cfg["leader_arms"][arm]["motors"]
165
+ f"{arm}_{motor}" if len(robot_cfg.leader_arms) > 1 else motor
166
+ for arm in robot_cfg.leader_arms
167
+ for motor in robot_cfg.leader_arms[arm].motors
168
+ ]
169
+ # elif robot_cfg["robot_type"] == "stretch3": TODO
170
+ else:
171
+ raise NotImplementedError(
172
+ "Please provide robot_config={'robot_type': ..., 'names': ...} directly to convert_dataset()."
173
+ )
174
+
175
+ return {
176
+ "robot_type": robot_cfg.type,
177
+ "names": {
178
+ "observation.state": state_names,
179
+ "observation.effort": state_names,
180
+ "action": action_names,
181
+ },
182
+ }
183
+
184
+
185
+ def convert_stats_to_json(v1_dir: Path, v2_dir: Path) -> None:
186
+ safetensor_path = v1_dir / V1_STATS_PATH
187
+ stats = load_file(safetensor_path)
188
+ serialized_stats = {key: value.tolist() for key, value in stats.items()}
189
+ serialized_stats = unflatten_dict(serialized_stats)
190
+
191
+ json_path = v2_dir / STATS_PATH
192
+ json_path.parent.mkdir(exist_ok=True, parents=True)
193
+ with open(json_path, "w") as f:
194
+ json.dump(serialized_stats, f, indent=4)
195
+
196
+ # Sanity check
197
+ with open(json_path) as f:
198
+ stats_json = json.load(f)
199
+
200
+ stats_json = flatten_dict(stats_json)
201
+ stats_json = {key: torch.tensor(value) for key, value in stats_json.items()}
202
+ for key in stats:
203
+ torch.testing.assert_close(stats_json[key], stats[key])
204
+
205
+
206
+ def get_features_from_hf_dataset(
207
+ dataset: Dataset, robot_config: RobotConfig | None = None
208
+ ) -> dict[str, list]:
209
+ robot_config = parse_robot_config(robot_config)
210
+ features = {}
211
+ for key, ft in dataset.features.items():
212
+ if isinstance(ft, datasets.Value):
213
+ dtype = ft.dtype
214
+ shape = (1,)
215
+ names = None
216
+ if isinstance(ft, datasets.Sequence):
217
+ assert isinstance(ft.feature, datasets.Value)
218
+ dtype = ft.feature.dtype
219
+ shape = (ft.length,)
220
+ motor_names = (
221
+ robot_config["names"][key] if robot_config else [f"motor_{i}" for i in range(ft.length)]
222
+ )
223
+ assert len(motor_names) == shape[0]
224
+ names = {"motors": motor_names}
225
+ elif isinstance(ft, datasets.Image):
226
+ dtype = "image"
227
+ image = dataset[0][key] # Assuming first row
228
+ channels = get_image_pixel_channels(image)
229
+ shape = (image.height, image.width, channels)
230
+ names = ["height", "width", "channels"]
231
+ elif ft._type == "VideoFrame":
232
+ dtype = "video"
233
+ shape = None # Add shape later
234
+ names = ["height", "width", "channels"]
235
+
236
+ features[key] = {
237
+ "dtype": dtype,
238
+ "shape": shape,
239
+ "names": names,
240
+ }
241
+
242
+ return features
243
+
244
+
245
+ def add_task_index_by_episodes(dataset: Dataset, tasks_by_episodes: dict) -> tuple[Dataset, list[str]]:
246
+ df = dataset.to_pandas()
247
+ tasks = list(set(tasks_by_episodes.values()))
248
+ tasks_to_task_index = {task: task_idx for task_idx, task in enumerate(tasks)}
249
+ episodes_to_task_index = {ep_idx: tasks_to_task_index[task] for ep_idx, task in tasks_by_episodes.items()}
250
+ df["task_index"] = df["episode_index"].map(episodes_to_task_index).astype(int)
251
+
252
+ features = dataset.features
253
+ features["task_index"] = datasets.Value(dtype="int64")
254
+ dataset = Dataset.from_pandas(df, features=features, split="train")
255
+ return dataset, tasks
256
+
257
+
258
+ def add_task_index_from_tasks_col(
259
+ dataset: Dataset, tasks_col: str
260
+ ) -> tuple[Dataset, dict[str, list[str]], list[str]]:
261
+ df = dataset.to_pandas()
262
+
263
+ # HACK: This is to clean some of the instructions in our version of Open X datasets
264
+ prefix_to_clean = "tf.Tensor(b'"
265
+ suffix_to_clean = "', shape=(), dtype=string)"
266
+ df[tasks_col] = df[tasks_col].str.removeprefix(prefix_to_clean).str.removesuffix(suffix_to_clean)
267
+
268
+ # Create task_index col
269
+ tasks_by_episode = df.groupby("episode_index")[tasks_col].unique().apply(lambda x: x.tolist()).to_dict()
270
+ tasks = df[tasks_col].unique().tolist()
271
+ tasks_to_task_index = {task: idx for idx, task in enumerate(tasks)}
272
+ df["task_index"] = df[tasks_col].map(tasks_to_task_index).astype(int)
273
+
274
+ # Build the dataset back from df
275
+ features = dataset.features
276
+ features["task_index"] = datasets.Value(dtype="int64")
277
+ dataset = Dataset.from_pandas(df, features=features, split="train")
278
+ dataset = dataset.remove_columns(tasks_col)
279
+
280
+ return dataset, tasks, tasks_by_episode
281
+
282
+
283
+ def split_parquet_by_episodes(
284
+ dataset: Dataset,
285
+ total_episodes: int,
286
+ total_chunks: int,
287
+ output_dir: Path,
288
+ ) -> list:
289
+ table = dataset.data.table
290
+ episode_lengths = []
291
+ for ep_chunk in range(total_chunks):
292
+ ep_chunk_start = DEFAULT_CHUNK_SIZE * ep_chunk
293
+ ep_chunk_end = min(DEFAULT_CHUNK_SIZE * (ep_chunk + 1), total_episodes)
294
+ chunk_dir = "/".join(DEFAULT_PARQUET_PATH.split("/")[:-1]).format(episode_chunk=ep_chunk)
295
+ (output_dir / chunk_dir).mkdir(parents=True, exist_ok=True)
296
+ for ep_idx in range(ep_chunk_start, ep_chunk_end):
297
+ ep_table = table.filter(pc.equal(table["episode_index"], ep_idx))
298
+ episode_lengths.insert(ep_idx, len(ep_table))
299
+ output_file = output_dir / DEFAULT_PARQUET_PATH.format(
300
+ episode_chunk=ep_chunk, episode_index=ep_idx
301
+ )
302
+ pq.write_table(ep_table, output_file)
303
+
304
+ return episode_lengths
305
+
306
+
307
+ def move_videos(
308
+ repo_id: str,
309
+ video_keys: list[str],
310
+ total_episodes: int,
311
+ total_chunks: int,
312
+ work_dir: Path,
313
+ clean_gittatributes: Path,
314
+ branch: str = "main",
315
+ ) -> None:
316
+ """
317
+ HACK: Since HfApi() doesn't provide a way to move files directly in a repo, this function will run git
318
+ commands to fetch git lfs video files references to move them into subdirectories without having to
319
+ actually download them.
320
+ """
321
+ _lfs_clone(repo_id, work_dir, branch)
322
+
323
+ videos_moved = False
324
+ video_files = [str(f.relative_to(work_dir)) for f in work_dir.glob("videos*/*.mp4")]
325
+ if len(video_files) == 0:
326
+ video_files = [str(f.relative_to(work_dir)) for f in work_dir.glob("videos*/*/*/*.mp4")]
327
+ videos_moved = True # Videos have already been moved
328
+
329
+ assert len(video_files) == total_episodes * len(video_keys)
330
+
331
+ lfs_untracked_videos = _get_lfs_untracked_videos(work_dir, video_files)
332
+
333
+ current_gittatributes = work_dir / ".gitattributes"
334
+ if not filecmp.cmp(current_gittatributes, clean_gittatributes, shallow=False):
335
+ fix_gitattributes(work_dir, current_gittatributes, clean_gittatributes)
336
+
337
+ if lfs_untracked_videos:
338
+ fix_lfs_video_files_tracking(work_dir, video_files)
339
+
340
+ if videos_moved:
341
+ return
342
+
343
+ video_dirs = sorted(work_dir.glob("videos*/"))
344
+ for ep_chunk in range(total_chunks):
345
+ ep_chunk_start = DEFAULT_CHUNK_SIZE * ep_chunk
346
+ ep_chunk_end = min(DEFAULT_CHUNK_SIZE * (ep_chunk + 1), total_episodes)
347
+ for vid_key in video_keys:
348
+ chunk_dir = "/".join(DEFAULT_VIDEO_PATH.split("/")[:-1]).format(
349
+ episode_chunk=ep_chunk, video_key=vid_key
350
+ )
351
+ (work_dir / chunk_dir).mkdir(parents=True, exist_ok=True)
352
+
353
+ for ep_idx in range(ep_chunk_start, ep_chunk_end):
354
+ target_path = DEFAULT_VIDEO_PATH.format(
355
+ episode_chunk=ep_chunk, video_key=vid_key, episode_index=ep_idx
356
+ )
357
+ video_file = V1_VIDEO_FILE.format(video_key=vid_key, episode_index=ep_idx)
358
+ if len(video_dirs) == 1:
359
+ video_path = video_dirs[0] / video_file
360
+ else:
361
+ for dir in video_dirs:
362
+ if (dir / video_file).is_file():
363
+ video_path = dir / video_file
364
+ break
365
+
366
+ video_path.rename(work_dir / target_path)
367
+
368
+ commit_message = "Move video files into chunk subdirectories"
369
+ subprocess.run(["git", "add", "."], cwd=work_dir, check=True)
370
+ subprocess.run(["git", "commit", "-m", commit_message], cwd=work_dir, check=True)
371
+ subprocess.run(["git", "push"], cwd=work_dir, check=True)
372
+
373
+
374
+ def fix_lfs_video_files_tracking(work_dir: Path, lfs_untracked_videos: list[str]) -> None:
375
+ """
376
+ HACK: This function fixes the tracking by git lfs which was not properly set on some repos. In that case,
377
+ there's no other option than to download the actual files and reupload them with lfs tracking.
378
+ """
379
+ for i in range(0, len(lfs_untracked_videos), 100):
380
+ files = lfs_untracked_videos[i : i + 100]
381
+ try:
382
+ subprocess.run(["git", "rm", "--cached", *files], cwd=work_dir, capture_output=True, check=True)
383
+ except subprocess.CalledProcessError as e:
384
+ print("git rm --cached ERROR:")
385
+ print(e.stderr)
386
+ subprocess.run(["git", "add", *files], cwd=work_dir, check=True)
387
+
388
+ commit_message = "Track video files with git lfs"
389
+ subprocess.run(["git", "commit", "-m", commit_message], cwd=work_dir, check=True)
390
+ subprocess.run(["git", "push"], cwd=work_dir, check=True)
391
+
392
+
393
+ def fix_gitattributes(work_dir: Path, current_gittatributes: Path, clean_gittatributes: Path) -> None:
394
+ shutil.copyfile(clean_gittatributes, current_gittatributes)
395
+ subprocess.run(["git", "add", ".gitattributes"], cwd=work_dir, check=True)
396
+ subprocess.run(["git", "commit", "-m", "Fix .gitattributes"], cwd=work_dir, check=True)
397
+ subprocess.run(["git", "push"], cwd=work_dir, check=True)
398
+
399
+
400
+ def _lfs_clone(repo_id: str, work_dir: Path, branch: str) -> None:
401
+ subprocess.run(["git", "lfs", "install"], cwd=work_dir, check=True)
402
+ repo_url = f"https://huggingface.co/datasets/{repo_id}"
403
+ env = {"GIT_LFS_SKIP_SMUDGE": "1"} # Prevent downloading LFS files
404
+ subprocess.run(
405
+ ["git", "clone", "--branch", branch, "--single-branch", "--depth", "1", repo_url, str(work_dir)],
406
+ check=True,
407
+ env=env,
408
+ )
409
+
410
+
411
+ def _get_lfs_untracked_videos(work_dir: Path, video_files: list[str]) -> list[str]:
412
+ lfs_tracked_files = subprocess.run(
413
+ ["git", "lfs", "ls-files", "-n"], cwd=work_dir, capture_output=True, text=True, check=True
414
+ )
415
+ lfs_tracked_files = set(lfs_tracked_files.stdout.splitlines())
416
+ return [f for f in video_files if f not in lfs_tracked_files]
417
+
418
+
419
def get_videos_info(repo_id: str, local_dir: Path, video_keys: list[str], branch: str) -> dict:
    """Download the first-episode video for each camera key and probe its metadata.

    Returns a dict mapping each video key to the info dict produced by get_video_info.
    """
    # Assumes first episode
    video_files = []
    for vid_key in video_keys:
        video_files.append(DEFAULT_VIDEO_PATH.format(episode_chunk=0, video_key=vid_key, episode_index=0))
    hub_api = HfApi()
    hub_api.snapshot_download(
        repo_id=repo_id, repo_type="dataset", local_dir=local_dir, revision=branch, allow_patterns=video_files
    )
    return {
        key: get_video_info(local_dir / path)
        for key, path in zip(video_keys, video_files, strict=True)
    }
434
+
435
+
436
def convert_dataset(
    repo_id: str,
    local_dir: Path,
    single_task: str | None = None,
    tasks_path: Path | None = None,
    tasks_col: Path | None = None,
    robot_config: RobotConfig | None = None,
    test_branch: str | None = None,
    **card_kwargs,
):
    """Convert a v1.x LeRobot dataset on the hub to the v2.0 layout and push the result.

    Downloads the v1.x snapshot, rebuilds tasks/episodes/info metadata, splits data into
    one parquet per episode, reorganizes videos, replaces the hub repo contents on
    `branch`, and pushes an updated dataset card.

    Exactly one of `single_task`, `tasks_path`, `tasks_col` must be provided.

    Args:
        repo_id: Hub dataset repository id (e.g. "lerobot/pusht").
        local_dir: Scratch directory for the downloaded v1.x copy and the generated v2.0 copy.
        single_task: One task description applied to every episode.
        tasks_path: Path to a json mapping episode_index -> task string.
        tasks_col: Column of the dataset containing per-frame language instructions.
        robot_config: Optional robot config used to name motor/camera features.
        test_branch: If set, push to this branch instead of "main" (no v2.0 tag is created).
        **card_kwargs: Extra fields forwarded to create_lerobot_dataset_card.
    """
    v1 = get_safe_version(repo_id, V16)
    v1x_dir = local_dir / V16 / repo_id
    v20_dir = local_dir / V20 / repo_id
    v1x_dir.mkdir(parents=True, exist_ok=True)
    v20_dir.mkdir(parents=True, exist_ok=True)

    # Download everything except videos (those are moved in-place on the hub later).
    hub_api = HfApi()
    hub_api.snapshot_download(
        repo_id=repo_id, repo_type="dataset", revision=v1, local_dir=v1x_dir, ignore_patterns="videos*/"
    )
    branch = "main"
    if test_branch:
        branch = test_branch
        create_branch(repo_id=repo_id, branch=test_branch, repo_type="dataset")

    metadata_v1 = load_json(v1x_dir / V1_INFO_PATH)
    dataset = datasets.load_dataset("parquet", data_dir=v1x_dir / "data", split="train")
    features = get_features_from_hf_dataset(dataset, robot_config)
    video_keys = [key for key, ft in features.items() if ft["dtype"] == "video"]

    # An existing language_instruction column takes precedence over --single-task.
    if single_task and "language_instruction" in dataset.column_names:
        logging.warning(
            "'single_task' provided but 'language_instruction' tasks_col found. Using 'language_instruction'.",
        )
        single_task = None
        tasks_col = "language_instruction"

    # Episodes & chunks
    episode_indices = sorted(dataset.unique("episode_index"))
    total_episodes = len(episode_indices)
    # Episode indices must be contiguous starting at 0.
    assert episode_indices == list(range(total_episodes))
    total_videos = total_episodes * len(video_keys)
    # Ceiling division: episodes are grouped into chunks of DEFAULT_CHUNK_SIZE.
    total_chunks = total_episodes // DEFAULT_CHUNK_SIZE
    if total_episodes % DEFAULT_CHUNK_SIZE != 0:
        total_chunks += 1

    # Tasks: build tasks_by_episodes as {episode_index: [task, ...]} whichever source is used.
    if single_task:
        tasks_by_episodes = dict.fromkeys(episode_indices, single_task)
        dataset, tasks = add_task_index_by_episodes(dataset, tasks_by_episodes)
        tasks_by_episodes = {ep_idx: [task] for ep_idx, task in tasks_by_episodes.items()}
    elif tasks_path:
        tasks_by_episodes = load_json(tasks_path)
        # json keys are strings; episode indices must be ints.
        tasks_by_episodes = {int(ep_idx): task for ep_idx, task in tasks_by_episodes.items()}
        dataset, tasks = add_task_index_by_episodes(dataset, tasks_by_episodes)
        tasks_by_episodes = {ep_idx: [task] for ep_idx, task in tasks_by_episodes.items()}
    elif tasks_col:
        dataset, tasks, tasks_by_episodes = add_task_index_from_tasks_col(dataset, tasks_col)
    else:
        raise ValueError

    # Sanity check: the global task list matches the union of per-episode tasks.
    assert set(tasks) == {task for ep_tasks in tasks_by_episodes.values() for task in ep_tasks}
    tasks = [{"task_index": task_idx, "task": task} for task_idx, task in enumerate(tasks)]
    write_jsonlines(tasks, v20_dir / TASKS_PATH)
    features["task_index"] = {
        "dtype": "int64",
        "shape": (1,),
        "names": None,
    }

    # Videos: move video files into the v2.0 chunked layout directly on the hub,
    # then probe one video per key to fill in shape/fps/encoding metadata.
    if video_keys:
        assert metadata_v1.get("video", False)
        dataset = dataset.remove_columns(video_keys)
        clean_gitattr = Path(
            hub_api.hf_hub_download(
                repo_id=GITATTRIBUTES_REF, repo_type="dataset", local_dir=local_dir, filename=".gitattributes"
            )
        ).absolute()
        with tempfile.TemporaryDirectory() as tmp_video_dir:
            move_videos(
                repo_id, video_keys, total_episodes, total_chunks, Path(tmp_video_dir), clean_gitattr, branch
            )
        videos_info = get_videos_info(repo_id, v1x_dir, video_keys=video_keys, branch=branch)
        for key in video_keys:
            features[key]["shape"] = (
                videos_info[key].pop("video.height"),
                videos_info[key].pop("video.width"),
                videos_info[key].pop("video.channels"),
            )
            features[key]["video_info"] = videos_info[key]
            # The probed video fps must match the declared dataset fps (small tolerance).
            assert math.isclose(videos_info[key]["video.fps"], metadata_v1["fps"], rel_tol=1e-3)
            if "encoding" in metadata_v1:
                assert videos_info[key]["video.pix_fmt"] == metadata_v1["encoding"]["pix_fmt"]
    else:
        assert metadata_v1.get("video", 0) == 0
        videos_info = None

    # Split data into 1 parquet file by episode
    episode_lengths = split_parquet_by_episodes(dataset, total_episodes, total_chunks, v20_dir)

    if robot_config is not None:
        robot_type = robot_config.type
        repo_tags = [robot_type]
    else:
        robot_type = "unknown"
        repo_tags = None

    # Episodes
    episodes = [
        {"episode_index": ep_idx, "tasks": tasks_by_episodes[ep_idx], "length": episode_lengths[ep_idx]}
        for ep_idx in episode_indices
    ]
    write_jsonlines(episodes, v20_dir / EPISODES_PATH)

    # Assemble metadata v2.0
    metadata_v2_0 = {
        "codebase_version": V20,
        "robot_type": robot_type,
        "total_episodes": total_episodes,
        "total_frames": len(dataset),
        "total_tasks": len(tasks),
        "total_videos": total_videos,
        "total_chunks": total_chunks,
        "chunks_size": DEFAULT_CHUNK_SIZE,
        "fps": metadata_v1["fps"],
        "splits": {"train": f"0:{total_episodes}"},
        "data_path": DEFAULT_PARQUET_PATH,
        "video_path": DEFAULT_VIDEO_PATH if video_keys else None,
        "features": features,
    }
    write_json(metadata_v2_0, v20_dir / INFO_PATH)
    convert_stats_to_json(v1x_dir, v20_dir)
    card = create_lerobot_dataset_card(tags=repo_tags, dataset_info=metadata_v2_0, **card_kwargs)

    # Best-effort removal of the old layout on the hub; folders may not exist.
    with contextlib.suppress(EntryNotFoundError, HfHubHTTPError):
        hub_api.delete_folder(repo_id=repo_id, path_in_repo="data", repo_type="dataset", revision=branch)

    with contextlib.suppress(EntryNotFoundError, HfHubHTTPError):
        hub_api.delete_folder(repo_id=repo_id, path_in_repo="meta_data", repo_type="dataset", revision=branch)

    with contextlib.suppress(EntryNotFoundError, HfHubHTTPError):
        hub_api.delete_folder(repo_id=repo_id, path_in_repo="meta", repo_type="dataset", revision=branch)

    hub_api.upload_folder(
        repo_id=repo_id,
        path_in_repo="data",
        folder_path=v20_dir / "data",
        repo_type="dataset",
        revision=branch,
    )
    hub_api.upload_folder(
        repo_id=repo_id,
        path_in_repo="meta",
        folder_path=v20_dir / "meta",
        repo_type="dataset",
        revision=branch,
    )

    card.push_to_hub(repo_id=repo_id, repo_type="dataset", revision=branch)

    # Only tag the release when converting for real (not on a test branch).
    if not test_branch:
        create_branch(repo_id=repo_id, branch=V20, repo_type="dataset")
599
+
600
+
601
def main():
    """CLI entry point: parse arguments and convert a v1.x hub dataset to the v2.0 format."""
    parser = argparse.ArgumentParser()
    # Exactly one task source must be given: a literal description, a column name, or a json file.
    task_args = parser.add_mutually_exclusive_group(required=True)

    parser.add_argument(
        "--repo-id",
        type=str,
        required=True,
        help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset (e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).",
    )
    task_args.add_argument(
        "--single-task",
        type=str,
        help="A short but accurate description of the single task performed in the dataset.",
    )
    task_args.add_argument(
        "--tasks-col",
        type=str,
        help="The name of the column containing language instructions",
    )
    task_args.add_argument(
        "--tasks-path",
        type=Path,
        help="The path to a .json file containing one language instruction for each episode_index",
    )
    parser.add_argument(
        "--robot",
        type=str,
        default=None,
        help="Robot config used for the dataset during conversion (e.g. 'koch', 'aloha', 'so100', etc.)",
    )
    parser.add_argument(
        "--local-dir",
        type=Path,
        default=None,
        help="Local directory to store the dataset during conversion. Defaults to /tmp/lerobot_dataset_v2",
    )
    parser.add_argument(
        "--license",
        type=str,
        default="apache-2.0",
        # Bug fix: the help text previously claimed the default was "mit", contradicting default=.
        help="Repo license. Must be one of https://huggingface.co/docs/hub/repositories-licenses. Defaults to apache-2.0.",
    )
    parser.add_argument(
        "--test-branch",
        type=str,
        default=None,
        help="Repo branch to test your conversion first (e.g. 'v2.0.test')",
    )

    args = parser.parse_args()
    if not args.local_dir:
        args.local_dir = Path("/tmp/lerobot_dataset_v2")

    # Bug fix: robot_config must be defined even when --robot is omitted; previously the
    # variable was only assigned inside `if args.robot is not None`, so the call below
    # raised NameError for the common no-robot case.
    robot_config = make_robot_config(args.robot) if args.robot is not None else None

    del args.robot

    convert_dataset(**vars(args), robot_config=robot_config)


if __name__ == "__main__":
    main()
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/v21/_remove_language_instruction.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ import traceback
17
+ from pathlib import Path
18
+
19
+ from datasets import get_dataset_config_info
20
+ from huggingface_hub import HfApi
21
+
22
+ from lerobot import available_datasets
23
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata
24
+ from lerobot.common.datasets.utils import INFO_PATH, write_info
25
+ from lerobot.common.datasets.v21.convert_dataset_v20_to_v21 import V20, SuppressWarnings
26
+
27
# Local scratch directory for conversion artifacts and the per-run log file.
LOCAL_DIR = Path("data/")

# Shared hub client reused by fix_dataset() across all repos.
hub_api = HfApi()
30
+
31
+
32
def fix_dataset(repo_id: str) -> str:
    """Remove a stale 'language_instruction' feature from a v2.0 dataset's info.json.

    Compares the features declared in the dataset metadata against the columns actually
    present in the parquet files. If (and only if) the single difference is a
    'language_instruction' entry present in metadata but absent from parquet, drops it
    from info.json and opens a PR on the hub with the fix.

    Args:
        repo_id: Hub dataset repository id.

    Returns:
        A one-line status string (skipped / success with PR url).

    Raises:
        ValueError: if the parquet files contain columns not declared in info.json.
        AssertionError: if the metadata/parquet difference is anything other than
            {'language_instruction'}.
    """
    if not hub_api.revision_exists(repo_id, V20, repo_type="dataset"):
        return f"{repo_id}: skipped (not in {V20})."

    dataset_info = get_dataset_config_info(repo_id, "default")
    with SuppressWarnings():
        lerobot_metadata = LeRobotDatasetMetadata(repo_id, revision=V20, force_cache_sync=True)

    # Video features live in separate mp4 files, so they are not expected in the parquet schema.
    meta_features = {key for key, ft in lerobot_metadata.features.items() if ft["dtype"] != "video"}
    parquet_features = set(dataset_info.features)

    diff_parquet_meta = parquet_features - meta_features
    diff_meta_parquet = meta_features - parquet_features

    if diff_parquet_meta:
        raise ValueError(f"In parquet not in info.json: {parquet_features - meta_features}")

    if not diff_meta_parquet:
        return f"{repo_id}: skipped (no diff)"

    if diff_meta_parquet:
        logging.warning(f"In info.json not in parquet: {meta_features - parquet_features}")
        # Only the known 'language_instruction' leftover is safe to auto-remove.
        assert diff_meta_parquet == {"language_instruction"}
        lerobot_metadata.features.pop("language_instruction")
        write_info(lerobot_metadata.info, lerobot_metadata.root)
        commit_info = hub_api.upload_file(
            path_or_fileobj=lerobot_metadata.root / INFO_PATH,
            path_in_repo=INFO_PATH,
            repo_id=repo_id,
            repo_type="dataset",
            revision=V20,
            commit_message="Remove 'language_instruction'",
            create_pr=True,
        )
    return f"{repo_id}: success - PR: {commit_info.pr_url}"
67
+
68
+
69
def batch_fix():
    """Run fix_dataset over every repo in available_datasets, appending one status line per repo to a log file."""
    LOCAL_DIR.mkdir(parents=True, exist_ok=True)
    logfile = LOCAL_DIR / "fix_features_v20.txt"
    for num, repo_id in enumerate(available_datasets):
        print(f"\nConverting {repo_id} ({num}/{len(available_datasets)})")
        print("---------------------------------------------------------")
        try:
            status = fix_dataset(repo_id)
        except Exception:
            status = f"{repo_id}: failed\n {traceback.format_exc()}"

        logging.info(status)
        with logfile.open("a") as log_stream:
            log_stream.write(status + "\n")


if __name__ == "__main__":
    batch_fix()
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/v21/batch_convert_dataset_v20_to_v21.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """
18
+ This script is for internal use to convert all datasets under the 'lerobot' hub user account to v2.1.
19
+ """
20
+
21
+ import traceback
22
+ from pathlib import Path
23
+
24
+ from huggingface_hub import HfApi
25
+
26
+ from lerobot import available_datasets
27
+ from lerobot.common.datasets.v21.convert_dataset_v20_to_v21 import V21, convert_dataset
28
+
29
# Local scratch directory for the batch-conversion log file.
LOCAL_DIR = Path("data/")
30
+
31
+
32
def batch_convert():
    """Convert every repo in available_datasets to v2.1, appending one status line per repo to a log file."""
    LOCAL_DIR.mkdir(parents=True, exist_ok=True)
    logfile = LOCAL_DIR / "conversion_log_v21.txt"
    hub_api = HfApi()
    for num, repo_id in enumerate(available_datasets):
        print(f"\nConverting {repo_id} ({num}/{len(available_datasets)})")
        print("---------------------------------------------------------")
        try:
            already_converted = hub_api.revision_exists(repo_id, V21, repo_type="dataset")
            if already_converted:
                status = f"{repo_id}: success (already in {V21})."
            else:
                convert_dataset(repo_id)
                status = f"{repo_id}: success."
        except Exception:
            status = f"{repo_id}: failed\n {traceback.format_exc()}"

        with logfile.open("a") as log_stream:
            log_stream.write(status + "\n")


if __name__ == "__main__":
    batch_convert()
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/v21/convert_dataset_v20_to_v21.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 2.0 to
17
+ 2.1. It will:
18
+
19
+ - Generate per-episodes stats and writes them in `episodes_stats.jsonl`
20
+ - Check consistency between these new stats and the old ones.
21
+ - Remove the deprecated `stats.json`.
22
+ - Update codebase_version in `info.json`.
23
+ - Push this new version to the hub on the 'main' branch and tags it with "v2.1".
24
+
25
+ Usage:
26
+
27
+ ```bash
28
+ python lerobot/common/datasets/v21/convert_dataset_v20_to_v21.py \
29
+ --repo-id=aliberts/koch_tutorial
30
+ ```
31
+
32
+ """
33
+
34
+ import argparse
35
+ import logging
36
+
37
+ from huggingface_hub import HfApi
38
+
39
+ from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
40
+ from lerobot.common.datasets.utils import EPISODES_STATS_PATH, STATS_PATH, load_stats, write_info
41
+ from lerobot.common.datasets.v21.convert_stats import check_aggregate_stats, convert_stats
42
+
43
# Codebase-version tags used as hub revisions for the source and target formats.
V20 = "v2.0"
V21 = "v2.1"
45
+
46
+
47
class SuppressWarnings:
    """Context manager that raises the root logger threshold to ERROR, restoring it on exit."""

    def __enter__(self):
        root_logger = logging.getLogger()
        self.previous_level = root_logger.getEffectiveLevel()
        root_logger.setLevel(logging.ERROR)

    def __exit__(self, exc_type, exc_val, exc_tb):
        logging.getLogger().setLevel(self.previous_level)
54
+
55
+
56
def convert_dataset(
    repo_id: str,
    branch: str | None = None,
    num_workers: int = 4,
):
    """Convert a v2.0 LeRobot dataset on the hub to v2.1.

    Computes per-episode stats (episodes_stats.jsonl), checks them against the old
    aggregated stats, bumps codebase_version in info.json, pushes the new metadata,
    deletes the deprecated stats.json locally and on the hub, and tags the revision.

    Args:
        repo_id: Hub dataset repository id (e.g. "lerobot/pusht").
        branch: Branch to push to; defaults to the repo's main branch.
        num_workers: Worker threads for stats computation (0 = sequential).
    """
    with SuppressWarnings():
        dataset = LeRobotDataset(repo_id, revision=V20, force_cache_sync=True)

    # Start from a clean slate if a previous (possibly partial) conversion left this file behind.
    if (dataset.root / EPISODES_STATS_PATH).is_file():
        (dataset.root / EPISODES_STATS_PATH).unlink()

    convert_stats(dataset, num_workers=num_workers)
    ref_stats = load_stats(dataset.root)
    check_aggregate_stats(dataset, ref_stats)

    dataset.meta.info["codebase_version"] = CODEBASE_VERSION
    write_info(dataset.meta.info, dataset.root)

    dataset.push_to_hub(branch=branch, tag_version=False, allow_patterns="meta/")

    # delete old stats.json file
    # Bug fix: the original tested `.is_file` without calling it (a bound method is always
    # truthy), so `.unlink()` ran unconditionally and raised FileNotFoundError when the
    # file was already gone.
    if (dataset.root / STATS_PATH).is_file():
        (dataset.root / STATS_PATH).unlink()

    hub_api = HfApi()
    if hub_api.file_exists(
        repo_id=dataset.repo_id, filename=STATS_PATH, revision=branch, repo_type="dataset"
    ):
        hub_api.delete_file(
            path_in_repo=STATS_PATH, repo_id=dataset.repo_id, revision=branch, repo_type="dataset"
        )

    hub_api.create_tag(repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset")
89
+
90
+
91
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the v2.0 -> v2.1 conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo-id",
        type=str,
        required=True,
        help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset "
        "(e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).",
    )
    parser.add_argument(
        "--branch",
        type=str,
        default=None,
        help="Repo branch to push your dataset. Defaults to the main branch.",
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        default=4,
        help="Number of workers for parallelizing stats compute. Defaults to 4.",
    )

    args = parser.parse_args()
    # Argument names match convert_dataset's parameters exactly, so vars() can be splatted.
    convert_dataset(**vars(args))
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/v21/convert_stats.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from concurrent.futures import ThreadPoolExecutor, as_completed
16
+
17
+ import numpy as np
18
+ from tqdm import tqdm
19
+
20
+ from lerobot.common.datasets.compute_stats import aggregate_stats, get_feature_stats, sample_indices
21
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
22
+ from lerobot.common.datasets.utils import write_episode_stats
23
+
24
+
25
def sample_episode_video_frames(dataset: LeRobotDataset, episode_index: int, ft_key: str) -> np.ndarray:
    """Decode a subsample of frames from one episode's video for feature `ft_key`.

    Uses sample_indices() to pick a subset of the episode's frame indices (decoding every
    frame would be too slow for stats), then decodes just those frames.

    Returns:
        The decoded frames as a numpy array.
    """
    ep_len = dataset.meta.episodes[episode_index]["length"]
    sampled_indices = sample_indices(ep_len)
    # NOTE(review): relies on LeRobotDataset private helpers; the 0.0 first argument looks
    # like a query time offset — confirm against LeRobotDataset._get_query_timestamps.
    query_timestamps = dataset._get_query_timestamps(0.0, {ft_key: sampled_indices})
    video_frames = dataset._query_videos(query_timestamps, episode_index)
    return video_frames[ft_key].numpy()
31
+
32
+
33
def convert_episode_stats(dataset: LeRobotDataset, ep_idx: int):
    """Compute per-feature stats for episode `ep_idx` and store them in dataset.meta.episodes_stats.

    Visual features (image/video) are reduced over the batch and spatial axes; all other
    features are reduced over the first axis only.
    """
    ep_start_idx = dataset.episode_data_index["from"][ep_idx]
    ep_end_idx = dataset.episode_data_index["to"][ep_idx]
    ep_data = dataset.hf_dataset.select(range(ep_start_idx, ep_end_idx))

    ep_stats = {}
    for key, ft in dataset.features.items():
        if ft["dtype"] == "video":
            # We sample only for videos
            ep_ft_data = sample_episode_video_frames(dataset, ep_idx, key)
        else:
            ep_ft_data = np.array(ep_data[key])

        # For image/video data, reduce over axes 0, 2, 3 and keep axis 1
        # (presumably channels — confirm against get_feature_stats' expected layout).
        axes_to_reduce = (0, 2, 3) if ft["dtype"] in ["image", "video"] else 0
        keepdims = True if ft["dtype"] in ["image", "video"] else ep_ft_data.ndim == 1
        ep_stats[key] = get_feature_stats(ep_ft_data, axis=axes_to_reduce, keepdims=keepdims)

        if ft["dtype"] in ["image", "video"]:  # remove batch dim
            ep_stats[key] = {
                k: v if k == "count" else np.squeeze(v, axis=0) for k, v in ep_stats[key].items()
            }

    dataset.meta.episodes_stats[ep_idx] = ep_stats
56
+
57
+
58
def convert_stats(dataset: LeRobotDataset, num_workers: int = 0):
    """Compute stats for every episode (optionally in parallel) and write them to disk."""
    # The dataset must be loaded with all episodes, not a subset.
    assert dataset.episodes is None
    print("Computing episodes stats")
    total_episodes = dataset.meta.total_episodes
    episode_range = range(total_episodes)

    if num_workers > 0:
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            pending = {
                executor.submit(convert_episode_stats, dataset, idx): idx for idx in episode_range
            }
            for done in tqdm(as_completed(pending), total=total_episodes):
                done.result()
    else:
        for idx in tqdm(episode_range):
            convert_episode_stats(dataset, idx)

    # Persist each episode's stats after all computations have finished.
    for idx in tqdm(episode_range):
        write_episode_stats(idx, dataset.meta.episodes_stats[idx], dataset.root)
76
+
77
+
78
def check_aggregate_stats(
    dataset: LeRobotDataset,
    reference_stats: dict[str, dict[str, np.ndarray]],
    video_rtol_atol: tuple[float, float] = (1e-2, 1e-2),
    default_rtol_atol: tuple[float, float] = (5e-6, 6e-5),
):
    """Verifies that the aggregated stats from episodes_stats are close to reference stats.

    Args:
        dataset: Dataset whose per-episode stats were just computed.
        reference_stats: Old aggregated stats (feature -> stat name -> array) to compare against.
        video_rtol_atol: (rtol, atol) tolerances for video features (looser, since video
            stats are computed on a frame sub-sample).
        default_rtol_atol: (rtol, atol) tolerances for all other features.

    Raises:
        AssertionError: via np.testing.assert_allclose when any stat deviates beyond tolerance.
    """
    agg_stats = aggregate_stats(list(dataset.meta.episodes_stats.values()))
    for key, ft in dataset.features.items():
        # These values might need some fine-tuning
        if ft["dtype"] == "video":
            # to account for image sub-sampling
            rtol, atol = video_rtol_atol
        else:
            rtol, atol = default_rtol_atol

        # Only compare stats that exist on both sides; extra computed stats are ignored.
        for stat, val in agg_stats[key].items():
            if key in reference_stats and stat in reference_stats[key]:
                err_msg = f"feature='{key}' stats='{stat}'"
                np.testing.assert_allclose(
                    val, reference_stats[key][stat], rtol=rtol, atol=atol, err_msg=err_msg
                )
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/datasets/video_utils.py ADDED
@@ -0,0 +1,432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import importlib
17
+ import json
18
+ import logging
19
+ import subprocess
20
+ import warnings
21
+ from collections import OrderedDict
22
+ from dataclasses import dataclass, field
23
+ from pathlib import Path
24
+ from typing import Any, ClassVar
25
+
26
+ import pyarrow as pa
27
+ import torch
28
+ import torchvision
29
+ from datasets.features.features import register_feature
30
+ from PIL import Image
31
+
32
+
33
def get_safe_default_codec():
    """Return "torchcodec" when it is importable, otherwise fall back to "pyav" with a warning."""
    if importlib.util.find_spec("torchcodec") is None:
        logging.warning(
            "'torchcodec' is not available in your platform, falling back to 'pyav' as a default decoder"
        )
        return "pyav"
    return "torchcodec"
41
+
42
+
43
+ def decode_video_frames(
44
+ video_path: Path | str,
45
+ timestamps: list[float],
46
+ tolerance_s: float,
47
+ backend: str | None = None,
48
+ ) -> torch.Tensor:
49
+ """
50
+ Decodes video frames using the specified backend.
51
+
52
+ Args:
53
+ video_path (Path): Path to the video file.
54
+ timestamps (list[float]): List of timestamps to extract frames.
55
+ tolerance_s (float): Allowed deviation in seconds for frame retrieval.
56
+ backend (str, optional): Backend to use for decoding. Defaults to "torchcodec" when available in the platform; otherwise, defaults to "pyav"..
57
+
58
+ Returns:
59
+ torch.Tensor: Decoded frames.
60
+
61
+ Currently supports torchcodec on cpu and pyav.
62
+ """
63
+ if backend is None:
64
+ backend = get_safe_default_codec()
65
+ if backend == "torchcodec":
66
+ return decode_video_frames_torchcodec(video_path, timestamps, tolerance_s)
67
+ elif backend in ["pyav", "video_reader"]:
68
+ return decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend)
69
+ else:
70
+ raise ValueError(f"Unsupported video backend: {backend}")
71
+
72
+
73
def decode_video_frames_torchvision(
    video_path: Path | str,
    timestamps: list[float],
    tolerance_s: float,
    backend: str = "pyav",
    log_loaded_timestamps: bool = False,
) -> torch.Tensor:
    """Loads frames associated to the requested timestamps of a video

    The backend can be either "pyav" (default) or "video_reader".
    "video_reader" requires installing torchvision from source, see:
    https://github.com/pytorch/vision/blob/main/torchvision/csrc/io/decoder/gpu/README.rst
    (note that you need to compile against ffmpeg<4.3)

    While both use cpu, "video_reader" is supposedly faster than "pyav" but requires additional setup.
    For more info on video decoding, see `benchmark/video/README.md`

    See torchvision doc for more info on these two backends:
    https://pytorch.org/vision/0.18/index.html?highlight=backend#torchvision.set_video_backend

    Note: Video benefits from inter-frame compression. Instead of storing every frame individually,
    the encoder stores a reference frame (or a key frame) and subsequent frames as differences relative to
    that key frame. As a consequence, to access a requested frame, we need to load the preceding key frame,
    and all subsequent frames until reaching the requested frame. The number of key frames in a video
    can be adjusted during encoding to take into account decoding time and video size in bytes.

    Returns frames as float32 tensors in [0, 1], channel-first, one per requested timestamp.
    Raises AssertionError when no decoded frame lies within `tolerance_s` of a requested timestamp.
    """
    video_path = str(video_path)

    # set backend
    keyframes_only = False
    torchvision.set_video_backend(backend)
    if backend == "pyav":
        keyframes_only = True  # pyav doesn't support accurate seek

    # set a video stream reader
    # TODO(rcadene): also load audio stream at the same time
    reader = torchvision.io.VideoReader(video_path, "video")

    # set the first and last requested timestamps
    # Note: previous timestamps are usually loaded, since we need to access the previous key frame
    first_ts = min(timestamps)
    last_ts = max(timestamps)

    # access closest key frame of the first requested frame
    # Note: closest key frame timestamp is usually smaller than `first_ts` (e.g. key frame can be the first frame of the video)
    # for details on what `seek` is doing see: https://pyav.basswood-io.com/docs/stable/api/container.html?highlight=inputcontainer#av.container.InputContainer.seek
    reader.seek(first_ts, keyframes_only=keyframes_only)

    # load all frames until last requested frame
    loaded_frames = []
    loaded_ts = []
    for frame in reader:
        current_ts = frame["pts"]
        if log_loaded_timestamps:
            logging.info(f"frame loaded at timestamp={current_ts:.4f}")
        loaded_frames.append(frame["data"])
        loaded_ts.append(current_ts)
        if current_ts >= last_ts:
            break

    # Explicitly release the decoder; pyav containers are not closed by dropping the reader.
    if backend == "pyav":
        reader.container.close()

    reader = None

    query_ts = torch.tensor(timestamps)
    loaded_ts = torch.tensor(loaded_ts)

    # compute distances between each query timestamp and timestamps of all loaded frames
    dist = torch.cdist(query_ts[:, None], loaded_ts[:, None], p=1)
    min_, argmin_ = dist.min(1)

    is_within_tol = min_ < tolerance_s
    assert is_within_tol.all(), (
        f"One or several query timestamps unexpectedly violate the tolerance ({min_[~is_within_tol]} > {tolerance_s=})."
        "It means that the closest frame that can be loaded from the video is too far away in time."
        "This might be due to synchronization issues with timestamps during data collection."
        "To be safe, we advise to ignore this item during training."
        f"\nqueried timestamps: {query_ts}"
        f"\nloaded timestamps: {loaded_ts}"
        f"\nvideo: {video_path}"
        f"\nbackend: {backend}"
    )

    # get closest frames to the query timestamps
    closest_frames = torch.stack([loaded_frames[idx] for idx in argmin_])
    closest_ts = loaded_ts[argmin_]

    if log_loaded_timestamps:
        logging.info(f"{closest_ts=}")

    # convert to the pytorch format which is float32 in [0,1] range (and channel first)
    closest_frames = closest_frames.type(torch.float32) / 255

    assert len(timestamps) == len(closest_frames)
    return closest_frames
169
+
170
+
171
def decode_video_frames_torchcodec(
    video_path: Path | str,
    timestamps: list[float],
    tolerance_s: float,
    device: str = "cpu",
    log_loaded_timestamps: bool = False,
) -> torch.Tensor:
    """Load the frames nearest to the requested timestamps of a video using torchcodec.

    Note: Setting device="cuda" outside the main process, e.g. in data loader workers,
    will lead to CUDA initialization errors.

    Note: Video benefits from inter-frame compression. Instead of storing every frame
    individually, the encoder stores a reference (key) frame and subsequent frames as
    differences relative to it. Accessing a requested frame therefore requires decoding
    from the preceding key frame onward; the key-frame interval chosen at encoding time
    trades decoding speed against file size.

    Returns:
        Tensor of shape (len(timestamps), C, H, W), float32 in [0, 1].
    """
    if not importlib.util.find_spec("torchcodec"):
        raise ImportError("torchcodec is required but not available.")
    from torchcodec.decoders import VideoDecoder

    decoder = VideoDecoder(video_path, device=device, seek_mode="approximate")
    avg_fps = decoder.metadata.average_fps

    # Map each requested timestamp to its nearest frame index at the average frame rate.
    frame_indices = [round(ts * avg_fps) for ts in timestamps]
    frames_batch = decoder.get_frames_at(indices=frame_indices)

    decoded_frames = []
    decoded_ts = []
    for frame, pts in zip(frames_batch.data, frames_batch.pts_seconds, strict=False):
        decoded_frames.append(frame)
        decoded_ts.append(pts.item())
        if log_loaded_timestamps:
            logging.info(f"Frame loaded at timestamp={pts:.4f}")

    query_ts = torch.tensor(timestamps)
    decoded_ts = torch.tensor(decoded_ts)

    # L1 distance between every requested timestamp and every decoded timestamp.
    gaps = torch.cdist(query_ts[:, None], decoded_ts[:, None], p=1)
    min_gap, nearest = gaps.min(1)

    within_tol = min_gap < tolerance_s
    assert within_tol.all(), (
        f"One or several query timestamps unexpectedly violate the tolerance ({min_gap[~within_tol]} > {tolerance_s=})."
        "It means that the closest frame that can be loaded from the video is too far away in time."
        "This might be due to synchronization issues with timestamps during data collection."
        "To be safe, we advise to ignore this item during training."
        f"\nqueried timestamps: {query_ts}"
        f"\nloaded timestamps: {decoded_ts}"
        f"\nvideo: {video_path}"
    )

    # Keep the nearest decoded frame for every query, in query order.
    closest_frames = torch.stack([decoded_frames[idx] for idx in nearest])
    if log_loaded_timestamps:
        closest_ts = decoded_ts[nearest]
        logging.info(f"{closest_ts=}")

    # pytorch image convention: float32 in [0, 1], channel first.
    closest_frames = closest_frames.type(torch.float32) / 255

    assert len(timestamps) == len(closest_frames)
    return closest_frames
244
+
245
+
246
def encode_video_frames(
    imgs_dir: Path | str,
    video_path: Path | str,
    fps: int,
    vcodec: str = "libsvtav1",
    pix_fmt: str = "yuv420p",
    g: int | None = 2,
    crf: int | None = 30,
    fast_decode: int = 0,
    log_level: str | None = "error",
    overwrite: bool = False,
) -> None:
    """Encode a directory of `frame_%06d.png` images into a video with ffmpeg.

    More info on ffmpeg arguments tuning on `benchmark/video/README.md`.

    Args:
        imgs_dir: Directory containing frames named `frame_%06d.png`.
        video_path: Output video file; parent directories are created.
        fps: Frame rate of the encoded video.
        vcodec: ffmpeg video codec (e.g. "libsvtav1").
        pix_fmt: Pixel format passed to ffmpeg.
        g: Group-of-pictures size (keyframe interval); skipped if None.
        crf: Constant rate factor (quality knob); skipped if None.
        fast_decode: When non-zero, request decoder-friendly encoding
            (`fast-decode=N` for libsvtav1, `-tune fastdecode` otherwise).
        log_level: Value for ffmpeg `-loglevel`; skipped if None.
        overwrite: Pass `-y` so ffmpeg overwrites an existing output file.

    Raises:
        subprocess.CalledProcessError: If ffmpeg exits with a non-zero status.
        OSError: If ffmpeg succeeded but the output file is missing.
    """
    video_path = Path(video_path)
    imgs_dir = Path(imgs_dir)
    video_path.parent.mkdir(parents=True, exist_ok=True)

    ffmpeg_args = OrderedDict(
        [
            ("-f", "image2"),
            ("-r", str(fps)),
            ("-i", str(imgs_dir / "frame_%06d.png")),
            ("-vcodec", vcodec),
            ("-pix_fmt", pix_fmt),
        ]
    )

    if g is not None:
        ffmpeg_args["-g"] = str(g)

    if crf is not None:
        ffmpeg_args["-crf"] = str(crf)

    if fast_decode:
        # libsvtav1 takes fast-decode through its private params; other codecs use -tune.
        key = "-svtav1-params" if vcodec == "libsvtav1" else "-tune"
        value = f"fast-decode={fast_decode}" if vcodec == "libsvtav1" else "fastdecode"
        ffmpeg_args[key] = value

    if log_level is not None:
        ffmpeg_args["-loglevel"] = str(log_level)

    ffmpeg_args = [item for pair in ffmpeg_args.items() for item in pair]
    if overwrite:
        ffmpeg_args.append("-y")

    ffmpeg_cmd = ["ffmpeg"] + ffmpeg_args + [str(video_path)]
    # redirect stdin to subprocess.DEVNULL to prevent reading random keyboard inputs from terminal
    subprocess.run(ffmpeg_cmd, check=True, stdin=subprocess.DEVNULL)

    if not video_path.exists():
        raise OSError(
            f"Video encoding did not work. File not found: {video_path}. "
            # Fix: join with spaces so the suggested debug command is actually runnable
            # (''.join produced one fused, unusable token string).
            f"Try running the command manually to debug: `{' '.join(ffmpeg_cmd)}`"
        )
300
+
301
+
302
@dataclass
class VideoFrame:
    # TODO(rcadene, lhoestq): move to Hugging Face `datasets` repo
    """
    Provides a type for a dataset containing video frames.

    A frame is stored as a reference into a video — the video file path plus the
    timestamp (in seconds) of the frame inside that video — rather than raw pixels.

    Example:

    ```python
    data_dict = [{"image": {"path": "videos/episode_0.mp4", "timestamp": 0.3}}]
    features = {"image": VideoFrame()}
    Dataset.from_dict(data_dict, features=Features(features))
    ```
    """

    # Arrow storage layout: a struct of the relative video path and the frame timestamp.
    pa_type: ClassVar[Any] = pa.struct({"path": pa.string(), "timestamp": pa.float32()})
    # NOTE(review): presumably the marker `datasets` uses to identify this feature type
    # on (de)serialization — confirm against `register_feature` semantics.
    _type: str = field(default="VideoFrame", init=False, repr=False)

    def __call__(self):
        # Returns the Arrow storage type (feature objects are called by `datasets`).
        return self.pa_type
322
+
323
+
324
# Registering `VideoFrame` with `datasets` emits an experimental-API warning on every
# import; suppress exactly that message so library users don't see spurious noise.
with warnings.catch_warnings():
    warnings.filterwarnings(
        "ignore",
        "'register_feature' is experimental and might be subject to breaking changes in the future.",
        category=UserWarning,
    )
    # to make VideoFrame available in HuggingFace `datasets`
    register_feature(VideoFrame, "VideoFrame")
332
+
333
+
334
def get_audio_info(video_path: Path | str) -> dict:
    """Probe the first audio stream of a media file with ffprobe.

    Returns {"has_audio": False} when no audio stream is present; otherwise
    "has_audio": True plus codec/channel/rate metadata under "audio.*" keys
    (missing fields default to None).

    Raises:
        RuntimeError: If ffprobe exits with a non-zero status.
    """
    probe_cmd = [
        "ffprobe",
        "-v", "error",
        "-select_streams", "a:0",
        "-show_entries", "stream=channels,codec_name,bit_rate,sample_rate,bit_depth,channel_layout,duration",
        "-of", "json",
        str(video_path),
    ]
    result = subprocess.run(probe_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    if result.returncode != 0:
        raise RuntimeError(f"Error running ffprobe: {result.stderr}")

    probe = json.loads(result.stdout)
    streams = probe.get("streams")
    if not streams:
        return {"has_audio": False}
    stream = streams[0]

    def _as_int(key: str) -> int | None:
        # ffprobe reports numeric fields as strings; empty/missing -> None.
        raw = stream.get(key)
        return int(raw) if raw else None

    return {
        "has_audio": True,
        "audio.channels": stream.get("channels"),
        "audio.codec": stream.get("codec_name"),
        "audio.bit_rate": _as_int("bit_rate"),
        "audio.sample_rate": _as_int("sample_rate"),
        "audio.bit_depth": stream.get("bit_depth"),
        "audio.channel_layout": stream.get("channel_layout"),
    }
368
+
369
+
370
def get_video_info(video_path: Path | str) -> dict:
    """Probe the first video stream of a file with ffprobe.

    Returns fps, resolution, channel count, codec and pixel format under
    "video.*" keys, merged with the audio metadata from `get_audio_info`.

    Raises:
        RuntimeError: If ffprobe exits with a non-zero status.
    """
    probe_cmd = [
        "ffprobe",
        "-v", "error",
        "-select_streams", "v:0",
        "-show_entries", "stream=r_frame_rate,width,height,codec_name,nb_frames,duration,pix_fmt",
        "-of", "json",
        str(video_path),
    ]
    result = subprocess.run(probe_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    if result.returncode != 0:
        raise RuntimeError(f"Error running ffprobe: {result.stderr}")

    stream = json.loads(result.stdout)["streams"][0]

    # ffprobe reports the frame rate as a "num/denom" fraction, e.g. "30000/1001".
    num, denom = (int(part) for part in stream["r_frame_rate"].split("/"))

    return {
        "video.fps": num / denom,
        "video.height": stream["height"],
        "video.width": stream["width"],
        "video.channels": get_video_pixel_channels(stream["pix_fmt"]),
        "video.codec": stream["codec_name"],
        "video.pix_fmt": stream["pix_fmt"],
        "video.is_depth_map": False,
        **get_audio_info(video_path),
    }
409
+
410
+
411
def get_video_pixel_channels(pix_fmt: str) -> int:
    """Infer the number of image channels from an ffmpeg pixel format string.

    Args:
        pix_fmt: ffmpeg pixel format, e.g. "yuv420p", "rgb24", "gray".

    Returns:
        1 for grayscale/depth formats, 4 for formats with an alpha plane,
        3 for regular RGB/YUV formats.

    Raises:
        ValueError: If the format matches none of the known families.
    """
    # Order matters: alpha variants ("rgba"/"yuva") contain "rgb"/"yuv" as a
    # substring, so they must be tested before the 3-channel families.
    if "gray" in pix_fmt or "depth" in pix_fmt or "monochrome" in pix_fmt:
        return 1
    elif "rgba" in pix_fmt or "yuva" in pix_fmt:
        return 4
    elif "rgb" in pix_fmt or "yuv" in pix_fmt:
        return 3
    else:
        # Fix: include the offending value so unsupported formats are debuggable.
        raise ValueError(f"Unknown format: {pix_fmt}")
420
+
421
+
422
def get_image_pixel_channels(image: Image):
    """Return the channel count of a PIL image based on its `mode`.

    Args:
        image: PIL image; only its `mode` attribute is read.

    Returns:
        1, 2, 3 or 4 for modes "L", "LA", "RGB" and "RGBA" respectively.

    Raises:
        ValueError: For any other mode (e.g. "P", "CMYK", "I;16").
    """
    if image.mode == "L":
        return 1  # Grayscale
    elif image.mode == "LA":
        return 2  # Grayscale + Alpha
    elif image.mode == "RGB":
        return 3  # RGB
    elif image.mode == "RGBA":
        return 4  # RGBA
    else:
        # Fix: include the offending mode so unsupported images are easy to
        # diagnose (mirrors the improved message in get_video_pixel_channels).
        raise ValueError(f"Unknown format: {image.mode}")
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/envs/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .configs import AlohaEnv, EnvConfig, PushtEnv, XarmEnv # noqa: F401
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/envs/configs.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import abc
16
+ from dataclasses import dataclass, field
17
+
18
+ import draccus
19
+
20
+ from lerobot.common.constants import ACTION, OBS_ENV, OBS_IMAGE, OBS_IMAGES, OBS_ROBOT
21
+ from lerobot.configs.types import FeatureType, PolicyFeature
22
+
23
+
24
@dataclass
class EnvConfig(draccus.ChoiceRegistry, abc.ABC):
    """Base class for simulation environment configs, registered by name.

    Subclasses register themselves with `@EnvConfig.register_subclass("<name>")`
    and must provide the keyword arguments used to build the gym environment.
    """

    # Gym task id (e.g. "AlohaInsertion-v0"); None until a subclass sets it.
    task: str | None = None
    # Environment control frequency in frames per second.
    fps: int = 30
    # Declared policy features and the mapping from env keys to policy keys.
    features: dict[str, PolicyFeature] = field(default_factory=dict)
    features_map: dict[str, str] = field(default_factory=dict)

    @property
    def type(self) -> str:
        """Name under which this subclass was registered (e.g. "aloha")."""
        return self.get_choice_name(self.__class__)

    # Fix: `abc.abstractproperty` has been deprecated since Python 3.3;
    # the supported spelling is `@property` stacked on `@abc.abstractmethod`.
    @property
    @abc.abstractmethod
    def gym_kwargs(self) -> dict:
        """Keyword arguments forwarded to `gym.make` for this environment."""
        raise NotImplementedError()
38
+
39
+
40
@EnvConfig.register_subclass("aloha")
@dataclass
class AlohaEnv(EnvConfig):
    """Config for the gym-aloha bimanual manipulation environments."""

    task: str = "AlohaInsertion-v0"
    fps: int = 50
    episode_length: int = 400
    obs_type: str = "pixels_agent_pos"
    render_mode: str = "rgb_array"
    features: dict[str, PolicyFeature] = field(
        default_factory=lambda: {
            "action": PolicyFeature(type=FeatureType.ACTION, shape=(14,)),
        }
    )
    features_map: dict[str, str] = field(
        default_factory=lambda: {
            "action": ACTION,
            "agent_pos": OBS_ROBOT,
            "top": f"{OBS_IMAGE}.top",
            "pixels/top": f"{OBS_IMAGES}.top",
        }
    )

    def __post_init__(self):
        # Extend the declared features with whatever the chosen obs_type exposes.
        if self.obs_type == "pixels":
            self.features["top"] = PolicyFeature(type=FeatureType.VISUAL, shape=(480, 640, 3))
        elif self.obs_type == "pixels_agent_pos":
            self.features["agent_pos"] = PolicyFeature(type=FeatureType.STATE, shape=(14,))
            self.features["pixels/top"] = PolicyFeature(type=FeatureType.VISUAL, shape=(480, 640, 3))

    @property
    def gym_kwargs(self) -> dict:
        # Arguments forwarded verbatim to gym.make.
        return dict(
            obs_type=self.obs_type,
            render_mode=self.render_mode,
            max_episode_steps=self.episode_length,
        )
76
+
77
+
78
@EnvConfig.register_subclass("pusht")
@dataclass
class PushtEnv(EnvConfig):
    """Config for the gym-pusht 2D pushing environment."""

    task: str = "PushT-v0"
    fps: int = 10
    episode_length: int = 300
    obs_type: str = "pixels_agent_pos"
    render_mode: str = "rgb_array"
    visualization_width: int = 384
    visualization_height: int = 384
    features: dict[str, PolicyFeature] = field(
        default_factory=lambda: {
            "action": PolicyFeature(type=FeatureType.ACTION, shape=(2,)),
            "agent_pos": PolicyFeature(type=FeatureType.STATE, shape=(2,)),
        }
    )
    features_map: dict[str, str] = field(
        default_factory=lambda: {
            "action": ACTION,
            "agent_pos": OBS_ROBOT,
            "environment_state": OBS_ENV,
            "pixels": OBS_IMAGE,
        }
    )

    def __post_init__(self):
        # Extend the declared features with whatever the chosen obs_type exposes.
        if self.obs_type == "pixels_agent_pos":
            self.features["pixels"] = PolicyFeature(type=FeatureType.VISUAL, shape=(384, 384, 3))
        elif self.obs_type == "environment_state_agent_pos":
            self.features["environment_state"] = PolicyFeature(type=FeatureType.ENV, shape=(16,))

    @property
    def gym_kwargs(self) -> dict:
        # Arguments forwarded verbatim to gym.make.
        return dict(
            obs_type=self.obs_type,
            render_mode=self.render_mode,
            visualization_width=self.visualization_width,
            visualization_height=self.visualization_height,
            max_episode_steps=self.episode_length,
        )
118
+
119
+
120
@EnvConfig.register_subclass("xarm")
@dataclass
class XarmEnv(EnvConfig):
    """Config for the gym-xarm lifting environment."""

    task: str = "XarmLift-v0"
    fps: int = 15
    episode_length: int = 200
    obs_type: str = "pixels_agent_pos"
    render_mode: str = "rgb_array"
    visualization_width: int = 384
    visualization_height: int = 384
    features: dict[str, PolicyFeature] = field(
        default_factory=lambda: {
            "action": PolicyFeature(type=FeatureType.ACTION, shape=(4,)),
            "pixels": PolicyFeature(type=FeatureType.VISUAL, shape=(84, 84, 3)),
        }
    )
    features_map: dict[str, str] = field(
        default_factory=lambda: {
            "action": ACTION,
            "agent_pos": OBS_ROBOT,
            "pixels": OBS_IMAGE,
        }
    )

    def __post_init__(self):
        # The proprioceptive state is only exposed for the pixels_agent_pos variant.
        if self.obs_type == "pixels_agent_pos":
            self.features["agent_pos"] = PolicyFeature(type=FeatureType.STATE, shape=(4,))

    @property
    def gym_kwargs(self) -> dict:
        # Arguments forwarded verbatim to gym.make.
        return dict(
            obs_type=self.obs_type,
            render_mode=self.render_mode,
            visualization_width=self.visualization_width,
            visualization_height=self.visualization_height,
            max_episode_steps=self.episode_length,
        )
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/envs/factory.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import importlib
17
+
18
+ import gymnasium as gym
19
+
20
+ from lerobot.common.envs.configs import AlohaEnv, EnvConfig, PushtEnv, XarmEnv
21
+
22
+
23
def make_env_config(env_type: str, **kwargs) -> EnvConfig:
    """Build the `EnvConfig` subclass registered under `env_type`.

    Args:
        env_type: One of "aloha", "pusht" or "xarm".
        **kwargs: Forwarded to the chosen config's constructor.

    Raises:
        ValueError: If `env_type` does not name a known environment.
    """
    if env_type == "aloha":
        return AlohaEnv(**kwargs)
    elif env_type == "pusht":
        return PushtEnv(**kwargs)
    elif env_type == "xarm":
        return XarmEnv(**kwargs)
    else:
        # Fix: this function selects an environment type, not a policy type.
        raise ValueError(f"Env type '{env_type}' is not available.")
32
+
33
+
34
def make_env(cfg: EnvConfig, n_envs: int = 1, use_async_envs: bool = False) -> gym.vector.VectorEnv | None:
    """Makes a gym vector environment according to the config.

    Args:
        cfg (EnvConfig): the config of the environment to instantiate.
        n_envs (int, optional): The number of parallelized env to return. Defaults to 1.
        use_async_envs (bool, optional): Whether to return an AsyncVectorEnv or a SyncVectorEnv. Defaults to
            False.

    Raises:
        ValueError: if n_envs < 1
        ModuleNotFoundError: If the requested env package is not installed

    Returns:
        gym.vector.VectorEnv: The parallelized gym.env instance.
    """
    if n_envs < 1:
        # Fix: balanced backticks in the error message.
        raise ValueError("`n_envs` must be at least 1")

    package_name = f"gym_{cfg.type}"

    try:
        importlib.import_module(package_name)
    except ModuleNotFoundError as e:
        print(f"{package_name} is not installed. Please install it with `pip install 'lerobot[{cfg.type}]'`")
        raise e

    gym_handle = f"{package_name}/{cfg.task}"

    # batched version of the env that returns an observation of shape (b, c)
    env_cls = gym.vector.AsyncVectorEnv if use_async_envs else gym.vector.SyncVectorEnv
    env = env_cls(
        [lambda: gym.make(gym_handle, disable_env_checker=True, **cfg.gym_kwargs) for _ in range(n_envs)]
    )

    return env
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/envs/utils.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import warnings
17
+ from typing import Any
18
+
19
+ import einops
20
+ import gymnasium as gym
21
+ import numpy as np
22
+ import torch
23
+ from torch import Tensor
24
+
25
+ from lerobot.common.envs.configs import EnvConfig
26
+ from lerobot.common.utils.utils import get_channel_first_image_shape
27
+ from lerobot.configs.types import FeatureType, PolicyFeature
28
+
29
+
30
def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Tensor]:
    # TODO(aliberts, rcadene): refactor this to use features from the environment (no hardcoding)
    """Convert environment observation to LeRobot format observation.
    Args:
        observation: Dictionary of observation batches from a Gym vector environment.
    Returns:
        Dictionary of observation batches with keys renamed to LeRobot format and values as tensors.
    """
    processed: dict[str, Tensor] = {}

    if "pixels" in observations:
        pixels = observations["pixels"]
        if isinstance(pixels, dict):
            # One camera per key.
            images = {f"observation.images.{name}": arr for name, arr in pixels.items()}
        else:
            images = {"observation.image": pixels}

        for name, arr in images.items():
            # TODO(aliberts, rcadene): use transforms.ToTensor()?
            img = torch.from_numpy(arr)

            # sanity check that images are channel last
            _, h, w, c = img.shape
            assert c < h and c < w, f"expect channel last images, but instead got {img.shape=}"

            # sanity check that images are uint8
            assert img.dtype == torch.uint8, f"expect torch.uint8, but instead {img.dtype=}"

            # convert to channel first of type float32 in range [0,1]
            img = einops.rearrange(img, "b h w c -> b c h w").contiguous()
            img = img.type(torch.float32) / 255

            processed[name] = img

    if "environment_state" in observations:
        processed["observation.environment_state"] = torch.from_numpy(
            observations["environment_state"]
        ).float()

    # TODO(rcadene): enable pixels only baseline with `obs_type="pixels"` in environment by removing
    # requirement for "agent_pos"
    processed["observation.state"] = torch.from_numpy(observations["agent_pos"]).float()
    return processed
73
+
74
+
75
def env_to_policy_features(env_cfg: EnvConfig) -> dict[str, PolicyFeature]:
    # TODO(aliberts, rcadene): remove this hardcoding of keys and just use the nested keys as is
    # (need to also refactor preprocess_observation and externalize normalization from policies)
    """Translate an env config's features into policy features keyed by policy names.

    Visual features are converted to channel-first shapes; everything else is
    passed through unchanged.
    """
    out: dict[str, PolicyFeature] = {}
    for env_key, env_ft in env_cfg.features.items():
        if env_ft.type is FeatureType.VISUAL:
            if len(env_ft.shape) != 3:
                raise ValueError(f"Number of dimensions of {env_key} != 3 (shape={env_ft.shape})")
            mapped = PolicyFeature(type=env_ft.type, shape=get_channel_first_image_shape(env_ft.shape))
        else:
            mapped = env_ft
        out[env_cfg.features_map[env_key]] = mapped
    return out
93
+
94
+
95
def are_all_envs_same_type(env: gym.vector.VectorEnv) -> bool:
    """Return True when every sub-environment shares the exact type of the first one."""
    reference = type(env.envs[0])
    for sub_env in env.envs:
        if type(sub_env) is not reference:
            return False
    return True
98
+
99
+
100
def check_env_attributes_and_types(env: gym.vector.VectorEnv) -> None:
    """Warn (at most once each) when envs lack task attributes or mix environment types."""
    with warnings.catch_warnings():
        warnings.simplefilter("once", UserWarning)  # Apply filter only in this function

        first_env = env.envs[0]
        if not (hasattr(first_env, "task_description") and hasattr(first_env, "task")):
            warnings.warn(
                "The environment does not have 'task_description' and 'task'. Some policies require these features.",
                UserWarning,
                stacklevel=2,
            )
        if not are_all_envs_same_type(env):
            warnings.warn(
                "The environments have different types. Make sure you infer the right task from each environment. Empty task will be passed instead.",
                UserWarning,
                stacklevel=2,
            )
116
+
117
+
118
def add_envs_task(env: gym.vector.VectorEnv, observation: dict[str, Any]) -> dict[str, Any]:
    """Adds task feature to the observation dict with respect to the first environment attribute."""
    first_env = env.envs[0]
    if hasattr(first_env, "task_description"):
        observation["task"] = env.call("task_description")
    elif hasattr(first_env, "task"):
        observation["task"] = env.call("task")
    else:
        # Envs without language instructions, e.g. aloha transfer cube:
        # emit one empty task string per batched env.
        batch_size = observation[list(observation.keys())[0]].shape[0]
        observation["task"] = [""] * batch_size
    return observation
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/optim/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .optimizers import OptimizerConfig as OptimizerConfig
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/optim/factory.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+
18
+ from torch.optim import Optimizer
19
+ from torch.optim.lr_scheduler import LRScheduler
20
+
21
+ from lerobot.common.policies.pretrained import PreTrainedPolicy
22
+ from lerobot.configs.train import TrainPipelineConfig
23
+
24
+
25
def make_optimizer_and_scheduler(
    cfg: TrainPipelineConfig, policy: PreTrainedPolicy
) -> tuple[Optimizer, LRScheduler | None]:
    """Generates the optimizer and scheduler based on configs.

    Args:
        cfg (TrainPipelineConfig): The training config that contains optimizer and scheduler configs
        policy (PreTrainedPolicy): The policy config from which parameters and presets must be taken from.

    Returns:
        tuple[Optimizer, LRScheduler | None]: The couple (Optimizer, Scheduler). Scheduler can be `None`.
    """
    # Presets let a policy hand-pick parameter groups (e.g. per-module learning rates).
    if cfg.use_policy_training_preset:
        params = policy.get_optim_params()
    else:
        params = policy.parameters()

    optimizer = cfg.optimizer.build(params)
    if cfg.scheduler is None:
        return optimizer, None
    return optimizer, cfg.scheduler.build(optimizer, cfg.steps)
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/optim/optimizers.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import abc
17
+ from dataclasses import asdict, dataclass
18
+ from pathlib import Path
19
+
20
+ import draccus
21
+ import torch
22
+ from safetensors.torch import load_file, save_file
23
+
24
+ from lerobot.common.constants import (
25
+ OPTIMIZER_PARAM_GROUPS,
26
+ OPTIMIZER_STATE,
27
+ )
28
+ from lerobot.common.datasets.utils import flatten_dict, unflatten_dict, write_json
29
+ from lerobot.common.utils.io_utils import deserialize_json_into_object
30
+
31
+
32
@dataclass
class OptimizerConfig(draccus.ChoiceRegistry, abc.ABC):
    """Base class for optimizer configs, registered by name (default choice: "adam")."""

    # Learning rate, decoupled weight decay, and the gradient-clipping norm
    # consumed by the training loop (not passed to the torch optimizer).
    lr: float
    weight_decay: float
    grad_clip_norm: float

    @property
    def type(self) -> str:
        """Name under which this subclass was registered (e.g. "adam")."""
        return self.get_choice_name(self.__class__)

    @classmethod
    def default_choice_name(cls) -> str | None:
        return "adam"

    # Fix: the abstract signature now matches every concrete subclass
    # (Adam/AdamW/SGD), which all take the parameters to optimize.
    @abc.abstractmethod
    def build(self, params: dict) -> torch.optim.Optimizer:
        """Instantiate the torch optimizer over `params`."""
        raise NotImplementedError
49
+
50
+
51
@OptimizerConfig.register_subclass("adam")
@dataclass
class AdamConfig(OptimizerConfig):
    """Adam settings; `grad_clip_norm` is consumed by the training loop, not Adam."""

    lr: float = 1e-3
    betas: tuple[float, float] = (0.9, 0.999)
    eps: float = 1e-8
    weight_decay: float = 0.0
    grad_clip_norm: float = 10.0

    def build(self, params: dict) -> torch.optim.Optimizer:
        # Every field except the clipping norm maps 1:1 onto a torch.optim.Adam kwarg.
        kwargs = {key: value for key, value in asdict(self).items() if key != "grad_clip_norm"}
        return torch.optim.Adam(params, **kwargs)
64
+
65
+
66
@OptimizerConfig.register_subclass("adamw")
@dataclass
class AdamWConfig(OptimizerConfig):
    """AdamW settings; `grad_clip_norm` is consumed by the training loop, not AdamW."""

    lr: float = 1e-3
    betas: tuple[float, float] = (0.9, 0.999)
    eps: float = 1e-8
    weight_decay: float = 1e-2
    grad_clip_norm: float = 10.0

    def build(self, params: dict) -> torch.optim.Optimizer:
        # Every field except the clipping norm maps 1:1 onto a torch.optim.AdamW kwarg.
        kwargs = {key: value for key, value in asdict(self).items() if key != "grad_clip_norm"}
        return torch.optim.AdamW(params, **kwargs)
79
+
80
+
81
@OptimizerConfig.register_subclass("sgd")
@dataclass
class SGDConfig(OptimizerConfig):
    """SGD settings; `grad_clip_norm` is consumed by the training loop, not SGD."""

    lr: float = 1e-3
    momentum: float = 0.0
    dampening: float = 0.0
    nesterov: bool = False
    weight_decay: float = 0.0
    grad_clip_norm: float = 10.0

    def build(self, params: dict) -> torch.optim.Optimizer:
        # Every field except the clipping norm maps 1:1 onto a torch.optim.SGD kwarg.
        kwargs = {key: value for key, value in asdict(self).items() if key != "grad_clip_norm"}
        return torch.optim.SGD(params, **kwargs)
95
+
96
+
97
def save_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> None:
    """Persist an optimizer's state: tensor state as safetensors, param groups as JSON."""
    state = optimizer.state_dict()
    # Param groups hold plain python values (lr, betas, ...) and go to JSON;
    # the remaining (tensor) state is flattened for safetensors.
    param_groups = state.pop("param_groups")
    save_file(flatten_dict(state), save_dir / OPTIMIZER_STATE)
    write_json(param_groups, save_dir / OPTIMIZER_PARAM_GROUPS)
103
+
104
+
105
def load_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> torch.optim.Optimizer:
    """Restore state saved by `save_optimizer_state` into `optimizer`, in place, and return it."""
    current_state_dict = optimizer.state_dict()
    # Tensor state: safetensors keys are strings, but torch expects int param ids.
    state = unflatten_dict(load_file(save_dir / OPTIMIZER_STATE))
    loaded_state_dict = {
        "state": {int(param_id): tensors for param_id, tensors in state["state"].items()}
    }

    if "param_groups" in current_state_dict:
        loaded_state_dict["param_groups"] = deserialize_json_into_object(
            save_dir / OPTIMIZER_PARAM_GROUPS, current_state_dict["param_groups"]
        )

    optimizer.load_state_dict(loaded_state_dict)
    return optimizer
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/optim/schedulers.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import abc
17
+ import math
18
+ from dataclasses import asdict, dataclass
19
+ from pathlib import Path
20
+
21
+ import draccus
22
+ from torch.optim import Optimizer
23
+ from torch.optim.lr_scheduler import LambdaLR, LRScheduler
24
+
25
+ from lerobot.common.constants import SCHEDULER_STATE
26
+ from lerobot.common.datasets.utils import write_json
27
+ from lerobot.common.utils.io_utils import deserialize_json_into_object
28
+
29
+
30
@dataclass
class LRSchedulerConfig(draccus.ChoiceRegistry, abc.ABC):
    """Abstract base for learning-rate scheduler configs, registered via draccus ChoiceRegistry."""

    # Number of steps over which the learning rate is warmed up.
    num_warmup_steps: int

    @property
    def type(self) -> str:
        # Name under which the concrete subclass was registered (e.g. "vqbet").
        return self.get_choice_name(self.__class__)

    @abc.abstractmethod
    def build(self, optimizer: Optimizer, num_training_steps: int) -> LRScheduler | None:
        """Create the scheduler for `optimizer`; implementations may return None for no scheduling."""
        raise NotImplementedError
41
+
42
+
43
@LRSchedulerConfig.register_subclass("diffuser")
@dataclass
class DiffuserSchedulerConfig(LRSchedulerConfig):
    """Delegates scheduler construction to `diffusers.optimization.get_scheduler`."""

    # Scheduler name understood by diffusers (e.g. "cosine", "linear").
    name: str = "cosine"
    num_warmup_steps: int | None = None

    def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR:
        # Imported lazily so diffusers is only required when this config is used.
        from diffusers.optimization import get_scheduler

        # Forward the dataclass fields plus the runtime arguments to diffusers.
        scheduler_kwargs = dict(asdict(self))
        scheduler_kwargs["num_training_steps"] = num_training_steps
        scheduler_kwargs["optimizer"] = optimizer
        return get_scheduler(**scheduler_kwargs)
54
+
55
+
56
@LRSchedulerConfig.register_subclass("vqbet")
@dataclass
class VQBeTSchedulerConfig(LRSchedulerConfig):
    """VQ-BeT schedule: constant LR while the VQ-VAE trains, then linear warmup + cosine decay."""

    num_warmup_steps: int
    # Steps during which the LR multiplier is held at 1 (VQ-VAE pretraining phase).
    num_vqvae_training_steps: int
    num_cycles: float = 0.5

    def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR:
        def lr_lambda(current_step):
            # Phase 1: keep the base LR untouched during VQ-VAE training.
            if current_step < self.num_vqvae_training_steps:
                return 1.0
            # Phase 2: linear warmup, counted from the end of VQ-VAE training.
            step = current_step - self.num_vqvae_training_steps
            if step < self.num_warmup_steps:
                return step / max(1, self.num_warmup_steps)
            # Phase 3: cosine decay toward zero over the remaining steps.
            progress = (step - self.num_warmup_steps) / max(1, num_training_steps - self.num_warmup_steps)
            return max(0.0, 0.5 * (1.0 + math.cos(math.pi * self.num_cycles * 2.0 * progress)))

        return LambdaLR(optimizer, lr_lambda, -1)
77
+
78
+
79
@LRSchedulerConfig.register_subclass("cosine_decay_with_warmup")
@dataclass
class CosineDecayWithWarmupSchedulerConfig(LRSchedulerConfig):
    """Used by Physical Intelligence to train Pi0: linear warmup to `peak_lr`,
    then cosine decay down to `decay_lr`, held constant afterwards."""

    num_warmup_steps: int
    num_decay_steps: int
    peak_lr: float
    decay_lr: float

    def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR:
        # The schedule is fully determined by the config fields; the trainer-provided
        # total step count is intentionally ignored.
        del num_training_steps

        def lr_lambda(current_step):
            if current_step < self.num_warmup_steps:
                # Linear ramp from 1 / (num_warmup_steps + 1) up to 1.
                if current_step <= 0:
                    return 1 / (self.num_warmup_steps + 1)
                frac = 1 - current_step / self.num_warmup_steps
                return (1 / (self.num_warmup_steps + 1) - 1) * frac + 1
            # Cosine decay from 1 down to decay_lr / peak_lr; clamp past num_decay_steps.
            step = min(current_step, self.num_decay_steps)
            cosine = 0.5 * (1 + math.cos(math.pi * step / self.num_decay_steps))
            floor = self.decay_lr / self.peak_lr
            return (1 - floor) * cosine + floor

        return LambdaLR(optimizer, lr_lambda, -1)
112
+
113
+
114
def save_scheduler_state(scheduler: LRScheduler, save_dir: Path) -> None:
    """Serialize the scheduler's state dict as JSON under `save_dir`."""
    write_json(scheduler.state_dict(), save_dir / SCHEDULER_STATE)
117
+
118
+
119
def load_scheduler_state(scheduler: LRScheduler, save_dir: Path) -> LRScheduler:
    """Restore state written by `save_scheduler_state` into `scheduler` and return it."""
    template = scheduler.state_dict()
    scheduler.load_state_dict(deserialize_json_into_object(save_dir / SCHEDULER_STATE, template))
    return scheduler
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/policies/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .act.configuration_act import ACTConfig as ACTConfig
16
+ from .diffusion.configuration_diffusion import DiffusionConfig as DiffusionConfig
17
+ from .pi0.configuration_pi0 import PI0Config as PI0Config
18
+ from .tdmpc.configuration_tdmpc import TDMPCConfig as TDMPCConfig
19
+ from .vqbet.configuration_vqbet import VQBeTConfig as VQBeTConfig
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/policies/act/configuration_act.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ from dataclasses import dataclass, field
17
+
18
+ from lerobot.common.optim.optimizers import AdamWConfig
19
+ from lerobot.configs.policies import PreTrainedConfig
20
+ from lerobot.configs.types import NormalizationMode
21
+
22
+
23
@PreTrainedConfig.register_subclass("act")
@dataclass
class ACTConfig(PreTrainedConfig):
    """Configuration class for the Action Chunking Transformers policy.

    Defaults are configured for training on bimanual Aloha tasks like "insertion" or "transfer".

    The parameters you will most likely need to change are the ones which depend on the environment /
    sensors, i.e. the input/output features and `normalization_mapping`.

    Notes on the inputs and outputs:
        - Either:
            - At least one key starting with "observation.image" is required as an input.
              AND/OR
            - The key "observation.environment_state" is required as input.
        - If there are multiple keys beginning with "observation.images." they are treated as multiple camera
          views. Right now we only support all images having the same shape.
        - May optionally work without an "observation.state" key for the proprioceptive robot state.
        - "action" is required as an output key.

    Args:
        n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the
            current step and additional steps going back).
        chunk_size: The size of the action prediction "chunks" in units of environment steps.
        n_action_steps: The number of action steps to run in the environment for one invocation of the policy.
            This should be no greater than the chunk size. For example, if the chunk size is 100, you may
            set this to 50. This would mean that the model predicts 100 steps worth of actions, runs 50 in the
            environment, and throws the other 50 out.
        normalization_mapping: A dictionary mapping a feature type ("VISUAL", "STATE", "ACTION") to the
            normalization mode to apply. The two available modes are "mean_std" which subtracts the mean and
            divides by the standard deviation and "min_max" which rescales in a [-1, 1] range. Note that the
            action normalization is also used for normalizing the training targets.
        vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
        pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
            `None` means no pretrained weights.
        replace_final_stride_with_dilation: Whether to replace the ResNet's final 2x2 stride with a dilated
            convolution.
        pre_norm: Whether to use "pre-norm" in the transformer blocks.
        dim_model: The transformer blocks' main hidden dimension.
        n_heads: The number of heads to use in the transformer blocks' multi-head attention.
        dim_feedforward: The dimension to expand the transformer's hidden dimension to in the feed-forward
            layers.
        feedforward_activation: The activation to use in the transformer block's feed-forward layers.
        n_encoder_layers: The number of transformer layers to use for the transformer encoder.
        n_decoder_layers: The number of transformer layers to use for the transformer decoder.
        use_vae: Whether to use a variational objective during training. This introduces another transformer
            which is used as the VAE's encoder (not to be confused with the transformer encoder - see
            documentation in the policy class).
        latent_dim: The VAE's latent dimension.
        n_vae_encoder_layers: The number of transformer layers to use for the VAE's encoder.
        temporal_ensemble_coeff: Coefficient for the exponential weighting scheme to apply for temporal
            ensembling. Defaults to None which means temporal ensembling is not used. `n_action_steps` must be
            1 when using this feature, as inference needs to happen at every step to form an ensemble. For
            more information on how ensembling works, please see `ACTTemporalEnsembler`.
        dropout: Dropout to use in the transformer layers (see code for details).
        kl_weight: The weight to use for the KL-divergence component of the loss if the variational objective
            is enabled. Loss is then calculated as: `reconstruction_loss + kl_weight * kld_loss`.
    """

    # Input / output structure.
    n_obs_steps: int = 1
    chunk_size: int = 100
    n_action_steps: int = 100

    normalization_mapping: dict[str, NormalizationMode] = field(
        default_factory=lambda: {
            "VISUAL": NormalizationMode.MEAN_STD,
            "STATE": NormalizationMode.MEAN_STD,
            "ACTION": NormalizationMode.MEAN_STD,
        }
    )

    # Architecture.
    # Vision backbone.
    vision_backbone: str = "resnet18"
    pretrained_backbone_weights: str | None = "ResNet18_Weights.IMAGENET1K_V1"
    # This is a boolean flag (was previously mis-annotated as `int`).
    replace_final_stride_with_dilation: bool = False
    # Transformer layers.
    pre_norm: bool = False
    dim_model: int = 512
    n_heads: int = 8
    dim_feedforward: int = 3200
    feedforward_activation: str = "relu"
    n_encoder_layers: int = 4
    # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
    # that means only the first layer is used. Here we match the original implementation by setting this to 1.
    # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
    n_decoder_layers: int = 1
    # VAE.
    use_vae: bool = True
    latent_dim: int = 32
    n_vae_encoder_layers: int = 4

    # Inference.
    # Note: the value used in ACT when temporal ensembling is enabled is 0.01.
    temporal_ensemble_coeff: float | None = None

    # Training and loss computation.
    dropout: float = 0.1
    kl_weight: float = 10.0

    # Training preset
    optimizer_lr: float = 1e-5
    optimizer_weight_decay: float = 1e-4
    optimizer_lr_backbone: float = 1e-5

    def __post_init__(self):
        super().__post_init__()

        # Input validation (not exhaustive).
        if not self.vision_backbone.startswith("resnet"):
            raise ValueError(
                f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}."
            )
        if self.temporal_ensemble_coeff is not None and self.n_action_steps > 1:
            raise NotImplementedError(
                "`n_action_steps` must be 1 when using temporal ensembling. This is "
                "because the policy needs to be queried every step to compute the ensembled action."
            )
        if self.n_action_steps > self.chunk_size:
            raise ValueError(
                f"The chunk size is the upper bound for the number of action steps per model invocation. Got "
                f"{self.n_action_steps} for `n_action_steps` and {self.chunk_size} for `chunk_size`."
            )
        if self.n_obs_steps != 1:
            raise ValueError(
                f"Multiple observation steps not handled yet. Got `n_obs_steps={self.n_obs_steps}`"
            )

    def get_optimizer_preset(self) -> AdamWConfig:
        """Default optimizer settings used when training ACT."""
        return AdamWConfig(
            lr=self.optimizer_lr,
            weight_decay=self.optimizer_weight_decay,
        )

    def get_scheduler_preset(self) -> None:
        # ACT is trained with a constant learning rate: no scheduler preset.
        return None

    def validate_features(self) -> None:
        """Ensure the configured inputs satisfy the requirements in the class docstring."""
        if not self.image_features and not self.env_state_feature:
            raise ValueError("You must provide at least one image or the environment state among the inputs.")

    @property
    def observation_delta_indices(self) -> None:
        return None

    @property
    def action_delta_indices(self) -> list:
        # The model predicts a full chunk of future actions at each step.
        return list(range(self.chunk_size))

    @property
    def reward_delta_indices(self) -> None:
        return None
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/policies/act/modeling_act.py ADDED
@@ -0,0 +1,765 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Action Chunking Transformer Policy
17
+
18
+ As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://arxiv.org/abs/2304.13705).
19
+ The majority of changes here involve removing unused code, unifying naming, and adding helpful comments.
20
+ """
21
+
22
+ import math
23
+ from collections import deque
24
+ from itertools import chain
25
+ from typing import Callable
26
+
27
+ import einops
28
+ import numpy as np
29
+ import torch
30
+ import torch.nn.functional as F # noqa: N812
31
+ import torchvision
32
+ from torch import Tensor, nn
33
+ from torchvision.models._utils import IntermediateLayerGetter
34
+ from torchvision.ops.misc import FrozenBatchNorm2d
35
+
36
+ from lerobot.common.policies.act.configuration_act import ACTConfig
37
+ from lerobot.common.policies.normalize import Normalize, Unnormalize
38
+ from lerobot.common.policies.pretrained import PreTrainedPolicy
39
+
40
+
41
class ACTPolicy(PreTrainedPolicy):
    """
    Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost
    Hardware (paper: https://arxiv.org/abs/2304.13705, code: https://github.com/tonyzhaozh/act)
    """

    config_class = ACTConfig
    name = "act"

    def __init__(
        self,
        config: ACTConfig,
        dataset_stats: dict[str, dict[str, Tensor]] | None = None,
    ):
        """
        Args:
            config: Policy configuration class instance or None, in which case the default instantiation of
                the configuration class is used.
            dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
                that they will be passed with a call to `load_state_dict` before the policy is used.
        """
        super().__init__(config)
        config.validate_features()
        self.config = config

        # Normalization wrappers: inputs and training targets are normalized, model outputs
        # are mapped back to the original action scale.
        self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
        self.normalize_targets = Normalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )
        self.unnormalize_outputs = Unnormalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )

        self.model = ACT(config)

        # Temporal ensembling and the action queue are mutually exclusive (see `reset`).
        if config.temporal_ensemble_coeff is not None:
            self.temporal_ensembler = ACTTemporalEnsembler(config.temporal_ensemble_coeff, config.chunk_size)

        self.reset()

    def get_optim_params(self) -> dict:
        """Return two parameter groups: the backbone (with its own LR) and everything else."""
        # TODO(aliberts, rcadene): As of now, lr_backbone == lr
        # Should we remove this and just `return self.parameters()`?
        return [
            {
                "params": [
                    p
                    for n, p in self.named_parameters()
                    if not n.startswith("model.backbone") and p.requires_grad
                ]
            },
            {
                "params": [
                    p
                    for n, p in self.named_parameters()
                    if n.startswith("model.backbone") and p.requires_grad
                ],
                "lr": self.config.optimizer_lr_backbone,
            },
        ]

    def reset(self):
        """This should be called whenever the environment is reset."""
        if self.config.temporal_ensemble_coeff is not None:
            self.temporal_ensembler.reset()
        else:
            # Queue of pending actions from the last chunk prediction.
            self._action_queue = deque([], maxlen=self.config.n_action_steps)

    @torch.no_grad
    def select_action(self, batch: dict[str, Tensor]) -> Tensor:
        """Select a single action given environment observations.

        This method wraps `select_actions` in order to return one action at a time for execution in the
        environment. It works by managing the actions in a queue and only calling `select_actions` when the
        queue is empty.
        """
        self.eval()

        batch = self.normalize_inputs(batch)
        if self.config.image_features:
            batch = dict(batch)  # shallow copy so that adding a key doesn't modify the original
            # Collect all camera views into a single list input for the model.
            batch["observation.images"] = [batch[key] for key in self.config.image_features]

        # If we are doing temporal ensembling, do online updates where we keep track of the number of actions
        # we are ensembling over.
        if self.config.temporal_ensemble_coeff is not None:
            actions = self.model(batch)[0]  # (batch_size, chunk_size, action_dim)
            actions = self.unnormalize_outputs({"action": actions})["action"]
            action = self.temporal_ensembler.update(actions)
            return action

        # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by
        # querying the policy.
        if len(self._action_queue) == 0:
            actions = self.model(batch)[0][:, : self.config.n_action_steps]

            # TODO(rcadene): make _forward return output dictionary?
            actions = self.unnormalize_outputs({"action": actions})["action"]

            # `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue
            # effectively has shape (n_action_steps, batch_size, *), hence the transpose.
            self._action_queue.extend(actions.transpose(0, 1))
        return self._action_queue.popleft()

    def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]:
        """Run the batch through the model and compute the loss for training or validation."""
        batch = self.normalize_inputs(batch)
        if self.config.image_features:
            batch = dict(batch)  # shallow copy so that adding a key doesn't modify the original
            batch["observation.images"] = [batch[key] for key in self.config.image_features]

        batch = self.normalize_targets(batch)
        actions_hat, (mu_hat, log_sigma_x2_hat) = self.model(batch)

        # L1 reconstruction loss, with padded action steps masked out via `action_is_pad`.
        l1_loss = (
            F.l1_loss(batch["action"], actions_hat, reduction="none") * ~batch["action_is_pad"].unsqueeze(-1)
        ).mean()

        loss_dict = {"l1_loss": l1_loss.item()}
        if self.config.use_vae:
            # Calculate Dₖₗ(latent_pdf || standard_normal). Note: After computing the KL-divergence for
            # each dimension independently, we sum over the latent dimension to get the total
            # KL-divergence per batch element, then take the mean over the batch.
            # (See App. B of https://arxiv.org/abs/1312.6114 for more details).
            mean_kld = (
                (-0.5 * (1 + log_sigma_x2_hat - mu_hat.pow(2) - (log_sigma_x2_hat).exp())).sum(-1).mean()
            )
            loss_dict["kld_loss"] = mean_kld.item()
            loss = l1_loss + mean_kld * self.config.kl_weight
        else:
            loss = l1_loss

        return loss, loss_dict
174
+
175
+
176
class ACTTemporalEnsembler:
    def __init__(self, temporal_ensemble_coeff: float, chunk_size: int) -> None:
        """Temporal ensembling as described in Algorithm 2 of https://arxiv.org/abs/2304.13705.

        The weights are calculated as wᵢ = exp(-temporal_ensemble_coeff * i) where w₀ is the oldest action.
        They are then normalized to sum to 1 by dividing by Σwᵢ. Here's some intuition around how the
        coefficient works:
            - Setting it to 0 uniformly weighs all actions.
            - Setting it positive gives more weight to older actions.
            - Setting it negative gives more weight to newer actions.
        NOTE: The default value for `temporal_ensemble_coeff` used by the original ACT work is 0.01. This
        results in older actions being weighed more highly than newer actions (the experiments documented in
        https://github.com/huggingface/lerobot/pull/319 hint at why highly weighing new actions might be
        detrimental: doing so aggressively may diminish the benefits of action chunking).

        Here we use an online method for computing the average rather than caching a history of actions in
        order to compute the average offline. For a simple 1D sequence it looks something like:

        ```
        import torch

        seq = torch.linspace(8, 8.5, 100)
        print(seq)

        m = 0.01
        exp_weights = torch.exp(-m * torch.arange(len(seq)))
        print(exp_weights)

        # Calculate offline
        avg = (exp_weights * seq).sum() / exp_weights.sum()
        print("offline", avg)

        # Calculate online
        for i, item in enumerate(seq):
            if i == 0:
                avg = item
                continue
            avg *= exp_weights[:i].sum()
            avg += item * exp_weights[i]
            avg /= exp_weights[:i+1].sum()
        print("online", avg)
        ```
        """
        self.chunk_size = chunk_size
        # Unnormalized exponential weights; normalization is done online via the cumulative sums.
        self.ensemble_weights = torch.exp(-temporal_ensemble_coeff * torch.arange(chunk_size))
        self.ensemble_weights_cumsum = torch.cumsum(self.ensemble_weights, dim=0)
        self.reset()

    def reset(self):
        """Resets the online computation variables."""
        self.ensembled_actions = None
        # (chunk_size,) count of how many actions are in the ensemble for each time step in the sequence.
        self.ensembled_actions_count = None

    def update(self, actions: Tensor) -> Tensor:
        """
        Takes a (batch, chunk_size, action_dim) sequence of actions, update the temporal ensemble for all
        time steps, and pop/return the next batch of actions in the sequence.
        """
        # Lazily move the precomputed weights to the same device as the incoming actions.
        self.ensemble_weights = self.ensemble_weights.to(device=actions.device)
        self.ensemble_weights_cumsum = self.ensemble_weights_cumsum.to(device=actions.device)
        if self.ensembled_actions is None:
            # Initializes `self._ensembled_action` to the sequence of actions predicted during the first
            # time step of the episode.
            self.ensembled_actions = actions.clone()
            # Note: The last dimension is unsqueeze to make sure we can broadcast properly for tensor
            # operations later.
            self.ensembled_actions_count = torch.ones(
                (self.chunk_size, 1), dtype=torch.long, device=self.ensembled_actions.device
            )
        else:
            # self.ensembled_actions will have shape (batch_size, chunk_size - 1, action_dim). Compute
            # the online update for those entries.
            # Online weighted-average update: un-normalize by the previous weight sum, add the new
            # weighted term, then re-normalize by the new weight sum.
            self.ensembled_actions *= self.ensemble_weights_cumsum[self.ensembled_actions_count - 1]
            self.ensembled_actions += actions[:, :-1] * self.ensemble_weights[self.ensembled_actions_count]
            self.ensembled_actions /= self.ensemble_weights_cumsum[self.ensembled_actions_count]
            # Counts saturate at chunk_size so the weight indices stay in range.
            self.ensembled_actions_count = torch.clamp(self.ensembled_actions_count + 1, max=self.chunk_size)
            # The last action, which has no prior online average, needs to get concatenated onto the end.
            self.ensembled_actions = torch.cat([self.ensembled_actions, actions[:, -1:]], dim=1)
            self.ensembled_actions_count = torch.cat(
                [self.ensembled_actions_count, torch.ones_like(self.ensembled_actions_count[-1:])]
            )
        # "Consume" the first action.
        action, self.ensembled_actions, self.ensembled_actions_count = (
            self.ensembled_actions[:, 0],
            self.ensembled_actions[:, 1:],
            self.ensembled_actions_count[1:],
        )
        return action
265
+
266
+
267
+ class ACT(nn.Module):
268
+ """Action Chunking Transformer: The underlying neural network for ACTPolicy.
269
+
270
+ Note: In this code we use the terms `vae_encoder`, 'encoder', `decoder`. The meanings are as follows.
271
+ - The `vae_encoder` is, as per the literature around variational auto-encoders (VAE), the part of the
272
+ model that encodes the target data (a sequence of actions), and the condition (the robot
273
+ joint-space).
274
+ - A transformer with an `encoder` (not the VAE encoder) and `decoder` (not the VAE decoder) with
275
+ cross-attention is used as the VAE decoder. For these terms, we drop the `vae_` prefix because we
276
+ have an option to train this model without the variational objective (in which case we drop the
277
+ `vae_encoder` altogether, and nothing about this model has anything to do with a VAE).
278
+
279
+ Transformer
280
+ Used alone for inference
281
+ (acts as VAE decoder
282
+ during training)
283
+ ┌───────────────────────┐
284
+ │ Outputs │
285
+ │ ▲ │
286
+ │ ┌─────►┌───────┐ │
287
+ ┌──────┐ │ │ │Transf.│ │
288
+ │ │ │ ├─────►│decoder│ │
289
+ ┌────┴────┐ │ │ │ │ │ │
290
+ │ │ │ │ ┌───┴───┬─►│ │ │
291
+ │ VAE │ │ │ │ │ └───────┘ │
292
+ │ encoder │ │ │ │Transf.│ │
293
+ │ │ │ │ │encoder│ │
294
+ └───▲─────┘ │ │ │ │ │
295
+ │ │ │ └▲──▲─▲─┘ │
296
+ │ │ │ │ │ │ │
297
+ inputs └─────┼──┘ │ image emb. │
298
+ │ state emb. │
299
+ └───────────────────────┘
300
+ """
301
+
302
    def __init__(self, config: ACTConfig):
        """Build the ACT model: optional VAE encoder, vision backbone, transformer encoder/decoder.

        Submodules are created conditionally based on which features the config declares
        (robot state, environment state, images) and whether the variational objective is used.
        """
        # BERT style VAE encoder with input tokens [cls, robot_state, *action_sequence].
        # The cls token forms parameters of the latent's distribution (like this [*means, *log_variances]).
        super().__init__()
        self.config = config

        if self.config.use_vae:
            self.vae_encoder = ACTEncoder(config, is_vae_encoder=True)
            self.vae_encoder_cls_embed = nn.Embedding(1, config.dim_model)
            # Projection layer for joint-space configuration to hidden dimension.
            if self.config.robot_state_feature:
                self.vae_encoder_robot_state_input_proj = nn.Linear(
                    self.config.robot_state_feature.shape[0], config.dim_model
                )
            # Projection layer for action (joint-space target) to hidden dimension.
            self.vae_encoder_action_input_proj = nn.Linear(
                self.config.action_feature.shape[0],
                config.dim_model,
            )
            # Projection layer from the VAE encoder's output to the latent distribution's parameter space.
            self.vae_encoder_latent_output_proj = nn.Linear(config.dim_model, config.latent_dim * 2)
            # Fixed sinusoidal positional embedding for the input to the VAE encoder. Unsqueeze for batch
            # dimension. Token count: [cls] + (robot_state) + chunk_size action tokens.
            num_input_token_encoder = 1 + config.chunk_size
            if self.config.robot_state_feature:
                num_input_token_encoder += 1
            self.register_buffer(
                "vae_encoder_pos_enc",
                create_sinusoidal_pos_embedding(num_input_token_encoder, config.dim_model).unsqueeze(0),
            )

        # Backbone for image feature extraction.
        if self.config.image_features:
            backbone_model = getattr(torchvision.models, config.vision_backbone)(
                replace_stride_with_dilation=[False, False, config.replace_final_stride_with_dilation],
                weights=config.pretrained_backbone_weights,
                norm_layer=FrozenBatchNorm2d,
            )
            # Note: The assumption here is that we are using a ResNet model (and hence layer4 is the final
            # feature map).
            # Note: The forward method of this returns a dict: {"feature_map": output}.
            self.backbone = IntermediateLayerGetter(backbone_model, return_layers={"layer4": "feature_map"})

        # Transformer (acts as VAE decoder when training with the variational objective).
        self.encoder = ACTEncoder(config)
        self.decoder = ACTDecoder(config)

        # Transformer encoder input projections. The tokens will be structured like
        # [latent, (robot_state), (env_state), (image_feature_map_pixels)].
        if self.config.robot_state_feature:
            self.encoder_robot_state_input_proj = nn.Linear(
                self.config.robot_state_feature.shape[0], config.dim_model
            )
        if self.config.env_state_feature:
            self.encoder_env_state_input_proj = nn.Linear(
                self.config.env_state_feature.shape[0], config.dim_model
            )
        self.encoder_latent_input_proj = nn.Linear(config.latent_dim, config.dim_model)
        if self.config.image_features:
            # 1x1 conv projecting the backbone's final channel count down to the transformer hidden dim.
            self.encoder_img_feat_input_proj = nn.Conv2d(
                backbone_model.fc.in_features, config.dim_model, kernel_size=1
            )
        # Transformer encoder positional embeddings (learned, one per 1D token).
        n_1d_tokens = 1  # for the latent
        if self.config.robot_state_feature:
            n_1d_tokens += 1
        if self.config.env_state_feature:
            n_1d_tokens += 1
        self.encoder_1d_feature_pos_embed = nn.Embedding(n_1d_tokens, config.dim_model)
        if self.config.image_features:
            # Half the channels carry the vertical embedding and half the horizontal one.
            self.encoder_cam_feat_pos_embed = ACTSinusoidalPositionEmbedding2d(config.dim_model // 2)

        # Transformer decoder.
        # Learnable positional embedding for the transformer's decoder (in the style of DETR object queries).
        self.decoder_pos_embed = nn.Embedding(config.chunk_size, config.dim_model)

        # Final action regression head on the output of the transformer's decoder.
        self.action_head = nn.Linear(config.dim_model, self.config.action_feature.shape[0])

        self._reset_parameters()
382
+
383
+ def _reset_parameters(self):
384
+ """Xavier-uniform initialization of the transformer parameters as in the original code."""
385
+ for p in chain(self.encoder.parameters(), self.decoder.parameters()):
386
+ if p.dim() > 1:
387
+ nn.init.xavier_uniform_(p)
388
+
389
    def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tensor] | tuple[None, None]]:
        """A forward pass through the Action Chunking Transformer (with optional VAE encoder).

        `batch` should have the following structure:
        {
            [robot_state_feature] (optional): (B, state_dim) batch of robot states.

            [image_features]: (B, n_cameras, C, H, W) batch of images.
                AND/OR
            [env_state_feature]: (B, env_dim) batch of environment states.

            [action_feature] (optional, only if training with VAE): (B, chunk_size, action dim) batch of actions.
        }

        Returns:
            (B, chunk_size, action_dim) batch of action sequences
            Tuple containing the latent PDF's parameters (mean, log(σ²)) both as (B, L) tensors where L is the
            latent dimension. Both entries are None when the VAE path is not run.
        """
        if self.config.use_vae and self.training:
            assert "action" in batch, (
                "actions must be provided when using the variational objective in training mode."
            )

        if "observation.images" in batch:
            batch_size = batch["observation.images"][0].shape[0]
        else:
            batch_size = batch["observation.environment_state"].shape[0]

        # Prepare the latent for input to the transformer encoder.
        if self.config.use_vae and "action" in batch:
            # Prepare the input to the VAE encoder: [cls, *joint_space_configuration, *action_sequence].
            cls_embed = einops.repeat(
                self.vae_encoder_cls_embed.weight, "1 d -> b 1 d", b=batch_size
            )  # (B, 1, D)
            if self.config.robot_state_feature:
                robot_state_embed = self.vae_encoder_robot_state_input_proj(batch["observation.state"])
                robot_state_embed = robot_state_embed.unsqueeze(1)  # (B, 1, D)
            action_embed = self.vae_encoder_action_input_proj(batch["action"])  # (B, S, D)

            if self.config.robot_state_feature:
                vae_encoder_input = [cls_embed, robot_state_embed, action_embed]  # (B, S+2, D)
            else:
                vae_encoder_input = [cls_embed, action_embed]
            vae_encoder_input = torch.cat(vae_encoder_input, axis=1)

            # Prepare fixed positional embedding.
            # Note: detach() shouldn't be necessary but leaving it the same as the original code just in case.
            pos_embed = self.vae_encoder_pos_enc.clone().detach()  # (1, S+2, D)

            # Prepare key padding mask for the transformer encoder. We have 1 or 2 extra tokens at the start of the
            # sequence depending whether we use the input states or not (cls and robot state)
            # False means not a padding token.
            # NOTE(review): "observation.state" is read unconditionally here for the device, even when
            # robot_state_feature is falsy — presumably the VAE path always has it; confirm with callers.
            cls_joint_is_pad = torch.full(
                (batch_size, 2 if self.config.robot_state_feature else 1),
                False,
                device=batch["observation.state"].device,
            )
            key_padding_mask = torch.cat(
                [cls_joint_is_pad, batch["action_is_pad"]], axis=1
            )  # (bs, seq+1 or 2)

            # Forward pass through VAE encoder to get the latent PDF parameters.
            cls_token_out = self.vae_encoder(
                vae_encoder_input.permute(1, 0, 2),
                pos_embed=pos_embed.permute(1, 0, 2),
                key_padding_mask=key_padding_mask,
            )[0]  # select the class token, with shape (B, D)
            latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out)
            mu = latent_pdf_params[:, : self.config.latent_dim]
            # This is 2log(sigma). Done this way to match the original implementation.
            log_sigma_x2 = latent_pdf_params[:, self.config.latent_dim :]

            # Sample the latent with the reparameterization trick.
            latent_sample = mu + log_sigma_x2.div(2).exp() * torch.randn_like(mu)
        else:
            # When not using the VAE encoder, we set the latent to be all zeros.
            mu = log_sigma_x2 = None
            # TODO(rcadene, alexander-soare): remove call to `.to` to speedup forward ; precompute and use buffer
            latent_sample = torch.zeros([batch_size, self.config.latent_dim], dtype=torch.float32).to(
                batch["observation.state"].device
            )

        # Prepare transformer encoder inputs.
        encoder_in_tokens = [self.encoder_latent_input_proj(latent_sample)]
        encoder_in_pos_embed = list(self.encoder_1d_feature_pos_embed.weight.unsqueeze(1))
        # Robot state token.
        if self.config.robot_state_feature:
            encoder_in_tokens.append(self.encoder_robot_state_input_proj(batch["observation.state"]))
        # Environment state token.
        if self.config.env_state_feature:
            encoder_in_tokens.append(
                self.encoder_env_state_input_proj(batch["observation.environment_state"])
            )

        # Camera observation features and positional embeddings.
        if self.config.image_features:
            all_cam_features = []
            all_cam_pos_embeds = []

            # For a list of images, the H and W may vary but H*W is constant.
            for img in batch["observation.images"]:
                cam_features = self.backbone(img)["feature_map"]
                cam_pos_embed = self.encoder_cam_feat_pos_embed(cam_features).to(dtype=cam_features.dtype)
                cam_features = self.encoder_img_feat_input_proj(cam_features)

                # Rearrange features to (sequence, batch, dim).
                cam_features = einops.rearrange(cam_features, "b c h w -> (h w) b c")
                cam_pos_embed = einops.rearrange(cam_pos_embed, "b c h w -> (h w) b c")

                all_cam_features.append(cam_features)
                all_cam_pos_embeds.append(cam_pos_embed)

            encoder_in_tokens.extend(torch.cat(all_cam_features, axis=0))
            encoder_in_pos_embed.extend(torch.cat(all_cam_pos_embeds, axis=0))

        # Stack all tokens along the sequence dimension.
        encoder_in_tokens = torch.stack(encoder_in_tokens, axis=0)
        encoder_in_pos_embed = torch.stack(encoder_in_pos_embed, axis=0)

        # Forward pass through the transformer modules.
        encoder_out = self.encoder(encoder_in_tokens, pos_embed=encoder_in_pos_embed)
        # TODO(rcadene, alexander-soare): remove call to `device` ; precompute and use buffer
        decoder_in = torch.zeros(
            (self.config.chunk_size, batch_size, self.config.dim_model),
            dtype=encoder_in_pos_embed.dtype,
            device=encoder_in_pos_embed.device,
        )
        decoder_out = self.decoder(
            decoder_in,
            encoder_out,
            encoder_pos_embed=encoder_in_pos_embed,
            decoder_pos_embed=self.decoder_pos_embed.weight.unsqueeze(1),
        )

        # Move back to (B, S, C).
        decoder_out = decoder_out.transpose(0, 1)

        actions = self.action_head(decoder_out)

        return actions, (mu, log_sigma_x2)
530
+
531
+
532
class ACTEncoder(nn.Module):
    """Stack of transformer encoder layers with an optional final layer norm (pre-norm variants only)."""

    def __init__(self, config: ACTConfig, is_vae_encoder: bool = False):
        super().__init__()
        self.is_vae_encoder = is_vae_encoder
        # The VAE encoder and the main encoder may have different depths.
        if self.is_vae_encoder:
            depth = config.n_vae_encoder_layers
        else:
            depth = config.n_encoder_layers
        self.layers = nn.ModuleList(ACTEncoderLayer(config) for _ in range(depth))
        # Post-norm configurations skip the final normalization (Identity is a no-op).
        if config.pre_norm:
            self.norm = nn.LayerNorm(config.dim_model)
        else:
            self.norm = nn.Identity()

    def forward(
        self, x: Tensor, pos_embed: Tensor | None = None, key_padding_mask: Tensor | None = None
    ) -> Tensor:
        out = x
        for encoder_layer in self.layers:
            out = encoder_layer(out, pos_embed=pos_embed, key_padding_mask=key_padding_mask)
        return self.norm(out)
549
+
550
+
551
class ACTEncoderLayer(nn.Module):
    """A single transformer encoder layer: self-attention + feed-forward, each with a residual connection.

    Supports both pre-norm (normalize before each sub-block) and post-norm (normalize after the residual
    add) layouts, selected by `config.pre_norm`.
    """

    def __init__(self, config: ACTConfig):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout)

        # Feed forward layers.
        self.linear1 = nn.Linear(config.dim_model, config.dim_feedforward)
        self.dropout = nn.Dropout(config.dropout)
        self.linear2 = nn.Linear(config.dim_feedforward, config.dim_model)

        self.norm1 = nn.LayerNorm(config.dim_model)
        self.norm2 = nn.LayerNorm(config.dim_model)
        self.dropout1 = nn.Dropout(config.dropout)
        self.dropout2 = nn.Dropout(config.dropout)

        self.activation = get_activation_fn(config.feedforward_activation)
        self.pre_norm = config.pre_norm

    def forward(self, x, pos_embed: Tensor | None = None, key_padding_mask: Tensor | None = None) -> Tensor:
        """Run one encoder layer. `x` is (Sequence, Batch, Channel); returns the same shape."""
        skip = x
        if self.pre_norm:
            x = self.norm1(x)
        # Positional embedding is added to queries and keys only, not to values (DETR convention).
        q = k = x if pos_embed is None else x + pos_embed
        x = self.self_attn(q, k, value=x, key_padding_mask=key_padding_mask)
        x = x[0]  # note: [0] to select just the output, not the attention weights
        x = skip + self.dropout1(x)
        # Residual bookkeeping: pre-norm normalizes the input of the FFN; post-norm normalizes the
        # attention output before it becomes the FFN skip connection.
        if self.pre_norm:
            skip = x
            x = self.norm2(x)
        else:
            x = self.norm1(x)
            skip = x
        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
        x = skip + self.dropout2(x)
        if not self.pre_norm:
            x = self.norm2(x)
        return x
588
+
589
+
590
class ACTDecoder(nn.Module):
    """Stack of transformer decoder layers followed by a final layer norm."""

    def __init__(self, config: ACTConfig):
        super().__init__()
        self.layers = nn.ModuleList(ACTDecoderLayer(config) for _ in range(config.n_decoder_layers))
        self.norm = nn.LayerNorm(config.dim_model)

    def forward(
        self,
        x: Tensor,
        encoder_out: Tensor,
        decoder_pos_embed: Tensor | None = None,
        encoder_pos_embed: Tensor | None = None,
    ) -> Tensor:
        out = x
        for decoder_layer in self.layers:
            out = decoder_layer(
                out, encoder_out, decoder_pos_embed=decoder_pos_embed, encoder_pos_embed=encoder_pos_embed
            )
        # Guard kept from the original implementation (norm is always set in __init__).
        if self.norm is not None:
            out = self.norm(out)
        return out
611
+
612
+
613
class ACTDecoderLayer(nn.Module):
    """A single transformer decoder layer: self-attention, cross-attention over the encoder output, and a
    feed-forward block, each with a residual connection and pre- or post-norm layout."""

    def __init__(self, config: ACTConfig):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout)
        self.multihead_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout)

        # Feed forward layers.
        self.linear1 = nn.Linear(config.dim_model, config.dim_feedforward)
        self.dropout = nn.Dropout(config.dropout)
        self.linear2 = nn.Linear(config.dim_feedforward, config.dim_model)

        self.norm1 = nn.LayerNorm(config.dim_model)
        self.norm2 = nn.LayerNorm(config.dim_model)
        self.norm3 = nn.LayerNorm(config.dim_model)
        self.dropout1 = nn.Dropout(config.dropout)
        self.dropout2 = nn.Dropout(config.dropout)
        self.dropout3 = nn.Dropout(config.dropout)

        self.activation = get_activation_fn(config.feedforward_activation)
        self.pre_norm = config.pre_norm

    def maybe_add_pos_embed(self, tensor: Tensor, pos_embed: Tensor | None) -> Tensor:
        """Add the positional embedding if one is provided; otherwise pass the tensor through."""
        return tensor if pos_embed is None else tensor + pos_embed

    def forward(
        self,
        x: Tensor,
        encoder_out: Tensor,
        decoder_pos_embed: Tensor | None = None,
        encoder_pos_embed: Tensor | None = None,
    ) -> Tensor:
        """
        Args:
            x: (Decoder Sequence, Batch, Channel) tensor of input tokens.
            encoder_out: (Encoder Sequence, B, C) output features from the last layer of the encoder we are
                cross-attending with.
            decoder_pos_embed: (DS, 1, C) positional embedding for the queries (from the decoder).
            encoder_pos_embed: (ES, 1, C) positional embedding for the keys (from the encoder).
        Returns:
            (DS, B, C) tensor of decoder output features.
        """
        skip = x
        if self.pre_norm:
            x = self.norm1(x)
        # Self-attention among decoder tokens; positional embedding goes on queries/keys only.
        q = k = self.maybe_add_pos_embed(x, decoder_pos_embed)
        x = self.self_attn(q, k, value=x)[0]  # select just the output, not the attention weights
        x = skip + self.dropout1(x)
        if self.pre_norm:
            skip = x
            x = self.norm2(x)
        else:
            x = self.norm1(x)
            skip = x
        # Cross-attention: decoder queries attend over encoder outputs (values taken without pos embed).
        x = self.multihead_attn(
            query=self.maybe_add_pos_embed(x, decoder_pos_embed),
            key=self.maybe_add_pos_embed(encoder_out, encoder_pos_embed),
            value=encoder_out,
        )[0]  # select just the output, not the attention weights
        x = skip + self.dropout2(x)
        if self.pre_norm:
            skip = x
            x = self.norm3(x)
        else:
            x = self.norm2(x)
            skip = x
        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
        x = skip + self.dropout3(x)
        if not self.pre_norm:
            x = self.norm3(x)
        return x
683
+
684
+
685
def create_sinusoidal_pos_embedding(num_positions: int, dimension: int) -> Tensor:
    """1D sinusoidal positional embeddings as in Attention is All You Need.

    Args:
        num_positions: Number of token positions required.
        dimension: Embedding dimension per position.
    Returns:
        (num_positions, dimension) float32 tensor of position embeddings.
    """
    # Vectorized construction of the angle table: angle[p, j] = p / 10000^(2 * (j // 2) / dimension).
    # This replaces the original O(num_positions * dimension) Python double loop with NumPy broadcasting
    # and produces the exact same values.
    positions = np.arange(num_positions, dtype=np.float64)[:, None]  # (num_positions, 1)
    inverse_freq = np.power(10000, 2 * (np.arange(dimension) // 2) / dimension)  # (dimension,)
    sinusoid_table = positions / inverse_freq  # broadcast to (num_positions, dimension)
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
    return torch.from_numpy(sinusoid_table).float()
701
+
702
+
703
class ACTSinusoidalPositionEmbedding2d(nn.Module):
    """2D sinusoidal positional embeddings similar to what's presented in Attention Is All You Need.

    The variation is that the position indices are normalized in [0, 2π] (not quite: the lower bound is 1/H
    for the vertical direction, and 1/W for the horizontal direction.
    """

    def __init__(self, dimension: int):
        """
        Args:
            dimension: The desired dimension of the embeddings.
        """
        super().__init__()
        self.dimension = dimension
        self._two_pi = 2 * math.pi
        self._eps = 1e-6
        # Inverse "common ratio" for the geometric progression in sinusoid frequencies.
        self._temperature = 10000

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x: A (B, C, H, W) batch of 2D feature map to generate the embeddings for.
        Returns:
            A (1, C, H, W) batch of corresponding sinusoidal positional embeddings.
        """
        ones = torch.ones_like(x[0, :1])  # (1, H, W)
        # Position indices run 1..H and 1..W (original used cumsum rather than arange; kept for parity).
        rows = ones.cumsum(1, dtype=torch.float32)
        cols = ones.cumsum(2, dtype=torch.float32)

        # "Normalize" each axis so positions span (0, 2π]. The epsilon on the denominator is an artifact
        # of the original code — all entries are non-zero by construction.
        rows = rows / (rows[:, -1:, :] + self._eps) * self._two_pi
        cols = cols / (cols[:, :, -1:] + self._eps) * self._two_pi

        freq_idx = torch.arange(self.dimension, dtype=torch.float32, device=x.device)
        inverse_frequency = self._temperature ** (2 * (freq_idx // 2) / self.dimension)

        col_angles = cols.unsqueeze(-1) / inverse_frequency  # (1, H, W, dimension)
        row_angles = rows.unsqueeze(-1) / inverse_frequency  # (1, H, W, dimension)

        def interleave_sin_cos(angles: Tensor) -> Tensor:
            # Stack-then-flatten yields interleaved sine (even) and cosine (odd) terms: (1, H, W, dimension).
            return torch.stack((angles[..., 0::2].sin(), angles[..., 1::2].cos()), dim=-1).flatten(3)

        # Vertical embedding first, then horizontal, moved to channels-first layout: (1, 2 * dimension, H, W).
        embedding = torch.cat((interleave_sin_cos(row_angles), interleave_sin_cos(col_angles)), dim=3)
        return embedding.permute(0, 3, 1, 2)
755
+
756
+
757
def get_activation_fn(activation: str) -> Callable:
    """Map an activation name ("relu", "gelu" or "glu") to the corresponding torch functional."""
    lookup = {"relu": F.relu, "gelu": F.gelu, "glu": F.glu}
    if activation not in lookup:
        raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
    return lookup[activation]
project/ManiSkill3/src/maniskill3_environment/lerobot/lerobot/common/policies/diffusion/configuration_diffusion.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 Columbia Artificial Intelligence, Robotics Lab,
4
+ # and The HuggingFace Inc. team. All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ from dataclasses import dataclass, field
18
+
19
+ from lerobot.common.optim.optimizers import AdamConfig
20
+ from lerobot.common.optim.schedulers import DiffuserSchedulerConfig
21
+ from lerobot.configs.policies import PreTrainedConfig
22
+ from lerobot.configs.types import NormalizationMode
23
+
24
+
25
@PreTrainedConfig.register_subclass("diffusion")
@dataclass
class DiffusionConfig(PreTrainedConfig):
    """Configuration class for DiffusionPolicy.

    Defaults are configured for training with PushT providing proprioceptive and single camera observations.

    The parameters you will most likely need to change are the ones which depend on the environment / sensors.
    Those are: `input_shapes` and `output_shapes`.

    Notes on the inputs and outputs:
        - "observation.state" is required as an input key.
        - Either:
            - At least one key starting with "observation.image is required as an input.
              AND/OR
            - The key "observation.environment_state" is required as input.
        - If there are multiple keys beginning with "observation.image" they are treated as multiple camera
          views. Right now we only support all images having the same shape.
        - "action" is required as an output key.

    Args:
        n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the
            current step and additional steps going back).
        horizon: Diffusion model action prediction size as detailed in `DiffusionPolicy.select_action`.
        n_action_steps: The number of action steps to run in the environment for one invocation of the policy.
            See `DiffusionPolicy.select_action` for more details.
        input_shapes: A dictionary defining the shapes of the input data for the policy. The key represents
            the input data name, and the value is a list indicating the dimensions of the corresponding data.
            For example, "observation.image" refers to an input from a camera with dimensions [3, 96, 96],
            indicating it has three color channels and 96x96 resolution. Importantly, `input_shapes` doesn't
            include batch dimension or temporal dimension.
        output_shapes: A dictionary defining the shapes of the output data for the policy. The key represents
            the output data name, and the value is a list indicating the dimensions of the corresponding data.
            For example, "action" refers to an output shape of [14], indicating 14-dimensional actions.
            Importantly, `output_shapes` doesn't include batch dimension or temporal dimension.
        input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"),
            and the value specifies the normalization mode to apply. The two available modes are "mean_std"
            which subtracts the mean and divides by the standard deviation and "min_max" which rescale in a
            [-1, 1] range.
        output_normalization_modes: Similar dictionary as `normalize_input_modes`, but to unnormalize to the
            original scale. Note that this is also used for normalizing the training targets.
        vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
        crop_shape: (H, W) shape to crop images to as a preprocessing step for the vision backbone. Must fit
            within the image size. If None, no cropping is done.
        crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval
            mode).
        pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
            `None` means no pretrained weights.
        use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
            The group sizes are set to be about 16 (to be precise, feature_dim // 16).
        spatial_softmax_num_keypoints: Number of keypoints for SpatialSoftmax.
        use_separate_rgb_encoder_per_camera: Whether to use a separate RGB encoder for each camera view.
        down_dims: Feature dimension for each stage of temporal downsampling in the diffusion modeling Unet.
            You may provide a variable number of dimensions, therefore also controlling the degree of
            downsampling.
        kernel_size: The convolutional kernel size of the diffusion modeling Unet.
        n_groups: Number of groups used in the group norm of the Unet's convolutional blocks.
        diffusion_step_embed_dim: The Unet is conditioned on the diffusion timestep via a small non-linear
            network. This is the output dimension of that network, i.e., the embedding dimension.
        use_film_scale_modulation: FiLM (https://arxiv.org/abs/1709.07871) is used for the Unet conditioning.
            Bias modulation is used be default, while this parameter indicates whether to also use scale
            modulation.
        noise_scheduler_type: Name of the noise scheduler to use. Supported options: ["DDPM", "DDIM"].
        num_train_timesteps: Number of diffusion steps for the forward diffusion schedule.
        beta_schedule: Name of the diffusion beta schedule as per DDPMScheduler from Hugging Face diffusers.
        beta_start: Beta value for the first forward-diffusion step.
        beta_end: Beta value for the last forward-diffusion step.
        prediction_type: The type of prediction that the diffusion modeling Unet makes. Choose from "epsilon"
            or "sample". These have equivalent outcomes from a latent variable modeling perspective, but
            "epsilon" has been shown to work better in many deep neural network settings.
        clip_sample: Whether to clip the sample to [-`clip_sample_range`, +`clip_sample_range`] for each
            denoising step at inference time. WARNING: you will need to make sure your action-space is
            normalized to fit within this range.
        clip_sample_range: The magnitude of the clipping range as described above.
        num_inference_steps: Number of reverse diffusion steps to use at inference time (steps are evenly
            spaced). If not provided, this defaults to be the same as `num_train_timesteps`.
        do_mask_loss_for_padding: Whether to mask the loss when there are copy-padded actions. See
            `LeRobotDataset` and `load_previous_and_future_frames` for more information. Note, this defaults
            to False as the original Diffusion Policy implementation does the same.
    """

    # Inputs / output structure.
    n_obs_steps: int = 2
    horizon: int = 16
    n_action_steps: int = 8

    normalization_mapping: dict[str, NormalizationMode] = field(
        default_factory=lambda: {
            "VISUAL": NormalizationMode.MEAN_STD,
            "STATE": NormalizationMode.MIN_MAX,
            "ACTION": NormalizationMode.MIN_MAX,
        }
    )

    # The original implementation doesn't sample frames for the last 7 steps,
    # which avoids excessive padding and leads to improved training results.
    drop_n_last_frames: int = 7  # horizon - n_action_steps - n_obs_steps + 1

    # Architecture / modeling.
    # Vision backbone.
    vision_backbone: str = "resnet18"
    crop_shape: tuple[int, int] | None = (84, 84)
    crop_is_random: bool = True
    pretrained_backbone_weights: str | None = None
    use_group_norm: bool = True
    spatial_softmax_num_keypoints: int = 32
    use_separate_rgb_encoder_per_camera: bool = False
    # Unet.
    down_dims: tuple[int, ...] = (512, 1024, 2048)
    kernel_size: int = 5
    n_groups: int = 8
    diffusion_step_embed_dim: int = 128
    use_film_scale_modulation: bool = True
    # Noise scheduler.
    noise_scheduler_type: str = "DDPM"
    num_train_timesteps: int = 100
    beta_schedule: str = "squaredcos_cap_v2"
    beta_start: float = 0.0001
    beta_end: float = 0.02
    prediction_type: str = "epsilon"
    clip_sample: bool = True
    clip_sample_range: float = 1.0

    # Inference
    num_inference_steps: int | None = None

    # Loss computation
    do_mask_loss_for_padding: bool = False

    # Training presets
    optimizer_lr: float = 1e-4
    optimizer_betas: tuple = (0.95, 0.999)
    optimizer_eps: float = 1e-8
    optimizer_weight_decay: float = 1e-6
    scheduler_name: str = "cosine"
    scheduler_warmup_steps: int = 500

    def __post_init__(self):
        """Input validation (not exhaustive)."""
        super().__post_init__()

        if not self.vision_backbone.startswith("resnet"):
            raise ValueError(
                f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}."
            )

        supported_prediction_types = ["epsilon", "sample"]
        if self.prediction_type not in supported_prediction_types:
            raise ValueError(
                f"`prediction_type` must be one of {supported_prediction_types}. Got {self.prediction_type}."
            )
        supported_noise_schedulers = ["DDPM", "DDIM"]
        if self.noise_scheduler_type not in supported_noise_schedulers:
            raise ValueError(
                f"`noise_scheduler_type` must be one of {supported_noise_schedulers}. "
                f"Got {self.noise_scheduler_type}."
            )

        # Check that the horizon size and U-Net downsampling is compatible.
        # U-Net downsamples by 2 with each stage.
        downsampling_factor = 2 ** len(self.down_dims)
        if self.horizon % downsampling_factor != 0:
            raise ValueError(
                "The horizon should be an integer multiple of the downsampling factor (which is determined "
                f"by `len(down_dims)`). Got {self.horizon=} and {self.down_dims=}"
            )

    def get_optimizer_preset(self) -> AdamConfig:
        """Return the Adam optimizer settings used to train this policy."""
        return AdamConfig(
            lr=self.optimizer_lr,
            betas=self.optimizer_betas,
            eps=self.optimizer_eps,
            weight_decay=self.optimizer_weight_decay,
        )

    def get_scheduler_preset(self) -> DiffuserSchedulerConfig:
        """Return the LR scheduler settings used to train this policy."""
        return DiffuserSchedulerConfig(
            name=self.scheduler_name,
            num_warmup_steps=self.scheduler_warmup_steps,
        )

    def validate_features(self) -> None:
        """Check that the declared input features are usable: at least one visual/env-state input,
        crop fits inside every image, and all images share one shape."""
        if len(self.image_features) == 0 and self.env_state_feature is None:
            raise ValueError("You must provide at least one image or the environment state among the inputs.")

        if self.crop_shape is not None:
            for key, image_ft in self.image_features.items():
                if self.crop_shape[0] > image_ft.shape[1] or self.crop_shape[1] > image_ft.shape[2]:
                    raise ValueError(
                        f"`crop_shape` should fit within the images shapes. Got {self.crop_shape} "
                        f"for `crop_shape` and {image_ft.shape} for "
                        f"`{key}`."
                    )

        # Check that all input images have the same shape.
        first_image_key, first_image_ft = next(iter(self.image_features.items()))
        for key, image_ft in self.image_features.items():
            if image_ft.shape != first_image_ft.shape:
                raise ValueError(
                    f"`{key}` does not match `{first_image_key}`, but we expect all image shapes to match."
                )

    @property
    def observation_delta_indices(self) -> list:
        return list(range(1 - self.n_obs_steps, 1))

    @property
    def action_delta_indices(self) -> list:
        return list(range(1 - self.n_obs_steps, 1 - self.n_obs_steps + self.horizon))

    @property
    def reward_delta_indices(self) -> None:
        return None