diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..7313166d2ed0386d124572322f984f80dc589440 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,160 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Misc +.git +tmp +wandb +data +outputs +.vscode +rl +media + + +# Logging +logs + +# HPC +nautilus/*.yaml +*.key + +# Slurm +sbatch*.sh + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +!tests/artifacts +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Ignore .cache except calibration +.cache/* +!.cache/calibration/ +!.cache/calibration/** + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/.gitattributes b/.gitattributes index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..03f348b87be7f099e1a1e03bc51ddee53e36e1c6 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,59 +1,32 @@ -*.7z filter=lfs diff=lfs merge=lfs -text -*.arrow filter=lfs diff=lfs merge=lfs -text -*.bin filter=lfs diff=lfs merge=lfs -text -*.bz2 filter=lfs diff=lfs merge=lfs -text -*.ckpt filter=lfs diff=lfs merge=lfs -text -*.ftz filter=lfs diff=lfs merge=lfs -text -*.gz filter=lfs diff=lfs merge=lfs -text -*.h5 filter=lfs diff=lfs merge=lfs -text -*.joblib filter=lfs diff=lfs merge=lfs -text -*.lfs.* filter=lfs diff=lfs merge=lfs -text -*.lz4 filter=lfs diff=lfs merge=lfs -text -*.mds filter=lfs diff=lfs merge=lfs -text -*.mlmodel filter=lfs diff=lfs merge=lfs -text -*.model filter=lfs diff=lfs merge=lfs -text -*.msgpack filter=lfs diff=lfs merge=lfs -text -*.npy filter=lfs diff=lfs merge=lfs -text -*.npz filter=lfs diff=lfs merge=lfs -text -*.onnx filter=lfs diff=lfs merge=lfs -text -*.ot filter=lfs diff=lfs merge=lfs -text -*.parquet filter=lfs diff=lfs merge=lfs -text -*.pb filter=lfs diff=lfs merge=lfs -text -*.pickle filter=lfs diff=lfs merge=lfs -text -*.pkl filter=lfs diff=lfs merge=lfs -text -*.pt filter=lfs diff=lfs merge=lfs -text -*.pth filter=lfs diff=lfs merge=lfs -text -*.rar filter=lfs diff=lfs merge=lfs -text -*.safetensors filter=lfs diff=lfs merge=lfs -text -saved_model/**/* filter=lfs diff=lfs merge=lfs -text -*.tar.* filter=lfs diff=lfs merge=lfs -text -*.tar filter=lfs diff=lfs merge=lfs -text -*.tflite filter=lfs diff=lfs merge=lfs -text -*.tgz filter=lfs diff=lfs merge=lfs -text -*.wasm filter=lfs diff=lfs merge=lfs -text -*.xz filter=lfs diff=lfs merge=lfs -text -*.zip filter=lfs diff=lfs merge=lfs -text -*.zst filter=lfs diff=lfs merge=lfs -text -*tfevents* filter=lfs diff=lfs merge=lfs -text -# Audio files - uncompressed -*.pcm filter=lfs diff=lfs merge=lfs -text -*.sam filter=lfs diff=lfs merge=lfs -text -*.raw filter=lfs diff=lfs merge=lfs -text -# Audio files - compressed -*.aac filter=lfs diff=lfs merge=lfs -text -*.flac filter=lfs diff=lfs merge=lfs -text -*.mp3 filter=lfs diff=lfs merge=lfs -text -*.ogg filter=lfs diff=lfs merge=lfs -text -*.wav filter=lfs diff=lfs merge=lfs -text -# Image files - uncompressed -*.bmp filter=lfs diff=lfs merge=lfs -text -*.gif filter=lfs diff=lfs merge=lfs -text -*.png filter=lfs diff=lfs merge=lfs -text -*.tiff filter=lfs diff=lfs merge=lfs -text -# Image files - compressed -*.jpg filter=lfs diff=lfs merge=lfs -text -*.jpeg filter=lfs diff=lfs merge=lfs -text -*.webp filter=lfs diff=lfs merge=lfs -text -# Video files - compressed -*.mp4 filter=lfs diff=lfs merge=lfs -text -*.webm filter=lfs diff=lfs merge=lfs -text +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*.memmap filter=lfs diff=lfs merge=lfs -text
+*.stl filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.json !text !filter !merge !diff
+tests/artifacts/cameras/*.png filter=lfs diff=lfs merge=lfs -text
+*.bag filter=lfs diff=lfs merge=lfs -text
+media/gym/aloha_act.gif filter=lfs diff=lfs merge=lfs -text
+media/gym/pusht_diffusion.gif filter=lfs diff=lfs merge=lfs -text
+media/gym/simxarm_tdmpc.gif filter=lfs diff=lfs merge=lfs -text
+media/hope_jr/hopejr.png filter=lfs diff=lfs merge=lfs -text
+media/lekiwi/kiwi.webp filter=lfs diff=lfs merge=lfs -text
+media/lerobot-logo-light.png filter=lfs diff=lfs merge=lfs -text
+media/lerobot-logo-thumbnail.png filter=lfs diff=lfs merge=lfs -text
+media/so100/leader_follower.webp filter=lfs diff=lfs merge=lfs -text
+media/so101/so101-leader.webp filter=lfs diff=lfs merge=lfs -text
+media/so101/so101.webp filter=lfs diff=lfs merge=lfs -text
+media/wandb.png filter=lfs diff=lfs merge=lfs -text
diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fc13cb308d96ff5257c5e69611453be9cb97a96b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.yml
@@ -0,0 +1,68 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: "\U0001F41B Bug Report"
+description: Submit a bug report to help us improve LeRobot
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for taking the time to submit a bug report! 🐛
+        If this is not a bug in the LeRobot library itself but rather a general question about your code or about how to use the library, please use our [discord](https://discord.gg/s3KuuzsPFb).
+
+  - type: textarea
+    id: system-info
+    attributes:
+      label: System Info
+      description: Please share your LeRobot configuration by running `lerobot-info` (if installed) or `python -m lerobot.scripts.display_sys_info` (if not installed) and pasting the output below.
+      render: Shell
+      placeholder: lerobot version, OS, python version, numpy version, torch version, and lerobot's configuration
+    validations:
+      required: true
+
+  - type: checkboxes
+    id: information-scripts-examples
+    attributes:
+      label: Information
+      description: 'The problem arises when using:'
+      options:
+        - label: "One of the scripts in the examples/ folder of LeRobot"
+        - label: "My own task or dataset (give details below)"
+
+  - type: textarea
+    id: reproduction
+    validations:
+      required: true
+    attributes:
+      label: Reproduction
+      description: |
+        If needed, provide a simple code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet.
+        Sharing error messages or stack traces could be useful as well!
+        Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting
+        Try to avoid screenshots, as they are hard to read and don't allow copy-and-pasting.
+
+      placeholder: |
+        Steps to reproduce the behavior:
+
+        1.
+        2.
+        3.
+
+  - type: textarea
+    id: expected-behavior
+    validations:
+      required: true
+    attributes:
+      label: Expected behavior
+      description: "A clear and concise description of what you would expect to happen."
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000000000000000000000000000000000000..cc06083b41f068ba5cd79549102115dd35bc7600
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,41 @@
+## What this does
+
+Explain what this PR does. Feel free to tag your PR with the appropriate label(s).
+
+Examples:
+| Title | Label |
+|----------------------|-----------------|
+| Fixes #[issue] | (🐛 Bug) |
+| Adds new dataset | (🗃️ Dataset) |
+| Optimizes something | (⚡️ Performance) |
+
+## How it was tested
+
+Explain/show how you tested your changes.
+
+Examples:
+
+- Added `test_something` in `tests/test_stuff.py`.
+- Added `new_feature` and checked that training converges with policy X on dataset/environment Y.
+- Optimized `some_function`; it now runs X times faster than before.
+
+## How to checkout & try? (for the reviewer)
+
+Provide a simple way for the reviewer to try out your changes.
+
+Examples:
+
+```bash
+pytest -sx tests/test_stuff.py::test_something
+```
+
+```bash
+lerobot-train --some.option=true
+```
+
+## SECTION TO REMOVE BEFORE SUBMITTING YOUR PR
+
+**Note**: Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
+members/contributors who may be interested in your PR. Try to avoid tagging more than 3 people.
+
+**Note**: Before submitting this PR, please read the [contributor guideline](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md#submitting-a-pull-request-pr).
diff --git a/.github/workflows/documentation-upload-pr.yml b/.github/workflows/documentation-upload-pr.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cba32f36c0c137805568d4f88ba216aac3fd1c5a
--- /dev/null
+++ b/.github/workflows/documentation-upload-pr.yml
@@ -0,0 +1,40 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow uploads the documentation preview built for a PR and comments the link on the PR. +name: Documentation PR Upload +permissions: + contents: read + pull-requests: write + +on: + # Triggered by the completion of the main 'Documentation' workflow. + workflow_run: # zizmor: ignore[dangerous-triggers] We follow the same pattern as in Transformers + workflows: ["Documentation"] + types: + - completed + +jobs: + # This job uploads a preview of the documentation for a pull request. + upload_and_comment: + name: Upload Preview and Comment + if: > + github.event.workflow_run.event == 'pull_request' && + github.event.workflow_run.conclusion == 'success' + uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main + with: + package_name: lerobot + secrets: + hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }} + comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }} diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml new file mode 100644 index 0000000000000000000000000000000000000000..be1d8b75e7616b85ef4f0f81e6808b0415be13b4 --- /dev/null +++ b/.github/workflows/documentation.yml @@ -0,0 +1,70 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow handles building documentation for both main branches and PRs. +name: Documentation + +on: + # Allows running this workflow manually from the Actions tab + workflow_dispatch: + + # Triggers the workflow on push events to main for the docs folder + push: + branches: + - main + paths: + - "docs/**" + + # Triggers the workflow on pull request events targeting main for the docs folder + pull_request: + branches: + - main + paths: + - "docs/**" + +# Ensures that only the latest commit for a PR or branch is built, canceling older runs. +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + # This job builds and deploys the official documentation. + build_main_docs: + name: Build Main Docs + if: github.event_name == 'push' || github.event_name == 'workflow_dispatch' + permissions: + contents: read + uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main + with: + commit_sha: ${{ github.sha }} + package: lerobot + additional_args: --not_python_module + secrets: + token: ${{ secrets.HUGGINGFACE_PUSH }} + hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }} + + # This job builds a preview of the documentation for a pull request. + # The result of this job triggers the 'Upload PR Documentation' workflow. 
+  build_pr_docs:
+    name: Build PR Docs
+    if: github.event_name == 'pull_request'
+    permissions:
+      contents: read
+      pull-requests: write
+    uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
+    with:
+      commit_sha: ${{ github.event.pull_request.head.sha }}
+      pr_number: ${{ github.event.number }}
+      package: lerobot
+      additional_args: --not_python_module
diff --git a/.github/workflows/fast_tests.yml b/.github/workflows/fast_tests.yml
new file mode 100644
index 0000000000000000000000000000000000000000..38b8b1a20d7ca91557c0923caedc030b77f72974
--- /dev/null
+++ b/.github/workflows/fast_tests.yml
@@ -0,0 +1,87 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This workflow handles fast testing.
+name: Fast Tests
+
+on:
+  # Allows running this workflow manually from the Actions tab
+  workflow_dispatch:
+
+  pull_request:
+    branches:
+      - main
+    paths:
+      - "src/**"
+      - "tests/**"
+      - ".github/workflows/**"
+      - "pyproject.toml"
+      - "Makefile"
+  push:
+    branches:
+      - main
+    paths:
+      - "src/**"
+      - "tests/**"
+      - ".github/workflows/**"
+      - "pyproject.toml"
+      - "Makefile"
+
+permissions:
+  contents: read
+
+# Sets up the environment variables
+env:
+  UV_VERSION: "0.8.0"
+  PYTHON_VERSION: "3.10"
+  DOCKER_IMAGE_NAME: huggingface/lerobot-gpu
+
+# Ensures that only the latest commit for a PR or branch is built, canceling older runs.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  # This job runs pytest with the default dependencies.
+  # It runs every time we commit to a PR or push to main
+  fast-pytest-tests:
+    name: Fast Pytest Tests
+    runs-on: ubuntu-latest
+    env:
+      MUJOCO_GL: egl
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+          lfs: true
+
+      # TODO(Steven): Evaluate the need of these dependencies
+      - name: Install apt dependencies
+        run: |
+          sudo apt-get update && sudo apt-get install -y build-essential git \
+          curl libglib2.0-0 libegl1-mesa-dev ffmpeg \
+          libusb-1.0-0-dev speech-dispatcher libgeos-dev portaudio19-dev
+
+      - name: Setup uv and Python
+        uses: astral-sh/setup-uv@v6 # zizmor: ignore[unpinned-uses]
+        with:
+          enable-cache: true
+          version: ${{ env.UV_VERSION }}
+          python-version: ${{ env.PYTHON_VERSION }}
+
+      - name: Install lerobot with test extras
+        run: uv sync --extra "test"
+
+      - name: Run pytest
+        run: uv run pytest tests -vv --maxfail=10
diff --git a/.github/workflows/full_tests.yml b/.github/workflows/full_tests.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a4e9f4bd64002d1067abe2fa8d33840071ee18f2
--- /dev/null
+++ b/.github/workflows/full_tests.yml
@@ -0,0 +1,210 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This workflow handles full testing.
+name: Full Tests
+
+on:
+  # Allows running this workflow manually from the Actions tab
+  workflow_dispatch:
+
+  pull_request_review:
+    types: [submitted]
+  push:
+    branches:
+      - main
+    paths:
+      - "src/**"
+      - "tests/**"
+      - ".github/workflows/**"
+      - "pyproject.toml"
+      - "Makefile"
+
+permissions:
+  contents: read
+
+# Sets up the environment variables
+env:
+  UV_VERSION: "0.8.0"
+  PYTHON_VERSION: "3.10"
+  DOCKER_IMAGE_NAME: huggingface/lerobot-gpu
+
+# Ensures that only the latest action is built, canceling older runs.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+
+  # This job runs the E2E tests + pytest with all extras
+  # It runs every time a PR is approved or a push to main occurs
+  full-tests:
+    name: Full Tests
+    runs-on: ubuntu-latest
+    if: |
+      (github.event_name == 'pull_request_review' && github.event.review.state == 'approved') ||
+      github.event_name == 'push' ||
+      github.event_name == 'workflow_dispatch'
+    env:
+      MUJOCO_GL: egl
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          lfs: true
+          persist-credentials: false
+
+      - name: Install apt dependencies
+        run: |
+          sudo apt-get update && sudo apt-get install -y build-essential \
+          git curl libglib2.0-0 libegl1-mesa-dev ffmpeg libusb-1.0-0-dev \
+          speech-dispatcher libgeos-dev portaudio19-dev
+
+      - name: Setup uv and Python
+        uses: astral-sh/setup-uv@v6 # zizmor: ignore[unpinned-uses]
+        with:
+          enable-cache: true
+          version: ${{ env.UV_VERSION }}
+          python-version: ${{ env.PYTHON_VERSION }}
+
+      - name: Install lerobot with all extras
+        run: uv sync --all-extras --no-extra groot # TODO(Steven): Make flash-attn optional
+
+      - name: Run pytest (all extras)
+        run: uv run pytest tests -vv --maxfail=10
+
+      - name: Run end-to-end tests
+        run: uv run make test-end-to-end
+
+  # This job builds a GPU enabled image for testing
+  # It runs every time a PR is approved or a push to main occurs
+  # TODO(Steven): For now we skip this job for community PRs
+  build-and-push-docker:
+    name: Build and Push Docker
+    runs-on:
+      group: aws-general-8-plus
+    if: |
+      (github.event_name == 'pull_request_review' && github.event.review.state == 'approved' && github.event.pull_request.head.repo.fork == false) ||
+      github.event_name == 'push' ||
+      github.event_name == 'workflow_dispatch'
+    outputs:
+      image_tag: ${{ steps.set_tag.outputs.image_tag }}
+    env:
+      GITHUB_EVENT_NAME: ${{ github.event_name }}
+      GITHUB_REF: ${{ github.ref }}
+      GITHUB_PR_NUMBER: ${{ github.event.pull_request.number }}
+    steps:
+      - name: Set Docker image tag
+        id: set_tag
+        run: |
+          if [[ "${GITHUB_EVENT_NAME}" == "push" ]]; then
+            TAG="${DOCKER_IMAGE_NAME}:latest"
+          elif [[ -n "${GITHUB_PR_NUMBER}" ]]; then
+            TAG="${DOCKER_IMAGE_NAME}:pr-${GITHUB_PR_NUMBER}"
+          else
+            TAG="${DOCKER_IMAGE_NAME}:pr-${GITHUB_REF##*/}"
+          fi
+          echo "image_tag=$TAG" >> $GITHUB_OUTPUT
+      - name: Install Git LFS
+        run: |
+          sudo apt-get update
+          sudo apt-get install git-lfs
+          git lfs install
+      - uses: actions/checkout@v4
+        with:
+          lfs: true
+          persist-credentials: false
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3 # zizmor: ignore[unpinned-uses]
+        with:
+          cache-binary: false
+      - name: Login to Docker Hub
+        uses: docker/login-action@v3 # zizmor: ignore[unpinned-uses]
+        with:
+          username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }}
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v6 # zizmor: ignore[unpinned-uses]
+        with:
+          context: .
+          file: ./docker/Dockerfile.internal
+          push: true
+          tags: ${{ steps.set_tag.outputs.image_tag }}
+
+  # This job runs pytest with all extras in a GPU enabled host
+  # It runs every time a test image is created
+  gpu-tests:
+    name: GPU Tests
+    needs: [build-and-push-docker]
+    runs-on:
+      group: aws-g6-4xlarge-plus
+    env:
+      HF_HOME: /home/user_lerobot/.cache/huggingface
+      HF_LEROBOT_HOME: /home/user_lerobot/.cache/huggingface/lerobot
+      TORCH_HOME: /home/user_lerobot/.cache/torch
+      TRITON_CACHE_DIR: /home/user_lerobot/.cache/triton
+    container:
+      image: ${{ needs.build-and-push-docker.outputs.image_tag }} # zizmor: ignore[unpinned-images]
+      options: --gpus all --shm-size "16gb"
+      credentials:
+        username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }}
+        password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }}
+    defaults:
+      run:
+        shell: bash
+        working-directory: /lerobot
+    steps:
+      - name: Run pytest on GPU
+        run: pytest tests -vv --maxfail=10
+      - name: Run end-to-end tests
+        run: make test-end-to-end
+
+  # This job deletes the test image recently created
+  # It runs after the gpu-tests have finished
+  delete-pr-image:
+    name: Delete PR Image
+    needs: [gpu-tests, build-and-push-docker]
+    if: always() && ((github.event.review.state == 'approved') || (github.event_name == 'workflow_dispatch')) && needs.build-and-push-docker.result == 'success'
+    runs-on: ubuntu-latest
+    steps:
+      - name: Get Docker Hub Token and Delete Image
+        # zizmor: ignore[template-injection]
+        run: |
+          IMAGE_NAME=$(echo "${{ needs.build-and-push-docker.outputs.image_tag }}" | cut -d':' -f1)
+          IMAGE_TAG=$(echo "${{ needs.build-and-push-docker.outputs.image_tag }}" | cut -d':' -f2)
+
+          echo "Attempting to delete image: $IMAGE_NAME:$IMAGE_TAG"
+
+          TOKEN=$(curl -s -H "Content-Type: application/json" \
+            -X POST \
+            -d '{"username": "${{ secrets.DOCKERHUB_LEROBOT_USERNAME }}", "password": "${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }}"}' \
+            https://hub.docker.com/v2/users/login/ | jq -r .token)
+
+          if [ "$TOKEN" == "null" ] || [ -z "$TOKEN" ]; then
+            echo "::error::Failed to get Docker Hub token."
+            exit 1
+          fi
+
+          HTTP_RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" \
+            -H "Authorization: JWT ${TOKEN}" \
+            -X DELETE \
+            https://hub.docker.com/v2/repositories/${IMAGE_NAME}/tags/${IMAGE_TAG}/)
+
+          if [ "$HTTP_RESPONSE" -eq 204 ]; then
+            echo "Successfully deleted Docker image tag: $IMAGE_NAME:$IMAGE_TAG"
+          else
+            echo "::error::Failed to delete Docker image. HTTP status: $HTTP_RESPONSE"
+            exit 1
+          fi
+
+# TODO(Steven): Check docker images pull in ubuntu
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5a07b2ab2b31f53b5ebd3895439ae58af7f2eed7
--- /dev/null
+++ b/.github/workflows/nightly.yml
@@ -0,0 +1,194 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow handles nightly testing & docker images publishing. +name: Nightly +permissions: + contents: read + +on: + # Allows running this workflow manually from the Actions tab + workflow_dispatch: + + # Runs at 02:00 + schedule: + - cron: "0 2 * * *" + +# Sets up the environment variables +env: + UV_VERSION: "0.8.0" + PYTHON_VERSION: "3.10" + DOCKER_IMAGE_NAME_CPU: huggingface/lerobot-cpu:latest + DOCKER_IMAGE_NAME_GPU: huggingface/lerobot-gpu:latest + +# Ensures that only the latest commit is built, canceling older runs. +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + # This job builds a CPU image for testing & distribution + build-docker-cpu-nightly: + name: Build CPU Docker for Nightly + runs-on: + group: aws-general-8-plus + outputs: + image_tag: ${{ env.DOCKER_IMAGE_NAME_CPU }} + steps: + - name: Install Git LFS + run: | + sudo apt-get update + sudo apt-get install git-lfs + git lfs install + - uses: actions/checkout@v4 + with: + lfs: true + persist-credentials: false + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 # zizmor: ignore[unpinned-uses] + with: + cache-binary: false + - name: Login to Docker Hub + uses: docker/login-action@v3 # zizmor: ignore[unpinned-uses] + with: + username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + - name: Build and push Docker image CPU + uses: docker/build-push-action@v6 # zizmor: ignore[unpinned-uses] + with: + context: . + file: ./docker/Dockerfile.user + push: true + tags: ${{ env.DOCKER_IMAGE_NAME_CPU }} + + # This job builds a GPU image for testing & distribution + build-docker-gpu-nightly: + name: Build GPU Docker for Nightly + runs-on: + group: aws-general-8-plus + outputs: + image_tag: ${{ env.DOCKER_IMAGE_NAME_GPU }} + steps: + - name: Install Git LFS + run: | + sudo apt-get update + sudo apt-get install git-lfs + git lfs install + - uses: actions/checkout@v4 + with: + lfs: true + persist-credentials: false + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 # zizmor: ignore[unpinned-uses] + with: + cache-binary: false + - name: Login to Docker Hub + uses: docker/login-action@v3 # zizmor: ignore[unpinned-uses] + with: + username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + - name: Build and push Docker image GPU + uses: docker/build-push-action@v6 # zizmor: ignore[unpinned-uses] + with: + context: . 
+ file: ./docker/Dockerfile.internal + push: true + tags: ${{ env.DOCKER_IMAGE_NAME_GPU }} + + # This job runs the E2E tests + pytest with all extras in the CPU image + nightly-cpu-tests: + name: Nightly CPU Tests + needs: [build-docker-cpu-nightly] + runs-on: + group: aws-g6-4xlarge-plus + env: + HF_HOME: /home/user_lerobot/.cache/huggingface + HF_LEROBOT_HOME: /home/user_lerobot/.cache/huggingface/lerobot + TORCH_HOME: /home/user_lerobot/.cache/torch + TRITON_CACHE_DIR: /home/user_lerobot/.cache/triton + container: + image: ${{ needs.build-docker-cpu-nightly.outputs.image_tag }} # zizmor: ignore[unpinned-images] + options: --shm-size "16gb" + credentials: + username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + defaults: + run: + shell: bash + working-directory: /lerobot + steps: + - name: Run pytest on CPU + run: pytest tests -vv --maxfail=10 + - name: Run end-to-end tests + run: make test-end-to-end + + # This job runs the E2E tests + pytest with all extras in the GPU image + nightly-gpu-tests: + name: Nightly GPU Tests + needs: [build-docker-gpu-nightly] + runs-on: + group: aws-g6-4xlarge-plus + env: + HF_HOME: /home/user_lerobot/.cache/huggingface + HF_LEROBOT_HOME: /home/user_lerobot/.cache/huggingface/lerobot + TORCH_HOME: /home/user_lerobot/.cache/torch + TRITON_CACHE_DIR: /home/user_lerobot/.cache/triton + container: + image: ${{ needs.build-docker-gpu-nightly.outputs.image_tag }} # zizmor: ignore[unpinned-images] + options: --gpus all --shm-size "16gb" + credentials: + username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + defaults: + run: + shell: bash + working-directory: /lerobot + steps: + - name: Run pytest on GPU + run: pytest tests -vv --maxfail=10 + - name: Run end-to-end tests + run: make test-end-to-end + + # This job runs multi-GPU training tests with 4 GPUs + nightly-multi-gpu-tests: + name: Nightly Multi-GPU Tests + needs: [build-docker-gpu-nightly] + runs-on: + group: aws-g4dn-12xlarge # Instance with 4 GPUs + env: + HF_HOME: /home/user_lerobot/.cache/huggingface + HF_LEROBOT_HOME: /home/user_lerobot/.cache/huggingface/lerobot + TORCH_HOME: /home/user_lerobot/.cache/torch + TRITON_CACHE_DIR: /home/user_lerobot/.cache/triton + CUDA_VISIBLE_DEVICES: "0,1,2,3" + container: + image: ${{ needs.build-docker-gpu-nightly.outputs.image_tag }} # zizmor: ignore[unpinned-images] + options: --gpus all --shm-size "16gb" + credentials: + username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + defaults: + run: + shell: bash + working-directory: /lerobot + steps: + - name: Verify GPU availability + run: | + nvidia-smi + python -c "import torch; print(f'PyTorch CUDA available: {torch.cuda.is_available()}'); print(f'Number of GPUs: {torch.cuda.device_count()}')" + + - name: Run multi-GPU training tests + # TODO(Steven): Investigate why motors tests are failing in multi-GPU setup + run: pytest tests -vv --maxfail=10 --ignore=tests/motors/ + timeout-minutes: 10 diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml new file mode 100644 index 0000000000000000000000000000000000000000..e5f596aa23483cc57906b70d06fe5e3bea2d688c --- /dev/null +++ b/.github/workflows/quality.yml @@ -0,0 +1,58 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow handles linting, formatting, and static analysis checks for the codebase. +name: Quality +permissions: + contents: read + +on: + # Allows running this workflow manually from the Actions tab + workflow_dispatch: + + # Triggers the workflow on push events to main + push: + branches: + - main + + # Triggers the workflow on pull request events targeting main + pull_request: + branches: + - main + +# Ensures that only the latest commit for a PR or branch is built, canceling older runs. +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + # This job runs pre-commit hooks to check code style and formatting. + pre-commit-checks: + name: Run Pre-commit Hooks (Lint, Format & Static Analysis) + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.10' + + - name: Run pre-commit hooks + uses: pre-commit/action@v3.0.1 # zizmor: ignore[unpinned-uses] + with: + extra_args: --all-files --show-diff-on-failure --color=always diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000000000000000000000000000000000..e00ede4e01eaafb357878ce8a7ae95a5c4fc98ad --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,179 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+name: Create Release and Publish to PyPI
+
+on:
+  push:
+    tags:
+      - 'v*.*.*' # Trigger on tags like v0.1.0, v1.0.0
+
+# Sets up the environment variables
+env:
+  UV_VERSION: "0.8.0"
+  PYTHON_VERSION: "3.10"
+
+jobs:
+  # This job builds the Python package and publishes it to PyPI
+  build-and-publish:
+    name: Build and publish Python distributions
+    runs-on: ubuntu-latest
+    outputs:
+      version: ${{ steps.extract_info.outputs.tag_version }}
+    permissions:
+      contents: write
+      id-token: write
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.10'
+
+      - name: Extract Version
+        id: extract_info
+        # Extract version from tag (e.g., v0.1.0 -> 0.1.0)
+        # zizmor: ignore[template-injection]
+        run: |
+          VERSION=${{ github.ref_name }}
+          VERSION_NUMBER=${VERSION#v}
+          echo "tag_version=$VERSION_NUMBER" >> $GITHUB_OUTPUT
+      - name: Check if version matches pyproject.toml
+        if: startsWith(github.ref, 'refs/tags/v') && !contains(github.ref, '-')
+        # zizmor: ignore[template-injection]
+        run: |
+          TAG_VERSION=${{ steps.extract_info.outputs.tag_version }}
+
+          PYPROJECT_VERSION=$(grep '^version = ' pyproject.toml | awk -F' = ' '{print $2}' | tr -d '"')
+
+          if [[ "$TAG_VERSION" != "$PYPROJECT_VERSION" ]]; then
+            echo "Error: Tag version ($TAG_VERSION) does not match pyproject.toml version ($PYPROJECT_VERSION)." >&2
+            exit 1
+          else
+            echo "Tag version matches pyproject.toml version: $TAG_VERSION. Proceeding with release."
+          fi
+
+      - name: Check if version exists on PyPI
+        # zizmor: ignore[template-injection]
+        run: |
+          NEW_VERSION=${{ steps.extract_info.outputs.tag_version }}
+
+          response=$(curl -s "https://pypi.org/pypi/lerobot/$NEW_VERSION/json")
+          if echo "$response" | grep -q "message"; then
+            echo "Version $NEW_VERSION is not yet on PyPI. Proceeding with release."
+          else
+            echo "Error: Version $NEW_VERSION already exists on PyPI. Aborting."
+            exit 1
+          fi
+
+      - name: Remove Tags with Git dependencies
+        # TODO(Steven): Temporary patch to remove pi from the PyPI 0.4.0 release due to its reliance on git dependencies.
+        run: |
+          echo "::info:: Checking for Git dependencies to remove from pyproject.toml..."
+          grep -E '@ git\+https|lerobot\[pi\]' pyproject.toml | sed 's/^/::warning:: Removing line: /' || true
+          sed -E -i '/@ git\+https|lerobot\[pi\]/d' pyproject.toml
+          echo "::info:: Git dependencies removed. Proceeding with build."
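Reviewer note: the version-handling steps above are plain POSIX shell. A minimal local sketch of the same tag-vs-`pyproject.toml` check, runnable outside CI (the `TAG` value is illustrative and stands in for `${{ github.ref_name }}`; it assumes you run it from a checkout containing `pyproject.toml`):

```bash
#!/usr/bin/env bash
set -euo pipefail

TAG="v0.4.0"            # illustrative stand-in for ${{ github.ref_name }}
TAG_VERSION="${TAG#v}"  # POSIX parameter expansion strips the leading 'v' -> 0.4.0

# Same extraction the workflow uses: read `version = "..."` from pyproject.toml.
PYPROJECT_VERSION=$(grep '^version = ' pyproject.toml | awk -F' = ' '{print $2}' | tr -d '"')

if [[ "$TAG_VERSION" != "$PYPROJECT_VERSION" ]]; then
  echo "Tag version ($TAG_VERSION) does not match pyproject.toml version ($PYPROJECT_VERSION)." >&2
  exit 1
fi
echo "Tag version matches pyproject.toml version: $TAG_VERSION."
```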
+ + - name: Install build dependencies + run: python -m pip install build + + - name: Build package + run: python -m build + + - name: Create GitHub Release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # zizmor: ignore[template-injection] + run: | + gh release create ${{ github.ref_name }} \ + --title "Release ${{ github.ref_name }}" \ + --generate-notes \ + --draft=$([[ "${{ github.ref_name }}" == *-* ]] && echo true || echo false) \ + --prerelease=$([[ "${{ github.ref_name }}" == *-* ]] && echo true || echo false) \ + ./dist/* + + - name: Publish to TestPyPI for pre-releases + # True for tags like 'v0.2.0-rc1' + if: startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '-') + uses: pypa/gh-action-pypi-publish@v1.13.0 # zizmor: ignore[unpinned-uses, use-trusted-publishing] + with: + repository-url: https://test.pypi.org/legacy/ + verbose: true + print-hash: true + + - name: Publish to PyPI + if: startsWith(github.ref, 'refs/tags/v') && !contains(github.ref, '-') + uses: pypa/gh-action-pypi-publish@v1.13.0 # zizmor: ignore[unpinned-uses, use-trusted-publishing] + with: + verbose: true + print-hash: true + + # This job runs end-to-end tests on the release + test-release: + name: Test Release + needs: [build-and-publish] + runs-on: ubuntu-latest + permissions: + contents: read + env: + MUJOCO_GL: egl + steps: + - uses: actions/checkout@v4 + with: + lfs: true + persist-credentials: false + - name: Install apt dependencies + run: | + sudo apt-get update && sudo apt-get install -y build-essential \ + git curl libglib2.0-0 libegl1-mesa-dev ffmpeg libusb-1.0-0-dev \ + speech-dispatcher libgeos-dev portaudio19-dev + - name: Setup uv and Python + uses: astral-sh/setup-uv@v6 # zizmor: ignore[unpinned-uses] + with: + enable-cache: true # zizmor: ignore[cache-poisoning] + version: ${{ env.UV_VERSION }} + python-version: ${{ env.PYTHON_VERSION }} + - name: Create uv virtual environment + run: uv venv + - name: Install lerobot release + # zizmor: ignore[template-injection] + run: | + VERSION="${{ needs.build-and-publish.outputs.version }}" + if [[ "$VERSION" == *-* ]]; then + BASE_VERSION="${VERSION%%-*}" + echo "Installing pre-release version $BASE_VERSION from TestPyPI..." + uv pip install \ + --index-url https://test.pypi.org/simple/ \ + --extra-index-url https://pypi.org/simple \ + --index-strategy unsafe-best-match \ + "lerobot[all]==$BASE_VERSION" + else + echo "Installing release version $VERSION from PyPI..." + uv pip install "lerobot[all]==$VERSION" + fi + - name: Check lerobot version + run: uv run python -c "import lerobot; print(lerobot.__version__)" + + - name: Run end-to-end tests + run: uv run make test-end-to-end + + +# TODO(Steven): Publish draft/pre-release and to test pypi weekly +# TODO(Steven): Separate build and publish job +# TODO(Steven): Tag documentation with the same version as the package diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml new file mode 100644 index 0000000000000000000000000000000000000000..18520a91854dd83bb9f2e9607902ce70f528a200 --- /dev/null +++ b/.github/workflows/security.yml @@ -0,0 +1,54 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This workflow handles secret scanning using TruffleHog to detect sensitive information in the codebase.
+name: Security
+permissions:
+  contents: read
+
+on:
+  # Allows running this workflow manually from the Actions tab
+  workflow_dispatch:
+
+  # Triggers the workflow on push events to main
+  push:
+    branches:
+      - main
+
+  # Triggers the workflow on pull request events targeting main
+  pull_request:
+    branches:
+      - main
+
+# Ensures that only the latest commit for a PR or branch is built, canceling older runs.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  # This job runs TruffleHog to scan the full history of the repository for secrets.
+  trufflehog:
+    name: Secret Leaks Scan
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4 # zizmor: ignore[unpinned-uses]
+        with:
+          fetch-depth: 0
+          persist-credentials: false
+
+      - name: Secret Scanning
+        uses: trufflesecurity/trufflehog@v3.90.0 # zizmor: ignore[unpinned-uses]
+        with:
+          extra_args: --only-verified
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cb7ce45d41108a47aaf4e91f2e19442f7a8c50fd
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,70 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This workflow handles closing stale issues and PRs.
+name: Stale
+on:
+  # Allows running this workflow manually from the Actions tab
+  workflow_dispatch:
+
+  # Runs at 02:00
+  schedule:
+    - cron: "0 2 * * *"
+
+env:
+  CLOSE_ISSUE_MESSAGE: >
+    This issue was closed because it has been stalled for 14 days with no activity.
+    Feel free to reopen if it is still relevant, or to ping a collaborator if you have any questions.
+  CLOSE_PR_MESSAGE: >
+    This PR was closed because it has been stalled for 21 days with no activity.
+    Feel free to reopen if it is still relevant, or to ping a collaborator if you have any questions.
+  WARN_ISSUE_MESSAGE: >
+    This issue has been automatically marked as stale because it has not had
+    recent activity (6 months). It will be closed if no further activity occurs.
+    Any change, comment or update to this issue will reset this count.
+    Thank you for your contributions.
+  WARN_PR_MESSAGE: >
+    This PR has been automatically marked as stale because it has not had
+    recent activity (1 year). It will be closed if no further activity occurs.
+    Any change, comment or update to this PR will reset this count.
+    Thank you for your contributions.
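Reviewer note: the messages above use YAML folded block scalars (`>`), so the wrapped source lines are joined with spaces and each env var reaches the stale action as a single-paragraph string. A minimal sketch of the folding behavior (the key name is hypothetical):

```yaml
# With '>' the line breaks inside the block are folded into spaces, so this
# parses as the single string "This issue was closed because it has been stalled.\n".
example_message: >
  This issue was closed
  because it has been stalled.
```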
+
+jobs:
+  # This job runs the actions/stale action to close stale issues and PRs.
+  stale:
+    name: Close Stale Issues and PRs
+    runs-on: ubuntu-latest
+    permissions:
+      actions: write
+      contents: write # only for delete-branch option
+      issues: write
+      pull-requests: write
+    steps:
+      - uses: actions/stale@v10
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          stale-issue-label: stale
+          stale-pr-label: stale
+          exempt-issue-labels: never-stale
+          exempt-pr-labels: never-stale
+          days-before-issue-stale: 180
+          days-before-issue-close: 14
+          days-before-pr-stale: 365
+          days-before-pr-close: 21
+          delete-branch: true
+          close-issue-message: ${{ env.CLOSE_ISSUE_MESSAGE }}
+          close-pr-message: ${{ env.CLOSE_PR_MESSAGE }}
+          stale-issue-message: ${{ env.WARN_ISSUE_MESSAGE }}
+          stale-pr-message: ${{ env.WARN_PR_MESSAGE }}
+          operations-per-run: 500
diff --git a/.github/workflows/unbound_deps_tests.yml b/.github/workflows/unbound_deps_tests.yml
new file mode 100644
index 0000000000000000000000000000000000000000..93b1b1070869bf5f8b933daca101657a36899604
--- /dev/null
+++ b/.github/workflows/unbound_deps_tests.yml
@@ -0,0 +1,183 @@
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This workflow handles full testing with unbound dependency versions.
+name: Unbound Dependency Tests
+
+on:
+  # Allows running this workflow manually from the Actions tab
+  workflow_dispatch:
+
+  # Run on the 1st and 15th of every month at 02:00 UTC
+  schedule:
+    - cron: '0 2 1,15 * *'
+
+permissions:
+  contents: read
+
+# Sets up the environment variables
+env:
+  UV_VERSION: "0.8.0"
+  PYTHON_VERSION: "3.10"
+  DOCKER_IMAGE_NAME: huggingface/lerobot-gpu:unbound
+
+# Ensures that only the latest action is built, canceling older runs.
+concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + + # This job runs the E2E tests + pytest with all unbound extras + full-tests: + name: Full Unbound Tests + runs-on: ubuntu-latest + env: + MUJOCO_GL: egl + steps: + - uses: actions/checkout@v4 + with: + lfs: true + persist-credentials: false + + - name: Install apt dependencies + run: | + sudo apt-get update && sudo apt-get install -y build-essential \ + git curl libglib2.0-0 libegl1-mesa-dev ffmpeg libusb-1.0-0-dev \ + speech-dispatcher libgeos-dev portaudio19-dev + + - name: Setup uv and Python + uses: astral-sh/setup-uv@v6 # zizmor: ignore[unpinned-uses] + with: + enable-cache: true + version: ${{ env.UV_VERSION }} + python-version: ${{ env.PYTHON_VERSION }} + + - name: Unbound dependencies + run: | + sed -i 's/,[[:space:]]*<[0-9\.]*//g' pyproject.toml + echo "Dependencies unbound:" && cat pyproject.toml + + - name: Install lerobot with all extras + run: uv sync --all-extras --no-extra groot # TODO(Steven): Make flash-attn optional + + - name: Run pytest (all extras) + run: uv run pytest tests -vv + + - name: Run end-to-end tests + run: uv run make test-end-to-end + + # This job builds a GPU enabled image for testing + build-and-push-docker: + name: Build and Push Docker + runs-on: + group: aws-general-8-plus + outputs: + image_tag: ${{ env.DOCKER_IMAGE_NAME }} + env: + GITHUB_REF: ${{ github.ref }} + steps: + - name: Install Git LFS + run: | + sudo apt-get update + sudo apt-get install git-lfs + git lfs install + - uses: actions/checkout@v4 + with: + lfs: true + persist-credentials: false + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 # zizmor: ignore[unpinned-uses] + with: + cache-binary: false + - name: Login to Docker Hub + uses: docker/login-action@v3 # zizmor: ignore[unpinned-uses] + with: + username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }} + password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }} + - name: Build and push Docker image + uses: docker/build-push-action@v6 # zizmor: ignore[unpinned-uses] + with: + context: . 
+          file: ./docker/Dockerfile.internal
+          push: true
+          tags: ${{ env.DOCKER_IMAGE_NAME }}
+          build-args: |
+            UNBOUND_DEPS=true
+
+  # This job runs pytest with all unbound extras in a GPU enabled host
+  # It runs every time a test image is created
+  gpu-tests:
+    name: GPU Unbound Tests
+    needs: [build-and-push-docker]
+    runs-on:
+      group: aws-g6-4xlarge-plus
+    env:
+      HF_HOME: /home/user_lerobot/.cache/huggingface
+      HF_LEROBOT_HOME: /home/user_lerobot/.cache/huggingface/lerobot
+      TORCH_HOME: /home/user_lerobot/.cache/torch
+      TRITON_CACHE_DIR: /home/user_lerobot/.cache/triton
+    container:
+      image: ${{ needs.build-and-push-docker.outputs.image_tag }} # zizmor: ignore[unpinned-images]
+      options: --gpus all --shm-size "16gb"
+      credentials:
+        username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }}
+        password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }}
+    defaults:
+      run:
+        shell: bash
+        working-directory: /lerobot
+    steps:
+      - name: Run pytest on GPU
+        run: pytest tests -vv
+      - name: Run end-to-end tests
+        run: make test-end-to-end
+
+  # This job deletes the test image recently created
+  # It runs after the gpu-tests have finished
+  delete-unbound-image:
+    name: Delete Unbound Image
+    needs: [gpu-tests, build-and-push-docker]
+    if: always() && needs.build-and-push-docker.result == 'success'
+    runs-on: ubuntu-latest
+    steps:
+      - name: Get Docker Hub Token and Delete Image
+        # zizmor: ignore[template-injection]
+        run: |
+          IMAGE_NAME=$(echo "${{ needs.build-and-push-docker.outputs.image_tag }}" | cut -d':' -f1)
+          IMAGE_TAG=$(echo "${{ needs.build-and-push-docker.outputs.image_tag }}" | cut -d':' -f2)
+
+          echo "Attempting to delete image: $IMAGE_NAME:$IMAGE_TAG"
+
+          TOKEN=$(curl -s -H "Content-Type: application/json" \
+            -X POST \
+            -d '{"username": "${{ secrets.DOCKERHUB_LEROBOT_USERNAME }}", "password": "${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }}"}' \
+            https://hub.docker.com/v2/users/login/ | jq -r .token)
+
+          if [ "$TOKEN" == "null" ] || [ -z "$TOKEN" ]; then
+            echo "::error::Failed to get Docker Hub token."
+            exit 1
+          fi
+
+          HTTP_RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" \
+            -H "Authorization: JWT ${TOKEN}" \
+            -X DELETE \
+            https://hub.docker.com/v2/repositories/${IMAGE_NAME}/tags/${IMAGE_TAG}/)
+
+          if [ "$HTTP_RESPONSE" -eq 204 ]; then
+            echo "Successfully deleted Docker image tag: $IMAGE_NAME:$IMAGE_TAG"
+          else
+            echo "::error::Failed to delete Docker image. HTTP status: $HTTP_RESPONSE"
+            exit 1
+          fi
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..e5f7a2774e85acf9a920f5e4b478cc0020a0a523
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,179 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +### Environments & Dependencies ### +.env +.venv +env/ +venv/ +env.bak/ +venv.bak/ +.python-version +__pypackages__/ +node_modules/ + +# Lock files +poetry.lock +uv.lock +Pipfile.lock + +### Build & Distribution ### +build/ +dist/ +sdist/ +wheels/ +downloads/ +eggs/ +.eggs/ +parts/ +var/ +pip-wheel-metadata/ +share/python-wheels/ +develop-eggs/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST +lib/ +lib64/ + +# PyInstaller +*.manifest +*.spec + +### Compiled & Cached Files ### +__pycache__/ +*.py[cod] +*$py.class +*.so +*.sage.py +.cache/ +.ruff_cache/ +.mypy_cache/ +.pyre/ +.pytype/ +cython_debug/ + +### Testing & Coverage ### +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.pytest_cache/ +.hypothesis/ +nosetests.xml +coverage.xml +*.cover +*.py,cover +!tests/artifacts + +### Logs & Temporary Files ### +logs/ +tmp/ +*.log +pip-log.txt +pip-delete-this-directory.txt +celerybeat-schedule +celerybeat.pid + +### IDE & Editor Config ### +# VS Code +.vscode/ +.devcontainer/ + +# JetBrains / PyCharm +.idea/ + +# Spyder +.spyderproject +.spyproject + +# Rope +.ropeproject + +# Vim +*.swp + +# Other +*~ + +### OS Specific ### +# macOS +.DS_Store + +# Windows +Thumbs.db + +### Framework & Tool Specific ### + +.Python + +# Django +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask +instance/ +.webassets-cache + +# Scrapy +.scrapy + +# Jupyter +.ipynb_checkpoints/ +profile_default/ +ipython_config.py + +# Sphinx +docs/_build/ + +# MkDocs +/site + +# PyBuilder +.pybuilder/ +target/ + +# mypy +.dmypy.json +dmypy.json + +### HPC & Slurm ### +nautilus/*.yaml +*.key +sbatch*.sh + +### Miscellaneous ### +# W&B +wandb/ + +# Dev scripts +.dev/ + +# Data folders +data/ +outputs/ + +# Translations +*.mo +*.pot + +# Dev folders +.cache/* +*.stl +*.urdf +*.xml +*.part diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b40fa581bccb1069c618cbcec85dd453ca3e0d0f --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,108 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +default_language_version: + python: python3.10 + +exclude: "tests/artifacts/.*\\.safetensors$" + +repos: + ##### Meta ##### + - repo: meta + hooks: + - id: check-useless-excludes + - id: check-hooks-apply + + ##### General Code Quality & Formatting ##### + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v6.0.0 + hooks: + - id: check-added-large-files + args: ['--maxkb=1024'] + - id: debug-statements + - id: check-merge-conflict + - id: check-case-conflict + - id: check-yaml + - id: check-toml + - id: end-of-file-fixer + - id: trailing-whitespace + + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.14.1 + hooks: + - id: ruff-format + - id: ruff + args: [--fix, --exit-non-zero-on-fix] + + - repo: https://github.com/adhtruong/mirrors-typos + rev: v1.38.1 + hooks: + - id: typos + args: [--force-exclude] + + - repo: https://github.com/asottile/pyupgrade + rev: v3.21.0 + hooks: + - id: pyupgrade + args: [--py310-plus] + + ##### Markdown Quality ##### + - repo: https://github.com/rbubley/mirrors-prettier + rev: v3.6.2 + hooks: + - id: prettier + name: Format Markdown with Prettier + types_or: [markdown, mdx] + args: [--prose-wrap=preserve] + + ##### Security ##### + - repo: https://github.com/gitleaks/gitleaks + rev: v8.28.0 + hooks: + - id: gitleaks + + - repo: https://github.com/woodruffw/zizmor-pre-commit + rev: v1.15.2 + hooks: + - id: zizmor + + - repo: https://github.com/PyCQA/bandit + rev: 1.8.6 + hooks: + - id: bandit + args: ["-c", "pyproject.toml"] + additional_dependencies: ["bandit[toml]"] + + # TODO(Steven): Uncomment when ready to use + ##### Static Analysis & Typing ##### + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.18.2 + hooks: + - id: mypy + args: [--config-file=pyproject.toml] + exclude: ^(examples|benchmarks|tests)/ + + ##### Docstring Checks ##### + # - repo: https://github.com/akaihola/darglint2 + # rev: v1.8.2 + # hooks: + # - id: darglint2 + # args: ["--docstring-style", "google", "-v", "2"] + # exclude: ^tests/.*$ + + # - repo: https://github.com/econchick/interrogate + # rev: 1.7.0 + # hooks: + # - id: interrogate + # args: ["-vv", "--config=pyproject.toml"] diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..39e5e409bdedfb6fac0782ab497945121c512bcc --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,132 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +- Demonstrating empathy and kindness toward other people +- Being respectful of differing opinions, viewpoints, and experiences +- Giving and gracefully accepting constructive feedback +- Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +- Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +- The use of sexualized language or imagery, and sexual attention or advances of + any kind +- Trolling, insulting or derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or email address, + without their explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official email address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +[feedback@huggingface.co](mailto:feedback@huggingface.co). +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within the
+community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.1, available at
+[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
+
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
+
+For answers to common questions about this code of conduct, see the FAQ at
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
+[https://www.contributor-covenant.org/translations][translations].
+
+[homepage]: https://www.contributor-covenant.org
+[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
+[Mozilla CoC]: https://github.com/mozilla/diversity
+[FAQ]: https://www.contributor-covenant.org/faq
+[translations]: https://www.contributor-covenant.org/translations
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..7242fffb08fb5fe60b67cde650fc95482de0ad7e
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,323 @@
+# How to contribute to 🤗 LeRobot?
+
+Everyone is welcome to contribute, and we value everybody's contribution. Code
+is thus not the only way to help the community. Answering questions, helping
+others, reaching out, and improving the documentation are immensely valuable to
+the community.
+
+It also helps us if you spread the word: reference the library from blog posts
+on the awesome projects it made possible, shout out on Twitter when it has
+helped you, or simply ⭐️ the repo to say "thank you".
+
+Whichever way you choose to contribute, please be mindful to respect our
+[code of conduct](https://github.com/huggingface/lerobot/blob/main/CODE_OF_CONDUCT.md).
+
+## You can contribute in so many ways!
+
+Some of the ways you can contribute to 🤗 LeRobot:
+
+- Fixing outstanding issues with the existing code.
+- Implementing new models, datasets or simulation environments.
+- Contributing to the examples or to the documentation.
+- Submitting issues related to bugs or desired new features.
+
+Following the guides below, feel free to open issues and PRs and to coordinate your efforts with the community on our [Discord Channel](https://discord.gg/VjFz58wn3R). For specific inquiries, reach out to [Remi Cadene](mailto:remi.cadene@huggingface.co).
+
+If you are not sure how to contribute or want to know the next features we are working on, take a look at this project page: [LeRobot TODO](https://github.com/orgs/huggingface/projects/46)
+
+## Submitting a new issue or feature request
+
+Do your best to follow these guidelines when submitting an issue or a feature
+request. It will make it easier for us to come back to you quickly and with good
+feedback.
+
+### Did you find a bug?
+ +The 🤗 LeRobot library is robust and reliable thanks to the users who notify us of +the problems they encounter. So thank you for reporting an issue. + +First, we would really appreciate it if you could **make sure the bug was not +already reported** (use the search bar on Github under Issues). + +Did not find it? :( So we can act quickly on it, please follow these steps: + +- Include your **OS type and version**, the versions of **Python** and **PyTorch**. +- A short, self-contained, code snippet that allows us to reproduce the bug in + less than 30s. +- The full traceback if an exception is raised. +- Attach any other additional information, like screenshots, you think may help. + +### Do you want a new feature? + +A good feature request addresses the following points: + +1. Motivation first: + +- Is it related to a problem/frustration with the library? If so, please explain + why. Providing a code snippet that demonstrates the problem is best. +- Is it related to something you would need for a project? We'd love to hear + about it! +- Is it something you worked on and think could benefit the community? + Awesome! Tell us what problem it solved for you. + +2. Write a _paragraph_ describing the feature. +3. Provide a **code snippet** that demonstrates its future use. +4. In case this is related to a paper, please attach a link. +5. Attach any additional information (drawings, screenshots, etc.) you think may help. + +If your issue is well written we're already 80% of the way there by the time you +post it. + +## Adding new policies, datasets or environments + +Look at our implementations for [datasets](./src/lerobot/datasets/), [policies](./src/lerobot/policies/), +environments ([aloha](https://github.com/huggingface/gym-aloha), +[pusht](https://github.com/huggingface/gym-pusht)) +and follow the same api design. + +When implementing a new dataset loadable with LeRobotDataset follow these steps: + +- Update `available_datasets_per_env` in `lerobot/__init__.py` + +When implementing a new environment (e.g. `gym_aloha`), follow these steps: + +- Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py` + +When implementing a new policy class (e.g. `DiffusionPolicy`) follow these steps: + +- Update `available_policies` and `available_policies_per_env`, in `lerobot/__init__.py` +- Set the required `name` class attribute. +- Update variables in `tests/test_available.py` by importing your new Policy class + +## Submitting a pull request (PR) + +Before writing code, we strongly advise you to search through the existing PRs or +issues to make sure that nobody is already working on the same thing. If you are +unsure, it is always a good idea to open an issue to get some feedback. + +You will need basic `git` proficiency to be able to contribute to +🤗 LeRobot. `git` is not the easiest tool to use but it has the greatest +manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro +Git](https://git-scm.com/book/en/v2) is a very good reference. + +Follow these steps to start contributing: + +1. Fork the [repository](https://github.com/huggingface/lerobot) by + clicking on the 'Fork' button on the repository's page. This creates a copy of the code + under your GitHub user account. + +2. Clone your fork to your local disk, and add the base repository as a remote. The following command + assumes you have your public SSH key uploaded to GitHub. 
See the following guide for more
+   [information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository).
+
+   ```bash
+   git clone git@github.com:<your-github-username>/lerobot.git
+   cd lerobot
+   git remote add upstream https://github.com/huggingface/lerobot.git
+   ```
+
+3. Create a new branch to hold your development changes, and do this for every new PR you work on.
+
+   Start by synchronizing your `main` branch with the `upstream/main` branch (more details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)):
+
+   ```bash
+   git checkout main
+   git fetch upstream
+   git rebase upstream/main
+   ```
+
+   Once your `main` branch is synchronized, create a new branch from it:
+
+   ```bash
+   git checkout -b a-descriptive-name-for-my-changes
+   ```
+
+   🚨 **Do not** work on the `main` branch.
+
+4. For development, we advise using a tool like `poetry` or `uv` instead of plain `pip` to easily track dependencies.
+   Follow the instructions to [install poetry](https://python-poetry.org/docs/#installation) (use a version >=2.1.0) or to [install uv](https://docs.astral.sh/uv/getting-started/installation/#installation-methods) if you don't have one of them already.
+
+   Set up a development environment with conda:
+
+   ```bash
+   conda create -y -n lerobot-dev python=3.10 && conda activate lerobot-dev
+   ```
+
+   If you're using `uv`, it can manage Python versions, so you can instead do:
+
+   ```bash
+   uv venv --python 3.10 && source .venv/bin/activate
+   ```
+
+   To develop on 🤗 LeRobot, you will at least need to install the `dev` and `test` extras dependencies along with the core library:
+
+   using `poetry`
+
+   ```bash
+   poetry sync --extras "dev test"
+   ```
+
+   using `uv`
+
+   ```bash
+   uv sync --extra dev --extra test
+   ```
+
+   You can also install the project with all its dependencies (including environments):
+
+   using `poetry`
+
+   ```bash
+   poetry sync --all-extras
+   ```
+
+   using `uv`
+
+   ```bash
+   uv sync --all-extras
+   ```
+
+   > **Note:** If you don't install simulation environments with `--all-extras`, the tests that require them will be skipped when running the pytest suite locally. However, they _will_ be tested in the CI. In general, we advise you to install everything and test locally before pushing.
+
+   Whichever command you chose to install the project (e.g. `poetry sync --all-extras`), you should run it again when pulling code with an updated version of `pyproject.toml` and `poetry.lock` in order to synchronize your virtual environment with the new dependencies.
+
+   The equivalent of `pip install some-package` would just be:
+
+   using `poetry`
+
+   ```bash
+   poetry add some-package
+   ```
+
+   using `uv`
+
+   ```bash
+   uv add some-package
+   ```
+
+   When making changes to the poetry sections of the `pyproject.toml`, you should run the following command to lock dependencies.
+   using `poetry`
+
+   ```bash
+   poetry lock
+   ```
+
+   using `uv`
+
+   ```bash
+   uv lock
+   ```
+
+5. Develop the features on your branch.
+
+   As you work on the features, you should make sure that the test suite
+   passes. You should run the tests impacted by your changes like this:
+
+   ```bash
+   pytest tests/<TEST_TO_RUN>.py
+   ```
+
+6. Follow our style.
+
+   `lerobot` relies on `ruff` to format its source code
+   consistently. Set up [`pre-commit`](https://pre-commit.com/) to run these checks
+   automatically as Git commit hooks.
+ + Install `pre-commit` hooks: + + ```bash + pre-commit install + ``` + + You can run these hooks whenever you need on staged files with: + + ```bash + pre-commit + ``` + + Once you're happy with your changes, add changed files using `git add` and + make a commit with `git commit` to record your changes locally: + + ```bash + git add modified_file.py + git commit + ``` + + Note, if you already committed some changes that have a wrong formatting, you can use: + + ```bash + pre-commit run --all-files + ``` + + Please write [good commit messages](https://chris.beams.io/posts/git-commit/). + + It is a good idea to sync your copy of the code with the original + repository regularly. This way you can quickly account for changes: + + ```bash + git fetch upstream + git rebase upstream/main + ``` + + Push the changes to your account using: + + ```bash + git push -u origin a-descriptive-name-for-my-changes + ``` + +7. Once you are satisfied (**and the checklist below is happy too**), go to the + webpage of your fork on GitHub. Click on 'Pull request' to send your changes + to the project maintainers for review. + +8. It's ok if maintainers ask you for changes. It happens to core contributors + too! So everyone can see the changes in the Pull request, work in your local + branch and push the changes to your fork. They will automatically appear in + the pull request. + +### Checklist + +1. The title of your pull request should be a summary of its contribution; +2. If your pull request addresses an issue, please mention the issue number in + the pull request description to make sure they are linked (and people + consulting the issue know you are working on it); +3. To indicate a work in progress please prefix the title with `[WIP]`, or preferably mark + the PR as a draft PR. These are useful to avoid duplicated work, and to differentiate + it from PRs ready to be merged; +4. Make sure existing tests pass; + +### Tests + +An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/lerobot/tree/main/tests). + +Install [git lfs](https://git-lfs.com/) to retrieve test artifacts (if you don't have it already). + +On Mac: + +```bash +brew install git-lfs +git lfs install +``` + +On Ubuntu: + +```bash +sudo apt-get install git-lfs +git lfs install +``` + +Pull artifacts if they're not in [tests/artifacts](tests/artifacts) + +```bash +git lfs pull +``` + +We use `pytest` in order to run the tests. From the root of the +repository, here's how to run tests with `pytest` for the library: + +```bash +python -m pytest -sv ./tests +``` + +You can specify a smaller set of tests in order to test only the feature +you're working on. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4e008813bdf5275a2b468feb2f6d63572c54a54a --- /dev/null +++ b/LICENSE @@ -0,0 +1,507 @@ +Copyright 2024 The Hugging Face team. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +## Some of lerobot's code is derived from Diffusion Policy, which is subject to the following copyright notice: + +MIT License + +Copyright (c) 2023 Columbia Artificial Intelligence and Robotics Lab + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +## Some of lerobot's code is derived from FOWM, which is subject to the following copyright notice: + +MIT License + +Copyright (c) 2023 Yunhai Feng + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +## Some of lerobot's code is derived from simxarm, which is subject to the following copyright notice: + +MIT License + +Copyright (c) 2023 Nicklas Hansen & Yanjie Ze + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +## Some of lerobot's code is derived from ALOHA, which is subject to the following copyright notice: + +MIT License + +Copyright (c) 2023 Tony Z. Zhao + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +## Some of lerobot's code is derived from DETR, which is subject to the following copyright notice: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 - present, Facebook, Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..b2e9d372c89067451897c04d3598256b041066bc --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,2 @@ +include src/lerobot/templates/lerobot_modelcard_template.md +include src/lerobot/datasets/card_template.md diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..67d1bbb7faff5374d5b09e7ec380e9d096410dc3 --- /dev/null +++ b/Makefile @@ -0,0 +1,180 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +.PHONY: tests + +PYTHON_PATH := $(shell which python) + +# If uv is installed and a virtual environment exists, use it +UV_CHECK := $(shell command -v uv) +ifneq ($(UV_CHECK),) + PYTHON_PATH := $(shell .venv/bin/python) +endif + +export PATH := $(dir $(PYTHON_PATH)):$(PATH) + +DEVICE ?= cpu + +build-user: + docker build -f docker/Dockerfile.user -t lerobot-user . + +build-internal: + docker build -f docker/Dockerfile.internal -t lerobot-internal . + +test-end-to-end: + ${MAKE} DEVICE=$(DEVICE) test-act-ete-train + ${MAKE} DEVICE=$(DEVICE) test-act-ete-train-resume + ${MAKE} DEVICE=$(DEVICE) test-act-ete-eval + ${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-train + ${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-eval + ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train + ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-eval + ${MAKE} DEVICE=$(DEVICE) test-smolvla-ete-train + ${MAKE} DEVICE=$(DEVICE) test-smolvla-ete-eval + +test-act-ete-train: + lerobot-train \ + --policy.type=act \ + --policy.dim_model=64 \ + --policy.n_action_steps=20 \ + --policy.chunk_size=20 \ + --policy.device=$(DEVICE) \ + --policy.push_to_hub=false \ + --env.type=aloha \ + --env.episode_length=5 \ + --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \ + --dataset.image_transforms.enable=true \ + --dataset.episodes="[0]" \ + --batch_size=2 \ + --steps=4 \ + --eval_freq=2 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --save_freq=2 \ + --save_checkpoint=true \ + --log_freq=1 \ + --wandb.enable=false \ + --output_dir=tests/outputs/act/ + +test-act-ete-train-resume: + lerobot-train \ + --config_path=tests/outputs/act/checkpoints/000002/pretrained_model/train_config.json \ + --resume=true + +test-act-ete-eval: + lerobot-eval \ + --policy.path=tests/outputs/act/checkpoints/000004/pretrained_model \ + --policy.device=$(DEVICE) \ + --env.type=aloha \ + --env.episode_length=5 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 + +test-diffusion-ete-train: + lerobot-train \ + --policy.type=diffusion \ + --policy.down_dims='[64,128,256]' \ + --policy.diffusion_step_embed_dim=32 \ + --policy.num_inference_steps=10 \ + --policy.device=$(DEVICE) \ + --policy.push_to_hub=false \ + --env.type=pusht \ + --env.episode_length=5 \ + --dataset.repo_id=lerobot/pusht \ + --dataset.image_transforms.enable=true \ + --dataset.episodes="[0]" \ + --batch_size=2 \ + --steps=2 \ + --eval_freq=2 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --save_checkpoint=true \ + --save_freq=2 \ + --log_freq=1 \ + --wandb.enable=false \ + --output_dir=tests/outputs/diffusion/ + +test-diffusion-ete-eval: + lerobot-eval \ + --policy.path=tests/outputs/diffusion/checkpoints/000002/pretrained_model \ + --policy.device=$(DEVICE) \ + --env.type=pusht \ + --env.episode_length=5 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 + +test-tdmpc-ete-train: + lerobot-train \ + --policy.type=tdmpc \ + --policy.device=$(DEVICE) \ + --policy.push_to_hub=false \ + --env.type=pusht \ + --env.episode_length=5 \ + --dataset.repo_id=lerobot/pusht_image \ + --dataset.image_transforms.enable=true \ + --dataset.episodes="[0]" \ + --batch_size=2 \ + --steps=2 \ + --eval_freq=2 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --save_checkpoint=true \ + --save_freq=2 \ + --log_freq=1 \ + --wandb.enable=false \ + --output_dir=tests/outputs/tdmpc/ + +test-tdmpc-ete-eval: + lerobot-eval \ + --policy.path=tests/outputs/tdmpc/checkpoints/000002/pretrained_model \ + --policy.device=$(DEVICE) \ 
+ --env.type=pusht \ + --env.episode_length=5 \ + --env.observation_height=96 \ + --env.observation_width=96 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 + + +test-smolvla-ete-train: + lerobot-train \ + --policy.type=smolvla \ + --policy.n_action_steps=20 \ + --policy.chunk_size=20 \ + --policy.device=$(DEVICE) \ + --policy.push_to_hub=false \ + --env.type=aloha \ + --env.episode_length=5 \ + --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \ + --dataset.image_transforms.enable=true \ + --dataset.episodes="[0]" \ + --batch_size=2 \ + --steps=4 \ + --eval_freq=2 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --save_freq=2 \ + --save_checkpoint=true \ + --log_freq=1 \ + --wandb.enable=false \ + --output_dir=tests/outputs/smolvla/ + +test-smolvla-ete-eval: + lerobot-eval \ + --policy.path=tests/outputs/smolvla/checkpoints/000004/pretrained_model \ + --policy.device=$(DEVICE) \ + --env.type=aloha \ + --env.episode_length=5 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b37a69c288c624b10239a8fa7ca09861c7b67822 --- /dev/null +++ b/README.md @@ -0,0 +1,344 @@ +

+LeRobot, Hugging Face Robotics Library
+
+ +[![Tests](https://github.com/huggingface/lerobot/actions/workflows/nightly.yml/badge.svg?branch=main)](https://github.com/huggingface/lerobot/actions/workflows/nightly.yml?query=branch%3Amain) +[![Python versions](https://img.shields.io/pypi/pyversions/lerobot)](https://www.python.org/downloads/) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/huggingface/lerobot/blob/main/LICENSE) +[![Status](https://img.shields.io/pypi/status/lerobot)](https://pypi.org/project/lerobot/) +[![Version](https://img.shields.io/pypi/v/lerobot)](https://pypi.org/project/lerobot/) +[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.1-ff69b4.svg)](https://github.com/huggingface/lerobot/blob/main/CODE_OF_CONDUCT.md) +[![Discord](https://dcbadge.vercel.app/api/server/C5P34WJ68S?style=flat)](https://discord.gg/s3KuuzsPFb) + + + +
+
+### Build Your Own HopeJR Robot!
+
+_HopeJR robot_
+
+Meet HopeJR – A humanoid robot arm and hand for dexterous manipulation!
+
+Control it with exoskeletons and gloves for precise hand movements.
+
+Perfect for advanced manipulation tasks! 🤖
+
+See the full HopeJR tutorial here.
+
+### Build Your Own SO-101 Robot!
+
+_SO-101 follower arm and SO-101 leader arm_
+
+Meet the updated SO100, the SO-101 – Just €114 per arm!
+
+Train it in minutes with a few simple moves on your laptop.
+
+Then sit back and watch your creation act autonomously! 🤯
+
+See the full SO-101 tutorial here.
+
+Want to take it to the next level? Make your SO-101 mobile by building LeKiwi!
+
+Check out the LeKiwi tutorial and bring your robot to life on wheels.
+
+_LeKiwi mobile robot_
+
+**LeRobot: State-of-the-art AI for real-world robotics**
+
+ +--- + +🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier to entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models. + +🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning. + +🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulation environments to get started without assembling a robot. In the coming weeks, the plan is to add more and more support for real-world robotics on the most affordable and capable robots out there. + +🤗 LeRobot hosts pretrained models and datasets on this Hugging Face community page: [huggingface.co/lerobot](https://huggingface.co/lerobot) + +#### Examples of pretrained models on simulation environments + + + + + + + + + + + + +
+_ACT policy on ALOHA env, TDMPC policy on SimXArm env, and Diffusion policy on PushT env_
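+
+Once LeRobot is installed (see the next section) along with the `pusht` simulation extra, you can roll out one of these pretrained policies yourself. Below is a minimal sketch using the `lerobot-eval` entry point with the `lerobot/diffusion_pusht` checkpoint referenced later in this README; it assumes `--policy.path` also accepts a Hub repo id (the Makefile targets in this repo use it with local checkpoint folders), so adjust the flags to your setup:
+
+```bash
+lerobot-eval \
+    --policy.path=lerobot/diffusion_pusht \
+    --policy.device=cpu \
+    --env.type=pusht \
+    --eval.n_episodes=10 \
+    --eval.batch_size=10
+```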
+ +## Installation + +LeRobot works with Python 3.10+ and PyTorch 2.2+. + +### Environment Setup + +Create a virtual environment with Python 3.10 and activate it, e.g. with [`miniforge`](https://conda-forge.org/download/): + +```bash +conda create -y -n lerobot python=3.10 +conda activate lerobot +``` + +When using `conda`, install `ffmpeg` in your environment: + +```bash +conda install ffmpeg -c conda-forge +``` + +> **NOTE:** This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can: +> +> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using: +> +> ```bash +> conda install ffmpeg=7.1.1 -c conda-forge +> ``` +> +> - _[On Linux only]_ Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`. + +### Install LeRobot 🤗 + +#### From Source + +First, clone the repository and navigate into the directory: + +```bash +git clone https://github.com/huggingface/lerobot.git +cd lerobot +``` + +Then, install the library in editable mode. This is useful if you plan to contribute to the code. + +```bash +pip install -e . +``` + +> **NOTE:** If you encounter build errors, you may need to install additional dependencies (`cmake`, `build-essential`, and `ffmpeg libs`). On Linux, run: +> `sudo apt-get install cmake build-essential python3-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev`. For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg) + +For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras: + +- [aloha](https://github.com/huggingface/gym-aloha) +- [xarm](https://github.com/huggingface/gym-xarm) +- [pusht](https://github.com/huggingface/gym-pusht) + +For instance, to install 🤗 LeRobot with aloha and pusht, use: + +```bash +pip install -e ".[aloha, pusht]" +``` + +### Installation from PyPI + +**Core Library:** +Install the base package with: + +```bash +pip install lerobot +``` + +_This installs only the default dependencies._ + +**Extra Features:** +To install additional functionality, use one of the following: + +```bash +pip install 'lerobot[all]' # All available features +pip install 'lerobot[aloha,pusht]' # Specific features (Aloha & Pusht) +pip install 'lerobot[feetech]' # Feetech motor support +``` + +_Replace `[...]` with your desired features._ + +**Available Tags:** +For a full list of optional dependencies, see: +https://pypi.org/project/lerobot/ + +> [!NOTE] +> For lerobot 0.4.0, if you want to install pi tags, you will have to do: `pip install "lerobot[pi]@git+https://github.com/huggingface/lerobot.git"`. +> +> This will be solved in the next patch release + +### Weights & Biases + +To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with + +```bash +wandb login +``` + +(note: you will also need to enable WandB in the configuration. See below.) 
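+
+For example, W&B logging can be switched on for a training run with the `--wandb.enable` flag (the same flag the Makefile targets in this repo set to `false`). A minimal sketch, reusing a dataset and policy configuration that appears elsewhere in this repo; adapt it to your own run:
+
+```bash
+lerobot-train \
+    --policy.type=act \
+    --policy.device=cpu \
+    --policy.push_to_hub=false \
+    --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
+    --wandb.enable=true \
+    --output_dir=outputs/train/act_aloha_wandb
+```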
+ +### Visualize datasets + +Check out [example 1](https://github.com/huggingface/lerobot/blob/main/examples/dataset/load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically downloads data from the Hugging Face hub. + +You can also locally visualize episodes from a dataset on the hub by executing our script from the command line: + +```bash +lerobot-dataset-viz \ + --repo-id lerobot/pusht \ + --episode-index 0 +``` + +or from a dataset in a local folder with the `root` option and the `--mode local` (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`) + +```bash +lerobot-dataset-viz \ + --repo-id lerobot/pusht \ + --root ./my_local_data_dir \ + --mode local \ + --episode-index 0 +``` + +It will open `rerun.io` and display the camera streams, robot states and actions, like this: + +https://github-production-user-asset-6210df.s3.amazonaws.com/4681518/328035972-fd46b787-b532-47e2-bb6f-fd536a55a7ed.mov?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVCODYLSA53PQK4ZA%2F20240505%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240505T172924Z&X-Amz-Expires=300&X-Amz-Signature=d680b26c532eeaf80740f08af3320d22ad0b8a4e4da1bcc4f33142c15b509eda&X-Amz-SignedHeaders=host&actor_id=24889239&key_id=0&repo_id=748713144 + +Our script can also visualize datasets stored on a distant server. See `lerobot-dataset-viz --help` for more instructions. + +### The `LeRobotDataset` format + +A dataset in `LeRobotDataset` format is very simple to use. It can be loaded from a repository on the Hugging Face hub or a local folder simply with e.g. `dataset = LeRobotDataset("lerobot/aloha_static_coffee")` and can be indexed into like any Hugging Face and PyTorch dataset. For instance `dataset[0]` will retrieve a single temporal frame from the dataset containing observation(s) and an action as PyTorch tensors ready to be fed to a model. + +A specificity of `LeRobotDataset` is that, rather than retrieving a single frame by its index, we can retrieve several frames based on their temporal relationship with the indexed frame, by setting `delta_timestamps` to a list of relative times with respect to the indexed frame. For example, with `delta_timestamps = {"observation.image": [-1, -0.5, -0.2, 0]}` one can retrieve, for a given index, 4 frames: 3 "previous" frames 1 second, 0.5 seconds, and 0.2 seconds before the indexed frame, and the indexed frame itself (corresponding to the 0 entry). See example [1_load_lerobot_dataset.py](https://github.com/huggingface/lerobot/blob/main/examples/dataset/load_lerobot_dataset.py) for more details on `delta_timestamps`. + +Under the hood, the `LeRobotDataset` format makes use of several ways to serialize data which can be useful to understand if you plan to work more closely with this format. We tried to make a flexible yet simple dataset format that would cover most type of features and specificities present in reinforcement learning and robotics, in simulation and in real-world, with a focus on cameras and robot states but easily extended to other types of sensory inputs as long as they can be represented by a tensor. + +Here are the important details and internal structure organization of a typical `LeRobotDataset` instantiated with `dataset = LeRobotDataset("lerobot/aloha_static_coffee")`. The exact features will change from dataset to dataset but not the main aspects: + +``` +dataset attributes: + ├ hf_dataset: a Hugging Face dataset (backed by Arrow/parquet). 
Typical features example: + │ ├ observation.images.cam_high (VideoFrame): + │ │ VideoFrame = {'path': path to a mp4 video, 'timestamp' (float32): timestamp in the video} + │ ├ observation.state (list of float32): position of an arm joints (for instance) + │ ... (more observations) + │ ├ action (list of float32): goal position of an arm joints (for instance) + │ ├ episode_index (int64): index of the episode for this sample + │ ├ frame_index (int64): index of the frame for this sample in the episode ; starts at 0 for each episode + │ ├ timestamp (float32): timestamp in the episode + │ ├ next.done (bool): indicates the end of an episode ; True for the last frame in each episode + │ └ index (int64): general index in the whole dataset + ├ meta: a LeRobotDatasetMetadata object containing: + │ ├ info: a dictionary of metadata on the dataset + │ │ ├ codebase_version (str): this is to keep track of the codebase version the dataset was created with + │ │ ├ fps (int): frame per second the dataset is recorded/synchronized to + │ │ ├ features (dict): all features contained in the dataset with their shapes and types + │ │ ├ total_episodes (int): total number of episodes in the dataset + │ │ ├ total_frames (int): total number of frames in the dataset + │ │ ├ robot_type (str): robot type used for recording + │ │ ├ data_path (str): formattable string for the parquet files + │ │ └ video_path (str): formattable string for the video files (if using videos) + │ ├ episodes: a DataFrame containing episode metadata with columns: + │ │ ├ episode_index (int): index of the episode + │ │ ├ tasks (list): list of tasks for this episode + │ │ ├ length (int): number of frames in this episode + │ │ ├ dataset_from_index (int): start index of this episode in the dataset + │ │ └ dataset_to_index (int): end index of this episode in the dataset + │ ├ stats: a dictionary of statistics (max, mean, min, std) for each feature in the dataset, for instance + │ │ ├ observation.images.front_cam: {'max': tensor with same number of dimensions (e.g. `(c, 1, 1)` for images, `(c,)` for states), etc.} + │ │ └ ... + │ └ tasks: a DataFrame containing task information with task names as index and task_index as values + ├ root (Path): local directory where the dataset is stored + ├ image_transforms (Callable): optional image transformations to apply to visual modalities + └ delta_timestamps (dict): optional delta timestamps for temporal queries +``` + +A `LeRobotDataset` is serialised using several widespread file formats for each of its parts, namely: + +- hf_dataset stored using Hugging Face datasets library serialization to parquet +- videos are stored in mp4 format to save space +- metadata are stored in plain json/jsonl files + +Dataset can be uploaded/downloaded from the HuggingFace hub seamlessly. To work on a local dataset, you can specify its location with the `root` argument if it's not in the default `~/.cache/huggingface/lerobot` location. + +#### Reproduce state-of-the-art (SOTA) + +We provide some pretrained policies on our [hub page](https://huggingface.co/lerobot) that can achieve state-of-the-art performances. +You can reproduce their training by loading the config from their run. Simply running: + +```bash +lerobot-train --config_path=lerobot/diffusion_pusht +``` + +reproduces SOTA results for Diffusion Policy on the PushT task. + +## Contribute + +If you would like to contribute to 🤗 LeRobot, please check out our [contribution guide](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md). 
+ +### Add a pretrained policy + +Once you have trained a policy you may upload it to the Hugging Face hub using a hub id that looks like `${hf_user}/${repo_name}` (e.g. [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht)). + +You first need to find the checkpoint folder located inside your experiment directory (e.g. `outputs/train/2024-05-05/20-21-12_aloha_act_default/checkpoints/002500`). Within that there is a `pretrained_model` directory which should contain: + +- `config.json`: A serialized version of the policy configuration (following the policy's dataclass config). +- `model.safetensors`: A set of `torch.nn.Module` parameters, saved in [Hugging Face Safetensors](https://huggingface.co/docs/safetensors/index) format. +- `train_config.json`: A consolidated configuration containing all parameters used for training. The policy configuration should match `config.json` exactly. This is useful for anyone who wants to evaluate your policy or for reproducibility. + +To upload these to the hub, run the following: + +```bash +huggingface-cli upload ${hf_user}/${repo_name} path/to/pretrained_model +``` + +See [lerobot_eval.py](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/lerobot_eval.py) for an example of how other people may use your policy. + +### Acknowledgment + +- The LeRobot team 🤗 for building SmolVLA [Paper](https://arxiv.org/abs/2506.01844), [Blog](https://huggingface.co/blog/smolvla). +- Thanks to Tony Zhao, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io). +- Thanks to Cheng Chi, Zhenjia Xu and colleagues for open sourcing Diffusion policy, Pusht environment and datasets, as well as UMI datasets. Ours are adapted from [Diffusion Policy](https://diffusion-policy.cs.columbia.edu) and [UMI Gripper](https://umi-gripper.github.io). +- Thanks to Nicklas Hansen, Yunhai Feng and colleagues for open sourcing TDMPC policy, Simxarm environments and datasets. Ours are adapted from [TDMPC](https://github.com/nicklashansen/tdmpc) and [FOWM](https://www.yunhaifeng.com/FOWM). +- Thanks to Antonio Loquercio and Ashish Kumar for their early support. +- Thanks to [Seungjae (Jay) Lee](https://sjlee.cc/), [Mahi Shafiullah](https://mahis.life/) and colleagues for open sourcing [VQ-BeT](https://sjlee.cc/vq-bet/) policy and helping us adapt the codebase to our repository. The policy is adapted from [VQ-BeT repo](https://github.com/jayLEE0301/vq_bet_official). 
+
+## Citation
+
+If you want, you can cite this work with:
+
+```bibtex
+@misc{cadene2024lerobot,
+    author = {Cadene, Remi and Alibert, Simon and Soare, Alexander and Gallouedec, Quentin and Zouitine, Adil and Palma, Steven and Kooijmans, Pepijn and Aractingi, Michel and Shukor, Mustafa and Aubakirova, Dana and Russi, Martino and Capuano, Francesco and Pascal, Caroline and Choghari, Jade and Moss, Jess and Wolf, Thomas},
+    title = {LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch},
+    howpublished = "\url{https://github.com/huggingface/lerobot}",
+    year = {2024}
+}
+```
+
+## Star History
+
+[![Star History Chart](https://api.star-history.com/svg?repos=huggingface/lerobot&type=Timeline)](https://star-history.com/#huggingface/lerobot&Timeline)
diff --git a/benchmarks/video/README.md b/benchmarks/video/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1b383f5885e745e6b1b5cc58e7c9938c8a23b184
--- /dev/null
+++ b/benchmarks/video/README.md
@@ -0,0 +1,288 @@
+# Video benchmark
+
+## Questions
+
+What is the optimal trade-off between:
+
+- minimizing loading time with random access,
+- minimizing memory space on disk,
+- maximizing success rate of policies,
+- compatibility across devices/platforms for decoding videos (e.g. video players, web browsers).
+
+How to encode videos?
+
+- Which video codec (`-vcodec`) to use? h264, h265, AV1?
+- What pixel format to use (`-pix_fmt`)? `yuv444p` or `yuv420p`?
+- How much compression (`-crf`)? No compression with `0`, intermediate compression with `25`, or extreme with `50+`?
+- Which frequency to choose for key frames (`-g`)? A key frame every `10` frames?
+
+How to decode videos?
+
+- Which `decoder`? `torchvision`, `torchaudio`, `ffmpegio`, `decord`, or `nvc`?
+- What scenarios to use for requesting timestamps during the benchmark? (`timestamps_mode`)
+
+## Variables
+
+**Image content & size**
+We don't expect the same optimal settings for a dataset of images from a simulation, or from the real world in an apartment, in a factory, outdoors, or with lots of moving objects in the scene, etc. Similarly, loading times might not vary linearly with the image size (resolution).
+For these reasons, we run this benchmark on four representative datasets:
+
+- `lerobot/pusht_image`: (96 x 96 pixels) simulation with simple geometric shapes, fixed camera.
+- `aliberts/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera.
+- `aliberts/paris_street`: (720 x 1280 pixels) real-world outdoor, moving camera.
+- `aliberts/kitchen`: (1080 x 1920 pixels) real-world indoor, fixed camera.
+
+Note: The datasets used for this benchmark need to be image datasets, not video datasets.
+
+**Data augmentations**
+We might revisit this benchmark and find better settings if we train our policies with various data augmentations to make them more robust (e.g. robust to color changes, compression, etc.).
+
+### Encoding parameters
+
+| parameter   | values                                                       |
+| ----------- | ------------------------------------------------------------ |
+| **vcodec**  | `libx264`, `libx265`, `libsvtav1`                            |
+| **pix_fmt** | `yuv444p`, `yuv420p`                                         |
+| **g**       | `1`, `2`, `3`, `4`, `5`, `6`, `10`, `15`, `20`, `40`, `None` |
+| **crf**     | `0`, `5`, `10`, `15`, `20`, `25`, `30`, `40`, `50`, `None`   |
+
+Note that the `crf` value might be interpreted differently by various video codecs. In other words, the same value used with one codec doesn't necessarily translate into the same compression level with another codec.
+In fact, the default value (`None`) isn't the same amongst the different video codecs. Importantly, this is also the case for many other ffmpeg arguments, like `g`, which specifies the frequency of the key frames.
+
+For a comprehensive list and documentation of these parameters, see the ffmpeg documentation for the video codec used:
+
+- h264: https://trac.ffmpeg.org/wiki/Encode/H.264
+- h265: https://trac.ffmpeg.org/wiki/Encode/H.265
+- AV1: https://trac.ffmpeg.org/wiki/Encode/AV1
+
+### Decoding parameters
+
+**Decoder**
+We tested two video decoding backends from torchvision:
+
+- `pyav`
+- `video_reader` (requires building torchvision from source)
+
+**Requested timestamps**
+Given the way video decoding works, once a keyframe has been loaded, the decoding of subsequent frames is fast.
+This is of course affected by the `-g` parameter used during encoding, which specifies the frequency of the keyframes. Given that our typical use cases in robotics policies might request a few timestamps at different random places, we want to replicate these use cases with the following scenarios:
+
+- `1_frame`: 1 frame,
+- `2_frames`: 2 consecutive frames (e.g. `[t, t + 1 / fps]`),
+- `6_frames`: 6 consecutive frames (e.g. `[t + i / fps for i in range(6)]`)
+
+Note that this differs significantly from a typical use case like watching a movie, in which every frame is loaded sequentially from the beginning to the end and it's acceptable to have big values for `-g`.
+
+Additionally, because some policies might request single timestamps that are a few frames apart, we also have the following scenario:
+
+- `2_frames_4_space`: 2 frames with 4 consecutive frames of spacing in between (e.g. `[t, t + 5 / fps]`),
+
+However, due to how video decoding is implemented with `pyav`, we don't have access to an accurate seek, so in practice this scenario is essentially the same as `6_frames`, since all 6 frames between `t` and `t + 5 / fps` will be decoded.
+
+## Metrics
+
+**Data compression ratio (lower is better)**
+`video_images_size_ratio` is the ratio of the memory space on disk taken by the encoded video over the memory space taken by the original images. For instance, `video_images_size_ratio=25%` means that the video takes 4 times less memory space on disk compared to the original images.
+
+**Loading time ratio (lower is better)**
+`video_images_load_time_ratio` is the ratio of the time it takes to decode frames from the video at given timestamps over the time it takes to load the exact same original images. For instance, `video_images_load_time_ratio=200%` means that decoding from video is 2 times slower than loading the original images.
+
+**Average Mean Square Error (lower is better)**
+`avg_mse` is the average mean square error between each decoded frame and its corresponding original image over all requested timestamps, normalized by the number of pixels in the image so that values are comparable across image sizes.
+
+**Average Peak Signal to Noise Ratio (higher is better)**
+`avg_psnr` measures the ratio between the maximum possible power of a signal and the power of the corrupting noise that affects the fidelity of its representation. Higher PSNR indicates better quality.
+
+**Average Structural Similarity Index Measure (higher is better)**
+`avg_ssim` evaluates the perceived quality of images by comparing luminance, contrast, and structure. SSIM values range from -1 to 1, where 1 indicates perfect similarity.
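+
+As a concrete reference, these three quality metrics are computed per decoded frame against its corresponding original image. The sketch below mirrors the `skimage.metrics` calls made in `run_video_benchmark.py`, assuming frames are float32 arrays of shape `(c, h, w)` scaled to `[0, 1]`:
+
+```python
+import numpy as np
+from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity
+
+
+def frame_quality(original: np.ndarray, decoded: np.ndarray) -> dict:
+    """Compare one decoded frame to its original image (both float32, (c, h, w), in [0, 1])."""
+    return {
+        "mse": mean_squared_error(original, decoded),
+        "psnr": peak_signal_noise_ratio(original, decoded, data_range=1.0),
+        "ssim": structural_similarity(original, decoded, data_range=1.0, channel_axis=0),
+    }
+```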
+
+One aspect that can't be measured here with those metrics is the compatibility of the encoding across platforms, in particular in web browsers, for visualization purposes.
+h264, h265 and AV1 are all commonly used codecs and should not pose an issue. However, the chroma subsampling format (`pix_fmt`) might affect compatibility:
+
+- `yuv420p` is more widely supported across various platforms, including web browsers.
+- `yuv444p` offers higher color fidelity but might not be supported as broadly.
+
+## How the benchmark works
+
+The benchmark evaluates both encoding and decoding of video frames on the first episode of each dataset.
+
+**Encoding:** for each `vcodec` and `pix_fmt` pair, we use a default value for `g` and `crf` upon which we change a single value (either `g` or `crf`) to one of the specified values (we don't test every combination, as this would be computationally too heavy).
+This gives a unique set of encoding parameters, which is used to encode the episode.
+
+**Decoding:** Then, for each of those unique encodings, we iterate through every combination of the decoding parameters `backend` and `timestamps_mode`. For each of them, we record the metrics of a number of samples (given by `--num-samples`). This is parallelized for efficiency and the number of processes can be controlled with `--num-workers`. Ideally, it's best to have a `--num-samples` that is divisible by `--num-workers`.
+
+Intermediate results are saved for each `vcodec` and `pix_fmt` combination in CSV tables.
+These are then all concatenated into a single table ready for analysis.
+
+## Caveats
+
+We tried to measure the most impactful parameters for both encoding and decoding. However, for computational reasons we can't test out every combination.
+
+Additional encoding parameters exist that are not included in this benchmark. In particular:
+
+- `-preset`, which selects an encoding preset, i.e. a collection of options trading encoding speed against compression ratio. When left unspecified, it defaults to `medium` for libx264 and libx265 and to `8` for libsvtav1.
+- `-tune`, which optimizes the encoding for certain aspects (e.g. film quality, fast decoding, etc.).
+
+See the documentation mentioned above for more detailed info on these settings and for a more comprehensive list of other parameters.
+
+Similarly on the decoding side, other decoders exist but are not implemented in our current benchmark. To name a few:
+
+- `torchaudio`
+- `ffmpegio`
+- `decord`
+- `nvc`
+
+Note as well that since we are mostly interested in the performance at decoding time (also because encoding is done only once before uploading a dataset), we did not measure encoding times nor have any metrics regarding encoding.
+However, aside from the need to build ffmpeg from source, encoding did not pose any issues and did not take a significant amount of time during this benchmark.
+
+## Install
+
+Building ffmpeg from source is required to include the libx265 and libaom/libsvtav1 (AV1) video codecs ([compilation guide](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu)).
+
+**Note:** While you still need to build torchvision with a conda-installed `ffmpeg<4.3` to use the `video_reader` decoder (as described in [#220](https://github.com/huggingface/lerobot/pull/220)), you also need another version which is custom-built with all the video codecs for encoding.
+For the script to use that custom-built version, prepend the benchmark command with `PATH="$HOME/bin:$PATH"`, which is where ffmpeg should be built when following the compilation guide.
+
+## Adding a video decoder
+
+Right now, we're only benchmarking the two video decoders available with torchvision: `pyav` and `video_reader`.
+You can easily add a new decoder to the benchmark by adding it to this function in the script:
+
+```diff
+def decode_video_frames(
+    video_path: str,
+    timestamps: list[float],
+    tolerance_s: float,
+    backend: str,
+) -> torch.Tensor:
+    if backend in ["pyav", "video_reader"]:
+        return decode_video_frames_torchvision(
+            video_path, timestamps, tolerance_s, backend
+        )
++    elif backend == "your_decoder":
++        return your_decoder_function(
++            video_path, timestamps, tolerance_s, backend
++        )
+    else:
+        raise NotImplementedError(backend)
+```
+
+## Example
+
+For a quick run, you can try these parameters:
+
+```bash
+python benchmark/video/run_video_benchmark.py \
+    --output-dir outputs/video_benchmark \
+    --repo-ids \
+        lerobot/pusht_image \
+        aliberts/aloha_mobile_shrimp_image \
+    --vcodec libx264 libx265 \
+    --pix-fmt yuv444p yuv420p \
+    --g 2 20 None \
+    --crf 10 40 None \
+    --timestamps-modes 1_frame 2_frames \
+    --backends pyav video_reader \
+    --num-samples 5 \
+    --num-workers 5 \
+    --save-frames 0
+```
+
+## Results
+
+### Reproduce
+
+We ran the benchmark with the following parameters:
+
+```bash
+# h264 and h265 encodings
+python benchmark/video/run_video_benchmark.py \
+    --output-dir outputs/video_benchmark \
+    --repo-ids \
+        lerobot/pusht_image \
+        aliberts/aloha_mobile_shrimp_image \
+        aliberts/paris_street \
+        aliberts/kitchen \
+    --vcodec libx264 libx265 \
+    --pix-fmt yuv444p yuv420p \
+    --g 1 2 3 4 5 6 10 15 20 40 None \
+    --crf 0 5 10 15 20 25 30 40 50 None \
+    --timestamps-modes 1_frame 2_frames 6_frames \
+    --backends pyav video_reader \
+    --num-samples 50 \
+    --num-workers 5 \
+    --save-frames 1
+
+# av1 encoding (only compatible with yuv420p and pyav decoder)
+python benchmark/video/run_video_benchmark.py \
+    --output-dir outputs/video_benchmark \
+    --repo-ids \
+        lerobot/pusht_image \
+        aliberts/aloha_mobile_shrimp_image \
+        aliberts/paris_street \
+        aliberts/kitchen \
+    --vcodec libsvtav1 \
+    --pix-fmt yuv420p \
+    --g 1 2 3 4 5 6 10 15 20 40 None \
+    --crf 0 5 10 15 20 25 30 40 50 None \
+    --timestamps-modes 1_frame 2_frames 6_frames \
+    --backends pyav \
+    --num-samples 50 \
+    --num-workers 5 \
+    --save-frames 1
+```
+
+The full results are available [here](https://docs.google.com/spreadsheets/d/1OYJB43Qu8fC26k_OyoMFgGBBKfQRCi4BIuYitQnq3sw/edit?usp=sharing).
+
+### Parameters selected for LeRobotDataset
+
+Considering these results, we chose what we think is the best set of encoding parameters:
+
+- vcodec: `libsvtav1`
+- pix-fmt: `yuv420p`
+- g: `2`
+- crf: `30`
+
+Since we're using AV1 encoding, we're choosing the `pyav` decoder, as `video_reader` does not support it (and `pyav` doesn't require a custom build of `torchvision`).
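+
+For reference, these settings roughly correspond to an ffmpeg invocation like the one below. This is only a sketch: in LeRobot the encoding is performed by `encode_video_frames`, the frame rate and file names here are illustrative, and `-crf` support for `libsvtav1` requires a recent ffmpeg build (see the install notes above):
+
+```bash
+ffmpeg -f image2 -framerate 30 -i frame_%06d.png \
+    -vcodec libsvtav1 -pix_fmt yuv420p -g 2 -crf 30 \
+    episode_0.mp4
+```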
+ +### Summary + +These tables show the results for `g=2` and `crf=30`, using `timestamps-modes=6_frames` and `backend=pyav` + +| video_images_size_ratio | vcodec | pix_fmt | | | | +| ---------------------------------- | ---------- | ------- | --------- | --------- | --------- | +| | libx264 | | libx265 | | libsvtav1 | +| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | +| lerobot/pusht_image | **16.97%** | 17.58% | 18.57% | 18.86% | 22.06% | +| aliberts/aloha_mobile_shrimp_image | 2.14% | 2.11% | 1.38% | **1.37%** | 5.59% | +| aliberts/paris_street | 2.12% | 2.13% | **1.54%** | **1.54%** | 4.43% | +| aliberts/kitchen | 1.40% | 1.39% | **1.00%** | **1.00%** | 2.52% | + +| video_images_load_time_ratio | vcodec | pix_fmt | | | | +| ---------------------------------- | ------- | ------- | -------- | ------- | --------- | +| | libx264 | | libx265 | | libsvtav1 | +| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | +| lerobot/pusht_image | 6.45 | 5.19 | **1.90** | 2.12 | 2.47 | +| aliberts/aloha_mobile_shrimp_image | 11.80 | 7.92 | 0.71 | 0.85 | **0.48** | +| aliberts/paris_street | 2.21 | 2.05 | 0.36 | 0.49 | **0.30** | +| aliberts/kitchen | 1.46 | 1.46 | 0.28 | 0.51 | **0.26** | + +| | | vcodec | pix_fmt | | | | +| ---------------------------------- | -------- | -------- | ------------ | -------- | --------- | ------------ | +| | | libx264 | | libx265 | | libsvtav1 | +| repo_id | metric | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | +| lerobot/pusht_image | avg_mse | 2.90E-04 | **2.03E-04** | 3.13E-04 | 2.29E-04 | 2.19E-04 | +| | avg_psnr | 35.44 | 37.07 | 35.49 | **37.30** | 37.20 | +| | avg_ssim | 98.28% | **98.85%** | 98.31% | 98.84% | 98.72% | +| aliberts/aloha_mobile_shrimp_image | avg_mse | 2.76E-04 | 2.59E-04 | 3.17E-04 | 3.06E-04 | **1.30E-04** | +| | avg_psnr | 35.91 | 36.21 | 35.88 | 36.09 | **40.17** | +| | avg_ssim | 95.19% | 95.18% | 95.00% | 95.05% | **97.73%** | +| aliberts/paris_street | avg_mse | 6.89E-04 | 6.70E-04 | 4.03E-03 | 4.02E-03 | **3.09E-04** | +| | avg_psnr | 33.48 | 33.68 | 32.05 | 32.15 | **35.40** | +| | avg_ssim | 93.76% | 93.75% | 89.46% | 89.46% | **95.46%** | +| aliberts/kitchen | avg_mse | 2.50E-04 | 2.24E-04 | 4.28E-04 | 4.18E-04 | **1.53E-04** | +| | avg_psnr | 36.73 | 37.33 | 36.56 | 36.75 | **39.12** | +| | avg_ssim | 95.47% | 95.58% | 95.52% | 95.53% | **96.82%** | diff --git a/benchmarks/video/benchmark.py b/benchmarks/video/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..45f3ad4028fa5cfa23bf8580c80fc6f7cd09232f --- /dev/null +++ b/benchmarks/video/benchmark.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import threading +import time +from contextlib import ContextDecorator + + +class TimeBenchmark(ContextDecorator): + """ + Measures execution time using a context manager or decorator. 
+ + This class supports both context manager and decorator usage, and is thread-safe for multithreaded + environments. + + Args: + print: If True, prints the elapsed time upon exiting the context or completing the function. Defaults + to False. + + Examples: + + Using as a context manager: + + >>> benchmark = TimeBenchmark() + >>> with benchmark: + ... time.sleep(1) + >>> print(f"Block took {benchmark.result:.4f} seconds") + Block took approximately 1.0000 seconds + + Using with multithreading: + + ```python + import threading + + benchmark = TimeBenchmark() + + + def context_manager_example(): + with benchmark: + time.sleep(0.01) + print(f"Block took {benchmark.result_ms:.2f} milliseconds") + + + threads = [] + for _ in range(3): + t1 = threading.Thread(target=context_manager_example) + threads.append(t1) + + for t in threads: + t.start() + + for t in threads: + t.join() + ``` + Expected output: + Block took approximately 10.00 milliseconds + Block took approximately 10.00 milliseconds + Block took approximately 10.00 milliseconds + """ + + def __init__(self, print=False): + self.local = threading.local() + self.print_time = print + + def __enter__(self): + self.local.start_time = time.perf_counter() + return self + + def __exit__(self, *exc): + self.local.end_time = time.perf_counter() + self.local.elapsed_time = self.local.end_time - self.local.start_time + if self.print_time: + print(f"Elapsed time: {self.local.elapsed_time:.4f} seconds") + return False + + @property + def result(self): + return getattr(self.local, "elapsed_time", None) + + @property + def result_ms(self): + return self.result * 1e3 diff --git a/benchmarks/video/capture_camera_feed.py b/benchmarks/video/capture_camera_feed.py new file mode 100644 index 0000000000000000000000000000000000000000..747534bb7d533a4681da5f69476cedcca4814bd9 --- /dev/null +++ b/benchmarks/video/capture_camera_feed.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Capture video feed from a camera as raw images.""" + +import argparse +import datetime as dt +import os +import time +from pathlib import Path + +import cv2 +import rerun as rr + +# see https://rerun.io/docs/howto/visualization/limit-ram +RERUN_MEMORY_LIMIT = os.getenv("LEROBOT_RERUN_MEMORY_LIMIT", "5%") + + +def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height: int, duration: int): + rr.init("lerobot_capture_camera_feed") + rr.spawn(memory_limit=RERUN_MEMORY_LIMIT) + + now = dt.datetime.now() + capture_dir = output_dir / f"{now:%Y-%m-%d}" / f"{now:%H-%M-%S}" + if not capture_dir.exists(): + capture_dir.mkdir(parents=True, exist_ok=True) + + # Opens the default webcam + cap = cv2.VideoCapture(0) + if not cap.isOpened(): + print("Error: Could not open video stream.") + return + + cap.set(cv2.CAP_PROP_FPS, fps) + cap.set(cv2.CAP_PROP_FRAME_WIDTH, width) + cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height) + + frame_index = 0 + start_time = time.time() + while time.time() - start_time < duration: + ret, frame = cap.read() + + if not ret: + print("Error: Could not read frame.") + break + rr.log("video/stream", rr.Image(frame), static=True) + cv2.imwrite(str(capture_dir / f"frame_{frame_index:06d}.png"), frame) + frame_index += 1 + + # Release the capture + cap.release() + + # TODO(Steven): Add a graceful shutdown via a close() method for the Viewer context, though not currently supported in the Rerun API. + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument( + "--output-dir", + type=Path, + default=Path("outputs/cam_capture/"), + help="Directory where the capture images are written. A subfolder named with the current date & time will be created inside it for each capture.", + ) + parser.add_argument( + "--fps", + type=int, + default=30, + help="Frames Per Second of the capture.", + ) + parser.add_argument( + "--width", + type=int, + default=1280, + help="Width of the captured images.", + ) + parser.add_argument( + "--height", + type=int, + default=720, + help="Height of the captured images.", + ) + parser.add_argument( + "--duration", + type=int, + default=20, + help="Duration in seconds for which the video stream should be captured.", + ) + args = parser.parse_args() + display_and_save_video_stream(**vars(args)) diff --git a/benchmarks/video/run_video_benchmark.py b/benchmarks/video/run_video_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..9cc409372449d3c4a763c415a288c0ce19a1ad01 --- /dev/null +++ b/benchmarks/video/run_video_benchmark.py @@ -0,0 +1,493 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Assess the performance of video decoding in various configurations. + +This script will benchmark different video encoding and decoding parameters. +See the provided README.md or run `python benchmark/video/run_video_benchmark.py --help` for usage info. 
+""" + +import argparse +import datetime as dt +import random +import shutil +from collections import OrderedDict +from concurrent.futures import ThreadPoolExecutor, as_completed +from pathlib import Path + +import einops +import numpy as np +import pandas as pd +import PIL +import torch +from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity +from tqdm import tqdm + +from benchmarks.video.benchmark import TimeBenchmark +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.datasets.video_utils import ( + decode_video_frames_torchvision, + encode_video_frames, +) +from lerobot.utils.constants import OBS_IMAGE + +BASE_ENCODING = OrderedDict( + [ + ("vcodec", "libx264"), + ("pix_fmt", "yuv444p"), + ("g", 2), + ("crf", None), + # TODO(aliberts): Add fastdecode + # ("fastdecode", 0), + ] +) + + +# TODO(rcadene, aliberts): move to `utils.py` folder when we want to refactor +def parse_int_or_none(value) -> int | None: + if value.lower() == "none": + return None + try: + return int(value) + except ValueError as e: + raise argparse.ArgumentTypeError(f"Invalid int or None: {value}") from e + + +def check_datasets_formats(repo_ids: list) -> None: + for repo_id in repo_ids: + dataset = LeRobotDataset(repo_id) + if len(dataset.meta.video_keys) > 0: + raise ValueError( + f"Use only image dataset for running this benchmark. Video dataset provided: {repo_id}" + ) + + +def get_directory_size(directory: Path) -> int: + total_size = 0 + for item in directory.rglob("*"): + if item.is_file(): + total_size += item.stat().st_size + return total_size + + +def load_original_frames(imgs_dir: Path, timestamps: list[float], fps: int) -> torch.Tensor: + frames = [] + for ts in timestamps: + idx = int(ts * fps) + frame = PIL.Image.open(imgs_dir / f"frame_{idx:06d}.png") + frame = torch.from_numpy(np.array(frame)) + frame = frame.type(torch.float32) / 255 + frame = einops.rearrange(frame, "h w c -> c h w") + frames.append(frame) + return torch.stack(frames) + + +def save_decoded_frames( + imgs_dir: Path, save_dir: Path, frames: torch.Tensor, timestamps: list[float], fps: int +) -> None: + if save_dir.exists() and len(list(save_dir.glob("frame_*.png"))) == len(timestamps): + return + + save_dir.mkdir(parents=True, exist_ok=True) + for i, ts in enumerate(timestamps): + idx = int(ts * fps) + frame_hwc = (frames[i].permute((1, 2, 0)) * 255).type(torch.uint8).cpu().numpy() + PIL.Image.fromarray(frame_hwc).save(save_dir / f"frame_{idx:06d}_decoded.png") + shutil.copyfile(imgs_dir / f"frame_{idx:06d}.png", save_dir / f"frame_{idx:06d}_original.png") + + +def save_first_episode(imgs_dir: Path, dataset: LeRobotDataset) -> None: + episode_index = 0 + ep_num_images = dataset.meta.episodes["length"][episode_index] + if imgs_dir.exists() and len(list(imgs_dir.glob("frame_*.png"))) == ep_num_images: + return + + imgs_dir.mkdir(parents=True, exist_ok=True) + hf_dataset = dataset.hf_dataset.with_format(None) + + # We only save images from the first camera + img_keys = [key for key in hf_dataset.features if key.startswith(OBS_IMAGE)] + imgs_dataset = hf_dataset.select_columns(img_keys[0]) + + for i, item in enumerate( + tqdm(imgs_dataset, desc=f"saving {dataset.repo_id} first episode images", leave=False) + ): + img = item[img_keys[0]] + img.save(str(imgs_dir / f"frame_{i:06d}.png"), quality=100) + + if i >= ep_num_images - 1: + break + + +def sample_timestamps(timestamps_mode: str, ep_num_images: int, fps: int) -> list[float]: + # Start at 5 to allow for 2_frames_4_space and 
6_frames + idx = random.randint(5, ep_num_images - 1) + match timestamps_mode: + case "1_frame": + frame_indexes = [idx] + case "2_frames": + frame_indexes = [idx - 1, idx] + case "2_frames_4_space": + frame_indexes = [idx - 5, idx] + case "6_frames": + frame_indexes = [idx - i for i in range(6)][::-1] + case _: + raise ValueError(timestamps_mode) + + return [idx / fps for idx in frame_indexes] + + +def decode_video_frames( + video_path: str, + timestamps: list[float], + tolerance_s: float, + backend: str, +) -> torch.Tensor: + if backend in ["pyav", "video_reader"]: + return decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend) + else: + raise NotImplementedError(backend) + + +def benchmark_decoding( + imgs_dir: Path, + video_path: Path, + timestamps_mode: str, + backend: str, + ep_num_images: int, + fps: int, + num_samples: int = 50, + num_workers: int = 4, + save_frames: bool = False, +) -> dict: + def process_sample(sample: int): + time_benchmark = TimeBenchmark() + timestamps = sample_timestamps(timestamps_mode, ep_num_images, fps) + num_frames = len(timestamps) + result = { + "psnr_values": [], + "ssim_values": [], + "mse_values": [], + } + + with time_benchmark: + frames = decode_video_frames(video_path, timestamps=timestamps, tolerance_s=5e-1, backend=backend) + result["load_time_video_ms"] = time_benchmark.result_ms / num_frames + + with time_benchmark: + original_frames = load_original_frames(imgs_dir, timestamps, fps) + result["load_time_images_ms"] = time_benchmark.result_ms / num_frames + + frames_np, original_frames_np = frames.numpy(), original_frames.numpy() + for i in range(num_frames): + result["mse_values"].append(mean_squared_error(original_frames_np[i], frames_np[i])) + result["psnr_values"].append( + peak_signal_noise_ratio(original_frames_np[i], frames_np[i], data_range=1.0) + ) + result["ssim_values"].append( + structural_similarity(original_frames_np[i], frames_np[i], data_range=1.0, channel_axis=0) + ) + + if save_frames and sample == 0: + save_dir = video_path.with_suffix("") / f"{timestamps_mode}_{backend}" + save_decoded_frames(imgs_dir, save_dir, frames, timestamps, fps) + + return result + + load_times_video_ms = [] + load_times_images_ms = [] + mse_values = [] + psnr_values = [] + ssim_values = [] + + # A sample is a single set of decoded frames specified by timestamps_mode (e.g. a single frame, 2 frames, etc.). + # For each sample, we record metrics (loading time and quality metrics) which are then averaged over all samples. + # As these samples are independent, we run them in parallel threads to speed up the benchmark. 
+ with ThreadPoolExecutor(max_workers=num_workers) as executor: + futures = [executor.submit(process_sample, i) for i in range(num_samples)] + for future in tqdm(as_completed(futures), total=num_samples, desc="samples", leave=False): + result = future.result() + load_times_video_ms.append(result["load_time_video_ms"]) + load_times_images_ms.append(result["load_time_images_ms"]) + psnr_values.extend(result["psnr_values"]) + ssim_values.extend(result["ssim_values"]) + mse_values.extend(result["mse_values"]) + + avg_load_time_video_ms = float(np.array(load_times_video_ms).mean()) + avg_load_time_images_ms = float(np.array(load_times_images_ms).mean()) + video_images_load_time_ratio = avg_load_time_video_ms / avg_load_time_images_ms + + return { + "avg_load_time_video_ms": avg_load_time_video_ms, + "avg_load_time_images_ms": avg_load_time_images_ms, + "video_images_load_time_ratio": video_images_load_time_ratio, + "avg_mse": float(np.mean(mse_values)), + "avg_psnr": float(np.mean(psnr_values)), + "avg_ssim": float(np.mean(ssim_values)), + } + + +def benchmark_encoding_decoding( + dataset: LeRobotDataset, + video_path: Path, + imgs_dir: Path, + encoding_cfg: dict, + decoding_cfg: dict, + num_samples: int, + num_workers: int, + save_frames: bool, + overwrite: bool = False, + seed: int = 1337, +) -> list[dict]: + fps = dataset.fps + + if overwrite or not video_path.is_file(): + tqdm.write(f"encoding {video_path}") + encode_video_frames( + imgs_dir=imgs_dir, + video_path=video_path, + fps=fps, + vcodec=encoding_cfg["vcodec"], + pix_fmt=encoding_cfg["pix_fmt"], + g=encoding_cfg.get("g"), + crf=encoding_cfg.get("crf"), + # fast_decode=encoding_cfg.get("fastdecode"), + overwrite=True, + ) + + episode_index = 0 + ep_num_images = dataset.meta.episodes["length"][episode_index] + width, height = tuple(dataset[0][dataset.meta.camera_keys[0]].shape[-2:]) + num_pixels = width * height + video_size_bytes = video_path.stat().st_size + images_size_bytes = get_directory_size(imgs_dir) + video_images_size_ratio = video_size_bytes / images_size_bytes + + random.seed(seed) + benchmark_table = [] + for timestamps_mode in tqdm( + decoding_cfg["timestamps_modes"], desc="decodings (timestamps_modes)", leave=False + ): + for backend in tqdm(decoding_cfg["backends"], desc="decodings (backends)", leave=False): + benchmark_row = benchmark_decoding( + imgs_dir, + video_path, + timestamps_mode, + backend, + ep_num_images, + fps, + num_samples, + num_workers, + save_frames, + ) + benchmark_row.update( + **{ + "repo_id": dataset.repo_id, + "resolution": f"{width} x {height}", + "num_pixels": num_pixels, + "video_size_bytes": video_size_bytes, + "images_size_bytes": images_size_bytes, + "video_images_size_ratio": video_images_size_ratio, + "timestamps_mode": timestamps_mode, + "backend": backend, + }, + **encoding_cfg, + ) + benchmark_table.append(benchmark_row) + + return benchmark_table + + +def main( + output_dir: Path, + repo_ids: list[str], + vcodec: list[str], + pix_fmt: list[str], + g: list[int], + crf: list[int], + # fastdecode: list[int], + timestamps_modes: list[str], + backends: list[str], + num_samples: int, + num_workers: int, + save_frames: bool, +): + check_datasets_formats(repo_ids) + encoding_benchmarks = { + "g": g, + "crf": crf, + # "fastdecode": fastdecode, + } + decoding_benchmarks = { + "timestamps_modes": timestamps_modes, + "backends": backends, + } + headers = ["repo_id", "resolution", "num_pixels"] + headers += list(BASE_ENCODING.keys()) + headers += [ + "timestamps_mode", + "backend", + 
"video_size_bytes", + "images_size_bytes", + "video_images_size_ratio", + "avg_load_time_video_ms", + "avg_load_time_images_ms", + "video_images_load_time_ratio", + "avg_mse", + "avg_psnr", + "avg_ssim", + ] + file_paths = [] + for video_codec in tqdm(vcodec, desc="encodings (vcodec)"): + for pixel_format in tqdm(pix_fmt, desc="encodings (pix_fmt)", leave=False): + benchmark_table = [] + for repo_id in tqdm(repo_ids, desc="encodings (datasets)", leave=False): + dataset = LeRobotDataset(repo_id) + imgs_dir = output_dir / "images" / dataset.repo_id.replace("/", "_") + # We only use the first episode + save_first_episode(imgs_dir, dataset) + for key, values in tqdm(encoding_benchmarks.items(), desc="encodings (g, crf)", leave=False): + for value in tqdm(values, desc=f"encodings ({key})", leave=False): + encoding_cfg = BASE_ENCODING.copy() + encoding_cfg["vcodec"] = video_codec + encoding_cfg["pix_fmt"] = pixel_format + encoding_cfg[key] = value + args_path = Path("_".join(str(value) for value in encoding_cfg.values())) + video_path = output_dir / "videos" / args_path / f"{repo_id.replace('/', '_')}.mp4" + benchmark_table += benchmark_encoding_decoding( + dataset, + video_path, + imgs_dir, + encoding_cfg, + decoding_benchmarks, + num_samples, + num_workers, + save_frames, + ) + + # Save intermediate results + benchmark_df = pd.DataFrame(benchmark_table, columns=headers) + now = dt.datetime.now() + csv_path = ( + output_dir + / f"{now:%Y-%m-%d}_{now:%H-%M-%S}_{video_codec}_{pixel_format}_{num_samples}-samples.csv" + ) + benchmark_df.to_csv(csv_path, header=True, index=False) + file_paths.append(csv_path) + del benchmark_df + + # Concatenate all results + df_list = [pd.read_csv(csv_path) for csv_path in file_paths] + concatenated_df = pd.concat(df_list, ignore_index=True) + concatenated_path = output_dir / f"{now:%Y-%m-%d}_{now:%H-%M-%S}_all_{num_samples}-samples.csv" + concatenated_df.to_csv(concatenated_path, header=True, index=False) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--output-dir", + type=Path, + default=Path("outputs/video_benchmark"), + help="Directory where the video benchmark outputs are written.", + ) + parser.add_argument( + "--repo-ids", + type=str, + nargs="*", + default=[ + "lerobot/pusht_image", + "aliberts/aloha_mobile_shrimp_image", + "aliberts/paris_street", + "aliberts/kitchen", + ], + help="Datasets repo-ids to test against. First episodes only are used. Must be images.", + ) + parser.add_argument( + "--vcodec", + type=str, + nargs="*", + default=["libx264", "hevc", "libsvtav1"], + help="Video codecs to be tested", + ) + parser.add_argument( + "--pix-fmt", + type=str, + nargs="*", + default=["yuv444p", "yuv420p"], + help="Pixel formats (chroma subsampling) to be tested", + ) + parser.add_argument( + "--g", + type=parse_int_or_none, + nargs="*", + default=[1, 2, 3, 4, 5, 6, 10, 15, 20, 40, 100, None], + help="Group of pictures sizes to be tested.", + ) + parser.add_argument( + "--crf", + type=parse_int_or_none, + nargs="*", + default=[0, 5, 10, 15, 20, 25, 30, 40, 50, None], + help="Constant rate factors to be tested.", + ) + # parser.add_argument( + # "--fastdecode", + # type=int, + # nargs="*", + # default=[0, 1], + # help="Use the fastdecode tuning option. 0 disables it. " + # "For libx264 and libx265/hevc, only 1 is possible. 
" + # "For libsvtav1, 1, 2 or 3 are possible values with a higher number meaning a faster decoding optimization", + # ) + parser.add_argument( + "--timestamps-modes", + type=str, + nargs="*", + default=[ + "1_frame", + "2_frames", + "2_frames_4_space", + "6_frames", + ], + help="Timestamps scenarios to be tested.", + ) + parser.add_argument( + "--backends", + type=str, + nargs="*", + default=["pyav", "video_reader"], + help="Torchvision decoding backend to be tested.", + ) + parser.add_argument( + "--num-samples", + type=int, + default=50, + help="Number of samples for each encoding x decoding config.", + ) + parser.add_argument( + "--num-workers", + type=int, + default=10, + help="Number of processes for parallelized sample processing.", + ) + parser.add_argument( + "--save-frames", + type=int, + default=0, + help="Whether to save decoded frames or not. Enter a non-zero number for true.", + ) + args = parser.parse_args() + main(**vars(args)) diff --git a/docker/Dockerfile.internal b/docker/Dockerfile.internal new file mode 100644 index 0000000000000000000000000000000000000000..fbc1d6086a5edd41583b33f3c5beafd4af7e3853 --- /dev/null +++ b/docker/Dockerfile.internal @@ -0,0 +1,93 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This Dockerfile is designed for HuggingFace internal CI environments +# that require GPU access. It starts from an NVIDIA CUDA base image. + +# docker build -f docker/Dockerfile.internal -t lerobot-internal . 
+ +# Configure the base image for CI with GPU access +# TODO(Steven): Bump these versions +ARG CUDA_VERSION=12.4.1 +ARG OS_VERSION=22.04 +FROM nvidia/cuda:${CUDA_VERSION}-base-ubuntu${OS_VERSION} + +# Define Python version argument +ARG PYTHON_VERSION=3.10 + +# Configure environment variables +ENV DEBIAN_FRONTEND=noninteractive \ + MUJOCO_GL=egl \ + PATH=/lerobot/.venv/bin:$PATH \ + CUDA_VISIBLE_DEVICES=0 \ + TEST_TYPE=single_gpu \ + DEVICE=cuda + +# Install Python, system dependencies, and uv (as root) +RUN apt-get update && apt-get install -y --no-install-recommends \ + software-properties-common build-essential git curl \ + libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \ + libusb-1.0-0-dev speech-dispatcher libgeos-dev portaudio19-dev \ + cmake pkg-config ninja-build \ + && add-apt-repository -y ppa:deadsnakes/ppa \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + python${PYTHON_VERSION} \ + python${PYTHON_VERSION}-venv \ + python${PYTHON_VERSION}-dev \ + && curl -LsSf https://astral.sh/uv/install.sh | sh \ + && mv /root/.local/bin/uv /usr/local/bin/uv \ + && useradd --create-home --shell /bin/bash user_lerobot \ + && usermod -aG sudo user_lerobot \ + && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Create application directory and set permissions +WORKDIR /lerobot +RUN chown -R user_lerobot:user_lerobot /lerobot + +# Switch to the non-root user +USER user_lerobot + +# Environment variables for the testing +ENV HOME=/home/user_lerobot \ + HF_HOME=/home/user_lerobot/.cache/huggingface \ + HF_LEROBOT_HOME=/home/user_lerobot/.cache/huggingface/lerobot \ + TORCH_HOME=/home/user_lerobot/.cache/torch \ + TRITON_CACHE_DIR=/home/user_lerobot/.cache/triton + +# Create the virtual environment +# We use a virtual environment inside the container—even though the container itself \ +# provides isolation—to ensure compatibility with the cluster and to prevent \ +# issues with MuJoCo and OpenGL drivers. +RUN uv venv --python python${PYTHON_VERSION} + +# Install Python dependencies for caching +COPY --chown=user_lerobot:user_lerobot pyproject.toml README.md MANIFEST.in ./ +COPY --chown=user_lerobot:user_lerobot src/ src/ + +ARG UNBOUND_DEPS=false + +RUN if [ "$UNBOUND_DEPS" = "true" ]; then \ + sed -i 's/,[[:space:]]*<[0-9\.]*//g' pyproject.toml; \ + echo "Dependencies unbound:" && cat pyproject.toml; \ + fi + +RUN uv pip install --no-cache ".[all]" + +# Copy the rest of the application source code +# Make sure to have the git-LFS files for testing +COPY --chown=user_lerobot:user_lerobot . . + +# Set the default command +CMD ["/bin/bash"] diff --git a/docker/Dockerfile.user b/docker/Dockerfile.user new file mode 100644 index 0000000000000000000000000000000000000000..9592ca99dd214f2d4965c379b2c43972d205c3f1 --- /dev/null +++ b/docker/Dockerfile.user @@ -0,0 +1,79 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This Dockerfile is designed for a lerobot user who wants to +# experiment with the project. 
It starts from an Python Slim base image. + +# docker build -f docker/Dockerfile.user -t lerobot-user . +# docker run -it --rm lerobot-user + +# Configure the base image +ARG PYTHON_VERSION=3.10 +FROM python:${PYTHON_VERSION}-slim + +# Configure environment variables +ENV DEBIAN_FRONTEND=noninteractive \ + MUJOCO_GL=egl \ + PATH=/lerobot/.venv/bin:$PATH + +# Install system dependencies and uv (as root) +RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential git curl libglib2.0-0 libegl1-mesa-dev ffmpeg \ + libusb-1.0-0-dev speech-dispatcher libgeos-dev portaudio19-dev \ + cmake pkg-config ninja-build \ + && curl -LsSf https://astral.sh/uv/install.sh | sh \ + && mv /root/.local/bin/uv /usr/local/bin/uv \ + && useradd --create-home --shell /bin/bash user_lerobot \ + && usermod -aG sudo user_lerobot \ + && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Create application directory and set permissions +WORKDIR /lerobot +RUN chown -R user_lerobot:user_lerobot /lerobot + +# Switch to the non-root user +USER user_lerobot + +# Environment variables for the testing +ENV HOME=/home/user_lerobot \ + HF_HOME=/home/user_lerobot/.cache/huggingface \ + HF_LEROBOT_HOME=/home/user_lerobot/.cache/huggingface/lerobot \ + TORCH_HOME=/home/user_lerobot/.cache/torch \ + TRITON_CACHE_DIR=/home/user_lerobot/.cache/triton + +# Create the virtual environment +# We use a virtual environment inside the container—even though the container itself \ +# provides isolation—to closely resemble local development and allow users to \ +# run other Python projects in the same container without dependency conflicts. +RUN uv venv + +# Install Python dependencies for caching +COPY --chown=user_lerobot:user_lerobot pyproject.toml README.md MANIFEST.in ./ +COPY --chown=user_lerobot:user_lerobot src/ src/ + +ARG UNBOUND_DEPS=false + +RUN if [ "$UNBOUND_DEPS" = "true" ]; then \ + sed -i 's/,[[:space:]]*<[0-9\.]*//g' pyproject.toml; \ + echo "Dependencies unbound:" && cat pyproject.toml; \ + fi + +RUN uv pip install --no-cache ".[all]" + +# Copy the rest of the application code +# Make sure to have the git-LFS files for testing +COPY --chown=user_lerobot:user_lerobot . . + +# Set the default command +CMD ["/bin/bash"] diff --git a/docs-requirements.txt b/docs-requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..0a0c62a82a11be82a76b1c57181d0cd6be79205a --- /dev/null +++ b/docs-requirements.txt @@ -0,0 +1,3 @@ +# docs-requirements.txt +hf-doc-builder @ git+https://github.com/huggingface/doc-builder.git@main +watchdog>=6.0.0 diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7ad600c2244a999f400dc1e5ace81fb10c0d9618 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,139 @@ + + +# Generating the documentation + +To generate the documentation, you first have to build it. Several packages are necessary to build the doc, +you can install them with the following command, at the root of the code repository: + +```bash +pip install -e . -r docs-requirements.txt +``` + +You will also need `nodejs`. Please refer to their [installation page](https://nodejs.org/en/download) + +--- + +**NOTE** + +You only need to generate the documentation to inspect it locally (if you're planning changes and want to +check how they look before committing for instance). You don't have to `git commit` the built documentation. 
+ +--- + +## Building the documentation + +Once you have setup the `doc-builder` and additional packages, you can generate the documentation by +typing the following command: + +```bash +doc-builder build lerobot docs/source/ --build_dir ~/tmp/test-build +``` + +You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate +the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite +Markdown editor. + +## Previewing the documentation + +To preview the docs, first install the `watchdog` module with: + +```bash +pip install watchdog +``` + +Then run the following command: + +```bash +doc-builder preview lerobot docs/source/ +``` + +The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives. + +--- + +**NOTE** + +The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again). + +--- + +## Adding a new element to the navigation bar + +Accepted files are Markdown (.md). + +Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting +the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/lerobot/blob/main/docs/source/_toctree.yml) file. + +## Renaming section headers and moving sections + +It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it'd make for a much more superior user experience if users reading those months later could still easily navigate to the originally intended information. + +Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor. + +So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file: + +``` +Sections that were moved: + +[ Section A ] +``` + +and of course, if you moved it to another file, then: + +``` +Sections that were moved: + +[ Section A ] +``` + +Use the relative style to link to the new file so that the versioned docs continue to work. + +For an example of a rich moved sections set please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md). + +### Adding a new tutorial + +Adding a new tutorial or section is done in two steps: + +- Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md). +- Link that file in `./source/_toctree.yml` on the correct toc-tree. + +Make sure to put your new file under the proper section. If you have a doubt, feel free to ask in a Github Issue or PR. + +### Writing source documentation + +Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names +and objects like True, None or any strings should usually be put in `code`. + +#### Writing a multi-line code block + +Multi-line code blocks can be useful for displaying examples. 
They are done between two lines of three backticks as usual in Markdown: + +```` +``` +# first line of code +# second line +# etc +``` +```` + +#### Adding an image + +Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like +the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference +them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). +If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images +to this dataset. diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml new file mode 100644 index 0000000000000000000000000000000000000000..3cd9994f281d7122b94e31ba8ba110645f2bae5f --- /dev/null +++ b/docs/source/_toctree.yml @@ -0,0 +1,98 @@ +- sections: + - local: index + title: LeRobot + - local: installation + title: Installation + title: Get started +- sections: + - local: il_robots + title: Imitation Learning for Robots + - local: cameras + title: Cameras + - local: integrate_hardware + title: Bring Your Own Hardware + - local: hilserl + title: Train a Robot with RL + - local: hilserl_sim + title: Train RL in Simulation + - local: multi_gpu_training + title: Multi GPU training + title: "Tutorials" +- sections: + - local: lerobot-dataset-v3 + title: Using LeRobotDataset + - local: porting_datasets_v3 + title: Porting Large Datasets + - local: using_dataset_tools + title: Using the Dataset Tools + title: "Datasets" +- sections: + - local: act + title: ACT + - local: smolvla + title: SmolVLA + - local: pi0 + title: π₀ (Pi0) + - local: pi05 + title: π₀.₅ (Pi05) + - local: groot + title: NVIDIA GR00T N1.5 + title: "Policies" +- sections: + - local: async + title: Use Async Inference + - local: rtc + title: Real-Time Chunking (RTC) + title: "Inference" +- sections: + - local: envhub + title: Environments from the Hub + - local: il_sim + title: Imitation Learning in Sim + - local: libero + title: Using Libero + - local: metaworld + title: Using MetaWorld + title: "Simulation" +- sections: + - local: introduction_processors + title: Introduction to Robot Processors + - local: debug_processor_pipeline + title: Debug your processor pipeline + - local: implement_your_own_processor + title: Implement your own processor + - local: processors_robots_teleop + title: Processors for Robots and Teleoperators + - local: env_processor + title: Environment Processors + title: "Robot Processors" +- sections: + - local: so101 + title: SO-101 + - local: so100 + title: SO-100 + - local: koch + title: Koch v1.1 + - local: lekiwi + title: LeKiwi + - local: hope_jr + title: Hope Jr + - local: reachy2 + title: Reachy 2 + title: "Robots" +- sections: + - local: phone_teleop + title: Phone + title: "Teleoperators" +- sections: + - local: notebooks + title: Notebooks + - local: feetech + title: Updating Feetech Firmware + title: "Resources" +- sections: + - local: contributing + title: Contribute to LeRobot + - local: backwardcomp + title: Backward compatibility + title: "About" diff --git a/docs/source/act.mdx b/docs/source/act.mdx new file mode 100644 index 0000000000000000000000000000000000000000..0867ebe2f31a02736dcb8c3bb4173a82ef3fb530 --- /dev/null +++ 
b/docs/source/act.mdx @@ -0,0 +1,92 @@ +# ACT (Action Chunking with Transformers) + +ACT is a **lightweight and efficient policy for imitation learning**, especially well-suited for fine-grained manipulation tasks. It's the **first model we recommend when you're starting out** with LeRobot due to its fast training time, low computational requirements, and strong performance. + +
+ +
+ +_Watch this tutorial from the LeRobot team to learn how ACT works: [LeRobot ACT Tutorial](https://www.youtube.com/watch?v=ft73x0LfGpM)_ + +## Model Overview + +Action Chunking with Transformers (ACT) was introduced in the paper [Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware](https://arxiv.org/abs/2304.13705) by Zhao et al. The policy was designed to enable precise, contact-rich manipulation tasks using affordable hardware and minimal demonstration data. + +### Why ACT is Great for Beginners + +ACT stands out as an excellent starting point for several reasons: + +- **Fast Training**: Trains in a few hours on a single GPU +- **Lightweight**: Only ~80M parameters, making it efficient and easy to work with +- **Data Efficient**: Often achieves high success rates with just 50 demonstrations + +### Architecture + +ACT uses a transformer-based architecture with three main components: + +1. **Vision Backbone**: ResNet-18 processes images from multiple camera viewpoints +2. **Transformer Encoder**: Synthesizes information from camera features, joint positions, and a learned latent variable +3. **Transformer Decoder**: Generates coherent action sequences using cross-attention + +The policy takes as input: + +- Multiple RGB images (e.g., from wrist cameras, front/top cameras) +- Current robot joint positions +- A latent style variable `z` (learned during training, set to zero during inference) + +And outputs a chunk of `k` future action sequences. + +## Installation Requirements + +1. Install LeRobot by following our [Installation Guide](./installation). +2. ACT is included in the base LeRobot installation, so no additional dependencies are needed! + +## Training ACT + +ACT works seamlessly with the standard LeRobot training pipeline. Here's a complete example for training ACT on your dataset: + +```bash +lerobot-train \ + --dataset.repo_id=${HF_USER}/your_dataset \ + --policy.type=act \ + --output_dir=outputs/train/act_your_dataset \ + --job_name=act_your_dataset \ + --policy.device=cuda \ + --wandb.enable=true \ + --policy.repo_id=${HF_USER}/act_policy +``` + +### Training Tips + +1. **Start with defaults**: ACT's default hyperparameters work well for most tasks +2. **Training duration**: Expect a few hours for 100k training steps on a single GPU +3. **Batch size**: Start with batch size 8 and adjust based on your GPU memory + +### Train using Google Colab + +If your local computer doesn't have a powerful GPU, you can utilize Google Colab to train your model by following the [ACT training notebook](./notebooks#training-act). + +## Evaluating ACT + +Once training is complete, you can evaluate your ACT policy using the `lerobot-record` command with your trained policy. 
This will run inference and record evaluation episodes:
+
+```bash
+lerobot-record \
+    --robot.type=so100_follower \
+    --robot.port=/dev/ttyACM0 \
+    --robot.id=my_robot \
+    --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \
+    --display_data=true \
+    --dataset.repo_id=${HF_USER}/eval_act_your_dataset \
+    --dataset.num_episodes=10 \
+    --dataset.single_task="Your task description" \
+    --policy.path=${HF_USER}/act_policy
+```
diff --git a/docs/source/async.mdx b/docs/source/async.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..cb767b01c1ec9c23e8f58bca4ff37d8105570d19
--- /dev/null
+++ b/docs/source/async.mdx
@@ -0,0 +1,312 @@
+# Asynchronous Inference
+
+With our [SmolVLA](https://huggingface.co/papers/2506.01844) paper we introduced a new way to run inference on real-world robots, **decoupling action prediction from action execution**.
+In this tutorial, we'll show how to use asynchronous inference (_async inference_) with a finetuned version of SmolVLA. **Async inference works with all the policies supported by LeRobot!**
+
+**What you'll learn:**
+
+1. Why asynchronous inference matters and how it compares to more traditional, sequential inference.
+2. How to spin up a `PolicyServer` and connect a `RobotClient`, either from the same machine or over the network.
+3. How to tune key parameters (`actions_per_chunk`, `chunk_size_threshold`) for your robot and policy.
+
+If you get stuck, hop into our [Discord community](https://discord.gg/s3KuuzsPFb)!
+
+In a nutshell: with _async inference_, your robot keeps acting while the policy server is already busy computing the next chunk of actions, eliminating "wait-for-inference" lags and unlocking smoother, more reactive behaviours.
+This is fundamentally different from synchronous inference (sync), where the robot stays idle while the policy computes the next chunk of actions.
+
+---
+
+## Getting started with async inference
+
+You can read more about asynchronous inference in our [blogpost](https://huggingface.co/blog/async-robot-inference). This guide is designed to help you quickly set up and run asynchronous inference in your environment.
+
+First, install `lerobot` with the `async` extra to get the additional dependencies required to run async inference:
+
+```shell
+pip install -e ".[async]"
+```
+
+Then, spin up a policy server (in one terminal, or on a separate machine), specifying the host address and port for the client to connect to.
+You can spin up a policy server by running:
+
+```shell
+python -m lerobot.async_inference.policy_server \
+    --host=127.0.0.1 \
+    --port=8080
+```
+
+This will start a policy server listening on `127.0.0.1:8080` (`localhost`, port 8080). At this stage, the policy server is empty: all information about which policy to run, and with which parameters, is specified during the first handshake with the client.
Spin up a client with:
+
+```shell
+python -m lerobot.async_inference.robot_client \
+    --server_address=127.0.0.1:8080 \ # SERVER: the host address and port of the policy server
+    --robot.type=so100_follower \ # ROBOT: your robot type
+    --robot.port=/dev/tty.usbmodem585A0076841 \ # ROBOT: your robot port
+    --robot.id=follower_so100 \ # ROBOT: your robot id, used to load the calibration file
+    --robot.cameras="{ laptop: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}, phone: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \ # POLICY: the cameras used to acquire frames, with keys matching the keys expected by the policy
+    --task="dummy" \ # POLICY: the task to run the policy on (e.g., `Fold my t-shirt`). Not necessarily defined for all policies, such as `act`
+    --policy_type=your_policy_type \ # POLICY: the type of policy to run (smolvla, act, etc.)
+    --pretrained_name_or_path=user/model \ # POLICY: the name or path of the pretrained checkpoint to load on the server (e.g., lerobot/smolvla_base)
+    --policy_device=mps \ # POLICY: the device to run the policy on, on the server
+    --actions_per_chunk=50 \ # POLICY: the number of actions to output at once
+    --chunk_size_threshold=0.5 \ # CLIENT: the queue-size threshold below which a new observation is sent to the server
+    --aggregate_fn_name=weighted_average \ # CLIENT: the function used to aggregate actions on overlapping portions
+    --debug_visualize_queue_size=True # CLIENT: whether to visualize the queue size at runtime
+```
+
+In summary, you need to specify instructions for:
+
+- `SERVER`: the address and port of the policy server
+- `ROBOT`: the type of robot to connect to, the port to connect to, and the local `id` of the robot
+- `POLICY`: the type of policy to run and the name or path of the pretrained checkpoint to load on the server. You also need to specify which device the server should use and how many actions to output at once (capped at the policy's maximum number of actions).
+- `CLIENT`: the queue-size threshold below which a new observation is sent to the server, and the function used to aggregate actions on overlapping portions. Optionally, you can also visualize the queue size at runtime to help you tune the `CLIENT` parameters.
+
+Importantly:
+
+- `actions_per_chunk` and `chunk_size_threshold` are the key parameters to tune for your setup.
+- `aggregate_fn_name` is the function used to aggregate actions on overlapping portions. You can either add a new one to the registry of functions, or add your own in `robot_client.py` (see [here](NOTE:addlinktoLOC)).
+- `debug_visualize_queue_size` is a useful tool for tuning the `CLIENT` parameters.
+
+## Done! You should see your robot moving around by now 😉
+
+## Async vs. synchronous inference
+
+Synchronous inference relies on interleaving action chunk prediction and action execution. This inherently results in _idle frames_: frames where the robot sits idle, waiting for the policy's output (a new action chunk).
+As a result, inference suffers from visible real-time lags, where the robot simply stops acting due to the lack of available actions.
+With robotics models increasing in size, this problem risks becoming only more severe.
+
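+To make this concrete, here is a rough back-of-envelope sketch of how many frames a synchronous loop spends waiting per chunk. The numbers are purely illustrative, not measurements from a real setup:
+
+```python
+# Illustrative arithmetic only: plug in your own control frequency and measured latency.
+fps = 30                   # control loop frequency (actions executed per second)
+inference_latency_s = 0.4  # time the policy takes to return the next chunk
+actions_per_chunk = 50     # actions executed before a new chunk is requested
+
+idle_frames_per_chunk = fps * inference_latency_s         # frames spent waiting for the next chunk
+cycle_frames = actions_per_chunk + idle_frames_per_chunk  # frames in one act-then-wait cycle
+idle_fraction = idle_frames_per_chunk / cycle_frames
+
+print(f"~{idle_frames_per_chunk:.0f} idle frames per chunk ({idle_fraction:.0%} of each cycle spent waiting)")
+```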

+_Figure: Synchronous inference makes the robot idle while the policy is computing the next chunk of actions._
+
+ +To overcome this, we design async inference, a paradigm where action planning and execution are decoupled, resulting in (1) higher adaptability and, most importantly, (2) no idle frames. +Crucially, with async inference, the next action chunk is computed _before_ the current one is exhausted, resulting in no idleness. +Higher adaptability is ensured by aggregating the different action chunks on overlapping portions, obtaining an up-to-date plan and a tighter control loop. + +
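+The exact aggregation used for `weighted_average` lives in `robot_client.py`; the snippet below is only a minimal, self-contained sketch of the idea, with made-up weights and shapes, to illustrate what blending two overlapping chunks can look like:
+
+```python
+import numpy as np
+
+def blend_overlap(old_chunk: np.ndarray, new_chunk: np.ndarray, alpha: float = 0.5) -> np.ndarray:
+    """Blend the leftover actions of the old chunk with the start of the new chunk.
+
+    old_chunk: actions remaining from the previous chunk, shape (n, action_dim)
+    new_chunk: freshly predicted actions, shape (m, action_dim) with m >= n
+    alpha: weight given to the newer predictions on the overlapping portion
+    """
+    n = len(old_chunk)
+    blended = new_chunk.copy()
+    # Only the first n steps overlap with the old plan; later steps come purely from the new chunk.
+    blended[:n] = alpha * new_chunk[:n] + (1.0 - alpha) * old_chunk
+    return blended
+
+# Example: 3 leftover actions, a fresh 5-action chunk, 6-dimensional actions
+old = np.zeros((3, 6))
+new = np.ones((5, 6))
+print(blend_overlap(old, new)[:3])  # the overlapping rows become 0.5
+```
+
+In practice, you would register such a function under a name and select it with `--aggregate_fn_name`, as described above.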

+_Figure: Asynchronous inference results in no idleness, because the next chunk is computed before the current chunk is exhausted._
+
+
+---
+
+## Start the Policy Server
+
+Policy servers are wrappers around a `PreTrainedPolicy`, interfacing it with observations coming from a robot client.
+Policy servers are initialized as empty containers, which are then populated with the policy requested in the initial handshake between the robot client and the policy server.
+As such, spinning up a policy server is as easy as specifying the host address and port. If you're running the policy server on the same machine as the robot client, you can use `localhost` as the host address.
+
+```bash
+python -m lerobot.async_inference.policy_server \
+    --host=127.0.0.1 \
+    --port=8080
+```
+
+```python
+from lerobot.async_inference.configs import PolicyServerConfig
+from lerobot.async_inference.policy_server import serve
+
+config = PolicyServerConfig(
+    host="localhost",
+    port=8080,
+)
+serve(config)
+```
+
+This listens on `localhost:8080` for an incoming connection from the associated `RobotClient`, which will communicate which policy to run during the first client-server handshake.
+
+---
+
+## Launch the Robot Client
+
+`RobotClient` is a wrapper around a `Robot` instance that connects to the (possibly remote) `PolicyServer`.
+The `RobotClient` streams observations to the `PolicyServer`, and receives action chunks obtained by running inference on the server (which we assume has better computational resources than the robot controller).
+
+```bash
+python -m lerobot.async_inference.robot_client \
+    --server_address=127.0.0.1:8080 \ # SERVER: the host address and port of the policy server
+    --robot.type=so100_follower \ # ROBOT: your robot type
+    --robot.port=/dev/tty.usbmodem585A0076841 \ # ROBOT: your robot port
+    --robot.id=follower_so100 \ # ROBOT: your robot id, used to load the calibration file
+    --robot.cameras="{ laptop: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}, phone: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \ # POLICY: the cameras used to acquire frames, with keys matching the keys expected by the policy
+    --task="dummy" \ # POLICY: the task to run the policy on (e.g., `Fold my t-shirt`). Not necessarily defined for all policies, such as `act`
+    --policy_type=your_policy_type \ # POLICY: the type of policy to run (smolvla, act, etc.)
+    --pretrained_name_or_path=user/model \ # POLICY: the name or path of the pretrained checkpoint to load on the server (e.g., lerobot/smolvla_base)
+    --policy_device=mps \ # POLICY: the device to run the policy on, on the server
+    --actions_per_chunk=50 \ # POLICY: the number of actions to output at once
+    --chunk_size_threshold=0.5 \ # CLIENT: the queue-size threshold below which a new observation is sent to the server
+    --aggregate_fn_name=weighted_average \ # CLIENT: the function used to aggregate actions on overlapping portions
+    --debug_visualize_queue_size=True # CLIENT: whether to visualize the queue size at runtime
+```
+
+```python
+import threading
+from lerobot.robots.so100_follower import SO100FollowerConfig
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
+from lerobot.async_inference.configs import RobotClientConfig
+from lerobot.async_inference.robot_client import RobotClient
+from lerobot.async_inference.helpers import visualize_action_queue_size
+
+# 1. Create the robot and camera configuration
+"""Check out the cameras available on your setup by running `lerobot-find-cameras opencv`."""
+# these cameras must match the ones expected by the policy
+# check the config.json on the Hub for the policy you are using
+camera_cfg = {
+    "top": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30),
+    "side": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30)
+}
+
+robot_cfg = SO100FollowerConfig(
+    port="/dev/tty.usbmodem585A0076841",
+    id="follower_so100",
+    cameras=camera_cfg
+)
+
+# 2. Create the client configuration
+client_cfg = RobotClientConfig(
+    robot=robot_cfg,
+    server_address="localhost:8080",
+    policy_device="mps",
+    policy_type="smolvla",
+    pretrained_name_or_path="fracapuano/smolvla_async",
+    chunk_size_threshold=0.5,
+    actions_per_chunk=50,  # make sure this is not larger than the max actions of the policy
+)
+
+# 3. Create and start the client
+client = RobotClient(client_cfg)
+
+# 4. Specify the task
+task = "Don't do anything, stay still"
+
+if client.start():
+    # Start the action receiver thread
+    action_receiver_thread = threading.Thread(target=client.receive_actions, daemon=True)
+    action_receiver_thread.start()
+
+    try:
+        # Run the control loop
+        client.control_loop(task)
+    except KeyboardInterrupt:
+        client.stop()
+        action_receiver_thread.join()
+        # (Optionally) plot the action queue size
+        visualize_action_queue_size(client.action_queue_size)
+```
+
+The following two parameters are key in every setup:
+
+| Hyperparameter         | Default | What it does                                                                                                                 |
+| ---------------------- | ------- | ---------------------------------------------------------------------------------------------------------------------------- |
+| `actions_per_chunk`    | 50      | How many actions the policy outputs at once. Typical values: 10-50.                                                           |
+| `chunk_size_threshold` | 0.7     | When the action queue drops to this fraction of `actions_per_chunk`, the client sends a fresh observation. Value in [0, 1].   |
+
+Different values of `actions_per_chunk` and `chunk_size_threshold` result in different behaviours.
+
+On the one hand, increasing the value of `actions_per_chunk` reduces the likelihood of running out of actions to execute, as more actions are available when the new chunk is computed.
+However, larger values of `actions_per_chunk` might also result in less precise actions, due to the compounding errors that come with predicting actions over longer timespans.
+
+On the other hand, increasing the value of `chunk_size_threshold` means observations are sent to the `PolicyServer` for inference more often, resulting in a larger number of updated action chunks that overlap on significant portions. This results in high adaptability; in the limit, one action chunk is predicted for each observation, and each chunk is only marginally consumed before a new one is produced.
+This also puts more pressure on the inference pipeline, as a consequence of the many requests. Conversely, values of `chunk_size_threshold` close to 0.0 collapse to the synchronous edge case, whereby new observations are only sent out whenever the current chunk is exhausted.
+
+We found the default values of `actions_per_chunk` and `chunk_size_threshold` to work well in the experiments we developed for the [SmolVLA paper](https://huggingface.co/papers/2506.01844), but we recommend experimenting with different values to find the best fit for your setup.
+
+### Tuning async inference for your setup
+
+1. **Choose your computational resources carefully.** [PI0](https://huggingface.co/lerobot/pi0) occupies 14GB of memory at inference time, while [SmolVLA](https://huggingface.co/lerobot/smolvla_base) requires only ~2GB. You should identify the best computational resource for your use case, keeping in mind that smaller policies require fewer computational resources. The combination of policy and device used (CPU-intensive, using MPS, or the number of CUDA cores on a given NVIDIA GPU) directly impacts the average inference latency you should expect.
+2. **Adjust your `fps` based on inference latency.** While the server generates a new action chunk, the client is not idle and is stepping through its current action queue. If the two processes happen at fundamentally different speeds, the client might end up with an empty queue. As such, you should reduce your fps if you consistently run out of actions in the queue (see the sketch after this list).
+3. **Adjust `chunk_size_threshold`.**
+   - Values closer to `0.0` result in almost sequential behavior. Values closer to `1.0` mean an observation is sent at nearly every step (more bandwidth, and more reliance on a good world model).
+   - We found values around 0.5-0.6 to work well. If you want to tweak this, spin up a `RobotClient` with `--debug_visualize_queue_size=True`. This will plot the action queue size evolution at runtime, and you can use it to find the value of `chunk_size_threshold` that works best for your setup.
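+As a rough sanity check (a sketch based on the queue semantics described above, with illustrative numbers; measure your own latency), the queue does not run dry as long as the server returns the next chunk within the time budget left when a fresh observation is sent:
+
+```python
+# Back-of-envelope check for queue starvation. Illustrative numbers only.
+fps = 30                    # client control frequency
+actions_per_chunk = 50      # actions returned per inference call
+chunk_size_threshold = 0.5  # fraction of the queue left when a new observation is sent
+inference_latency_s = 0.6   # server inference time + network round trip
+
+# When the observation is sent, roughly threshold * actions_per_chunk actions remain in the queue,
+# which is the time the server has to deliver the next chunk.
+time_budget_s = chunk_size_threshold * actions_per_chunk / fps
+
+if inference_latency_s <= time_budget_s:
+    print(f"OK: {time_budget_s:.2f}s budget vs {inference_latency_s:.2f}s latency")
+else:
+    print("Risk of idle frames: lower fps, or increase chunk_size_threshold / actions_per_chunk")
+```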

+_Figure: The action queue size is plotted at runtime when the `--debug_visualize_queue_size` flag is passed, for various levels of `chunk_size_threshold` (`g` in the SmolVLA paper)._
+
+ +--- + +## Conclusion + +Asynchronous inference represents a significant advancement in real-time robotics control, addressing the fundamental challenge of inference latency that has long plagued robotics applications. Through this tutorial, you've learned how to implement a complete async inference pipeline that eliminates idle frames and enables smoother, more reactive robot behaviors. + +**Key Takeaways:** + +- **Paradigm Shift**: Async inference decouples action prediction from execution, allowing robots to continue acting while new action chunks are computed in parallel +- **Performance Benefits**: Eliminates "wait-for-inference" lags that are inherent in synchronous approaches, becoming increasingly important as policy models grow larger +- **Flexible Architecture**: The server-client design enables distributed computing, where inference can run on powerful remote hardware while maintaining real-time robot control +- **Tunable Parameters**: Success depends on properly configuring `actions_per_chunk` and `chunk_size_threshold` for your specific hardware, policy, and task requirements +- **Universal Compatibility**: Works with all LeRobot-supported policies, from lightweight ACT models to vision-language models like SmolVLA + +Start experimenting with the default parameters, monitor your action queue sizes, and iteratively refine your setup to achieve optimal performance for your specific use case. +If you want to discuss this further, hop into our [Discord community](https://discord.gg/s3KuuzsPFb), or open an issue on our [GitHub repository](https://github.com/lerobot/lerobot/issues). diff --git a/docs/source/backwardcomp.mdx b/docs/source/backwardcomp.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a0546eee722d123bcb7ab8ebaeedcd27b936f6f6 --- /dev/null +++ b/docs/source/backwardcomp.mdx @@ -0,0 +1,151 @@ +# Backward compatibility + +## Policy Normalization Migration (PR #1452) + +**Breaking Change**: LeRobot policies no longer have built-in normalization layers embedded in their weights. Normalization is now handled by external `PolicyProcessorPipeline` components. + +### What changed? + +| | Before PR #1452 | After PR #1452 | +| -------------------------- | ------------------------------------------------ | ------------------------------------------------------------ | +| **Normalization Location** | Embedded in model weights (`normalize_inputs.*`) | External `PolicyProcessorPipeline` components | +| **Model State Dict** | Contains normalization statistics | **Clean weights only** - no normalization parameters | +| **Usage** | `policy(batch)` handles everything | `preprocessor(batch)` → `policy(...)` → `postprocessor(...)` | + +### Impact on existing models + +- Models trained **before** PR #1452 have normalization embedded in their weights +- These models need migration to work with the new `PolicyProcessorPipeline` system +- The migration extracts normalization statistics and creates separate processor pipelines + +### Migrating old models + +Use the migration script to convert models with embedded normalization: + +```shell +python src/lerobot/processor/migrate_policy_normalization.py \ + --pretrained-path lerobot/act_aloha_sim_transfer_cube_human \ + --push-to-hub \ + --branch migrated +``` + +The script: + +1. **Extracts** normalization statistics from model weights +2. **Creates** external preprocessor and postprocessor pipelines +3. **Removes** normalization layers from model weights +4. **Saves** clean model + processor pipelines +5. 
**Pushes** to Hub with automatic PR creation + +### Using migrated models + +```python +# New usage pattern (after migration) +from lerobot.policies.factory import make_policy, make_pre_post_processors + +# Load model and processors separately +policy = make_policy(config, ds_meta=dataset.meta) +preprocessor, postprocessor = make_pre_post_processors( + policy_cfg=config, + dataset_stats=dataset.meta.stats +) + +# Process data through pipeline +processed_batch = preprocessor(raw_batch) +action = policy.select_action(processed_batch) +final_action = postprocessor(action) +``` + +## Hardware API redesign + +PR [#777](https://github.com/huggingface/lerobot/pull/777) improves the LeRobot calibration but is **not backward-compatible**. Below is a overview of what changed and how you can continue to work with datasets created before this pull request. + +### What changed? + +| | Before PR #777 | After PR #777 | +| --------------------------------- | ------------------------------------------------- | ------------------------------------------------------------ | +| **Joint range** | Degrees `-180...180°` | **Normalised range** Joints: `–100...100` Gripper: `0...100` | +| **Zero position (SO100 / SO101)** | Arm fully extended horizontally | **In middle of the range for each joint** | +| **Boundary handling** | Software safeguards to detect ±180 ° wrap-arounds | No wrap-around logic needed due to mid-range zero | + +--- + +### Impact on existing datasets + +- Recorded trajectories created **before** PR #777 will replay incorrectly if loaded directly: + - Joint angles are offset and incorrectly normalized. +- Any models directly finetuned or trained on the old data will need their inputs and outputs converted. + +### Using datasets made with the previous calibration system + +We provide a migration example script for replaying an episode recorded with the previous calibration here: `examples/backward_compatibility/replay.py`. +Below we take you through the modifications that are done in the example script to make the previous calibration datasets work. + +```diff ++ key = f"{name.removeprefix('main_')}.pos" + action[key] = action_array[i].item() ++ action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90) ++ action["elbow_flex.pos"] -= 90 +``` + +Let's break this down. +New codebase uses `.pos` suffix for the position observations and we have removed `main_` prefix: + + +```python +key = f"{name.removeprefix('main_')}.pos" +``` + + +For `"shoulder_lift"` (id = 2), the 0 position is changed by -90 degrees and the direction is reversed compared to old calibration/code. + + +```python +action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90) +``` + + +For `"elbow_flex"` (id = 3), the 0 position is changed by -90 degrees compared to old calibration/code. + + +```python +action["elbow_flex.pos"] -= 90 +``` + + +To use degrees normalization we then set the `--robot.use_degrees` option to `true`. + +```diff +python examples/backward_compatibility/replay.py \ + --robot.type=so101_follower \ + --robot.port=/dev/tty.usbmodem5A460814411 \ + --robot.id=blue \ ++ --robot.use_degrees=true \ + --dataset.repo_id=my_dataset_id \ + --dataset.episode=0 +``` + +### Using policies trained with the previous calibration system + +Policies output actions in the same format as the datasets (`torch.Tensors`). Therefore, the same transformations should be applied. 
+
+To find these transformations, we recommend first trying to replay an episode of the dataset your policy was trained on, using the section above.
+Then, add these same transformations to your inference script (shown here in the `record.py` script):
+
+```diff
+action_values = predict_action(
+        observation_frame,
+        policy,
+        get_safe_torch_device(policy.config.device),
+        policy.config.use_amp,
+        task=single_task,
+        robot_type=robot.robot_type,
+    )
+    action = {key: action_values[i].item() for i, key in enumerate(robot.action_features)}
+
++   action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90)
++   action["elbow_flex.pos"] -= 90
+    robot.send_action(action)
+```
+
+If you have questions or run into migration issues, feel free to ask them on [Discord](https://discord.gg/s3KuuzsPFb).
diff --git a/docs/source/cameras.mdx b/docs/source/cameras.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..98205ce105bb2191f67da993cc6d85e0f7873b42
--- /dev/null
+++ b/docs/source/cameras.mdx
@@ -0,0 +1,206 @@
+# Cameras
+
+LeRobot offers multiple options for video capture, including phone cameras, built-in laptop cameras, external webcams, and Intel RealSense cameras. To efficiently record frames from most cameras, you can use either the `OpenCVCamera` or `RealSenseCamera` class. For additional compatibility details on the `OpenCVCamera` class, refer to the [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).
+
+### Finding your camera
+
+To instantiate a camera, you need a camera identifier. This identifier might change if you reboot your computer or re-plug your camera, a behavior mostly dependent on your operating system.
+
+To find the camera indices of the cameras plugged into your system, run the following script:
+
+```bash
+lerobot-find-cameras opencv # or realsense for Intel Realsense cameras
+```
+
+The output will look something like this if you have two cameras connected:
+
+```
+--- Detected Cameras ---
+Camera #0:
+  Name: OpenCV Camera @ 0
+  Type: OpenCV
+  Id: 0
+  Backend api: AVFOUNDATION
+  Default stream profile:
+    Format: 16.0
+    Width: 1920
+    Height: 1080
+    Fps: 15.0
+--------------------
+(more cameras ...)
+```
+
+> [!WARNING]
+> When using Intel RealSense cameras on `macOS`, you could get this [error](https://github.com/IntelRealSense/librealsense/issues/12307): `Error finding RealSense cameras: failed to set power state`. This can be solved by running the same command with `sudo` permissions. Note that using RealSense cameras on `macOS` is unstable.
+
+## Use Cameras
+
+Below are two examples demonstrating how to work with the API:
+
+- **Asynchronous frame capture** using an OpenCV-based camera
+- **Color and depth capture** using an Intel RealSense camera
+
+```python
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
+from lerobot.cameras.opencv.camera_opencv import OpenCVCamera
+from lerobot.cameras.configs import ColorMode, Cv2Rotation
+
+# Construct an `OpenCVCameraConfig` with your desired FPS, resolution, color mode, and rotation.
+config = OpenCVCameraConfig(
+    index_or_path=0,
+    fps=15,
+    width=1920,
+    height=1080,
+    color_mode=ColorMode.RGB,
+    rotation=Cv2Rotation.NO_ROTATION
+)
+
+# Instantiate and connect an `OpenCVCamera`, performing a warm-up read (default).
+camera = OpenCVCamera(config) +camera.connect() + +# Read frames asynchronously in a loop via `async_read(timeout_ms)` +try: + for i in range(10): + frame = camera.async_read(timeout_ms=200) + print(f"Async frame {i} shape:", frame.shape) +finally: + camera.disconnect() +``` + + + + + + +```python +from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig +from lerobot.cameras.realsense.camera_realsense import RealSenseCamera +from lerobot.cameras.configs import ColorMode, Cv2Rotation + +# Create a `RealSenseCameraConfig` specifying your camera’s serial number and enabling depth. +config = RealSenseCameraConfig( + serial_number_or_name="233522074606", + fps=15, + width=640, + height=480, + color_mode=ColorMode.RGB, + use_depth=True, + rotation=Cv2Rotation.NO_ROTATION +) + +# Instantiate and connect a `RealSenseCamera` with warm-up read (default). +camera = RealSenseCamera(config) +camera.connect() + +# Capture a color frame via `read()` and a depth map via `read_depth()`. +try: + color_frame = camera.read() + depth_map = camera.read_depth() + print("Color frame shape:", color_frame.shape) + print("Depth map shape:", depth_map.shape) +finally: + camera.disconnect() +``` + + + + + +## Use your phone + + + + +To use your iPhone as a camera on macOS, enable the Continuity Camera feature: + +- Ensure your Mac is running macOS 13 or later, and your iPhone is on iOS 16 or later. +- Sign in both devices with the same Apple ID. +- Connect your devices with a USB cable or turn on Wi-Fi and Bluetooth for a wireless connection. + +For more details, visit [Apple support](https://support.apple.com/en-gb/guide/mac-help/mchl77879b8a/mac). + +Your iPhone should be detected automatically when running the camera setup script in the next section. + + + + +If you want to use your phone as a camera on Linux, follow these steps to set up a virtual camera + +1. _Install `v4l2loopback-dkms` and `v4l-utils`_. Those packages are required to create virtual camera devices (`v4l2loopback`) and verify their settings with the `v4l2-ctl` utility from `v4l-utils`. Install them using: + + +```python +sudo apt install v4l2loopback-dkms v4l-utils +``` + + +2. _Install [DroidCam](https://droidcam.app) on your phone_. This app is available for both iOS and Android. +3. _Install [OBS Studio](https://obsproject.com)_. This software will help you manage the camera feed. Install it using [Flatpak](https://flatpak.org): + + +```python +flatpak install flathub com.obsproject.Studio +``` + + +4. _Install the DroidCam OBS plugin_. This plugin integrates DroidCam with OBS Studio. Install it with: + + +```python +flatpak install flathub com.obsproject.Studio.Plugin.DroidCam +``` + + +5. _Start OBS Studio_. Launch with: + + +```python +flatpak run com.obsproject.Studio +``` + + +6. _Add your phone as a source_. Follow the instructions [here](https://droidcam.app/obs/usage). Be sure to set the resolution to `640x480`. +7. _Adjust resolution settings_. In OBS Studio, go to `File > Settings > Video`. Change the `Base(Canvas) Resolution` and the `Output(Scaled) Resolution` to `640x480` by manually typing it in. +8. _Start virtual camera_. In OBS Studio, follow the instructions [here](https://obsproject.com/kb/virtual-camera-guide). +9. _Verify the virtual camera setup_. Use `v4l2-ctl` to list the devices: + + +```python +v4l2-ctl --list-devices +``` + + +You should see an entry like: + +``` +VirtualCam (platform:v4l2loopback-000): +/dev/video1 +``` + +10. _Check the camera resolution_. 
Use `v4l2-ctl` to ensure that the virtual camera output resolution is `640x480`. Change `/dev/video1` to the port of your virtual camera from the output of `v4l2-ctl --list-devices`. + + +```python +v4l2-ctl -d /dev/video1 --get-fmt-video +``` + + +You should see an entry like: + +``` +>>> Format Video Capture: +>>> Width/Height : 640/480 +>>> Pixel Format : 'YUYV' (YUYV 4:2:2) +``` + +Troubleshooting: If the resolution is not correct you will have to delete the Virtual Camera port and try again as it cannot be changed. + +If everything is set up correctly, you can proceed with the rest of the tutorial. + + + diff --git a/docs/source/contributing.md b/docs/source/contributing.md new file mode 100644 index 0000000000000000000000000000000000000000..f939e75f21a8badb5c40f527abd0e098fe9bc472 --- /dev/null +++ b/docs/source/contributing.md @@ -0,0 +1 @@ +../../CONTRIBUTING.md \ No newline at end of file diff --git a/docs/source/debug_processor_pipeline.mdx b/docs/source/debug_processor_pipeline.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d39eda92c2405ea51cfe20fbb0acf8e3f1f71049 --- /dev/null +++ b/docs/source/debug_processor_pipeline.mdx @@ -0,0 +1,299 @@ +# Debug Your Processor Pipeline + +Processor pipelines can be complex, especially when chaining multiple transformation steps. +Unlike simple function calls, pipelines lack natural observability, you can't easily see what happens +between each step or where things go wrong. +This guide provides debugging tools and techniques specifically designed to address these challenges +and help you understand data flow through your pipelines. + +We'll explore three complementary debugging approaches: **hooks** for runtime monitoring, **step-through debugging** for detailed inspection, and **feature validation** for catching structural mismatches. Each serves a different purpose and together they provide complete visibility into your pipeline's behavior. + +## Understanding Hooks + +Hooks are functions that get called at specific points during pipeline execution. +They provide a way to inspect, monitor, or modify data without changing your pipeline code. +Think of them as "event listeners" for your pipeline. + +### What is a Hook? + +A hook is a callback function that gets automatically invoked at specific moments during pipeline execution. +The concept comes from event-driven programming, imagine you could "hook into" the pipeline's execution flow to observe or react to what's happening. + +Think of hooks like inserting checkpoints into your pipeline. Every time the pipeline reaches one of these checkpoints, it pauses briefly to call your hook function, giving you a chance to inspect the current state, log information, and validate data. + +A hook is simply a function that accepts two parameters: + +- `step_idx: int` - The index of the current processing step (0, 1, 2, etc.) +- `transition: EnvTransition` - The data transition at that point in the pipeline + +The beauty of hooks is their non-invasive nature: you can add monitoring, validation, or debugging logic without changing a single line of your pipeline code. The pipeline remains clean and focused on its core logic, while hooks handle the cross-cutting concerns like logging, monitoring, and debugging. 
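+In code, this contract is just a callable that takes the step index and the current transition and returns nothing. A minimal sketch (the `EnvTransition` import path is an assumption; adjust it to your version of LeRobot):
+
+```python
+from typing import Callable
+
+# Assumption: adjust this import to wherever EnvTransition lives in your version of lerobot.
+from lerobot.processor import EnvTransition
+
+# A hook observes the pipeline: it receives the step index and the current transition,
+# and its return value is ignored.
+Hook = Callable[[int, EnvTransition], None]
+
+def print_keys_hook(step_idx: int, transition: EnvTransition) -> None:
+    print(f"step {step_idx}: transition keys = {list(transition.keys())}")
+```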
+ +### Before vs After Hooks + +The pipeline supports two types of hooks: + +- **Before hooks** (`register_before_step_hook`) - Called before each step executes +- **After hooks** (`register_after_step_hook`) - Called after each step completes + +```python +def before_hook(step_idx: int, transition: EnvTransition): + """Called before step processes the transition.""" + print(f"About to execute step {step_idx}") + # Useful for: logging, validation, setup + +def after_hook(step_idx: int, transition: EnvTransition): + """Called after step has processed the transition.""" + print(f"Completed step {step_idx}") + # Useful for: monitoring results, cleanup, debugging + +processor.register_before_step_hook(before_hook) +processor.register_after_step_hook(after_hook) +``` + +### Implementing a NaN Detection Hook + +Here's a practical example of a hook that detects NaN values: + +```python +def check_nans(step_idx: int, transition: EnvTransition): + """Check for NaN values in observations.""" + obs = transition.get(TransitionKey.OBSERVATION) + if obs: + for key, value in obs.items(): + if isinstance(value, torch.Tensor) and torch.isnan(value).any(): + print(f"NaN detected in {key} at step {step_idx}") + +# Register the hook to run after each step +processor.register_after_step_hook(check_nans) + +# Process your data - the hook will be called automatically +output = processor(input_data) + +# Remove the hook when done debugging +processor.unregister_after_step_hook(check_nans) +``` + +### How Hooks Work Internally + +Understanding the internal mechanism helps you use hooks more effectively. The pipeline maintains two separate lists: one for before-step hooks and another for after-step hooks. When you register a hook, it's simply appended to the appropriate list. + +During execution, the pipeline follows a strict sequence: for each processing step, it first calls all before-hooks in registration order, then executes the actual step transformation, and finally calls all after-hooks in registration order. This creates a predictable, sandwich-like structure around each step. + +The key insight is that hooks don't change the core pipeline logic—they're purely additive. The pipeline's `_forward` method orchestrates this dance between hooks and processing steps, ensuring that your debugging or monitoring code runs at exactly the right moments without interfering with the main data flow. + +Here's a simplified view of how the pipeline executes hooks: + +```python +class DataProcessorPipeline: + def __init__(self): + self.steps = [...] + self.before_step_hooks = [] # List of before hooks + self.after_step_hooks = [] # List of after hooks + + def _forward(self, transition): + """Internal method that processes the transition through all steps.""" + for step_idx, processor_step in enumerate(self.steps): + # 1. Call all BEFORE hooks + for hook in self.before_step_hooks: + hook(step_idx, transition) + + # 2. Execute the actual processing step + transition = processor_step(transition) + + # 3. Call all AFTER hooks + for hook in self.after_step_hooks: + hook(step_idx, transition) + + return transition + + def register_before_step_hook(self, hook_fn): + self.before_step_hooks.append(hook_fn) + + def register_after_step_hook(self, hook_fn): + self.after_step_hooks.append(hook_fn) +``` + +### Execution Flow + +The execution flow looks like this: + +``` +Input → Before Hook → Step 0 → After Hook → Before Hook → Step 1 → After Hook → ... 
→ Output +``` + +For example, with 3 steps and both hook types: + +```python +def timing_before(step_idx, transition): + print(f"⏱️ Starting step {step_idx}") + +def validation_after(step_idx, transition): + print(f"✅ Completed step {step_idx}") + +processor.register_before_step_hook(timing_before) +processor.register_after_step_hook(validation_after) + +# This will output: +# ⏱️ Starting step 0 +# ✅ Completed step 0 +# ⏱️ Starting step 1 +# ✅ Completed step 1 +# ⏱️ Starting step 2 +# ✅ Completed step 2 +``` + +### Multiple Hooks + +You can register multiple hooks of the same type - they execute in the order registered: + +```python +def log_shapes(step_idx: int, transition: EnvTransition): + obs = transition.get(TransitionKey.OBSERVATION) + if obs: + print(f"Step {step_idx} observation shapes:") + for key, value in obs.items(): + if isinstance(value, torch.Tensor): + print(f" {key}: {value.shape}") + +processor.register_after_step_hook(check_nans) # Executes first +processor.register_after_step_hook(log_shapes) # Executes second + +# Both hooks will be called after each step in registration order +output = processor(input_data) +``` + +While hooks are excellent for monitoring specific issues (like NaN detection) or gathering metrics during normal pipeline execution, sometimes you need to dive deeper. When you want to understand exactly what happens at each step or debug complex transformation logic, step-through debugging provides the detailed inspection you need. + +## Step-Through Debugging + +Step-through debugging is like having a slow-motion replay for your pipeline. Instead of watching your data get transformed in one quick blur from input to output, you can pause and examine what happens after each individual step. + +This approach is particularly valuable when you're trying to understand a complex pipeline, debug unexpected behavior, or verify that each transformation is working as expected. Unlike hooks, which are great for automated monitoring, step-through debugging gives you manual, interactive control over the inspection process. + +The `step_through()` method is a generator that yields the transition state after each processing step, allowing you to inspect intermediate results. Think of it as creating a series of snapshots of your data as it flows through the pipeline—each snapshot shows you exactly what your data looks like after one more transformation has been applied. + +### How Step-Through Works + +The `step_through()` method fundamentally changes how the pipeline executes. Instead of running all steps in sequence and only returning the final result, it transforms the pipeline into an iterator that yields intermediate results. + +Here's what happens internally: the method starts by converting your input data into the pipeline's internal transition format, then yields this initial state. Next, it applies the first processing step and yields the result. Then it applies the second step to that result and yields again, and so on. Each `yield` gives you a complete snapshot of the transition at that point. + +This generator pattern is powerful because it's lazy—the pipeline only computes the next step when you ask for it. This means you can stop at any point, inspect the current state thoroughly, and decide whether to continue. You're not forced to run the entire pipeline just to debug one problematic step. 
+ +Instead of running the entire pipeline and only seeing the final result, `step_through()` pauses after each step and gives you the intermediate transition: + +```python +# This creates a generator that yields intermediate states +for i, intermediate_result in enumerate(processor.step_through(input_data)): + print(f"=== After step {i} ===") + + # Inspect the observation at this stage + obs = intermediate_result.get(TransitionKey.OBSERVATION) + if obs: + for key, value in obs.items(): + if isinstance(value, torch.Tensor): + print(f"{key}: shape={value.shape}, dtype={value.dtype}") +``` + +### Interactive Debugging with Breakpoints + +You can add breakpoints in the step-through loop to interactively debug: + +```python +# Step through the pipeline with debugging +for i, intermediate in enumerate(processor.step_through(data)): + print(f"Step {i}: {processor.steps[i].__class__.__name__}") + + # Set a breakpoint to inspect the current state + breakpoint() # Debugger will pause here + + # You can now inspect 'intermediate' in the debugger: + # - Check tensor shapes and values + # - Verify expected transformations + # - Look for unexpected changes +``` + +During the debugger session, you can: + +- Examine `intermediate[TransitionKey.OBSERVATION]` to see observation data +- Check `intermediate[TransitionKey.ACTION]` for action transformations +- Inspect any part of the transition to understand what each step does + +Step-through debugging is perfect for understanding the _data_ transformations, but what about the _structure_ of that data? While hooks and step-through help you debug runtime behavior, you also need to ensure your pipeline produces data in the format expected by downstream components. This is where feature contract validation comes in. + +## Validating Feature Contracts + +Feature contracts define what data structure your pipeline expects as input and produces as output. +Validating these contracts helps catch mismatches early. 
+ +### Understanding Feature Contracts + +Each processor step has a `transform_features()` method that describes how it changes the data structure: + +```python +# Get the expected output features from your pipeline +initial_features = { + PipelineFeatureType.OBSERVATION: { + "observation.state": PolicyFeature(type=FeatureType.STATE, shape=(7,)), + "observation.image": PolicyFeature(type=FeatureType.IMAGE, shape=(3, 224, 224)) + }, + PipelineFeatureType.ACTION: { + "action": PolicyFeature(type=FeatureType.ACTION, shape=(4,)) + } +} + +# Check what your pipeline will output +output_features = processor.transform_features(initial_features) + +print("Input features:") +for feature_type, features in initial_features.items(): + print(f" {feature_type}:") + for key, feature in features.items(): + print(f" {key}: {feature.type.value}, shape={feature.shape}") + +print("\nOutput features:") +for feature_type, features in output_features.items(): + print(f" {feature_type}:") + for key, feature in features.items(): + print(f" {key}: {feature.type.value}, shape={feature.shape}") +``` + +### Verifying Expected Features + +Check that your pipeline produces the features you expect: + +```python +# Define what features you expect the pipeline to produce +expected_keys = ["observation.state", "observation.image", "action"] + +print("Validating feature contract...") +for expected_key in expected_keys: + found = False + for feature_type, features in output_features.items(): + if expected_key in features: + feature = features[expected_key] + print(f"✅ {expected_key}: {feature.type.value}, shape={feature.shape}") + found = True + break + + if not found: + print(f"❌ Missing expected feature: {expected_key}") +``` + +This validation helps ensure your pipeline will work correctly with downstream components that expect specific data structures. + +## Summary + +Now that you understand the three debugging approaches, you can tackle any pipeline issue systematically: + +1. **Hooks** - For runtime monitoring and validation without modifying pipeline code +2. **Step-through** - For inspecting intermediate states and understanding transformations +3. **Feature validation** - For ensuring data structure contracts are met + +**When to use each approach:** + +- Start with **step-through debugging** when you need to understand what your pipeline does or when something unexpected happens +- Add **hooks** for continuous monitoring during development and production to catch issues automatically +- Use **feature validation** before deployment to ensure your pipeline works with downstream components + +These three tools work together to give you the complete observability that complex pipelines naturally lack. With hooks watching for issues, step-through helping you understand behavior, and feature validation ensuring compatibility, you'll be able to debug any pipeline confidently and efficiently. diff --git a/docs/source/env_processor.mdx b/docs/source/env_processor.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7f5cee2832b1311f70fa6833b4d15a2effbce904 --- /dev/null +++ b/docs/source/env_processor.mdx @@ -0,0 +1,418 @@ +# Environment Processors + +Environment processors are a critical layer in LeRobot's data processing architecture that handle **environment-specific** transformations, separate from policy-specific processing. This separation of concerns enables cleaner code, better modularity, and easier experimentation with different environments and policies. + +## Why Environment Processors? 
+ +When working with different robot environments (LIBERO, MetaWorld, Aloha, etc.), each environment often has unique data formats, coordinate systems, and conventions that need standardization **before** policy processing. Without environment processors, these transformations would be: + +1. **Hardcoded in environment code** - Making it difficult to experiment with different state representations +2. **Duplicated across policies** - Each policy would need to handle environment-specific quirks +3. **Mixed with policy logic** - Violating separation of concerns and making debugging harder + +Environment processors solve this by providing a **dedicated processing layer** between raw environment observations and policy inputs. + +## The Processing Pipeline + +Here's how data flows through the complete processing pipeline during evaluation: + +```python +# In lerobot_eval.py rollout() function: + +# 1. Raw environment observation (numpy arrays, various formats) +raw_observation = env.step(action) + +# 2. Convert numpy to torch, normalize images [0,1] +observation = preprocess_observation(raw_observation) + +# 3. Add task metadata (for multi-task environments) +observation = add_envs_task(env, observation) + +# 4. ENVIRONMENT-SPECIFIC preprocessing (NEW!) +# - Flatten robot states +# - Rotate images to match dataset conventions +# - Handle environment-specific coordinate systems +observation = env_preprocessor(observation) + +# 5. POLICY-SPECIFIC preprocessing +# - Normalize with dataset statistics +# - Add batch dimensions +# - Move to GPU +# - Tokenize language instructions +observation = preprocessor(observation) + +# 6. Policy inference +action = policy.select_action(observation) + +# 7. POLICY-SPECIFIC postprocessing +# - Unnormalize actions +# - Remove batch dimensions +action = postprocessor(action) + +# 8. ENVIRONMENT-SPECIFIC postprocessing (NEW!) +# - Convert action formats if needed +# - Apply environment-specific constraints +action_transition = {"action": action} +action_transition = env_postprocessor(action_transition) +action = action_transition["action"] + +# 9. Execute in environment +env.step(action) +``` + +## The Benefits + +### 1. **Separation of Concerns** + +Environment processors handle transformations specific to the **environment's data format**, while policy processors handle transformations specific to the **model's requirements**. + +```python +# ❌ Before: Mixed concerns +class LiberoVLAPolicy: + def preprocess(self, obs): + # Environment-specific: Flatten robot state (shouldn't be in policy!) + state = self._flatten_robot_state(obs["robot_state"]) + # Policy-specific: Normalize with dataset stats + state = self.normalizer(state) + return state + +# ✅ After: Clear separation +# Environment processor: Handles LIBERO's nested robot state +env_preprocessor = LiberoProcessorStep() # Flattens robot_state + +# Policy processor: Handles model requirements +policy_preprocessor = NormalizerProcessorStep(stats=dataset_stats) +``` + +### 2. 
**Flexibility and Reusability** + +The same policy can work with different environment processors, and the same environment processor can work with different policies: + +```python +# Use SmolVLA policy with LIBERO environment +libero_preprocessor, libero_postprocessor = make_env_pre_post_processors(libero_cfg) +smolvla_preprocessor, smolvla_postprocessor = make_pre_post_processors(smolvla_cfg) + +# Or use ACT policy with the same LIBERO environment +libero_preprocessor, libero_postprocessor = make_env_pre_post_processors(libero_cfg) +act_preprocessor, act_postprocessor = make_pre_post_processors(act_cfg) +``` + +### 3. **Easier Experimentation** + +Want to try different state representations for LIBERO? Just create a new processor: + +```python +# Original: 8D state (pos + quat→axisangle + gripper) +@ProcessorStepRegistry.register("libero_processor") +class LiberoProcessorStep(ObservationProcessorStep): + def _process_observation(self, obs): + eef_pos = robot_state["eef"]["pos"] # 3D + eef_axisangle = quat2axisangle(quat) # 3D + gripper = robot_state["gripper"]["qpos"] # 2D + state = torch.cat([eef_pos, eef_axisangle, gripper], dim=-1) # 8D + return state + +# Experiment: Add velocity for better control +@ProcessorStepRegistry.register("libero_velocity_processor") +class LiberoVelocityProcessorStep(ObservationProcessorStep): + def _process_observation(self, obs): + # Include velocities for 14D state + eef_pos = robot_state["eef"]["pos"] # 3D + eef_axisangle = quat2axisangle(quat) # 3D + eef_vel = robot_state["eef"]["vel"] # 3D (NEW) + gripper_pos = robot_state["gripper"]["qpos"] # 2D + gripper_vel = robot_state["gripper"]["qvel"] # 3D (NEW) + state = torch.cat([eef_pos, eef_axisangle, eef_vel, + gripper_pos, gripper_vel], dim=-1) # 14D + return state +``` + +### 4. **Cleaner Environment Code** + +Environments expose **all available data** without needing to know what downstream models will use: + +```python +# LIBERO environment exposes full robot state +observation = { + "pixels": {"image": img, "image2": img2}, + "robot_state": { + "eef": {"pos": ..., "quat": ..., "vel": ..., "mat": ..., "axisangle": ...}, + "gripper": {"qpos": ..., "qvel": ...}, + "joints": {"pos": ..., "vel": ...} + } +} + +# Environment processor decides what to use +# Policy processor handles model-specific transformations +``` + +## Using Environment Processors + +### Factory Function + +The `make_env_pre_post_processors` function follows the same pattern as `make_pre_post_processors` for policies: + +```python +from lerobot.envs.factory import make_env_pre_post_processors +from lerobot.envs.configs import LiberoEnv, PushtEnv + +# For LIBERO: Returns LiberoProcessorStep in preprocessor +libero_cfg = LiberoEnv(task="libero_spatial", camera_name=["agentview"]) +env_preprocessor, env_postprocessor = make_env_pre_post_processors(libero_cfg) + +# For other environments: Returns identity processors (no-op) +pusht_cfg = PushtEnv() +env_preprocessor, env_postprocessor = make_env_pre_post_processors(pusht_cfg) +``` + +### Implementation in `envs/factory.py` + +```python +def make_env_pre_post_processors( + env_cfg: EnvConfig, +) -> tuple[ + PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], + PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], +]: + """ + Create preprocessor and postprocessor pipelines for environment observations. + + Args: + env_cfg: The configuration of the environment. 
+ + Returns: + A tuple containing: + - preprocessor: Pipeline that processes environment observations + - postprocessor: Pipeline that processes environment outputs + """ + # For LIBERO environments, add the LiberoProcessorStep to preprocessor + if isinstance(env_cfg, LiberoEnv) or "libero" in env_cfg.type: + preprocessor = PolicyProcessorPipeline(steps=[LiberoProcessorStep()]) + else: + # For all other environments, return an identity preprocessor + preprocessor = PolicyProcessorPipeline(steps=[]) + + # Postprocessor is currently identity for all environments + # Future: Could add environment-specific action transformations + postprocessor = PolicyProcessorPipeline(steps=[]) + + return preprocessor, postprocessor +``` + +### Integration in Evaluation + +In `lerobot_eval.py`, the environment processors are created once and used throughout: + +```python +def eval_main(cfg: EvalPipelineConfig): + # Create environment + envs = make_env(cfg.env, n_envs=cfg.eval.batch_size) + + # Create policy + policy = make_policy(cfg=cfg.policy, env_cfg=cfg.env) + + # Create policy processors + preprocessor, postprocessor = make_pre_post_processors( + policy_cfg=cfg.policy, + pretrained_path=cfg.policy.pretrained_path, + ) + + # Create environment processors (NEW!) + env_preprocessor, env_postprocessor = make_env_pre_post_processors(env_cfg=cfg.env) + + # Run evaluation with both processor types + eval_policy_all( + envs=envs, + policy=policy, + env_preprocessor=env_preprocessor, # Environment-specific + env_postprocessor=env_postprocessor, # Environment-specific + preprocessor=preprocessor, # Policy-specific + postprocessor=postprocessor, # Policy-specific + n_episodes=cfg.eval.n_episodes, + ) +``` + +## Example: LIBERO Environment Processor + +The `LiberoProcessorStep` demonstrates a real-world environment processor: + +```python +from lerobot.processor.pipeline import ObservationProcessorStep + +@dataclass +@ProcessorStepRegistry.register(name="libero_processor") +class LiberoProcessorStep(ObservationProcessorStep): + """ + Processes LIBERO observations into the LeRobot format. + + **State Processing:** + - Extracts end-effector position (3D) + - Converts quaternion to axis-angle representation (3D) + - Extracts gripper joint positions (2D) + - Concatenates into 8D state vector + + **Image Processing:** + - Rotates images 180° to match HuggingFaceVLA/libero convention + """ + + def _process_observation(self, observation): + processed_obs = observation.copy() + + # Process images: Flip 180° for camera convention + for key in list(processed_obs.keys()): + if key.startswith("observation.images."): + img = processed_obs[key] + img = torch.flip(img, dims=[2, 3]) # Flip H and W + processed_obs[key] = img + + # Process robot_state: Flatten to 8D vector + if "observation.robot_state" in processed_obs: + robot_state = processed_obs.pop("observation.robot_state") + + eef_pos = robot_state["eef"]["pos"] # (B, 3) + eef_quat = robot_state["eef"]["quat"] # (B, 4) + gripper_qpos = robot_state["gripper"]["qpos"] # (B, 2) + + # Convert quaternion to axis-angle + eef_axisangle = self._quat2axisangle(eef_quat) # (B, 3) + + # Concatenate into single state vector + state = torch.cat((eef_pos, eef_axisangle, gripper_qpos), dim=-1) + state = state.float() + + processed_obs["observation.state"] = state + + return processed_obs +``` + +### Why These Transformations? + +1. **Image Rotation**: The HuggingFaceVLA/libero dataset has images rotated 180° from the raw LIBERO simulator. 
The processor handles this convention mismatch so policies trained on the dataset work seamlessly. + +2. **State Flattening**: The raw LIBERO environment exposes nested dictionaries with all available state information (position, quaternion, velocity, matrix representation, etc.). The processor: + - Selects the relevant components (pos, quat, gripper) + - Converts quaternion to axis-angle (more suitable for learning) + - Flattens to a single 8D vector that policies expect + +3. **Flexibility**: The environment still exposes **all** raw data. If you want to try different state representations (e.g., including velocities, using matrix representation instead of axis-angle), you can create a new processor without modifying the environment code. + +## Adding Environment Processors for New Environments + +To add environment processors for a new environment: + +### 1. Create the Processor Step + +```python +# In src/lerobot/processor/env_processor.py + +@dataclass +@ProcessorStepRegistry.register(name="myenv_processor") +class MyEnvProcessorStep(ObservationProcessorStep): + """Process observations from MyEnv.""" + + def _process_observation(self, observation): + processed = observation.copy() + + # Your environment-specific transformations + if "myenv.specific.state" in processed: + state = processed.pop("myenv.specific.state") + # Transform to standard format + processed["observation.state"] = self._transform_state(state) + + return processed +``` + +### 2. Update the Factory + +```python +# In src/lerobot/envs/factory.py + +def make_env_pre_post_processors(env_cfg: EnvConfig): + if isinstance(env_cfg, LiberoEnv) or "libero" in env_cfg.type: + preprocessor = PolicyProcessorPipeline(steps=[LiberoProcessorStep()]) + elif isinstance(env_cfg, MyEnvConfig) or "myenv" in env_cfg.type: + preprocessor = PolicyProcessorPipeline(steps=[MyEnvProcessorStep()]) + else: + preprocessor = PolicyProcessorPipeline(steps=[]) + + postprocessor = PolicyProcessorPipeline(steps=[]) + return preprocessor, postprocessor +``` + +### 3. Use in Evaluation + +No changes needed! The evaluation script automatically uses the appropriate processor: + +```bash +lerobot-eval \ + --policy.path=lerobot/my_policy \ + --env.type=myenv \ # Automatically uses MyEnvProcessorStep + --eval.n_episodes=10 +``` + +## Future: Environment Postprocessors + +Currently, postprocessors are identity (no-op) for all environments. Future use cases include: + +### Action Space Transformations + +```python +@dataclass +class MyEnvActionPostprocessor(ProcessorStep): + """Convert policy actions to environment-specific format.""" + + def __call__(self, transition: EnvTransition) -> EnvTransition: + action = transition["action"] + + # Example: Convert from Cartesian to joint space + if self.action_space == "joint": + action = self.ik_solver(action) + + # Example: Apply environment-specific safety limits + action = torch.clamp(action, self.min_action, self.max_action) + + transition["action"] = action + return transition +``` + +### Coordinate System Conversions + +```python +@dataclass +class CoordinateTransformPostprocessor(ProcessorStep): + """Transform actions between coordinate systems.""" + + def __call__(self, transition: EnvTransition) -> EnvTransition: + action = transition["action"] + + # Example: Policy outputs in world frame, env expects base frame + action = self.world_to_base_transform(action) + + transition["action"] = action + return transition +``` + +## Best Practices + +1. 
**Keep environment processors simple**: They should only handle environment-specific data format issues, not complex learning-related transformations. + +2. **Use policy processors for model requirements**: Normalization, batching, device placement, and tokenization belong in policy processors. + +3. **Expose all data from environments**: Let processors decide what to use rather than hardcoding choices in the environment. + +4. **Document conventions**: Clearly document any coordinate system conventions, camera orientations, or data formats that your processor handles. + +5. **Test independently**: Environment processors should be testable without loading full policies or environments. + +## Summary + +Environment processors provide a **clean separation** between environment-specific data transformations and policy-specific model requirements. This architecture: + +- ✅ Enables easy experimentation with different state representations +- ✅ Allows policies to work seamlessly across different environments +- ✅ Keeps environment code focused on simulation/hardware interface +- ✅ Makes processor pipelines more maintainable and debuggable +- ✅ Follows the single responsibility principle + +The key insight: **Environments define data formats, processors standardize them, policies consume standardized data.** Each layer has a clear, focused responsibility. diff --git a/docs/source/envhub.mdx b/docs/source/envhub.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a460cbda2f6b4077af63e0ed52735e938bb1ac27 --- /dev/null +++ b/docs/source/envhub.mdx @@ -0,0 +1,424 @@ +# Loading Environments from the Hub + +The **EnvHub** feature allows you to load simulation environments directly from the Hugging Face Hub with a single line of code. This unlocks a powerful new model for collaboration: instead of environments being locked away inside monolithic libraries, anyone can publish custom environments and share them with the community. + +## Overview + +With EnvHub, you can: + +- Load environments from the Hub instantly +- Share your custom simulation tasks with the community +- Version control your environments using Git +- Distribute complex physics simulations without packaging hassles + +## Quick Start + +Loading an environment from the Hub is as simple as: + +```python +from lerobot.envs.factory import make_env + +# Load a hub environment (requires explicit consent to run remote code) +env = make_env("lerobot/cartpole-env", trust_remote_code=True) +``` + + + **Security Notice**: Loading environments from the Hub executes Python code + from third-party repositories. Only use `trust_remote_code=True` with + repositories you trust. We strongly recommend pinning to a specific commit + hash for reproducibility and security. + + +## What is EnvHub? + +EnvHub is a framework that allows researchers and developers to: + +1. **Publish environments** to the Hugging Face Hub as Git repositories +2. **Load environments** dynamically without installing them as packages +3. **Version and track** environment changes using Git semantics +4. **Discover** new simulation tasks shared by the community + +This design means you can go from discovering an interesting environment on the Hub to running experiments in seconds, without worrying about dependency conflicts or complex installation procedures. 
+ +## Repository Structure + +To make your environment loadable from the Hub, your repository must contain at minimum: + +### Required Files + +**`env.py`** (or custom Python file) + +- Must expose a `make_env(n_envs: int, use_async_envs: bool)` function +- This function should return one of: + - A `gym.vector.VectorEnv` (most common) + - A single `gym.Env` (will be automatically wrapped) + - A dict mapping `{suite_name: {task_id: VectorEnv}}` (for multi-task benchmarks) + +### Optional Files + +**`requirements.txt`** + +- List any additional dependencies your environment needs +- Users will need to install these manually before loading your environment + +**`README.md`** + +- Document your environment: what task it implements, observation/action spaces, rewards, etc. +- Include usage examples and any special setup instructions + +**`.gitignore`** + +- Exclude unnecessary files from your repository + +### Example Repository Structure + +``` +my-environment-repo/ +├── env.py # Main environment definition (required) +├── requirements.txt # Dependencies (optional) +├── README.md # Documentation (recommended) +├── assets/ # Images, videos, etc. (optional) +│ └── demo.gif +└── configs/ # Config files if needed (optional) + └── task_config.yaml +``` + +## Creating Your Environment Repository + +### Step 1: Define Your Environment + +Create an `env.py` file with a `make_env` function: + +```python +# env.py +import gymnasium as gym + +def make_env(n_envs: int = 1, use_async_envs: bool = False): + """ + Create vectorized environments for your custom task. + + Args: + n_envs: Number of parallel environments + use_async_envs: Whether to use AsyncVectorEnv or SyncVectorEnv + + Returns: + gym.vector.VectorEnv or dict mapping suite names to vectorized envs + """ + def _make_single_env(): + # Create your custom environment + return gym.make("CartPole-v1") + + # Choose vector environment type + env_cls = gym.vector.AsyncVectorEnv if use_async_envs else gym.vector.SyncVectorEnv + + # Create vectorized environment + vec_env = env_cls([_make_single_env for _ in range(n_envs)]) + + return vec_env +``` + +### Step 2: Test Locally + +Before uploading, test your environment locally: + +```python +from lerobot.envs.utils import _load_module_from_path, _call_make_env, _normalize_hub_result + +# Load your module +module = _load_module_from_path("./env.py") + +# Test the make_env function +result = _call_make_env(module, n_envs=2, use_async_envs=False) +normalized = _normalize_hub_result(result) + +# Verify it works +suite_name = next(iter(normalized)) +env = normalized[suite_name][0] +obs, info = env.reset() +print(f"Observation shape: {obs.shape if hasattr(obs, 'shape') else type(obs)}") +env.close() +``` + +### Step 3: Upload to the Hub + +Upload your repository to Hugging Face: + +```bash +# Install huggingface_hub if needed +pip install huggingface_hub + +# Login to Hugging Face +huggingface-cli login + +# Create a new repository +huggingface-cli repo create my-custom-env --type space --org my-org + +# Initialize git and push +git init +git add . 
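+# Note (optional): if your environment ships large binary assets (meshes, textures,
+# demo videos), track them with Git LFS so the Hub stores them efficiently, e.g.:
+#   git lfs install && git lfs track "assets/*" && git add .gitattributes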
+git commit -m "Initial environment implementation" +git remote add origin https://huggingface.co/my-org/my-custom-env +git push -u origin main +``` + +Alternatively, use the `huggingface_hub` Python API: + +```python +from huggingface_hub import HfApi + +api = HfApi() + +# Create repository +api.create_repo("my-custom-env", repo_type="space") + +# Upload files +api.upload_folder( + folder_path="./my-env-folder", + repo_id="username/my-custom-env", + repo_type="space", +) +``` + +## Loading Environments from the Hub + +### Basic Usage + +```python +from lerobot.envs.factory import make_env + +# Load from the hub +envs_dict = make_env( + "username/my-custom-env", + n_envs=4, + trust_remote_code=True +) + +# Access the environment +suite_name = next(iter(envs_dict)) +env = envs_dict[suite_name][0] + +# Use it like any gym environment +obs, info = env.reset() +action = env.action_space.sample() +obs, reward, terminated, truncated, info = env.step(action) +``` + +### Advanced: Pinning to Specific Versions + +For reproducibility and security, pin to a specific Git revision: + +```python +# Pin to a specific branch +env = make_env("username/my-env@main", trust_remote_code=True) + +# Pin to a specific commit (recommended for papers/experiments) +env = make_env("username/my-env@abc123def456", trust_remote_code=True) + +# Pin to a tag +env = make_env("username/my-env@v1.0.0", trust_remote_code=True) +``` + +### Custom File Paths + +If your environment definition is not in `env.py`: + +```python +# Load from a custom file +env = make_env("username/my-env:custom_env.py", trust_remote_code=True) + +# Combine with version pinning +env = make_env("username/my-env@v1.0:envs/task_a.py", trust_remote_code=True) +``` + +### Async Environments + +For better performance with multiple environments: + +```python +envs_dict = make_env( + "username/my-env", + n_envs=8, + use_async_envs=True, # Use AsyncVectorEnv for parallel execution + trust_remote_code=True +) +``` + +## URL Format Reference + +The hub URL format supports several patterns: + +| Pattern | Description | Example | +| -------------------- | ------------------------------ | -------------------------------------- | +| `user/repo` | Load `env.py` from main branch | `make_env("lerobot/pusht-env")` | +| `user/repo@revision` | Load from specific revision | `make_env("lerobot/pusht-env@main")` | +| `user/repo:path` | Load custom file | `make_env("lerobot/envs:pusht.py")` | +| `user/repo@rev:path` | Revision + custom file | `make_env("lerobot/envs@v1:pusht.py")` | + +## Multi-Task Environments + +For benchmarks with multiple tasks (like LIBERO), return a nested dictionary: + +```python +def make_env(n_envs: int = 1, use_async_envs: bool = False): + env_cls = gym.vector.AsyncVectorEnv if use_async_envs else gym.vector.SyncVectorEnv + + # Return dict: {suite_name: {task_id: VectorEnv}} + return { + "suite_1": { + 0: env_cls([lambda: gym.make("Task1-v0") for _ in range(n_envs)]), + 1: env_cls([lambda: gym.make("Task2-v0") for _ in range(n_envs)]), + }, + "suite_2": { + 0: env_cls([lambda: gym.make("Task3-v0") for _ in range(n_envs)]), + } + } +``` + +## Security Considerations + + + **Important**: The `trust_remote_code=True` flag is required to execute + environment code from the Hub. This is by design for security. + + +When loading environments from the Hub: + +1. **Review the code first**: Visit the repository and inspect `env.py` before loading +2. **Pin to commits**: Use specific commit hashes for reproducibility +3. 
**Check dependencies**: Review `requirements.txt` for suspicious packages +4. **Use trusted sources**: Prefer official organizations or well-known researchers +5. **Sandbox if needed**: Run untrusted code in isolated environments (containers, VMs) + +Example of safe usage: + +```python +# ❌ BAD: Loading without inspection +env = make_env("random-user/untrusted-env", trust_remote_code=True) + +# ✅ GOOD: Review code, then pin to specific commit +# 1. Visit https://huggingface.co/trusted-org/verified-env +# 2. Review the env.py file +# 3. Copy the commit hash +env = make_env("trusted-org/verified-env@a1b2c3d4", trust_remote_code=True) +``` + +## Example: CartPole from the Hub + +Here's a complete example using the reference CartPole environment: + +```python +from lerobot.envs.factory import make_env +import numpy as np + +# Load the environment +envs_dict = make_env("lerobot/cartpole-env", n_envs=4, trust_remote_code=True) + +# Get the vectorized environment +suite_name = next(iter(envs_dict)) +env = envs_dict[suite_name][0] + +# Run a simple episode +obs, info = env.reset() +done = np.zeros(env.num_envs, dtype=bool) +total_reward = np.zeros(env.num_envs) + +while not done.all(): + # Random policy + action = env.action_space.sample() + obs, reward, terminated, truncated, info = env.step(action) + total_reward += reward + done = terminated | truncated + +print(f"Average reward: {total_reward.mean():.2f}") +env.close() +``` + +## Benefits of EnvHub + +### For Environment Authors + +- **Easy distribution**: No PyPI packaging required +- **Version control**: Use Git for environment versioning +- **Rapid iteration**: Push updates instantly +- **Documentation**: Hub README renders beautifully +- **Community**: Reach LeRobot users directly + +### For Researchers + +- **Quick experiments**: Load any environment in one line +- **Reproducibility**: Pin to specific commits +- **Discovery**: Browse environments on the Hub +- **No conflicts**: No need to install conflicting packages + +### For the Community + +- **Growing ecosystem**: More diverse simulation tasks +- **Standardization**: Common `make_env` API +- **Collaboration**: Fork and improve existing environments +- **Accessibility**: Lower barrier to sharing research + +## Troubleshooting + +### "Refusing to execute remote code" + +You must explicitly pass `trust_remote_code=True`: + +```python +env = make_env("user/repo", trust_remote_code=True) +``` + +### "Module X not found" + +The hub environment has dependencies you need to install: + +```bash +# Check the repo's requirements.txt and install dependencies +pip install gymnasium numpy +``` + +### "make_env not found in module" + +Your `env.py` must expose a `make_env` function: + +```python +def make_env(n_envs: int, use_async_envs: bool): + # Your implementation + pass +``` + +### Environment returns wrong type + +The `make_env` function must return: + +- A `gym.vector.VectorEnv`, or +- A single `gym.Env`, or +- A dict `{suite_name: {task_id: VectorEnv}}` + +## Best Practices + +1. **Document your environment**: Include observation/action space descriptions, reward structure, and termination conditions in your README +2. **Add requirements.txt**: List all dependencies with versions +3. **Test thoroughly**: Verify your environment works locally before pushing +4. **Use semantic versioning**: Tag releases with version numbers +5. **Add examples**: Include usage examples in your README +6. **Keep it simple**: Minimize dependencies when possible +7. 
**License your work**: Add a LICENSE file to clarify usage terms
+
+## Future Directions
+
+The EnvHub ecosystem enables exciting possibilities:
+
+- **GPU-accelerated physics**: Share Isaac Gym or Brax environments
+- **Photorealistic rendering**: Distribute environments with advanced graphics
+- **Multi-agent scenarios**: Complex interaction tasks
+- **Real-world simulators**: Digital twins of physical setups
+- **Procedural generation**: Infinite task variations
+- **Domain randomization**: Pre-configured DR pipelines
+
+As more researchers and developers contribute, the diversity and quality of available environments will grow, benefiting the entire robotics learning community.
+
+## See Also
+
+- [Hugging Face Hub Documentation](https://huggingface.co/docs/hub/en/index)
+- [Gymnasium Documentation](https://gymnasium.farama.org/index.html)
+- [Example Hub Environment](https://huggingface.co/lerobot/cartpole-env)
diff --git a/docs/source/feetech.mdx b/docs/source/feetech.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..777f8a5093521a93470a1e6a5356c443f45fb0d8
--- /dev/null
+++ b/docs/source/feetech.mdx
@@ -0,0 +1,71 @@
+# Feetech Motor Firmware Update
+
+This tutorial guides you through updating the firmware of Feetech motors using the official Feetech software.
+
+## Prerequisites
+
+- Windows computer (Feetech software is only available for Windows)
+- Feetech motor control board
+- USB cable to connect the control board to your computer
+- Feetech motors connected to the control board
+
+## Step 1: Download Feetech Software
+
+1. Visit the official Feetech software download page: [https://www.feetechrc.com/software.html](https://www.feetechrc.com/software.html)
+2. Download the latest version of the Feetech debugging software (FD)
+3. Install the software on your Windows computer
+
+## Step 2: Hardware Setup
+
+1. Connect your Feetech motors to the motor control board
+2. Connect the motor control board to your Windows computer via USB cable
+3. Ensure power is supplied to the motors
+
+## Step 3: Configure Connection
+
+1. Launch the Feetech debugging software
+2. Select the correct COM port from the port dropdown menu
+   - If unsure which port to use, check Windows Device Manager under "Ports (COM & LPT)"
+3. Set the appropriate baud rate (typically 1000000 for most Feetech motors)
+4. Click "Open" to establish communication with the control board
+
+## Step 4: Scan for Motors
+
+1. Once connected, click the "Search" button to detect all connected motors
+2. The software will automatically discover and list all motors on the bus
+3. Each motor will appear with its ID number
+
+## Step 5: Update Firmware
+
+For each motor you want to update:
+
+1. **Select the motor** from the list by clicking on it
+2. **Click on the Upgrade tab**
+3. **Click on the Online button**:
+   - If a potential firmware update is found, it will be displayed in the box
+4. **Click on the Upgrade button**:
+   - The update progress will be displayed
+
+## Step 6: Verify Update
+
+1. After the update completes, the software should automatically refresh the motor information
+2. Verify that the firmware version has been updated to the expected version
+
+## Important Notes
+
+⚠️ **Warning**: Do not disconnect power or USB during a firmware update; doing so can brick the motor.
+
+## Bonus: Motor Debugging on Linux/macOS
+
+For debugging purposes only, you can use the open-source Feetech Debug Tool:
+
+- **Repository**: [FT_SCServo_Debug_Qt](https://github.com/CarolinePascal/FT_SCServo_Debug_Qt/tree/fix/port-search-timer)
+
+### Installation Instructions
+
+Follow the instructions in the repository to install the tool: on Ubuntu you can install it directly, while on macOS you need to build it from source.
+
+**Limitations:**
+
+- This tool is for debugging and parameter adjustment only
+- Firmware updates must still be done on Windows with the official Feetech software
diff --git a/docs/source/groot.mdx b/docs/source/groot.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..ebd289e907cfce8fcef2ffa49fbde8e8618d27a6
--- /dev/null
+++ b/docs/source/groot.mdx
@@ -0,0 +1,125 @@
+# GR00T N1.5 Policy
+
+GR00T N1.5 is an open foundation model from NVIDIA designed for generalized humanoid robot reasoning and skills. It is a cross-embodiment model that accepts multimodal input, including language and images, to perform manipulation tasks in diverse environments.
+
+This document outlines the specifics of its integration and usage within the LeRobot framework.
+
+## Model Overview
+
+NVIDIA Isaac GR00T N1.5 is an upgraded version of the GR00T N1 foundation model. It is built to improve generalization and language-following abilities for humanoid robots.
+
+Developers and researchers can post-train GR00T N1.5 with their own real or synthetic data to adapt it for specific humanoid robots or tasks.
+
+GR00T N1.5 (specifically the GR00T-N1.5-3B model) is built using pre-trained vision and language encoders. It utilizes a flow matching action transformer to model a chunk of actions, conditioned on vision, language, and proprioception.
+
+Its strong performance comes from being trained on an expansive and diverse humanoid dataset, which includes:
+
+- Real captured data from robots.
+- Synthetic data generated using NVIDIA Isaac GR00T Blueprint.
+- Internet-scale video data.
+
+This approach allows the model to be highly adaptable through post-training for specific embodiments, tasks, and environments.
+
+## Installation Requirements
+
+As of today, GR00T N1.5 requires Flash Attention to run.
+
+We are working on making this optional, but in the meantime it requires an extra installation step, and the model can only be used on CUDA-enabled devices.
+
+1. Follow the Environment Setup section of our [Installation Guide](./installation). **Attention**: do not install `lerobot` in this step.
+2. Install [Flash Attention](https://github.com/Dao-AILab/flash-attention) by running:
+
+```bash
+# Check https://pytorch.org/get-started/locally/ for your system
+pip install "torch>=2.2.1,<2.8.0" "torchvision>=0.21.0,<0.23.0" # --index-url https://download.pytorch.org/whl/cu1XX
+pip install ninja "packaging>=24.2,<26.0" # flash attention dependencies
+pip install "flash-attn>=2.5.9,<3.0.0" --no-build-isolation
+python -c "import flash_attn; print(f'Flash Attention {flash_attn.__version__} imported successfully')"
+```
+
+3. 
Install LeRobot by running:
+
+```bash
+pip install "lerobot[groot]"
+```
+
+## Usage
+
+To use GR00T in your LeRobot configuration, specify the policy type as:
+
+```python
+policy.type=groot
+```
+
+## Training
+
+### Training Command Example
+
+Here's a complete training command for finetuning the base GR00T model on your own dataset:
+
+```bash
+# Using a multi-GPU setup
+accelerate launch \
+  --multi_gpu \
+  --num_processes=$NUM_GPUS \
+  $(which lerobot-train) \
+  --output_dir=$OUTPUT_DIR \
+  --save_checkpoint=true \
+  --batch_size=$BATCH_SIZE \
+  --steps=$NUM_STEPS \
+  --save_freq=$SAVE_FREQ \
+  --log_freq=$LOG_FREQ \
+  --policy.push_to_hub=true \
+  --policy.type=groot \
+  --policy.repo_id=$REPO_ID \
+  --policy.tune_diffusion_model=false \
+  --dataset.repo_id=$DATASET_ID \
+  --wandb.enable=true \
+  --wandb.disable_artifact=true \
+  --job_name=$JOB_NAME
+```
+
+## Performance Results
+
+### Libero Benchmark Results
+
+> [!NOTE]
+> Follow our instructions for Libero usage: [Libero](./libero)
+
+GR00T has demonstrated strong performance on the Libero benchmark suite. To compare and test its LeRobot implementation, we finetuned the GR00T N1.5 model for 30k steps on the Libero dataset and compared the results to the GR00T reference results.
+
+| Benchmark          | LeRobot Implementation | GR00T Reference |
+| ------------------ | ---------------------- | --------------- |
+| **Libero Spatial** | 82.0%                  | 92.0%           |
+| **Libero Object**  | 99.0%                  | 92.0%           |
+| **Libero Long**    | 82.0%                  | 76.0%           |
+| **Average**        | 87.0%                  | 87.0%           |
+
+These results demonstrate GR00T's strong generalization capabilities across diverse robotic manipulation tasks. To reproduce these results, you can follow the instructions in the [Libero](https://huggingface.co/docs/lerobot/libero) section.
+
+### Evaluate in your hardware setup
+
+Once you have trained your model with your own parameters, you can run inference on your downstream task. Follow the instructions in [Imitation Learning for Robots](./il_robots). For example:
+
+```bash
+lerobot-record \
+  --robot.type=bi_so100_follower \
+  --robot.left_arm_port=/dev/ttyACM1 \
+  --robot.right_arm_port=/dev/ttyACM0 \
+  --robot.id=bimanual_follower \
+  --robot.cameras='{ right: {"type": "opencv", "index_or_path": 0, "width": 640, "height": 480, "fps": 30},
+    left: {"type": "opencv", "index_or_path": 2, "width": 640, "height": 480, "fps": 30},
+    top: {"type": "opencv", "index_or_path": 4, "width": 640, "height": 480, "fps": 30},
+  }' \
+  --display_data=true \
+  --dataset.repo_id=/eval_groot-bimanual \
+  --dataset.num_episodes=10 \
+  --dataset.single_task="Grab and handover the red cube to the other arm" \
+  --dataset.episode_time_s=30 \
+  --dataset.reset_time_s=10 \
+  --policy.path=/groot-bimanual # your trained model
+```
+
+## License
+
+This model follows the **Apache 2.0 License**, consistent with the original [GR00T repository](https://github.com/NVIDIA/Isaac-GR00T).
diff --git a/docs/source/hilserl.mdx b/docs/source/hilserl.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..dacaa960aa73c93460aaaa294fb60d54c594e971
--- /dev/null
+++ b/docs/source/hilserl.mdx
@@ -0,0 +1,923 @@
+# HIL-SERL Real Robot Training Workflow Guide
+
+In this tutorial you will go through the full Human-in-the-Loop Sample-Efficient Reinforcement Learning (HIL-SERL) workflow using LeRobot. You will master training a policy with RL on a real robot in just a few hours. 
+ +HIL-SERL is a sample-efficient reinforcement learning algorithm that combines human demonstrations with online learning and human interventions. The approach starts from a small set of human demonstrations, uses them to train a reward classifier, and then employs an actor-learner architecture where humans can intervene during policy execution to guide exploration and correct unsafe behaviors. In this tutorial, you'll use a gamepad to provide interventions and control the robot during the learning process. + +It combines three key ingredients: + +1. **Offline demonstrations & reward classifier:** a handful of human-teleop episodes plus a vision-based success detector give the policy a shaped starting point. + +2. **On-robot actor / learner loop with human interventions:** a distributed Soft Actor Critic (SAC) learner updates the policy while an actor explores on the physical robot; the human can jump in at any time to correct dangerous or unproductive behaviour. + +3. **Safety & efficiency tools:** joint/end-effector (EE) bounds, crop region of interest (ROI) preprocessing and WandB monitoring keep the data useful and the hardware safe. + +Together these elements let HIL-SERL reach near-perfect task success and faster cycle times than imitation-only baselines. + +

+ HIL-SERL workflow +

+ +

+ HIL-SERL workflow, Luo et al. 2024 +

+ +This guide provides step-by-step instructions for training a robot policy using LeRobot's HilSerl implementation to train on a real robot. + +## What do I need? + +- A gamepad (recommended) or keyboard to control the robot +- A Nvidia GPU +- A real robot with a follower and leader arm (optional if you use the keyboard or the gamepad) +- A URDF file for the robot for the kinematics package (check `lerobot/model/kinematics.py`) + +## What kind of tasks can I train? + +One can use HIL-SERL to train on a variety of manipulation tasks. Some recommendations: + +- Start with a simple task to understand how the system works. + - Push cube to a goal region + - Pick and lift cube with the gripper +- Avoid extremely long horizon tasks. Focus on tasks that can be completed in 5-10 seconds. +- Once you have a good idea of how the system works, you can try more complex tasks and longer horizons. + - Pick and place cube + - Bimanual tasks to pick objects with two arms + - Hand-over tasks to transfer objects from one arm to another + - Go crazy! + +## Install LeRobot with HIL-SERL + +To install LeRobot with HIL-SERL, you need to install the `hilserl` extra. + +```bash +pip install -e ".[hilserl]" +``` + +## Real Robot Training Workflow + +### Understanding Configuration + +The training process begins with proper configuration for the HILSerl environment. The main configuration class is `GymManipulatorConfig` in `lerobot/rl/gym_manipulator.py`, which contains nested `HILSerlRobotEnvConfig` and `DatasetConfig`. The configuration is organized into focused, nested sub-configs: + + +```python +class GymManipulatorConfig: + env: HILSerlRobotEnvConfig # Environment configuration (nested) + dataset: DatasetConfig # Dataset recording/replay configuration (nested) + mode: str | None = None # "record", "replay", or None (for training) + device: str = "cpu" # Compute device + +class HILSerlRobotEnvConfig(EnvConfig): + robot: RobotConfig | None = None # Main robot agent (defined in `lerobot/robots`) + teleop: TeleoperatorConfig | None = None # Teleoperator agent, e.g., gamepad or leader arm + processor: HILSerlProcessorConfig # Processing pipeline configuration (nested) + name: str = "real_robot" # Environment name + task: str | None = None # Task identifier + fps: int = 10 # Control frequency + +# Nested processor configuration +class HILSerlProcessorConfig: + control_mode: str = "gamepad" # Control mode + observation: ObservationConfig | None = None # Observation processing settings + image_preprocessing: ImagePreprocessingConfig | None = None # Image crop/resize settings + gripper: GripperConfig | None = None # Gripper control and penalty settings + reset: ResetConfig | None = None # Environment reset and timing settings + inverse_kinematics: InverseKinematicsConfig | None = None # IK processing settings + reward_classifier: RewardClassifierConfig | None = None # Reward classifier settings + max_gripper_pos: float | None = 100.0 # Maximum gripper position + +# Sub-configuration classes +class ObservationConfig: + add_joint_velocity_to_observation: bool = False # Add joint velocities to state + add_current_to_observation: bool = False # Add motor currents to state + display_cameras: bool = False # Display camera feeds during execution + +class ImagePreprocessingConfig: + crop_params_dict: dict[str, tuple[int, int, int, int]] | None = None # Image cropping parameters + resize_size: tuple[int, int] | None = None # Target image size + +class GripperConfig: + use_gripper: bool = True # Enable gripper control + 
gripper_penalty: float = 0.0 # Penalty for inappropriate gripper usage + +class ResetConfig: + fixed_reset_joint_positions: Any | None = None # Joint positions for reset + reset_time_s: float = 5.0 # Time to wait during reset + control_time_s: float = 20.0 # Maximum episode duration + terminate_on_success: bool = True # Whether to terminate episodes on success detection + +class InverseKinematicsConfig: + urdf_path: str | None = None # Path to robot URDF file + target_frame_name: str | None = None # End-effector frame name + end_effector_bounds: dict[str, list[float]] | None = None # EE workspace bounds + end_effector_step_sizes: dict[str, float] | None = None # EE step sizes per axis + +class RewardClassifierConfig: + pretrained_path: str | None = None # Path to pretrained reward classifier + success_threshold: float = 0.5 # Success detection threshold + success_reward: float = 1.0 # Reward value for successful episodes + +# Dataset configuration +class DatasetConfig: + repo_id: str # LeRobot dataset repository ID + task: str # Task identifier + root: str | None = None # Local dataset root directory + num_episodes_to_record: int = 5 # Number of episodes for recording + replay_episode: int | None = None # Episode index for replay + push_to_hub: bool = False # Whether to push datasets to Hub +``` + + +### Processor Pipeline Architecture + +HIL-SERL uses a modular processor pipeline architecture that processes robot observations and actions through a series of composable steps. The pipeline is divided into two main components: + +#### Environment Processor Pipeline + +The environment processor (`env_processor`) handles incoming observations and environment state: + +1. **VanillaObservationProcessorStep**: Converts raw robot observations into standardized format +2. **JointVelocityProcessorStep** (optional): Adds joint velocity information to observations +3. **MotorCurrentProcessorStep** (optional): Adds motor current readings to observations +4. **ForwardKinematicsJointsToEE** (optional): Computes end-effector pose from joint positions +5. **ImageCropResizeProcessorStep** (optional): Crops and resizes camera images +6. **TimeLimitProcessorStep** (optional): Enforces episode time limits +7. **GripperPenaltyProcessorStep** (optional): Applies penalties for inappropriate gripper usage +8. **RewardClassifierProcessorStep** (optional): Automated reward detection using vision models +9. **AddBatchDimensionProcessorStep**: Converts data to batch format for neural network processing +10. **DeviceProcessorStep**: Moves data to the specified compute device (CPU/GPU) + +#### Action Processor Pipeline + +The action processor (`action_processor`) handles outgoing actions and human interventions: + +1. **AddTeleopActionAsComplimentaryDataStep**: Captures teleoperator actions for logging +2. **AddTeleopEventsAsInfoStep**: Records intervention events and episode control signals +3. **InterventionActionProcessorStep**: Handles human interventions and episode termination +4. 
**Inverse Kinematics Pipeline** (when enabled): + - **MapDeltaActionToRobotActionStep**: Converts delta actions to robot action format + - **EEReferenceAndDelta**: Computes end-effector reference and delta movements + - **EEBoundsAndSafety**: Enforces workspace safety bounds + - **InverseKinematicsEEToJoints**: Converts end-effector actions to joint targets + - **GripperVelocityToJoint**: Handles gripper control commands + +#### Configuration Examples + +**Basic Observation Processing**: + +```json +{ + "env": { + "processor": { + "observation": { + "add_joint_velocity_to_observation": true, + "add_current_to_observation": false, + "display_cameras": false + } + } + } +} +``` + +**Image Processing**: + +```json +{ + "env": { + "processor": { + "image_preprocessing": { + "crop_params_dict": { + "observation.images.front": [180, 250, 120, 150], + "observation.images.side": [180, 207, 180, 200] + }, + "resize_size": [128, 128] + } + } + } +} +``` + +**Inverse Kinematics Setup**: + +```json +{ + "env": { + "processor": { + "inverse_kinematics": { + "urdf_path": "path/to/robot.urdf", + "target_frame_name": "end_effector", + "end_effector_bounds": { + "min": [0.16, -0.08, 0.03], + "max": [0.24, 0.2, 0.1] + }, + "end_effector_step_sizes": { + "x": 0.02, + "y": 0.02, + "z": 0.02 + } + } + } + } +} +``` + +### Advanced Observation Processing + +The HIL-SERL framework supports additional observation processing features that can improve policy learning: + +#### Joint Velocity Processing + +Enable joint velocity estimation to provide the policy with motion information: + +```json +{ + "env": { + "processor": { + "observation": { + "add_joint_velocity_to_observation": true + } + } + } +} +``` + +This processor: + +- Estimates joint velocities using finite differences between consecutive joint position readings +- Adds velocity information to the observation state vector +- Useful for policies that need motion awareness for dynamic tasks + +#### Motor Current Processing + +Monitor motor currents to detect contact forces and load conditions: + +```json +{ + "env": { + "processor": { + "observation": { + "add_current_to_observation": true + } + } + } +} +``` + +This processor: + +- Reads motor current values from the robot's control system +- Adds current measurements to the observation state vector +- Helps detect contact events, object weights, and mechanical resistance +- Useful for contact-rich manipulation tasks + +#### Combined Observation Processing + +You can enable multiple observation processing features simultaneously: + +```json +{ + "env": { + "processor": { + "observation": { + "add_joint_velocity_to_observation": true, + "add_current_to_observation": true, + "display_cameras": false + } + } + } +} +``` + +**Note**: Enabling additional observation features increases the state space dimensionality, which may require adjusting your policy network architecture and potentially collecting more training data. + +### Finding Robot Workspace Bounds + +Before collecting demonstrations, you need to determine the appropriate operational bounds for your robot. + +This helps simplify the problem of learning on the real robot in two ways: 1) by limiting the robot's operational space to a specific region that solves the task and avoids unnecessary or unsafe exploration, and 2) by allowing training in end-effector space rather than joint space. 
Empirically, learning in joint space for reinforcement learning in manipulation is often a harder problem - some tasks are nearly impossible to learn in joint space but become learnable when the action space is transformed to end-effector coordinates. + +**Using lerobot-find-joint-limits** + +This script helps you find the safe operational bounds for your robot's end-effector. Given that you have a follower and leader arm, you can use the script to find the bounds for the follower arm that will be applied during training. +Bounding the action space will reduce the redundant exploration of the agent and guarantees safety. + +```bash +lerobot-find-joint-limits \ + --robot.type=so100_follower \ + --robot.port=/dev/tty.usbmodem58760431541 \ + --robot.id=black \ + --teleop.type=so100_leader \ + --teleop.port=/dev/tty.usbmodem58760431551 \ + --teleop.id=blue +``` + +**Workflow** + +1. Run the script and move the robot through the space that solves the task +2. The script will record the minimum and maximum end-effector positions and the joint angles and prints them to the console, for example: + ``` + Max ee position [0.2417 0.2012 0.1027] + Min ee position [0.1663 -0.0823 0.0336] + Max joint positions [-20.0, -20.0, -20.0, -20.0, -20.0, -20.0] + Min joint positions [50.0, 50.0, 50.0, 50.0, 50.0, 50.0] + ``` +3. Use these values in the configuration of your teleoperation device (TeleoperatorConfig) under the `end_effector_bounds` field + +**Example Configuration** + +```json +"end_effector_bounds": { + "max": [0.24, 0.20, 0.10], + "min": [0.16, -0.08, 0.03] +} +``` + +### Collecting Demonstrations + +With the bounds defined, you can safely collect demonstrations for training. Training RL with off-policy algorithm allows us to use offline datasets collected in order to improve the efficiency of the learning process. + +**Setting Up Record Mode** + +Create a configuration file for recording demonstrations (or edit an existing one like [env_config.json](https://huggingface.co/datasets/lerobot/config_examples/resolve/main/rl/env_config.json)): + +1. Set `mode` to `"record"` at the root level +2. Specify a unique `repo_id` for your dataset in the `dataset` section (e.g., "username/task_name") +3. Set `num_episodes_to_record` in the `dataset` section to the number of demonstrations you want to collect +4. Set `env.processor.image_preprocessing.crop_params_dict` to `{}` initially (we'll determine crops later) +5. Configure `env.robot`, `env.teleop`, and other hardware settings in the `env` section + +Example configuration section: + +```json +{ + "env": { + "type": "gym_manipulator", + "name": "real_robot", + "fps": 10, + "processor": { + "control_mode": "gamepad", + "observation": { + "display_cameras": false + }, + "image_preprocessing": { + "crop_params_dict": {}, + "resize_size": [128, 128] + }, + "gripper": { + "use_gripper": true, + "gripper_penalty": 0.0 + }, + "reset": { + "reset_time_s": 5.0, + "control_time_s": 20.0 + } + }, + "robot": { + // ... robot configuration ... + }, + "teleop": { + // ... teleoperator configuration ... + } + }, + "dataset": { + "repo_id": "username/pick_lift_cube", + "root": null, + "task": "pick_and_lift", + "num_episodes_to_record": 15, + "replay_episode": 0, + "push_to_hub": true + }, + "mode": "record", + "device": "cpu" +} +``` + +### Using a Teleoperation Device + +Along with your robot, you will need a teleoperation device to control it in order to collect datasets of your task and perform interventions during the online training. 
+We support using a gamepad or a keyboard or the leader arm of the robot. + +HIL-Serl learns actions in the end-effector space of the robot. Therefore, the teleoperation will control the end-effector's x,y,z displacements. + +For that we need to define a version of the robot that takes actions in the end-effector space. Check the robot class `SO100FollowerEndEffector` and its configuration `SO100FollowerEndEffectorConfig` for the default parameters related to the end-effector space. + + +```python +class SO100FollowerEndEffectorConfig(SO100FollowerConfig): + """Configuration for the SO100FollowerEndEffector robot.""" + + # Default bounds for the end-effector position (in meters) + end_effector_bounds: dict[str, list[float]] = field( # bounds for the end-effector in x,y,z direction + default_factory=lambda: { + "min": [-1.0, -1.0, -1.0], # min x, y, z + "max": [1.0, 1.0, 1.0], # max x, y, z + } + ) + + max_gripper_pos: float = 50 # maximum gripper position that the gripper will be open at + + end_effector_step_sizes: dict[str, float] = field( # maximum step size for the end-effector in x,y,z direction + default_factory=lambda: { + "x": 0.02, + "y": 0.02, + "z": 0.02, + } + ) +``` + + +The `Teleoperator` defines the teleoperation device. You can check the list of available teleoperators in `lerobot/teleoperators`. + +**Setting up the Gamepad** + +The gamepad provides a very convenient way to control the robot and the episode state. + +To setup the gamepad, you need to set the `control_mode` to `"gamepad"` and define the `teleop` section in the configuration file. + +```json +{ + "env": { + "teleop": { + "type": "gamepad", + "use_gripper": true + }, + "processor": { + "control_mode": "gamepad", + "gripper": { + "use_gripper": true + } + } + } +} +``` + +

+ Figure shows the control mappings on a Logitech gamepad. +

+

+ Gamepad button mapping for robot control and episode management +

+ +**Setting up the SO101 leader** + +The SO101 leader arm has reduced gears that allows it to move and track the follower arm during exploration. Therefore, taking over is much smoother than the gearless SO100. + +To setup the SO101 leader, you need to set the `control_mode` to `"leader"` and define the `teleop` section in the configuration file. + +```json +{ + "env": { + "teleop": { + "type": "so101_leader", + "port": "/dev/tty.usbmodem585A0077921", + "use_degrees": true + }, + "processor": { + "control_mode": "leader", + "gripper": { + "use_gripper": true + } + } + } +} +``` + +In order to annotate the success/failure of the episode, **you will need** to use a keyboard to press `s` for success, `esc` for failure. +During the online training, press `space` to take over the policy and `space` again to give the control back to the policy. + +
+Video: SO101 leader teleoperation + +
+ +
+ +

SO101 leader teleoperation example, the leader tracks the follower, press `space` to intervene

+
+ +**Recording Demonstrations** + +Start the recording process, an example of the config file can be found [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/env_config_so100.json): + +```bash +python -m lerobot.rl.gym_manipulator --config_path src/lerobot/configs/env_config_so100.json +``` + +During recording: + +1. The robot will reset to the initial position defined in the configuration file `env.processor.reset.fixed_reset_joint_positions` +2. Complete the task successfully +3. The episode ends with a reward of 1 when you press the "success" button +4. If the time limit is reached, or the fail button is pressed, the episode ends with a reward of 0 +5. You can rerecord an episode by pressing the "rerecord" button +6. The process automatically continues to the next episode +7. After recording all episodes, the dataset is pushed to the Hugging Face Hub (optional) and saved locally + +### Processing the Dataset + +After collecting demonstrations, process them to determine optimal camera crops. +Reinforcement learning is sensitive to background distractions, so it is important to crop the images to the relevant workspace area. + +Visual RL algorithms learn directly from pixel inputs, making them vulnerable to irrelevant visual information. Background elements like changing lighting, shadows, people moving, or objects outside the workspace can confuse the learning process. Good ROI selection should: + +- Include only the essential workspace where the task happens +- Capture the robot's end-effector and all objects involved in the task +- Exclude unnecessary background elements and distractions + +Note: If you already know the crop parameters, you can skip this step and just set the `crop_params_dict` in the configuration file during recording. + +**Determining Crop Parameters** + +Use the `crop_dataset_roi.py` script to interactively select regions of interest in your camera images: + +```bash +python -m lerobot.rl.crop_dataset_roi --repo-id username/pick_lift_cube +``` + +1. For each camera view, the script will display the first frame +2. Draw a rectangle around the relevant workspace area +3. Press 'c' to confirm the selection +4. Repeat for all camera views +5. The script outputs cropping parameters and creates a new cropped dataset + +Example output: + +``` +Selected Rectangular Regions of Interest (top, left, height, width): +observation.images.side: [180, 207, 180, 200] +observation.images.front: [180, 250, 120, 150] +``` + +

+ +

+ +

+ Interactive cropping tool for selecting regions of interest +

+ +**Updating Configuration** + +Add these crop parameters to your training configuration: + +```json +{ + "env": { + "processor": { + "image_preprocessing": { + "crop_params_dict": { + "observation.images.side": [180, 207, 180, 200], + "observation.images.front": [180, 250, 120, 150] + }, + "resize_size": [128, 128] + } + } + } +} +``` + +**Recommended image resolution** + +Most vision-based policies have been validated on square inputs of either **128×128** (default) or **64×64** pixels. We therefore advise setting the resize_size parameter to [128, 128] – or [64, 64] if you need to save GPU memory and bandwidth. Other resolutions are possible but have not been extensively tested. + +### Training a Reward Classifier + +The reward classifier plays an important role in the HIL-SERL workflow by automating reward assignment and automatically detecting episode success. Instead of manually defining reward functions or relying on human feedback for every timestep, the reward classifier learns to predict success/failure from visual observations. This enables the RL algorithm to learn efficiently by providing consistent and automated reward signals based on the robot's camera inputs. + +This guide explains how to train a reward classifier for human-in-the-loop reinforcement learning implementation of LeRobot. Reward classifiers learn to predict the reward value given a state which can be used in an RL setup to train a policy. + +**Note**: Training a reward classifier is optional. You can start the first round of RL experiments by annotating the success manually with your gamepad or keyboard device. + +The reward classifier implementation in `modeling_classifier.py` uses a pretrained vision model to process the images. It can output either a single value for binary rewards to predict success/fail cases or multiple values for multi-class settings. + +**Collecting a Dataset for the reward classifier** + +Before training, you need to collect a dataset with labeled examples. The `record_dataset` function in `gym_manipulator.py` enables the process of collecting a dataset of observations, actions, and rewards. + +To collect a dataset, you need to modify some parameters in the environment configuration based on HILSerlRobotEnvConfig. + +```bash +python -m lerobot.rl.gym_manipulator --config_path src/lerobot/configs/reward_classifier_train_config.json +``` + +**Key Parameters for Data Collection** + +- **mode**: set it to `"record"` to collect a dataset (at root level) +- **dataset.repo_id**: `"hf_username/dataset_name"`, name of the dataset and repo on the hub +- **dataset.num_episodes_to_record**: Number of episodes to record +- **env.processor.reset.terminate_on_success**: Whether to automatically terminate episodes when success is detected (default: `true`) +- **env.fps**: Number of frames per second to record +- **dataset.push_to_hub**: Whether to push the dataset to the hub + +The `env.processor.reset.terminate_on_success` parameter allows you to control episode termination behavior. When set to `false`, episodes will continue even after success is detected, allowing you to collect more positive examples with the reward=1 label. This is crucial for training reward classifiers as it provides more success state examples in your dataset. When set to `true` (default), episodes terminate immediately upon success detection. + +**Important**: For reward classifier training, set `terminate_on_success: false` to collect sufficient positive examples. 
For regular HIL-SERL training, keep it as `true` to enable automatic episode termination when the task is completed successfully. + +Example configuration section for data collection: + +```json +{ + "env": { + "type": "gym_manipulator", + "name": "real_robot", + "fps": 10, + "processor": { + "reset": { + "reset_time_s": 5.0, + "control_time_s": 20.0, + "terminate_on_success": false + }, + "gripper": { + "use_gripper": true + } + }, + "robot": { + // ... robot configuration ... + }, + "teleop": { + // ... teleoperator configuration ... + } + }, + "dataset": { + "repo_id": "hf_username/dataset_name", + "dataset_root": "data/your_dataset", + "task": "reward_classifier_task", + "num_episodes_to_record": 20, + "replay_episode": null, + "push_to_hub": true + }, + "mode": "record", + "device": "cpu" +} +``` + +**Reward Classifier Configuration** + +The reward classifier is configured using `configuration_classifier.py`. Here are the key parameters: + +- **model_name**: Base model architecture (e.g., we mainly use `"helper2424/resnet10"`) +- **model_type**: `"cnn"` or `"transformer"` +- **num_cameras**: Number of camera inputs +- **num_classes**: Number of output classes (typically 2 for binary success/failure) +- **hidden_dim**: Size of hidden representation +- **dropout_rate**: Regularization parameter +- **learning_rate**: Learning rate for optimizer + +Example configuration for training the [reward classifier](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/reward_classifier_train_config.json): + +```json +{ + "policy": { + "type": "reward_classifier", + "model_name": "helper2424/resnet10", + "model_type": "cnn", + "num_cameras": 2, + "num_classes": 2, + "hidden_dim": 256, + "dropout_rate": 0.1, + "learning_rate": 1e-4, + "device": "cuda", + "use_amp": true, + "input_features": { + "observation.images.front": { + "type": "VISUAL", + "shape": [3, 128, 128] + }, + "observation.images.side": { + "type": "VISUAL", + "shape": [3, 128, 128] + } + } + } +} +``` + +**Training the Classifier** + +To train the classifier, use the `train.py` script with your configuration: + +```bash +lerobot-train --config_path path/to/reward_classifier_train_config.json +``` + +**Deploying and Testing the Model** + +To use your trained reward classifier, configure the `HILSerlRobotEnvConfig` to use your model: + + +```python +config = GymManipulatorConfig( + env=HILSerlRobotEnvConfig( + processor=HILSerlProcessorConfig( + reward_classifier=RewardClassifierConfig( + pretrained_path="path_to_your_pretrained_trained_model" + ) + ), + # Other environment parameters + ), + dataset=DatasetConfig(...), + mode=None # For training +) +``` + + +or set the argument in the json config file. + +```json +{ + "env": { + "processor": { + "reward_classifier": { + "pretrained_path": "path_to_your_pretrained_model", + "success_threshold": 0.7, + "success_reward": 1.0 + }, + "reset": { + "terminate_on_success": true + } + } + } +} +``` + +Run `gym_manipulator.py` to test the model. + +```bash +python -m lerobot.rl.gym_manipulator --config_path path/to/env_config.json +``` + +The reward classifier will automatically provide rewards based on the visual input from the robot's cameras. + +**Example Workflow for training the reward classifier** + +1. **Create the configuration files**: + Create the necessary json configuration files for the reward classifier and the environment. Check the examples [here](https://huggingface.co/datasets/lerobot/config_examples/resolve/main/reward_classifier/config.json). + +2. 
**Collect a dataset**: + + ```bash + python -m lerobot.rl.gym_manipulator --config_path src/lerobot/configs/env_config.json + ``` + +3. **Train the classifier**: + + ```bash + lerobot-train --config_path src/lerobot/configs/reward_classifier_train_config.json + ``` + +4. **Test the classifier**: + ```bash + python -m lerobot.rl.gym_manipulator --config_path src/lerobot/configs/env_config.json + ``` + +### Training with Actor-Learner + +The LeRobot system uses a distributed actor-learner architecture for training. This architecture decouples robot interactions from the learning process, allowing them to run concurrently without blocking each other. The actor server handles robot observations and actions, sending interaction data to the learner server. The learner server performs gradient descent and periodically updates the actor's policy weights. You will need to start two processes: a learner and an actor. + +**Configuration Setup** + +Create a training configuration file (example available [here](https://huggingface.co/datasets/lerobot/config_examples/resolve/main/rl/train_config.json)). The training config is based on the main `TrainRLServerPipelineConfig` class in `lerobot/configs/train.py`. + +1. Configure the policy settings (`type="sac"`, `device`, etc.) +2. Set `dataset` to your cropped dataset +3. Configure environment settings with crop parameters +4. Check the other parameters related to SAC in [configuration_sac.py](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/sac/configuration_sac.py#L79). +5. Verify that the `policy` config is correct with the right `input_features` and `output_features` for your task. + +**Starting the Learner** + +First, start the learner server process: + +```bash +python -m lerobot.rl.learner --config_path src/lerobot/configs/train_config_hilserl_so100.json +``` + +The learner: + +- Initializes the policy network +- Prepares replay buffers +- Opens a `gRPC` server to communicate with actors +- Processes transitions and updates the policy + +**Starting the Actor** + +In a separate terminal, start the actor process with the same configuration: + +```bash +python -m lerobot.rl.actor --config_path src/lerobot/configs/train_config_hilserl_so100.json +``` + +The actor: + +- Connects to the learner via `gRPC` +- Initializes the environment +- Execute rollouts of the policy to collect experience +- Sends transitions to the learner +- Receives updated policy parameters + +**Training Flow** + +The training proceeds automatically: + +1. The actor executes the policy in the environment +2. Transitions are collected and sent to the learner +3. The learner updates the policy based on these transitions +4. Updated policy parameters are sent back to the actor +5. The process continues until the specified step limit is reached + +**Human in the Loop** + +- The key to learning efficiently is to have human interventions to provide corrective feedback and completing the task to aide the policy learning and exploration. +- To perform human interventions, you can press the upper right trigger button on the gamepad (or the `space` key on the keyboard). This will pause the policy actions and allow you to take over. +- A successful experiment is one where the human has to intervene at the start but then reduces the amount of interventions as the policy improves. You can monitor the intervention rate in the `wandb` dashboard. + +

+*Figure: Control mappings on a Logitech gamepad.*
+
+*Figure: Example showing how human interventions help guide policy learning over time.*
+
+- The figure plots the episodic reward against interaction steps, illustrating the effect of human interventions on policy learning.
+- The orange curve is an experiment without any human interventions, while the pink and blue curves are experiments with human interventions.
+- We can observe that the number of steps before the policy starts achieving the maximum reward is cut by a quarter when human interventions are present.
+
+**Monitoring and Debugging**
+
+If you have `wandb.enable` set to `true` in your configuration, you can monitor training progress in real time through the [Weights & Biases](https://wandb.ai/site/) dashboard.
+
+### Guide to Human Interventions
+
+The learning process is very sensitive to the intervention strategy. It will take a few runs to understand how to intervene effectively. Some tips and hints:
+
+- Allow the policy to explore for a few episodes at the start of training.
+- Avoid intervening for long periods of time. Prefer short corrections of the robot's behaviour when it goes off track.
+- Once the policy starts achieving the task, even if it's not perfect, you can limit your interventions to quick actions such as a simple grasping command.
+
+Ideally, your intervention rate should drop gradually during training, as shown in the figure below.
+

+*Figure: Plot of the intervention rate during a training run on a pick-and-lift-cube task.*
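+
+The intervention rate plotted above is simply the fraction of environment steps during which the human was in control. A minimal sketch of that bookkeeping (illustrative, not LeRobot's actual logging code):
+
+```python
+def intervention_rate(step_was_intervention: list[bool]) -> float:
+    """Fraction of steps in an episode where the human had control."""
+    if not step_was_intervention:
+        return 0.0
+    return sum(step_was_intervention) / len(step_was_intervention)
+
+# e.g. an episode where the human took over for 20 of 100 steps:
+assert intervention_rate([True] * 20 + [False] * 80) == 0.2
+```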
+ +### Key hyperparameters to tune + +Some configuration values have a disproportionate impact on training stability and speed: + +- **`temperature_init`** (`policy.temperature_init`) – initial entropy temperature in SAC. Higher values encourage more exploration; lower values make the policy more deterministic early on. A good starting point is `1e-2`. We observed that setting it too high can make human interventions ineffective and slow down learning. +- **`policy_parameters_push_frequency`** (`policy.actor_learner_config.policy_parameters_push_frequency`) – interval in _seconds_ between two weight pushes from the learner to the actor. The default is `4 s`. Decrease to **1-2 s** to provide fresher weights (at the cost of more network traffic); increase only if your connection is slow, as this will reduce sample efficiency. +- **`storage_device`** (`policy.storage_device`) – device on which the learner keeps the policy parameters. If you have spare GPU memory, set this to `"cuda"` (instead of the default `"cpu"`). Keeping the weights on-GPU removes CPU→GPU transfer overhead and can significantly increase the number of learner updates per second. + +Congrats 🎉, you have finished this tutorial! + +> [!TIP] +> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb). + +Paper citation: + +``` +@article{luo2024precise, + title={Precise and Dexterous Robotic Manipulation via Human-in-the-Loop Reinforcement Learning}, + author={Luo, Jianlan and Xu, Charles and Wu, Jeffrey and Levine, Sergey}, + journal={arXiv preprint arXiv:2410.21845}, + year={2024} +} +``` diff --git a/docs/source/hilserl_sim.mdx b/docs/source/hilserl_sim.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3af01f55a333e8e28468d0757bf1c1c658cbb659 --- /dev/null +++ b/docs/source/hilserl_sim.mdx @@ -0,0 +1,154 @@ +# Train RL in Simulation + +This guide explains how to use the `gym_hil` simulation environments as an alternative to real robots when working with the LeRobot framework for Human-In-the-Loop (HIL) reinforcement learning. + +`gym_hil` is a package that provides Gymnasium-compatible simulation environments specifically designed for Human-In-the-Loop reinforcement learning. These environments allow you to: + +- Train policies in simulation to test the RL stack before training on real robots + +- Collect demonstrations in sim using external devices like gamepads or keyboards +- Perform human interventions during policy learning + +Currently, the main environment is a Franka Panda robot simulation based on MuJoCo, with tasks like picking up a cube. + +## Installation + +First, install the `gym_hil` package within the LeRobot environment: + +```bash +pip install -e ".[hilserl]" +``` + +## What do I need? + +- A gamepad or keyboard to control the robot +- A Nvidia GPU + +## Configuration + +To use `gym_hil` with LeRobot, you need to create a configuration file. An example is provided [here](https://huggingface.co/datasets/lerobot/config_examples/resolve/main/rl/gym_hil/env_config.json). 
Key configuration sections include: + +### Environment Type and Task + +```json +{ + "env": { + "type": "gym_manipulator", + "name": "gym_hil", + "task": "PandaPickCubeGamepad-v0", + "fps": 10 + }, + "device": "cuda" +} +``` + +Available tasks: + +- `PandaPickCubeBase-v0`: Basic environment +- `PandaPickCubeGamepad-v0`: With gamepad control +- `PandaPickCubeKeyboard-v0`: With keyboard control + +### Processor Configuration + +```json +{ + "env": { + "processor": { + "control_mode": "gamepad", + "gripper": { + "use_gripper": true, + "gripper_penalty": -0.02 + }, + "reset": { + "control_time_s": 15.0, + "fixed_reset_joint_positions": [ + 0.0, 0.195, 0.0, -2.43, 0.0, 2.62, 0.785 + ] + }, + "inverse_kinematics": { + "end_effector_step_sizes": { + "x": 0.025, + "y": 0.025, + "z": 0.025 + } + } + } + } +} +``` + +Important parameters: + +- `gripper.gripper_penalty`: Penalty for excessive gripper movement +- `gripper.use_gripper`: Whether to enable gripper control +- `inverse_kinematics.end_effector_step_sizes`: Size of the steps in the x,y,z axes of the end-effector +- `control_mode`: Set to `"gamepad"` to use a gamepad controller + +## Running with HIL RL of LeRobot + +### Basic Usage + +To run the environment, set mode to null: + +```bash +python -m lerobot.rl.gym_manipulator --config_path path/to/gym_hil_env.json +``` + +### Recording a Dataset + +To collect a dataset, set the mode to `record` whilst defining the repo_id and number of episodes to record: + +```json +{ + "env": { + "type": "gym_manipulator", + "name": "gym_hil", + "task": "PandaPickCubeGamepad-v0" + }, + "dataset": { + "repo_id": "username/sim_dataset", + "root": null, + "task": "pick_cube", + "num_episodes_to_record": 10, + "replay_episode": null, + "push_to_hub": true + }, + "mode": "record" +} +``` + +```bash +python -m lerobot.rl.gym_manipulator --config_path path/to/gym_hil_env.json +``` + +### Training a Policy + +To train a policy, checkout the configuration example available [here](https://huggingface.co/datasets/lerobot/config_examples/resolve/main/rl/gym_hil/train_config.json) and run the actor and learner servers: + +```bash +python -m lerobot.rl.actor --config_path path/to/train_gym_hil_env.json +``` + +In a different terminal, run the learner server: + +```bash +python -m lerobot.rl.learner --config_path path/to/train_gym_hil_env.json +``` + +The simulation environment provides a safe and repeatable way to develop and test your Human-In-the-Loop reinforcement learning components before deploying to real robots. + +Congrats 🎉, you have finished this tutorial! + +> [!TIP] +> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb). + +Paper citation: + +``` +@article{luo2024precise, + title={Precise and Dexterous Robotic Manipulation via Human-in-the-Loop Reinforcement Learning}, + author={Luo, Jianlan and Xu, Charles and Wu, Jeffrey and Levine, Sergey}, + journal={arXiv preprint arXiv:2410.21845}, + year={2024} +} +``` diff --git a/docs/source/hope_jr.mdx b/docs/source/hope_jr.mdx new file mode 100644 index 0000000000000000000000000000000000000000..91e4e608d3e05c4a885d5aacbd694882a4de8ba8 --- /dev/null +++ b/docs/source/hope_jr.mdx @@ -0,0 +1,277 @@ +# HopeJR + +## Prerequisites + +- [Hardware Setup](https://github.com/TheRobotStudio/HOPEJr) + +## Install LeRobot + +Follow the [installation instructions](https://github.com/huggingface/lerobot#installation) to install LeRobot. 
+ +Install LeRobot with HopeJR dependencies: + +```bash +pip install -e ".[hopejr]" +``` + +## Device Configuration + +Before starting calibration and operation, you need to identify the USB ports for each HopeJR component. Run this script to find the USB ports for the arm, hand, glove, and exoskeleton: + +```bash +lerobot-find-port +``` + +This will display the available USB ports and their associated devices. Make note of the port paths (e.g., `/dev/tty.usbmodem58760433331`, `/dev/tty.usbmodem11301`) as you'll need to specify them in the `--robot.port` and `--teleop.port` parameters when recording data, replaying episodes, or running teleoperation scripts. + +## Step 1: Calibration + +Before performing teleoperation, HopeJR's limbs need to be calibrated. Calibration files will be saved in `~/.cache/huggingface/lerobot/calibration` + +### 1.1 Calibrate Robot Hand + +```bash +lerobot-calibrate \ + --robot.type=hope_jr_hand \ + --robot.port=/dev/tty.usbmodem58760432281 \ + --robot.id=blue \ + --robot.side=right +``` + +When running the calibration script, a calibration GUI will pop up. Finger joints are named as follows: + +**Thumb**: + +- **CMC**: base joint connecting thumb to hand +- **MCP**: knuckle joint +- **PIP**: first finger joint +- **DIP** : fingertip joint + +**Index, Middle, Ring, and Pinky fingers**: + +- **Radial flexor**: Moves base of finger towards the thumb +- **Ulnar flexor**: Moves base of finger towards the pinky +- **PIP/DIP**: Flexes the distal and proximal phalanx of the finger + +Each one of these will need to be calibrated individually via the GUI. +Note that ulnar and radial flexors should have ranges of the same size (but with different offsets) in order to get symmetric movement. + +

+*Figure: Setting boundaries in the hand calibration GUI.*
+ +Use the calibration interface to set the range boundaries for each joint as shown above. + +

+*Figure: Saving calibration values.*
+ +Once you have set the appropriate boundaries for all joints, click "Save" to save the calibration values to the motors. + +### 1.2 Calibrate Teleoperator Glove + +```bash +lerobot-calibrate \ + --teleop.type=homunculus_glove \ + --teleop.port=/dev/tty.usbmodem11201 \ + --teleop.id=red \ + --teleop.side=right +``` + +Move each finger through its full range of motion, starting from the thumb. + +``` +Move thumb through its entire range of motion. +Recording positions. Press ENTER to stop... + +------------------------------------------- +NAME | MIN | POS | MAX +thumb_cmc | 1790 | 1831 | 1853 +thumb_mcp | 1497 | 1514 | 1528 +thumb_pip | 1466 | 1496 | 1515 +thumb_dip | 1463 | 1484 | 1514 +``` + +Continue with each finger: + +``` +Move middle through its entire range of motion. +Recording positions. Press ENTER to stop... + +------------------------------------------- +NAME | MIN | POS | MAX +middle_mcp_abduction | 1598 | 1718 | 1820 +middle_mcp_flexion | 1512 | 1658 | 2136 +middle_dip | 1484 | 1500 | 1547 +``` + +Once calibration is complete, the system will save the calibration to `/Users/your_username/.cache/huggingface/lerobot/calibration/teleoperators/homunculus_glove/red.json` + +### 1.3 Calibrate Robot Arm + +```bash +lerobot-calibrate \ + --robot.type=hope_jr_arm \ + --robot.port=/dev/tty.usbserial-1110 \ + --robot.id=white +``` + +This will open a calibration GUI where you can set the range limits for each motor. The arm motions are organized as follows: + +- **Shoulder**: pitch, yaw, and roll +- **Elbow**: flex +- **Wrist**: pitch, yaw, and roll + +

+*Figure: Setting boundaries in the arm calibration GUI.*
+ +Use the calibration interface to set the range boundaries for each joint. Move each joint through its full range of motion and adjust the minimum and maximum values accordingly. Once you have set the appropriate boundaries for all joints, save the calibration. + +### 1.4 Calibrate Teleoperator Exoskeleton + +```bash +lerobot-calibrate \ + --teleop.type=homunculus_arm \ + --teleop.port=/dev/tty.usbmodem11201 \ + --teleop.id=black +``` + +The exoskeleton allows one to control the robot arm. During calibration, you'll be prompted to move all joints through their full range of motion: + +``` +Move all joints through their entire range of motion. +Recording positions. Press ENTER to stop... + +------------------------------------------- +------------------------------------------- +NAME | MIN | POS | MAX +shoulder_pitch | 586 | 736 | 895 +shoulder_yaw | 1257 | 1374 | 1390 +shoulder_roll | 449 | 1034 | 2564 +elbow_flex | 3023 | 3117 | 3134 +wrist_roll | 3073 | 3096 | 3147 +wrist_yaw | 2143 | 2171 | 2185 +wrist_pitch | 1975 | 1993 | 2074 +Calibration saved to /Users/your_username/.cache/huggingface/lerobot/calibration/teleoperators/homunculus_arm/black.json +``` + +## Step 2: Teleoperation + +Due to global variable conflicts in the Feetech middleware, teleoperation for arm and hand must run in separate shell sessions: + +### Hand + +```bash +lerobot-teleoperate \ + --robot.type=hope_jr_hand \ + --robot.port=/dev/tty.usbmodem58760432281 \ + --robot.id=blue \ + --robot.side=right \ + --teleop.type=homunculus_glove \ + --teleop.port=/dev/tty.usbmodem11201 \ + --teleop.id=red \ + --teleop.side=right \ + --display_data=true \ + --fps=30 +``` + +### Arm + +```bash +lerobot-teleoperate \ + --robot.type=hope_jr_arm \ + --robot.port=/dev/tty.usbserial-1110 \ + --robot.id=white \ + --teleop.type=homunculus_arm \ + --teleop.port=/dev/tty.usbmodem11201 \ + --teleop.id=black \ + --display_data=true \ + --fps=30 +``` + +## Step 3: Record, Replay, Train + +Record, Replay and Train with Hope-JR is still experimental. + +### Record + +This step records the dataset, which can be seen as an example [here](https://huggingface.co/datasets/nepyope/hand_record_test_with_video_data/settings). + +```bash +lerobot-record \ + --robot.type=hope_jr_hand \ + --robot.port=/dev/tty.usbmodem58760432281 \ + --robot.id=right \ + --robot.side=right \ + --robot.cameras='{"main": {"type": "opencv", "index_or_path": 0, "width": 640, "height": 480, "fps": 30}}' \ + --teleop.type=homunculus_glove \ + --teleop.port=/dev/tty.usbmodem1201 \ + --teleop.id=right \ + --teleop.side=right \ + --dataset.repo_id=nepyope/hand_record_test_with_video_data \ + --dataset.single_task="Hand recording test with video data" \ + --dataset.num_episodes=1 \ + --dataset.episode_time_s=5 \ + --dataset.push_to_hub=true \ + --dataset.private=true \ + --display_data=true +``` + +### Replay + +```bash +lerobot-replay \ + --robot.type=hope_jr_hand \ + --robot.port=/dev/tty.usbmodem58760432281 \ + --robot.id=right \ + --robot.side=right \ + --dataset.repo_id=nepyope/hand_record_test_with_camera \ + --dataset.episode=0 +``` + +### Train + +```bash +lerobot-train \ + --dataset.repo_id=nepyope/hand_record_test_with_video_data \ + --policy.type=act \ + --output_dir=outputs/train/hopejr_hand \ + --job_name=hopejr \ + --policy.device=mps \ + --wandb.enable=true \ + --policy.repo_id=nepyope/hand_test_policy +``` + +### Evaluate + +This training run can be viewed as an example [here](https://wandb.ai/tino/lerobot/runs/rp0k8zvw?nw=nwusertino). 
+ +```bash +lerobot-record \ + --robot.type=hope_jr_hand \ + --robot.port=/dev/tty.usbmodem58760432281 \ + --robot.id=right \ + --robot.side=right \ + --robot.cameras='{"main": {"type": "opencv", "index_or_path": 0, "width": 640, "height": 480, "fps": 30}}' \ + --display_data=false \ + --dataset.repo_id=nepyope/eval_hopejr \ + --dataset.single_task="Evaluate hopejr hand policy" \ + --dataset.num_episodes=10 \ + --policy.path=outputs/train/hopejr_hand/checkpoints/last/pretrained_model +``` diff --git a/docs/source/il_robots.mdx b/docs/source/il_robots.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d70e14784ba0ac2b325b3280247033a6be54c683 --- /dev/null +++ b/docs/source/il_robots.mdx @@ -0,0 +1,603 @@ +# Imitation Learning on Real-World Robots + +This tutorial will explain how to train a neural network to control a real robot autonomously. + +**You'll learn:** + +1. How to record and visualize your dataset. +2. How to train a policy using your data and prepare it for evaluation. +3. How to evaluate your policy and visualize the results. + +By following these steps, you'll be able to replicate tasks, such as picking up a Lego block and placing it in a bin with a high success rate, as shown in the video below. + +
+*Video: pickup lego block task.*
+ +This tutorial isn’t tied to a specific robot: we walk you through the commands and API snippets you can adapt for any supported platform. + +During data collection, you’ll use a “teloperation” device, such as a leader arm or keyboard to teleoperate the robot and record its motion trajectories. + +Once you’ve gathered enough trajectories, you’ll train a neural network to imitate these trajectories and deploy the trained model so your robot can perform the task autonomously. + +If you run into any issues at any point, jump into our [Discord community](https://discord.com/invite/s3KuuzsPFb) for support. + +## Set up and Calibrate + +If you haven't yet set up and calibrated your robot and teleop device, please do so by following the robot-specific tutorial. + +## Teleoperate + +In this example, we’ll demonstrate how to teleoperate the SO101 robot. For each command, we also provide a corresponding API example. + +Note that the `id` associated with a robot is used to store the calibration file. It's important to use the same `id` when teleoperating, recording, and evaluating when using the same setup. + + + +```bash +lerobot-teleoperate \ + --robot.type=so101_follower \ + --robot.port=/dev/tty.usbmodem58760431541 \ + --robot.id=my_awesome_follower_arm \ + --teleop.type=so101_leader \ + --teleop.port=/dev/tty.usbmodem58760431551 \ + --teleop.id=my_awesome_leader_arm +``` + + + + +```python +from lerobot.teleoperators.so101_leader import SO101LeaderConfig, SO101Leader +from lerobot.robots.so101_follower import SO101FollowerConfig, SO101Follower + +robot_config = SO101FollowerConfig( + port="/dev/tty.usbmodem58760431541", + id="my_red_robot_arm", +) + +teleop_config = SO101LeaderConfig( + port="/dev/tty.usbmodem58760431551", + id="my_blue_leader_arm", +) + +robot = SO101Follower(robot_config) +teleop_device = SO101Leader(teleop_config) +robot.connect() +teleop_device.connect() + +while True: + action = teleop_device.get_action() + robot.send_action(action) +``` + + + + + +The teleoperate command will automatically: + +1. Identify any missing calibrations and initiate the calibration procedure. +2. Connect the robot and teleop device and start teleoperation. + +## Cameras + +To add cameras to your setup, follow this [Guide](./cameras#setup-cameras). + +## Teleoperate with cameras + +With `rerun`, you can teleoperate again while simultaneously visualizing the camera feeds and joint positions. In this example, we’re using the Koch arm. 
+ + + +```bash +lerobot-teleoperate \ + --robot.type=koch_follower \ + --robot.port=/dev/tty.usbmodem58760431541 \ + --robot.id=my_awesome_follower_arm \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \ + --teleop.type=koch_leader \ + --teleop.port=/dev/tty.usbmodem58760431551 \ + --teleop.id=my_awesome_leader_arm \ + --display_data=true +``` + + + + +```python +from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig +from lerobot.teleoperators.koch_leader import KochLeaderConfig, KochLeader +from lerobot.robots.koch_follower import KochFollowerConfig, KochFollower + +camera_config = { + "front": OpenCVCameraConfig(index_or_path=0, width=1920, height=1080, fps=30) +} + +robot_config = KochFollowerConfig( + port="/dev/tty.usbmodem585A0076841", + id="my_red_robot_arm", + cameras=camera_config +) + +teleop_config = KochLeaderConfig( + port="/dev/tty.usbmodem58760431551", + id="my_blue_leader_arm", +) + +robot = KochFollower(robot_config) +teleop_device = KochLeader(teleop_config) +robot.connect() +teleop_device.connect() + +while True: + observation = robot.get_observation() + action = teleop_device.get_action() + robot.send_action(action) +``` + + + + + +## Record a dataset + +Once you're familiar with teleoperation, you can record your first dataset. + +We use the Hugging Face hub features for uploading your dataset. If you haven't previously used the Hub, make sure you can login via the cli using a write-access token, this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens). + +Add your token to the CLI by running this command: + +```bash +huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential +``` + +Then store your Hugging Face repository name in a variable: + +```bash +HF_USER=$(hf auth whoami | head -n 1) +echo $HF_USER +``` + +Now you can record a dataset. To record 5 episodes and upload your dataset to the hub, adapt the code below for your robot and execute the command or API example. 
+ + + +```bash +lerobot-record \ + --robot.type=so101_follower \ + --robot.port=/dev/tty.usbmodem585A0076841 \ + --robot.id=my_awesome_follower_arm \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \ + --teleop.type=so101_leader \ + --teleop.port=/dev/tty.usbmodem58760431551 \ + --teleop.id=my_awesome_leader_arm \ + --display_data=true \ + --dataset.repo_id=${HF_USER}/record-test \ + --dataset.num_episodes=5 \ + --dataset.single_task="Grab the black cube" +``` + + + + +```python +from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.datasets.utils import hw_to_dataset_features +from lerobot.robots.so100_follower import SO100Follower, SO100FollowerConfig +from lerobot.teleoperators.so100_leader.config_so100_leader import SO100LeaderConfig +from lerobot.teleoperators.so100_leader.so100_leader import SO100Leader +from lerobot.utils.control_utils import init_keyboard_listener +from lerobot.utils.utils import log_say +from lerobot.utils.visualization_utils import init_rerun +from lerobot.record import record_loop + +NUM_EPISODES = 5 +FPS = 30 +EPISODE_TIME_SEC = 60 +RESET_TIME_SEC = 10 +TASK_DESCRIPTION = "My task description" + +# Create the robot and teleoperator configurations +camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} +robot_config = SO100FollowerConfig( + port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm", cameras=camera_config +) +teleop_config = SO100LeaderConfig(port="/dev/tty.usbmodem585A0077581", id="my_awesome_leader_arm") + +# Initialize the robot and teleoperator +robot = SO100Follower(robot_config) +teleop = SO100Leader(teleop_config) + +# Configure the dataset features +action_features = hw_to_dataset_features(robot.action_features, "action") +obs_features = hw_to_dataset_features(robot.observation_features, "observation") +dataset_features = {**action_features, **obs_features} + +# Create the dataset +dataset = LeRobotDataset.create( + repo_id="/", + fps=FPS, + features=dataset_features, + robot_type=robot.name, + use_videos=True, + image_writer_threads=4, +) + +# Initialize the keyboard listener and rerun visualization +_, events = init_keyboard_listener() +init_rerun(session_name="recording") + +# Connect the robot and teleoperator +robot.connect() +teleop.connect() + +episode_idx = 0 +while episode_idx < NUM_EPISODES and not events["stop_recording"]: + log_say(f"Recording episode {episode_idx + 1} of {NUM_EPISODES}") + + record_loop( + robot=robot, + events=events, + fps=FPS, + teleop=teleop, + dataset=dataset, + control_time_s=EPISODE_TIME_SEC, + single_task=TASK_DESCRIPTION, + display_data=True, + ) + + # Reset the environment if not stopping or re-recording + if not events["stop_recording"] and (episode_idx < NUM_EPISODES - 1 or events["rerecord_episode"]): + log_say("Reset the environment") + record_loop( + robot=robot, + events=events, + fps=FPS, + teleop=teleop, + control_time_s=RESET_TIME_SEC, + single_task=TASK_DESCRIPTION, + display_data=True, + ) + + if events["rerecord_episode"]: + log_say("Re-recording episode") + events["rerecord_episode"] = False + events["exit_early"] = False + dataset.clear_episode_buffer() + continue + + dataset.save_episode() + episode_idx += 1 + +# Clean up +log_say("Stop recording") +robot.disconnect() +teleop.disconnect() +dataset.push_to_hub() +``` + + + + + +#### Dataset upload + +Locally, your dataset is stored in this 
folder: `~/.cache/huggingface/lerobot/{repo-id}`. At the end of data recording, your dataset will be uploaded on your Hugging Face page (e.g. `https://huggingface.co/datasets/${HF_USER}/so101_test`) that you can obtain by running: + +```bash +echo https://huggingface.co/datasets/${HF_USER}/so101_test +``` + +Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example). + +You can look for other LeRobot datasets on the hub by searching for `LeRobot` [tags](https://huggingface.co/datasets?other=LeRobot). + +You can also push your local dataset to the Hub manually, running: + +```bash +huggingface-cli upload ${HF_USER}/record-test ~/.cache/huggingface/lerobot/{repo-id} --repo-type dataset +``` + +#### Record function + +The `record` function provides a suite of tools for capturing and managing data during robot operation: + +##### 1. Data Storage + +- Data is stored using the `LeRobotDataset` format and is stored on disk during recording. +- By default, the dataset is pushed to your Hugging Face page after recording. + - To disable uploading, use `--dataset.push_to_hub=False`. + +##### 2. Checkpointing and Resuming + +- Checkpoints are automatically created during recording. +- If an issue occurs, you can resume by re-running the same command with `--resume=true`. When resuming a recording, `--dataset.num_episodes` must be set to the **number of additional episodes to be recorded**, and not to the targeted total number of episodes in the dataset ! +- To start recording from scratch, **manually delete** the dataset directory. + +##### 3. Recording Parameters + +Set the flow of data recording using command-line arguments: + +- `--dataset.episode_time_s=60` + Duration of each data recording episode (default: **60 seconds**). +- `--dataset.reset_time_s=60` + Duration for resetting the environment after each episode (default: **60 seconds**). +- `--dataset.num_episodes=50` + Total number of episodes to record (default: **50**). + +##### 4. Keyboard Controls During Recording + +Control the data recording flow using keyboard shortcuts: + +- Press **Right Arrow (`→`)**: Early stop the current episode or reset time and move to the next. +- Press **Left Arrow (`←`)**: Cancel the current episode and re-record it. +- Press **Escape (`ESC`)**: Immediately stop the session, encode videos, and upload the dataset. + +#### Tips for gathering data + +Once you're comfortable with data recording, you can create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings. Also make sure the object you are manipulating is visible on the camera's. A good rule of thumb is you should be able to do the task yourself by only looking at the camera images. + +In the following sections, you’ll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions. + +Avoid adding too much variation too quickly, as it may hinder your results. 
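+
+As a concrete example of the resume behaviour described under "Checkpointing and Resuming" above: if the `record-test` session was interrupted, the sketch below records 5 additional episodes on top of what is already saved. It reuses the ports and ids from the recording example (camera and display flags omitted here for brevity); remember that `--dataset.num_episodes` counts additional episodes, not the new total:
+
+```bash
+lerobot-record \
+    --robot.type=so101_follower \
+    --robot.port=/dev/tty.usbmodem585A0076841 \
+    --robot.id=my_awesome_follower_arm \
+    --teleop.type=so101_leader \
+    --teleop.port=/dev/tty.usbmodem58760431551 \
+    --teleop.id=my_awesome_leader_arm \
+    --dataset.repo_id=${HF_USER}/record-test \
+    --dataset.single_task="Grab the black cube" \
+    --dataset.num_episodes=5 \
+    --resume=true
+```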
+ +If you want to dive deeper into this important topic, you can check out the [blog post](https://huggingface.co/blog/lerobot-datasets#what-makes-a-good-dataset) we wrote on what makes a good dataset. + +#### Troubleshooting: + +- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux). + +## Visualize a dataset + +If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by: + +```bash +echo ${HF_USER}/so101_test +``` + +## Replay an episode + +A useful feature is the `replay` function, which allows you to replay any episode that you've recorded or episodes from any dataset out there. This function helps you test the repeatability of your robot's actions and assess transferability across robots of the same model. + +You can replay the first episode on your robot with either the command below or with the API example: + + + +```bash +lerobot-replay \ + --robot.type=so101_follower \ + --robot.port=/dev/tty.usbmodem58760431541 \ + --robot.id=my_awesome_follower_arm \ + --dataset.repo_id=${HF_USER}/record-test \ + --dataset.episode=0 # choose the episode you want to replay +``` + + + + +```python +import time + +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig +from lerobot.robots.so100_follower.so100_follower import SO100Follower +from lerobot.utils.robot_utils import busy_wait +from lerobot.utils.utils import log_say + +episode_idx = 0 + +robot_config = SO100FollowerConfig(port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm") + +robot = SO100Follower(robot_config) +robot.connect() + +dataset = LeRobotDataset("/", episodes=[episode_idx]) +actions = dataset.hf_dataset.select_columns("action") + +log_say(f"Replaying episode {episode_idx}") +for idx in range(dataset.num_frames): + t0 = time.perf_counter() + + action = { + name: float(actions[idx]["action"][i]) for i, name in enumerate(dataset.features["action"]["names"]) + } + robot.send_action(action) + + busy_wait(1.0 / dataset.fps - (time.perf_counter() - t0)) + +robot.disconnect() +``` + + + + + +Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on a Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com). + +## Train a policy + +To train a policy to control your robot, use the [`lerobot-train`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: + +```bash +lerobot-train \ + --dataset.repo_id=${HF_USER}/so101_test \ + --policy.type=act \ + --output_dir=outputs/train/act_so101_test \ + --job_name=act_so101_test \ + --policy.device=cuda \ + --wandb.enable=true \ + --policy.repo_id=${HF_USER}/my_policy +``` + +Let's explain the command: + +1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so101_test`. +2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/act/configuration_act.py). 
Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. +3. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon. +4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. + +Training should take several hours. You will find checkpoints in `outputs/train/act_so101_test/checkpoints`. + +To resume training from a checkpoint, below is an example command to resume from `last` checkpoint of the `act_so101_test` policy: + +```bash +lerobot-train \ + --config_path=outputs/train/act_so101_test/checkpoints/last/pretrained_model/train_config.json \ + --resume=true +``` + +If you do not want to push your model to the hub after training use `--policy.push_to_hub=false`. + +Additionally you can provide extra `tags` or specify a `license` for your model or make the model repo `private` by adding this: `--policy.private=true --policy.tags=\[ppo,rl\] --policy.license=mit` + +#### Train using Google Colab + +If your local computer doesn't have a powerful GPU you could utilize Google Colab to train your model by following the [ACT training notebook](./notebooks#training-act). + +#### Upload policy checkpoints + +Once training is done, upload the latest checkpoint with: + +```bash +huggingface-cli upload ${HF_USER}/act_so101_test \ + outputs/train/act_so101_test/checkpoints/last/pretrained_model +``` + +You can also upload intermediate checkpoints with: + +```bash +CKPT=010000 +huggingface-cli upload ${HF_USER}/act_so101_test${CKPT} \ + outputs/train/act_so101_test/checkpoints/${CKPT}/pretrained_model +``` + +## Run inference and evaluate your policy + +You can use the `record` script from [`lerobot/record.py`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/record.py) with a policy checkpoint as input, to run inference and evaluate your policy. 
For instance, run this command or API example to run inference and record 10 evaluation episodes: + + + +```bash +lerobot-record \ + --robot.type=so100_follower \ + --robot.port=/dev/ttyACM1 \ + --robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \ + --robot.id=my_awesome_follower_arm \ + --display_data=false \ + --dataset.repo_id=${HF_USER}/eval_so100 \ + --dataset.single_task="Put lego brick into the transparent box" \ + # <- Teleop optional if you want to teleoperate in between episodes \ + # --teleop.type=so100_leader \ + # --teleop.port=/dev/ttyACM0 \ + # --teleop.id=my_awesome_leader_arm \ + --policy.path=${HF_USER}/my_policy +``` + + + + +```python +from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.datasets.utils import hw_to_dataset_features +from lerobot.policies.act.modeling_act import ACTPolicy +from lerobot.policies.factory import make_pre_post_processors +from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig +from lerobot.robots.so100_follower.so100_follower import SO100Follower +from lerobot.scripts.lerobot_record import record_loop +from lerobot.utils.control_utils import init_keyboard_listener +from lerobot.utils.utils import log_say +from lerobot.utils.visualization_utils import init_rerun + + +NUM_EPISODES = 5 +FPS = 30 +EPISODE_TIME_SEC = 60 +TASK_DESCRIPTION = "My task description" +HF_MODEL_ID = "/" +HF_DATASET_ID = "/" + +# Create the robot configuration +camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} +robot_config = SO100FollowerConfig( + port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm", cameras=camera_config +) + +# Initialize the robot +robot = SO100Follower(robot_config) + +# Initialize the policy +policy = ACTPolicy.from_pretrained(HF_MODEL_ID) + +# Configure the dataset features +action_features = hw_to_dataset_features(robot.action_features, "action") +obs_features = hw_to_dataset_features(robot.observation_features, "observation") +dataset_features = {**action_features, **obs_features} + +# Create the dataset +dataset = LeRobotDataset.create( + repo_id=HF_DATASET_ID, + fps=FPS, + features=dataset_features, + robot_type=robot.name, + use_videos=True, + image_writer_threads=4, +) + +# Initialize the keyboard listener and rerun visualization +_, events = init_keyboard_listener() +init_rerun(session_name="recording") + +# Connect the robot +robot.connect() + +preprocessor, postprocessor = make_pre_post_processors( + policy_cfg=policy, + pretrained_path=HF_MODEL_ID, + dataset_stats=dataset.meta.stats, +) + +for episode_idx in range(NUM_EPISODES): + log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}") + + # Run the policy inference loop + record_loop( + robot=robot, + events=events, + fps=FPS, + policy=policy, + preprocessor=preprocessor, + postprocessor=postprocessor, + dataset=dataset, + control_time_s=EPISODE_TIME_SEC, + single_task=TASK_DESCRIPTION, + display_data=True, + ) + + dataset.save_episode() + +# Clean up +robot.disconnect() +dataset.push_to_hub() +``` + + + + + +As you can see, it's almost the same command as previously used to record your training dataset. Two things changed: + +1. 
There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_act_so101_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so101_test`). +2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so101_test`). diff --git a/docs/source/il_sim.mdx b/docs/source/il_sim.mdx new file mode 100644 index 0000000000000000000000000000000000000000..eebd7f136e897c756605a013fd202dfef802d59c --- /dev/null +++ b/docs/source/il_sim.mdx @@ -0,0 +1,220 @@ +# Imitation Learning in Sim + +This tutorial will explain how to train a neural network to control a robot in simulation with imitation learning. + +**You'll learn:** + +1. How to record a dataset in simulation with [gym-hil](https://github.com/huggingface/gym-hil) and visualize the dataset. +2. How to train a policy using your data. +3. How to evaluate your policy in simulation and visualize the results. + +For the simulation environment we use the same [repo](https://github.com/huggingface/gym-hil) that is also being used by the Human-In-the-Loop (HIL) reinforcement learning algorithm. +This environment is based on [MuJoCo](https://mujoco.org) and allows you to record datasets in LeRobotDataset format. +Teleoperation is easiest with a controller like the Logitech F710, but you can also use your keyboard if you are up for the challenge. + +## Installation + +First, install the `gym_hil` package within the LeRobot environment, go to your LeRobot folder and run this command: + +```bash +pip install -e ".[hilserl]" +``` + +## Teleoperate and Record a Dataset + +To use `gym_hil` with LeRobot, you need to use a configuration file. An example config file can be found [here](https://huggingface.co/datasets/lerobot/config_examples/resolve/main/sim_il/env_config.json). + +To teleoperate and collect a dataset, we need to modify this config file. Here's an example configuration for imitation learning data collection: + +```json +{ + "env": { + "type": "gym_manipulator", + "name": "gym_hil", + "task": "PandaPickCubeGamepad-v0", + "fps": 10 + }, + "dataset": { + "repo_id": "your_username/il_gym", + "root": null, + "task": "pick_cube", + "num_episodes_to_record": 30, + "replay_episode": null, + "push_to_hub": true + }, + "mode": "record", + "device": "cuda" +} +``` + +Key configuration points: + +- Set your `repo_id` in the `dataset` section: `"repo_id": "your_username/il_gym"` +- Set `num_episodes_to_record: 30` to collect 30 demonstration episodes +- Ensure `mode` is set to `"record"` +- If you don't have an NVIDIA GPU, change `"device": "cuda"` to `"mps"` for macOS or `"cpu"` +- To use keyboard instead of gamepad, change `"task"` to `"PandaPickCubeKeyboard-v0"` + +Then we can run this command to start: + + + + +```bash +python -m lerobot.rl.gym_manipulator --config_path path/to/env_config_gym_hil_il.json +``` + + + + +```bash +mjpython -m lerobot.rl.gym_manipulator --config_path path/to/env_config_gym_hil_il.json +``` + + + + +Once rendered you can teleoperate the robot with the gamepad or keyboard, below you can find the gamepad/keyboard controls. + +Note that to teleoperate the robot you have to hold the "Human Take Over Pause Policy" Button `RB` to enable control! + +**Gamepad Controls** + +

+ Figure shows the control mappings on a Logitech gamepad. +

+

+ Gamepad button mapping for robot control and episode management +

+ +**Keyboard controls** + +For keyboard controls use the `spacebar` to enable control and the following keys to move the robot: + +```bash + Arrow keys: Move in X-Y plane + Shift and Shift_R: Move in Z axis + Right Ctrl and Left Ctrl: Open and close gripper + ESC: Exit +``` + +## Visualize a dataset + +If you uploaded your dataset to the hub you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id. + +

+ Figure shows the dataset visualizer +

+

+ Dataset visualizer +

+ +## Train a policy + +To train a policy to control your robot, use the [`lerobot-train`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: + +```bash +lerobot-train \ + --dataset.repo_id=${HF_USER}/il_gym \ + --policy.type=act \ + --output_dir=outputs/train/il_sim_test \ + --job_name=il_sim_test \ + --policy.device=cuda \ + --wandb.enable=true +``` + +Let's explain the command: + +1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/il_gym`. +2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. +3. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon. +4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. + +Training should take several hours, 100k steps (which is the default) will take about 1h on Nvidia A100. You will find checkpoints in `outputs/train/il_sim_test/checkpoints`. + +#### Train using Collab + +If your local computer doesn't have a powerful GPU you could utilize Google Collab to train your model by following the [ACT training notebook](./notebooks#training-act). + +#### Upload policy checkpoints + +Once training is done, upload the latest checkpoint with: + +```bash +huggingface-cli upload ${HF_USER}/il_sim_test \ + outputs/train/il_sim_test/checkpoints/last/pretrained_model +``` + +You can also upload intermediate checkpoints with: + +```bash +CKPT=010000 +huggingface-cli upload ${HF_USER}/il_sim_test${CKPT} \ + outputs/train/il_sim_test/checkpoints/${CKPT}/pretrained_model +``` + +## Evaluate your policy in Sim + +To evaluate your policy we have to use a configuration file. An example can be found [here](https://huggingface.co/datasets/lerobot/config_examples/resolve/main/sim_il/eval_config.json). + +Here's an example evaluation configuration: + +```json +{ + "env": { + "type": "gym_manipulator", + "name": "gym_hil", + "task": "PandaPickCubeGamepad-v0", + "fps": 10 + }, + "dataset": { + "repo_id": "your_username/il_sim_dataset", + "dataset_root": null, + "task": "pick_cube" + }, + "pretrained_policy_name_or_path": "your_username/il_sim_model", + "device": "cuda" +} +``` + +Make sure to replace: + +- `repo_id` with the dataset you trained on (e.g., `your_username/il_sim_dataset`) +- `pretrained_policy_name_or_path` with your model ID (e.g., `your_username/il_sim_model`) + +Then you can run this command to visualize your trained policy + + + + +```bash +python -m lerobot.rl.eval_policy --config_path=path/to/eval_config_gym_hil.json +``` + + + + +```bash +mjpython -m lerobot.rl.eval_policy --config_path=path/to/eval_config_gym_hil.json +``` + + + + +> [!WARNING] +> While the main workflow of training ACT in simulation is straightforward, there is significant room for exploring how to set up the task, define the initial state of the environment, and determine the type of data required during collection to learn the most effective policy. 
If your trained policy doesn't perform well, investigate the quality of the dataset it was trained on using our visualizers, as well as the action values and various hyperparameters related to ACT and the simulation. + +Congrats 🎉, you have finished this tutorial. If you want to continue with using LeRobot in simulation follow this [Tutorial on reinforcement learning in sim with HIL-SERL](https://huggingface.co/docs/lerobot/hilserl_sim) + +> [!TIP] +> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb). diff --git a/docs/source/implement_your_own_processor.mdx b/docs/source/implement_your_own_processor.mdx new file mode 100644 index 0000000000000000000000000000000000000000..f59ff3f0bf3c5f383b894088f6e5e3da6b17df22 --- /dev/null +++ b/docs/source/implement_your_own_processor.mdx @@ -0,0 +1,273 @@ +# Implement your own Robot Processor + +In this tutorial, you'll learn how to implement your own Robot Processor. +It begins by exploring the need for a custom processor, then uses the `NormalizerProcessorStep` as the running example to explain how to implement, configure, and serialize a processor. Finally, it lists all helper processors that ship with LeRobot. + +## Why would you need a custom processor? + +In most cases, when reading raw data from sensors or when models output actions, you need to process this data to make it compatible with your target system. For example, a common need is normalizing data ranges to make them suitable for neural networks. + +LeRobot's `NormalizerProcessorStep` handles this crucial task: + +```python +# Input: raw joint positions in [0, 180] degrees +raw_action = torch.tensor([90.0, 45.0, 135.0]) + +# After processing: normalized to [-1, 1] range for model training +normalizer = NormalizerProcessorStep(features=features, norm_map=norm_map, stats=dataset_stats) +normalized_result = normalizer(transition) +# ... +``` + +Other common processing needs include: + +- **Device placement**: Moving tensors between CPU/GPU and converting data types +- **Format conversion**: Transforming between different data structures +- **Batching**: Adding/removing batch dimensions for model compatibility +- **Safety constraints**: Applying limits to robot commands + +```python +# Example pipeline combining multiple processors +pipeline = PolicyProcessorPipeline([ + RenameObservationsProcessorStep(rename_map={}), + AddBatchDimensionProcessorStep(), + NormalizerProcessorStep(features=features, stats=stats), + DeviceProcessorStep(device="cuda"), + # ... +]) +``` + +LeRobot provides a pipeline mechanism to implement sequences of processing steps for both input data and output actions, making it easy to compose these transformations in the right order for optimal performance. + +## How to implement your own processor? + +We'll use the `NormalizerProcessorStep` as our main example because it demonstrates essential processor patterns including state management, configuration serialization, and tensor handling that you'll commonly need. + +Prepare the sequence of processing steps necessary for your problem. A processor step is a class that implements the following methods: + +- `__call__`: implements the processing step for the input transition. +- `get_config`: gets the configuration of the processor step. +- `state_dict`: gets the state of the processor step. +- `load_state_dict`: loads the state of the processor step. +- `reset`: resets the state of the processor step. 
+- `feature_contract`: displays the modification to the feature space during the processor step. + +### Implement the `__call__` method + +The `__call__` method is the core of your processor step. It takes an `EnvTransition` and returns a modified `EnvTransition`. Here's how the `NormalizerProcessorStep` works: + +```python +@dataclass +@ProcessorStepRegistry.register("normalizer_processor") +class NormalizerProcessorStep(ProcessorStep): + """Normalize observations/actions using dataset statistics.""" + + features: dict[str, PolicyFeature] + norm_map: dict[FeatureType, NormalizationMode] + stats: dict[str, dict[str, Any]] | None = None + eps: float = 1e-8 + _tensor_stats: dict = field(default_factory=dict, init=False, repr=False) + + def __post_init__(self): + """Convert stats to tensors for efficient computation.""" + self.stats = self.stats or {} + self._tensor_stats = to_tensor(self.stats, device=self.device, dtype=torch.float32) + + def __call__(self, transition: EnvTransition) -> EnvTransition: + new_transition = transition.copy() + # Normalize observations + # ... + # Normalize action + # ... + return new_transition + +``` + +See the full implementation in `src/lerobot/processor/normalize_processor.py` for complete details. + +**Key principles:** + +- **Always use `transition.copy()`** to avoid side effects +- **Handle both observations and actions** consistently +- **Separate config from state**: `get_config()` returns JSON-serializable params, `state_dict()` returns tensors +- **Convert stats to tensors** in `__post_init__()` for efficient computation + +### Configuration and State Management + +Processors support serialization through three methods that separate configuration from tensor state. The `NormalizerProcessorStep` demonstrates this perfectly - it carries dataset statistics (tensors) in its state, and hyperparameters in its config: + +```python +# Continuing the NormalizerProcessorStep example... + +def get_config(self) -> dict[str, Any]: + """JSON-serializable configuration (no tensors).""" + return { + "eps": self.eps, + "features": {k: {"type": v.type.value, "shape": v.shape} for k, v in self.features.items()}, + "norm_map": {ft.value: nm.value for ft, nm in self.norm_map.items()}, + # ... + } + +def state_dict(self) -> dict[str, torch.Tensor]: + """Tensor state only (e.g., dataset statistics).""" + flat: dict[str, torch.Tensor] = {} + for key, sub in self._tensor_stats.items(): + for stat_name, tensor in sub.items(): + flat[f"{key}.{stat_name}"] = tensor.cpu() # Always save to CPU + return flat + +def load_state_dict(self, state: dict[str, torch.Tensor]) -> None: + """Restore tensor state at runtime.""" + self._tensor_stats.clear() + for flat_key, tensor in state.items(): + key, stat_name = flat_key.rsplit(".", 1) + # Load to processor's configured device + self._tensor_stats.setdefault(key, {})[stat_name] = tensor.to( + dtype=torch.float32, device=self.device + ) + # ... +``` + +**Usage:** + +```python +# Save (e.g., inside a policy) +config = normalizer.get_config() +tensors = normalizer.state_dict() + +# Restore (e.g., loading a pretrained policy) +new_normalizer = NormalizerProcessorStep(**config) +new_normalizer.load_state_dict(tensors) +# Now new_normalizer has the same stats and configuration +``` + +### Transform features + +The `transform_features` method defines how your processor transforms feature names and shapes. This is crucial for policy configuration and debugging. 
+ +For `NormalizerProcessorStep`, features are typically preserved unchanged since normalization doesn't alter keys or shapes: + +```python +def transform_features(self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]]) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]: + """Normalization preserves all feature definitions.""" + return features # No changes to feature structure + # ... +``` + +When your processor renames or reshapes data, implement this method to reflect the mapping for downstream components. For example, a simple rename processor: + +```python +def transform_features(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]: + # Simple renaming + if "pixels" in features: + features["observation.image"] = features.pop("pixels") + + # Pattern-based renaming + for key in list(features.keys()): + if key.startswith("env_state."): + suffix = key[len("env_state."):] + features[f"observation.{suffix}"] = features.pop(key) + # ... + + return features +``` + +**Key principles:** + +- Use `features.pop(old_key)` to remove and get the old feature +- Use `features[new_key] = old_feature` to add the renamed feature +- Always return the modified features dictionary +- Document transformations clearly in the docstring + +### Using overrides + +You can override step parameters at load-time using `overrides`. This is handy for non-serializable objects or site-specific settings. It works both in policy factories and with `DataProcessorPipeline.from_pretrained(...)`. + +**Foundational model adaptation**: This is particularly useful when working with foundational pretrained policies where you rarely have access to the original training statistics. You can inject your own dataset statistics to adapt the normalizer to your specific robot or environment data. + +Example: during policy evaluation on the robot, override the device and rename map. +Use this to run a policy trained on CUDA on a CPU-only robot, or to remap camera keys when the robot uses different names than the dataset. + +Direct usage with `from_pretrained`: + +```python +from lerobot.processor import RobotProcessorPipeline + +# Load a foundational policy trained on diverse robot data +# but adapt normalization to your specific robot/environment +new_stats = LeRobotDataset(repo_id="username/my-dataset").meta.stats +processor = RobotProcessorPipeline.from_pretrained( + "huggingface/foundational-robot-policy", # Pretrained foundation model + overrides={ + "normalizer_processor": {"stats": new_stats}, # Inject your robot's statistics + "device_processor": {"device": "cuda:0"}, # registry name for registered steps + "rename_processor": {"rename_map": robot_key_map}, # Map your robot's observation keys + # ... + }, +) +``` + +## Best Practices + +Based on analysis of all LeRobot processor implementations, here are the key patterns and practices: + +### 1. **Safe Data Handling** + +Always create copies of input data to avoid unintended side effects. Use `transition.copy()` and `observation.copy()` rather than modifying data in-place. This prevents your processor from accidentally affecting other components in the pipeline. + +Check for required data before processing and handle missing data gracefully. If your processor expects certain keys (like `"pixels"` for image processing), validate their presence first. For optional data, use safe access patterns like `transition.get()` and handle `None` values appropriately. 
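+
+To make the safe-handling pattern above concrete, here is a minimal sketch of a step that validates its input before doing any work. It reuses the `ProcessorStep`, `EnvTransition`, and `ProcessorStepRegistry` names from the earlier examples, but the import path, the dict-style transition access, and the registry name are assumptions for illustration, not LeRobot's exact API:
+
+```python
+from dataclasses import dataclass
+
+# Assumed import path, following the pipeline examples above.
+from lerobot.processor import EnvTransition, ProcessorStep, ProcessorStepRegistry
+
+
+@dataclass
+@ProcessorStepRegistry.register("docs_example/require_pixels")
+class RequirePixelsProcessorStep(ProcessorStep):
+    """Illustrative step: fail fast, with a clear message, if 'pixels' is missing.
+
+    The other ProcessorStep methods from the checklist above are omitted for brevity.
+    """
+
+    def __call__(self, transition: EnvTransition) -> EnvTransition:
+        new_transition = transition.copy()  # never mutate the caller's data
+        observation = new_transition.get("observation")
+        if observation is None:
+            # Optional data: nothing to validate, pass the transition through.
+            return new_transition
+        if "pixels" not in observation:
+            # Actionable error: say what was expected and what was found instead.
+            raise KeyError(
+                f"RequirePixelsProcessorStep expects a 'pixels' key, got {sorted(observation)}"
+            )
+        return new_transition
+```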
+ +When data validation fails, provide clear, actionable error messages that help users understand what went wrong and how to fix it. + +### 2. **Choose Appropriate Base Classes** + +LeRobot provides specialized base classes that reduce boilerplate code and ensure consistency. Use `ObservationProcessorStep` when you only need to modify observations, `ActionProcessorStep` for action-only processing, and `RobotActionProcessorStep` specifically for dictionary-based robot actions. + +Only inherit directly from `ProcessorStep` when you need full control over the entire transition or when processing multiple transition components simultaneously. The specialized base classes handle the transition management for you and provide type safety. + +### 3. **Registration and Naming** + +Register your processors with descriptive, namespaced names using `@ProcessorStepRegistry.register()`. Use organization prefixes like `"robotics_lab/safety_clipper"` or `"acme_corp/vision_enhancer"` to avoid naming conflicts. Avoid generic names like `"processor"` or `"step"` that could clash with other implementations. + +Good registration makes your processors discoverable and enables clean serialization/deserialization when saving and loading pipelines. + +### 4. **State Management Patterns** + +Distinguish between configuration parameters (JSON-serializable values) and internal state (tensors, buffers). Use dataclass fields with `init=False, repr=False` for internal state that shouldn't appear in the constructor or string representation. + +Implement the `reset()` method to clear internal state between episodes. This is crucial for stateful processors that accumulate data over time, like moving averages or temporal filters. + +Remember that `get_config()` should only return JSON-serializable configuration, while `state_dict()` handles tensor state separately. + +### 5. **Input Validation and Error Handling** + +Validate input types and shapes before processing. Check tensor properties like `dtype` and dimensions to ensure compatibility with your algorithms. For robot actions, verify that required pose components or joint values are present and within expected ranges. + +Use early returns for edge cases where no processing is needed. Provide clear, descriptive error messages that include the expected vs. actual data types or shapes. This makes debugging much easier for users. + +### 6. **Device and Dtype Awareness** + +Design your processors to automatically adapt to the device and dtype of input tensors. Internal tensors (like normalization statistics) should match the input tensor's device and dtype to ensure compatibility with multi-GPU training, mixed precision, and distributed setups. + +Implement a `to()` method that moves your processor's internal state to the specified device. Check device/dtype compatibility at runtime and automatically migrate internal state when needed. This pattern enables seamless operation across different hardware configurations without manual intervention. + +## Conclusion + +You now have all the tools to implement custom processors in LeRobot! The key steps are: + +1. **Define your processor** as a dataclass with the required methods (`__call__`, `get_config`, `state_dict`, `load_state_dict`, `reset`, `transform_features`) +2. **Register it** using `@ProcessorStepRegistry.register("name")` for discoverability +3. **Integrate it** into a `DataProcessorPipeline` with other processing steps +4. **Use base classes** like `ObservationProcessorStep` when possible to reduce boilerplate +5. 
**Implement device/dtype awareness** to support multi-GPU and mixed precision setups + +The processor system is designed to be modular and composable, allowing you to build complex data processing pipelines from simple, focused components. Whether you're preprocessing sensor data for training or post-processing model outputs for robot execution, custom processors give you the flexibility to handle any data transformation your robotics application requires. + +Key principles for robust processors: + +- **Device/dtype adaptation**: Internal tensors should match input tensors +- **Clear error messages**: Help users understand what went wrong +- **Base class usage**: Leverage specialized base classes to reduce boilerplate +- **Feature contracts**: Declare data structure changes with `transform_features()` + +Start simple, test thoroughly, and ensure your processors work seamlessly across different hardware configurations! diff --git a/docs/source/index.mdx b/docs/source/index.mdx new file mode 100644 index 0000000000000000000000000000000000000000..5f214f9a2dcca17df60ba56842e0556d08737cfb --- /dev/null +++ b/docs/source/index.mdx @@ -0,0 +1,23 @@ + + +# LeRobot + +**State-of-the-art machine learning for real-world robotics** + +🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier for entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models. + +🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning. + +🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulated environments so that everyone can get started. + +🤗 LeRobot hosts pretrained models and datasets on the LeRobot HuggingFace page. + +Join the LeRobot community on [Discord](https://discord.gg/s3KuuzsPFb) diff --git a/docs/source/installation.mdx b/docs/source/installation.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b7b0d57c94280aac33ca1127a0704724fcd6a4d1 --- /dev/null +++ b/docs/source/installation.mdx @@ -0,0 +1,127 @@ +# Installation + +## Install [`miniforge`](https://conda-forge.org/download/) + +```bash +wget "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh" +bash Miniforge3-$(uname)-$(uname -m).sh +``` + +## Environment Setup + +Create a virtual environment with Python 3.10, using conda: + +```bash +conda create -y -n lerobot python=3.10 +``` + +Then activate your conda environment, you have to do this each time you open a shell to use lerobot: + +```bash +conda activate lerobot +``` + +When using `conda`, install `ffmpeg` in your environment: + +```bash +conda install ffmpeg -c conda-forge +``` + +> [!TIP] +> This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. 
If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can: +> +> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using: +> +> ```bash +> conda install ffmpeg=7.1.1 -c conda-forge +> ``` +> +> - _[On Linux only]_ If you want to bring your own ffmpeg: Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`. + +## Install LeRobot 🤗 + +### From Source + +First, clone the repository and navigate into the directory: + +```bash +git clone https://github.com/huggingface/lerobot.git +cd lerobot +``` + +Then, install the library in editable mode. This is useful if you plan to contribute to the code. + +```bash +pip install -e . +``` + +### Installation from PyPI + +**Core Library:** +Install the base package with: + +```bash +pip install lerobot +``` + +_This installs only the default dependencies._ + +**Extra Features:** +To install additional functionality, use one of the following: + +```bash +pip install 'lerobot[all]' # All available features +pip install 'lerobot[aloha,pusht]' # Specific features (Aloha & Pusht) +pip install 'lerobot[feetech]' # Feetech motor support +``` + +_Replace `[...]` with your desired features._ + +**Available Tags:** +For a full list of optional dependencies, see: +https://pypi.org/project/lerobot/ + +> [!NOTE] +> For lerobot 0.4.0, if you want to install pi, you will have to do: `pip install "lerobot[pi]@git+https://github.com/huggingface/lerobot.git"` + +### Troubleshooting + +If you encounter build errors, you may need to install additional dependencies: `cmake`, `build-essential`, and `ffmpeg libs`. +To install these for linux run: + +```bash +sudo apt-get install cmake build-essential python-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev pkg-config +``` + +For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg) + +## Optional dependencies + +LeRobot provides optional extras for specific functionalities. Multiple extras can be combined (e.g., `.[aloha,feetech]`). For all available extras, refer to `pyproject.toml`. + +### Simulations + +Install environment packages: `aloha` ([gym-aloha](https://github.com/huggingface/gym-aloha)), or `pusht` ([gym-pusht](https://github.com/huggingface/gym-pusht)) +Example: + +```bash +pip install -e ".[aloha]" # or "[pusht]" for example +``` + +### Motor Control + +For Koch v1.1 install the Dynamixel SDK, for SO100/SO101/Moss install the Feetech SDK. + +```bash +pip install -e ".[feetech]" # or "[dynamixel]" for example +``` + +### Experiment Tracking + +To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with + +```bash +wandb login +``` + +You can now assemble your robot if it's not ready yet, look for your robot type on the left. Then follow the link below to use Lerobot with your robot. 
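+Before you do, you can quickly confirm the install works from your activated environment (assuming the package exposes a `__version__` attribute, which may vary between releases):
+
+```bash
+python -c "import lerobot; print(lerobot.__version__)"
+```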
diff --git a/docs/source/integrate_hardware.mdx b/docs/source/integrate_hardware.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7626b80367bc1b7d14337eb8e6631cb14608e38e --- /dev/null +++ b/docs/source/integrate_hardware.mdx @@ -0,0 +1,476 @@ +# Bring Your Own Hardware + +This tutorial will explain how to integrate your own robot design into the LeRobot ecosystem and have it access all of our tools (data collection, control pipelines, policy training and inference). + +To that end, we provide the [`Robot`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/robots/robot.py) base class in the LeRobot which specifies a standard interface for physical robot integration. Let's see how to implement it. + +## Prerequisites + +- Your own robot which exposes a communication interface (e.g. serial, CAN, TCP) +- A way to read sensor data and send motor commands programmatically, e.g. manufacturer's SDK or API, or your own protocol implementation. +- LeRobot installed in your environment. Follow our [Installation Guide](./installation). + +## Choose your motors + +If you're using Feetech or Dynamixel motors, LeRobot provides built-in bus interfaces: + +- [`FeetechMotorsBus`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/motors/feetech/feetech.py) – for controlling Feetech servos +- [`DynamixelMotorsBus`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/motors/dynamixel/dynamixel.py) – for controlling Dynamixel servos + +Please refer to the [`MotorsBus`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/motors/motors_bus.py) abstract class to learn about its API. +For a good example of how it can be used, you can have a look at our own [SO101 follower implementation](https://github.com/huggingface/lerobot/blob/main/src/lerobot/robots/so101_follower/so101_follower.py) + +Use these if compatible. Otherwise, you'll need to find or write a Python interface (not covered in this tutorial): + +- Find an existing SDK in Python (or use bindings to C/C++) +- Or implement a basic communication wrapper (e.g., via pyserial, socket, or CANopen) + +You're not alone—many community contributions use custom boards or firmware! + +For Feetech and Dynamixel, we currently support these servos: - Feetech: - STS & SMS series (protocol 0): `sts3215`, `sts3250`, `sm8512bl` - SCS series (protocol 1): `scs0009` - Dynamixel (protocol 2.0 only): `xl330-m077`, `xl330-m288`, `xl430-w250`, `xm430-w350`, `xm540-w270`, `xc430-w150` + +If you are using Feetech or Dynamixel servos that are not in this list, you can add those in the [Feetech table](https://github.com/huggingface/lerobot/blob/main/src/lerobot/motors/feetech/tables.py) or [Dynamixel table](https://github.com/huggingface/lerobot/blob/main/src/lerobot/motors/dynamixel/tables.py). Depending on the model, this will require you to add model-specific information. In most cases though, there shouldn't be a lot of additions to do. + +In the next sections, we'll use a `FeetechMotorsBus` as the motors interface for the examples. Replace it and adapt to your motors if necessary. + +## Step 1: Subclass the `Robot` Interface + +You’ll first need to specify the config class and a string identifier (`name`) for your robot. If your robot has special needs that you'd like to be able to change easily, it should go here (e.g. port/address, baudrate). 
+ +Here, we'll add the port name and one camera by default for our robot: + + +```python +from dataclasses import dataclass, field + +from lerobot.cameras import CameraConfig +from lerobot.cameras.opencv import OpenCVCameraConfig +from lerobot.robots import RobotConfig + + +@RobotConfig.register_subclass("my_cool_robot") +@dataclass +class MyCoolRobotConfig(RobotConfig): + port: str + cameras: dict[str, CameraConfig] = field( + default_factory={ + "cam_1": OpenCVCameraConfig( + index_or_path=2, + fps=30, + width=480, + height=640, + ), + } + ) +``` + + +[Cameras tutorial](./cameras) to understand how to detect and add your camera. + +Next, we'll create our actual robot class which inherits from `Robot`. This abstract class defines a contract you must follow for your robot to be usable with the rest of the LeRobot tools. + +Here we'll create a simple 5-DoF robot with one camera. It could be a simple arm but notice that the `Robot` abstract class does not assume anything on your robot's form factor. You can let you imagination run wild when designing new robots! + + +```python +from lerobot.cameras import make_cameras_from_configs +from lerobot.motors import Motor, MotorNormMode +from lerobot.motors.feetech import FeetechMotorsBus +from lerobot.robots import Robot + +class MyCoolRobot(Robot): + config_class = MyCoolRobotConfig + name = "my_cool_robot" + + def __init__(self, config: MyCoolRobotConfig): + super().__init__(config) + self.bus = FeetechMotorsBus( + port=self.config.port, + motors={ + "joint_1": Motor(1, "sts3250", MotorNormMode.RANGE_M100_100), + "joint_2": Motor(2, "sts3215", MotorNormMode.RANGE_M100_100), + "joint_3": Motor(3, "sts3215", MotorNormMode.RANGE_M100_100), + "joint_4": Motor(4, "sts3215", MotorNormMode.RANGE_M100_100), + "joint_5": Motor(5, "sts3215", MotorNormMode.RANGE_M100_100), + }, + calibration=self.calibration, + ) + self.cameras = make_cameras_from_configs(config.cameras) +``` + + +## Step 2: Define Observation and Action Features + +These two properties define the _interface contract_ between your robot and tools that consume it (such as data collection or learning pipelines). + +> [!WARNING] +> Note that these properties must be callable even if the robot is not yet connected, so avoid relying on runtime hardware state to define them. + +### `observation_features` + +This property should return a dictionary describing the structure of sensor outputs from your robot. The keys match what `get_observation()` returns, and the values describe either the shape (for arrays/images) or the type (for simple values). + +Example for our 5-DoF arm with one camera: + + +```python +@property +def _motors_ft(self) -> dict[str, type]: + return { + "joint_1.pos": float, + "joint_2.pos": float, + "joint_3.pos": float, + "joint_4.pos": float, + "joint_5.pos": float, + } + +@property +def _cameras_ft(self) -> dict[str, tuple]: + return { + cam: (self.cameras[cam].height, self.cameras[cam].width, 3) for cam in self.cameras + } + +@property +def observation_features(self) -> dict: + return {**self._motors_ft, **self._cameras_ft} +``` + + +In this case, observations consist of a simple dict storing each motor's position and a camera image. + +### `action_features` + +This property describes the commands your robot expects via `send_action()`. Again, keys must match the expected input format, and values define the shape/type of each command. 
+ +Here, we simply use the same joints proprioceptive features (`self._motors_ft`) as with `observation_features`: the action sent will simply the goal position for each motor. + + +```python +def action_features(self) -> dict: + return self._motors_ft +``` + + +## Step 3: Handle Connection and Disconnection + +These methods should handle opening and closing communication with your hardware (e.g. serial ports, CAN interfaces, USB devices, cameras). + +### `is_connected` + +This property should simply reflect that communication with the robot's hardware is established. When this property is `True`, it should be possible to read and write to the hardware using `get_observation()` and `send_action()`. + + +```python +@property +def is_connected(self) -> bool: + return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values()) +``` + + +### `connect()` + +This method should establish communication with the hardware. Moreover, if your robot needs calibration and is not calibrated, it should start a calibration procedure by default. If your robot needs some specific configuration, this should also be called here. + + +```python +def connect(self, calibrate: bool = True) -> None: + self.bus.connect() + if not self.is_calibrated and calibrate: + self.calibrate() + + for cam in self.cameras.values(): + cam.connect() + + self.configure() +``` + + +### `disconnect()` + +This method should gracefully terminate communication with the hardware: free any related resources (threads or processes), close ports, etc. + +Here, we already handle this in our `MotorsBus` and `Camera` classes so we just need to call their own `disconnect()` methods: + + +```python +def disconnect(self) -> None: + self.bus.disconnect() + for cam in self.cameras.values(): + cam.disconnect() +``` + + +## Step 4: Support Calibration and Configuration + +LeRobot supports saving and loading calibration data automatically. This is useful for joint offsets, zero positions, or sensor alignment. + +> Note that depending on your hardware, this may not apply. If that's the case, you can simply leave these methods as no-ops: + + +```python +@property +def is_calibrated(self) -> bool: + return True + +def calibrate(self) -> None: + pass +``` + + +### `is_calibrated` + +This should reflect whether your robot has the required calibration loaded. + + +```python +@property +def is_calibrated(self) -> bool: + return self.bus.is_calibrated +``` + + +### `calibrate()` + +The goal of the calibration is twofold: + +- Know the physical range of motion of each motors in order to only send commands within this range. +- Normalize raw motors positions to sensible continuous values (e.g. percentages, degrees) instead of arbitrary discrete value dependant on the specific motor used that will not replicate elsewhere. + +It should implement the logic for calibration (if relevant) and update the `self.calibration` dictionary. If you are using Feetech or Dynamixel motors, our bus interfaces already include methods to help with this. + + +```python +def calibrate(self) -> None: + self.bus.disable_torque() + for motor in self.bus.motors: + self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value) + + input(f"Move {self} to the middle of its range of motion and press ENTER....") + homing_offsets = self.bus.set_half_turn_homings() + + print( + "Move all joints sequentially through their entire ranges " + "of motion.\nRecording positions. Press ENTER to stop..." 
+ ) + range_mins, range_maxes = self.bus.record_ranges_of_motion() + + self.calibration = {} + for motor, m in self.bus.motors.items(): + self.calibration[motor] = MotorCalibration( + id=m.id, + drive_mode=0, + homing_offset=homing_offsets[motor], + range_min=range_mins[motor], + range_max=range_maxes[motor], + ) + + self.bus.write_calibration(self.calibration) + self._save_calibration() + print("Calibration saved to", self.calibration_fpath) +``` + + +### `configure()` + +Use this to set up any configuration for your hardware (servos control modes, controller gains, etc.). This should usually be run at connection time and be idempotent. + + +```python +def configure(self) -> None: + with self.bus.torque_disabled(): + self.bus.configure_motors() + for motor in self.bus.motors: + self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value) + self.bus.write("P_Coefficient", motor, 16) + self.bus.write("I_Coefficient", motor, 0) + self.bus.write("D_Coefficient", motor, 32) +``` + + +## Step 5: Implement Sensors Reading and Action Sending + +These are the most important runtime functions: the core I/O loop. + +### `get_observation()` + +Returns a dictionary of sensor values from the robot. These typically include motor states, camera frames, various sensors, etc. In the LeRobot framework, these observations are what will be fed to a policy in order to predict the actions to take. The dictionary keys and structure must match `observation_features`. + + +```python +def get_observation(self) -> dict[str, Any]: + if not self.is_connected: + raise ConnectionError(f"{self} is not connected.") + + # Read arm position + obs_dict = self.bus.sync_read("Present_Position") + obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()} + + # Capture images from cameras + for cam_key, cam in self.cameras.items(): + obs_dict[cam_key] = cam.async_read() + + return obs_dict +``` + + +### `send_action()` + +Takes a dictionary that matches `action_features`, and sends it to your hardware. You can add safety limits (clipping, smoothing) and return what was actually sent. + +For simplicity, we won't be adding any modification of the actions in our example here. + + +```python +def send_action(self, action: dict[str, Any]) -> dict[str, Any]: + goal_pos = {key.removesuffix(".pos"): val for key, val in action.items()} + + # Send goal position to the arm + self.bus.sync_write("Goal_Position", goal_pos) + + return action +``` + + +## Adding a Teleoperator + +For implementing teleoperation devices, we also provide a [`Teleoperator`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/teleoperators/teleoperator.py) base class. This class is very similar to the `Robot` base class and also doesn't assume anything on form factor. + +The main differences are in the I/O functions: a teleoperator allows you to produce action via `get_action` and can receive feedback actions via `send_feedback`. Feedback could be anything controllable on the teleoperation device that could help the person controlling it understand the consequences of the actions sent. Think motion/force feedback on a leader arm, vibrations on a gamepad controller for example. To implement a teleoperator, you can follow this same tutorial and adapt it for these two methods. + +## Using Your Own `LeRobot` Devices 🔌 + +You can easily extend `lerobot` with your own custom hardware—be it a camera, robot, or teleoperation device—by creating a separate, installable Python package. 
If you follow a few simple conventions, the `lerobot` command-line tools (like `lerobot-teleop` and `lerobot-record`) will **automatically discover and integrate your creations** without requiring any changes to the `lerobot` source code. + +This guide outlines the conventions your plugin must follow. + +### The 4 Core Conventions + +To ensure your custom device is discoverable, you must adhere to the following four rules. + +#### 1\. Create an Installable Package with a Specific Prefix + +Your project must be a standard, installable Python package. Crucially, the name of your package (as defined in `pyproject.toml` or `setup.py`) must begin with one of these prefixes: + +- `lerobot_robot_` for a robot. +- `lerobot_camera_` for a camera. +- `lerobot_teleoperator_` for a teleoperation device. + +This prefix system is how `lerobot` automatically finds your plugin in the Python environment. + +#### 2\. Follow the `SomethingConfig`/`Something` Naming Pattern + +Your device's implementation class must be named after its configuration class, simply by removing the `Config` suffix. + +- **Config Class:** `MyAwesomeTeleopConfig` +- **Device Class:** `MyAwesomeTeleop` + +#### 3\. Place Your Files in a Predictable Structure + +The device class (`MyAwesomeTeleop`) must be located in a predictable module relative to its configuration class (`MyAwesomeTeleopConfig`). `lerobot` will automatically search in these locations: + +- In the **same module** as the config class. +- In a **submodule named after the device** (e.g., `my_awesome_teleop.py`). + +The recommended and simplest structure is to place them in separate, clearly named files within the same directory. + +#### 4\. Expose Classes in `__init__.py` + +Your package's `__init__.py` file should import and expose both the configuration and the device classes, making them easily accessible. + +### Putting It All Together: A Complete Example + +Let's create a new teleoperator called `my_awesome_teleop`. + +#### Directory Structure + +Here is what the project folder should look like. The package name, `lerobot_teleoperator_my_awesome_teleop`, follows **Convention \#1**. + +``` +lerobot_teleoperator_my_awesome_teleop/ +├── pyproject.toml # (or setup.py) lists lerobot as a dependency +└── lerobot_teleoperator_my_awesome_teleop/ + ├── __init__.py + ├── config_my_awesome_teleop.py + └── my_awesome_teleop.py +``` + +#### File Contents + +- **`config_my_awesome_teleop.py`**: Defines the configuration class. Note the `Config` suffix (**Convention \#2**). + + ```python + from dataclasses import dataclass + + from lerobot.teleoperators.config import TeleoperatorConfig + + @TeleoperatorConfig.register_subclass("my_awesome_teleop") + @dataclass + class MyAwesomeTeleopConfig(TeleoperatorConfig): + # Your configuration fields go here + port: str = "192.168.1.1" + ``` + +- **`my_awesome_teleop.py`**: Implements the device. The class name `MyAwesomeTeleop` matches its config class name (**Convention \#2**). This file structure adheres to **Convention \#3**. + + ```python + from lerobot.teleoperators.teleoperator import Teleoperator + + from .config_my_awesome_teleop import MyAwesomeTeleopConfig + + class MyAwesomeTeleop(Teleoperator): + config_class = MyAwesomeTeleopConfig + name = "my_awesome_teleop" + + def __init__(self, config: MyAwesomeTeleopConfig): + super().__init__(config) + self.config = config + + # Your device logic (e.g., connect) goes here + ``` + +- **`__init__.py`**: Exposes the key classes (**Convention \#4**). 
+ + ```python + from .config_my_awesome_teleop import MyAwesomeTeleopConfig + from .my_awesome_teleop import MyAwesomeTeleop + ``` + +### Installation and Usage + +1. **Install your new plugin in your Python environment.** You can install your local plugin package using `pip`'s editable mode or from PyPi. + + ```bash + # Locally + # Navigate to your plugin's root directory and install it + cd lerobot_teleoperator_my_awesome_teleop + pip install -e . + + # From PyPi + pip install lerobot_teleoperator_my_awesome_teleop + ``` + +2. **Use it directly from the command line.** Now, you can use your custom device by referencing its type. + + ```bash + lerobot-teleoperate --teleop.type=my_awesome_teleop \ + # other arguments + ``` + +And that's it\! Your custom device is now fully integrated. + +### Looking for an example ? + +Check out these two packages from the community: + +- https://github.com/SpesRobotics/lerobot-robot-xarm +- https://github.com/SpesRobotics/lerobot-teleoperator-teleop + +## Wrapping Up + +Once your robot class is complete, you can leverage the LeRobot ecosystem: + +- Control your robot with available teleoperators or integrate directly your teleoperating device +- Record training data and visualize it +- Integrate it into RL or imitation learning pipelines + +Don't hesitate to reach out to the community for help on our [Discord](https://discord.gg/s3KuuzsPFb) 🤗 diff --git a/docs/source/introduction_processors.mdx b/docs/source/introduction_processors.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cd579b22d150c57fda0ef3303a60e08067d11f03 --- /dev/null +++ b/docs/source/introduction_processors.mdx @@ -0,0 +1,314 @@ +# Introduction to Processors + +In robotics, there's a fundamental mismatch between the data that robots and humans produce and what machine learning models expect. +Robots output raw sensor data like camera images and joint positions that need normalization, batching, and device placement before models can process them. +Language instructions from humans must be tokenized into numerical representations, and different robots use different coordinate systems that need standardization. + +The challenge extends to model outputs as well. +Models might output end-effector positions while robots need joint-space commands, or teleoperators produce relative movements while robots expect absolute commands. +Model predictions are often normalized and need conversion back to real-world scales. + +Cross-domain translation adds another layer of complexity. +Training data from one robot setup needs adaptation for deployment on different hardware, models trained with specific camera configurations must work with new arrangements, and datasets with different naming conventions need harmonization. + +**That's where processors come in.** They serve as universal translators that bridge these gaps, ensuring seamless data flow from sensors to models to actuators. +Processors handle all the preprocessing and postprocessing steps needed to convert raw environment data into model-ready inputs and vice versa. 
+ +This means that your favorite policy can be used like this: + +```python +import torch + +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.policies.factory import make_pre_post_processors +from lerobot.policies.your_policy import YourPolicy +from lerobot.processor.pipeline import RobotProcessorPipeline, PolicyProcessorPipeline +dataset = LeRobotDataset("hf_user/dataset", episodes=[0]) +sample = dataset[10] + +model = YourPolicy.from_pretrained( + "hf_user/model", +) +model.eval() +model.to("cuda") +preprocessor, postprocessor = make_pre_post_processors(model.config, pretrained_path="hf_user/model", dataset_stats=dataset.meta.stats) + +preprocessed_sample = preprocessor(sample) +action = model.select_action(preprocessed_sample) +postprocessed_action = postprocessor(action) +``` + +## What are Processors? + +In robotics, data comes in many forms: images from cameras, joint positions from sensors, text instructions from users, and more. Each type of data requires specific transformations before a model can use it effectively. Models need this data to be: + +- **Normalized**: Scaled to appropriate ranges for neural network processing +- **Batched**: Organized with proper dimensions for batch processing +- **Tokenized**: Text converted to numerical representations +- **Device-placed**: Moved to the right hardware (CPU/GPU) +- **Type-converted**: Cast to appropriate data types + +Processors handle these transformations through composable, reusable steps that can be chained together into pipelines. Think of them as a modular assembly line where each station performs a specific transformation on your data. + +## Core Concepts + +### EnvTransition: The Universal Data Container + +The `EnvTransition` is the fundamental data structure that flows through all processors. +It's a typed dictionary that represents a complete robot-environment interaction: + +- **OBSERVATION**: All sensor data (images, states, proprioception) +- **ACTION**: The action to execute or that was executed +- **REWARD**: Reinforcement learning signal +- **DONE/TRUNCATED**: Episode boundary indicators +- **INFO**: Arbitrary metadata +- **COMPLEMENTARY_DATA**: Task descriptions, indices, padding flags, inter-step data + +### ProcessorStep: The Building Block + +A `ProcessorStep` is a single transformation unit that processes transitions. It's an abstract base class with two required methods: + +```python +from lerobot.processor import ProcessorStep, EnvTransition + +class MyProcessorStep(ProcessorStep): + """Example processor step - inherit and implement abstract methods.""" + + def __call__(self, transition: EnvTransition) -> EnvTransition: + """Transform the transition - REQUIRED abstract method.""" + # Your processing logic here + return transition + + def transform_features(self, features): + """Declare how this step transforms feature shapes/types - REQUIRED abstract method.""" + return features # Most processors return features unchanged +``` + +`__call__` is the core of your processor step. It takes an `EnvTransition` and returns a modified `EnvTransition`. + +`transform_features` is used to declare how this step transforms feature shapes/types. 
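+
+To exercise a step like this in isolation, you can build a transition by hand with the `create_transition` helper (listed in the converter table below). This is only a sketch: the keyword names are assumptions, so check `lerobot.processor.converters` for the exact signature.
+
+```python
+import torch
+
+from lerobot.processor.converters import create_transition
+
+# Build a minimal transition by hand (keyword names are assumptions).
+transition = create_transition(
+    observation={"observation.state": torch.zeros(6)},
+    action=torch.zeros(6),
+)
+
+# Apply the custom step defined above; __call__ returns the processed transition.
+step = MyProcessorStep()
+processed = step(transition)
+```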
+ +### DataProcessorPipeline: The Generic Orchestrator + +The `DataProcessorPipeline[TInput, TOutput]` chains multiple `ProcessorStep` instances together: + +```python +from lerobot.processor import RobotProcessorPipeline, PolicyProcessorPipeline + +# For robot hardware (unbatched data) +robot_processor = RobotProcessorPipeline[RobotAction, RobotAction]( + steps=[step1, step2, step3], + name="robot_pipeline" +) + +# For model training/inference (batched data) +policy_processor = PolicyProcessorPipeline[dict[str, Any], dict[str, Any]]( + steps=[step1, step2, step3], + name="policy_pipeline" +) +``` + +## RobotProcessorPipeline vs PolicyProcessorPipeline + +The key distinction is in the data structures they handle: + +| Aspect | RobotProcessorPipeline | PolicyProcessorPipeline | +| --------------- | -------------------------------------------- | ---------------------------------------- | +| **Input** | `dict[str, Any]` - Individual robot values | `dict[str, Any]` - Batched tensors | +| **Output** | `dict[str, Any]` - Individual robot commands | `torch.Tensor` - Policy predictions | +| **Use Case** | Real-time robot control | Model training/inference | +| **Data Format** | Unbatched, heterogeneous | Batched, homogeneous | +| **Examples** | `{"joint_1": 0.5}` | `{"observation.state": tensor([[0.5]])}` | + +**Use `RobotProcessorPipeline`** for robot hardware interfaces: + +```python +# Robot data structures: dict[str, Any] for observations and actions +robot_obs: dict[str, Any] = { + "joint_1": 0.5, # Individual joint values + "joint_2": -0.3, + "camera_0": image_array # Raw camera data +} + +robot_action: dict[str, Any] = { + "joint_1": 0.2, # Target joint positions + "joint_2": 0.1, + "gripper": 0.8 +} +``` + +**Use `PolicyProcessorPipeline`** for model training and batch processing: + +```python +# Policy data structures: batch dicts and tensors +policy_batch: dict[str, Any] = { + "observation.state": torch.tensor([[0.5, -0.3]]), # Batched states + "observation.images.camera0": torch.tensor(...), # Batched images + "action": torch.tensor([[0.2, 0.1, 0.8]]) # Batched actions +} + +policy_action: torch.Tensor = torch.tensor([[0.2, 0.1, 0.8]]) # Model output tensor +``` + +## Converter Functions + +LeRobot provides converter functions to bridge different data formats in `lerobot.processor.converters`. These functions handle the crucial translations between robot hardware data structures, policy model formats, and the internal `EnvTransition` representation that flows through processor pipelines. + +| Category | Function | Description | +| ------------------------------ | ----------------------------- | ------------------------------- | +| **Robot Hardware Converters** | `robot_action_to_transition` | Robot dict → EnvTransition | +| | `observation_to_transition` | Robot obs → EnvTransition | +| | `transition_to_robot_action` | EnvTransition → Robot dict | +| **Policy/Training Converters** | `batch_to_transition` | Batch dict → EnvTransition | +| | `transition_to_batch` | EnvTransition → Batch dict | +| | `policy_action_to_transition` | Policy tensor → EnvTransition | +| | `transition_to_policy_action` | EnvTransition → Policy tensor | +| **Utilities** | `create_transition` | Build transitions with defaults | +| | `identity_transition` | Pass-through converter | + +The key insight is that **robot hardware converters** work with individual values and dictionaries, while **policy/training converters** work with batched tensors and model outputs. 
The converter functions automatically handle the structural differences, so your processor steps can focus on the core transformations without worrying about data format compatibility. + +## Processor Examples + +The following examples demonstrate real-world processor configurations for policy training and inference. + +Here is an example processor for policy training and inference: + +```python +# Training data preprocessing (optimized order for GPU performance) +training_preprocessor = PolicyProcessorPipeline[dict[str, Any], dict[str, Any]]( + steps=[ + RenameObservationsProcessorStep(rename_map={}), # Standardize keys + AddBatchDimensionProcessorStep(), # Add batch dims + TokenizerProcessorStep(tokenizer_name="...", ...), # Tokenize language + DeviceProcessorStep(device="cuda"), # Move to GPU first + NormalizerProcessorStep(features=..., stats=...), # Normalize on GPU + ] +) + +# Model output postprocessing +training_postprocessor = PolicyProcessorPipeline[torch.Tensor, torch.Tensor]( + steps=[ + DeviceProcessorStep(device="cpu"), # Move to CPU + UnnormalizerProcessorStep(features=..., stats=...), # Denormalize + ] + to_transition=policy_action_to_transition, + to_output=transition_to_policy_action, +) +``` + +### An interaction between a robot and a policy with processors + +The most common real-world scenario combines both pipeline types robot hardware generates observations that need policy processing, and policy outputs need robot-compatible postprocessing: + +```python +# Real deployment: Robot sensors → Model → Robot commands +with torch.no_grad(): + while not done: + raw_obs = robot.get_observation() # dict[str, Any] + + # Add your robot observation to policy observation processor + + policy_input = policy_preprocessor(raw_obs) # Batched dict + + policy_output = policy.select_action(policy_input) # Policy tensor + + policy_action = policy_postprocessor(policy_output) + + # Add your robot action to policy action processor + + robot.send_action(policy_action) +``` + +## Feature Contracts: Shape and Type Transformation + +Processors don't just transform data - they can also **change the data structure itself**. The `transform_features()` method declares these changes, which is crucial for dataset recording and policy creation. 
+ +### Why Feature Contracts Matter + +When building datasets or policies, LeRobot needs to know: + +- **What data fields will exist** after processing +- **What shapes and types** each field will have +- **How to configure models** for the expected data structure + +```python +# Example: A processor that adds velocity to observations +class VelocityProcessor(ObservationProcessorStep): + def observation(self, obs): + new_obs = obs.copy() + if "observation.state" in obs: + # concatenate computed velocity field to the state + new_obs["observation.state"] = self._compute_velocity(obs["observation.state"]) + return new_obs + + def transform_features(self, features): + """Declare the new velocity field we're adding.""" + state_feature = features[PipelineFeatureType.OBSERVATION].get("observation.state") + if state_feature: + double_shape = (state_feature.shape[0] * 2,) if state_feature.shape else (2,) + features[PipelineFeatureType.OBSERVATION]["observation.state"] = PolicyFeature( + type=FeatureType.STATE, shape=double_shape + ) + return features +``` + +### Feature Specification Functions + +`create_initial_features()` and `aggregate_pipeline_dataset_features()` solve a critical dataset creation problem: determining the exact final data structure before any data is processed. +Since processor pipelines can add new features (like velocity fields), change tensor shapes (like cropping images), or rename keys, datasets need to know the complete output specification upfront to allocate proper storage and define schemas. +These functions work together by starting with robot hardware specifications (`create_initial_features()`) then simulating the entire pipeline transformation (`aggregate_pipeline_dataset_features()`) to compute the final feature dictionary that gets passed to `LeRobotDataset.create()`, ensuring perfect alignment between what processors output and what datasets expect to store. + +```python +from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features + +# Start with robot's raw features +initial_features = create_initial_features( + observation=robot.observation_features, # {"joint_1.pos": float, "camera_0": (480,640,3)} + action=robot.action_features # {"joint_1.pos": float, "gripper.pos": float} +) + +# Apply processor pipeline to compute final features +final_features = aggregate_pipeline_dataset_features( + pipeline=my_processor_pipeline, + initial_features=initial_features, + use_videos=True +) + +# Use for dataset creation +dataset = LeRobotDataset.create( + repo_id="my_dataset", + features=final_features, # Knows exactly what data to expect + ... +) +``` + +## Common Processor Steps + +LeRobot provides many registered processor steps. 
Here are the most commonly used core processors: + +### Essential Processors + +- **`normalizer_processor`**: Normalize observations/actions using dataset statistics (mean/std or min/max) +- **`device_processor`**: Move tensors to CPU/GPU with optional dtype conversion +- **`to_batch_processor`**: Add batch dimensions to transitions for model compatibility +- **`rename_observations_processor`**: Rename observation keys using mapping dictionaries +- **`tokenizer_processor`**: Tokenize natural language task descriptions into tokens and attention masks + +### Next Steps + +- **[Implement Your Own Processor](./implement_your_own_processor)** - Create custom processor steps +- **[Debug Your Pipeline](./debug_processor_pipeline)** - Troubleshoot and optimize pipelines +- **[Processors for Robots and Teleoperators](./processors_robots_teleop)** - Real-world integration patterns + +## Summary + +Processors solve the data translation problem in robotics by providing: + +- **Modular transformations**: Composable, reusable processing steps +- **Type safety**: Generic pipelines with compile-time checking +- **Performance optimization**: GPU-accelerated operations +- **Robot/Policy distinction**: Separate pipelines for different data structures +- **Comprehensive ecosystem**: 30+ registered processors for common tasks + +The key insight: `RobotProcessorPipeline` handles unbatched robot hardware data, while `PolicyProcessorPipeline` handles batched model data. Choose the right tool for your data structure! diff --git a/docs/source/koch.mdx b/docs/source/koch.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b31e0b100007f2eb70c428f5ca0e4017b769d725 --- /dev/null +++ b/docs/source/koch.mdx @@ -0,0 +1,283 @@ +# Koch v1.1 + +In the steps below, we explain how to assemble the Koch v1.1 robot. + +## Order and assemble the parts + +Follow the sourcing and assembling instructions provided in this [README](https://github.com/jess-moss/koch-v1-1). This will guide you through setting up both the follower and leader arms, as shown in the image below. + +For a visual walkthrough of the assembly process, you can refer to [this video tutorial](https://youtu.be/8nQIg9BwwTk). + +> [!WARNING] +> Since the production of this video, we simplified the configuration phase. Because of this, two things differ from the instructions in that video: +> +> - Don't plug in all the motor cables right away and wait to be instructed to do so in [Configure the motors](#configure-the-motors). +> - Don't screw in the controller board (PCB) to the base right away and wait for being instructed to do so in [Configure the motors](#configure-the-motors). + +## Install LeRobot 🤗 + +To install LeRobot follow, our [Installation Guide](./installation) + +In addition to these instructions, you need to install the Dynamixel SDK: + +```bash +pip install -e ".[dynamixel]" +``` + +## Configure the motors + +### 1. Find the USB ports associated with each arm + +To find the port for each bus servo adapter, run this script: + +```bash +lerobot-find-port +``` + + + + +Example output: + +``` +Finding all available ports for the MotorBus. +['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] +Remove the USB cable from your MotorsBus and press Enter when done. + +[...Disconnect corresponding leader or follower arm and press Enter...] + +The port of this MotorsBus is /dev/tty.usbmodem575E0032081 +Reconnect the USB cable. 
+``` + +Where the found port is: `/dev/tty.usbmodem575E0032081` corresponding to your leader or follower arm. + + + + +On Linux, you might need to give access to the USB ports by running: + +```bash +sudo chmod 666 /dev/ttyACM0 +sudo chmod 666 /dev/ttyACM1 +``` + +Example output: + +``` +Finding all available ports for the MotorBus. +['/dev/ttyACM0', '/dev/ttyACM1'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect corresponding leader or follower arm and press Enter...] + +The port of this MotorsBus is /dev/ttyACM1 +Reconnect the USB cable. +``` + +Where the found port is: `/dev/ttyACM1` corresponding to your leader or follower arm. + + + + +### 2. Set the motors ids and baudrates + +Each motor is identified by a unique id on the bus. When brand new, motors usually come with a default id of `1`. For the communication to work properly between the motors and the controller, we first need to set a unique, different id to each motor. Additionally, the speed at which data is transmitted on the bus is determined by the baudrate. In order to talk to each other, the controller and all the motors need to be configured with the same baudrate. + +To that end, we first need to connect to each motor individually with the controller in order to set these. Since we will write these parameters in the non-volatile section of the motors' internal memory (EEPROM), we'll only need to do this once. + +If you are repurposing motors from another robot, you will probably also need to perform this step, as the ids and baudrate likely won't match. + +#### Follower + +Connect the usb cable from your computer and the 5V power supply to the follower arm's controller board. Then, run the following command or run the API example with the port you got from the previous step. You'll also need to give your leader arm a name with the `id` parameter. + +For a visual reference on how to set the motor ids please refer to [this video](https://huggingface.co/docs/lerobot/en/so101#setup-motors-video) where we follow the process for the SO101 arm. + + + + +```bash +lerobot-setup-motors \ + --robot.type=koch_follower \ + --robot.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step +``` + + + + + +```python +from lerobot.robots.koch_follower import KochFollower, KochFollowerConfig + +config = KochFollowerConfig( + port="/dev/tty.usbmodem575E0031751", + id="my_awesome_follower_arm", +) +follower = KochFollower(config) +follower.setup_motors() +``` + + + + + +You should see the following instruction. + +``` +Connect the controller board to the 'gripper' motor only and press enter. +``` + +As instructed, plug the gripper's motor. Make sure it's the only motor connected to the board, and that the motor itself is not yet daisy-chained to any other motor. As you press `[Enter]`, the script will automatically set the id and baudrate for that motor. + +
+Troubleshooting + +If you get an error at that point, check your cables and make sure they are plugged in properly: + +
+- Power supply
+- USB cable between your computer and the controller board
+- The 3-pin cable from the controller board to the motor
+ +If you are using a Waveshare controller board, make sure that the two jumpers are set on the `B` channel (USB). + +
+ +You should then see the following message: + +``` +'gripper' motor id set to 6 +``` + +Followed by the next instruction: + +``` +Connect the controller board to the 'wrist_roll' motor only and press enter. +``` + +You can disconnect the 3-pin cable from the controller board but you can leave it connected to the gripper motor on the other end as it will already be in the right place. Now, plug in another 3-pin cable to the wrist roll motor and connect it to the controller board. As with the previous motor, make sure it is the only motor connected to the board and that the motor itself isn't connected to any other one. + +Repeat the operation for each motor as instructed. + +> [!TIP] +> Check your cabling at each step before pressing Enter. For instance, the power supply cable might disconnect as you manipulate the board. + +When you are done, the script will simply finish, at which point the motors are ready to be used. You can now plug the 3-pin cable from each motor to the next one, and the cable from the first motor (the 'shoulder pan' with id=1) to the controller board, which can now be attached to the base of the arm. + +#### Leader + +Do the same steps for the leader arm but modify the command or script accordingly. + + + + +```bash +lerobot-setup-motors \ + --teleop.type=koch_leader \ + --teleop.port=/dev/tty.usbmodem575E0031751 \ # <- paste here the port found at previous step +``` + + + + + +```python +from lerobot.teleoperators.koch_leader import KochLeader, KochLeaderConfig + +config = KochLeaderConfig( + port="/dev/tty.usbmodem575E0031751", + id="my_awesome_leader_arm", +) +leader = KochLeader(config) +leader.setup_motors() +``` + + + + + +## Calibrate + +Next, you'll need to calibrate your robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. +The calibration process is very important because it allows a neural network trained on one robot to work on another. + +#### Follower + +Run the following command or API example to calibrate the follower arm: + + + + +```bash +lerobot-calibrate \ + --robot.type=koch_follower \ + --robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot + --robot.id=my_awesome_follower_arm # <- Give the robot a unique name +``` + + + + + +```python +from lerobot.robots.koch_follower import KochFollowerConfig, KochFollower + +config = KochFollowerConfig( + port="/dev/tty.usbmodem585A0076891", + id="my_awesome_follower_arm", +) + +follower = KochFollower(config) +follower.connect(calibrate=False) +follower.calibrate() +follower.disconnect() +``` + + + + + +We unified the calibration method for most robots. Thus, the calibration steps for this Koch arm are the same as the steps for the SO100 and SO101. First, we have to move the robot to the position where each joint is in the middle of its range, then we press `Enter`. Secondly, we move all joints through their full range of motion. A video of this same process for the SO101 as reference can be found [here](https://huggingface.co/docs/lerobot/en/so101#calibration-video). 
+ +#### Leader + +Do the same steps to calibrate the leader arm, run the following command or API example: + + + + +```bash +lerobot-calibrate \ + --teleop.type=koch_leader \ + --teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot + --teleop.id=my_awesome_leader_arm # <- Give the robot a unique name +``` + + + + + +```python +from lerobot.teleoperators.koch_leader import KochLeaderConfig, KochLeader + +config = KochLeaderConfig( + port="/dev/tty.usbmodem575E0031751", + id="my_awesome_leader_arm", +) + +leader = KochLeader(config) +leader.connect(calibrate=False) +leader.calibrate() +leader.disconnect() +``` + + + + + +Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following this tutorial: [Getting started with real-world robots](./il_robots) + +> [!TIP] +> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb). diff --git a/docs/source/lekiwi.mdx b/docs/source/lekiwi.mdx new file mode 100644 index 0000000000000000000000000000000000000000..0a12e4c10f65381711cd3c9bb42ae52288c4921b --- /dev/null +++ b/docs/source/lekiwi.mdx @@ -0,0 +1,337 @@ +# LeKiwi + +In the steps below, we explain how to assemble the LeKiwi mobile robot. + +## Source the parts + +Follow this [README](https://github.com/SIGRobotics-UIUC/LeKiwi). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts. +And advise if it's your first time printing or if you don't own a 3D printer. + +### Wired version + +If you have the **wired** LeKiwi version, you can skip the installation of the Raspberry Pi and setting up SSH. You can also run all commands directly on your PC for both the LeKiwi scripts and the leader arm scripts for teleoperating. + +## Install software on Pi + +Now we have to set up the remote PC that will run on the LeKiwi Robot. This is normally a Raspberry Pi, but can be any PC that can run on 5V and has enough usb ports (2 or more) for the cameras and motor control board. + +### Install OS + +For setting up the Raspberry Pi and its SD-card see: [Setup PI](https://www.raspberrypi.com/documentation/computers/getting-started.html). Here is explained how to download the [Imager](https://www.raspberrypi.com/software/) to install Raspberry Pi OS or Ubuntu. + +### Setup SSH + +After setting up your Pi, you should enable and set up [SSH](https://www.raspberrypi.com/news/coding-on-raspberry-pi-remotely-with-visual-studio-code/) (Secure Shell Protocol) so you can log in to the Pi from your laptop without requiring a screen, keyboard, and mouse on the Pi. A great tutorial on how to do this can be found [here](https://www.raspberrypi.com/documentation/computers/remote-access.html#ssh). Logging into your Pi can be done in your Command Prompt (cmd) or, if you use VSCode you can use [this](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-ssh) extension. + +### Install LeRobot on Pi 🤗 + +On your Raspberry Pi install LeRobot using our [Installation Guide](./installation) + +In addition to these instructions, you need to install the Feetech SDK & ZeroMQ on your Pi: + +```bash +pip install -e ".[lekiwi]" +``` + +## Install LeRobot locally + +If you already have installed LeRobot on your laptop/pc you can skip this step; otherwise, please follow along as we do the same steps we did on the Pi. 
+ +Follow our [Installation Guide](./installation) + +In addition to these instructions, you need to install the Feetech SDK & ZeroMQ on your laptop/pc: + +```bash +pip install -e ".[lekiwi]" +``` + +Great :hugs:! You are now done installing LeRobot, and we can begin assembling the SO100/SO101 arms and the mobile base :robot:. +Every time you now want to use LeRobot, you can go to the `~/lerobot` folder where we installed LeRobot and run one of the commands. + +# Step-by-Step Assembly Instructions + +First, we will assemble the two SO100/SO101 arms. One to attach to the mobile base and one for teleoperation. Then we will assemble the mobile base. The instructions for assembling can be found on these two pages: + +- [Assemble SO101](./so101#step-by-step-assembly-instructions) +- [Assemble LeKiwi](https://github.com/SIGRobotics-UIUC/LeKiwi/blob/main/Assembly.md) + +### Find the USB ports associated with motor board + +To find the port for each bus servo adapter, run this script: + +```bash +lerobot-find-port +``` + + + + +Example output: + +``` +Finding all available ports for the MotorBus. +['/dev/tty.usbmodem575E0032081'] +Remove the USB cable from your MotorsBus and press Enter when done. + +[...Disconnect corresponding leader or follower arm and press Enter...] + +The port of this MotorsBus is /dev/tty.usbmodem575E0032081 +Reconnect the USB cable. +``` + +Where the found port is: `/dev/tty.usbmodem575E0032081` corresponding to your board. + + + + +On Linux, you might need to give access to the USB ports by running: + +```bash +sudo chmod 666 /dev/ttyACM0 +sudo chmod 666 /dev/ttyACM1 +``` + +Example output: + +``` +Finding all available ports for the MotorBus. +['/dev/ttyACM0'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect corresponding leader or follower arm and press Enter...] + +The port of this MotorsBus is /dev/ttyACM0 +Reconnect the USB cable. +``` + +Where the found port is: `/dev/ttyACM0` corresponding to your board. + + + + +### Configure motors + +The instructions for configuring the motors can be found in the SO101 [docs](./so101#configure-the-motors). Besides the ids for the arm motors, we also need to set the motor ids for the mobile base. These need to be in a specific order to work. Below an image of the motor ids and motor mounting positions for the mobile base. Note that we only use one Motor Control board on LeKiwi. This means the motor ids for the wheels are 7, 8 and 9. + +You can run this command to setup motors for LeKiwi. It will first setup the motors for arm (id 6..1) and then setup motors for wheels (9,8,7) + +```bash +lerobot-setup-motors \ + --robot.type=lekiwi \ + --robot.port=/dev/tty.usbmodem58760431551 # <- paste here the port found at previous step +``` + +Motor ID's for mobile robot + +### Troubleshoot communication + +If you are having trouble connecting to the Mobile SO100, follow these steps to diagnose and resolve the issue. + +#### 1. Verify IP Address Configuration + +Make sure that the correct IP for the Pi is used in the commands or in your code. To check the Raspberry Pi's IP address, run (on the Pi command line): + +```bash +hostname -I +``` + +#### 2. Check if Pi is reachable from laptop/pc + +Try pinging the Raspberry Pi from your laptop: + +```bach +ping +``` + +If the ping fails: + +- Ensure the Pi is powered on and connected to the same network. +- Check if SSH is enabled on the Pi. + +#### 3. Try SSH connection + +If you can't SSH into the Pi, it might not be properly connected. 
Use: + +```bash +ssh @ +``` + +If you get a connection error: + +- Ensure SSH is enabled on the Pi by running: + ```bash + sudo raspi-config + ``` + Then navigate to: **Interfacing Options -> SSH** and enable it. + +### Calibration + +Now we have to calibrate the leader arm and the follower arm. The wheel motors don't have to be calibrated. +The calibration process is very important because it allows a neural network trained on one robot to work on another. + +### Calibrate follower arm (on mobile base) + +Make sure the arm is connected to the Raspberry Pi and run this script or API example (on the Raspberry Pi via SSH) to launch calibration of the follower arm: + +```bash +lerobot-calibrate \ + --robot.type=lekiwi \ + --robot.id=my_awesome_kiwi # <- Give the robot a unique name +``` + +We unified the calibration method for most robots, thus, the calibration steps for this SO100 arm are the same as the steps for the Koch and SO101. First, we have to move the robot to the position where each joint is in the middle of its range, then we press `Enter`. Secondly, we move all joints through their full range of motion. A video of this same process for the SO101 as reference can be found [here](https://huggingface.co/docs/lerobot/en/so101#calibration-video). + +### Wired version + +If you have the **wired** LeKiwi version, please run all commands on your laptop. + +### Calibrate leader arm + +Then, to calibrate the leader arm (which is attached to the laptop/pc). Run the following command of API example on your laptop: + + + + +```bash +lerobot-calibrate \ + --teleop.type=so100_leader \ + --teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot + --teleop.id=my_awesome_leader_arm # <- Give the robot a unique name +``` + + + + + +```python +from lerobot.teleoperators.so100_leader import SO100LeaderConfig, SO100Leader + +config = SO100LeaderConfig( + port="/dev/tty.usbmodem58760431551", + id="my_awesome_leader_arm", +) + +leader = SO100Leader(config) +leader.connect(calibrate=False) +leader.calibrate() +leader.disconnect() +``` + + + + + +## Teleoperate LeKiwi + +> [!TIP] +> If you're using a Mac, you might need to give Terminal permission to access your keyboard for teleoperation. Go to System Preferences > Security & Privacy > Input Monitoring and check the box for Terminal. + +To teleoperate, SSH into your Raspberry Pi, and run `conda activate lerobot` and this command: + +```bash +python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi +``` + +Then on your laptop, also run `conda activate lerobot` and run the API example, make sure you set the correct `remote_ip` and `port` in `examples/lekiwi/teleoperate.py`. + +```bash +python examples/lekiwi/teleoperate.py +``` + +You should see on your laptop something like this: `[INFO] Connected to remote robot at tcp://172.17.133.91:5555 and video stream at tcp://172.17.133.91:5556.` Now you can move the leader arm and use the keyboard (w,a,s,d) to drive forward, left, backwards, right. And use (z,x) to turn left or turn right. You can use (r,f) to increase and decrease the speed of the mobile robot. 
There are three speed modes, see the table below:
+
+| Speed Mode | Linear Speed (m/s) | Rotation Speed (deg/s) |
+| ---------- | ------------------ | ---------------------- |
+| Fast       | 0.4                | 90                     |
+| Medium     | 0.25               | 60                     |
+| Slow       | 0.1                | 30                     |
+
+| Key | Action         |
+| --- | -------------- |
+| W   | Move forward   |
+| A   | Move left      |
+| S   | Move backward  |
+| D   | Move right     |
+| Z   | Turn left      |
+| X   | Turn right     |
+| R   | Increase speed |
+| F   | Decrease speed |
+
+> [!TIP]
+> If you use a different keyboard, you can change the keys for each command in the [`LeKiwiClientConfig`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/robots/lekiwi/config_lekiwi.py).
+
+### Wired version
+
+If you have the **wired** LeKiwi version, please run all commands on your laptop.
+
+## Record a dataset
+
+Once you're familiar with teleoperation, you can record your first dataset.
+
+We use the Hugging Face Hub features for uploading your dataset. If you haven't previously used the Hub, make sure you can log in via the CLI using a write-access token; this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens).
+
+Add your token to the CLI by running this command:
+
+```bash
+huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
+```
+
+Then store your Hugging Face repository name in a variable:
+
+```bash
+HF_USER=$(huggingface-cli whoami | head -n 1)
+echo $HF_USER
+```
+
+Now you can record a dataset. To record episodes and upload your dataset to the Hub, execute this API example tailored for LeKiwi. Make sure to first adapt the `remote_ip`, `repo_id`, `port` and `task` in the script. If you would like to run the script for longer, you can increase `NB_CYCLES_CLIENT_CONNECTION`.
+
+```bash
+python examples/lekiwi/record.py
+```
+
+#### Dataset upload
+
+Locally, your dataset is stored in this folder: `~/.cache/huggingface/lerobot/{repo-id}`. At the end of data recording, your dataset will be uploaded to your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/so101_test), whose URL you can obtain by running:
+
+```bash
+echo https://huggingface.co/datasets/${HF_USER}/so101_test
+```
+
+Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).
+
+You can look for other LeRobot datasets on the Hub by searching for `LeRobot` [tags](https://huggingface.co/datasets?other=LeRobot).
+
+#### Tips for gathering data
+
+Once you're comfortable with data recording, you can create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings. Also make sure the object you are manipulating is visible in the cameras. A good rule of thumb is that you should be able to do the task yourself by only looking at the camera images.
+
+In the following sections, you’ll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions.
+
+Avoid adding too much variation too quickly, as it may hinder your results.
+
+If you want to dive deeper into this important topic, you can check out the [blog post](https://huggingface.co/blog/lerobot-datasets#what-makes-a-good-dataset) we wrote on what makes a good dataset.
+
+#### Troubleshooting:
+
+- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
+
+## Replay an episode
+
+To replay an episode, run the API example below; make sure to change `remote_ip`, `port`, the `LeRobotDataset` repo id and the episode index.
+
+```bash
+python examples/lekiwi/replay.py
+```
+
+Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following the training part of this tutorial: [Getting started with real-world robots](./il_robots)
+
+## Evaluate your policy
+
+To evaluate your policy, run the `evaluate.py` API example; make sure to change `remote_ip`, `port` and the model path.
+
+```bash
+python examples/lekiwi/evaluate.py
+```
+
+> [!TIP]
+> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
diff --git a/docs/source/lerobot-dataset-v3.mdx b/docs/source/lerobot-dataset-v3.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..1071074381c35f062d139980948bf0621c8b26ed
--- /dev/null
+++ b/docs/source/lerobot-dataset-v3.mdx
@@ -0,0 +1,314 @@
+# LeRobotDataset v3.0
+
+`LeRobotDataset v3.0` is a standardized format for robot learning data. It provides unified access to multi-modal time-series data, sensorimotor signals and multi‑camera video, as well as rich metadata for indexing, search, and visualization on the Hugging Face Hub.
+
+This guide will show you how to:
+
+- Understand the v3.0 design and directory layout
+- Record a dataset and push it to the Hub
+- Load datasets for training with `LeRobotDataset`
+- Stream datasets without downloading using `StreamingLeRobotDataset`
+- Apply image transforms for data augmentation during training
+- Migrate existing `v2.1` datasets to `v3.0`
+
+## What’s new in `v3`
+
+- **File-based storage**: Many episodes per Parquet/MP4 file (v2 used one file per episode).
+- **Relational metadata**: Episode boundaries and lookups are resolved through metadata, not filenames.
+- **Hub-native streaming**: Consume datasets directly from the Hub with `StreamingLeRobotDataset`.
+- **Lower file-system pressure**: Fewer, larger files ⇒ faster initialization and fewer issues at scale.
+- **Unified organization**: Clean directory layout with consistent path templates across data and videos.
+
+## Installation
+
+`LeRobotDataset v3.0` will be included in `lerobot >= 0.4.0`.
+
+Until that stable release, you can use the main branch by following the [build from source instructions](./installation#from-source).
+
+## Record a dataset
+
+Run the command below to record a dataset with the SO-101 and push it to the Hub:
+
+```bash
+lerobot-record \
+    --robot.type=so101_follower \
+    --robot.port=/dev/tty.usbmodem585A0076841 \
+    --robot.id=my_awesome_follower_arm \
+    --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \
+    --teleop.type=so101_leader \
+    --teleop.port=/dev/tty.usbmodem58760431551 \
+    --teleop.id=my_awesome_leader_arm \
+    --display_data=true \
+    --dataset.repo_id=${HF_USER}/record-test \
+    --dataset.num_episodes=5 \
+    --dataset.single_task="Grab the black cube"
+```
+
+See the [recording guide](./il_robots#record-a-dataset) for more details.
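+
+Once the recording finishes, you can quickly sanity-check what was pushed. A minimal sketch, assuming the `repo_id` used above; `num_episodes`, `num_frames` and `fps` are the dataset's bookkeeping attributes, and `meta.stats` is described in the layout section below:
+
+```python
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+# The repo_id you passed to lerobot-record above (cached locally)
+dataset = LeRobotDataset("your-username/record-test")
+
+print(dataset.num_episodes)       # number of recorded episodes
+print(dataset.num_frames)         # total frames across all episodes
+print(dataset.fps)                # recording frame rate
+print(dataset.meta.stats.keys())  # per-feature normalization statistics
+```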
+ +## Format design + +A core v3 principle is **decoupling storage from the user API**: data is stored efficiently (few large files), while the public API exposes intuitive episode-level access. + +`v3` has three pillars: + +1. **Tabular data**: Low‑dimensional, high‑frequency signals (states, actions, timestamps) stored in **Apache Parquet**. Access is memory‑mapped or streamed via the `datasets` stack. +2. **Visual data**: Camera frames concatenated and encoded into **MP4**. Frames from the same episode are grouped; videos are sharded per camera for practical sizes. +3. **Metadata**: JSON/Parquet records describing schema (feature names, dtypes, shapes), frame rates, normalization stats, and **episode segmentation** (start/end offsets into shared Parquet/MP4 files). + +> To scale to millions of episodes, tabular rows and video frames from multiple episodes are **concatenated** into larger files. Episode‑specific views are reconstructed **via metadata**, not file boundaries. + +
+*LeRobotDataset v3 diagram: from episode‑based to file‑based datasets.*
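+
+Before walking through the layout, you can list the files of a real v3 dataset to see the sharded structure for yourself. A short sketch using `huggingface_hub` (the repo id is the one used in the loading examples below):
+
+```python
+from huggingface_hub import HfApi
+
+# List the files of a v3 dataset repo on the Hub
+files = HfApi().list_repo_files("yaak-ai/L2D-v3", repo_type="dataset")
+for path in sorted(files)[:10]:
+    print(path)  # meta/, data/ and videos/ shards as described below
+```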
+
+### Directory layout (simplified)
+
+- **`meta/info.json`**: canonical schema (features, shapes/dtypes), FPS, codebase version, and **path templates** to locate data/video shards.
+- **`meta/stats.json`**: global feature statistics (mean/std/min/max) used for normalization; exposed as `dataset.meta.stats`.
+- **`meta/tasks.jsonl`**: natural‑language task descriptions mapped to integer IDs for task‑conditioned policies.
+- **`meta/episodes/`**: per‑episode records (lengths, tasks, offsets) stored as **chunked Parquet** for scalability.
+- **`data/`**: frame‑by‑frame **Parquet** shards; each file typically contains **many episodes**.
+- **`videos/`**: **MP4** shards per camera; each file typically contains **many episodes**.
+
+## Load a dataset for training
+
+`LeRobotDataset` returns Python dictionaries of PyTorch tensors and integrates with `torch.utils.data.DataLoader`. Here is a code example showing its use:
+
+```python
+import torch
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+repo_id = "yaak-ai/L2D-v3"
+
+# 1) Load from the Hub (cached locally)
+dataset = LeRobotDataset(repo_id)
+
+# 2) Random access by index
+sample = dataset[100]
+print(sample)
+# {
+#     'observation.state': tensor([...]),
+#     'action': tensor([...]),
+#     'observation.images.front_left': tensor([C, H, W]),
+#     'timestamp': tensor(1.234),
+#     ...
+# }
+
+# 3) Temporal windows via delta_timestamps (seconds relative to t)
+delta_timestamps = {
+    "observation.images.front_left": [-0.2, -0.1, 0.0]  # 0.2s before, 0.1s before, and the current frame
+}
+
+dataset = LeRobotDataset(repo_id, delta_timestamps=delta_timestamps)
+
+# Accessing an index now returns a stack for the specified key(s)
+sample = dataset[100]
+print(sample["observation.images.front_left"].shape)  # [T, C, H, W], where T=3
+
+# 4) Wrap with a DataLoader for training
+batch_size = 16
+data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+for batch in data_loader:
+    observations = batch["observation.state"].to(device)
+    actions = batch["action"].to(device)
+    images = batch["observation.images.front_left"].to(device)
+    # model.forward(batch)
+```
+
+## Stream a dataset (no downloads)
+
+Use `StreamingLeRobotDataset` to iterate directly from the Hub without local copies. This lets you stream large datasets without downloading them to disk or loading them fully into memory, and is a key feature of the new dataset format.
+
+```python
+from lerobot.datasets.streaming_dataset import StreamingLeRobotDataset
+
+repo_id = "yaak-ai/L2D-v3"
+dataset = StreamingLeRobotDataset(repo_id)  # streams directly from the Hub
+```
+
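+Since samples are fetched on the fly from Hub shards, the streaming dataset is consumed by iteration rather than by random indexing. A minimal sketch, assuming it iterates like a torch `IterableDataset` and yields the same per-frame dictionaries as `LeRobotDataset`:
+
+```python
+from lerobot.datasets.streaming_dataset import StreamingLeRobotDataset
+
+dataset = StreamingLeRobotDataset("yaak-ai/L2D-v3")
+
+# Peek at the first few frames without downloading the dataset
+for i, sample in enumerate(dataset):
+    print(sample["action"].shape)
+    if i == 2:
+        break
+```
+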
+*StreamingLeRobotDataset: stream directly from the Hub for on‑the‑fly training.*
+ +## Image transforms + +Image transforms are data augmentations applied to camera frames during training to improve model robustness and generalization. LeRobot supports various transforms including brightness, contrast, saturation, hue, and sharpness adjustments. + +### Using transforms during dataset creation/recording + +Currently, transforms are applied during **training time only**, not during recording. When you create or record a dataset, the raw images are stored without transforms. This allows you to experiment with different augmentations later without re-recording data. + +### Adding transforms to existing datasets (API) + +Use the `image_transforms` parameter when loading a dataset for training: + +```python +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.datasets.transforms import ImageTransforms, ImageTransformsConfig, ImageTransformConfig + +# Option 1: Use default transform configuration (disabled by default) +transforms_config = ImageTransformsConfig( + enable=True, # Enable transforms + max_num_transforms=3, # Apply up to 3 transforms per frame + random_order=False, # Apply in standard order +) +transforms = ImageTransforms(transforms_config) + +dataset = LeRobotDataset( + repo_id="your-username/your-dataset", + image_transforms=transforms +) + +# Option 2: Create custom transform configuration +custom_transforms_config = ImageTransformsConfig( + enable=True, + max_num_transforms=2, + random_order=True, + tfs={ + "brightness": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"brightness": (0.7, 1.3)} # Adjust brightness range + ), + "contrast": ImageTransformConfig( + weight=2.0, # Higher weight = more likely to be selected + type="ColorJitter", + kwargs={"contrast": (0.8, 1.2)} + ), + "sharpness": ImageTransformConfig( + weight=0.5, # Lower weight = less likely to be selected + type="SharpnessJitter", + kwargs={"sharpness": (0.3, 2.0)} + ), + } +) + +dataset = LeRobotDataset( + repo_id="your-username/your-dataset", + image_transforms=ImageTransforms(custom_transforms_config) +) + +# Option 3: Use pure torchvision transforms +from torchvision.transforms import v2 + +torchvision_transforms = v2.Compose([ + v2.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1), + v2.GaussianBlur(kernel_size=3, sigma=(0.1, 2.0)), +]) + +dataset = LeRobotDataset( + repo_id="your-username/your-dataset", + image_transforms=torchvision_transforms +) +``` + +### Available transform types + +LeRobot provides several transform types: + +- **`ColorJitter`**: Adjusts brightness, contrast, saturation, and hue +- **`SharpnessJitter`**: Randomly adjusts image sharpness +- **`Identity`**: No transformation (useful for testing) + +You can also use any `torchvision.transforms.v2` transform by passing it directly to the `image_transforms` parameter. + +### Configuration options + +- **`enable`**: Enable/disable transforms (default: `False`) +- **`max_num_transforms`**: Maximum number of transforms applied per frame (default: `3`) +- **`random_order`**: Apply transforms in random order vs. 
standard order (default: `False`)
+- **`weight`**: Sampling probability for each transform (higher = more likely; if the weights don't sum to 1, they are normalized)
+- **`kwargs`**: Transform-specific parameters (e.g., brightness range)
+
+### Visualizing transforms
+
+Use the visualization script to preview how transforms affect your data:
+
+```bash
+lerobot-imgtransform-viz \
+    --repo-id=your-username/your-dataset \
+    --output-dir=./transform_examples \
+    --n-examples=5
+```
+
+This saves example images showing the effect of each transform, helping you tune parameters.
+
+### Best practices
+
+- **Start conservative**: Begin with small ranges (e.g., brightness 0.9-1.1) and increase gradually
+- **Test first**: Use the visualization script to ensure transforms look reasonable
+- **Monitor training**: Strong augmentations can hurt performance if too aggressive
+- **Match your domain**: If your robot operates in varying lighting, use brightness/contrast transforms
+- **Combine wisely**: Using too many transforms simultaneously can make training unstable
+
+## Migrate `v2.1` → `v3.0`
+
+A converter aggregates per‑episode files into larger shards and writes episode offsets/metadata. Convert your dataset using the instructions below.
+
+```bash
+# Pre-release build with v3 support:
+pip install "https://github.com/huggingface/lerobot/archive/33cad37054c2b594ceba57463e8f11ee374fa93c.zip"
+
+# Convert an existing v2.1 dataset hosted on the Hub:
+python -m lerobot.datasets.v30.convert_dataset_v21_to_v30 --repo-id=<your-dataset-repo-id>
+```
+
+**What it does**
+
+- Aggregates parquet files: `episode-0000.parquet`, `episode-0001.parquet`, … → **`file-0000.parquet`**, …
+- Aggregates mp4 files: `episode-0000.mp4`, `episode-0001.mp4`, … → **`file-0000.mp4`**, …
+- Updates `meta/episodes/*` (chunked Parquet) with per‑episode lengths, tasks, and byte/frame offsets.
+
+## Common Issues
+
+### Always call `finalize()` before pushing
+
+When creating or recording datasets, you **must** call `dataset.finalize()` to properly close parquet writers. See [PR #1903](https://github.com/huggingface/lerobot/pull/1903) for more details.
+
+```python
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+# Create dataset and record episodes
+dataset = LeRobotDataset.create(...)
+
+for episode in range(num_episodes):
+    # Record frames
+    for frame in episode_data:
+        dataset.add_frame(frame)
+    dataset.save_episode()
+
+# Call finalize() when done recording and before push_to_hub()
+dataset.finalize()  # Closes parquet writers, writes metadata footers
+dataset.push_to_hub()
+```
+
+**Why is this necessary?**
+
+Dataset v3.0 uses incremental parquet writing with buffered metadata for efficiency. The `finalize()` method:
+
+- Flushes any buffered episode metadata to disk
+- Closes parquet writers so that footer metadata is written; without this the parquet files are corrupt
+- Ensures the dataset is valid for loading
+
+Without calling `finalize()`, your parquet files will be incomplete and the dataset won't load properly.
diff --git a/docs/source/libero.mdx b/docs/source/libero.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..bb5b98ed525343f01eb7ca0a09e4116c0e0cfdca
--- /dev/null
+++ b/docs/source/libero.mdx
@@ -0,0 +1,166 @@
+# LIBERO
+
+**LIBERO** is a benchmark designed to study **lifelong robot learning**. The idea is that robots won’t just be pretrained once in a factory; they’ll need to keep learning and adapting with their human users over time.
This ongoing adaptation is called **lifelong learning in decision making (LLDM)**, and it’s a key step toward building robots that become truly personalized helpers.
+
+- 📄 [LIBERO paper](https://arxiv.org/abs/2306.03310)
+- 💻 [Original LIBERO repo](https://github.com/Lifelong-Robot-Learning/LIBERO)
+
+To make progress on this challenge, LIBERO provides a set of standardized tasks that focus on **knowledge transfer**: how well a robot can apply what it has already learned to new situations. By evaluating on LIBERO, different algorithms can be compared fairly and researchers can build on each other’s work.
+
+LIBERO includes **five task suites**:
+
+- **LIBERO-Spatial (`libero_spatial`)** – tasks that require reasoning about spatial relations.
+- **LIBERO-Object (`libero_object`)** – tasks centered on manipulating different objects.
+- **LIBERO-Goal (`libero_goal`)** – goal-conditioned tasks where the robot must adapt to changing targets.
+- **LIBERO-90 (`libero_90`)** – 90 short-horizon tasks from the LIBERO-100 collection.
+- **LIBERO-Long (`libero_10`)** – 10 long-horizon tasks from the LIBERO-100 collection.
+
+Together, these suites cover **130 tasks**, ranging from simple object manipulations to complex multi-step scenarios. LIBERO is meant to grow over time, and to serve as a shared benchmark where the community can test and improve lifelong learning algorithms.
+
+![An overview of the LIBERO benchmark](https://libero-project.github.io/assets/img/libero/fig1.png)
+
+## Evaluating with LIBERO
+
+At **LeRobot**, we ported [LIBERO](https://github.com/Lifelong-Robot-Learning/LIBERO) into our framework and used it mainly to **evaluate [SmolVLA](https://huggingface.co/docs/lerobot/en/smolvla)**, our lightweight Vision-Language-Action model.
+
+LIBERO is now part of our **multi-eval supported simulation**, meaning you can benchmark your policies either on a **single suite of tasks** or across **multiple suites at once** with just a flag.
+
+To install LIBERO, after following the official LeRobot installation instructions, just run:
+`pip install -e ".[libero]"`
+
+### Single-suite evaluation
+
+Evaluate a policy on one LIBERO suite:
+
+```bash
+lerobot-eval \
+    --policy.path="your-policy-id" \
+    --env.type=libero \
+    --env.task=libero_object \
+    --eval.batch_size=2 \
+    --eval.n_episodes=3
+```
+
+- `--env.task` picks the suite (`libero_object`, `libero_spatial`, etc.).
+- `--eval.batch_size` controls how many environments run in parallel.
+- `--eval.n_episodes` sets how many episodes to run in total.
+
+---
+
+### Multi-suite evaluation
+
+Benchmark a policy across multiple suites at once:
+
+```bash
+lerobot-eval \
+    --policy.path="your-policy-id" \
+    --env.type=libero \
+    --env.task=libero_object,libero_spatial \
+    --eval.batch_size=1 \
+    --eval.n_episodes=2
+```
+
+- Pass a comma-separated list to `--env.task` for multi-suite evaluation.
+
+### Policy inputs and outputs
+
+When using LIBERO through LeRobot, policies interact with the environment via **observations** and **actions**:
+
+- **Observations**
+  - `observation.state` – proprioceptive features (agent state).
+  - `observation.images.image` – main camera view (`agentview_image`).
+  - `observation.images.image2` – wrist camera view (`robot0_eye_in_hand_image`).
+
+  ⚠️ **Note:** LeRobot enforces the `.images.*` prefix for any multi-modal visual features. Always ensure that your policy config's `input_features` uses the same naming keys, and that your dataset metadata keys follow this convention during evaluation.
+  If your data contains different keys, you must rename the observations to match what the policy expects, since naming keys are encoded inside the normalization statistics layer.
+  This will be fixed with the upcoming Pipeline PR.
+
+- **Actions**
+  - Continuous control values in a `Box(-1, 1, shape=(7,))` space.
+
+We also provide a notebook for quick testing; see the [notebooks](./notebooks) page.
+
+## Training with LIBERO
+
+When training on LIBERO tasks, make sure your dataset parquet and metadata keys follow the LeRobot convention.
+
+The environment expects:
+
+- `observation.state` → 8-dim agent state
+- `observation.images.image` → main camera (`agentview_image`)
+- `observation.images.image2` → wrist camera (`robot0_eye_in_hand_image`)
+
+⚠️ Cleaning the dataset upfront is **cleaner and more efficient** than remapping keys inside the code.
+To avoid potential mismatches and key errors, we provide a **preprocessed LIBERO dataset** that is fully compatible with the current LeRobot codebase and requires no additional manipulation:
+👉 [HuggingFaceVLA/libero](https://huggingface.co/datasets/HuggingFaceVLA/libero)
+
+For reference, here is the **original dataset** published by Physical Intelligence:
+👉 [physical-intelligence/libero](https://huggingface.co/datasets/physical-intelligence/libero)
+
+---
+
+### Example training command
+
+```bash
+lerobot-train \
+    --policy.type=smolvla \
+    --policy.repo_id=${HF_USER}/libero-test \
+    --policy.load_vlm_weights=true \
+    --dataset.repo_id=HuggingFaceVLA/libero \
+    --env.type=libero \
+    --env.task=libero_10 \
+    --output_dir=./outputs/ \
+    --steps=100000 \
+    --batch_size=4 \
+    --eval.batch_size=1 \
+    --eval.n_episodes=1 \
+    --eval_freq=1000
+```
+
+---
+
+### Note on rendering
+
+LeRobot uses MuJoCo for simulation. You need to set the rendering backend before training or evaluation:
+
+- `export MUJOCO_GL=egl` → for headless servers (e.g. HPC, cloud)
+
+## Reproducing π₀.₅ results
+
+We reproduce the results of π₀.₅ on the LIBERO benchmark using the LeRobot implementation. We take the Physical Intelligence LIBERO base model (`pi05_libero`) and finetune it for an additional 6k steps in bfloat16, with a batch size of 256 on 8 H100 GPUs, using the [HuggingFace LIBERO dataset](https://huggingface.co/datasets/HuggingFaceVLA/libero).
+
+The finetuned model can be found here:
+
+- **π₀.₅ LIBERO**: [lerobot/pi05_libero_finetuned](https://huggingface.co/lerobot/pi05_libero_finetuned)
+
+We then evaluate the finetuned model using the LeRobot LIBERO implementation, by running the following command:
+
+```bash
+lerobot-eval \
+    --env.type=libero \
+    --env.task=libero_spatial,libero_object,libero_goal,libero_10 \
+    --eval.batch_size=1 \
+    --eval.n_episodes=10 \
+    --policy.path=lerobot/pi05_libero_finetuned \
+    --policy.n_action_steps=10 \
+    --output_dir=./eval_logs/ \
+    --env.max_parallel_tasks=1
+```
+
+**Note:** We set `n_action_steps=10`, similar to the original OpenPI implementation.
+ +### Results + +We obtain the following results on the LIBERO benchmark: + +| Model | LIBERO Spatial | LIBERO Object | LIBERO Goal | LIBERO 10 | Average | +| -------- | -------------- | ------------- | ----------- | --------- | -------- | +| **π₀.₅** | 97.0 | 99.0 | 98.0 | 96.0 | **97.5** | + +These results are consistent with the original [results](https://github.com/Physical-Intelligence/openpi/tree/main/examples/libero#results) reported by Physical Intelligence: + +| Model | LIBERO Spatial | LIBERO Object | LIBERO Goal | LIBERO 10 | Average | +| -------- | -------------- | ------------- | ----------- | --------- | --------- | +| **π₀.₅** | 98.8 | 98.2 | 98.0 | 92.4 | **96.85** | diff --git a/docs/source/metaworld.mdx b/docs/source/metaworld.mdx new file mode 100644 index 0000000000000000000000000000000000000000..205cd6db44e6e7e925fc834c7099f51d8175d9ce --- /dev/null +++ b/docs/source/metaworld.mdx @@ -0,0 +1,80 @@ +# Meta-World + +Meta-World is a well-designed, open-source simulation benchmark for multi-task and meta reinforcement learning in continuous-control robotic manipulation. It gives researchers a shared, realistic playground to test whether algorithms can _learn many different tasks_ and _generalize quickly to new ones_ — two central challenges for real-world robotics. + +- 📄 [MetaWorld paper](https://arxiv.org/pdf/1910.10897) +- 💻 [Original MetaWorld repo](https://github.com/Farama-Foundation/Metaworld) + +![MetaWorld MT10 demo](https://meta-world.github.io/figures/ml45.gif) + +## Why Meta-World matters + +- **Diverse, realistic tasks.** Meta-World bundles a large suite of simulated manipulation tasks (50 in the MT50 suite) using everyday objects and a common tabletop Sawyer arm. This diversity exposes algorithms to a wide variety of dynamics, contacts and goal specifications while keeping a consistent control and observation structure. +- **Focus on generalization and multi-task learning.** By evaluating across task distributions that share structure but differ in goals and objects, Meta-World reveals whether an agent truly learns transferable skills rather than overfitting to a narrow task. +- **Standardized evaluation protocol.** It provides clear evaluation modes and difficulty splits, so different methods can be compared fairly across easy, medium, hard and very-hard regimes. +- **Empirical insight.** Past evaluations on Meta-World show impressive progress on some fronts, but also highlight that current multi-task and meta-RL methods still struggle with large, diverse task sets. That gap points to important research directions. + +## What it enables in LeRobot + +In LeRobot, you can evaluate any policy or vision-language-action (VLA) model on Meta-World tasks and get a clear success-rate measure. The integration is designed to be straightforward: + +- We provide a LeRobot-ready dataset for Meta-World (MT50) on the HF Hub: `https://huggingface.co/datasets/lerobot/metaworld_mt50`. + - This dataset is formatted for the MT50 evaluation that uses all 50 tasks (the most challenging multi-task setting). + - MT50 gives the policy a one-hot task vector and uses fixed object/goal positions for consistency. + +- Task descriptions and the exact keys required for evaluation are available in the repo/dataset — use these to ensure your policy outputs the right success signals. 
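+
+Before training, you can peek at the MT50 dataset mentioned above by loading it like any other LeRobot dataset. A minimal sketch; the accessors follow the `LeRobotDataset` API used throughout these docs, and `meta.tasks` (the per-task descriptions) is an assumption about the metadata layout:
+
+```python
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+
+# LeRobot-ready Meta-World MT50 dataset hosted on the Hub
+dataset = LeRobotDataset("lerobot/metaworld_mt50")
+
+print(dataset.num_episodes)  # episodes across all 50 tasks
+print(dataset.meta.tasks)    # task descriptions used for conditioning
+
+sample = dataset[0]
+print(sample.keys())         # observation/action keys your policy must expect
+```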
+ +## Quick start, train a SmolVLA policy on Meta-World + +Example command to train a SmolVLA policy on a subset of tasks: + +```bash +lerobot-train \ + --policy.type=smolvla \ + --policy.repo_id=${HF_USER}/metaworld-test \ + --policy.load_vlm_weights=true \ + --dataset.repo_id=lerobot/metaworld_mt50 \ + --env.type=metaworld \ + --env.task=assembly-v3,dial-turn-v3,handle-press-side-v3 \ + --output_dir=./outputs/ \ + --steps=100000 \ + --batch_size=4 \ + --eval.batch_size=1 \ + --eval.n_episodes=1 \ + --eval_freq=1000 +``` + +Notes: + +- `--env.task` accepts explicit task lists (comma separated) or difficulty groups (e.g., `env.task="hard"`). +- Adjust `batch_size`, `steps`, and `eval_freq` to match your compute budget. +- **Gymnasium Assertion Error**: if you encounter an error like + `AssertionError: ['human', 'rgb_array', 'depth_array']` when running MetaWorld environments, this comes from a mismatch between MetaWorld and your Gymnasium version. + We recommend using: + +```bash + pip install "gymnasium==1.1.0" +``` + +to ensure proper compatibility. + +## Quick start — evaluate a trained policy + +To evaluate a trained policy on the Meta-World medium difficulty split: + +```bash +lerobot-eval \ + --policy.path="your-policy-id" \ + --env.type=metaworld \ + --env.task=medium \ + --eval.batch_size=1 \ + --eval.n_episodes=2 +``` + +This will run episodes and return per-task success rates using the standard Meta-World evaluation keys. + +## Practical tips + +- If you care about generalization, run on the full MT50 suite — it’s intentionally challenging and reveals strengths/weaknesses better than a few narrow tasks. +- Use the one-hot task conditioning for multi-task training (MT10 / MT50 conventions) so policies have explicit task context. +- Inspect the dataset task descriptions and the `info["is_success"]` keys when writing post-processing or logging so your success metrics line up with the benchmark. diff --git a/docs/source/multi_gpu_training.mdx b/docs/source/multi_gpu_training.mdx new file mode 100644 index 0000000000000000000000000000000000000000..af89a4a188d53987f8beee626a96e99530c2928e --- /dev/null +++ b/docs/source/multi_gpu_training.mdx @@ -0,0 +1,125 @@ +# Multi-GPU Training + +This guide shows you how to train policies on multiple GPUs using [Hugging Face Accelerate](https://huggingface.co/docs/accelerate). + +## Installation + +First, ensure you have accelerate installed: + +```bash +pip install accelerate +``` + +## Training with Multiple GPUs + +You can launch training in two ways: + +### Option 1: Without config (specify parameters directly) + +You can specify all parameters directly in the command without running `accelerate config`: + +```bash +accelerate launch \ + --multi_gpu \ + --num_processes=2 \ + $(which lerobot-train) \ + --dataset.repo_id=${HF_USER}/my_dataset \ + --policy.type=act \ + --policy.repo_id=${HF_USER}/my_trained_policy \ + --output_dir=outputs/train/act_multi_gpu \ + --job_name=act_multi_gpu \ + --wandb.enable=true +``` + +**Key accelerate parameters:** + +- `--multi_gpu`: Enable multi-GPU training +- `--num_processes=2`: Number of GPUs to use +- `--mixed_precision=fp16`: Use fp16 mixed precision (or `bf16` if supported) + +### Option 2: Using accelerate config + +If you prefer to save your configuration, you can optionally configure accelerate for your hardware setup by running: + +```bash +accelerate config +``` + +This interactive setup will ask you questions about your training environment (number of GPUs, mixed precision settings, etc.) 
and saves the configuration for future use. For a simple multi-GPU setup on a single machine, you can use these recommended settings: + +- Compute environment: This machine +- Number of machines: 1 +- Number of processes: (number of GPUs you want to use) +- GPU ids to use: (leave empty to use all) +- Mixed precision: fp16 or bf16 (recommended for faster training) + +Then launch training with: + +```bash +accelerate launch $(which lerobot-train) \ + --dataset.repo_id=${HF_USER}/my_dataset \ + --policy.type=act \ + --policy.repo_id=${HF_USER}/my_trained_policy \ + --output_dir=outputs/train/act_multi_gpu \ + --job_name=act_multi_gpu \ + --wandb.enable=true +``` + +## How It Works + +When you launch training with accelerate: + +1. **Automatic detection**: LeRobot automatically detects if it's running under accelerate +2. **Data distribution**: Your batch is automatically split across GPUs +3. **Gradient synchronization**: Gradients are synchronized across GPUs during backpropagation +4. **Single process logging**: Only the main process logs to wandb and saves checkpoints + +## Learning Rate and Training Steps Scaling + +**Important:** LeRobot does **NOT** automatically scale learning rates or training steps based on the number of GPUs. This gives you full control over your training hyperparameters. + +### Why No Automatic Scaling? + +Many distributed training frameworks automatically scale the learning rate by the number of GPUs (e.g., `lr = base_lr × num_gpus`). +However, LeRobot keeps the learning rate exactly as you specify it. + +### When and How to Scale + +If you want to scale your hyperparameters when using multiple GPUs, you should do it manually: + +**Learning Rate Scaling:** + +```bash +# Example: 2 GPUs with linear LR scaling +# Base LR: 1e-4, with 2 GPUs -> 2e-4 +accelerate launch --num_processes=2 $(which lerobot-train) \ + --optimizer.lr=2e-4 \ + --dataset.repo_id=lerobot/pusht \ + --policy=act +``` + +**Training Steps Scaling:** + +Since the effective batch size `bs` increases with multiple GPUs (batch_size × num_gpus), you may want to reduce the number of training steps proportionally: + +```bash +# Example: 2 GPUs with effective batch size 2x larger +# Original: batch_size=8, steps=100000 +# With 2 GPUs: batch_size=8 (16 in total), steps=50000 +accelerate launch --num_processes=2 $(which lerobot-train) \ + --batch_size=8 \ + --steps=50000 \ + --dataset.repo_id=lerobot/pusht \ + --policy=act +``` + +## Notes + +- The `--policy.use_amp` flag in `lerobot-train` is only used when **not** running with accelerate. When using accelerate, mixed precision is controlled by accelerate's configuration. +- Training logs, checkpoints, and hub uploads are only done by the main process to avoid conflicts. Non-main processes have console logging disabled to prevent duplicate output. +- The effective batch size is `batch_size × num_gpus`. If you use 4 GPUs with `--batch_size=8`, your effective batch size is 32. +- Learning rate scheduling is handled correctly across multiple processes—LeRobot sets `step_scheduler_with_optimizer=False` to prevent accelerate from adjusting scheduler steps based on the number of processes. +- When saving or pushing models, LeRobot automatically unwraps the model from accelerate's distributed wrapper to ensure compatibility. +- WandB integration automatically initializes only on the main process, preventing multiple runs from being created. 
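+
+To make the scaling arithmetic above concrete, here is a small illustrative helper (not part of LeRobot; linear scaling is just one common convention):
+
+```python
+def scale_hparams(base_lr: float, base_steps: int, batch_size: int, num_gpus: int):
+    """Linear scaling: the LR grows with the effective batch size,
+    while the number of optimizer steps shrinks proportionally."""
+    effective_batch_size = batch_size * num_gpus
+    scaled_lr = base_lr * num_gpus
+    scaled_steps = base_steps // num_gpus
+    return effective_batch_size, scaled_lr, scaled_steps
+
+
+# The 2-GPU example from the commands above
+print(scale_hparams(base_lr=1e-4, base_steps=100_000, batch_size=8, num_gpus=2))
+# -> (16, 0.0002, 50000)
+```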
+
+For more advanced configurations and troubleshooting, see the [Accelerate documentation](https://huggingface.co/docs/accelerate). If you want to learn more about how to train on a large number of GPUs, check out this awesome guide: [Ultrascale Playbook](https://huggingface.co/spaces/nanotron/ultrascale-playbook).
diff --git a/docs/source/notebooks.mdx b/docs/source/notebooks.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..34b45f80f2d83391b033082e5cb609ca7ed666ec
--- /dev/null
+++ b/docs/source/notebooks.mdx
@@ -0,0 +1,29 @@
+# 🤗 LeRobot Notebooks
+
+This repository contains example notebooks for using LeRobot. These notebooks demonstrate how to train policies on real or simulation datasets using standardized policies.
+
+---
+
+### Training ACT
+
+[ACT](https://huggingface.co/papers/2304.13705) (Action Chunking Transformer) is a transformer-based policy architecture for imitation learning that processes robot states and camera inputs to generate smooth, chunked action sequences.
+
+We provide a ready-to-run Google Colab notebook to help you train ACT policies using datasets from the Hugging Face Hub, with optional logging to Weights & Biases.
+
+| Notebook | Colab |
+| :------- | :---- |
+| [Train ACT with LeRobot](https://github.com/huggingface/notebooks/blob/main/lerobot/training-act.ipynb) | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/lerobot/training-act.ipynb) |
+
+Expected training time for 100k steps: ~1.5 hours on an NVIDIA A100 GPU with a batch size of `64`.
+
+### Training SmolVLA
+
+[SmolVLA](https://huggingface.co/papers/2506.01844) is a small but efficient Vision-Language-Action model. It is compact, with 450M parameters, and was developed by Hugging Face.
+
+We provide a ready-to-run Google Colab notebook to help you train SmolVLA policies using datasets from the Hugging Face Hub, with optional logging to Weights & Biases.
+
+| Notebook | Colab |
+| :------- | :---- |
+| [Train SmolVLA with LeRobot](https://github.com/huggingface/notebooks/blob/main/lerobot/training-smolvla.ipynb) | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/lerobot/training-smolvla.ipynb) |
+
+Expected training time for 20k steps: ~5 hours on an NVIDIA A100 GPU with a batch size of `64`.
diff --git a/docs/source/phone_teleop.mdx b/docs/source/phone_teleop.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..30a98ca7742fcb88a3459931f916a37fc8b3f917
--- /dev/null
+++ b/docs/source/phone_teleop.mdx
@@ -0,0 +1,191 @@
+# Phone
+
+Use your phone (iOS or Android) to control your robot.
+
+**In this guide you'll learn:**
+
+- How to connect an iOS/Android phone
+- How phone pose is mapped to robot end‑effector (EE) targets
+- How to tweak safety limits, gripper control, and IK settings
+
+To use your phone to control your robot, install the relevant dependencies with:
+
+```bash
+pip install lerobot[phone]
+```
+
+## Get started
+
+### Supported platforms
+
+- iOS: Uses the HEBI Mobile I/O app (ARKit pose + buttons). Download the app first and open it; the examples will discover it on your network and stream the phone pose and inputs.
+- Android: Uses the `teleop` package (WebXR). When you start the Python process, it prints a local URL. Open the link on your phone, tap Start, then use Move to stream pose.
+
+Links:
+
+- Android WebXR library: [`teleop` on PyPI](https://pypi.org/project/teleop/)
+- iOS app: [HEBI Mobile I/O](https://docs.hebi.us/tools.html#mobile-io)
+
+### Phone orientation and controls
+
+- Orientation: hold the phone with the screen facing up and the top edge pointing in the same direction as the robot gripper. This ensures calibration aligns the phone’s frame with the robot frame so motion feels natural; see the image below for reference.
+- Enable/disable:
+  - iOS: Hold `B1` to enable teleoperation, release to stop. The first press captures a reference pose.
+  - Android: Press and hold the `Move` button, release to stop. The first press captures a reference pose.
+- Gripper control:
+  - iOS: Analog input `A3` controls the gripper as a velocity input.
+  - Android: Buttons `A` and `B` act like increment/decrement (A opens, B closes). You can tune the velocity in the `GripperVelocityToJoint` step.
+
+Phone teleop orientation
+
+### Step 1: Choose the platform
+
+Modify the examples to use `PhoneOS.IOS` or `PhoneOS.ANDROID` in `PhoneConfig`. The API is identical across platforms; only the input source differs. All examples are under `examples/` and have `phone_so100_*.py` variants.
+
+Teleoperation example:
+
+```examples/phone_so100_teleop.py
+from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS
+
+teleop_config = PhoneConfig(phone_os=PhoneOS.IOS)  # or PhoneOS.ANDROID
+teleop_device = Phone(teleop_config)
+```
+
+### Step 2: Connect and calibrate
+
+When `Phone(teleop_config)` is created and `connect()` is called, calibration is prompted automatically. Hold the phone in the orientation described above, then:
+
+- iOS: press and hold `B1` to capture the reference pose.
+- Android: press the `Move` button on the WebXR page to capture the reference pose.
+
+Why calibrate? We capture the current pose so subsequent poses are expressed in a robot-aligned frame. When you again press the button to enable control, the position is recaptured to avoid drift from the phone being repositioned while control was disabled.
+
+### Step 3: Run an example
+
+Run one of the example scripts to teleoperate, record a dataset, replay a dataset or evaluate a policy.
+
+All scripts assume you configured your robot (e.g., SO-100 follower) and set the correct serial port.
+
+Additionally you need to **copy the URDF of the robot to the examples folder**.
For the examples in this tutorial (using SO100/SO101) it is highly recommended to use the URDF in the [SO-ARM100 repo](https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf).
+
+- Run this example to teleoperate:
+
+  ```bash
+  python examples/phone_to_so100/teleoperate.py
+  ```
+
+After running the example:
+
+- Android: after starting the script, open the printed local URL on your phone, tap Start, then press and hold Move.
+- iOS: open HEBI Mobile I/O first; B1 enables motion. A3 controls the gripper.
+
+Additionally you can customize the mapping or safety limits by editing the processor steps shown in the examples. You can also remap inputs (e.g., use a different analog input) or adapt the pipeline to other robots (e.g., LeKiwi) by modifying the input and kinematics steps. More about this in the [Processors for Robots and Teleoperators](./processors_robots_teleop) guide.
+
+- Run this example to record a dataset, which saves absolute end-effector observations and actions:
+
+  ```bash
+  python examples/phone_to_so100/record.py
+  ```
+
+- Run this example to replay recorded episodes:
+
+  ```bash
+  python examples/phone_to_so100/replay.py
+  ```
+
+- Run this example to evaluate a pretrained policy:
+
+  ```bash
+  python examples/phone_to_so100/evaluate.py
+  ```
+
+### Important pipeline steps and options
+
+- Kinematics are used in multiple steps. We use [Placo](https://github.com/Rhoban/placo), a wrapper around Pinocchio, for handling our kinematics. We construct the kinematics object by passing the robot's URDF and target frame. We set `target_frame_name` to the gripper frame.
+
+  ```examples/phone_to_so100/teleoperate.py
+  kinematics_solver = RobotKinematics(
+      urdf_path="./SO101/so101_new_calib.urdf",
+      target_frame_name="gripper_frame_link",
+      joint_names=list(robot.bus.motors.keys()),
+  )
+  ```
+
+- The `MapPhoneActionToRobotAction` step converts the calibrated phone pose and inputs into target deltas and gripper commands; its outputs are shown below.
+
+  ```src/lerobot/teleoperators/phone/phone_processor.py
+  action["enabled"] = enabled
+  action["target_x"] = -pos[1] if enabled else 0.0
+  action["target_y"] = pos[0] if enabled else 0.0
+  action["target_z"] = pos[2] if enabled else 0.0
+  action["target_wx"] = rotvec[1] if enabled else 0.0
+  action["target_wy"] = rotvec[0] if enabled else 0.0
+  action["target_wz"] = -rotvec[2] if enabled else 0.0
+  action["gripper_vel"] = gripper_vel  # Still send gripper action when disabled
+  ```
+
+- The `EEReferenceAndDelta` step converts target deltas to an absolute desired EE pose, storing a reference on enable. The `end_effector_step_sizes` are the step sizes for the EE pose and can be modified to change the motion speed.
+
+  ```examples/phone_to_so100/teleoperate.py
+  EEReferenceAndDelta(
+      kinematics=kinematics_solver,
+      end_effector_step_sizes={"x": 0.5, "y": 0.5, "z": 0.5},
+      motor_names=list(robot.bus.motors.keys()),
+      use_latched_reference=True,
+  ),
+  ```
+
+- The `EEBoundsAndSafety` step clamps EE motion to a workspace and checks for large EE step jumps to ensure safety. The `end_effector_bounds` are the bounds for the EE pose and can be modified to change the workspace. `max_ee_step_m` is the per-step limit for the EE pose and can be modified to change the safety limits.
+
+  ```examples/phone_to_so100/teleoperate.py
+  EEBoundsAndSafety(
+      end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]},
+      max_ee_step_m=0.10,
+  )
+  ```
+
+- The `GripperVelocityToJoint` step turns a velocity‑like gripper input into an absolute gripper position using the current measured state. The `speed_factor` is the factor by which the velocity is multiplied.
+
+  ```examples/phone_to_so100/teleoperate.py
+  GripperVelocityToJoint(speed_factor=20.0)
+  ```
+
+#### Different IK initial guesses
+
+The kinematics steps can use different IK initial guesses: either the current measured joints or the previous IK solution.
+
+- Closed loop (used in record/eval): sets `initial_guess_current_joints=True` so IK starts from the measured joints each frame.
+
+  ```examples/phone_to_so100/record.py
+  InverseKinematicsEEToJoints(
+      kinematics=kinematics_solver,
+      motor_names=list(robot.bus.motors.keys()),
+      initial_guess_current_joints=True,  # closed loop
+  )
+  ```
+
+- Open loop (used in replay): sets `initial_guess_current_joints=False` so IK continues from the previous IK solution rather than the measured state. This preserves action stability when we replay without feedback.
+
+  ```examples/phone_to_so100/replay.py
+  InverseKinematicsEEToJoints(
+      kinematics=kinematics_solver,
+      motor_names=list(robot.bus.motors.keys()),
+      initial_guess_current_joints=False,  # open loop
+  )
+  ```
+
+### Pipeline steps explained
+
+- MapPhoneActionToRobotAction: converts calibrated phone pose and inputs into target deltas and a gripper command. Motion is gated by an enable signal (B1 on iOS, Move on Android).
+- EEReferenceAndDelta: latches a reference EE pose on enable and combines it with target deltas to produce an absolute desired EE pose each frame. When disabled, it keeps sending the last commanded pose.
+- EEBoundsAndSafety: clamps the EE pose to a workspace and rate‑limits jumps for safety. Also declares `action.ee.*` features.
+- InverseKinematicsEEToJoints: turns an EE pose into joint positions with IK. `initial_guess_current_joints=True` is recommended for closed‑loop control; set it to `False` for open‑loop replay for stability.
+- GripperVelocityToJoint: integrates a velocity‑like gripper input into an absolute gripper position using the current measured state.
+- ForwardKinematicsJointsToEE: computes `observation.state.ee.*` from observed joints for logging and training on EE state.
+
+### Troubleshooting
+
+- iOS not discovered: ensure HEBI Mobile I/O is open and your laptop/phone are on the same network.
+- Android URL not reachable: check that you opened the URL with `https` rather than `http`, use the exact IP printed by the script, and tell your browser to proceed past the certificate warning.
+- Motion feels inverted: adjust the sign flips in `MapPhoneActionToRobotAction` or swap axes to match your setup.
diff --git a/docs/source/pi0.mdx b/docs/source/pi0.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..bbdfda6d59bdf2680d1e4a958874dd9d76e3fb21
--- /dev/null
+++ b/docs/source/pi0.mdx
@@ -0,0 +1,84 @@
+# π₀ (Pi0)
+
+π₀ is a **Vision-Language-Action model for general robot control**, from Physical Intelligence. The LeRobot implementation is adapted from their open source [OpenPI](https://github.com/Physical-Intelligence/openpi) repository.
+
+## Model Overview
+
+π₀ represents a breakthrough in robotics as the first general-purpose robot foundation model developed by [Physical Intelligence](https://www.physicalintelligence.company/blog/pi0).
Unlike traditional robot programs that are narrow specialists programmed for repetitive motions, π₀ is designed to be a generalist policy that can understand visual inputs, interpret natural language instructions, and control a variety of different robots across diverse tasks. + +### The Vision for Physical Intelligence + +As described by Physical Intelligence, while AI has achieved remarkable success in digital domains, from chess-playing to drug discovery, human intelligence still dramatically outpaces AI in the physical world. To paraphrase Moravec's paradox, winning a game of chess represents an "easy" problem for AI, but folding a shirt or cleaning up a table requires solving some of the most difficult engineering problems ever conceived. π₀ represents a first step toward developing artificial physical intelligence that enables users to simply ask robots to perform any task they want, just like they can with large language models. + +### Architecture and Approach + +π₀ combines several key innovations: + +- **Flow Matching**: Uses a novel method to augment pre-trained VLMs with continuous action outputs via flow matching (a variant of diffusion models) +- **Cross-Embodiment Training**: Trained on data from 8 distinct robot platforms including UR5e, Bimanual UR5e, Franka, Bimanual Trossen, Bimanual ARX, Mobile Trossen, and Mobile Fibocom +- **Internet-Scale Pre-training**: Inherits semantic knowledge from a pre-trained 3B parameter Vision-Language Model +- **High-Frequency Control**: Outputs motor commands at up to 50 Hz for real-time dexterous manipulation + +## Installation Requirements + +1. Install LeRobot by following our [Installation Guide](./installation). +2. Install Pi0 dependencies by running: + + ```bash + pip install -e ".[pi]" + ``` + + > [!NOTE] + > For lerobot 0.4.0, if you want to install pi tag, you will have to do: `pip install "lerobot[pi]@git+https://github.com/huggingface/lerobot.git"`. + > + > This will be solved in the next patch release + +## Training Data and Capabilities + +π₀ is trained on the largest robot interaction dataset to date, combining three key data sources: + +1. **Internet-Scale Pre-training**: Vision-language data from the web for semantic understanding +2. **Open X-Embodiment Dataset**: Open-source robot manipulation datasets +3. 
**Physical Intelligence Dataset**: Large and diverse dataset of dexterous tasks across 8 distinct robots + +## Usage + +To use π₀ in LeRobot, specify the policy type as: + +```python +policy.type=pi0 +``` + +## Training + +For training π₀, you can use the standard LeRobot training script with the appropriate configuration: + +```bash +python src/lerobot/scripts/lerobot_train.py \ + --dataset.repo_id=your_dataset \ + --policy.type=pi0 \ + --output_dir=./outputs/pi0_training \ + --job_name=pi0_training \ + --policy.pretrained_path=lerobot/pi0_base \ + --policy.repo_id=your_repo_id \ + --policy.compile_model=true \ + --policy.gradient_checkpointing=true \ + --policy.dtype=bfloat16 \ + --steps=3000 \ + --policy.device=cuda \ + --batch_size=32 +``` + +### Key Training Parameters + +- **`--policy.compile_model=true`**: Enables model compilation for faster training +- **`--policy.gradient_checkpointing=true`**: Reduces memory usage significantly during training +- **`--policy.dtype=bfloat16`**: Use mixed precision training for efficiency +- **`--batch_size=32`**: Batch size for training, adapt this based on your GPU memory +- **`--policy.pretrained_path=lerobot/pi0_base`**: The base π₀ model you want to finetune, options are: + - [lerobot/pi0_base](https://huggingface.co/lerobot/pi0_base) + - [lerobot/pi0_libero](https://huggingface.co/lerobot/pi0_libero) (specifically trained on the Libero dataset) + +## License + +This model follows the **Apache 2.0 License**, consistent with the original [OpenPI repository](https://github.com/Physical-Intelligence/openpi). diff --git a/docs/source/pi05.mdx b/docs/source/pi05.mdx new file mode 100644 index 0000000000000000000000000000000000000000..04f4c7f82d3863c2e8470e131349b1ab98e23532 --- /dev/null +++ b/docs/source/pi05.mdx @@ -0,0 +1,112 @@ +# π₀.₅ (Pi05) Policy + +π₀.₅ is a **Vision-Language-Action model with open-world generalization**, from Physical Intelligence. The LeRobot implementation is adapted from their open source [OpenPI](https://github.com/Physical-Intelligence/openpi) repository. + +## Model Overview + +π₀.₅ represents a significant evolution from π₀, developed by [Physical Intelligence](https://www.physicalintelligence.company/blog/pi05) to address a big challenge in robotics: **open-world generalization**. While robots can perform impressive tasks in controlled environments, π₀.₅ is designed to generalize to entirely new environments and situations that were never seen during training. + +### The Generalization Challenge + +As Physical Intelligence explains, the fundamental challenge isn't performing tasks of agility or dexterity, but generalization, the ability to correctly perform tasks in new settings with new objects. Consider a robot cleaning different homes: each home has different objects in different places. Generalization must occur at multiple levels: + +- **Physical Level**: Understanding how to pick up a spoon (by the handle) or plate (by the edge), even with unseen objects in cluttered environments +- **Semantic Level**: Understanding task semantics, where to put clothes and shoes (laundry hamper, not on the bed), and what tools are appropriate for cleaning spills +- **Environmental Level**: Adapting to "messy" real-world environments like homes, grocery stores, offices, and hospitals + +### Co-Training on Heterogeneous Data + +The breakthrough innovation in π₀.₅ is **co-training on heterogeneous data sources**. The model learns from: + +1. 
**Multimodal Web Data**: Image captioning, visual question answering, object detection
+2. **Verbal Instructions**: Humans coaching robots through complex tasks step-by-step
+3. **Subtask Commands**: High-level semantic behavior labels (e.g., "pick up the pillow" for an unmade bed)
+4. **Cross-Embodiment Robot Data**: Data from various robot platforms with different capabilities
+5. **Multi-Environment Data**: Static robots deployed across many different homes
+6. **Mobile Manipulation Data**: ~400 hours of mobile robot demonstrations
+
+This diverse training mixture creates a "curriculum" that enables generalization across physical, visual, and semantic levels simultaneously.
+
+## Installation Requirements
+
+1. Install LeRobot by following our [Installation Guide](./installation).
+2. Install Pi0.5 dependencies by running:
+
+   ```bash
+   pip install -e ".[pi]"
+   ```
+
+   > [!NOTE]
+   > For lerobot 0.4.0, if you want to install the `pi` extra, you will have to run: `pip install "lerobot[pi]@git+https://github.com/huggingface/lerobot.git"`.
+   >
+   > This will be solved in the next patch release.
+
+## Usage
+
+To use π₀.₅ in your LeRobot configuration, specify the policy type as:
+
+```python
+policy.type=pi05
+```
+
+## Training
+
+### Training Command Example
+
+Here's a complete training command for finetuning the base π₀.₅ model on your own dataset:
+
+```bash
+python src/lerobot/scripts/lerobot_train.py \
+    --dataset.repo_id=your_dataset \
+    --policy.type=pi05 \
+    --output_dir=./outputs/pi05_training \
+    --job_name=pi05_training \
+    --policy.repo_id=your_repo_id \
+    --policy.pretrained_path=lerobot/pi05_base \
+    --policy.compile_model=true \
+    --policy.gradient_checkpointing=true \
+    --wandb.enable=true \
+    --policy.dtype=bfloat16 \
+    --steps=3000 \
+    --policy.device=cuda \
+    --batch_size=32
+```
+
+### Key Training Parameters
+
+- **`--policy.compile_model=true`**: Enables model compilation for faster training
+- **`--policy.gradient_checkpointing=true`**: Reduces memory usage significantly during training
+- **`--policy.dtype=bfloat16`**: Use mixed precision training for efficiency
+- **`--batch_size=32`**: Batch size for training; adapt this based on your GPU memory
+- **`--policy.pretrained_path=lerobot/pi05_base`**: The base π₀.₅ model you want to finetune, options are:
+  - [lerobot/pi05_base](https://huggingface.co/lerobot/pi05_base)
+  - [lerobot/pi05_libero](https://huggingface.co/lerobot/pi05_libero) (specifically trained on the Libero dataset)
+
+If your dataset was not converted with `quantiles` statistics, you can convert it with the following command:
+
+```bash
+python src/lerobot/datasets/v30/augment_dataset_quantile_stats.py \
+    --repo-id=your_dataset
+```
+
+Or train pi05 with this normalization mapping: `--policy.normalization_mapping='{"ACTION": "MEAN_STD", "STATE": "MEAN_STD", "VISUAL": "IDENTITY"}'`
+
+## Performance Results
+
+### Libero Benchmark Results
+
+π₀.₅ has demonstrated strong performance on the Libero benchmark suite. To compare and test its LeRobot implementation, we finetuned the Libero base model for an additional 6k steps on the Libero dataset and compared the results to the OpenPI reference results.
+
+| Benchmark          | LeRobot Implementation | OpenPI Reference |
+| ------------------ | ---------------------- | ---------------- |
+| **Libero Spatial** | 97.0%                  | 98.8%            |
+| **Libero Object**  | 99.0%                  | 98.2%            |
+| **Libero Goal**    | 98.0%                  | 98.0%            |
+| **Libero 10**      | 96.0%                  | 92.4%            |
+| **Average**        | 97.5%                  | 96.85%           |
+
+These results demonstrate π₀.₅'s strong generalization capabilities across diverse robotic manipulation tasks. To reproduce these results, you can follow the instructions in the [Libero](https://huggingface.co/docs/lerobot/libero) section.
+
+## License
+
+This model follows the **Apache 2.0 License**, consistent with the original [OpenPI repository](https://github.com/Physical-Intelligence/openpi).
diff --git a/docs/source/policy_act_README.md b/docs/source/policy_act_README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ed884402c4c1094c69308aeeeed2ceb577e94628
--- /dev/null
+++ b/docs/source/policy_act_README.md
@@ -0,0 +1,14 @@
+## Paper
+
+https://tonyzhaozh.github.io/aloha
+
+## Citation
+
+```bibtex
+@article{zhao2023learning,
+  title={Learning fine-grained bimanual manipulation with low-cost hardware},
+  author={Zhao, Tony Z and Kumar, Vikash and Levine, Sergey and Finn, Chelsea},
+  journal={arXiv preprint arXiv:2304.13705},
+  year={2023}
+}
+```
diff --git a/docs/source/policy_diffusion_README.md b/docs/source/policy_diffusion_README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b8493afe0dbb363f69cd813f69150b77fc3f44d7
--- /dev/null
+++ b/docs/source/policy_diffusion_README.md
@@ -0,0 +1,14 @@
+## Paper
+
+https://diffusion-policy.cs.columbia.edu
+
+## Citation
+
+```bibtex
+@article{chi2024diffusionpolicy,
+  author = {Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song},
+  title = {Diffusion Policy: Visuomotor Policy Learning via Action Diffusion},
+  journal = {The International Journal of Robotics Research},
+  year = {2024},
+}
+```
diff --git a/docs/source/policy_groot_README.md b/docs/source/policy_groot_README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c2e435d9e7a91c6644cb0e626947ae60f7cf888c
--- /dev/null
+++ b/docs/source/policy_groot_README.md
@@ -0,0 +1,27 @@
+## Research Paper
+
+Paper: https://research.nvidia.com/labs/gear/gr00t-n1_5/
+
+## Repository
+
+Code: https://github.com/NVIDIA/Isaac-GR00T
+
+## Citation
+
+```bibtex
+@inproceedings{gr00tn1_2025,
+  archivePrefix = {arxiv},
+  eprint = {2503.14734},
+  title = {{GR00T} {N1}: An Open Foundation Model for Generalist Humanoid Robots},
+  author = {NVIDIA and Johan Bjorck and Fernando Castañeda and Nikita Cherniadev and Xingye Da and Runyu Ding and Linxi "Jim" Fan and Yu Fang and Dieter Fox and Fengyuan Hu and Spencer Huang and Joel Jang and Zhenyu Jiang and Jan Kautz and Kaushil Kundalia and Lawrence Lao and Zhiqi Li and Zongyu Lin and Kevin Lin and Guilin Liu and Edith Llontop and Loic Magne and Ajay Mandlekar and Avnish Narayan and Soroush Nasiriany and Scott Reed and You Liang Tan and Guanzhi Wang and Zu Wang and Jing Wang and Qi Wang and Jiannan Xiang and Yuqi Xie and Yinzhen Xu and Zhenjia Xu and Seonghyeon Ye and Zhiding Yu and Ao Zhang and Hao Zhang and Yizhou Zhao and Ruijie Zheng and Yuke Zhu},
+  month = {March},
+  year = {2025},
+  booktitle = {ArXiv Preprint},
+}
+```
+
+## Additional Resources
+
+Blog: https://developer.nvidia.com/isaac/gr00t
+
+Hugging Face Model: https://huggingface.co/nvidia/GR00T-N1.5-3B
diff --git 
a/docs/source/policy_smolvla_README.md b/docs/source/policy_smolvla_README.md new file mode 100644 index 0000000000000000000000000000000000000000..2e83a080c19b5c23aa48f80b254cbb28191f3a99 --- /dev/null +++ b/docs/source/policy_smolvla_README.md @@ -0,0 +1,14 @@ +## Paper + +https://arxiv.org/abs/2506.01844 + +## Citation + +```bibtex +@article{shukor2025smolvla, + title={SmolVLA: A Vision-Language-Action Model for Affordable and Efficient Robotics}, + author={Shukor, Mustafa and Aubakirova, Dana and Capuano, Francesco and Kooijmans, Pepijn and Palma, Steven and Zouitine, Adil and Aractingi, Michel and Pascal, Caroline and Russi, Martino and Marafioti, Andres and Alibert, Simon and Cord, Matthieu and Wolf, Thomas and Cadene, Remi}, + journal={arXiv preprint arXiv:2506.01844}, + year={2025} +} +``` diff --git a/docs/source/policy_tdmpc_README.md b/docs/source/policy_tdmpc_README.md new file mode 100644 index 0000000000000000000000000000000000000000..6a9eb295a3ac08f166aeae96209bdf1c7b0c3995 --- /dev/null +++ b/docs/source/policy_tdmpc_README.md @@ -0,0 +1,14 @@ +## Paper + +https://www.nicklashansen.com/td-mpc/ + +## Citation + +```bibtex +@inproceedings{Hansen2022tdmpc, + title={Temporal Difference Learning for Model Predictive Control}, + author={Nicklas Hansen and Xiaolong Wang and Hao Su}, + booktitle={ICML}, + year={2022} +} +``` diff --git a/docs/source/policy_vqbet_README.md b/docs/source/policy_vqbet_README.md new file mode 100644 index 0000000000000000000000000000000000000000..1d1aa29aa65590c3465543aad288a7705151caab --- /dev/null +++ b/docs/source/policy_vqbet_README.md @@ -0,0 +1,14 @@ +## Paper + +https://sjlee.cc/vq-bet/ + +## Citation + +```bibtex +@article{lee2024behavior, + title={Behavior generation with latent actions}, + author={Lee, Seungjae and Wang, Yibin and Etukuru, Haritheja and Kim, H Jin and Shafiullah, Nur Muhammad Mahi and Pinto, Lerrel}, + journal={arXiv preprint arXiv:2403.03181}, + year={2024} +} +``` diff --git a/docs/source/porting_datasets_v3.mdx b/docs/source/porting_datasets_v3.mdx new file mode 100644 index 0000000000000000000000000000000000000000..ff5088e2d702e2b2f6de69b0fb0e98ae4dff524d --- /dev/null +++ b/docs/source/porting_datasets_v3.mdx @@ -0,0 +1,321 @@ +# Porting Large Datasets to LeRobot Dataset v3.0 + +This tutorial explains how to port large-scale robotic datasets to the LeRobot Dataset v3.0 format. We'll use the **DROID 1.0.1** dataset as our primary example, which demonstrates handling multi-terabyte datasets with thousands of shards across SLURM clusters. + +## File Organization: v2.1 vs v3.0 + +Dataset v3.0 fundamentally changes how data is organized and stored: + +**v2.1 Structure (Episode-based)**: + +``` +dataset/ +├── data/chunk-000/episode_000000.parquet +├── data/chunk-000/episode_000001.parquet +├── videos/chunk-000/camera/episode_000000.mp4 +└── meta/episodes.jsonl +``` + +**v3.0 Structure (File-based)**: + +``` +dataset/ +├── data/chunk-000/file-000.parquet # Multiple episodes per file +├── videos/camera/chunk-000/file-000.mp4 # Consolidated video chunks +└── meta/episodes/chunk-000/file-000.parquet # Structured metadata +``` + +This transition from individual episode files to file-based chunks dramatically improves performance and reduces storage overhead. 
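+
+To get a quick feel for what the ported result looks like from Python, you can load the dataset and inspect its chunked metadata. The snippet below is a minimal sketch using the `LeRobotDataset` class and the `dataset.meta.episodes` accessor described in the next section; the exact import path may differ depending on your installed LeRobot version:
+
+```python
+from lerobot.datasets.lerobot_dataset import LeRobotDataset  # import path assumed
+
+# Load a v3.0 dataset from the Hub (or a local copy).
+dataset = LeRobotDataset("your_id/droid_1.0.1")
+
+# Per-episode metadata now lives in chunked parquet files,
+# exposed as a pandas DataFrame instead of JSON Lines.
+print(dataset.meta.episodes.head())
+
+# Frames are indexed globally across the consolidated files,
+# so you no longer address individual episode files directly.
+frame = dataset[0]
+print(frame["observation.state"].shape)
+```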
+ +## What's New in Dataset v3.0 + +Dataset v3.0 introduces significant improvements for handling large datasets: + +### 🏗️ **Enhanced File Organization** + +- **File-based structure**: Episodes are now grouped into chunked files rather than individual episode files +- **Configurable file sizes**: for data and video files +- **Improved storage efficiency**: Better compression and reduced overhead + +### 📊 **Modern Metadata Management** + +- **Parquet-based metadata**: Replaced JSON Lines with efficient parquet format +- **Structured episode access**: Direct pandas DataFrame access via `dataset.meta.episodes` +- **Per-episode statistics**: Enhanced statistics tracking at episode level + +### 🚀 **Performance Enhancements** + +- **Memory-mapped access**: Improved RAM usage through PyArrow memory mapping +- **Faster loading**: Significantly reduced dataset initialization time +- **Better scalability**: Designed for datasets with millions of episodes + +## Prerequisites + +Before porting large datasets, ensure you have: + +- **LeRobot installed** with v3.0 support. Follow our [Installation Guide](./installation). +- **Sufficient storage**: Raw datasets can be very large (e.g., DROID requires 2TB) +- **Cluster access** (recommended for large datasets): SLURM or similar job scheduler +- **Dataset-specific dependencies**: For DROID, you'll need TensorFlow Dataset utilities + +## Understanding the DROID Dataset + +[DROID 1.0.1](https://droid-dataset.github.io/droid/the-droid-dataset) is an excellent example of a large-scale robotic dataset: + +- **Size**: 1.7TB (RLDS format), 8.7TB (raw data) +- **Structure**: 2048 pre-defined TensorFlow dataset shards +- **Content**: 76,000+ robot manipulation trajectories from Franka Emika Panda robots +- **Scope**: Real-world manipulation tasks across multiple environments and objects +- **Format**: Originally in TensorFlow Records/RLDS format, requiring conversion to LeRobot format +- **Hosting**: Google Cloud Storage with public access via `gsutil` + +The dataset contains diverse manipulation demonstrations with: + +- Multiple camera views (wrist camera, exterior cameras) +- Natural language task descriptions +- Robot proprioceptive state and actions +- Success/failure annotations + +### DROID Features Schema + +```python +DROID_FEATURES = { + # Episode markers + "is_first": {"dtype": "bool", "shape": (1,)}, + "is_last": {"dtype": "bool", "shape": (1,)}, + "is_terminal": {"dtype": "bool", "shape": (1,)}, + + # Language instructions + "language_instruction": {"dtype": "string", "shape": (1,)}, + "language_instruction_2": {"dtype": "string", "shape": (1,)}, + "language_instruction_3": {"dtype": "string", "shape": (1,)}, + + # Robot state + "observation.state.gripper_position": {"dtype": "float32", "shape": (1,)}, + "observation.state.cartesian_position": {"dtype": "float32", "shape": (6,)}, + "observation.state.joint_position": {"dtype": "float32", "shape": (7,)}, + + # Camera observations + "observation.images.wrist_left": {"dtype": "image"}, + "observation.images.exterior_1_left": {"dtype": "image"}, + "observation.images.exterior_2_left": {"dtype": "image"}, + + # Actions + "action.gripper_position": {"dtype": "float32", "shape": (1,)}, + "action.cartesian_position": {"dtype": "float32", "shape": (6,)}, + "action.joint_position": {"dtype": "float32", "shape": (7,)}, + + # Standard LeRobot format + "observation.state": {"dtype": "float32", "shape": (8,)}, # joints + gripper + "action": {"dtype": "float32", "shape": (8,)}, # joints + gripper +} +``` + +## 
Approach 1: Single Computer Porting + +### Step 1: Install Dependencies + +For DROID specifically: + +```bash +pip install tensorflow +pip install tensorflow_datasets +``` + +For other datasets, install the appropriate readers for your source format. + +### Step 2: Download Raw Data + +Download DROID from Google Cloud Storage using `gsutil`: + +```bash +# Install Google Cloud SDK if not already installed +# https://cloud.google.com/sdk/docs/install + +# Download the full RLDS dataset (1.7TB) +gsutil -m cp -r gs://gresearch/robotics/droid/1.0.1 /your/data/ + +# Or download just the 100-episode sample (2GB) for testing +gsutil -m cp -r gs://gresearch/robotics/droid_100 /your/data/ +``` + +> [!WARNING] +> Large datasets require substantial time and storage: +> +> - **Full DROID (1.7TB)**: Several days to download depending on bandwidth +> - **Processing time**: 7+ days for local porting of full dataset +> - **Upload time**: 3+ days to push to Hugging Face Hub +> - **Local storage**: ~400GB for processed LeRobot format + +### Step 3: Port the Dataset + +```bash +python examples/port_datasets/port_droid.py \ + --raw-dir /your/data/droid/1.0.1 \ + --repo-id your_id/droid_1.0.1 \ + --push-to-hub +``` + +### Development and Testing + +For development, you can port a single shard: + +```bash +python examples/port_datasets/port_droid.py \ + --raw-dir /your/data/droid/1.0.1 \ + --repo-id your_id/droid_1.0.1_test \ + --num-shards 2048 \ + --shard-index 0 +``` + +This approach works for smaller datasets or testing, but large datasets require cluster computing. + +## Approach 2: SLURM Cluster Porting (Recommended) + +For large datasets like DROID, parallel processing across multiple nodes dramatically reduces processing time. + +### Step 1: Install Cluster Dependencies + +```bash +pip install datatrove # Hugging Face's distributed processing library +``` + +### Step 2: Configure Your SLURM Environment + +Find your partition information: + +```bash +sinfo --format="%R" # List available partitions +sinfo -N -p your_partition -h -o "%N cpus=%c mem=%m" # Check resources +``` + +Choose a **CPU partition** - no GPU needed for dataset porting. + +### Step 3: Launch Parallel Porting Jobs + +```bash +python examples/port_datasets/slurm_port_shards.py \ + --raw-dir /your/data/droid/1.0.1 \ + --repo-id your_id/droid_1.0.1 \ + --logs-dir /your/logs \ + --job-name port_droid \ + --partition your_partition \ + --workers 2048 \ + --cpus-per-task 8 \ + --mem-per-cpu 1950M +``` + +#### Parameter Guidelines + +- **`--workers`**: Number of parallel jobs (max 2048 for DROID's shard count) +- **`--cpus-per-task`**: 8 CPUs recommended for frame encoding parallelization +- **`--mem-per-cpu`**: ~16GB total RAM (8×1950M) for loading raw frames + +> [!TIP] +> Start with fewer workers (e.g., 100) to test your cluster configuration before launching thousands of jobs. 
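+
+If it helps to reason about these numbers before launching, the back-of-the-envelope sketch below shows how the shard count, worker count, and memory settings relate. It is purely illustrative; the actual scheduling is handled by the porting script and datatrove:
+
+```python
+# Illustrative sizing for the SLURM launch (not the real scheduling logic).
+NUM_SHARDS = 2048        # DROID 1.0.1 ships 2048 pre-defined shards
+WORKERS = 100            # start small to validate the cluster config, then scale up
+CPUS_PER_TASK = 8
+MEM_PER_CPU_MB = 1950
+
+shards_per_worker = -(-NUM_SHARDS // WORKERS)             # ceil division -> about 21 shards each
+ram_per_task_gb = CPUS_PER_TASK * MEM_PER_CPU_MB / 1024   # about 15 GB, the "~16GB total RAM" guideline
+print(f"{shards_per_worker} shards per worker, {ram_per_task_gb:.1f} GB RAM per task")
+```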
+ +### Step 4: Monitor Progress + +Check running jobs: + +```bash +squeue -u $USER +``` + +Monitor overall progress: + +```bash +jobs_status /your/logs +``` + +Inspect individual job logs: + +```bash +less /your/logs/port_droid/slurm_jobs/JOB_ID_WORKER_ID.out +``` + +Debug failed jobs: + +```bash +failed_logs /your/logs/port_droid +``` + +### Step 5: Aggregate Shards + +Once all porting jobs complete: + +```bash +python examples/port_datasets/slurm_aggregate_shards.py \ + --repo-id your_id/droid_1.0.1 \ + --logs-dir /your/logs \ + --job-name aggr_droid \ + --partition your_partition \ + --workers 2048 \ + --cpus-per-task 8 \ + --mem-per-cpu 1950M +``` + +### Step 6: Upload to Hub + +```bash +python examples/port_datasets/slurm_upload.py \ + --repo-id your_id/droid_1.0.1 \ + --logs-dir /your/logs \ + --job-name upload_droid \ + --partition your_partition \ + --workers 50 \ + --cpus-per-task 4 \ + --mem-per-cpu 1950M +``` + +> [!NOTE] +> Upload uses fewer workers (50) since it's network-bound rather than compute-bound. + +## Dataset v3.0 File Structure + +Your completed dataset will have this modern structure: + +``` +dataset/ +├── meta/ +│ ├── episodes/ +│ │ └── chunk-000/ +│ │ └── file-000.parquet # Episode metadata +│ ├── tasks.parquet # Task definitions +│ ├── stats.json # Aggregated statistics +│ └── info.json # Dataset information +├── data/ +│ └── chunk-000/ +│ └── file-000.parquet # Consolidated episode data +└── videos/ + └── camera_key/ + └── chunk-000/ + └── file-000.mp4 # Consolidated video files +``` + +This replaces the old episode-per-file structure with efficient, optimally-sized chunks. + +## Migrating from Dataset v2.1 + +If you have existing datasets in v2.1 format, use the migration tool: + +```bash +python src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py \ + --repo-id your_id/existing_dataset +``` + +This automatically: + +- Converts file structure to v3.0 format +- Migrates metadata from JSON Lines to parquet +- Aggregates statistics and creates per-episode stats +- Updates version information + +## Performance Benefits + +Dataset v3.0 provides significant improvements for large datasets: + +- **Faster loading**: 3-5x reduction in initialization time +- **Memory efficiency**: Better RAM usage through memory mapping +- **Scalable processing**: Handles millions of episodes efficiently +- **Storage optimization**: Reduced file count and improved compression diff --git a/docs/source/processors_robots_teleop.mdx b/docs/source/processors_robots_teleop.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b4aeb3ed58822f2c1a8bd83f456ee4453923092b --- /dev/null +++ b/docs/source/processors_robots_teleop.mdx @@ -0,0 +1,151 @@ +# Processors for Robots and Teleoperators + +This guide shows how to build and modify processing pipelines that connect teleoperators (e.g., phone) to robots and datasets. Pipelines standardize conversions between different action/observation spaces so you can swap teleops and robots without rewriting glue code. + +We use the Phone to SO‑100 follower examples for concreteness, but the same patterns apply to other robots. + +**What you'll learn** + +- Absolute vs. relative EE control: What each means, trade‑offs, and how to choose for your task. +- Three-pipeline pattern: How to map teleop actions → dataset actions → robot commands, and robot observations → dataset observations. +- Adapters (`to_transition` / `to_output`): How these convert raw dicts to `EnvTransition` and back to reduce boilerplate. 
+- Dataset feature contracts: How steps declare features via `transform_features(...)`, and how to aggregate/merge them for recording. +- Choosing a representation: When to store joints, absolute EE poses, or relative EE deltas—and how that affects training. +- Pipeline customization guidance: How to swap robots/URDFs safely and tune bounds, step sizes, and options like IK initialization. + +### Absolute vs relative EE control + +The examples in this guide use absolute end effector (EE) poses because they are easy to reason about. In practice, relative EE deltas or joint position are often preferred as learning features. + +With processors, you choose the learning features you want to use for your policy. This could be joints positions/velocities, absolute EE, or relative EE positions. You can also choose to store other features, such as joint torques, motor currents, etc. + +## Three pipelines + +We often compose three pipelines. Depending on your setup, some can be empty if action and observation spaces already match. +Each of these pipelines handle different conversions between different action and observation spaces. Below is a quick explanation of each pipeline. + +1. Pipeline 1: Teleop action space → dataset action space (phone pose → EE targets) +2. Pipeline 2: Dataset action space → robot command space (EE targets → joints) +3. Pipeline 3: Robot observation space → dataset observation space (joints → EE pose) + +Below is an example of the three pipelines that we use in the phone to SO-100 follower examples: + +```69:90:examples/phone_so100_record.py +phone_to_robot_ee_pose_processor = RobotProcessorPipeline[RobotAction, RobotAction]( # teleop -> dataset action + steps=[ + MapPhoneActionToRobotAction(platform=teleop_config.phone_os), + EEReferenceAndDelta( + kinematics=kinematics_solver, end_effector_step_sizes={"x": 0.5, "y": 0.5, "z": 0.5}, motor_names=list(robot.bus.motors.keys()), + ), + EEBoundsAndSafety( + end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]}, max_ee_step_m=0.20, + ), + GripperVelocityToJoint(), + ], + to_transition=robot_action_to_transition, + to_output=transition_to_robot_action, +) + +robot_ee_to_joints_processor = RobotProcessorPipeline[RobotAction, RobotAction]( # dataset action -> robot + steps=[ + InverseKinematicsEEToJoints( + kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys()), initial_guess_current_joints=True, + ), + ], + to_transition=robot_action_to_transition, + to_output=transition_to_robot_action, +) + +robot_joints_to_ee_pose = RobotProcessorPipeline[RobotObservation, RobotObservation]( # robot obs -> dataset obs + steps=[ + ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys())) + ], + to_transition=observation_to_transition, + to_output=transition_to_observation, +) +``` + +## Why to_transition / to_output + +To convert from robot/teleoperator to pipeline and back, we use the `to_transition` and `to_output` pipeline adapters. +They standardize conversions to reduce boilerplate code, and form the bridge between the robot and teleoperators raw dictionaries and the pipeline’s `EnvTransition` format. +In the phone to SO-100 follower examples we use the following adapters: + +- `robot_action_to_transition`: transforms the teleop action dict to a pipeline transition. +- `transition_to_robot_action`: transforms the pipeline transition to a robot action dict. +- `observation_to_transition`: transforms the robot observation dict to a pipeline transition. 
+- `transition_to_observation`: transforms the pipeline transition to a observation dict. + +Checkout [src/lerobot/processor/converters.py](https://github.com/huggingface/lerobot/blob/main/src/lerobot/processor/converters.py) for more details. + +## Dataset feature contracts + +Dataset features are determined by the keys saved in the dataset. Each step can declare what features it modifies in a contract called `transform_features(...)`. Once you build a processor, the processor can then aggregate all of these features with `aggregate_pipeline_dataset_features()` and merge multiple feature dicts with `combine_feature_dicts(...)`. + +Below is and example of how we declare features with the `transform_features` method in the phone to SO-100 follower examples: + +```src/lerobot/robots/so100_follower/robot_kinematic_processor.py + def transform_features( + self, features: dict[PipelineFeatureType, dict[str, PolicyFeature]] + ) -> dict[PipelineFeatureType, dict[str, PolicyFeature]]: + # We only use the ee pose in the dataset, so we don't need the joint positions + for n in self.motor_names: + features[PipelineFeatureType.ACTION].pop(f"{n}.pos", None) + # We specify the dataset features of this step that we want to be stored in the dataset + for k in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]: + features[PipelineFeatureType.ACTION][f"ee.{k}"] = PolicyFeature( + type=FeatureType.STATE, shape=(1,) + ) + return features +``` + +Here we declare what PolicyFeatures we modify in this step, so we know what features we can expect when we run the processor. These features can then be aggregated and used to create the dataset features. + +Below is an example of how we aggregate and merge features in the phone to SO-100 record example: + +```121:145:examples/phone_so100_record.py +features=combine_feature_dicts( + # Run the feature contract of the pipelines + # This tells you how the features would look like after the pipeline steps + aggregate_pipeline_dataset_features( + pipeline=phone_to_robot_ee_pose_processor, + initial_features=create_initial_features(action=phone.action_features), # <- Action features we can expect, these come from our teleop device (phone) and action processor + use_videos=True, + ), + aggregate_pipeline_dataset_features( + pipeline=robot_joints_to_ee_pose, + initial_features=create_initial_features(observation=robot.observation_features), # <- Observation features we can expect, these come from our robot and observation processor + use_videos=True, + patterns=["observation.state.ee"], # <- Here you could optionally filter the features we want to store in the dataset, with a specific pattern + + ), + ), +``` + +How it works: + +- `aggregate_pipeline_dataset_features(...)`: applies `transform_features` across the pipeline and filters by patterns (images included when `use_videos=True`, and state features included when `patterns` is specified). +- `combine_feature_dicts(...)`: combine multiple feature dicts. +- Recording with `record_loop(...)` uses `build_dataset_frame(...)` to build frames consistent with `dataset.features` before we call `add_frame(...)` to add the frame to the dataset. + +## Guidance when customizing robot pipelines + +You can store any of the following features as your action/observation space: + +- Joint positions +- Absolute EE poses +- Relative EE deltas +- Other features: joint velocity, torques, etc. + +Pick what you want to use for your policy action and observation space and configure/modify the pipelines and steps accordingly. 
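+
+As a concrete illustration of the difference between absolute EE poses and relative EE deltas as learning features, here is a small standalone sketch in plain NumPy (independent of the pipeline classes above) that turns a short sequence of absolute end-effector positions into per-step deltas:
+
+```python
+import numpy as np
+
+# Absolute EE positions (x, y, z) at consecutive timesteps, expressed in the robot base frame.
+abs_ee = np.array([
+    [0.30, 0.00, 0.20],
+    [0.31, 0.00, 0.21],
+    [0.33, 0.01, 0.21],
+])
+
+# Relative EE deltas: each action is expressed with respect to the previous pose
+# rather than the base frame.
+rel_ee = np.diff(abs_ee, axis=0)
+print(rel_ee)
+# [[0.01 0.   0.01]
+#  [0.02 0.01 0.  ]]
+```
+
+Relative deltas are less tied to where the robot happens to be in its workspace, which is one reason they are often preferred as learning features, whereas absolute poses are easier to reason about and replay.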
+
+### Different robots
+
+- You can easily reuse these pipelines with another robot: for example, to use phone teleop with a different robot, modify the examples and swap the robot's `RobotKinematics` (URDF) and `motor_names` for your own. Additionally, you should ensure `target_frame_name` points to your gripper/wrist.
+
+### Safety first
+
+- When changing pipelines, start with tight bounds and implement safety steps when working with real robots.
+- It's advised to start in simulation first and then move to real robots.
+
+That's it! We hope this guide helps you get started with customizing your robot pipelines. If you run into any issues at any point, jump into our [Discord community](https://discord.com/invite/s3KuuzsPFb) for support.
diff --git a/docs/source/reachy2.mdx b/docs/source/reachy2.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..44ac41ff9f1dd46ba65c82ef87c333404207a64f
--- /dev/null
+++ b/docs/source/reachy2.mdx
@@ -0,0 +1,288 @@
+# Reachy 2
+
+Reachy 2 is an open-source humanoid robot made by Pollen Robotics, specifically designed for the development of embodied AI and real-world applications.
+Check out the [Pollen Robotics website](https://www.pollen-robotics.com/reachy/), or access the [Reachy 2 documentation](https://docs.pollen-robotics.com/) for more information on the platform!
+
+## Teleoperate Reachy 2
+
+Currently, there are two ways to teleoperate Reachy 2:
+
+- Pollen Robotics’ VR teleoperation (not included in LeRobot).
+- Robot-to-robot teleoperation (use one Reachy 2 to control another).
+
+## Reachy 2 Simulation
+
+**(Linux only)** You can run Reachy 2 in simulation (Gazebo or MuJoCo) using the provided [Docker image](https://hub.docker.com/r/pollenrobotics/reachy2_core).
+
+1. Install [Docker Engine](https://docs.docker.com/engine/).
+2.
Run (for MuJoCo): + +``` +docker run --rm -it \ + --name reachy \ + --privileged \ + --network host \ + --ipc host \ + --device-cgroup-rule='c 189:* rwm' \ + --group-add audio \ + -e ROS_DOMAIN_ID="$ROS_DOMAIN_ID" \ + -e DISPLAY="$DISPLAY" \ + -e RCUTILS_CONSOLE_OUTPUT_FORMAT="[{severity}]: {message}" \ + -e REACHY2_CORE_SERVICE_FAKE="${REACHY2_CORE_SERVICE_FAKE:-true}" \ + -v /dev:/dev \ + -v "$HOME/.reachy_config":/home/reachy/.reachy_config_override \ + -v "$HOME/.reachy.log":/home/reachy/.ros/log \ + -v /usr/lib/x86_64-linux-gnu:/opt/host-libs \ + --entrypoint /package/launch.sh \ + pollenrobotics/reachy2_core:1.7.5.9_deploy \ + start_rviz:=true start_sdk_server:=true mujoco:=true +``` + +> If MuJoCo runs slowly (low simulation frequency), append `-e LD_LIBRARY_PATH="/opt/host-libs:$LD_LIBRARY_PATH" \` to the previous command to improve performance: +> +> ``` +> docker run --rm -it \ +> --name reachy \ +> --privileged \ +> --network host \ +> --ipc host \ +> --device-cgroup-rule='c 189:* rwm' \ +> --group-add audio \ +> -e ROS_DOMAIN_ID="$ROS_DOMAIN_ID" \ +> -e DISPLAY="$DISPLAY" \ +> -e RCUTILS_CONSOLE_OUTPUT_FORMAT="[{severity}]: {message}" \ +> -e REACHY2_CORE_SERVICE_FAKE="${REACHY2_CORE_SERVICE_FAKE:-true}" \ +> -e LD_LIBRARY_PATH="/opt/host-libs:$LD_LIBRARY_PATH" \ +> -v /dev:/dev \ +> -v "$HOME/.reachy_config":/home/reachy/.reachy_config_override \ +> -v "$HOME/.reachy.log":/home/reachy/.ros/log \ +> -v /usr/lib/x86_64-linux-gnu:/opt/host-libs \ +> --entrypoint /package/launch.sh \ +> pollenrobotics/reachy2_core:1.7.5.9_deploy \ +> start_rviz:=true start_sdk_server:=true mujoco:=true +> ``` + +## Setup + +### Prerequisites + +- On your robot, check the **service images** meet the minimum versions: + - **reachy2-core >= 1.7.5.2** + - **webrtc >= 2.0.1.1** + +Then, if you want to use VR teleoperation: + +- Install the [Reachy 2 teleoperation application](https://docs.pollen-robotics.com/teleoperation/teleoperation-introduction/discover-teleoperation/). + Use version **>=v1.2.0** + +We recommend using two computers: one for teleoperation (Windows required) and another for recording with LeRobot. + +### Install LeRobot + +Follow the [installation instructions](https://github.com/huggingface/lerobot#installation) to install LeRobot. + +Install LeRobot with Reachy 2 dependencies: + +```bash +pip install -e ".[reachy2]" +``` + +### (Optional but recommended) Install pollen_data_acquisition_server + +How you manage Reachy 2 recording sessions is up to you, but the **easiest** way is to use this server so you can control sessions directly from the VR teleoperation app. + +> **Note:** Currently, only the VR teleoperation application works as a client for this server, so this step primarily targets teleoperation. You’re free to develop custom clients to manage sessions to your needs. + +In your LeRobot environment, install the server from source: + +```bash +git clone https://github.com/pollen-robotics/pollen_data_acquisition_server.git +cd pollen_data_acquisition_server +pip install -e . +``` + +Find the [pollen_data_acquisition_server documentation here](https://github.com/pollen-robotics/pollen_data_acquisition_server). + +## Step 1: Recording + +### Get Reachy 2 IP address + +Before starting teleoperation and data recording, find the [robot's IP address](https://docs.pollen-robotics.com/getting-started/setup-reachy2/connect-reachy2/). +We strongly recommend connecting all devices (PC and robot) via **Ethernet**. 
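+
+Before launching teleoperation or recording, it can save time to check that the robot is reachable from the machine running LeRobot. Below is a minimal sketch using Pollen's `reachy2-sdk` Python package; the exact calls are assumptions, so check the Pollen documentation for your SDK version:
+
+```python
+from reachy2_sdk import ReachySDK  # Pollen's Reachy 2 SDK, assumed API
+
+ROBOT_IP = "192.168.0.200"  # replace with your robot's IP address
+
+reachy = ReachySDK(host=ROBOT_IP)
+if reachy.is_connected():
+    print(f"Connected to Reachy 2 at {ROBOT_IP}")
+else:
+    print("Could not reach the robot: check the Ethernet connection and the IP address.")
+reachy.disconnect()
+```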
+ +### Launch recording + +There are two ways to manage recording sessions when using the Reachy 2 VR teleoperation application: + +- **Using the data acquisition server (recommended for VR teleop)**: The VR app orchestrates sessions (via the server it tells LeRobot when to create datasets, start/stop episodes) while also controlling the robot’s motions. +- **Using LeRobot’s record script**: LeRobot owns session control and decides when to start/stop episodes. If you also use the VR teleop app, it’s only for motion control. + +### Option 1: Using Pollen data acquisition server (recommended for VR teleop) + +Make sure you have installed pollen_data_acquisition_server, as explained in the Setup section. + +Launch the data acquisition server to be able to manage your session directly from the teleoperation application: + +```bash +python -m pollen_data_acquisition_server.server +``` + +Then get into the teleoperation application and choose "Data acquisition session". +You can finally setup your session by following the screens displayed. + +> Even without the VR app, you can use the `pollen_data_acquisition_server` with your own client implementation. + +### Option 2: Using lerobot.record + +Reachy 2 is fully supported by LeRobot’s recording features. +If you choose this option but still want to use the VR teleoperation application, select "Standard session" in the app. + +**Example: start a recording without the mobile base:** +First add reachy2 and reachy2_teleoperator to the imports of the record script. Then you can use the following command: + +```bash +python -m lerobot.record \ + --robot.type=reachy2 \ + --robot.ip_address=192.168.0.200 \ + --robot.id=r2-0000 \ + --robot.use_external_commands=true \ + --robot.with_mobile_base=false \ + --teleop.type=reachy2_teleoperator \ + --teleop.ip_address=192.168.0.200 \ + --teleop.with_mobile_base=false \ + --dataset.repo_id=pollen_robotics/record_test \ + --dataset.single_task="Reachy 2 recording test" \ + --dataset.num_episodes=1 \ + --dataset.episode_time_s=5 \ + --dataset.fps=15 \ + --dataset.push_to_hub=true \ + --dataset.private=true \ + --display_data=true +``` + +#### Specific Options + +**Extended setup overview (all options included):** + +```bash +python -m lerobot.record \ + --robot.type=reachy2 \ + --robot.ip_address=192.168.0.200 \ + --robot.use_external_commands=true \ + --robot.with_mobile_base=true \ + --robot.with_l_arm=true \ + --robot.with_r_arm=true \ + --robot.with_neck=true \ + --robot.with_antennas=true \ + --robot.with_left_teleop_camera=true \ + --robot.with_right_teleop_camera=true \ + --robot.with_torso_camera=false \ + --robot.disable_torque_on_disconnect=false \ + --robot.max_relative_target=5.0 \ + --teleop.type=reachy2_teleoperator \ + --teleop.ip_address=192.168.0.200 \ + --teleop.use_present_position=false \ + --teleop.with_mobile_base=false \ + --teleop.with_l_arm=true \ + --teleop.with_r_arm=true \ + --teleop.with_neck=true \ + --teleop.with_antennas=true \ + --dataset.repo_id=pollen_robotics/record_test \ + --dataset.single_task="Reachy 2 recording test" \ + --dataset.num_episodes=1 \ + --dataset.episode_time_s=5 \ + --dataset.fps=15 \ + --dataset.push_to_hub=true \ + --dataset.private=true \ + --display_data=true +``` + +##### `--robot.use_external_commands` + +Determine whether LeRobot robot.send_action() sends commands to the robot. +**Must** be set to false while using the VR teleoperation application, as the app already sends commands. 
+
+##### `--teleop.use_present_position`
+
+Determines whether the teleoperator reads the goal or present position of the robot.
+Must be set to true if a compliant Reachy 2 is used to control another one.
+
+##### Use the relevant parts
+
+From our initial tests, recording **all** joints when only some are moving can reduce model quality with certain policies.
+To avoid this, you can exclude specific parts from recording and replay using:
+
+```
+--robot.with_<part>=false
+```
+
+with `<part>` being one of: `mobile_base`, `l_arm`, `r_arm`, `neck`, `antennas`.
+It determines whether the corresponding part is recorded in the observations. Defaults to true if not set.
+
+By default, **all parts are recorded**.
+
+The same per-part mechanism is available in `reachy2_teleoperator` as well:
+
+```
+--teleop.with_<part>=false
+```
+
+with `<part>` being one of: `mobile_base`, `l_arm`, `r_arm`, `neck`, `antennas`.
+It determines whether the corresponding part is recorded in the actions. Defaults to true if not set.
+
+> **Important:** In a given session, the **enabled parts must match** on both the robot and the teleoperator.
+> For example, if the robot runs with `--robot.with_mobile_base=false`, the teleoperator must disable the same part with `--teleop.with_mobile_base=false`.
+
+##### Use the relevant cameras
+
+You can do the same for **cameras**. By default, only the **teleoperation cameras** are recorded (both `left_teleop_camera` and `right_teleop_camera`). Enable or disable each camera with:
+
+```
+--robot.with_left_teleop_camera=<true/false>
+--robot.with_right_teleop_camera=<true/false>
+--robot.with_torso_camera=<true/false>
+```
+
+## Step 2: Replay
+
+Make sure the robot is configured with the same parts as the dataset:
+
+```bash
+python -m lerobot.replay \
+    --robot.type=reachy2 \
+    --robot.ip_address=192.168.0.200 \
+    --robot.use_external_commands=false \
+    --robot.with_mobile_base=false \
+    --dataset.repo_id=pollen_robotics/record_test \
+    --dataset.episode=0 \
+    --display_data=true
+```
+
+## Step 3: Train
+
+```bash
+python -m lerobot.scripts.train \
+    --dataset.repo_id=pollen_robotics/record_test \
+    --policy.type=act \
+    --output_dir=outputs/train/reachy2_test \
+    --job_name=reachy2 \
+    --policy.device=mps \
+    --wandb.enable=true \
+    --policy.repo_id=pollen_robotics/record_test_policy
+```
+
+## Step 4: Evaluate
+
+```bash
+python -m lerobot.record \
+    --robot.type=reachy2 \
+    --robot.ip_address=192.168.0.200 \
+    --display_data=false \
+    --dataset.repo_id=pollen_robotics/eval_record_test \
+    --dataset.single_task="Evaluate reachy2 policy" \
+    --dataset.num_episodes=10 \
+    --policy.path=outputs/train/reachy2_test/checkpoints/last/pretrained_model
+```
diff --git a/docs/source/rtc.mdx b/docs/source/rtc.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..729519768032534d3599719a7191709bf63cb08e
--- /dev/null
+++ b/docs/source/rtc.mdx
@@ -0,0 +1,188 @@
+# Real-Time Chunking (RTC)
+
+Real-Time Chunking (RTC) is an inference-time method that allows large, flow-matching based robotic policies, such as [Pi0](./pi0), [Pi0.5](./pi05), and [SmolVLA](./smolvla), to produce smooth, continuous, and reactive motion despite having high inference latency.
+
+These policies generate chunks of future actions (e.g., 50 steps at a time) instead of single actions.
+Because the models are large, producing each chunk takes a significant fraction of the time it takes the robot to execute it.
+Naively executing chunks leads to problems such as pauses, jerky transitions, or sudden changes in strategy whenever the next chunk arrives late or disagrees with the previously executed actions. + +RTC solves this by asynchronously generating the next chunk while the robot continues executing the current one, and by guiding the new chunk so it aligns smoothly with the portion of the previous chunk that has already been executed. + +## How RTC Works (simplified) + +RTC lets the robot think ahead while it’s still moving. When the robot is carrying out one chunk of actions, RTC starts creating the next chunk early. +But since the robot has already moved a bit by the time the new chunk is ready, RTC has to make sure the new chunk still lines up smoothly with what the robot is currently doing. + +To do this, RTC treats the beginning of the new chunk like an inpainting or “fill-in-the-gaps” problem: +it gently adjusts the first part of the new chunk so it blends naturally with the robot’s ongoing motion. The result is no pauses, no sudden jumps. + +In technical terms, RTC adds a guidance term to the flow-matching denoising process that forces the overlapping timesteps of the new chunk to stay close to the executed portion of the previous chunk, typically using a soft transition mask. + +## Quick Start + +### Installation + +RTC is built into LeRobot. Just install the policy dependencies you need: + +```bash +# For Pi0 or Pi0.5 +pip install -e ".[pi]" + +# For SmolVLA +pip install -e ".[smolvla]" +``` + +### Using RTC with Pi0 + +You can find a complete reference implementation in [eval_with_real_robot.py](examples/rtc/eval_with_real_robot.py). +The snippet below provides a simplified pseudo-example of how RTC operates with Pi0 in your pipeline: + +```python +from lerobot.policies.pi0 import PI0Policy, PI0Config +from lerobot.configs.types import RTCAttentionSchedule +from lerobot.policies.rtc.configuration_rtc import RTCConfig +from lerobot.policies.rtc.action_queue import ActionQueue + +# Load Pi0 with RTC enabled +policy_cfg = PI0Config() + +# Enable RTC +policy_cfg.rtc_config = RTCConfig( + enabled=True, + execution_horizon=10, # How many steps to blend with previous chunk + max_guidance_weight=10.0, # How strongly to enforce consistency + prefix_attention_schedule=RTCAttentionSchedule.EXP, # Exponential blend +) + +# Load the policy +policy = PI0Policy.from_pretrained("lerobot/pi0_base", policy_cfg=policy_cfg, device="cuda") + +# Now use predict_action_chunk with RTC parameters +inference_delay = 4 # How many steps of inference latency, this values should be calculated based on the inference latency of the policy + +# Initialize the action queue +action_queue = ActionQueue(policy_cfg.rtc_config) + +# Start in a separate thread with the following function +def get_actions(): + while True: + if should_get_actions: + + prev_actions = action_queue.get_left_over() + obs = get_robot_observations(robot) + + # Generate actions WITH RTC + actions = policy.predict_action_chunk( + obs, + inference_delay=inference_delay, + prev_chunk_left_over=prev_actions, + ) + + action_queue.merge( + actions, actions, inference_delay + ) + +for step in range(num_steps): + action = action_queue.get() + + # Execute the first N actions + execute_actions(action) +``` + +## Key Parameters + +`RTCConfig` has the following parameters to tune: + +**`execution_horizon`**: How many timesteps from the previous chunk to maintain consistency with. Higher values mean smoother transitions but potentially less reactivity. 
+
+Typical values: 8-12 steps
+
+```python
+RTCConfig(execution_horizon=10)
+```
+
+**`max_guidance_weight`**: How strongly to enforce consistency with the previous chunk. This is a hyperparameter that can be tuned to balance the smoothness of the transitions and the reactivity of the policy. For 10-step flow matching (SmolVLA, Pi0, Pi0.5), a value of 10.0 is a good default.
+
+**`prefix_attention_schedule`**: How to weight consistency across the overlap region.
+
+- `LINEAR`: Linear decay from inference_delay to execution_horizon
+- `EXP`: Exponential decay (recommended for getting started)
+- `ONES`: Full weight across entire execution_horizon
+- `ZEROS`: Binary (full weight up to inference_delay, then zero)
+
+**`inference_delay`**: How many timesteps of inference latency your system has. This is passed to `predict_action_chunk()` rather than the config, since it may vary at runtime.
+
+## Testing RTC Offline
+
+Before running on a real robot, test RTC with dataset samples to visualize how it works:
+
+```bash
+python examples/rtc/eval_dataset.py \
+    --policy.path=lerobot/pi0_libero_finetuned \
+    --dataset.repo_id=HuggingFaceVLA/libero \
+    --rtc.execution_horizon=10 \
+    --rtc.max_guidance_weight=10.0 \
+    --device=cuda
+```
+
+The script generates a visualization of the denoising process, comparing standard generation (left) with RTC (right). In the RTC plots, you can see how the first few steps (blue/purple lines) are guided to match the red ground truth trajectory (previous chunk's tail), ensuring a smooth transition between chunks.
+

+ Denoising steps with and without RTC +

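+
+To make the `prefix_attention_schedule` options above more concrete, here is a small standalone sketch (plain NumPy, not LeRobot's internal implementation) of how the consistency weights over the overlap region could look for each schedule. It assumes full weight on the steps that are already being executed and progressively less weight towards the end of the execution horizon:
+
+```python
+import numpy as np
+
+def prefix_weights(schedule: str, inference_delay: int = 4, execution_horizon: int = 10) -> np.ndarray:
+    """Toy version of the prefix attention schedules (illustrative only)."""
+    t = np.arange(execution_horizon, dtype=np.float64)
+    if schedule == "ONES":
+        return np.ones(execution_horizon)                    # full weight everywhere
+    if schedule == "ZEROS":
+        return (t < inference_delay).astype(np.float64)      # full weight only on the committed steps
+    # LINEAR and EXP keep full weight on the committed steps, then decay.
+    decay_len = execution_horizon - inference_delay
+    progress = np.clip((t - inference_delay + 1) / decay_len, 0.0, 1.0)
+    if schedule == "LINEAR":
+        decay = 1.0 - progress
+    elif schedule == "EXP":
+        decay = np.exp(-5.0 * progress)                      # decay rate chosen arbitrarily
+    else:
+        raise ValueError(f"Unknown schedule: {schedule}")
+    return np.where(t < inference_delay, 1.0, decay)
+
+for name in ("ONES", "ZEROS", "LINEAR", "EXP"):
+    print(name, np.round(prefix_weights(name), 2))
+```
+
+In practice you only pick the enum value in `RTCConfig`; the sketch is just a way to visualize how aggressively each option lets the new chunk depart from the previously planned actions.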
+ +## Testing RTC with a Real Robot + +```bash +python examples/rtc/eval_with_real_robot.py \ + --policy.path=${HF_USERNAME}/policy_repo_id \ + --robot.type=so100_follower \ + --robot.port=/dev/tty.usbmodem58FA0834591 \ + --robot.cameras="{ gripper: {type: opencv, index_or_path: 1, width: 640, height: 480, fps: 30}, front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \ + --task="Move green small object into the purple platform" \ + --duration=120 \ + --device=cuda +``` + +## How It Differs from the Async Inference in LeRobot + +Both RTC and [async inference](./async) improve real-time robot control, but they solve different problems. + +| Aspect | Async Inference | RTC | +| ------------- | -------------------------------------------------------------------------- | --------------------------------------------------- | +| **Problem** | Idle frames while waiting for inference | Discontinuities between action chunks | +| **Solution** | Decouple prediction from execution | Guide new chunks to continue smoothly from previous | +| **Benefit** | No waiting, continuous action | Smooth transitions, natural motion | +| **Best Used** | Async inference is best used with large models with high inference latency | Flow-matching based policies | + +**Use both together** for maximum smoothness and reactivity! + +## Advanced: Debug Tracking + +RTC includes built-in debug tracking to help you understand what's happening during inference: + +```python +# Enable debug tracking +policy_cfg.rtc_config.debug = True +policy_cfg.rtc_config.debug_maxlen = 100 + +# After inference, access debug data +debug_data = policy.rtc_processor.get_debug_data() + +# Visualize denoising steps, corrections, etc. +from lerobot.policies.rtc.debug_visualizer import RTCDebugVisualizer +visualizer = RTCDebugVisualizer() +# ... create plots +``` + +See `examples/rtc/eval_dataset.py` for a complete example of visualization. + +## References + +- [Smooth-As-Butter Robot Policies](https://alexander-soare.github.io/robotics/2025/08/05/smooth-as-butter-robot-policies.html) - Excellent technical explanation with real robot results +- [Physical Intelligence - Real-Time Chunking](https://www.physicalintelligence.company/research/real_time_chunking) - Original paper and research +- [Kinetix RTC Implementation](https://github.com/Physical-Intelligence/real-time-chunking-kinetix) - Reference implementation from Physical Intelligence diff --git a/docs/source/smolvla.mdx b/docs/source/smolvla.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a9a498d6476b6f46157465ab3c4f5ab1c9ab1c5d --- /dev/null +++ b/docs/source/smolvla.mdx @@ -0,0 +1,116 @@ +# SmolVLA + +SmolVLA is Hugging Face’s lightweight foundation model for robotics. Designed for easy fine-tuning on LeRobot datasets, it helps accelerate your development! + +

+ SmolVLA architecture. +
+ + Figure 1. SmolVLA takes as input (i) multiple cameras views, (ii) the + robot’s current sensorimotor state, and (iii) a natural language + instruction, encoded into contextual features used to condition the action + expert when generating an action chunk. + +

+ +## Set Up Your Environment + +1. Install LeRobot by following our [Installation Guide](./installation). +2. Install SmolVLA dependencies by running: + + ```bash + pip install -e ".[smolvla]" + ``` + +## Collect a dataset + +SmolVLA is a base model, so fine-tuning on your own data is required for optimal performance in your setup. +We recommend recording ~50 episodes of your task as a starting point. Follow our guide to get started: [Recording a Dataset](./il_robots) + + + +In your dataset, make sure to have enough demonstrations per each variation (e.g. the cube position on the table if it is cube pick-place task) you are introducing. + +We recommend checking out the dataset linked below for reference that was used in the [SmolVLA paper](https://huggingface.co/papers/2506.01844): + +🔗 [SVLA SO100 PickPlace](https://huggingface.co/spaces/lerobot/visualize_dataset?path=%2Flerobot%2Fsvla_so100_pickplace%2Fepisode_0) + +In this dataset, we recorded 50 episodes across 5 distinct cube positions. For each position, we collected 10 episodes of pick-and-place interactions. This structure, repeating each variation several times, helped the model generalize better. We tried similar dataset with 25 episodes, and it was not enough leading to a bad performance. So, the data quality and quantity is definitely a key. +After you have your dataset available on the Hub, you are good to go to use our finetuning script to adapt SmolVLA to your application. + + + +## Finetune SmolVLA on your data + +Use [`smolvla_base`](https://hf.co/lerobot/smolvla_base), our pretrained 450M model, and fine-tune it on your data. +Training the model for 20k steps will roughly take ~4 hrs on a single A100 GPU. You should tune the number of steps based on performance and your use-case. + +If you don't have a gpu device, you can train using our notebook on [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/lerobot/training-smolvla.ipynb) + +Pass your dataset to the training script using `--dataset.repo_id`. If you want to test your installation, run the following command where we use one of the datasets we collected for the [SmolVLA Paper](https://huggingface.co/papers/2506.01844). + +```bash +cd lerobot && lerobot-train \ + --policy.path=lerobot/smolvla_base \ + --dataset.repo_id=${HF_USER}/mydataset \ + --batch_size=64 \ + --steps=20000 \ + --output_dir=outputs/train/my_smolvla \ + --job_name=my_smolvla_training \ + --policy.device=cuda \ + --wandb.enable=true +``` + + + You can start with a small batch size and increase it incrementally, if the + GPU allows it, as long as loading times remain short. + + +Fine-tuning is an art. For a complete overview of the options for finetuning, run + +```bash +lerobot-train --help +``` + +

+ Comparison of SmolVLA across task variations. +
+ + Figure 2: Comparison of SmolVLA across task variations. From left to right: + (1) pick-place cube counting, (2) pick-place cube counting, (3) pick-place + cube counting under perturbations, and (4) generalization on pick-and-place + of the lego block with real-world SO101. + +

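+
+Once training finishes, you can sanity-check the exported checkpoint directly in Python before deploying it on the robot. This is a minimal sketch; the import path and class name are assumed to follow the same pattern as the other LeRobot policies, so adapt them to your installed version:
+
+```python
+from lerobot.policies.smolvla import SmolVLAPolicy  # import path assumed
+
+# Load your fine-tuned checkpoint from the Hub (or point to a local outputs/train/... directory).
+policy = SmolVLAPolicy.from_pretrained("HF_USER/FINETUNE_MODEL_NAME")
+policy.eval()
+
+# Check which cameras/state the policy expects and what action space it predicts
+# before wiring it up to your robot.
+print(policy.config.input_features)
+print(policy.config.output_features)
+```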
+ +## Evaluate the finetuned model and run it in real-time + +Similarly for when recording an episode, it is recommended that you are logged in to the HuggingFace Hub. You can follow the corresponding steps: [Record a dataset](./il_robots). +Once you are logged in, you can run inference in your setup by doing: + +```bash +lerobot-record \ + --robot.type=so101_follower \ + --robot.port=/dev/ttyACM0 \ # <- Use your port + --robot.id=my_blue_follower_arm \ # <- Use your robot id + --robot.cameras="{ front: {type: opencv, index_or_path: 8, width: 640, height: 480, fps: 30}}" \ # <- Use your cameras + --dataset.single_task="Grasp a lego block and put it in the bin." \ # <- Use the same task description you used in your dataset recording + --dataset.repo_id=${HF_USER}/eval_DATASET_NAME_test \ # <- This will be the dataset name on HF Hub + --dataset.episode_time_s=50 \ + --dataset.num_episodes=10 \ + # <- Teleop optional if you want to teleoperate in between episodes \ + # --teleop.type=so100_leader \ + # --teleop.port=/dev/ttyACM0 \ + # --teleop.id=my_red_leader_arm \ + --policy.path=HF_USER/FINETUNE_MODEL_NAME # <- Use your fine-tuned model +``` + +Depending on your evaluation setup, you can configure the duration and the number of episodes to record for your evaluation suite. diff --git a/docs/source/so100.mdx b/docs/source/so100.mdx new file mode 100644 index 0000000000000000000000000000000000000000..6f818136747a129d9faf7be99756866a12792927 --- /dev/null +++ b/docs/source/so100.mdx @@ -0,0 +1,640 @@ +# SO-100 + +In the steps below, we explain how to assemble the SO-100 robot. + +## Source the parts + +Follow this [README](https://github.com/TheRobotStudio/SO-ARM100/blob/main/SO100.md). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts. And advise if it's your first time printing or if you don't own a 3D printer. + +## Install LeRobot 🤗 + +To install LeRobot, follow our [Installation Guide](./installation) + +In addition to these instructions, you need to install the Feetech SDK: + +```bash +pip install -e ".[feetech]" +``` + +## Configure the motors + +**Note:** +Unlike the SO-101, the motor connectors are not easily accessible once the arm is assembled, so the configuration step must be done beforehand. + +### 1. Find the USB ports associated with each arm + +To find the port for each bus servo adapter, run this script: + +```bash +lerobot-find-port +``` + + + + +Example output: + +``` +Finding all available ports for the MotorBus. +['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] +Remove the USB cable from your MotorsBus and press Enter when done. + +[...Disconnect corresponding leader or follower arm and press Enter...] + +The port of this MotorsBus is /dev/tty.usbmodem575E0032081 +Reconnect the USB cable. +``` + +Where the found port is: `/dev/tty.usbmodem575E0032081` corresponding to your leader or follower arm. + + + + +On Linux, you might need to give access to the USB ports by running: + +```bash +sudo chmod 666 /dev/ttyACM0 +sudo chmod 666 /dev/ttyACM1 +``` + +Example output: + +``` +Finding all available ports for the MotorBus. +['/dev/ttyACM0', '/dev/ttyACM1'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect corresponding leader or follower arm and press Enter...] + +The port of this MotorsBus is /dev/ttyACM1 +Reconnect the USB cable. +``` + +Where the found port is: `/dev/ttyACM1` corresponding to your leader or follower arm. + + + + +### 2. 
Set the motors ids and baudrates + +Each motor is identified by a unique id on the bus. When brand new, motors usually come with a default id of `1`. For the communication to work properly between the motors and the controller, we first need to set a unique, different id to each motor. Additionally, the speed at which data is transmitted on the bus is determined by the baudrate. In order to talk to each other, the controller and all the motors need to be configured with the same baudrate. + +To that end, we first need to connect to each motor individually with the controller in order to set these. Since we will write these parameters in the non-volatile section of the motors' internal memory (EEPROM), we'll only need to do this once. + +If you are repurposing motors from another robot, you will probably also need to perform this step as the ids and baudrate likely won't match. + +#### Follower + +Connect the usb cable from your computer and the power supply to the follower arm's controller board. Then, run the following command or run the API example with the port you got from the previous step. You'll also need to give your leader arm a name with the `id` parameter. + +For a visual reference on how to set the motor ids please refer to [this video](https://huggingface.co/docs/lerobot/en/so101#setup-motors-video) where we follow the process for the SO101 arm. + + + + +```bash +lerobot-setup-motors \ + --robot.type=so100_follower \ + --robot.port=/dev/tty.usbmodem585A0076841 # <- paste here the port found at previous step +``` + + + + + +```python +from lerobot.robots.so100_follower import SO100Follower, SO100FollowerConfig + +config = SO100FollowerConfig( + port="/dev/tty.usbmodem585A0076841", + id="my_awesome_follower_arm", +) +follower = SO100Follower(config) +follower.setup_motors() +``` + + + + + +You should see the following instruction + +``` +Connect the controller board to the 'gripper' motor only and press enter. +``` + +As instructed, plug the gripper's motor. Make sure it's the only motor connected to the board, and that the motor itself is not yet daisy-chained to any other motor. As you press `[Enter]`, the script will automatically set the id and baudrate for that motor. + +
+Troubleshooting + +If you get an error at that point, check your cables and make sure they are plugged in properly: + +
+- Power supply
+- USB cable between your computer and the controller board
+- The 3-pin cable from the controller board to the motor
+ +If you are using a Waveshare controller board, make sure that the two jumpers are set on the `B` channel (USB). + +
+ +You should then see the following message: + +``` +'gripper' motor id set to 6 +``` + +Followed by the next instruction: + +``` +Connect the controller board to the 'wrist_roll' motor only and press enter. +``` + +You can disconnect the 3-pin cable from the controller board, but you can leave it connected to the gripper motor on the other end, as it will already be in the right place. Now, plug in another 3-pin cable to the wrist roll motor and connect it to the controller board. As with the previous motor, make sure it is the only motor connected to the board and that the motor itself isn't connected to any other one. + +Repeat the operation for each motor as instructed. + +> [!TIP] +> Check your cabling at each step before pressing Enter. For instance, the power supply cable might disconnect as you manipulate the board. + +When you are done, the script will simply finish, at which point the motors are ready to be used. You can now plug the 3-pin cable from each motor to the next one, and the cable from the first motor (the 'shoulder pan' with id=1) to the controller board, which can now be attached to the base of the arm. + +#### Leader + +Do the same steps for the leader arm. + + + +```bash +lerobot-setup-motors \ + --teleop.type=so100_leader \ + --teleop.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step +``` + + + + +```python +from lerobot.teleoperators.so100_leader import SO100Leader, SO100LeaderConfig + +config = SO100LeaderConfig( + port="/dev/tty.usbmodem585A0076841", + id="my_awesome_leader_arm", +) +leader = SO100Leader(config) +leader.setup_motors() +``` + + + + + +## Step-by-Step Assembly Instructions + +## Remove the gears of the 6 leader motors + +
+Video removing gears + +
+ +
+ +
+ +Follow the video for removing gears. You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm. + +### Clean Parts + +Remove all support material from the 3D-printed parts. The easiest way to do this is using a small screwdriver to get underneath the support material. + +### Additional Guidance + +
+Video assembling arms + +
+ +
+ +
+ +**Note:** +This video provides visual guidance for assembling the arms, but it doesn't specify when or how to do the wiring. Inserting the cables beforehand is much easier than doing it afterward. The first arm may take a bit more than 1 hour to assemble, but once you get used to it, you can assemble the second arm in under 1 hour. + +--- + +### First Motor + +**Step 2: Insert Wires** + +- Insert two wires into the first motor. + + + +**Step 3: Install in Base** + +- Place the first motor into the base. + + + +**Step 4: Secure Motor** + +- Fasten the motor with 4 screws. Two from the bottom and two from top. + +**Step 5: Attach Motor Holder** + +- Slide over the first motor holder and fasten it using two screws (one on each side). + + + +**Step 6: Attach Motor Horns** + +- Install both motor horns, securing the top horn with a screw. Try not to move the motor position when attaching the motor horn, especially for the leader arms, where we removed the gears. + + + +
+ + Video adding motor horn + + +
+ +**Step 7: Attach Shoulder Part** + +- Route one wire to the back of the robot and the other to the left or towards you (see photo). +- Attach the shoulder part. + + + +**Step 8: Secure Shoulder** + +- Tighten the shoulder part with 4 screws on top and 4 on the bottom + _(access bottom holes by turning the shoulder)._ + +--- + +### Second Motor Assembly + +**Step 9: Install Motor 2** + +- Slide the second motor in from the top and link the wire from motor 1 to motor 2. + + + +**Step 10: Attach Shoulder Holder** + +- Add the shoulder motor holder. +- Ensure the wire from motor 1 to motor 2 goes behind the holder while the other wire is routed upward (see photo). +- This part can be tight to assemble, you can use a workbench like the image or a similar setup to push the part around the motor. + +
+ + + +
+ +**Step 11: Secure Motor 2** + +- Fasten the second motor with 4 screws. + +**Step 12: Attach Motor Horn** + +- Attach both motor horns to motor 2, again use the horn screw. + +**Step 13: Attach Base** + +- Install the base attachment using 2 screws. + + + +**Step 14: Attach Upper Arm** + +- Attach the upper arm with 4 screws on each side. + + + +--- + +### Third Motor Assembly + +**Step 15: Install Motor 3** + +- Route the motor cable from motor 2 through the cable holder to motor 3, then secure motor 3 with 4 screws. + +**Step 16: Attach Motor Horn** + +- Attach both motor horns to motor 3 and secure one again with a horn screw. + + + +**Step 17: Attach Forearm** + +- Connect the forearm to motor 3 using 4 screws on each side. + + + +--- + +### Fourth Motor Assembly + +**Step 18: Install Motor 4** + +- Slide in motor 4, attach the cable from motor 3, and secure the cable in its holder with a screw. + +
+ + +
+ +**Step 19: Attach Motor Holder 4** + +- Install the fourth motor holder (a tight fit). Ensure one wire is routed upward and the wire from motor 3 is routed downward (see photo). + + + +**Step 20: Secure Motor 4 & Attach Horn** + +- Fasten motor 4 with 4 screws and attach its motor horns, use for one a horn screw. + + + +--- + +### Wrist Assembly + +**Step 21: Install Motor 5** + +- Insert motor 5 into the wrist holder and secure it with 2 front screws. + + + +**Step 22: Attach Wrist** + +- Connect the wire from motor 4 to motor 5. And already insert the other wire for the gripper. +- Secure the wrist to motor 4 using 4 screws on both sides. + + + +**Step 23: Attach Wrist Horn** + +- Install only one motor horn on the wrist motor and secure it with a horn screw. + + + +--- + +### Follower Configuration + +**Step 24: Attach Gripper** + +- Attach the gripper to motor 5. + + + +**Step 25: Install Gripper Motor** + +- Insert the gripper motor, connect the motor wire from motor 5 to motor 6, and secure it with 3 screws on each side. + + + +**Step 26: Attach Gripper Horn & Claw** + +- Attach the motor horns and again use a horn screw. +- Install the gripper claw and secure it with 4 screws on both sides. + + + +**Step 27: Mount Controller** + +- Attach the motor controller to the back of the robot. + +
+ + +
+ +_Assembly complete – proceed to Leader arm assembly._ + +--- + +### Leader Configuration + +For the leader configuration, perform **Steps 1–23**. Make sure that you removed the motor gears from the motors. + +**Step 24: Attach Leader Holder** + +- Mount the leader holder onto the wrist and secure it with a screw. + + + +**Step 25: Attach Handle** + +- Attach the handle to motor 5 using 4 screws. + + + +**Step 26: Install Gripper Motor** + +- Insert the gripper motor, secure it with 3 screws on each side, attach a motor horn using a horn screw, and connect the motor wire. + + + +**Step 27: Attach Trigger** + +- Attach the follower trigger with 4 screws. + + + +**Step 28: Mount Controller** + +- Attach the motor controller to the back of the robot. + +
+ + +
+ +## Calibrate + +Next, you'll need to calibrate your robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. +The calibration process is very important because it allows a neural network trained on one robot to work on another. + +#### Follower + +Run the following command or API example to calibrate the follower arm: + + + + +```bash +lerobot-calibrate \ + --robot.type=so100_follower \ + --robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot + --robot.id=my_awesome_follower_arm # <- Give the robot a unique name +``` + + + + + +```python +from lerobot.robots.so100_follower import SO100FollowerConfig, SO100Follower + +config = SO100FollowerConfig( + port="/dev/tty.usbmodem585A0076891", + id="my_awesome_follower_arm", +) + +follower = SO100Follower(config) +follower.connect(calibrate=False) +follower.calibrate() +follower.disconnect() +``` + + + + + +We unified the calibration method for most robots. Thus, the calibration steps for this SO100 arm are the same as the steps for the Koch and SO101. First, we have to move the robot to the position where each joint is in the middle of its range, then we press `Enter`. Secondly, we move all joints through their full range of motion. A video of this same process for the SO101 as reference can be found [here](https://huggingface.co/docs/lerobot/en/so101#calibration-video) + +#### Leader + +Do the same steps to calibrate the leader arm, run the following command or API example: + + + + +```bash +lerobot-calibrate \ + --teleop.type=so100_leader \ + --teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot + --teleop.id=my_awesome_leader_arm # <- Give the robot a unique name +``` + + + + + +```python +from lerobot.teleoperators.so100_leader import SO100LeaderConfig, SO100Leader + +config = SO100LeaderConfig( + port="/dev/tty.usbmodem58760431551", + id="my_awesome_leader_arm", +) + +leader = SO100Leader(config) +leader.connect(calibrate=False) +leader.calibrate() +leader.disconnect() +``` + + + + + +Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following this tutorial: [Getting started with real-world robots](./il_robots) + +> [!TIP] +> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb). diff --git a/docs/source/so101.mdx b/docs/source/so101.mdx new file mode 100644 index 0000000000000000000000000000000000000000..9f13945c138c085070100586a42eb08cac5a9a28 --- /dev/null +++ b/docs/source/so101.mdx @@ -0,0 +1,436 @@ +# SO-101 + +In the steps below, we explain how to assemble our flagship robot, the SO-101. + +## Source the parts + +Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts. +And advise if it's your first time printing or if you don't own a 3D printer. + +## Install LeRobot 🤗 + +To install LeRobot, follow our [Installation Guide](./installation) + +In addition to these instructions, you need to install the Feetech SDK: + +```bash +pip install -e ".[feetech]" +``` + +## Step-by-Step Assembly Instructions + +The follower arm uses 6x STS3215 motors with 1/345 gearing. The leader, however, uses three differently geared motors to make sure it can both sustain its own weight and it can be moved without requiring much force. Which motor is needed for which joint is shown in the table below. 
+
+| Leader-Arm Axis     | Motor | Gear Ratio |
+| ------------------- | :---: | :--------: |
+| Base / Shoulder Pan |   1   |  1 / 191   |
+| Shoulder Lift       |   2   |  1 / 345   |
+| Elbow Flex          |   3   |  1 / 191   |
+| Wrist Flex          |   4   |  1 / 147   |
+| Wrist Roll          |   5   |  1 / 147   |
+| Gripper             |   6   |  1 / 147   |
+
+### Clean Parts
+
+Remove all support material from the 3D-printed parts. The easiest way to do this is using a small screwdriver to get underneath the support material.
+
+It is advisable to plug a 3-pin cable into each motor after placing it, before continuing with the assembly.
+
+### Joint 1
+
+- Place the first motor into the base.
+- Fasten the motor with 4 M2x6mm screws (smallest screws): two from the top and two from the bottom.
+- Slide over the first motor holder and fasten it using two M2x6mm screws (one on each side).
+- Install both motor horns, securing the top horn with a M3x6mm screw.
+- Attach the shoulder part.
+- Tighten the shoulder part with 4 M3x6mm screws on top and 4 M3x6mm screws on the bottom.
+- Add the shoulder motor holder.
+
+ +
+
+### Joint 2
+
+- Slide the second motor in from the top.
+- Fasten the second motor with 4 M2x6mm screws.
+- Attach both motor horns to motor 2, again using the M3x6mm horn screw.
+- Attach the upper arm with 4 M3x6mm screws on each side.
+
+ +
+
+### Joint 3
+
+- Insert motor 3 and fasten it using 4 M2x6mm screws.
+- Attach both motor horns to motor 3 and secure one again with a M3x6mm horn screw.
+- Connect the forearm to motor 3 using 4 M3x6mm screws on each side.
+
+ +
+
+### Joint 4
+
+- Slide over motor holder 4.
+- Slide in motor 4.
+- Fasten motor 4 with 4 M2x6mm screws and attach its motor horns, using a M3x6mm horn screw.
+
+ +
+ +### Joint 5 + +- Insert motor 5 into the wrist holder and secure it with 2 M2x6mm front screws. +- Install only one motor horn on the wrist motor and secure it with a M3x6mm horn screw. +- Secure the wrist to motor 4 using 4 M3x6mm screws on both sides. + +
+ +
+
+### Gripper / Handle
+
+
+
+- Attach the gripper to motor 5 by fastening it to the motor horn on the wrist with 4 M3x6mm screws.
+- Insert the gripper motor and secure it with 2 M2x6mm screws on each side.
+- Attach the motor horns and again use a M3x6mm horn screw.
+- Install the gripper claw and secure it with 4 M3x6mm screws on both sides.
+
+ +
+ +
+
+
+- Mount the leader holder onto the wrist and secure it with 4 M3x6mm screws.
+- Attach the handle to motor 5 using 1 M2x6mm screw.
+- Insert the gripper motor, secure it with 2 M2x6mm screws on each side, and attach a motor horn using a M3x6mm horn screw.
+- Attach the follower trigger with 4 M3x6mm screws.
+
+ +
+ +
+
+ +## Configure the motors + +### 1. Find the USB ports associated with each arm + +To find the port for each bus servo adapter, connect MotorBus to your computer via USB and power. Run the following script and disconnect the MotorBus when prompted: + +```bash +lerobot-find-port +``` + + + + +Example output: + +``` +Finding all available ports for the MotorBus. +['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] +Remove the USB cable from your MotorsBus and press Enter when done. + +[...Disconnect corresponding leader or follower arm and press Enter...] + +The port of this MotorsBus is /dev/tty.usbmodem575E0032081 +Reconnect the USB cable. +``` + +Where the found port is: `/dev/tty.usbmodem575E0032081` corresponding to your leader or follower arm. + + + + +On Linux, you might need to give access to the USB ports by running: + +```bash +sudo chmod 666 /dev/ttyACM0 +sudo chmod 666 /dev/ttyACM1 +``` + +Example output: + +``` +Finding all available ports for the MotorBus. +['/dev/ttyACM0', '/dev/ttyACM1'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect corresponding leader or follower arm and press Enter...] + +The port of this MotorsBus is /dev/ttyACM1 +Reconnect the USB cable. +``` + +Where the found port is: `/dev/ttyACM1` corresponding to your leader or follower arm. + + + + +### 2. Set the motors ids and baudrates + +Each motor is identified by a unique id on the bus. When brand new, motors usually come with a default id of `1`. For the communication to work properly between the motors and the controller, we first need to set a unique, different id to each motor. Additionally, the speed at which data is transmitted on the bus is determined by the baudrate. In order to talk to each other, the controller and all the motors need to be configured with the same baudrate. + +To that end, we first need to connect to each motor individually with the controller in order to set these. Since we will write these parameters in the non-volatile section of the motors' internal memory (EEPROM), we'll only need to do this once. + +If you are repurposing motors from another robot, you will probably also need to perform this step as the ids and baudrate likely won't match. + +The video below shows the sequence of steps for setting the motor ids. + +##### Setup motors video + +
+ +
+
+#### Follower
+
+Connect the USB cable from your computer and the power supply to the follower arm's controller board. Then, run the following command or the API example with the port you found in the previous step. You'll also need to give your follower arm a name with the `id` parameter.
+
+
+
+```bash
+lerobot-setup-motors \
+    --robot.type=so101_follower \
+    --robot.port=/dev/tty.usbmodem585A0076841 # <- paste here the port found at previous step
+```
+
+
+
+
+```python
+from lerobot.robots.so101_follower import SO101Follower, SO101FollowerConfig
+
+config = SO101FollowerConfig(
+    port="/dev/tty.usbmodem585A0076841",
+    id="my_awesome_follower_arm",
+)
+follower = SO101Follower(config)
+follower.setup_motors()
+```
+
+
+
+
+You should see the following instruction:
+
+```bash
+Connect the controller board to the 'gripper' motor only and press enter.
+```
+
+As instructed, plug in the gripper's motor. Make sure it's the only motor connected to the board, and that the motor itself is not yet daisy-chained to any other motor. As you press `[Enter]`, the script will automatically set the id and baudrate for that motor.
+
+
+**Troubleshooting**
+
+If you get an error at that point, check your cables and make sure they are plugged in properly:
+
+- Power supply
+- USB cable between your computer and the controller board
+- The 3-pin cable from the controller board to the motor
+
+If you are using a Waveshare controller board, make sure that the two jumpers are set on the `B` channel (USB).
+
+ +You should then see the following message: + +```bash +'gripper' motor id set to 6 +``` + +Followed by the next instruction: + +```bash +Connect the controller board to the 'wrist_roll' motor only and press enter. +``` + +You can disconnect the 3-pin cable from the controller board, but you can leave it connected to the gripper motor on the other end, as it will already be in the right place. Now, plug in another 3-pin cable to the wrist roll motor and connect it to the controller board. As with the previous motor, make sure it is the only motor connected to the board and that the motor itself isn't connected to any other one. + +Repeat the operation for each motor as instructed. + +> [!TIP] +> Check your cabling at each step before pressing Enter. For instance, the power supply cable might disconnect as you manipulate the board. + +When you are done, the script will simply finish, at which point the motors are ready to be used. You can now plug the 3-pin cable from each motor to the next one, and the cable from the first motor (the 'shoulder pan' with id=1) to the controller board, which can now be attached to the base of the arm. + +#### Leader + +Do the same steps for the leader arm. + + + + +```bash +lerobot-setup-motors \ + --teleop.type=so101_leader \ + --teleop.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step +``` + + + + + +```python +from lerobot.teleoperators.so101_leader import SO101Leader, SO101LeaderConfig + +config = SO101LeaderConfig( + port="/dev/tty.usbmodem585A0076841", + id="my_awesome_leader_arm", +) +leader = SO101Leader(config) +leader.setup_motors() +``` + + + + + +## Calibrate + +Next, you'll need to calibrate your robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. +The calibration process is very important because it allows a neural network trained on one robot to work on another. + +#### Follower + +Run the following command or API example to calibrate the follower arm: + + + + +```bash +lerobot-calibrate \ + --robot.type=so101_follower \ + --robot.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot + --robot.id=my_awesome_follower_arm # <- Give the robot a unique name +``` + + + + + +```python +from lerobot.robots.so101_follower import SO101FollowerConfig, SO101Follower + +config = SO101FollowerConfig( + port="/dev/tty.usbmodem585A0076891", + id="my_awesome_follower_arm", +) + +follower = SO101Follower(config) +follower.connect(calibrate=False) +follower.calibrate() +follower.disconnect() +``` + + + + + +The video below shows how to perform the calibration. First you need to move the robot to the position where all joints are in the middle of their ranges. Then after pressing enter you have to move each joint through its full range of motion. + +##### Calibration video + +
+ +
+ +#### Leader + +Do the same steps to calibrate the leader arm, run the following command or API example: + + + + +```bash +lerobot-calibrate \ + --teleop.type=so101_leader \ + --teleop.port=/dev/tty.usbmodem58760431551 \ # <- The port of your robot + --teleop.id=my_awesome_leader_arm # <- Give the robot a unique name +``` + + + + + +```python +from lerobot.teleoperators.so101_leader import SO101LeaderConfig, SO101Leader + +config = SO101LeaderConfig( + port="/dev/tty.usbmodem58760431551", + id="my_awesome_leader_arm", +) + +leader = SO101Leader(config) +leader.connect(calibrate=False) +leader.calibrate() +leader.disconnect() +``` + + + + + +Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following this tutorial: [Getting started with real-world robots](./il_robots) + +> [!TIP] +> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb). diff --git a/docs/source/using_dataset_tools.mdx b/docs/source/using_dataset_tools.mdx new file mode 100644 index 0000000000000000000000000000000000000000..e5de2198d6a81b285efba33b283db00d302afaad --- /dev/null +++ b/docs/source/using_dataset_tools.mdx @@ -0,0 +1,102 @@ +# Using Dataset Tools + +This guide covers the dataset tools utilities available in LeRobot for modifying and editing existing datasets. + +## Overview + +LeRobot provides several utilities for manipulating datasets: + +1. **Delete Episodes** - Remove specific episodes from a dataset +2. **Split Dataset** - Divide a dataset into multiple smaller datasets +3. **Merge Datasets** - Combine multiple datasets into one. The datasets must have identical features, and episodes are concatenated in the order specified in `repo_ids` +4. **Add Features** - Add new features to a dataset +5. **Remove Features** - Remove features from a dataset + +The core implementation is in `lerobot.datasets.dataset_tools`. +An example script detailing how to use the tools API is available in `examples/dataset/use_dataset_tools.py`. + +## Command-Line Tool: lerobot-edit-dataset + +`lerobot-edit-dataset` is a command-line script for editing datasets. It can be used to delete episodes, split datasets, merge datasets, add features, and remove features. + +Run `lerobot-edit-dataset --help` for more information on the configuration of each operation. + +### Usage Examples + +#### Delete Episodes + +Remove specific episodes from a dataset. This is useful for filtering out undesired data. + +```bash +# Delete episodes 0, 2, and 5 (modifies original dataset) +lerobot-edit-dataset \ + --repo_id lerobot/pusht \ + --operation.type delete_episodes \ + --operation.episode_indices "[0, 2, 5]" + +# Delete episodes and save to a new dataset (preserves original dataset) +lerobot-edit-dataset \ + --repo_id lerobot/pusht \ + --new_repo_id lerobot/pusht_after_deletion \ + --operation.type delete_episodes \ + --operation.episode_indices "[0, 2, 5]" +``` + +#### Split Dataset + +Divide a dataset into multiple subsets. + +```bash +# Split by fractions (e.g. 80% train, 20% test, 20% val) +lerobot-edit-dataset \ + --repo_id lerobot/pusht \ + --operation.type split \ + --operation.splits '{"train": 0.8, "test": 0.2, "val": 0.2}' + +# Split by specific episode indices +lerobot-edit-dataset \ + --repo_id lerobot/pusht \ + --operation.type split \ + --operation.splits '{"task1": [0, 1, 2, 3], "task2": [4, 5]}' +``` + +There are no constraints on the split names, they can be determined by the user. 
Resulting datasets are saved under the repo id with the split name appended, e.g. `lerobot/pusht_train`, `lerobot/pusht_task1`, `lerobot/pusht_task2`. + +#### Merge Datasets + +Combine multiple datasets into a single dataset. + +```bash +# Merge train and validation splits back into one dataset +lerobot-edit-dataset \ + --repo_id lerobot/pusht_merged \ + --operation.type merge \ + --operation.repo_ids "['lerobot/pusht_train', 'lerobot/pusht_val']" +``` + +#### Remove Features + +Remove features from a dataset. + +```bash +# Remove a camera feature +lerobot-edit-dataset \ + --repo_id lerobot/pusht \ + --operation.type remove_feature \ + --operation.feature_names "['observation.images.top']" +``` + +### Push to Hub + +Add the `--push_to_hub` flag to any command to automatically upload the resulting dataset to the Hugging Face Hub: + +```bash +lerobot-edit-dataset \ + --repo_id lerobot/pusht \ + --new_repo_id lerobot/pusht_after_deletion \ + --operation.type delete_episodes \ + --operation.episode_indices "[0, 2, 5]" \ + --push_to_hub +``` + +There is also a tool for adding features to a dataset that is not yet covered in `lerobot-edit-dataset`. diff --git a/examples/backward_compatibility/replay.py b/examples/backward_compatibility/replay.py new file mode 100644 index 0000000000000000000000000000000000000000..4d72410a0a3fe1c3659561843b5e8694a056e20a --- /dev/null +++ b/examples/backward_compatibility/replay.py @@ -0,0 +1,106 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Replays the actions of an episode from a dataset on a robot. + +Example: + +```shell +lerobot-replay \ + --robot.type=so100_follower \ + --robot.port=/dev/tty.usbmodem58760431541 \ + --robot.id=black \ + --dataset.repo_id=aliberts/record-test \ + --dataset.episode=2 +``` +""" + +import logging +import time +from dataclasses import asdict, dataclass +from pathlib import Path +from pprint import pformat + +import draccus + +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.robots import ( # noqa: F401 + Robot, + RobotConfig, + koch_follower, + make_robot_from_config, + so100_follower, + so101_follower, +) +from lerobot.utils.constants import ACTION +from lerobot.utils.robot_utils import busy_wait +from lerobot.utils.utils import ( + init_logging, + log_say, +) + + +@dataclass +class DatasetReplayConfig: + # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`). + repo_id: str + # Episode to replay. + episode: int + # Root directory where the dataset will be stored (e.g. 'dataset/path'). + root: str | Path | None = None + # Limit the frames per second. By default, uses the policy fps. + fps: int = 30 + + +@dataclass +class ReplayConfig: + robot: RobotConfig + dataset: DatasetReplayConfig + # Use vocal synthesis to read events. 
+ play_sounds: bool = True + + +@draccus.wrap() +def replay(cfg: ReplayConfig): + init_logging() + logging.info(pformat(asdict(cfg))) + + robot = make_robot_from_config(cfg.robot) + dataset = LeRobotDataset(cfg.dataset.repo_id, root=cfg.dataset.root, episodes=[cfg.dataset.episode]) + actions = dataset.hf_dataset.select_columns(ACTION) + robot.connect() + + log_say("Replaying episode", cfg.play_sounds, blocking=True) + for idx in range(dataset.num_frames): + start_episode_t = time.perf_counter() + + action_array = actions[idx][ACTION] + action = {} + for i, name in enumerate(dataset.features[ACTION]["names"]): + key = f"{name.removeprefix('main_')}.pos" + action[key] = action_array[i].item() + + action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90) + action["elbow_flex.pos"] -= 90 + robot.send_action(action) + + dt_s = time.perf_counter() - start_episode_t + busy_wait(1 / dataset.fps - dt_s) + + robot.disconnect() + + +if __name__ == "__main__": + replay() diff --git a/examples/dataset/load_lerobot_dataset.py b/examples/dataset/load_lerobot_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..9971e4ae9749694853c9316a90415b944b818bfd --- /dev/null +++ b/examples/dataset/load_lerobot_dataset.py @@ -0,0 +1,146 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This script demonstrates the use of `LeRobotDataset` class for handling and processing robotic datasets from Hugging Face. +It illustrates how to load datasets, manipulate them, and apply transformations suitable for machine learning tasks in PyTorch. + +Features included in this script: +- Viewing a dataset's metadata and exploring its properties. +- Loading an existing dataset from the hub or a subset of it. +- Accessing frames by episode number. +- Using advanced dataset features like timestamp-based frame selection. +- Demonstrating compatibility with PyTorch DataLoader for batch processing. + +The script ends with examples of how to batch process data using PyTorch's DataLoader. 
+""" + +from pprint import pprint + +import torch +from huggingface_hub import HfApi + +import lerobot +from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata + +# We ported a number of existing datasets ourselves, use this to see the list: +print("List of available datasets:") +pprint(lerobot.available_datasets) + +# You can also browse through the datasets created/ported by the community on the hub using the hub api: +hub_api = HfApi() +repo_ids = [info.id for info in hub_api.list_datasets(task_categories="robotics", tags=["LeRobot"])] +pprint(repo_ids) + +# Or simply explore them in your web browser directly at: +# https://huggingface.co/datasets?other=LeRobot + +# Let's take this one for this example +repo_id = "lerobot/aloha_mobile_cabinet" +# We can have a look and fetch its metadata to know more about it: +ds_meta = LeRobotDatasetMetadata(repo_id) + +# By instantiating just this class, you can quickly access useful information about the content and the +# structure of the dataset without downloading the actual data yet (only metadata files — which are +# lightweight). +print(f"Total number of episodes: {ds_meta.total_episodes}") +print(f"Average number of frames per episode: {ds_meta.total_frames / ds_meta.total_episodes:.3f}") +print(f"Frames per second used during data collection: {ds_meta.fps}") +print(f"Robot type: {ds_meta.robot_type}") +print(f"keys to access images from cameras: {ds_meta.camera_keys=}\n") + +print("Tasks:") +print(ds_meta.tasks) +print("Features:") +pprint(ds_meta.features) + +# You can also get a short summary by simply printing the object: +print(ds_meta) + +# You can then load the actual dataset from the hub. +# Either load any subset of episodes: +dataset = LeRobotDataset(repo_id, episodes=[0, 10, 11, 23]) + +# And see how many frames you have: +print(f"Selected episodes: {dataset.episodes}") +print(f"Number of episodes selected: {dataset.num_episodes}") +print(f"Number of frames selected: {dataset.num_frames}") + +# Or simply load the entire dataset: +dataset = LeRobotDataset(repo_id) +print(f"Number of episodes selected: {dataset.num_episodes}") +print(f"Number of frames selected: {dataset.num_frames}") + +# The previous metadata class is contained in the 'meta' attribute of the dataset: +print(dataset.meta) + +# LeRobotDataset actually wraps an underlying Hugging Face dataset +# (see https://huggingface.co/docs/datasets for more information). +print(dataset.hf_dataset) + +# LeRobot datasets also subclasses PyTorch datasets so you can do everything you know and love from working +# with the latter, like iterating through the dataset. +# The __getitem__ iterates over the frames of the dataset. Since our datasets are also structured by +# episodes, you can access the frame indices of any episode using dataset.meta.episodes. Here, we access +# frame indices associated to the first episode: +episode_index = 0 +from_idx = dataset.meta.episodes["dataset_from_index"][episode_index] +to_idx = dataset.meta.episodes["dataset_to_index"][episode_index] + +# Then we grab all the image frames from the first camera: +camera_key = dataset.meta.camera_keys[0] +frames = [dataset[idx][camera_key] for idx in range(from_idx, to_idx)] + +# The objects returned by the dataset are all torch.Tensors +print(type(frames[0])) +print(frames[0].shape) + +# Since we're using pytorch, the shape is in pytorch, channel-first convention (c, h, w). 
+# We can compare this shape with the information available for that feature +pprint(dataset.features[camera_key]) +# In particular: +print(dataset.features[camera_key]["shape"]) +# The shape is in (h, w, c) which is a more universal format. + +# For many machine learning applications we need to load the history of past observations or trajectories of +# future actions. Our datasets can load previous and future frames for each key/modality, using timestamps +# differences with the current loaded frame. For instance: +delta_timestamps = { + # loads 4 images: 1 second before current frame, 500 ms before, 200 ms before, and current frame + camera_key: [-1, -0.5, -0.20, 0], + # loads 6 state vectors: 1.5 seconds before, 1 second before, ... 200 ms, 100 ms, and current frame + "observation.state": [-1.5, -1, -0.5, -0.20, -0.10, 0], + # loads 64 action vectors: current frame, 1 frame in the future, 2 frames, ... 63 frames in the future + "action": [t / dataset.fps for t in range(64)], +} +# Note that in any case, these delta_timestamps values need to be multiples of (1/fps) so that added to any +# timestamp, you still get a valid timestamp. + +dataset = LeRobotDataset(repo_id, delta_timestamps=delta_timestamps) +print(f"\n{dataset[0][camera_key].shape=}") # (4, c, h, w) +print(f"{dataset[0]['observation.state'].shape=}") # (6, c) +print(f"{dataset[0]['action'].shape=}\n") # (64, c) + +if __name__ == "__main__": + dataloader = torch.utils.data.DataLoader( + dataset, + num_workers=4, + batch_size=32, + shuffle=True, + ) + for batch in dataloader: + print(f"{batch[camera_key].shape=}") # (32, 4, c, h, w) + print(f"{batch['observation.state'].shape=}") # (32, 6, c) + print(f"{batch['action'].shape=}") # (32, 64, c) + break diff --git a/examples/dataset/use_dataset_image_transforms.py b/examples/dataset/use_dataset_image_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..598bf0babdb279d6042a91229a2f3f82f4605e6a --- /dev/null +++ b/examples/dataset/use_dataset_image_transforms.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This example demonstrates how to use image transforms with LeRobot datasets for data augmentation during training. + +Image transforms are applied to camera frames to improve model robustness and generalization. They are applied +at training time only, not during dataset recording, allowing you to experiment with different augmentations +without re-recording data. 
+""" + +import torch +from torchvision.transforms import v2 +from torchvision.transforms.functional import to_pil_image + +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.datasets.transforms import ImageTransformConfig, ImageTransforms, ImageTransformsConfig + + +def save_image(tensor, filename): + """Helper function to save a tensor as an image file.""" + if tensor.dim() == 3: # [C, H, W] + if tensor.max() > 1.0: + tensor = tensor / 255.0 + tensor = torch.clamp(tensor, 0.0, 1.0) + pil_image = to_pil_image(tensor) + pil_image.save(filename) + print(f"Saved: {filename}") + else: + print(f"Skipped {filename}: unexpected tensor shape {tensor.shape}") + + +def example_1_default_transforms(): + """Example 1: Use default transform configuration and save original vs transformed images""" + print("\n Example 1: Default Transform Configuration with Image Saving") + + repo_id = "pepijn223/record_main_0" # Example dataset + + try: + # Load dataset without transforms (original) + dataset_original = LeRobotDataset(repo_id=repo_id) + + # Load dataset with transforms enabled + transforms_config = ImageTransformsConfig( + enable=True, # Enable transforms (disabled by default) + max_num_transforms=2, # Apply up to 2 transforms per frame + random_order=False, # Apply in standard order + ) + dataset_with_transforms = LeRobotDataset( + repo_id=repo_id, image_transforms=ImageTransforms(transforms_config) + ) + + # Save original and transformed images for comparison + if len(dataset_original) > 0: + frame_idx = 0 # Use first frame + original_sample = dataset_original[frame_idx] + transformed_sample = dataset_with_transforms[frame_idx] + + print(f"Saving comparison images (frame {frame_idx}):") + + for cam_key in dataset_original.meta.camera_keys: + if cam_key in original_sample and cam_key in transformed_sample: + cam_name = cam_key.replace(".", "_").replace("/", "_") + + # Save original and transformed images + save_image(original_sample[cam_key], f"{cam_name}_original.png") + save_image(transformed_sample[cam_key], f"{cam_name}_transformed.png") + + except Exception as e: + print(f"Could not load dataset '{repo_id}': {e}") + + +def example_2_custom_transforms(): + """Example 2: Create custom transform configuration and save examples""" + print("\n Example 2: Custom Transform Configuration") + + repo_id = "pepijn223/record_main_0" # Example dataset + + try: + # Create custom transform configuration with strong effects + custom_transforms_config = ImageTransformsConfig( + enable=True, + max_num_transforms=2, # Apply up to 2 transforms per frame + random_order=True, # Apply transforms in random order + tfs={ + "brightness": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"brightness": (0.5, 1.5)}, # Strong brightness range + ), + "contrast": ImageTransformConfig( + weight=1.0, # Higher weight = more likely to be selected + type="ColorJitter", + kwargs={"contrast": (0.6, 1.4)}, # Strong contrast + ), + "sharpness": ImageTransformConfig( + weight=0.5, # Lower weight = less likely to be selected + type="SharpnessJitter", + kwargs={"sharpness": (0.2, 2.0)}, # Strong sharpness variation + ), + }, + ) + + dataset_with_custom_transforms = LeRobotDataset( + repo_id=repo_id, image_transforms=ImageTransforms(custom_transforms_config) + ) + + # Save examples with strong transforms + if len(dataset_with_custom_transforms) > 0: + sample = dataset_with_custom_transforms[0] + print("Saving custom transform examples:") + + for cam_key in 
dataset_with_custom_transforms.meta.camera_keys: + if cam_key in sample: + cam_name = cam_key.replace(".", "_").replace("/", "_") + save_image(sample[cam_key], f"{cam_name}_custom_transforms.png") + + except Exception as e: + print(f"Could not load dataset '{repo_id}': {e}") + + +def example_3_torchvision_transforms(): + """Example 3: Use pure torchvision transforms and save examples""" + print("\n Example 3: Pure Torchvision Transforms") + + repo_id = "pepijn223/record_main_0" # Example dataset + + try: + # Create torchvision transform pipeline + torchvision_transforms = v2.Compose( + [ + v2.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1), + v2.GaussianBlur(kernel_size=3, sigma=(0.1, 2.0)), + v2.RandomRotation(degrees=10), # Small rotation + ] + ) + + dataset_with_torchvision = LeRobotDataset(repo_id=repo_id, image_transforms=torchvision_transforms) + + # Save examples with torchvision transforms + if len(dataset_with_torchvision) > 0: + sample = dataset_with_torchvision[0] + print("Saving torchvision transform examples:") + + for cam_key in dataset_with_torchvision.meta.camera_keys: + if cam_key in sample: + cam_name = cam_key.replace(".", "_").replace("/", "_") + save_image(sample[cam_key], f"{cam_name}_torchvision.png") + + except Exception as e: + print(f"Could not load dataset '{repo_id}': {e}") + + +def main(): + """Run all examples""" + print("LeRobot Dataset Image Transforms Examples") + + example_1_default_transforms() + example_2_custom_transforms() + example_3_torchvision_transforms() + + +if __name__ == "__main__": + main() diff --git a/examples/dataset/use_dataset_tools.py b/examples/dataset/use_dataset_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..412eaab15cc9b3c05a5747728271b741411a3010 --- /dev/null +++ b/examples/dataset/use_dataset_tools.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Example script demonstrating dataset tools utilities. + +This script shows how to: +1. Delete episodes from a dataset +2. Split a dataset into train/val sets +3. Add/remove features +4. Merge datasets + +Usage: + python examples/dataset/use_dataset_tools.py +""" + +import numpy as np + +from lerobot.datasets.dataset_tools import ( + add_features, + delete_episodes, + merge_datasets, + modify_features, + remove_feature, + split_dataset, +) +from lerobot.datasets.lerobot_dataset import LeRobotDataset + + +def main(): + dataset = LeRobotDataset("lerobot/pusht") + + print(f"Original dataset: {dataset.meta.total_episodes} episodes, {dataset.meta.total_frames} frames") + print(f"Features: {list(dataset.meta.features.keys())}") + + print("\n1. Deleting episodes 0 and 2...") + filtered_dataset = delete_episodes(dataset, episode_indices=[0, 2], repo_id="lerobot/pusht_filtered") + print(f"Filtered dataset: {filtered_dataset.meta.total_episodes} episodes") + + print("\n2. 
Splitting dataset into train/val...") + splits = split_dataset( + dataset, + splits={"train": 0.8, "val": 0.2}, + ) + print(f"Train split: {splits['train'].meta.total_episodes} episodes") + print(f"Val split: {splits['val'].meta.total_episodes} episodes") + + print("\n3. Adding features...") + + reward_values = np.random.randn(dataset.meta.total_frames).astype(np.float32) + + def compute_success(row_dict, episode_index, frame_index): + episode_length = 10 + return float(frame_index >= episode_length - 10) + + dataset_with_features = add_features( + dataset, + features={ + "reward": ( + reward_values, + {"dtype": "float32", "shape": (1,), "names": None}, + ), + "success": ( + compute_success, + {"dtype": "float32", "shape": (1,), "names": None}, + ), + }, + repo_id="lerobot/pusht_with_features", + ) + + print(f"New features: {list(dataset_with_features.meta.features.keys())}") + + print("\n4. Removing the success feature...") + dataset_cleaned = remove_feature( + dataset_with_features, feature_names="success", repo_id="lerobot/pusht_cleaned" + ) + print(f"Features after removal: {list(dataset_cleaned.meta.features.keys())}") + + print("\n5. Using modify_features to add and remove features simultaneously...") + dataset_modified = modify_features( + dataset_with_features, + add_features={ + "discount": ( + np.ones(dataset.meta.total_frames, dtype=np.float32) * 0.99, + {"dtype": "float32", "shape": (1,), "names": None}, + ), + }, + remove_features="reward", + repo_id="lerobot/pusht_modified", + ) + print(f"Modified features: {list(dataset_modified.meta.features.keys())}") + + print("\n6. Merging train and val splits back together...") + merged = merge_datasets([splits["train"], splits["val"]], output_repo_id="lerobot/pusht_merged") + print(f"Merged dataset: {merged.meta.total_episodes} episodes") + + print("\n7. Complex workflow example...") + + if len(dataset.meta.camera_keys) > 1: + camera_to_remove = dataset.meta.camera_keys[0] + print(f"Removing camera: {camera_to_remove}") + dataset_no_cam = remove_feature( + dataset, feature_names=camera_to_remove, repo_id="pusht_no_first_camera" + ) + print(f"Remaining cameras: {dataset_no_cam.meta.camera_keys}") + + print("\nDone! Check ~/.cache/huggingface/lerobot/ for the created datasets.") + + +if __name__ == "__main__": + main() diff --git a/examples/lekiwi/evaluate.py b/examples/lekiwi/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..0e027e5ff57351c3f0f12b468b0845229c36e1e7 --- /dev/null +++ b/examples/lekiwi/evaluate.py @@ -0,0 +1,138 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
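+
+"""
+Evaluate a pretrained ACT policy on a LeKiwi robot.
+
+This example runs the policy for several episodes, records the resulting
+observations and actions into a new LeRobotDataset, and pushes the dataset to
+the Hub. The LeKiwi host script must already be running on the robot (see the
+note above `robot.connect()` below).
+"""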
+ +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.datasets.utils import hw_to_dataset_features +from lerobot.policies.act.modeling_act import ACTPolicy +from lerobot.policies.factory import make_pre_post_processors +from lerobot.processor import make_default_processors +from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig +from lerobot.scripts.lerobot_record import record_loop +from lerobot.utils.constants import ACTION, OBS_STR +from lerobot.utils.control_utils import init_keyboard_listener +from lerobot.utils.utils import log_say +from lerobot.utils.visualization_utils import init_rerun + +NUM_EPISODES = 2 +FPS = 30 +EPISODE_TIME_SEC = 60 +TASK_DESCRIPTION = "My task description" +HF_MODEL_ID = "/" +HF_DATASET_ID = "/" + +# Create the robot configuration & robot +robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi") + +robot = LeKiwiClient(robot_config) + +# Create policy +policy = ACTPolicy.from_pretrained(HF_MODEL_ID) + +# Configure the dataset features +action_features = hw_to_dataset_features(robot.action_features, ACTION) +obs_features = hw_to_dataset_features(robot.observation_features, OBS_STR) +dataset_features = {**action_features, **obs_features} + +# Create the dataset +dataset = LeRobotDataset.create( + repo_id=HF_DATASET_ID, + fps=FPS, + features=dataset_features, + robot_type=robot.name, + use_videos=True, + image_writer_threads=4, +) + +# Build Policy Processors +preprocessor, postprocessor = make_pre_post_processors( + policy_cfg=policy, + pretrained_path=HF_MODEL_ID, + dataset_stats=dataset.meta.stats, + # The inference device is automatically set to match the detected hardware, overriding any previous device settings from training to ensure compatibility. + preprocessor_overrides={"device_processor": {"device": str(policy.config.device)}}, +) + +# Connect the robot +# To connect you already should have this script running on LeKiwi: `python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi` +robot.connect() + +# TODO(Steven): Update this example to use pipelines +teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors() + +# Initialize the keyboard listener and rerun visualization +listener, events = init_keyboard_listener() +init_rerun(session_name="lekiwi_evaluate") + +if not robot.is_connected: + raise ValueError("Robot is not connected!") + +print("Starting evaluate loop...") +recorded_episodes = 0 +while recorded_episodes < NUM_EPISODES and not events["stop_recording"]: + log_say(f"Running inference, recording eval episode {recorded_episodes} of {NUM_EPISODES}") + + # Main record loop + record_loop( + robot=robot, + events=events, + fps=FPS, + policy=policy, + preprocessor=preprocessor, # Pass the pre and post policy processors + postprocessor=postprocessor, + dataset=dataset, + control_time_s=EPISODE_TIME_SEC, + single_task=TASK_DESCRIPTION, + display_data=True, + teleop_action_processor=teleop_action_processor, + robot_action_processor=robot_action_processor, + robot_observation_processor=robot_observation_processor, + ) + + # Reset the environment if not stopping or re-recording + if not events["stop_recording"] and ( + (recorded_episodes < NUM_EPISODES - 1) or events["rerecord_episode"] + ): + log_say("Reset the environment") + record_loop( + robot=robot, + events=events, + fps=FPS, + control_time_s=EPISODE_TIME_SEC, + single_task=TASK_DESCRIPTION, + display_data=True, + teleop_action_processor=teleop_action_processor, + 
robot_action_processor=robot_action_processor, + robot_observation_processor=robot_observation_processor, + ) + + if events["rerecord_episode"]: + log_say("Re-record episode") + events["rerecord_episode"] = False + events["exit_early"] = False + dataset.clear_episode_buffer() + continue + + # Save episode + dataset.save_episode() + recorded_episodes += 1 + +# Clean up +log_say("Stop recording") +robot.disconnect() +listener.stop() + +dataset.finalize() +dataset.push_to_hub() diff --git a/examples/lekiwi/record.py b/examples/lekiwi/record.py new file mode 100644 index 0000000000000000000000000000000000000000..98a36e0a1c8a71cc08bf9248582525c7a864bcaa --- /dev/null +++ b/examples/lekiwi/record.py @@ -0,0 +1,135 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.datasets.utils import hw_to_dataset_features +from lerobot.processor import make_default_processors +from lerobot.robots.lekiwi.config_lekiwi import LeKiwiClientConfig +from lerobot.robots.lekiwi.lekiwi_client import LeKiwiClient +from lerobot.scripts.lerobot_record import record_loop +from lerobot.teleoperators.keyboard import KeyboardTeleop, KeyboardTeleopConfig +from lerobot.teleoperators.so100_leader import SO100Leader, SO100LeaderConfig +from lerobot.utils.constants import ACTION, OBS_STR +from lerobot.utils.control_utils import init_keyboard_listener +from lerobot.utils.utils import log_say +from lerobot.utils.visualization_utils import init_rerun + +NUM_EPISODES = 2 +FPS = 30 +EPISODE_TIME_SEC = 30 +RESET_TIME_SEC = 10 +TASK_DESCRIPTION = "My task description" +HF_REPO_ID = "/" + +# Create the robot and teleoperator configurations +robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi") +leader_arm_config = SO100LeaderConfig(port="/dev/tty.usbmodem585A0077581", id="my_awesome_leader_arm") +keyboard_config = KeyboardTeleopConfig() + +# Initialize the robot and teleoperator +robot = LeKiwiClient(robot_config) +leader_arm = SO100Leader(leader_arm_config) +keyboard = KeyboardTeleop(keyboard_config) + +# TODO(Steven): Update this example to use pipelines +teleop_action_processor, robot_action_processor, robot_observation_processor = make_default_processors() + +# Configure the dataset features +action_features = hw_to_dataset_features(robot.action_features, ACTION) +obs_features = hw_to_dataset_features(robot.observation_features, OBS_STR) +dataset_features = {**action_features, **obs_features} + +# Create the dataset +dataset = LeRobotDataset.create( + repo_id=HF_REPO_ID, + fps=FPS, + features=dataset_features, + robot_type=robot.name, + use_videos=True, + image_writer_threads=4, +) + +# Connect the robot and teleoperator +# To connect you already should have this script running on LeKiwi: `python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi` +robot.connect() +leader_arm.connect() +keyboard.connect() + +# Initialize the keyboard 
listener and rerun visualization +listener, events = init_keyboard_listener() +init_rerun(session_name="lekiwi_record") + +if not robot.is_connected or not leader_arm.is_connected or not keyboard.is_connected: + raise ValueError("Robot or teleop is not connected!") + +print("Starting record loop...") +recorded_episodes = 0 +while recorded_episodes < NUM_EPISODES and not events["stop_recording"]: + log_say(f"Recording episode {recorded_episodes}") + + # Main record loop + record_loop( + robot=robot, + events=events, + fps=FPS, + dataset=dataset, + teleop=[leader_arm, keyboard], + control_time_s=EPISODE_TIME_SEC, + single_task=TASK_DESCRIPTION, + display_data=True, + teleop_action_processor=teleop_action_processor, + robot_action_processor=robot_action_processor, + robot_observation_processor=robot_observation_processor, + ) + + # Reset the environment if not stopping or re-recording + if not events["stop_recording"] and ( + (recorded_episodes < NUM_EPISODES - 1) or events["rerecord_episode"] + ): + log_say("Reset the environment") + record_loop( + robot=robot, + events=events, + fps=FPS, + teleop=[leader_arm, keyboard], + control_time_s=RESET_TIME_SEC, + single_task=TASK_DESCRIPTION, + display_data=True, + teleop_action_processor=teleop_action_processor, + robot_action_processor=robot_action_processor, + robot_observation_processor=robot_observation_processor, + ) + + if events["rerecord_episode"]: + log_say("Re-record episode") + events["rerecord_episode"] = False + events["exit_early"] = False + dataset.clear_episode_buffer() + continue + + # Save episode + dataset.save_episode() + recorded_episodes += 1 + +# Clean up +log_say("Stop recording") +robot.disconnect() +leader_arm.disconnect() +keyboard.disconnect() +listener.stop() + +dataset.finalize() +dataset.push_to_hub() diff --git a/examples/lekiwi/replay.py b/examples/lekiwi/replay.py new file mode 100644 index 0000000000000000000000000000000000000000..d7dac596fb1676e7b8800b32e7425d6abaf68f9f --- /dev/null +++ b/examples/lekiwi/replay.py @@ -0,0 +1,61 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
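+
+"""
+Replay a recorded episode on a LeKiwi robot.
+
+This example loads a single episode from a LeRobotDataset and re-sends the
+recorded actions to the robot at the dataset's frame rate.
+"""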
+ +import time + +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.robots.lekiwi.config_lekiwi import LeKiwiClientConfig +from lerobot.robots.lekiwi.lekiwi_client import LeKiwiClient +from lerobot.utils.constants import ACTION +from lerobot.utils.robot_utils import busy_wait +from lerobot.utils.utils import log_say + +EPISODE_IDX = 0 + +# Initialize the robot config +robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi") + +# Initialize the robot +robot = LeKiwiClient(robot_config) + +# Fetch the dataset to replay +dataset = LeRobotDataset("/", episodes=[EPISODE_IDX]) +# Filter dataset to only include frames from the specified episode since episodes are chunked in dataset V3.0 +episode_frames = dataset.hf_dataset.filter(lambda x: x["episode_index"] == EPISODE_IDX) +actions = episode_frames.select_columns(ACTION) + +# Connect to the robot +robot.connect() + +if not robot.is_connected: + raise ValueError("Robot is not connected!") + +print("Starting replay loop...") +log_say(f"Replaying episode {EPISODE_IDX}") +for idx in range(len(episode_frames)): + t0 = time.perf_counter() + + # Get recorded action from dataset + action = { + name: float(actions[idx][ACTION][i]) for i, name in enumerate(dataset.features[ACTION]["names"]) + } + + # Send action to robot + _ = robot.send_action(action) + + busy_wait(max(1.0 / dataset.fps - (time.perf_counter() - t0), 0.0)) + +robot.disconnect() diff --git a/examples/lekiwi/teleoperate.py b/examples/lekiwi/teleoperate.py new file mode 100644 index 0000000000000000000000000000000000000000..d2d22943d19c8040236655897bac232dc1be8a2f --- /dev/null +++ b/examples/lekiwi/teleoperate.py @@ -0,0 +1,72 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
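+
+"""
+Teleoperate a LeKiwi robot.
+
+This example drives the arm joints with an SO100 leader arm and the mobile base
+with the keyboard, and streams observations and actions to the rerun viewer.
+"""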
+ +import time + +from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig +from lerobot.teleoperators.keyboard.teleop_keyboard import KeyboardTeleop, KeyboardTeleopConfig +from lerobot.teleoperators.so100_leader import SO100Leader, SO100LeaderConfig +from lerobot.utils.robot_utils import busy_wait +from lerobot.utils.visualization_utils import init_rerun, log_rerun_data + +FPS = 30 + +# Create the robot and teleoperator configurations +robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="my_lekiwi") +teleop_arm_config = SO100LeaderConfig(port="/dev/tty.usbmodem585A0077581", id="my_awesome_leader_arm") +keyboard_config = KeyboardTeleopConfig(id="my_laptop_keyboard") + +# Initialize the robot and teleoperator +robot = LeKiwiClient(robot_config) +leader_arm = SO100Leader(teleop_arm_config) +keyboard = KeyboardTeleop(keyboard_config) + +# Connect to the robot and teleoperator +# To connect you already should have this script running on LeKiwi: `python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi` +robot.connect() +leader_arm.connect() +keyboard.connect() + +# Init rerun viewer +init_rerun(session_name="lekiwi_teleop") + +if not robot.is_connected or not leader_arm.is_connected or not keyboard.is_connected: + raise ValueError("Robot or teleop is not connected!") + +print("Starting teleop loop...") +while True: + t0 = time.perf_counter() + + # Get robot observation + observation = robot.get_observation() + + # Get teleop action + # Arm + arm_action = leader_arm.get_action() + arm_action = {f"arm_{k}": v for k, v in arm_action.items()} + # Keyboard + keyboard_keys = keyboard.get_action() + base_action = robot._from_keyboard_to_base_action(keyboard_keys) + + action = {**arm_action, **base_action} if len(base_action) > 0 else arm_action + + # Send action to robot + _ = robot.send_action(action) + + # Visualize + log_rerun_data(observation=observation, action=action) + + busy_wait(max(1.0 / FPS - (time.perf_counter() - t0), 0.0)) diff --git a/examples/phone_to_so100/evaluate.py b/examples/phone_to_so100/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..b851d24ce5bc9ba7b34582c3b868f0c5a61691a7 --- /dev/null +++ b/examples/phone_to_so100/evaluate.py @@ -0,0 +1,199 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
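+
+"""
+Evaluate a pretrained ACT policy that outputs end-effector actions on an SO100 follower arm.
+
+This example builds kinematics pipelines to convert between end-effector poses
+and joint positions, runs the policy for several episodes, and records the
+evaluation episodes into a new LeRobotDataset.
+"""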
+ +from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig +from lerobot.configs.types import FeatureType, PolicyFeature +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features +from lerobot.datasets.utils import combine_feature_dicts +from lerobot.model.kinematics import RobotKinematics +from lerobot.policies.act.modeling_act import ACTPolicy +from lerobot.policies.factory import make_pre_post_processors +from lerobot.processor import ( + RobotAction, + RobotObservation, + RobotProcessorPipeline, + make_default_teleop_action_processor, +) +from lerobot.processor.converters import ( + observation_to_transition, + robot_action_observation_to_transition, + transition_to_observation, + transition_to_robot_action, +) +from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig +from lerobot.robots.so100_follower.robot_kinematic_processor import ( + ForwardKinematicsJointsToEE, + InverseKinematicsEEToJoints, +) +from lerobot.robots.so100_follower.so100_follower import SO100Follower +from lerobot.scripts.lerobot_record import record_loop +from lerobot.utils.control_utils import init_keyboard_listener +from lerobot.utils.utils import log_say +from lerobot.utils.visualization_utils import init_rerun + +NUM_EPISODES = 5 +FPS = 30 +EPISODE_TIME_SEC = 60 +TASK_DESCRIPTION = "My task description" +HF_MODEL_ID = "/" +HF_DATASET_ID = "/" + +# Create the robot configuration & robot +camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} +robot_config = SO100FollowerConfig( + port="/dev/tty.usbmodem58760434471", + id="my_awesome_follower_arm", + cameras=camera_config, + use_degrees=True, +) + +robot = SO100Follower(robot_config) + +# Create policy +policy = ACTPolicy.from_pretrained(HF_MODEL_ID) + +# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf +kinematics_solver = RobotKinematics( + urdf_path="./SO101/so101_new_calib.urdf", + target_frame_name="gripper_frame_link", + joint_names=list(robot.bus.motors.keys()), +) + +# Build pipeline to convert EE action to joints action +robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( + steps=[ + InverseKinematicsEEToJoints( + kinematics=kinematics_solver, + motor_names=list(robot.bus.motors.keys()), + initial_guess_current_joints=True, + ), + ], + to_transition=robot_action_observation_to_transition, + to_output=transition_to_robot_action, +) + +# Build pipeline to convert joints observation to EE observation +robot_joints_to_ee_pose_processor = RobotProcessorPipeline[RobotObservation, RobotObservation]( + steps=[ + ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys())) + ], + to_transition=observation_to_transition, + to_output=transition_to_observation, +) + +# Create the dataset +dataset = LeRobotDataset.create( + repo_id=HF_DATASET_ID, + fps=FPS, + features=combine_feature_dicts( + aggregate_pipeline_dataset_features( + pipeline=robot_joints_to_ee_pose_processor, + initial_features=create_initial_features(observation=robot.observation_features), + use_videos=True, + ), + # User for now should be explicit on the feature keys that were used for record + # Alternatively, the user can pass the processor step that has the right features + 
+        aggregate_pipeline_dataset_features(
+            pipeline=make_default_teleop_action_processor(),
+            initial_features=create_initial_features(
+                action={
+                    f"ee.{k}": PolicyFeature(type=FeatureType.ACTION, shape=(1,))
+                    for k in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"]
+                }
+            ),
+            use_videos=True,
+        ),
+    ),
+    robot_type=robot.name,
+    use_videos=True,
+    image_writer_threads=4,
+)
+
+# Build Policy Processors
+preprocessor, postprocessor = make_pre_post_processors(
+    policy_cfg=policy,
+    pretrained_path=HF_MODEL_ID,
+    dataset_stats=dataset.meta.stats,
+    # The inference device is automatically set to match the detected hardware, overriding any previous device settings from training to ensure compatibility.
+    preprocessor_overrides={"device_processor": {"device": str(policy.config.device)}},
+)
+
+# Connect the robot
+robot.connect()
+
+# Initialize the keyboard listener and rerun visualization
+listener, events = init_keyboard_listener()
+init_rerun(session_name="phone_so100_evaluate")
+
+if not robot.is_connected:
+    raise ValueError("Robot is not connected!")
+
+print("Starting evaluate loop...")
+for episode_idx in range(NUM_EPISODES):
+    log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}")
+
+    # Main record loop
+    record_loop(
+        robot=robot,
+        events=events,
+        fps=FPS,
+        policy=policy,
+        preprocessor=preprocessor,  # Pass the pre and post policy processors
+        postprocessor=postprocessor,
+        dataset=dataset,
+        control_time_s=EPISODE_TIME_SEC,
+        single_task=TASK_DESCRIPTION,
+        display_data=True,
+        teleop_action_processor=make_default_teleop_action_processor(),
+        robot_action_processor=robot_ee_to_joints_processor,
+        robot_observation_processor=robot_joints_to_ee_pose_processor,
+    )
+
+    # Reset the environment if not stopping or re-recording
+    if not events["stop_recording"] and ((episode_idx < NUM_EPISODES - 1) or events["rerecord_episode"]):
+        log_say("Reset the environment")
+        record_loop(
+            robot=robot,
+            events=events,
+            fps=FPS,
+            control_time_s=EPISODE_TIME_SEC,
+            single_task=TASK_DESCRIPTION,
+            display_data=True,
+            teleop_action_processor=make_default_teleop_action_processor(),
+            robot_action_processor=robot_ee_to_joints_processor,
+            robot_observation_processor=robot_joints_to_ee_pose_processor,
+        )
+
+    if events["rerecord_episode"]:
+        log_say("Re-record episode")
+        events["rerecord_episode"] = False
+        events["exit_early"] = False
+        dataset.clear_episode_buffer()
+        continue
+
+    # Save episode
+    dataset.save_episode()
+
+# Clean up
+log_say("Stop recording")
+robot.disconnect()
+listener.stop()
+
+dataset.finalize()
+dataset.push_to_hub()
diff --git a/examples/phone_to_so100/record.py b/examples/phone_to_so100/record.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1f5751c5d68b2b35f306f91ec5aeb8550590168
--- /dev/null
+++ b/examples/phone_to_so100/record.py
@@ -0,0 +1,205 @@
+# !/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features
+from lerobot.datasets.utils import combine_feature_dicts
+from lerobot.model.kinematics import RobotKinematics
+from lerobot.processor import RobotAction, RobotObservation, RobotProcessorPipeline
+from lerobot.processor.converters import (
+    observation_to_transition,
+    robot_action_observation_to_transition,
+    transition_to_observation,
+    transition_to_robot_action,
+)
+from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
+from lerobot.robots.so100_follower.robot_kinematic_processor import (
+    EEBoundsAndSafety,
+    EEReferenceAndDelta,
+    ForwardKinematicsJointsToEE,
+    GripperVelocityToJoint,
+    InverseKinematicsEEToJoints,
+)
+from lerobot.robots.so100_follower.so100_follower import SO100Follower
+from lerobot.scripts.lerobot_record import record_loop
+from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS
+from lerobot.teleoperators.phone.phone_processor import MapPhoneActionToRobotAction
+from lerobot.teleoperators.phone.teleop_phone import Phone
+from lerobot.utils.control_utils import init_keyboard_listener
+from lerobot.utils.utils import log_say
+from lerobot.utils.visualization_utils import init_rerun
+
+NUM_EPISODES = 2
+FPS = 30
+EPISODE_TIME_SEC = 60
+RESET_TIME_SEC = 30
+TASK_DESCRIPTION = "My task description"
+HF_REPO_ID = "<hf_username>/<dataset_repo_id>"
+
+# Create the robot and teleoperator configurations
+camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
+robot_config = SO100FollowerConfig(
+    port="/dev/tty.usbmodem5A460814411",
+    id="my_awesome_follower_arm",
+    cameras=camera_config,
+    use_degrees=True,
+)
+teleop_config = PhoneConfig(phone_os=PhoneOS.IOS)  # or PhoneOS.ANDROID
+
+# Initialize the robot and teleoperator
+robot = SO100Follower(robot_config)
+phone = Phone(teleop_config)
+
+# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
+kinematics_solver = RobotKinematics(
+    urdf_path="./SO101/so101_new_calib.urdf",
+    target_frame_name="gripper_frame_link",
+    joint_names=list(robot.bus.motors.keys()),
+)
+
+# Build pipeline to convert phone action to EE action
+phone_to_robot_ee_pose_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
+    steps=[
+        MapPhoneActionToRobotAction(platform=teleop_config.phone_os),
+        EEReferenceAndDelta(
+            kinematics=kinematics_solver,
+            end_effector_step_sizes={"x": 0.5, "y": 0.5, "z": 0.5},
+            motor_names=list(robot.bus.motors.keys()),
+            use_latched_reference=True,
+        ),
+        EEBoundsAndSafety(
+            end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]},
+            max_ee_step_m=0.20,
+        ),
+        GripperVelocityToJoint(speed_factor=20.0),
+    ],
+    to_transition=robot_action_observation_to_transition,
+    to_output=transition_to_robot_action,
+)
+
+# Build pipeline to convert EE action to joints action
+robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
+    steps=[
+        InverseKinematicsEEToJoints(
+            kinematics=kinematics_solver,
+            motor_names=list(robot.bus.motors.keys()),
+            initial_guess_current_joints=True,
+        ),
+    ],
+    to_transition=robot_action_observation_to_transition,
+    to_output=transition_to_robot_action,
+)
+
+# Build pipeline to convert joint observation to EE observation
+robot_joints_to_ee_pose = RobotProcessorPipeline[RobotObservation, RobotObservation](
+    steps=[
+        ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys()))
+    ],
+    to_transition=observation_to_transition,
+    to_output=transition_to_observation,
+)
+
+# Create the dataset
+dataset = LeRobotDataset.create(
+    repo_id=HF_REPO_ID,
+    fps=FPS,
+    features=combine_feature_dicts(
+        # Run the feature contract of the pipelines
+        # This tells you what the features will look like after the pipeline steps
+        aggregate_pipeline_dataset_features(
+            pipeline=phone_to_robot_ee_pose_processor,
+            initial_features=create_initial_features(action=phone.action_features),
+            use_videos=True,
+        ),
+        aggregate_pipeline_dataset_features(
+            pipeline=robot_joints_to_ee_pose,
+            initial_features=create_initial_features(observation=robot.observation_features),
+            use_videos=True,
+        ),
+    ),
+    robot_type=robot.name,
+    use_videos=True,
+    image_writer_threads=4,
+)
+
+# Connect the robot and teleoperator
+robot.connect()
+phone.connect()
+
+# Initialize the keyboard listener and rerun visualization
+listener, events = init_keyboard_listener()
+init_rerun(session_name="phone_so100_record")
+
+if not robot.is_connected or not phone.is_connected:
+    raise ValueError("Robot or teleop is not connected!")
+
+
+print("Starting record loop. Move your phone to teleoperate the robot...")
+episode_idx = 0
+while episode_idx < NUM_EPISODES and not events["stop_recording"]:
+    log_say(f"Recording episode {episode_idx + 1} of {NUM_EPISODES}")
+
+    # Main record loop
+    record_loop(
+        robot=robot,
+        events=events,
+        fps=FPS,
+        teleop=phone,
+        dataset=dataset,
+        control_time_s=EPISODE_TIME_SEC,
+        single_task=TASK_DESCRIPTION,
+        display_data=True,
+        teleop_action_processor=phone_to_robot_ee_pose_processor,
+        robot_action_processor=robot_ee_to_joints_processor,
+        robot_observation_processor=robot_joints_to_ee_pose,
+    )
+
+    # Reset the environment if not stopping or re-recording
+    if not events["stop_recording"] and (episode_idx < NUM_EPISODES - 1 or events["rerecord_episode"]):
+        log_say("Reset the environment")
+        record_loop(
+            robot=robot,
+            events=events,
+            fps=FPS,
+            teleop=phone,
+            control_time_s=RESET_TIME_SEC,
+            single_task=TASK_DESCRIPTION,
+            display_data=True,
+            teleop_action_processor=phone_to_robot_ee_pose_processor,
+            robot_action_processor=robot_ee_to_joints_processor,
+            robot_observation_processor=robot_joints_to_ee_pose,
+        )
+
+    if events["rerecord_episode"]:
+        log_say("Re-recording episode")
+        events["rerecord_episode"] = False
+        events["exit_early"] = False
+        dataset.clear_episode_buffer()
+        continue
+
+    # Save episode
+    dataset.save_episode()
+    episode_idx += 1
+
+# Clean up
+log_say("Stop recording")
+robot.disconnect()
+phone.disconnect()
+listener.stop()
+
+dataset.finalize()
+dataset.push_to_hub()
diff --git a/examples/phone_to_so100/replay.py b/examples/phone_to_so100/replay.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ae0d0a5939136168578a5d1345bba6197af06d2
--- /dev/null
+++ b/examples/phone_to_so100/replay.py
@@ -0,0 +1,100 @@
+# !/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.model.kinematics import RobotKinematics
+from lerobot.processor import RobotAction, RobotObservation, RobotProcessorPipeline
+from lerobot.processor.converters import (
+    robot_action_observation_to_transition,
+    transition_to_robot_action,
+)
+from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
+from lerobot.robots.so100_follower.robot_kinematic_processor import (
+    InverseKinematicsEEToJoints,
+)
+from lerobot.robots.so100_follower.so100_follower import SO100Follower
+from lerobot.utils.constants import ACTION
+from lerobot.utils.robot_utils import busy_wait
+from lerobot.utils.utils import log_say
+
+EPISODE_IDX = 0
+HF_REPO_ID = "<hf_username>/<dataset_repo_id>"
+
+# Initialize the robot config
+robot_config = SO100FollowerConfig(
+    port="/dev/tty.usbmodem5A460814411", id="my_awesome_follower_arm", use_degrees=True
+)
+
+# Initialize the robot
+robot = SO100Follower(robot_config)
+
+# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
+kinematics_solver = RobotKinematics(
+    urdf_path="./SO101/so101_new_calib.urdf",
+    target_frame_name="gripper_frame_link",
+    joint_names=list(robot.bus.motors.keys()),
+)
+
+# Build pipeline to convert EE action to joints action
+robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
+    steps=[
+        InverseKinematicsEEToJoints(
+            kinematics=kinematics_solver,
+            motor_names=list(robot.bus.motors.keys()),
+            initial_guess_current_joints=False,  # Because replay is open loop
+        ),
+    ],
+    to_transition=robot_action_observation_to_transition,
+    to_output=transition_to_robot_action,
+)
+
+# Fetch the dataset to replay
+dataset = LeRobotDataset(HF_REPO_ID, episodes=[EPISODE_IDX])
+# Filter dataset to only include frames from the specified episode since episodes are chunked in dataset V3.0
+episode_frames = dataset.hf_dataset.filter(lambda x: x["episode_index"] == EPISODE_IDX)
+actions = episode_frames.select_columns(ACTION)
+
+# Connect to the robot
+robot.connect()
+
+if not robot.is_connected:
+    raise ValueError("Robot is not connected!")
+
+print("Starting replay loop...")
+log_say(f"Replaying episode {EPISODE_IDX}")
+for idx in range(len(episode_frames)):
+    t0 = time.perf_counter()
+
+    # Get recorded action from dataset
+    ee_action = {
+        name: float(actions[idx][ACTION][i]) for i, name in enumerate(dataset.features[ACTION]["names"])
+    }
+
+    # Get robot observation
+    robot_obs = robot.get_observation()
+
+    # Dataset EE -> robot joints
+    joint_action = robot_ee_to_joints_processor((ee_action, robot_obs))
+
+    # Send action to robot
+    _ = robot.send_action(joint_action)
+
+    busy_wait(1.0 / dataset.fps - (time.perf_counter() - t0))
+
+# Clean up
+robot.disconnect()
diff --git a/examples/phone_to_so100/teleoperate.py b/examples/phone_to_so100/teleoperate.py
new file mode 100644
index 0000000000000000000000000000000000000000..65cda56698a37c3034ce8f9288c0a73e63bf660a
--- /dev/null
+++ b/examples/phone_to_so100/teleoperate.py
@@ -0,0 +1,113 @@
+# !/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+
+from lerobot.model.kinematics import RobotKinematics
+from lerobot.processor import RobotAction, RobotObservation, RobotProcessorPipeline
+from lerobot.processor.converters import (
+    robot_action_observation_to_transition,
+    transition_to_robot_action,
+)
+from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
+from lerobot.robots.so100_follower.robot_kinematic_processor import (
+    EEBoundsAndSafety,
+    EEReferenceAndDelta,
+    GripperVelocityToJoint,
+    InverseKinematicsEEToJoints,
+)
+from lerobot.robots.so100_follower.so100_follower import SO100Follower
+from lerobot.teleoperators.phone.config_phone import PhoneConfig, PhoneOS
+from lerobot.teleoperators.phone.phone_processor import MapPhoneActionToRobotAction
+from lerobot.teleoperators.phone.teleop_phone import Phone
+from lerobot.utils.robot_utils import busy_wait
+from lerobot.utils.visualization_utils import init_rerun, log_rerun_data
+
+FPS = 30
+
+# Create the robot and teleoperator configurations
+robot_config = SO100FollowerConfig(
+    port="/dev/tty.usbmodem5A460814411", id="my_awesome_follower_arm", use_degrees=True
+)
+teleop_config = PhoneConfig(phone_os=PhoneOS.IOS)  # or PhoneOS.ANDROID
+
+# Initialize the robot and teleoperator
+robot = SO100Follower(robot_config)
+teleop_device = Phone(teleop_config)
+
+# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf
+kinematics_solver = RobotKinematics(
+    urdf_path="./SO101/so101_new_calib.urdf",
+    target_frame_name="gripper_frame_link",
+    joint_names=list(robot.bus.motors.keys()),
+)
+
+# Build pipeline to convert a phone action to an EE pose action and then to a joint action
+phone_to_robot_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction](
+    steps=[
+        MapPhoneActionToRobotAction(platform=teleop_config.phone_os),
+        EEReferenceAndDelta(
+            kinematics=kinematics_solver,
+            end_effector_step_sizes={"x": 0.5, "y": 0.5, "z": 0.5},
+            motor_names=list(robot.bus.motors.keys()),
+            use_latched_reference=True,
+        ),
+        EEBoundsAndSafety(
+            end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]},
+            max_ee_step_m=0.10,
+        ),
+        GripperVelocityToJoint(
+            speed_factor=20.0,
+        ),
+        InverseKinematicsEEToJoints(
+            kinematics=kinematics_solver,
+            motor_names=list(robot.bus.motors.keys()),
+            initial_guess_current_joints=True,
+        ),
+    ],
+    to_transition=robot_action_observation_to_transition,
+    to_output=transition_to_robot_action,
+)
+
+# Connect to the robot and teleoperator
+robot.connect()
+teleop_device.connect()
+
+# Init rerun viewer
+init_rerun(session_name="phone_so100_teleop")
+
+if not robot.is_connected or not teleop_device.is_connected:
+    raise ValueError("Robot or teleop is not connected!")
+
+print("Starting teleop loop. Move your phone to teleoperate the robot...")
+while True:
+    t0 = time.perf_counter()
+
+    # Get robot observation
+    robot_obs = robot.get_observation()
+
+    # Get teleop action
+    phone_obs = teleop_device.get_action()
+
+    # Phone -> EE pose -> Joints transition
+    joint_action = phone_to_robot_joints_processor((phone_obs, robot_obs))
+
+    # Send action to robot
+    _ = robot.send_action(joint_action)
+
+    # Visualize
+    log_rerun_data(observation=phone_obs, action=joint_action)
+
+    busy_wait(max(1.0 / FPS - (time.perf_counter() - t0), 0.0))
diff --git a/examples/port_datasets/display_error_files.py b/examples/port_datasets/display_error_files.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dd2a7f99cfb897a1430a00076d08f1c4e248f13
--- /dev/null
+++ b/examples/port_datasets/display_error_files.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import json
+from pathlib import Path
+
+
+def find_missing_workers(completions_dir, world_size):
+    """Find workers that have not completed and return their indices."""
+    full = list(range(world_size))
+
+    completed = []
+    for path in completions_dir.glob("*"):
+        if path.name in [".", ".."]:
+            continue
+        index = path.name.lstrip("0")
+        index = 0 if index == "" else int(index)
+        completed.append(index)
+
+    missing_workers = set(full) - set(completed)
+    return missing_workers
+
+
+def find_output_files(slurm_dir, worker_indices):
+    """Find output files associated with worker indices, and return tuples
+    of (worker index, output file path).
+    """
+    out_files = []
+    for path in slurm_dir.glob("*.out"):
+        _, worker_id = path.name.replace(".out", "").split("_")
+        worker_id = int(worker_id)
+        if worker_id in worker_indices:
+            out_files.append((worker_id, path))
+    return out_files
+
+
+def display_error_files(logs_dir, job_name):
+    executor_path = Path(logs_dir) / job_name / "executor.json"
+    completions_dir = Path(logs_dir) / job_name / "completions"
+
+    with open(executor_path) as f:
+        executor = json.load(f)
+
+    missing_workers = find_missing_workers(completions_dir, executor["world_size"])
+
+    for missing in sorted(missing_workers)[::-1]:
+        print(missing)
+
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "--logs-dir",
+        type=str,
+        help="Path to logs directory for `datatrove`.",
+    )
+    parser.add_argument(
+        "--job-name",
+        type=str,
+        default="port_droid",
+        help="Job name used in slurm, and name of the directory created inside the provided logs directory.",
+    )
+
+    args = parser.parse_args()
+
+    display_error_files(**vars(args))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/port_datasets/port_droid.py b/examples/port_datasets/port_droid.py
new file mode 100644
index 0000000000000000000000000000000000000000..67b4754ab0cb8315330e18ea955703fe4e9d10a3
--- /dev/null
+++ b/examples/port_datasets/port_droid.py
@@ -0,0 +1,432 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The
HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import logging +import time +from pathlib import Path + +import numpy as np +import tensorflow_datasets as tfds + +from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata +from lerobot.utils.utils import get_elapsed_time_in_days_hours_minutes_seconds + +DROID_SHARDS = 2048 +DROID_FPS = 15 +DROID_ROBOT_TYPE = "Franka" + +# Dataset schema slightly adapted from: https://droid-dataset.github.io/droid/the-droid-dataset.html#-dataset-schema +DROID_FEATURES = { + # true on first step of the episode + "is_first": { + "dtype": "bool", + "shape": (1,), + "names": None, + }, + # true on last step of the episode + "is_last": { + "dtype": "bool", + "shape": (1,), + "names": None, + }, + # true on last step of the episode if it is a terminal step, True for demos + "is_terminal": { + "dtype": "bool", + "shape": (1,), + "names": None, + }, + # language_instruction is also stored as "task" to follow LeRobot standard + "language_instruction": { + "dtype": "string", + "shape": (1,), + "names": None, + }, + "language_instruction_2": { + "dtype": "string", + "shape": (1,), + "names": None, + }, + "language_instruction_3": { + "dtype": "string", + "shape": (1,), + "names": None, + }, + "observation.state.gripper_position": { + "dtype": "float32", + "shape": (1,), + "names": { + "axes": ["gripper"], + }, + }, + "observation.state.cartesian_position": { + "dtype": "float32", + "shape": (6,), + "names": { + "axes": ["x", "y", "z", "roll", "pitch", "yaw"], + }, + }, + "observation.state.joint_position": { + "dtype": "float32", + "shape": (7,), + "names": { + "axes": ["joint_0", "joint_1", "joint_2", "joint_3", "joint_4", "joint_5", "joint_6"], + }, + }, + # Add this new feature to follow LeRobot standard of using joint position + gripper + "observation.state": { + "dtype": "float32", + "shape": (8,), + "names": { + "axes": ["joint_0", "joint_1", "joint_2", "joint_3", "joint_4", "joint_5", "joint_6", "gripper"], + }, + }, + # Initially called wrist_image_left + "observation.images.wrist_left": { + "dtype": "video", + "shape": (180, 320, 3), + "names": [ + "height", + "width", + "channels", + ], + }, + # Initially called exterior_image_1_left + "observation.images.exterior_1_left": { + "dtype": "video", + "shape": (180, 320, 3), + "names": [ + "height", + "width", + "channels", + ], + }, + # Initially called exterior_image_2_left + "observation.images.exterior_2_left": { + "dtype": "video", + "shape": (180, 320, 3), + "names": [ + "height", + "width", + "channels", + ], + }, + "action.gripper_position": { + "dtype": "float32", + "shape": (1,), + "names": { + "axes": ["gripper"], + }, + }, + "action.gripper_velocity": { + "dtype": "float32", + "shape": (1,), + "names": { + "axes": ["gripper"], + }, + }, + "action.cartesian_position": { + "dtype": "float32", + "shape": (6,), + "names": { + "axes": ["x", "y", "z", "roll", "pitch", "yaw"], + }, + }, + "action.cartesian_velocity": { + 
"dtype": "float32", + "shape": (6,), + "names": { + "axes": ["x", "y", "z", "roll", "pitch", "yaw"], + }, + }, + "action.joint_position": { + "dtype": "float32", + "shape": (7,), + "names": { + "axes": ["joint_0", "joint_1", "joint_2", "joint_3", "joint_4", "joint_5", "joint_6"], + }, + }, + "action.joint_velocity": { + "dtype": "float32", + "shape": (7,), + "names": { + "axes": ["joint_0", "joint_1", "joint_2", "joint_3", "joint_4", "joint_5", "joint_6"], + }, + }, + # This feature was called "action" in RLDS dataset and consists of [6x joint velocities, 1x gripper position] + "action.original": { + "dtype": "float32", + "shape": (7,), + "names": { + "axes": ["x", "y", "z", "roll", "pitch", "yaw", "gripper"], + }, + }, + # Add this new feature to follow LeRobot standard of using joint position + gripper + "action": { + "dtype": "float32", + "shape": (8,), + "names": { + "axes": ["joint_0", "joint_1", "joint_2", "joint_3", "joint_4", "joint_5", "joint_6", "gripper"], + }, + }, + "discount": { + "dtype": "float32", + "shape": (1,), + "names": None, + }, + "reward": { + "dtype": "float32", + "shape": (1,), + "names": None, + }, + # Meta data that are the same for all frames in the episode + "task_category": { + "dtype": "string", + "shape": (1,), + "names": None, + }, + "building": { + "dtype": "string", + "shape": (1,), + "names": None, + }, + "collector_id": { + "dtype": "string", + "shape": (1,), + "names": None, + }, + "date": { + "dtype": "string", + "shape": (1,), + "names": None, + }, + "camera_extrinsics.wrist_left": { + "dtype": "float32", + "shape": (6,), + "names": { + "axes": ["x", "y", "z", "roll", "pitch", "yaw"], + }, + }, + "camera_extrinsics.exterior_1_left": { + "dtype": "float32", + "shape": (6,), + "names": { + "axes": ["x", "y", "z", "roll", "pitch", "yaw"], + }, + }, + "camera_extrinsics.exterior_2_left": { + "dtype": "float32", + "shape": (6,), + "names": { + "axes": ["x", "y", "z", "roll", "pitch", "yaw"], + }, + }, + "is_episode_successful": { + "dtype": "bool", + "shape": (1,), + "names": None, + }, +} + + +def is_episode_successful(tf_episode_metadata): + # Adapted from: https://github.com/droid-dataset/droid_policy_learning/blob/dd1020eb20d981f90b5ff07dc80d80d5c0cb108b/robomimic/utils/rlds_utils.py#L8 + return "/success/" in tf_episode_metadata["file_path"].numpy().decode() + + +def generate_lerobot_frames(tf_episode): + m = tf_episode["episode_metadata"] + frame_meta = { + "task_category": m["building"].numpy().decode(), + "building": m["building"].numpy().decode(), + "collector_id": m["collector_id"].numpy().decode(), + "date": m["date"].numpy().decode(), + "camera_extrinsics.wrist_left": m["extrinsics_wrist_cam"].numpy(), + "camera_extrinsics.exterior_1_left": m["extrinsics_exterior_cam_1"].numpy(), + "camera_extrinsics.exterior_2_left": m["extrinsics_exterior_cam_2"].numpy(), + "is_episode_successful": np.array([is_episode_successful(m)]), + } + for f in tf_episode["steps"]: + # Dataset schema slightly adapted from: https://droid-dataset.github.io/droid/the-droid-dataset.html#-dataset-schema + frame = { + "is_first": np.array([f["is_first"].numpy()]), + "is_last": np.array([f["is_last"].numpy()]), + "is_terminal": np.array([f["is_terminal"].numpy()]), + "language_instruction": f["language_instruction"].numpy().decode(), + "language_instruction_2": f["language_instruction_2"].numpy().decode(), + "language_instruction_3": f["language_instruction_3"].numpy().decode(), + "observation.state.gripper_position": f["observation"]["gripper_position"].numpy(), + 
"observation.state.cartesian_position": f["observation"]["cartesian_position"].numpy(), + "observation.state.joint_position": f["observation"]["joint_position"].numpy(), + "observation.images.wrist_left": f["observation"]["wrist_image_left"].numpy(), + "observation.images.exterior_1_left": f["observation"]["exterior_image_1_left"].numpy(), + "observation.images.exterior_2_left": f["observation"]["exterior_image_2_left"].numpy(), + "action.gripper_position": f["action_dict"]["gripper_position"].numpy(), + "action.gripper_velocity": f["action_dict"]["gripper_velocity"].numpy(), + "action.cartesian_position": f["action_dict"]["cartesian_position"].numpy(), + "action.cartesian_velocity": f["action_dict"]["cartesian_velocity"].numpy(), + "action.joint_position": f["action_dict"]["joint_position"].numpy(), + "action.joint_velocity": f["action_dict"]["joint_velocity"].numpy(), + "discount": np.array([f["discount"].numpy()]), + "reward": np.array([f["reward"].numpy()]), + "action.original": f["action"].numpy(), + } + + # language_instruction is also stored as "task" to follow LeRobot standard + frame["task"] = frame["language_instruction"] + + # Add this new feature to follow LeRobot standard of using joint position + gripper + frame["observation.state"] = np.concatenate( + [frame["observation.state.joint_position"], frame["observation.state.gripper_position"]] + ) + frame["action"] = np.concatenate([frame["action.joint_position"], frame["action.gripper_position"]]) + + # Meta data that are the same for all frames in the episode + frame.update(frame_meta) + + # Cast fp64 to fp32 + for key in frame: + if isinstance(frame[key], np.ndarray) and frame[key].dtype == np.float64: + frame[key] = frame[key].astype(np.float32) + + yield frame + + +def port_droid( + raw_dir: Path, + repo_id: str, + push_to_hub: bool = False, + num_shards: int | None = None, + shard_index: int | None = None, +): + dataset_name = raw_dir.parent.name + version = raw_dir.name + data_dir = raw_dir.parent.parent + + builder = tfds.builder(f"{dataset_name}/{version}", data_dir=data_dir, version="") + + if num_shards is not None: + tfds_num_shards = builder.info.splits["train"].num_shards + if tfds_num_shards != DROID_SHARDS: + raise ValueError( + f"Number of shards of Droid dataset is expected to be {DROID_SHARDS} but is {tfds_num_shards}." + ) + if num_shards != tfds_num_shards: + raise ValueError( + f"We only shard over the fixed number of shards provided by tensorflow dataset ({tfds_num_shards}), but {num_shards} shards provided instead." + ) + if shard_index >= tfds_num_shards: + raise ValueError( + f"Shard index is greater than the num of shards ({shard_index} >= {num_shards})." 
+            )
+
+        raw_dataset = builder.as_dataset(split=f"train[{shard_index}shard]")
+    else:
+        raw_dataset = builder.as_dataset(split="train")
+
+    lerobot_dataset = LeRobotDataset.create(
+        repo_id=repo_id,
+        robot_type=DROID_ROBOT_TYPE,
+        fps=DROID_FPS,
+        features=DROID_FEATURES,
+    )
+
+    start_time = time.time()
+    num_episodes = raw_dataset.cardinality().numpy().item()
+    logging.info(f"Number of episodes {num_episodes}")
+
+    for episode_index, episode in enumerate(raw_dataset):
+        elapsed_time = time.time() - start_time
+        d, h, m, s = get_elapsed_time_in_days_hours_minutes_seconds(elapsed_time)
+
+        logging.info(
+            f"{episode_index} / {num_episodes} episodes processed (after {d} days, {h} hours, {m} minutes, {s:.3f} seconds)"
+        )
+
+        for frame in generate_lerobot_frames(episode):
+            lerobot_dataset.add_frame(frame)
+
+        lerobot_dataset.save_episode()
+        logging.info("Episode saved")
+
+    lerobot_dataset.finalize()
+
+    if push_to_hub:
+        lerobot_dataset.push_to_hub(
+            # Add openx tag, since it belongs to the openx collection of datasets
+            tags=["openx"],
+            private=False,
+        )
+
+
+def validate_dataset(repo_id):
+    """Sanity check that ensures metadata can be loaded and all files are present."""
+    meta = LeRobotDatasetMetadata(repo_id)
+
+    if meta.total_episodes == 0:
+        raise ValueError("Number of episodes is 0.")
+
+    for ep_idx in range(meta.total_episodes):
+        data_path = meta.root / meta.get_data_file_path(ep_idx)
+
+        if not data_path.exists():
+            raise ValueError(f"Parquet file is missing in: {data_path}")
+
+        for vid_key in meta.video_keys:
+            vid_path = meta.root / meta.get_video_file_path(ep_idx, vid_key)
+            if not vid_path.exists():
+                raise ValueError(f"Video file is missing in: {vid_path}")
+
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "--raw-dir",
+        type=Path,
+        required=True,
+        help="Directory containing input raw datasets (e.g. `path/to/dataset` or `path/to/dataset/version`).",
+    )
+    parser.add_argument(
+        "--repo-id",
+        type=str,
+        help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset, required when push-to-hub is True",
+    )
+    parser.add_argument(
+        "--push-to-hub",
+        action="store_true",
+        help="Upload to hub.",
+    )
+    parser.add_argument(
+        "--num-shards",
+        type=int,
+        default=None,
+        help="Number of shards. Can be either None to load the full dataset, or 2048 to load one of the 2048 tensorflow dataset files.",
+    )
+    parser.add_argument(
+        "--shard-index",
+        type=int,
+        default=None,
+        help="Index of the shard. Can be either None to load the full dataset, or in [0,2047] to load one of the 2048 tensorflow dataset files.",
+    )
+
+    args = parser.parse_args()
+
+    port_droid(**vars(args))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/port_datasets/slurm_aggregate_shards.py b/examples/port_datasets/slurm_aggregate_shards.py
new file mode 100644
index 0000000000000000000000000000000000000000..1024f3ec2d449b4b6be8a60870b074e15c9509b5
--- /dev/null
+++ b/examples/port_datasets/slurm_aggregate_shards.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +from pathlib import Path + +from datatrove.executor import LocalPipelineExecutor +from datatrove.executor.slurm import SlurmPipelineExecutor +from datatrove.pipeline.base import PipelineStep +from port_droid import DROID_SHARDS + + +class AggregateDatasets(PipelineStep): + def __init__( + self, + repo_ids: list[str], + aggregated_repo_id: str, + ): + super().__init__() + self.repo_ids = repo_ids + self.aggr_repo_id = aggregated_repo_id + + def run(self, data=None, rank: int = 0, world_size: int = 1): + import logging + + from lerobot.datasets.aggregate import aggregate_datasets + from lerobot.utils.utils import init_logging + + init_logging() + + # Since aggregate_datasets already handles parallel processing internally, + # we only need one worker to run the entire aggregation + if rank == 0: + logging.info(f"Starting aggregation of {len(self.repo_ids)} datasets into {self.aggr_repo_id}") + aggregate_datasets(self.repo_ids, self.aggr_repo_id) + logging.info("Aggregation complete!") + else: + logging.info(f"Worker {rank} skipping - only worker 0 performs aggregation") + + +def make_aggregate_executor( + repo_ids, repo_id, job_name, logs_dir, workers, partition, cpus_per_task, mem_per_cpu, slurm=True +): + kwargs = { + "pipeline": [ + AggregateDatasets(repo_ids, repo_id), + ], + "logging_dir": str(logs_dir / job_name), + } + + if slurm: + # For aggregation, we only need 1 task since aggregate_datasets handles everything + kwargs.update( + { + "job_name": job_name, + "tasks": 1, # Only need 1 task for aggregation + "workers": 1, # Only need 1 worker + "time": "08:00:00", + "partition": partition, + "cpus_per_task": cpus_per_task, + "sbatch_args": {"mem-per-cpu": mem_per_cpu}, + } + ) + executor = SlurmPipelineExecutor(**kwargs) + else: + kwargs.update( + { + "tasks": 1, + "workers": 1, + } + ) + executor = LocalPipelineExecutor(**kwargs) + + return executor + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "--repo-id", + type=str, + help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset, required when push-to-hub is True.", + ) + parser.add_argument( + "--logs-dir", + type=Path, + help="Path to logs directory for `datatrove`.", + ) + parser.add_argument( + "--job-name", + type=str, + default="aggr_droid", + help="Job name used in slurm, and name of the directory created inside the provided logs directory.", + ) + parser.add_argument( + "--slurm", + type=int, + default=1, + help="Launch over slurm. Use `--slurm 0` to launch sequentially (useful to debug).", + ) + parser.add_argument( + "--workers", + type=int, + default=1, # Changed default to 1 since aggregation doesn't need multiple workers + help="Number of slurm workers. For aggregation, this should be 1.", + ) + parser.add_argument( + "--partition", + type=str, + help="Slurm partition. Ideally a CPU partition. 
No need for GPU partition.", + ) + parser.add_argument( + "--cpus-per-task", + type=int, + default=8, + help="Number of cpus that each slurm worker will use.", + ) + parser.add_argument( + "--mem-per-cpu", + type=str, + default="1950M", + help="Memory per cpu that each worker will use.", + ) + + args = parser.parse_args() + kwargs = vars(args) + kwargs["slurm"] = kwargs.pop("slurm") == 1 + + repo_ids = [f"{args.repo_id}_world_{DROID_SHARDS}_rank_{rank}" for rank in range(DROID_SHARDS)] + aggregate_executor = make_aggregate_executor(repo_ids, **kwargs) + aggregate_executor.run() + + +if __name__ == "__main__": + main() diff --git a/examples/port_datasets/slurm_port_shards.py b/examples/port_datasets/slurm_port_shards.py new file mode 100644 index 0000000000000000000000000000000000000000..0ebc966e6e65545e020be890e03d68fb09d70caa --- /dev/null +++ b/examples/port_datasets/slurm_port_shards.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +from pathlib import Path + +from datatrove.executor import LocalPipelineExecutor +from datatrove.executor.slurm import SlurmPipelineExecutor +from datatrove.pipeline.base import PipelineStep +from port_droid import DROID_SHARDS + + +class PortDroidShards(PipelineStep): + def __init__( + self, + raw_dir: Path | str, + repo_id: str = None, + ): + super().__init__() + self.raw_dir = Path(raw_dir) + self.repo_id = repo_id + + def run(self, data=None, rank: int = 0, world_size: int = 1): + from datasets.utils.tqdm import disable_progress_bars + from port_droid import port_droid, validate_dataset + + from lerobot.utils.utils import init_logging + + init_logging() + disable_progress_bars() + + shard_repo_id = f"{self.repo_id}_world_{world_size}_rank_{rank}" + + try: + validate_dataset(shard_repo_id) + return + except Exception: + pass # nosec B110 - Dataset doesn't exist yet, continue with porting + + port_droid( + self.raw_dir, + shard_repo_id, + push_to_hub=False, + num_shards=world_size, + shard_index=rank, + ) + + validate_dataset(shard_repo_id) + + +def make_port_executor( + raw_dir, repo_id, job_name, logs_dir, workers, partition, cpus_per_task, mem_per_cpu, slurm=True +): + kwargs = { + "pipeline": [ + PortDroidShards(raw_dir, repo_id), + ], + "logging_dir": str(logs_dir / job_name), + } + + if slurm: + kwargs.update( + { + "job_name": job_name, + "tasks": DROID_SHARDS, + "workers": workers, + "time": "08:00:00", + "partition": partition, + "cpus_per_task": cpus_per_task, + "sbatch_args": {"mem-per-cpu": mem_per_cpu}, + } + ) + executor = SlurmPipelineExecutor(**kwargs) + else: + kwargs.update( + { + "tasks": 1, + "workers": 1, + } + ) + executor = LocalPipelineExecutor(**kwargs) + + return executor + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "--raw-dir", + type=Path, + required=True, + help="Directory containing input raw datasets (e.g. 
`path/to/dataset` or `path/to/dataset/version`).",
+    )
+    parser.add_argument(
+        "--repo-id",
+        type=str,
+        help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset, required when push-to-hub is True.",
+    )
+    parser.add_argument(
+        "--logs-dir",
+        type=Path,
+        help="Path to logs directory for `datatrove`.",
+    )
+    parser.add_argument(
+        "--job-name",
+        type=str,
+        default="port_droid",
+        help="Job name used in slurm, and name of the directory created inside the provided logs directory.",
+    )
+    parser.add_argument(
+        "--slurm",
+        type=int,
+        default=1,
+        help="Launch over slurm. Use `--slurm 0` to launch sequentially (useful to debug).",
+    )
+    parser.add_argument(
+        "--workers",
+        type=int,
+        default=2048,
+        help="Number of slurm workers. It should be less than the maximum number of shards.",
+    )
+    parser.add_argument(
+        "--partition",
+        type=str,
+        help="Slurm partition. Ideally a CPU partition. No need for GPU partition.",
+    )
+    parser.add_argument(
+        "--cpus-per-task",
+        type=int,
+        default=8,
+        help="Number of cpus that each slurm worker will use.",
+    )
+    parser.add_argument(
+        "--mem-per-cpu",
+        type=str,
+        default="1950M",
+        help="Memory per cpu that each worker will use.",
+    )
+
+    args = parser.parse_args()
+    kwargs = vars(args)
+    kwargs["slurm"] = kwargs.pop("slurm") == 1
+    port_executor = make_port_executor(**kwargs)
+    port_executor.run()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/port_datasets/slurm_upload.py b/examples/port_datasets/slurm_upload.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ab49937ff1233a330cf84fbaf97a58481b99a54
--- /dev/null
+++ b/examples/port_datasets/slurm_upload.py
@@ -0,0 +1,287 @@
+#!/usr/bin/env python
+
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
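+
+# This step uploads a locally ported dataset to the Hugging Face Hub with
+# datatrove: each worker pre-uploads its share of files to the LFS endpoint,
+# then commits them in small batches with retries to survive the
+# "A commit has happened since" race between concurrent workers.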
+ +import argparse +import logging +import os +from pathlib import Path + +from datatrove.executor import LocalPipelineExecutor +from datatrove.executor.slurm import SlurmPipelineExecutor +from datatrove.pipeline.base import PipelineStep +from huggingface_hub import HfApi +from huggingface_hub.constants import REPOCARD_NAME +from port_droid import DROID_SHARDS + +from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDatasetMetadata +from lerobot.datasets.utils import create_lerobot_dataset_card +from lerobot.utils.utils import init_logging + + +class UploadDataset(PipelineStep): + def __init__( + self, + repo_id: str, + branch: str | None = None, + revision: str | None = None, + tags: list | None = None, + license: str | None = "apache-2.0", + private: bool = False, + distant_repo_id: str | None = None, + **card_kwargs, + ): + super().__init__() + self.repo_id = repo_id + self.distant_repo_id = self.repo_id if distant_repo_id is None else distant_repo_id + self.branch = branch + self.tags = tags + self.license = license + self.private = private + self.card_kwargs = card_kwargs + self.revision = revision if revision else CODEBASE_VERSION + + if os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") != "1": + logging.warning( + 'HF_HUB_ENABLE_HF_TRANSFER is not set to "1". Install hf_transfer and set the env ' + "variable for faster uploads:\npip install hf-transfer\nexport HF_HUB_ENABLE_HF_TRANSFER=1" + ) + + self.create_repo() + + def create_repo(self): + logging.info(f"Loading meta data from {self.repo_id}...") + meta = LeRobotDatasetMetadata(self.repo_id) + + logging.info(f"Creating repo {self.distant_repo_id}...") + hub_api = HfApi() + hub_api.create_repo( + repo_id=self.distant_repo_id, + private=self.private, + repo_type="dataset", + exist_ok=True, + ) + if self.branch: + hub_api.create_branch( + repo_id=self.distant_repo_id, + branch=self.branch, + revision=self.revision, + repo_type="dataset", + exist_ok=True, + ) + + if not hub_api.file_exists( + self.distant_repo_id, REPOCARD_NAME, repo_type="dataset", revision=self.branch + ): + card = create_lerobot_dataset_card( + tags=self.tags, dataset_info=meta.info, license=self.license, **self.card_kwargs + ) + card.push_to_hub(repo_id=self.distant_repo_id, repo_type="dataset", revision=self.branch) + + hub_api.create_tag(self.distant_repo_id, tag=CODEBASE_VERSION, repo_type="dataset") + + def list_files_recursively(directory): + base_path = Path(directory) + return [str(file.relative_to(base_path)) for file in base_path.rglob("*") if file.is_file()] + + logging.info(f"Listing all local files from {self.repo_id}...") + self.file_paths = list_files_recursively(meta.root) + self.file_paths = sorted(self.file_paths) + + def create_chunks(self, lst, n): + from itertools import islice + + it = iter(lst) + return [list(islice(it, size)) for size in [len(lst) // n + (i < len(lst) % n) for i in range(n)]] + + def create_commits(self, additions): + import logging + import math + import random + import time + + from huggingface_hub import create_commit + from huggingface_hub.utils import HfHubHTTPError + + FILES_BETWEEN_COMMITS = 10 # noqa: N806 + BASE_DELAY = 0.1 # noqa: N806 + MAX_RETRIES = 12 # noqa: N806 + + # Split the files into smaller chunks for faster commit + # and avoiding "A commit has happened since" error + num_chunks = math.ceil(len(additions) / FILES_BETWEEN_COMMITS) + chunks = self.create_chunks(additions, num_chunks) + + for chunk in chunks: + retries = 0 + while True: + try: + create_commit( + self.distant_repo_id, + 
repo_type="dataset",
+                        operations=chunk,
+                        commit_message=f"DataTrove upload ({len(chunk)} files)",
+                        revision=self.branch,
+                    )
+                    # TODO: every 100 chunks super_squash_commits()
+                    logging.info("create_commit completed!")
+                    break
+                except HfHubHTTPError as e:
+                    if "A commit has happened since" in e.server_message:
+                        if retries >= MAX_RETRIES:
+                            logging.error(f"Failed to create commit after {MAX_RETRIES=}. Giving up.")
+                            raise e
+                        logging.info("Commit creation race condition issue. Waiting...")
+                        time.sleep(BASE_DELAY * 2**retries + random.uniform(0, 2))
+                        retries += 1
+                    else:
+                        raise e
+
+    def run(self, data=None, rank: int = 0, world_size: int = 1):
+        import logging
+
+        from datasets.utils.tqdm import disable_progress_bars
+        from huggingface_hub import CommitOperationAdd, preupload_lfs_files
+
+        from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata
+        from lerobot.utils.utils import init_logging
+
+        init_logging()
+        disable_progress_bars()
+
+        chunks = self.create_chunks(self.file_paths, world_size)
+        file_paths = chunks[rank]
+
+        if len(file_paths) == 0:
+            raise ValueError(file_paths)
+
+        logging.info("Pre-uploading LFS files...")
+        for i, path in enumerate(file_paths):
+            logging.info(f"{i}: {path}")
+
+        meta = LeRobotDatasetMetadata(self.repo_id)
+        additions = [
+            CommitOperationAdd(path_in_repo=path, path_or_fileobj=meta.root / path) for path in file_paths
+        ]
+        preupload_lfs_files(
+            repo_id=self.distant_repo_id, repo_type="dataset", additions=additions, revision=self.branch
+        )
+
+        logging.info("Creating commits...")
+        self.create_commits(additions)
+        logging.info("Done!")
+
+
+def make_upload_executor(
+    repo_id, job_name, logs_dir, workers, partition, cpus_per_task, mem_per_cpu, private=False, slurm=True
+):
+    kwargs = {
+        "pipeline": [
+            UploadDataset(repo_id, private=private),
+        ],
+        "logging_dir": str(logs_dir / job_name),
+    }
+
+    if slurm:
+        kwargs.update(
+            {
+                "job_name": job_name,
+                "tasks": DROID_SHARDS,
+                "workers": workers,
+                "time": "08:00:00",
+                "partition": partition,
+                "cpus_per_task": cpus_per_task,
+                "sbatch_args": {"mem-per-cpu": mem_per_cpu},
+            }
+        )
+        executor = SlurmPipelineExecutor(**kwargs)
+    else:
+        kwargs.update(
+            {
+                "tasks": DROID_SHARDS,
+                "workers": 1,
+            }
+        )
+        executor = LocalPipelineExecutor(**kwargs)
+
+    return executor
+
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "--repo-id",
+        type=str,
+        help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset, required when push-to-hub is True.",
+    )
+    parser.add_argument(
+        "--logs-dir",
+        type=Path,
+        help="Path to logs directory for `datatrove`.",
+    )
+    parser.add_argument(
+        "--job-name",
+        type=str,
+        default="upload_droid",
+        help="Job name used in slurm, and name of the directory created inside the provided logs directory.",
+    )
+    parser.add_argument(
+        "--slurm",
+        type=int,
+        default=1,
+        help="Launch over slurm. Use `--slurm 0` to launch sequentially (useful to debug).",
+    )
+    parser.add_argument(
+        "--workers",
+        type=int,
+        default=50,
+        help="Number of slurm workers. It should be less than the maximum number of shards.",
+    )
+    parser.add_argument(
+        "--partition",
+        type=str,
+        help="Slurm partition. Ideally a CPU partition. 
No need for GPU partition.", + ) + parser.add_argument( + "--cpus-per-task", + type=int, + default=8, + help="Number of cpus that each slurm worker will use.", + ) + parser.add_argument( + "--mem-per-cpu", + type=str, + default="1950M", + help="Memory per cpu that each worker will use.", + ) + parser.add_argument( + "--private", + action="store_true", + default=False, + help="Whether to create a private repository.", + ) + + init_logging() + + args = parser.parse_args() + kwargs = vars(args) + kwargs["slurm"] = kwargs.pop("slurm") == 1 + upload_executor = make_upload_executor(**kwargs) + upload_executor.run() + + +if __name__ == "__main__": + main() diff --git a/examples/rtc/eval_dataset.py b/examples/rtc/eval_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..22d5ffe05775fc7b223d3615a2228d558f88a115 --- /dev/null +++ b/examples/rtc/eval_dataset.py @@ -0,0 +1,951 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Evaluate Real-Time Chunking (RTC) performance on dataset samples. + +This script takes two random samples from a dataset: +- Uses actions from the first sample as previous chunk +- Generates new actions for the second sample with and without RTC + +It compares action predictions with and without RTC on dataset samples, +measuring consistency and ground truth alignment. 
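+
+Roughly, RTC freezes the first `inference_delay` actions of the new chunk to the
+previous chunk's values and softly guides the rest of the overlapping prefix
+toward them (controlled by `prefix_attention_schedule` and `max_guidance_weight`),
+so chunk transitions stay smooth despite inference latency.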
+
+Usage:
+    # Basic usage with smolvla policy
+    uv run python examples/rtc/eval_dataset.py \
+        --policy.path=helper2424/smolvla_check_rtc_last3 \
+        --dataset.repo_id=helper2424/check_rtc \
+        --rtc.execution_horizon=8 \
+        --device=mps \
+        --rtc.max_guidance_weight=10.0 \
+        --rtc.prefix_attention_schedule=EXP \
+        --seed=10
+
+    # Basic usage with pi0.5 policy
+    uv run python examples/rtc/eval_dataset.py \
+        --policy.path=lerobot/pi05_libero_finetuned \
+        --dataset.repo_id=HuggingFaceVLA/libero \
+        --rtc.execution_horizon=10 \
+        --device=mps \
+        --seed=10
+
+    # Basic usage with pi0.5 policy with cuda device
+    uv run python examples/rtc/eval_dataset.py \
+        --policy.path=lerobot/pi05_libero_finetuned \
+        --dataset.repo_id=HuggingFaceVLA/libero \
+        --rtc.execution_horizon=8 \
+        --device=cuda
+
+    # Basic usage with pi0 policy with cuda device
+    uv run python examples/rtc/eval_dataset.py \
+        --policy.path=lerobot/pi0_libero_finetuned \
+        --dataset.repo_id=HuggingFaceVLA/libero \
+        --rtc.execution_horizon=8 \
+        --device=cuda
+
+    uv run python examples/rtc/eval_dataset.py \
+        --policy.path=lipsop/reuben_pi0 \
+        --dataset.repo_id=ReubenLim/so101_cube_in_cup \
+        --rtc.execution_horizon=8 \
+        --device=cuda
+
+    # With torch.compile for faster inference (PyTorch 2.0+)
+    # Note: CUDA graphs disabled by default due to in-place ops in denoising loop
+    uv run python examples/rtc/eval_dataset.py \
+        --policy.path=helper2424/smolvla_check_rtc_last3 \
+        --dataset.repo_id=helper2424/check_rtc \
+        --rtc.execution_horizon=8 \
+        --device=mps \
+        --use_torch_compile=true \
+        --torch_compile_mode=max-autotune
+
+    # With torch.compile on CUDA (CUDA graphs disabled by default)
+    uv run python examples/rtc/eval_dataset.py \
+        --policy.path=helper2424/smolvla_check_rtc_last3 \
+        --dataset.repo_id=helper2424/check_rtc \
+        --rtc.execution_horizon=8 \
+        --device=cuda \
+        --use_torch_compile=true \
+        --torch_compile_mode=reduce-overhead
+
+    # Enable CUDA graphs (advanced - may cause tensor aliasing errors)
+    uv run python examples/rtc/eval_dataset.py \
+        --policy.path=helper2424/smolvla_check_rtc_last3 \
+        --dataset.repo_id=helper2424/check_rtc \
+        --use_torch_compile=true \
+        --torch_compile_backend=inductor \
+        --torch_compile_mode=max-autotune \
+        --torch_compile_disable_cudagraphs=false
+"""
+
+import gc
+import logging
+import os
+import random
+from dataclasses import dataclass, field
+
+import numpy as np
+import torch
+
+try:
+    import matplotlib.pyplot as plt
+
+    MATPLOTLIB_AVAILABLE = True
+except ImportError:
+    MATPLOTLIB_AVAILABLE = False
+    plt = None
+
+from lerobot.configs import parser
+from lerobot.configs.default import DatasetConfig
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import RTCAttentionSchedule
+from lerobot.datasets.factory import resolve_delta_timestamps
+from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
+from lerobot.policies.factory import get_policy_class, make_pre_post_processors
+from lerobot.policies.rtc.configuration_rtc import RTCConfig
+from lerobot.policies.rtc.debug_visualizer import RTCDebugVisualizer
+from lerobot.utils.hub import HubMixin
+from lerobot.utils.utils import init_logging
+
+
+def set_seed(seed: int):
+    """Set random seed for reproducibility."""
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed(seed)
+        torch.cuda.manual_seed_all(seed)
+    if torch.backends.mps.is_available():
+        torch.mps.manual_seed(seed)
+
torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def _check_matplotlib_available(): + """Check if matplotlib is available, raise helpful error if not.""" + if not MATPLOTLIB_AVAILABLE: + raise ImportError( + "matplotlib is required for RTC debug visualizations. " + "Please install it by running:\n" + " uv pip install matplotlib" + ) + + +@dataclass +class RTCEvalConfig(HubMixin): + """Configuration for RTC evaluation.""" + + # Policy configuration + policy: PreTrainedConfig | None = None + + # Dataset configuration + dataset: DatasetConfig = field(default_factory=DatasetConfig) + + # RTC configuration + rtc: RTCConfig = field( + default_factory=lambda: RTCConfig( + enabled=True, + execution_horizon=20, + max_guidance_weight=10.0, + prefix_attention_schedule=RTCAttentionSchedule.EXP, + debug=True, + debug_maxlen=1000, + ) + ) + + # Device configuration + device: str | None = field( + default=None, + metadata={"help": "Device to run on (cuda, cpu, mps, auto)"}, + ) + + # Output configuration + output_dir: str = field( + default="rtc_debug_output", + metadata={"help": "Directory to save debug visualizations"}, + ) + + # Seed configuration + seed: int = field( + default=42, + metadata={"help": "Random seed for reproducibility"}, + ) + + inference_delay: int = field( + default=4, + metadata={"help": "Inference delay for RTC"}, + ) + + # Torch compile configuration + use_torch_compile: bool = field( + default=False, + metadata={"help": "Use torch.compile for faster inference (PyTorch 2.0+)"}, + ) + + torch_compile_backend: str = field( + default="inductor", + metadata={"help": "Backend for torch.compile (inductor, aot_eager, cudagraphs)"}, + ) + + torch_compile_mode: str = field( + default="default", + metadata={"help": "Compilation mode (default, reduce-overhead, max-autotune)"}, + ) + + torch_compile_disable_cudagraphs: bool = field( + default=True, + metadata={ + "help": "Disable CUDA graphs in torch.compile. Required due to in-place tensor " + "operations in denoising loop (x_t += dt * v_t) which cause tensor aliasing issues." 
+ }, + ) + + def __post_init__(self): + # Parse policy path + policy_path = parser.get_path_arg("policy") + if policy_path: + cli_overrides = parser.get_cli_overrides("policy") + self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) + self.policy.pretrained_path = policy_path + else: + raise ValueError("Policy path is required (--policy.path)") + + # Auto-detect device if not specified + if self.device is None or self.device == "auto": + if torch.cuda.is_available(): + self.device = "cuda" + elif torch.backends.mps.is_available(): + self.device = "mps" + else: + self.device = "cpu" + logging.info(f"Auto-detected device: {self.device}") + + @classmethod + def __get_path_fields__(cls) -> list[str]: + """This enables the parser to load config from the policy using `--policy.path=local/dir`""" + return ["policy"] + + +class RTCEvaluator: + """Evaluator for RTC on dataset samples.""" + + def __init__(self, cfg: RTCEvalConfig): + self.cfg = cfg + self.device = cfg.device + + # Load dataset with proper delta_timestamps based on policy configuration + # Calculate delta_timestamps using the same logic as make_dataset factory + logging.info(f"Loading dataset: {cfg.dataset.repo_id}") + + # Get dataset metadata to extract FPS + ds_meta = LeRobotDatasetMetadata(cfg.dataset.repo_id) + + # Calculate delta_timestamps from policy's delta_indices + delta_timestamps = resolve_delta_timestamps(cfg.policy, ds_meta) + + # Create dataset with calculated delta_timestamps + self.dataset = LeRobotDataset( + cfg.dataset.repo_id, + delta_timestamps=delta_timestamps, + ) + logging.info(f"Dataset loaded: {len(self.dataset)} samples, {self.dataset.num_episodes} episodes") + + # Create preprocessor/postprocessor + self.preprocessor, self.postprocessor = make_pre_post_processors( + policy_cfg=cfg.policy, + pretrained_path=cfg.policy.pretrained_path, + preprocessor_overrides={ + "device_processor": {"device": self.device}, + }, + ) + + logging.info("=" * 80) + logging.info("Ready to run evaluation with sequential policy loading:") + logging.info(" 1. policy_prev_chunk - Generate reference chunk, then destroy") + logging.info(" 2. policy_no_rtc - Generate without RTC, then destroy") + logging.info(" 3. policy_rtc - Generate with RTC, then destroy") + logging.info(" Note: Only one policy in memory at a time for efficient memory usage") + logging.info("=" * 80) + + def _init_policy(self, name: str, rtc_enabled: bool, rtc_debug: bool): + """Initialize a single policy instance with specified RTC configuration. 
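+
+        A fresh policy is loaded from the pretrained checkpoint on every call so that the
+        prev-chunk, no-RTC, and RTC runs do not share state; callers are expected to release
+        it with _destroy_policy() once its chunk has been generated.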
+ + Args: + name: Name identifier for logging purposes + rtc_enabled: Whether to enable RTC for this policy + rtc_debug: Whether to enable debug tracking for this policy + + Returns: + Configured policy instance with optional torch.compile applied + """ + logging.info(f"Initializing {name}...") + + # Load policy from pretrained + policy_class = get_policy_class(self.cfg.policy.type) + + config = PreTrainedConfig.from_pretrained(self.cfg.policy.pretrained_path) + + if self.cfg.policy.type == "pi05" or self.cfg.policy.type == "pi0": + config.compile_model = self.cfg.use_torch_compile + + policy = policy_class.from_pretrained(self.cfg.policy.pretrained_path, config=config) + policy = policy.to(self.device) + policy.eval() + + # Configure RTC + rtc_config = RTCConfig( + enabled=rtc_enabled, + execution_horizon=self.cfg.rtc.execution_horizon, + max_guidance_weight=self.cfg.rtc.max_guidance_weight, + prefix_attention_schedule=self.cfg.rtc.prefix_attention_schedule, + debug=rtc_debug, + debug_maxlen=self.cfg.rtc.debug_maxlen, + ) + policy.config.rtc_config = rtc_config + policy.init_rtc_processor() + + logging.info(f" RTC enabled: {rtc_enabled}") + logging.info(f" RTC debug: {rtc_debug}") + logging.info(f" Policy config: {config}") + + # Apply torch.compile to predict_action_chunk method if enabled + if self.cfg.use_torch_compile: + policy = self._apply_torch_compile(policy, name) + + logging.info(f"✓ {name} initialized successfully") + return policy + + def _apply_torch_compile(self, policy, policy_name: str): + """Apply torch.compile to the policy's predict_action_chunk method. + + Args: + policy: Policy instance to compile + policy_name: Name for logging purposes + + Returns: + Policy with compiled predict_action_chunk method + """ + + # PI models handle their own compilation + if policy.type == "pi05" or policy.type == "pi0": + return policy + + try: + # Check if torch.compile is available (PyTorch 2.0+) + if not hasattr(torch, "compile"): + logging.warning( + f" [{policy_name}] torch.compile is not available. Requires PyTorch 2.0+. " + f"Current version: {torch.__version__}. Skipping compilation." 
+ ) + return policy + + logging.info(f" [{policy_name}] Applying torch.compile to predict_action_chunk...") + logging.info(f" Backend: {self.cfg.torch_compile_backend}") + logging.info(f" Mode: {self.cfg.torch_compile_mode}") + logging.info(f" Disable CUDA graphs: {self.cfg.torch_compile_disable_cudagraphs}") + logging.info(" Note: Debug tracker excluded from compilation via @torch._dynamo.disable") + + # Compile the predict_action_chunk method + # - Debug tracker is excluded from compilation via @torch._dynamo.disable + # - CUDA graphs disabled to prevent tensor aliasing from in-place ops (x_t += dt * v_t) + compile_kwargs = { + "backend": self.cfg.torch_compile_backend, + "mode": self.cfg.torch_compile_mode, + } + + # Disable CUDA graphs if requested (prevents tensor aliasing issues) + if self.cfg.torch_compile_disable_cudagraphs: + compile_kwargs["options"] = {"triton.cudagraphs": False} + + original_method = policy.predict_action_chunk + compiled_method = torch.compile(original_method, **compile_kwargs) + policy.predict_action_chunk = compiled_method + logging.info(f" ✓ [{policy_name}] Successfully compiled predict_action_chunk") + + except Exception as e: + logging.error(f" [{policy_name}] Failed to apply torch.compile: {e}") + logging.warning(f" [{policy_name}] Continuing without torch.compile") + + return policy + + def _destroy_policy(self, policy, policy_name: str): + """Explicitly destroy a policy and free all associated memory. + + This method performs aggressive cleanup to ensure maximum memory is freed, + which is critical for large models (e.g., VLAs with billions of parameters). + + Args: + policy: Policy instance to destroy + policy_name: Name for logging purposes + """ + logging.info(f" Destroying {policy_name} and freeing memory...") + + try: + # Step 1: Move policy to CPU to free GPU/MPS memory + policy.cpu() + + # Step 2: Delete the policy object + del policy + + # Step 3: Force garbage collection to reclaim memory immediately + gc.collect() + + # Step 4: Clear device-specific caches + if torch.cuda.is_available(): + torch.cuda.empty_cache() + torch.cuda.synchronize() # Ensure all operations complete + + if torch.backends.mps.is_available(): + torch.mps.empty_cache() + + logging.info(f" ✓ {policy_name} destroyed and memory freed") + + except Exception as e: + logging.warning(f" Warning: Error during {policy_name} cleanup: {e}") + + def run_evaluation(self): + """Run evaluation on two random dataset samples using three separate policies. + + Note: Policies are deinitalized after each step to free memory. Large models + (e.g., VLA models with billions of parameters) cannot fit three instances in + memory simultaneously. By deleting and garbage collecting after each step, + we ensure only one policy is loaded at a time. 
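+
+        The evaluation runs in three steps: (1) generate a reference "previous" chunk,
+        (2) generate a chunk without RTC, and (3) generate a chunk with RTC from the same
+        noise and the previous chunk, then plot the denoising traces and the final action
+        chunks for comparison.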
+ """ + # Create output directory + os.makedirs(self.cfg.output_dir, exist_ok=True) + logging.info(f"Output directory: {self.cfg.output_dir}") + + logging.info("=" * 80) + logging.info("Starting RTC evaluation") + logging.info(f"Inference delay: {self.cfg.inference_delay}") + logging.info("=" * 80) + + # Load two random samples from dataset + data_loader = torch.utils.data.DataLoader(self.dataset, batch_size=1, shuffle=True) + loader_iter = iter(data_loader) + first_sample = next(loader_iter) + second_sample = next(loader_iter) + + preprocessed_first_sample = self.preprocessor(first_sample) + preprocessed_second_sample = self.preprocessor(second_sample) + + # ============================================================================ + # Step 1: Generate previous chunk using policy_prev_chunk + # ============================================================================ + # This policy is only used to generate the reference chunk and then freed + logging.info("=" * 80) + logging.info("Step 1: Generating previous chunk with policy_prev_chunk") + logging.info("=" * 80) + + # Initialize policy 1 + policy_prev_chunk_policy = self._init_policy( + name="policy_prev_chunk", + rtc_enabled=False, + rtc_debug=False, + ) + with torch.no_grad(): + prev_chunk_left_over = policy_prev_chunk_policy.predict_action_chunk( + preprocessed_first_sample, + )[:, :25, :].squeeze(0) + logging.info(f" Generated prev_chunk shape: {prev_chunk_left_over.shape}") + + # Destroy policy_prev_chunk to free memory for large models + self._destroy_policy(policy_prev_chunk_policy, "policy_prev_chunk") + + # ============================================================================ + # Step 2: Generate actions WITHOUT RTC using policy_no_rtc + # ============================================================================ + logging.info("=" * 80) + logging.info("Step 2: Generating actions WITHOUT RTC with policy_no_rtc") + logging.info("=" * 80) + + set_seed(self.cfg.seed) + + # Initialize policy 2 + policy_no_rtc_policy = self._init_policy( + name="policy_no_rtc", + rtc_enabled=False, + rtc_debug=True, + ) + + # Sample noise (use same noise for both RTC and non-RTC for fair comparison) + noise_size = (1, policy_no_rtc_policy.config.chunk_size, policy_no_rtc_policy.config.max_action_dim) + noise = policy_no_rtc_policy.model.sample_noise(noise_size, self.device) + noise_clone = noise.clone() + policy_no_rtc_policy.rtc_processor.reset_tracker() + with torch.no_grad(): + no_rtc_actions = policy_no_rtc_policy.predict_action_chunk( + preprocessed_second_sample, + noise=noise, + ) + no_rtc_tracked_steps = policy_no_rtc_policy.rtc_processor.tracker.get_all_steps() + logging.info(f" Tracked {len(no_rtc_tracked_steps)} steps without RTC") + logging.info(f" Generated no_rtc_actions shape: {no_rtc_actions.shape}") + + # Destroy policy_no_rtc to free memory before loading policy_rtc + self._destroy_policy(policy_no_rtc_policy, "policy_no_rtc") + + # ============================================================================ + # Step 3: Generate actions WITH RTC using policy_rtc + # ============================================================================ + logging.info("=" * 80) + logging.info("Step 3: Generating actions WITH RTC with policy_rtc") + logging.info("=" * 80) + + set_seed(self.cfg.seed) + + # Initialize policy 3 + policy_rtc_policy = self._init_policy( + name="policy_rtc", + rtc_enabled=True, + rtc_debug=True, + ) + policy_rtc_policy.rtc_processor.reset_tracker() + with torch.no_grad(): + rtc_actions = 
policy_rtc_policy.predict_action_chunk( + preprocessed_second_sample, + noise=noise_clone, + inference_delay=self.cfg.inference_delay, + prev_chunk_left_over=prev_chunk_left_over, + execution_horizon=self.cfg.rtc.execution_horizon, + ) + rtc_tracked_steps = policy_rtc_policy.rtc_processor.get_all_debug_steps() + logging.info(f" Tracked {len(rtc_tracked_steps)} steps with RTC") + logging.info(f" Generated rtc_actions shape: {rtc_actions.shape}") + + # Save num_steps before destroying policy (needed for plotting) + try: + num_steps = policy_rtc_policy.config.num_steps + except Exception as e: + logging.error(f" Error getting num_steps: {e}") + num_steps = policy_rtc_policy.config.num_inference_steps + logging.warning(f" Using num_inference_steps: {num_steps} instead of num_steps") + + # Destroy policy_rtc after final use + self._destroy_policy(policy_rtc_policy, "policy_rtc") + + # Plot and save results + logging.info("=" * 80) + logging.info("Plotting results...") + self.plot_tracked_data(rtc_tracked_steps, no_rtc_tracked_steps, prev_chunk_left_over, num_steps) + + # Plot final actions comparison + logging.info("=" * 80) + logging.info("Plotting final actions comparison...") + self.plot_final_actions_comparison(rtc_actions, no_rtc_actions, prev_chunk_left_over) + + logging.info("=" * 80) + logging.info("Evaluation completed successfully") + + def plot_final_actions_comparison(self, rtc_actions, no_rtc_actions, prev_chunk_left_over): + """Plot final action predictions comparison on a single chart. + + Args: + rtc_actions: Final actions from RTC policy + no_rtc_actions: Final actions from non-RTC policy + prev_chunk_left_over: Previous chunk used as ground truth + """ + _check_matplotlib_available() + + # Remove batch dimension if present + rtc_actions_plot = rtc_actions.squeeze(0).cpu() if len(rtc_actions.shape) == 3 else rtc_actions.cpu() + no_rtc_actions_plot = ( + no_rtc_actions.squeeze(0).cpu() if len(no_rtc_actions.shape) == 3 else no_rtc_actions.cpu() + ) + prev_chunk_plot = prev_chunk_left_over.cpu() + + # Create figure with 6 subplots (one per action dimension) + fig, axes = plt.subplots(6, 1, figsize=(16, 12)) + fig.suptitle("Final Action Predictions Comparison (Raw)", fontsize=16) + + # Plot each action dimension + for dim_idx, ax in enumerate(axes): + # Plot previous chunk (ground truth) in red + RTCDebugVisualizer.plot_waypoints( + [ax], + prev_chunk_plot[:, dim_idx : dim_idx + 1], + start_from=0, + color="red", + label="Previous Chunk (Ground Truth)", + linewidth=2.5, + alpha=0.8, + ) + + # Plot no-RTC actions in blue + RTCDebugVisualizer.plot_waypoints( + [ax], + no_rtc_actions_plot[:, dim_idx : dim_idx + 1], + start_from=0, + color="blue", + label="No RTC", + linewidth=2, + alpha=0.7, + ) + + # Plot RTC actions in green + RTCDebugVisualizer.plot_waypoints( + [ax], + rtc_actions_plot[:, dim_idx : dim_idx + 1], + start_from=0, + color="green", + label="RTC", + linewidth=2, + alpha=0.7, + ) + + # Add vertical lines for inference delay and execution horizon + inference_delay = self.cfg.inference_delay + execution_horizon = self.cfg.rtc.execution_horizon + + if inference_delay > 0: + ax.axvline( + x=inference_delay - 1, + color="orange", + linestyle="--", + alpha=0.5, + label=f"Inference Delay ({inference_delay})", + ) + + if execution_horizon > 0: + ax.axvline( + x=execution_horizon, + color="purple", + linestyle="--", + alpha=0.5, + label=f"Execution Horizon ({execution_horizon})", + ) + + ax.set_ylabel(f"Dim {dim_idx}", fontsize=10) + ax.grid(True, alpha=0.3) + + # Set x-axis 
ticks to show all integer values + max_len = max(rtc_actions_plot.shape[0], no_rtc_actions_plot.shape[0], prev_chunk_plot.shape[0]) + ax.set_xticks(range(0, max_len, max(1, max_len // 20))) # Show ~20 ticks + ax.set_xlim(-0.5, max_len - 0.5) + + axes[-1].set_xlabel("Step", fontsize=10) + + # Collect legend handles and labels from first subplot + handles, labels = axes[0].get_legend_handles_labels() + # Remove duplicates while preserving order + seen = set() + unique_handles = [] + unique_labels = [] + for handle, label in zip(handles, labels, strict=True): + if label not in seen: + seen.add(label) + unique_handles.append(handle) + unique_labels.append(label) + + # Add legend outside the plot area (to the right) + fig.legend( + unique_handles, + unique_labels, + loc="center right", + fontsize=9, + bbox_to_anchor=(1.0, 0.5), + framealpha=0.9, + ) + + # Save figure + output_path = os.path.join(self.cfg.output_dir, "final_actions_comparison.png") + fig.tight_layout(rect=[0, 0, 0.85, 1]) # Leave space for legend on right + fig.savefig(output_path, dpi=150, bbox_inches="tight") + logging.info(f"Saved final actions comparison to {output_path}") + plt.close(fig) + + def plot_tracked_data(self, rtc_tracked_steps, no_rtc_tracked_steps, prev_chunk_left_over, num_steps): + _check_matplotlib_available() + + # Create side-by-side figures for denoising visualization + fig_xt, axs_xt = self._create_figure("x_t Denoising: No RTC (left) vs RTC (right)") + fig_vt, axs_vt = self._create_figure("v_t Denoising: No RTC (left) vs RTC (right)") + fig_corr, axs_corr = self._create_figure("Correction: No RTC (left) vs RTC (right)") + fig_x1t, axs_x1t = self._create_figure( + "x1_t Predicted State & Error: No RTC (left - empty) vs RTC (right)" + ) + self._plot_denoising_steps_from_tracker( + rtc_tracked_steps, + axs_xt[:, 1], # Right column for x_t + axs_vt[:, 1], # Right column for v_t + axs_corr[:, 1], # Right column for correction + axs_x1t[:, 1], # Right column for x1_t + num_steps, + add_labels=True, # Add labels for RTC (right column) + ) + + self._plot_denoising_steps_from_tracker( + no_rtc_tracked_steps, + axs_xt[:, 0], # Left column for x_t + axs_vt[:, 0], # Left column for v_t + axs_corr[:, 0], # Left column for correction + axs_x1t[:, 0], # Left column for x1_t + num_steps, + add_labels=False, # No labels for No RTC (left column) + ) + + # Plot no-RTC x_t data on right chart as orange dashed line for comparison + self._plot_no_rtc_xt_reference(no_rtc_tracked_steps, axs_xt[:, 1], num_steps) + + # Plot ground truth on x_t axes + RTCDebugVisualizer.plot_waypoints( + axs_xt[:, 1], prev_chunk_left_over, start_from=0, color="red", label="Ground truth" + ) + + # Plot ground truth on x1_t axes + RTCDebugVisualizer.plot_waypoints( + axs_x1t[:, 1], prev_chunk_left_over, start_from=0, color="red", label="Ground truth" + ) + + # Plot ground truth on x_t axes (no labels for left column) + RTCDebugVisualizer.plot_waypoints( + axs_xt[:, 0], prev_chunk_left_over, start_from=0, color="red", label=None + ) + + RTCDebugVisualizer.plot_waypoints( + axs_x1t[:, 0], prev_chunk_left_over, start_from=0, color="red", label=None + ) + + # Add legends outside the plot area for each figure + self._add_figure_legend(fig_xt, axs_xt) + self._add_figure_legend(fig_vt, axs_vt) + self._add_figure_legend(fig_corr, axs_corr) + self._add_figure_legend(fig_x1t, axs_x1t) + + # Save denoising plots + self._save_figure(fig_xt, os.path.join(self.cfg.output_dir, "denoising_xt_comparison.png")) + self._save_figure(fig_vt, 
os.path.join(self.cfg.output_dir, "denoising_vt_comparison.png")) + self._save_figure(fig_corr, os.path.join(self.cfg.output_dir, "denoising_correction_comparison.png")) + self._save_figure(fig_x1t, os.path.join(self.cfg.output_dir, "denoising_x1t_comparison.png")) + + def _create_figure(self, title): + fig, axs = plt.subplots(6, 2, figsize=(24, 12)) + fig.suptitle(title, fontsize=16) + + for ax in axs[:, 0]: + ax.set_title("No RTC (N/A)" if ax == axs[0, 0] else "", fontsize=12) + for ax in axs[:, 1]: + ax.set_title("RTC" if ax == axs[0, 1] else "", fontsize=12) + + return fig, axs + + def _add_figure_legend(self, fig, axs): + """Add a legend outside the plot area on the right side. + + Args: + fig: Matplotlib figure to add legend to + axs: Array of axes to collect legend handles from + """ + # Collect all handles and labels from the first row of axes (right column) + handles, labels = axs[0, 1].get_legend_handles_labels() + + # Remove duplicates while preserving order + seen = set() + unique_handles = [] + unique_labels = [] + for handle, label in zip(handles, labels, strict=True): + if label not in seen: + seen.add(label) + unique_handles.append(handle) + unique_labels.append(label) + + # Add legend outside the plot area (to the right, close to charts) + if unique_handles: + fig.legend( + unique_handles, + unique_labels, + loc="center left", + fontsize=8, + bbox_to_anchor=(0.87, 0.5), + framealpha=0.9, + ncol=1, + ) + + def _save_figure(self, fig, path): + fig.tight_layout(rect=[0, 0, 0.85, 1]) # Leave space for legend/colorbar on right + fig.savefig(path, dpi=150, bbox_inches="tight") + logging.info(f"Saved figure to {path}") + plt.close(fig) + + def _plot_denoising_steps_from_tracker( + self, tracked_steps, xt_axs, vt_axs, corr_axs, x1t_axs, num_steps, add_labels=True + ): + """Plot denoising steps from tracker data. 
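+
+        Each denoising step is drawn in a viridis colormap color indexed by step number, so
+        early and late steps can be told apart across the x_t, v_t, correction, and x1_t axes.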
+ + Args: + tracked_steps: List of DebugStep objects containing debug steps + xt_axs: Matplotlib axes for x_t plots (array of 6 axes) + vt_axs: Matplotlib axes for v_t plots (array of 6 axes) + corr_axs: Matplotlib axes for correction plots (array of 6 axes) + x1t_axs: Matplotlib axes for x1_t plots (array of 6 axes) + num_steps: Total number of denoising steps for colormap + add_labels: Whether to add legend labels for the plots + """ + + logging.info("=" * 80) + logging.info(f"Plotting {len(tracked_steps)} steps") + + debug_steps = tracked_steps + if not debug_steps: + return + + # Define colors for different denoise steps (using a colormap) + colors = plt.cm.viridis(np.linspace(0, 1, num_steps)) + + for step_idx, debug_step in enumerate(debug_steps): + color = colors[step_idx % len(colors)] + label = f"Step {step_idx}" if add_labels else None + + # Plot x_t + if debug_step.x_t is not None: + RTCDebugVisualizer.plot_waypoints( + xt_axs, debug_step.x_t, start_from=0, color=color, label=label + ) + + # Plot v_t + if debug_step.v_t is not None: + RTCDebugVisualizer.plot_waypoints( + vt_axs, debug_step.v_t, start_from=0, color=color, label=label + ) + + # Plot correction on separate axes + if debug_step.correction is not None: + RTCDebugVisualizer.plot_waypoints( + corr_axs, + debug_step.correction, + start_from=0, + color=color, + label=label, + ) + + # Plot x1_t (predicted state) + if x1t_axs is not None and debug_step.x1_t is not None: + x1t_label = f"x1_t Step {step_idx}" if add_labels else None + RTCDebugVisualizer.plot_waypoints( + x1t_axs, + debug_step.x1_t, + start_from=0, + color=color, + label=x1t_label, + ) + + # Plot error in orange dashed + if x1t_axs is not None and debug_step.err is not None: + error_chunk = ( + debug_step.err[0].cpu().numpy() + if len(debug_step.err.shape) == 3 + else debug_step.err.cpu().numpy() + ) + + num_dims = min(error_chunk.shape[-1], 6) + error_label = f"error Step {step_idx}" if add_labels else None + for j in range(num_dims): + x1t_axs[j].plot( + np.arange(0, error_chunk.shape[0]), + error_chunk[:, j], + color="orange", + linestyle="--", + alpha=0.7, + label=error_label, + ) + + # Recalculate axis limits after plotting to ensure proper scaling + self._rescale_axes(xt_axs) + self._rescale_axes(vt_axs) + self._rescale_axes(corr_axs) + self._rescale_axes(x1t_axs) + + def _plot_no_rtc_xt_reference(self, no_rtc_tracked_steps, xt_axs, num_steps): + """Plot final no-RTC x_t data as orange dashed line on the RTC chart for comparison. + + Args: + no_rtc_tracked_steps: List of DebugStep objects containing no-RTC debug steps + xt_axs: Matplotlib axes for x_t plots (array of 6 axes, right column) + num_steps: Total number of denoising steps for colormap + """ + debug_steps = no_rtc_tracked_steps + if not debug_steps: + return + + # Plot only the final x_t step as orange dashed line + final_step = debug_steps[-1] + logging.info("Plotting final no-RTC x_t step as orange dashed reference") + + if final_step.x_t is not None: + x_t_chunk = ( + final_step.x_t[0].cpu().numpy() + if len(final_step.x_t.shape) == 3 + else final_step.x_t.cpu().numpy() + ) + + num_dims = min(x_t_chunk.shape[-1], 6) + for j in range(num_dims): + xt_axs[j].plot( + np.arange(0, x_t_chunk.shape[0]), + x_t_chunk[:, j], + color="orange", + linestyle="--", + alpha=0.7, + linewidth=2, + label="No RTC (final)" if j == 0 else "", + ) + + def _rescale_axes(self, axes): + """Rescale axes to show all data with proper margins. 
+ + Args: + axes: Array of matplotlib axes to rescale + """ + for ax in axes: + ax.relim() + ax.autoscale_view() + + # Add 10% margin to y-axis for better visualization + ylim = ax.get_ylim() + y_range = ylim[1] - ylim[0] + if y_range > 0: # Avoid division by zero + margin = y_range * 0.1 + ax.set_ylim(ylim[0] - margin, ylim[1] + margin) + + # Set x-axis ticks to show all integer values + xlim = ax.get_xlim() + max_len = int(xlim[1]) + 1 + if max_len > 0: + ax.set_xticks(range(0, max_len, max(1, max_len // 20))) # Show ~20 ticks + ax.set_xlim(-0.5, max_len - 0.5) + + +@parser.wrap() +def main(cfg: RTCEvalConfig): + """Main entry point for RTC evaluation.""" + # Set random seed for reproducibility + set_seed(cfg.seed) + + init_logging() + + logging.info("=" * 80) + logging.info("RTC Dataset Evaluation") + logging.info(f"Config: {cfg}") + logging.info("=" * 80) + + evaluator = RTCEvaluator(cfg) + evaluator.run_evaluation() + + +if __name__ == "__main__": + main() diff --git a/examples/rtc/eval_with_real_robot.py b/examples/rtc/eval_with_real_robot.py new file mode 100644 index 0000000000000000000000000000000000000000..5e2766d122ff4575731076162ed038aef408d9f9 --- /dev/null +++ b/examples/rtc/eval_with_real_robot.py @@ -0,0 +1,549 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Demo script showing how to use Real-Time Chunking (RTC) with action chunking policies on real robots. + +This script demonstrates: +1. Creating a robot and policy (SmolVLA, Pi0, etc.) with RTC +2. Consuming actions from the policy while the robot executes +3. Periodically requesting new action chunks in the background using threads +4. 
Managing action buffers and timing for real-time operation
+
+For simulation environments, see eval_with_simulation.py
+
+Usage:
+    # Run on a real robot with RTC enabled
+    uv run examples/rtc/eval_with_real_robot.py \
+        --policy.path=helper2424/smolvla_check_rtc_last3 \
+        --policy.device=mps \
+        --rtc.enabled=true \
+        --rtc.execution_horizon=20 \
+        --robot.type=so100_follower \
+        --robot.port=/dev/tty.usbmodem58FA0834591 \
+        --robot.id=so100_follower \
+        --robot.cameras="{ gripper: {type: opencv, index_or_path: 1, width: 640, height: 480, fps: 30}, front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \
+        --task="Move green small object into the purple platform" \
+        --duration=120
+
+    # Run on a real robot with RTC disabled
+    uv run examples/rtc/eval_with_real_robot.py \
+        --policy.path=helper2424/smolvla_check_rtc_last3 \
+        --policy.device=mps \
+        --rtc.enabled=false \
+        --robot.type=so100_follower \
+        --robot.port=/dev/tty.usbmodem58FA0834591 \
+        --robot.id=so100_follower \
+        --robot.cameras="{ gripper: {type: opencv, index_or_path: 1, width: 640, height: 480, fps: 30}, front: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}}" \
+        --task="Move green small object into the purple platform" \
+        --duration=120
+
+    # Run on a real robot with the pi0.5 policy and RTC enabled
+    uv run examples/rtc/eval_with_real_robot.py \
+        --policy.path=helper2424/pi05_check_rtc \
+        --policy.device=mps \
+        --rtc.enabled=true \
+        --rtc.execution_horizon=20 \
+        --robot.type=so100_follower \
+        --robot.port=/dev/tty.usbmodem58FA0834591 \
+        --robot.id=so100_follower \
+        --robot.cameras="{ gripper: {type: opencv, index_or_path: 0, width: 640, height: 480, fps: 30}, front: {type: opencv, index_or_path: 1, width: 640, height: 480, fps: 30}}" \
+        --task="Move green small object into the purple platform" \
+        --duration=120
+"""
+
+import logging
+import math
+import sys
+import time
+import traceback
+from dataclasses import dataclass, field
+from threading import Event, Lock, Thread
+
+import torch
+from torch import Tensor
+
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig  # noqa: F401
+from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig  # noqa: F401
+from lerobot.configs import parser
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import RTCAttentionSchedule
+from lerobot.datasets.utils import build_dataset_frame, hw_to_dataset_features
+from lerobot.policies.factory import get_policy_class, make_pre_post_processors
+from lerobot.policies.rtc.action_queue import ActionQueue
+from lerobot.policies.rtc.configuration_rtc import RTCConfig
+from lerobot.policies.rtc.latency_tracker import LatencyTracker
+from lerobot.processor.factory import (
+    make_default_robot_action_processor,
+    make_default_robot_observation_processor,
+)
+from lerobot.rl.process import ProcessSignalHandler
+from lerobot.robots import (  # noqa: F401
+    Robot,
+    RobotConfig,
+    koch_follower,
+    so100_follower,
+    so101_follower,
+)
+from lerobot.robots.utils import make_robot_from_config
+from lerobot.utils.constants import OBS_IMAGES
+from lerobot.utils.hub import HubMixin
+from lerobot.utils.utils import init_logging
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+class RobotWrapper:
+    def __init__(self, robot: Robot):
+        self.robot = robot
+        self.lock = Lock()
+
+    def get_observation(self) -> dict[str, Tensor]:
+        with self.lock:
+            return self.robot.get_observation()
+
+    def send_action(self, action: Tensor):
+        with self.lock:
+            self.robot.send_action(action)
+
+    def observation_features(self) -> list[str]:
+        with self.lock:
+            return self.robot.observation_features
+
+    def action_features(self) -> list[str]:
+        with self.lock:
+            return self.robot.action_features
+
+
+@dataclass
+class RTCDemoConfig(HubMixin):
+    """Configuration for RTC demo with action chunking policies and real robots."""
+
+    # Policy configuration
+    policy: PreTrainedConfig | None = None
+
+    # Robot configuration
+    robot: RobotConfig | None = None
+
+    # RTC configuration
+    rtc: RTCConfig = field(
+        default_factory=lambda: RTCConfig(
+            execution_horizon=10,
+            max_guidance_weight=1.0,
+            prefix_attention_schedule=RTCAttentionSchedule.EXP,
+        )
+    )
+
+    # Demo parameters
+    duration: float = 30.0  # Duration to run the demo (seconds)
+    fps: float = 10.0  # Action execution frequency (Hz)
+
+    # Compute device
+    device: str | None = None  # Device to run on (cuda, cpu, auto)
+
+    # Threshold on the action queue size: once the number of queued actions drops to this value,
+    # a new chunk is requested. It should be higher than inference delay + execution horizon.
+    action_queue_size_to_get_new_actions: int = 30
+
+    # Task to execute
+    task: str = field(default="", metadata={"help": "Task to execute"})
+
+    # Torch compile configuration
+    use_torch_compile: bool = field(
+        default=False,
+        metadata={"help": "Use torch.compile for faster inference (PyTorch 2.0+)"},
+    )
+
+    torch_compile_backend: str = field(
+        default="inductor",
+        metadata={"help": "Backend for torch.compile (inductor, aot_eager, cudagraphs)"},
+    )
+
+    torch_compile_mode: str = field(
+        default="default",
+        metadata={"help": "Compilation mode (default, reduce-overhead, max-autotune)"},
+    )
+
+    torch_compile_disable_cudagraphs: bool = field(
+        default=True,
+        metadata={
+            "help": "Disable CUDA graphs in torch.compile. Required due to in-place tensor "
+            "operations in denoising loop (x_t += dt * v_t) which cause tensor aliasing issues."
+        },
+    )
+
+    def __post_init__(self):
+        # HACK: We parse the CLI args again here to get the pretrained path if one was provided.
+        policy_path = parser.get_path_arg("policy")
+        if policy_path:
+            cli_overrides = parser.get_cli_overrides("policy")
+            self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides)
+            self.policy.pretrained_path = policy_path
+        else:
+            raise ValueError("Policy path is required")
+
+        # Validate that robot configuration is provided
+        if self.robot is None:
+            raise ValueError("Robot configuration must be provided")
+
+    @classmethod
+    def __get_path_fields__(cls) -> list[str]:
+        """This enables the parser to load config from the policy using `--policy.path=local/dir`"""
+        return ["policy"]
+
+
+def is_image_key(k: str) -> bool:
+    return k.startswith(OBS_IMAGES)
+
+
+def get_actions(
+    policy,
+    robot: RobotWrapper,
+    robot_observation_processor,
+    action_queue: ActionQueue,
+    shutdown_event: Event,
+    cfg: RTCDemoConfig,
+):
+    """Thread function to request action chunks from the policy.
+
+    Args:
+        policy: The policy instance (SmolVLA, Pi0, etc.)
+ robot: The robot instance for getting observations + robot_observation_processor: Processor for raw robot observations + action_queue: Queue to put new action chunks + shutdown_event: Event to signal shutdown + cfg: Demo configuration + """ + try: + logger.info("[GET_ACTIONS] Starting get actions thread") + + latency_tracker = LatencyTracker() # Track latency of action chunks + fps = cfg.fps + time_per_chunk = 1.0 / fps + + dataset_features = hw_to_dataset_features(robot.observation_features(), "observation") + policy_device = policy.config.device + + # Load preprocessor and postprocessor from pretrained files + # The stats are embedded in the processor .safetensors files + logger.info(f"[GET_ACTIONS] Loading preprocessor/postprocessor from {cfg.policy.pretrained_path}") + + preprocessor, postprocessor = make_pre_post_processors( + policy_cfg=cfg.policy, + pretrained_path=cfg.policy.pretrained_path, + dataset_stats=None, # Will load from pretrained processor files + preprocessor_overrides={ + "device_processor": {"device": cfg.policy.device}, + }, + ) + + logger.info("[GET_ACTIONS] Preprocessor/postprocessor loaded successfully with embedded stats") + + get_actions_threshold = cfg.action_queue_size_to_get_new_actions + + if not cfg.rtc.enabled: + get_actions_threshold = 0 + + while not shutdown_event.is_set(): + if action_queue.qsize() <= get_actions_threshold: + current_time = time.perf_counter() + action_index_before_inference = action_queue.get_action_index() + prev_actions = action_queue.get_left_over() + + inference_latency = latency_tracker.max() + inference_delay = math.ceil(inference_latency / time_per_chunk) + + obs = robot.get_observation() + + # Apply robot observation processor + obs_processed = robot_observation_processor(obs) + + obs_with_policy_features = build_dataset_frame( + dataset_features, obs_processed, prefix="observation" + ) + + for name in obs_with_policy_features: + obs_with_policy_features[name] = torch.from_numpy(obs_with_policy_features[name]) + if "image" in name: + obs_with_policy_features[name] = ( + obs_with_policy_features[name].type(torch.float32) / 255 + ) + obs_with_policy_features[name] = ( + obs_with_policy_features[name].permute(2, 0, 1).contiguous() + ) + obs_with_policy_features[name] = obs_with_policy_features[name].unsqueeze(0) + obs_with_policy_features[name] = obs_with_policy_features[name].to(policy_device) + + obs_with_policy_features["task"] = [cfg.task] # Task should be a list, not a string! + obs_with_policy_features["robot_type"] = ( + robot.robot.name if hasattr(robot.robot, "name") else "" + ) + + preproceseded_obs = preprocessor(obs_with_policy_features) + + # Generate actions WITH RTC + actions = policy.predict_action_chunk( + preproceseded_obs, + inference_delay=inference_delay, + prev_chunk_left_over=prev_actions, + ) + + # Store original actions (before postprocessing) for RTC + original_actions = actions.squeeze(0).clone() + + postprocessed_actions = postprocessor(actions) + + postprocessed_actions = postprocessed_actions.squeeze(0) + + new_latency = time.perf_counter() - current_time + new_delay = math.ceil(new_latency / time_per_chunk) + latency_tracker.add(new_latency) + + if cfg.action_queue_size_to_get_new_actions < cfg.rtc.execution_horizon + new_delay: + logger.warning( + "[GET_ACTIONS] cfg.action_queue_size_to_get_new_actions Too small, It should be higher than inference delay + execution horizon." 
+ ) + + action_queue.merge( + original_actions, postprocessed_actions, new_delay, action_index_before_inference + ) + else: + # Small sleep to prevent busy waiting + time.sleep(0.1) + + logger.info("[GET_ACTIONS] get actions thread shutting down") + except Exception as e: + logger.error(f"[GET_ACTIONS] Fatal exception in get_actions thread: {e}") + logger.error(traceback.format_exc()) + sys.exit(1) + + +def actor_control( + robot: RobotWrapper, + robot_action_processor, + action_queue: ActionQueue, + shutdown_event: Event, + cfg: RTCDemoConfig, +): + """Thread function to execute actions on the robot. + + Args: + robot: The robot instance + action_queue: Queue to get actions from + shutdown_event: Event to signal shutdown + cfg: Demo configuration + """ + try: + logger.info("[ACTOR] Starting actor thread") + + action_count = 0 + action_interval = 1.0 / cfg.fps + + while not shutdown_event.is_set(): + start_time = time.perf_counter() + + # Try to get an action from the queue with timeout + action = action_queue.get() + + if action is not None: + action = action.cpu() + action_dict = {key: action[i].item() for i, key in enumerate(robot.action_features())} + action_processed = robot_action_processor((action_dict, None)) + robot.send_action(action_processed) + + action_count += 1 + + dt_s = time.perf_counter() - start_time + time.sleep(max(0, (action_interval - dt_s) - 0.001)) + + logger.info(f"[ACTOR] Actor thread shutting down. Total actions executed: {action_count}") + except Exception as e: + logger.error(f"[ACTOR] Fatal exception in actor_control thread: {e}") + logger.error(traceback.format_exc()) + sys.exit(1) + + +def _apply_torch_compile(policy, cfg: RTCDemoConfig): + """Apply torch.compile to the policy's predict_action_chunk method. + + Args: + policy: Policy instance to compile + cfg: Configuration containing torch compile settings + + Returns: + Policy with compiled predict_action_chunk method + """ + + # PI models handle their own compilation + if policy.type == "pi05" or policy.type == "pi0": + return policy + + try: + # Check if torch.compile is available (PyTorch 2.0+) + if not hasattr(torch, "compile"): + logger.warning( + f"torch.compile is not available. Requires PyTorch 2.0+. " + f"Current version: {torch.__version__}. Skipping compilation." 
+ ) + return policy + + logger.info("Applying torch.compile to predict_action_chunk...") + logger.info(f" Backend: {cfg.torch_compile_backend}") + logger.info(f" Mode: {cfg.torch_compile_mode}") + logger.info(f" Disable CUDA graphs: {cfg.torch_compile_disable_cudagraphs}") + + # Compile the predict_action_chunk method + # - CUDA graphs disabled to prevent tensor aliasing from in-place ops (x_t += dt * v_t) + compile_kwargs = { + "backend": cfg.torch_compile_backend, + "mode": cfg.torch_compile_mode, + } + + # Disable CUDA graphs if requested (prevents tensor aliasing issues) + if cfg.torch_compile_disable_cudagraphs: + compile_kwargs["options"] = {"triton.cudagraphs": False} + + original_method = policy.predict_action_chunk + compiled_method = torch.compile(original_method, **compile_kwargs) + policy.predict_action_chunk = compiled_method + logger.info("✓ Successfully compiled predict_action_chunk") + + except Exception as e: + logger.error(f"Failed to apply torch.compile: {e}") + logger.warning("Continuing without torch.compile") + + return policy + + +@parser.wrap() +def demo_cli(cfg: RTCDemoConfig): + """Main entry point for RTC demo with draccus configuration.""" + + # Initialize logging + init_logging() + + logger.info(f"Using device: {cfg.device}") + + # Setup signal handler for graceful shutdown + signal_handler = ProcessSignalHandler(use_threads=True, display_pid=False) + shutdown_event = signal_handler.shutdown_event + + policy = None + robot = None + get_actions_thread = None + actor_thread = None + + policy_class = get_policy_class(cfg.policy.type) + + # Load config and set compile_model for pi0/pi05 models + config = PreTrainedConfig.from_pretrained(cfg.policy.pretrained_path) + + if cfg.policy.type == "pi05" or cfg.policy.type == "pi0": + config.compile_model = cfg.use_torch_compile + + policy = policy_class.from_pretrained(cfg.policy.pretrained_path, config=config) + + # Turn on RTC + policy.config.rtc_config = cfg.rtc + + # Init RTC processort, as by default if RTC disabled in the config + # The processor won't be created + policy.init_rtc_processor() + + assert policy.name in ["smolvla", "pi05", "pi0"], "Only smolvla, pi05, and pi0 are supported for RTC" + + policy = policy.to(cfg.device) + policy.eval() + + # Apply torch.compile to predict_action_chunk method if enabled + if cfg.use_torch_compile: + policy = _apply_torch_compile(policy, cfg) + + # Create robot + logger.info(f"Initializing robot: {cfg.robot.type}") + robot = make_robot_from_config(cfg.robot) + robot.connect() + robot_wrapper = RobotWrapper(robot) + + # Create robot observation processor + robot_observation_processor = make_default_robot_observation_processor() + robot_action_processor = make_default_robot_action_processor() + + # Create action queue for communication between threads + action_queue = ActionQueue(cfg.rtc) + + # Start chunk requester thread + get_actions_thread = Thread( + target=get_actions, + args=(policy, robot_wrapper, robot_observation_processor, action_queue, shutdown_event, cfg), + daemon=True, + name="GetActions", + ) + get_actions_thread.start() + logger.info("Started get actions thread") + + # Start action executor thread + actor_thread = Thread( + target=actor_control, + args=(robot_wrapper, robot_action_processor, action_queue, shutdown_event, cfg), + daemon=True, + name="Actor", + ) + actor_thread.start() + logger.info("Started actor thread") + + logger.info("Started stop by duration thread") + + # Main thread monitors for duration or shutdown + logger.info(f"Running demo for 
{cfg.duration} seconds...") + start_time = time.time() + + while not shutdown_event.is_set() and (time.time() - start_time) < cfg.duration: + time.sleep(10) + + # Log queue status periodically + if int(time.time() - start_time) % 5 == 0: + logger.info(f"[MAIN] Action queue size: {action_queue.qsize()}") + + if time.time() - start_time > cfg.duration: + break + + logger.info("Demo duration reached or shutdown requested") + + # Signal shutdown + shutdown_event.set() + + # Wait for threads to finish + if get_actions_thread and get_actions_thread.is_alive(): + logger.info("Waiting for chunk requester thread to finish...") + get_actions_thread.join() + + if actor_thread and actor_thread.is_alive(): + logger.info("Waiting for action executor thread to finish...") + actor_thread.join() + + # Cleanup robot + if robot: + robot.disconnect() + logger.info("Robot disconnected") + + logger.info("Cleanup completed") + + +if __name__ == "__main__": + demo_cli() + logging.info("RTC demo finished") diff --git a/examples/so100_to_so100_EE/evaluate.py b/examples/so100_to_so100_EE/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..2f8bde96b8250314125f069d5492d58d31e8a370 --- /dev/null +++ b/examples/so100_to_so100_EE/evaluate.py @@ -0,0 +1,200 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
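+
+"""Evaluate a pretrained ACT policy on an SO100 follower arm in end-effector space.
+
+The policy predicts end-effector actions; an inverse-kinematics pipeline converts them into joint
+commands for the robot, while a forward-kinematics pipeline converts joint observations back into
+end-effector poses. Evaluation episodes are recorded into a LeRobotDataset and pushed to the Hub.
+"""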
+ +from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig +from lerobot.configs.types import FeatureType, PolicyFeature +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features +from lerobot.datasets.utils import combine_feature_dicts +from lerobot.model.kinematics import RobotKinematics +from lerobot.policies.act.modeling_act import ACTPolicy +from lerobot.policies.factory import make_pre_post_processors +from lerobot.processor import ( + RobotAction, + RobotObservation, + RobotProcessorPipeline, + make_default_teleop_action_processor, +) +from lerobot.processor.converters import ( + observation_to_transition, + robot_action_observation_to_transition, + transition_to_observation, + transition_to_robot_action, +) +from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig +from lerobot.robots.so100_follower.robot_kinematic_processor import ( + ForwardKinematicsJointsToEE, + InverseKinematicsEEToJoints, +) +from lerobot.robots.so100_follower.so100_follower import SO100Follower +from lerobot.scripts.lerobot_record import record_loop +from lerobot.utils.control_utils import init_keyboard_listener +from lerobot.utils.utils import log_say +from lerobot.utils.visualization_utils import init_rerun + +NUM_EPISODES = 5 +FPS = 30 +EPISODE_TIME_SEC = 60 +TASK_DESCRIPTION = "My task description" +HF_MODEL_ID = "/" +HF_DATASET_ID = "/" + +# Create the robot configuration & robot +camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} +robot_config = SO100FollowerConfig( + port="/dev/tty.usbmodem5A460814411", + id="my_awesome_follower_arm", + cameras=camera_config, + use_degrees=True, +) + +robot = SO100Follower(robot_config) + +# Create policy +policy = ACTPolicy.from_pretrained(HF_MODEL_ID) + +# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf +kinematics_solver = RobotKinematics( + urdf_path="./SO101/so101_new_calib.urdf", + target_frame_name="gripper_frame_link", + joint_names=list(robot.bus.motors.keys()), +) + +# Build pipeline to convert EE action to joints action +robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( + steps=[ + InverseKinematicsEEToJoints( + kinematics=kinematics_solver, + motor_names=list(robot.bus.motors.keys()), + initial_guess_current_joints=True, + ), + ], + to_transition=robot_action_observation_to_transition, + to_output=transition_to_robot_action, +) + +# Build pipeline to convert joints observation to EE observation +robot_joints_to_ee_pose_processor = RobotProcessorPipeline[RobotObservation, RobotObservation]( + steps=[ + ForwardKinematicsJointsToEE(kinematics=kinematics_solver, motor_names=list(robot.bus.motors.keys())) + ], + to_transition=observation_to_transition, + to_output=transition_to_observation, +) + + +# Create the dataset +dataset = LeRobotDataset.create( + repo_id=HF_DATASET_ID, + fps=FPS, + features=combine_feature_dicts( + aggregate_pipeline_dataset_features( + pipeline=robot_joints_to_ee_pose_processor, + initial_features=create_initial_features(observation=robot.observation_features), + use_videos=True, + ), + # User for now should be explicit on the feature keys that were used for record + # Alternatively, the user can pass the processor step that has the right features + 
aggregate_pipeline_dataset_features( + pipeline=make_default_teleop_action_processor(), + initial_features=create_initial_features( + action={ + f"ee.{k}": PolicyFeature(type=FeatureType.ACTION, shape=(1,)) + for k in ["x", "y", "z", "wx", "wy", "wz", "gripper_pos"] + } + ), + use_videos=True, + ), + ), + robot_type=robot.name, + use_videos=True, + image_writer_threads=4, +) + +# Build Policy Processors +preprocessor, postprocessor = make_pre_post_processors( + policy_cfg=policy, + pretrained_path=HF_MODEL_ID, + dataset_stats=dataset.meta.stats, + # The inference device is automatically set to match the detected hardware, overriding any previous device settings from training to ensure compatibility. + preprocessor_overrides={"device_processor": {"device": str(policy.config.device)}}, +) + +# Connect the robot and teleoperator +robot.connect() + +# Initialize the keyboard listener and rerun visualization +listener, events = init_keyboard_listener() +init_rerun(session_name="so100_so100_evaluate") + +if not robot.is_connected: + raise ValueError("Robot is not connected!") + +print("Starting evaluate loop...") +episode_idx = 0 +for episode_idx in range(NUM_EPISODES): + log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}") + + # Main record loop + record_loop( + robot=robot, + events=events, + fps=FPS, + policy=policy, + preprocessor=preprocessor, # Pass the pre and post policy processors + postprocessor=postprocessor, + dataset=dataset, + control_time_s=EPISODE_TIME_SEC, + single_task=TASK_DESCRIPTION, + display_data=True, + teleop_action_processor=make_default_teleop_action_processor(), + robot_action_processor=robot_ee_to_joints_processor, + robot_observation_processor=robot_joints_to_ee_pose_processor, + ) + + # Reset the environment if not stopping or re-recording + if not events["stop_recording"] and ((episode_idx < NUM_EPISODES - 1) or events["rerecord_episode"]): + log_say("Reset the environment") + record_loop( + robot=robot, + events=events, + fps=FPS, + control_time_s=EPISODE_TIME_SEC, + single_task=TASK_DESCRIPTION, + display_data=True, + teleop_action_processor=make_default_teleop_action_processor(), + robot_action_processor=robot_ee_to_joints_processor, + robot_observation_processor=robot_joints_to_ee_pose_processor, + ) + + if events["rerecord_episode"]: + log_say("Re-record episode") + events["rerecord_episode"] = False + events["exit_early"] = False + dataset.clear_episode_buffer() + continue + + # Save episode + dataset.save_episode() + episode_idx += 1 + +# Clean up +log_say("Stop recording") +robot.disconnect() +listener.stop() + +dataset.finalize() +dataset.push_to_hub() diff --git a/examples/so100_to_so100_EE/record.py b/examples/so100_to_so100_EE/record.py new file mode 100644 index 0000000000000000000000000000000000000000..3ba26eb3bea0962f3f6095a6be1150111bf15f9d --- /dev/null +++ b/examples/so100_to_so100_EE/record.py @@ -0,0 +1,204 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.datasets.pipeline_features import aggregate_pipeline_dataset_features, create_initial_features +from lerobot.datasets.utils import combine_feature_dicts +from lerobot.model.kinematics import RobotKinematics +from lerobot.processor import RobotAction, RobotObservation, RobotProcessorPipeline +from lerobot.processor.converters import ( + observation_to_transition, + robot_action_observation_to_transition, + transition_to_observation, + transition_to_robot_action, +) +from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig +from lerobot.robots.so100_follower.robot_kinematic_processor import ( + EEBoundsAndSafety, + ForwardKinematicsJointsToEE, + InverseKinematicsEEToJoints, +) +from lerobot.robots.so100_follower.so100_follower import SO100Follower +from lerobot.scripts.lerobot_record import record_loop +from lerobot.teleoperators.so100_leader.config_so100_leader import SO100LeaderConfig +from lerobot.teleoperators.so100_leader.so100_leader import SO100Leader +from lerobot.utils.control_utils import init_keyboard_listener +from lerobot.utils.utils import log_say +from lerobot.utils.visualization_utils import init_rerun + +NUM_EPISODES = 2 +FPS = 30 +EPISODE_TIME_SEC = 60 +RESET_TIME_SEC = 30 +TASK_DESCRIPTION = "My task description" +HF_REPO_ID = "/" + +# Create the robot and teleoperator configurations +camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} +follower_config = SO100FollowerConfig( + port="/dev/tty.usbmodem5A460814411", id="my_awesome_follower_arm", cameras=camera_config, use_degrees=True +) +leader_config = SO100LeaderConfig(port="/dev/tty.usbmodem5A460819811", id="my_awesome_leader_arm") + +# Initialize the robot and teleoperator +follower = SO100Follower(follower_config) +leader = SO100Leader(leader_config) + +# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf +follower_kinematics_solver = RobotKinematics( + urdf_path="./SO101/so101_new_calib.urdf", + target_frame_name="gripper_frame_link", + joint_names=list(follower.bus.motors.keys()), +) + +# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf +leader_kinematics_solver = RobotKinematics( + urdf_path="./SO101/so101_new_calib.urdf", + target_frame_name="gripper_frame_link", + joint_names=list(leader.bus.motors.keys()), +) + +# Build pipeline to convert follower joints to EE observation +follower_joints_to_ee = RobotProcessorPipeline[RobotObservation, RobotObservation]( + steps=[ + ForwardKinematicsJointsToEE( + kinematics=follower_kinematics_solver, motor_names=list(follower.bus.motors.keys()) + ), + ], + to_transition=observation_to_transition, + to_output=transition_to_observation, +) + +# Build pipeline to convert leader joints to EE action +leader_joints_to_ee = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( + steps=[ + ForwardKinematicsJointsToEE( + kinematics=leader_kinematics_solver, motor_names=list(leader.bus.motors.keys()) + ), + ], + to_transition=robot_action_observation_to_transition, + to_output=transition_to_robot_action, +) + +# Build 
pipeline to convert EE action to follower joints +ee_to_follower_joints = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( + [ + EEBoundsAndSafety( + end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]}, + max_ee_step_m=0.10, + ), + InverseKinematicsEEToJoints( + kinematics=follower_kinematics_solver, + motor_names=list(follower.bus.motors.keys()), + initial_guess_current_joints=True, + ), + ], + to_transition=robot_action_observation_to_transition, + to_output=transition_to_robot_action, +) + +# Create the dataset +dataset = LeRobotDataset.create( + repo_id=HF_REPO_ID, + fps=FPS, + features=combine_feature_dicts( + # Run the feature contract of the pipelines + # This tells you how the features would look like after the pipeline steps + aggregate_pipeline_dataset_features( + pipeline=leader_joints_to_ee, + initial_features=create_initial_features(action=leader.action_features), + use_videos=True, + ), + aggregate_pipeline_dataset_features( + pipeline=follower_joints_to_ee, + initial_features=create_initial_features(observation=follower.observation_features), + use_videos=True, + ), + ), + robot_type=follower.name, + use_videos=True, + image_writer_threads=4, +) + + +# Connect the robot and teleoperator +leader.connect() +follower.connect() + +# Initialize the keyboard listener and rerun visualization +listener, events = init_keyboard_listener() +init_rerun(session_name="recording_phone") + +if not leader.is_connected or not follower.is_connected: + raise ValueError("Robot or teleop is not connected!") + +print("Starting record loop...") +episode_idx = 0 +while episode_idx < NUM_EPISODES and not events["stop_recording"]: + log_say(f"Recording episode {episode_idx + 1} of {NUM_EPISODES}") + + # Main record loop + record_loop( + robot=follower, + events=events, + fps=FPS, + teleop=leader, + dataset=dataset, + control_time_s=EPISODE_TIME_SEC, + single_task=TASK_DESCRIPTION, + display_data=True, + teleop_action_processor=leader_joints_to_ee, + robot_action_processor=ee_to_follower_joints, + robot_observation_processor=follower_joints_to_ee, + ) + + # Reset the environment if not stopping or re-recording + if not events["stop_recording"] and (episode_idx < NUM_EPISODES - 1 or events["rerecord_episode"]): + log_say("Reset the environment") + record_loop( + robot=follower, + events=events, + fps=FPS, + teleop=leader, + control_time_s=RESET_TIME_SEC, + single_task=TASK_DESCRIPTION, + display_data=True, + teleop_action_processor=leader_joints_to_ee, + robot_action_processor=ee_to_follower_joints, + robot_observation_processor=follower_joints_to_ee, + ) + + if events["rerecord_episode"]: + log_say("Re-recording episode") + events["rerecord_episode"] = False + events["exit_early"] = False + dataset.clear_episode_buffer() + continue + + # Save episode + dataset.save_episode() + episode_idx += 1 + +# Clean up +log_say("Stop recording") +leader.disconnect() +follower.disconnect() +listener.stop() + +dataset.finalize() +dataset.push_to_hub() diff --git a/examples/so100_to_so100_EE/replay.py b/examples/so100_to_so100_EE/replay.py new file mode 100644 index 0000000000000000000000000000000000000000..a9d6ad918f75622d0d42210e37809b4ef56b252c --- /dev/null +++ b/examples/so100_to_so100_EE/replay.py @@ -0,0 +1,101 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import time + +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.model.kinematics import RobotKinematics +from lerobot.processor import RobotAction, RobotObservation, RobotProcessorPipeline +from lerobot.processor.converters import ( + robot_action_observation_to_transition, + transition_to_robot_action, +) +from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig +from lerobot.robots.so100_follower.robot_kinematic_processor import ( + InverseKinematicsEEToJoints, +) +from lerobot.robots.so100_follower.so100_follower import SO100Follower +from lerobot.utils.constants import ACTION +from lerobot.utils.robot_utils import busy_wait +from lerobot.utils.utils import log_say + +EPISODE_IDX = 0 +HF_REPO_ID = "/" + +# Initialize the robot config +robot_config = SO100FollowerConfig( + port="/dev/tty.usbmodem5A460814411", id="my_awesome_follower_arm", use_degrees=True +) + +# Initialize the robot +robot = SO100Follower(robot_config) + +# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf +kinematics_solver = RobotKinematics( + urdf_path="./SO101/so101_new_calib.urdf", + target_frame_name="gripper_frame_link", + joint_names=list(robot.bus.motors.keys()), +) + +# Build pipeline to convert EE action to joints action +robot_ee_to_joints_processor = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( + steps=[ + InverseKinematicsEEToJoints( + kinematics=kinematics_solver, + motor_names=list(robot.bus.motors.keys()), + initial_guess_current_joints=False, # Because replay is open loop + ), + ], + to_transition=robot_action_observation_to_transition, + to_output=transition_to_robot_action, +) + +# Fetch the dataset to replay +dataset = LeRobotDataset(HF_REPO_ID, episodes=[EPISODE_IDX]) +# Filter dataset to only include frames from the specified episode since episodes are chunked in dataset V3.0 +episode_frames = dataset.hf_dataset.filter(lambda x: x["episode_index"] == EPISODE_IDX) +actions = episode_frames.select_columns(ACTION) + +# Connect to the robot +robot.connect() + +if not robot.is_connected: + raise ValueError("Robot is not connected!") + +print("Starting replay loop...") +log_say(f"Replaying episode {EPISODE_IDX}") +for idx in range(len(episode_frames)): + t0 = time.perf_counter() + + # Get recorded action from dataset + ee_action = { + name: float(actions[idx][ACTION][i]) for i, name in enumerate(dataset.features[ACTION]["names"]) + } + + # Get robot observation + robot_obs = robot.get_observation() + + # Dataset EE -> robot joints + joint_action = robot_ee_to_joints_processor((ee_action, robot_obs)) + + # Send action to robot + _ = robot.send_action(joint_action) + + busy_wait(1.0 / dataset.fps - (time.perf_counter() - t0)) + +# Clean up +robot.disconnect() diff --git a/examples/so100_to_so100_EE/teleoperate.py b/examples/so100_to_so100_EE/teleoperate.py new file mode 100644 index 0000000000000000000000000000000000000000..912f90568fec959b64a42bb5f97cca6582cef57f --- /dev/null +++ 
b/examples/so100_to_so100_EE/teleoperate.py @@ -0,0 +1,121 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time + +from lerobot.model.kinematics import RobotKinematics +from lerobot.processor import RobotAction, RobotObservation, RobotProcessorPipeline +from lerobot.processor.converters import ( + robot_action_observation_to_transition, + robot_action_to_transition, + transition_to_robot_action, +) +from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig +from lerobot.robots.so100_follower.robot_kinematic_processor import ( + EEBoundsAndSafety, + ForwardKinematicsJointsToEE, + InverseKinematicsEEToJoints, +) +from lerobot.robots.so100_follower.so100_follower import SO100Follower +from lerobot.teleoperators.so100_leader.config_so100_leader import SO100LeaderConfig +from lerobot.teleoperators.so100_leader.so100_leader import SO100Leader +from lerobot.utils.robot_utils import busy_wait +from lerobot.utils.visualization_utils import init_rerun, log_rerun_data + +FPS = 30 + +# Initialize the robot and teleoperator config +follower_config = SO100FollowerConfig( + port="/dev/tty.usbmodem5A460814411", id="my_awesome_follower_arm", use_degrees=True +) +leader_config = SO100LeaderConfig(port="/dev/tty.usbmodem5A460819811", id="my_awesome_leader_arm") + +# Initialize the robot and teleoperator +follower = SO100Follower(follower_config) +leader = SO100Leader(leader_config) + +# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf +follower_kinematics_solver = RobotKinematics( + urdf_path="./SO101/so101_new_calib.urdf", + target_frame_name="gripper_frame_link", + joint_names=list(follower.bus.motors.keys()), +) + +# NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf +leader_kinematics_solver = RobotKinematics( + urdf_path="./SO101/so101_new_calib.urdf", + target_frame_name="gripper_frame_link", + joint_names=list(leader.bus.motors.keys()), +) + +# Build pipeline to convert teleop joints to EE action +leader_to_ee = RobotProcessorPipeline[RobotAction, RobotAction]( + steps=[ + ForwardKinematicsJointsToEE( + kinematics=leader_kinematics_solver, motor_names=list(leader.bus.motors.keys()) + ), + ], + to_transition=robot_action_to_transition, + to_output=transition_to_robot_action, +) + +# build pipeline to convert EE action to robot joints +ee_to_follower_joints = RobotProcessorPipeline[tuple[RobotAction, RobotObservation], RobotAction]( + [ + EEBoundsAndSafety( + end_effector_bounds={"min": [-1.0, -1.0, -1.0], "max": [1.0, 1.0, 1.0]}, + max_ee_step_m=0.10, + ), + InverseKinematicsEEToJoints( + kinematics=follower_kinematics_solver, + motor_names=list(follower.bus.motors.keys()), + initial_guess_current_joints=False, + ), + ], + 
to_transition=robot_action_observation_to_transition, + to_output=transition_to_robot_action, +) + +# Connect to the robot and teleoperator +follower.connect() +leader.connect() + +# Init rerun viewer +init_rerun(session_name="so100_so100_EE_teleop") + +print("Starting teleop loop...") +while True: + t0 = time.perf_counter() + + # Get robot observation + robot_obs = follower.get_observation() + + # Get teleop observation + leader_joints_obs = leader.get_action() + + # teleop joints -> teleop EE action + leader_ee_act = leader_to_ee(leader_joints_obs) + + # teleop EE -> robot joints + follower_joints_act = ee_to_follower_joints((leader_ee_act, robot_obs)) + + # Send action to robot + _ = follower.send_action(follower_joints_act) + + # Visualize + log_rerun_data(observation=leader_ee_act, action=follower_joints_act) + + busy_wait(max(1.0 / FPS - (time.perf_counter() - t0), 0.0)) diff --git a/examples/training/train_policy.py b/examples/training/train_policy.py new file mode 100644 index 0000000000000000000000000000000000000000..19aeae5d69189d7453f42338b190c931ea07178e --- /dev/null +++ b/examples/training/train_policy.py @@ -0,0 +1,120 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This script demonstrates how to train Diffusion Policy on the PushT environment.""" + +from pathlib import Path + +import torch + +from lerobot.configs.types import FeatureType +from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata +from lerobot.datasets.utils import dataset_to_policy_features +from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig +from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy +from lerobot.policies.factory import make_pre_post_processors + + +def main(): + # Create a directory to store the training checkpoint. + output_directory = Path("outputs/train/example_pusht_diffusion") + output_directory.mkdir(parents=True, exist_ok=True) + + # # Select your device + device = torch.device("cuda") + + # Number of offline training steps (we'll only do offline training for this example.) + # Adjust as you prefer. 5000 steps are needed to get something worth evaluating. + training_steps = 5000 + log_freq = 1 + + # When starting from scratch (i.e. not from a pretrained policy), we need to specify 2 things before + # creating the policy: + # - input/output shapes: to properly size the policy + # - dataset stats: for normalization and denormalization of input/outputs + dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht") + features = dataset_to_policy_features(dataset_metadata.features) + output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} + input_features = {key: ft for key, ft in features.items() if key not in output_features} + + # Policies are initialized with a configuration class, in this case `DiffusionConfig`. 
For this example, + # we'll just use the defaults and so no arguments other than input/output features need to be passed. + cfg = DiffusionConfig(input_features=input_features, output_features=output_features) + + # We can now instantiate our policy with this config and the dataset stats. + policy = DiffusionPolicy(cfg) + policy.train() + policy.to(device) + preprocessor, postprocessor = make_pre_post_processors(cfg, dataset_stats=dataset_metadata.stats) + + # Another policy-dataset interaction is with the delta_timestamps. Each policy expects a given number frames + # which can differ for inputs, outputs and rewards (if there are some). + delta_timestamps = { + "observation.image": [i / dataset_metadata.fps for i in cfg.observation_delta_indices], + "observation.state": [i / dataset_metadata.fps for i in cfg.observation_delta_indices], + "action": [i / dataset_metadata.fps for i in cfg.action_delta_indices], + } + + # In this case with the standard configuration for Diffusion Policy, it is equivalent to this: + delta_timestamps = { + # Load the previous image and state at -0.1 seconds before current frame, + # then load current image and state corresponding to 0.0 second. + "observation.image": [-0.1, 0.0], + "observation.state": [-0.1, 0.0], + # Load the previous action (-0.1), the next action to be executed (0.0), + # and 14 future actions with a 0.1 seconds spacing. All these actions will be + # used to supervise the policy. + "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4], + } + + # We can then instantiate the dataset with these delta_timestamps configuration. + dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps) + + # Then we create our optimizer and dataloader for offline training. + optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4) + dataloader = torch.utils.data.DataLoader( + dataset, + num_workers=4, + batch_size=64, + shuffle=True, + pin_memory=device.type != "cpu", + drop_last=True, + ) + + # Run training loop. + step = 0 + done = False + while not done: + for batch in dataloader: + batch = preprocessor(batch) + loss, _ = policy.forward(batch) + loss.backward() + optimizer.step() + optimizer.zero_grad() + + if step % log_freq == 0: + print(f"step: {step} loss: {loss.item():.3f}") + step += 1 + if step >= training_steps: + done = True + break + + # Save a policy checkpoint. + policy.save_pretrained(output_directory) + preprocessor.save_pretrained(output_directory) + postprocessor.save_pretrained(output_directory) + + +if __name__ == "__main__": + main() diff --git a/examples/training/train_with_streaming.py b/examples/training/train_with_streaming.py new file mode 100644 index 0000000000000000000000000000000000000000..ec0d3e9dbd91407f7245f5c75fc28f54100a3965 --- /dev/null +++ b/examples/training/train_with_streaming.py @@ -0,0 +1,108 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""This script demonstrates how to train a Diffusion Policy on the PushT environment, +using a dataset processed in streaming mode.""" + +from pathlib import Path + +import torch + +from lerobot.configs.types import FeatureType +from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata +from lerobot.datasets.streaming_dataset import StreamingLeRobotDataset +from lerobot.datasets.utils import dataset_to_policy_features +from lerobot.policies.act.configuration_act import ACTConfig +from lerobot.policies.act.modeling_act import ACTPolicy +from lerobot.policies.factory import make_pre_post_processors +from lerobot.utils.constants import ACTION + + +def main(): + # Create a directory to store the training checkpoint. + output_directory = Path("outputs/train/example_streaming_dataset") + output_directory.mkdir(parents=True, exist_ok=True) + + # Selects the "best" device available + device = ( + torch.device("cuda") + if torch.cuda.is_available() + else torch.device("mps") + if torch.backends.mps.is_available() + else torch.device("cpu") + ) + print(f"Using device: {device}") + + training_steps = 10 + log_freq = 1 + + dataset_id = "lerobot/droid_1.0.1" # 26M frames! Would require 4TB of disk space if installed locally (: + dataset_metadata = LeRobotDatasetMetadata(dataset_id) + features = dataset_to_policy_features(dataset_metadata.features) + output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} + input_features = {key: ft for key, ft in features.items() if key not in output_features} + + # We can now instantiate our policy with this config and the dataset stats. + cfg = ACTConfig(input_features=input_features, output_features=output_features) + policy = ACTPolicy(cfg) + policy.train() + policy.to(device) + preprocessor, postprocessor = make_pre_post_processors(cfg, dataset_stats=dataset_metadata.stats) + + # Delta timestamps are used to (1) augment frames used during training and (2) supervise the policy. + # Here, we use delta-timestamps to only provide ground truth actions for supervision + delta_timestamps = { + ACTION: [t / dataset_metadata.fps for t in range(cfg.n_action_steps)], + } + + # Instantiating the training dataset in streaming mode allows to not consume up memory as the data is fetched + # iteratively rather than being load into memory all at once. Retrieved frames are shuffled across epochs + dataset = StreamingLeRobotDataset(dataset_id, delta_timestamps=delta_timestamps, tolerance_s=1e-3) + + optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4) + dataloader = torch.utils.data.DataLoader( + dataset, + num_workers=4, + batch_size=16, + pin_memory=device.type != "cpu", + drop_last=True, + prefetch_factor=2, # loads batches with multiprocessing while policy trains + ) + + # Run training loop. + step = 0 + done = False + while not done: + for batch in dataloader: + batch = preprocessor(batch) + loss, _ = policy.forward(batch) + loss.backward() + optimizer.step() + optimizer.zero_grad() + + if step % log_freq == 0: + print(f"step: {step} loss: {loss.item():.3f}") + step += 1 + if step >= training_steps: + done = True + break + + # Save a policy checkpoint. 
+ policy.save_pretrained(output_directory) + preprocessor.save_pretrained(output_directory) + postprocessor.save_pretrained(output_directory) + + +if __name__ == "__main__": + main() diff --git a/examples/tutorial/act/act_training_example.py b/examples/tutorial/act/act_training_example.py new file mode 100644 index 0000000000000000000000000000000000000000..0880dfc85f82dc011b7203544298f841038fd5de --- /dev/null +++ b/examples/tutorial/act/act_training_example.py @@ -0,0 +1,98 @@ +"""This script demonstrates how to train ACT Policy on a real-world dataset.""" + +from pathlib import Path + +import torch + +from lerobot.configs.types import FeatureType +from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata +from lerobot.datasets.utils import dataset_to_policy_features +from lerobot.policies.act.configuration_act import ACTConfig +from lerobot.policies.act.modeling_act import ACTPolicy +from lerobot.policies.factory import make_pre_post_processors + + +def make_delta_timestamps(delta_indices: list[int] | None, fps: int) -> list[float]: + if delta_indices is None: + return [0] + + return [i / fps for i in delta_indices] + + +output_directory = Path("outputs/robot_learning_tutorial/act") +output_directory.mkdir(parents=True, exist_ok=True) + +# Select your device +device = torch.device("mps") # or "cuda" or "cpu" + +dataset_id = "lerobot/svla_so101_pickplace" + +# This specifies the inputs the model will be expecting and the outputs it will produce +dataset_metadata = LeRobotDatasetMetadata(dataset_id) +features = dataset_to_policy_features(dataset_metadata.features) + +output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} +input_features = {key: ft for key, ft in features.items() if key not in output_features} + +cfg = ACTConfig(input_features=input_features, output_features=output_features) +policy = ACTPolicy(cfg) +preprocessor, postprocessor = make_pre_post_processors(cfg, dataset_stats=dataset_metadata.stats) + +policy.train() +policy.to(device) + +# To perform action chunking, ACT expects a given number of actions as targets +delta_timestamps = { + "action": make_delta_timestamps(cfg.action_delta_indices, dataset_metadata.fps), +} + +# add image features if they are present +delta_timestamps |= { + k: make_delta_timestamps(cfg.observation_delta_indices, dataset_metadata.fps) for k in cfg.image_features +} + +# Instantiate the dataset +dataset = LeRobotDataset(dataset_id, delta_timestamps=delta_timestamps) + +# Create the optimizer and dataloader for offline training +optimizer = cfg.get_optimizer_preset().build(policy.parameters()) +batch_size = 32 +dataloader = torch.utils.data.DataLoader( + dataset, + batch_size=batch_size, + shuffle=True, + pin_memory=device.type != "cpu", + drop_last=True, +) + +# Number of training steps and logging frequency +training_steps = 1 +log_freq = 1 + +# Run training loop +step = 0 +done = False +while not done: + for batch in dataloader: + batch = preprocessor(batch) + loss, _ = policy.forward(batch) + loss.backward() + optimizer.step() + optimizer.zero_grad() + + if step % log_freq == 0: + print(f"step: {step} loss: {loss.item():.3f}") + step += 1 + if step >= training_steps: + done = True + break + +# Save the policy checkpoint, alongside the pre/post processors +policy.save_pretrained(output_directory) +preprocessor.save_pretrained(output_directory) +postprocessor.save_pretrained(output_directory) + +# Save all assets to the Hub 
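+# Pushing requires being logged in to the Hugging Face Hub with a token that has write access;
+# if you trained your own checkpoint, replace the repo id below with one under your own namespace.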
+policy.push_to_hub("fracapuano/robot_learning_tutorial_act") +preprocessor.push_to_hub("fracapuano/robot_learning_tutorial_act") +postprocessor.push_to_hub("fracapuano/robot_learning_tutorial_act") diff --git a/examples/tutorial/act/act_using_example.py b/examples/tutorial/act/act_using_example.py new file mode 100644 index 0000000000000000000000000000000000000000..0dde55efb4666ef0618bb2b1ca704eea43dc83a7 --- /dev/null +++ b/examples/tutorial/act/act_using_example.py @@ -0,0 +1,57 @@ +import torch + +from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig +from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata +from lerobot.policies.act.modeling_act import ACTPolicy +from lerobot.policies.factory import make_pre_post_processors +from lerobot.policies.utils import build_inference_frame, make_robot_action +from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig +from lerobot.robots.so100_follower.so100_follower import SO100Follower + +device = torch.device("mps") # or "cuda" or "cpu" +model_id = "fracapuano/robot_learning_tutorial_act" +model = ACTPolicy.from_pretrained(model_id) + +dataset_id = "lerobot/svla_so101_pickplace" +# This only downloads the metadata for the dataset, ~10s of MB even for large-scale datasets +dataset_metadata = LeRobotDatasetMetadata(dataset_id) +preprocess, postprocess = make_pre_post_processors(model.config, dataset_stats=dataset_metadata.stats) + +# # find ports using lerobot-find-port +follower_port = ... # something like "/dev/tty.usbmodem58760431631" + +# # the robot ids are used the load the right calibration files +follower_id = ... # something like "follower_so100" + +MAX_EPISODES = 5 +MAX_STEPS_PER_EPISODE = 20 + +# Robot and environment configuration +# Camera keys must match the name and resolutions of the ones used for training! +# You can check the camera keys expected by a model in the info.json card on the model card on the Hub +camera_config = { + "side": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30), + "up": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30), +} + +robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id, cameras=camera_config) +robot = SO100Follower(robot_cfg) +robot.connect() + +for _ in range(MAX_EPISODES): + for _ in range(MAX_STEPS_PER_EPISODE): + obs = robot.get_observation() + obs_frame = build_inference_frame( + observation=obs, ds_features=dataset_metadata.features, device=device + ) + + obs = preprocess(obs_frame) + + action = model.select_action(obs) + action = postprocess(action) + + action = make_robot_action(action, dataset_metadata.features) + + robot.send_action(action) + + print("Episode finished! Starting new episode...") diff --git a/examples/tutorial/async-inf/policy_server.py b/examples/tutorial/async-inf/policy_server.py new file mode 100644 index 0000000000000000000000000000000000000000..6456d53f8dcf45130432645938e65e9d2f0db356 --- /dev/null +++ b/examples/tutorial/async-inf/policy_server.py @@ -0,0 +1,11 @@ +from lerobot.async_inference.configs import PolicyServerConfig +from lerobot.async_inference.policy_server import serve + +host = ... # something like "127.0.0.1" if you're exposing to localhost +port = ... 
# something like 8080
+
+config = PolicyServerConfig(
+    host=host,
+    port=port,
+)
+serve(config)
diff --git a/examples/tutorial/async-inf/robot_client.py b/examples/tutorial/async-inf/robot_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..4eba1d6929cb85b2f906ab44d9283f9e98bac4fb
--- /dev/null
+++ b/examples/tutorial/async-inf/robot_client.py
@@ -0,0 +1,55 @@
+import threading
+
+from lerobot.async_inference.configs import RobotClientConfig
+from lerobot.async_inference.helpers import visualize_action_queue_size
+from lerobot.async_inference.robot_client import RobotClient
+from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
+from lerobot.robots.so100_follower import SO100FollowerConfig
+
+# these cameras must match the ones expected by the policy - find your cameras with lerobot-find-cameras
+# check the config.json on the Hub for the policy you are using to see the expected camera specs
+camera_cfg = {
+    "up": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30),
+    "side": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30),
+}
+
+# find ports using lerobot-find-port
+follower_port = ...  # something like "/dev/tty.usbmodem58760431631"
+
+# the robot ids are used to load the right calibration files
+follower_id = ...  # something like "follower_so100"
+
+robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id, cameras=camera_cfg)
+
+server_address = ...  # something like "127.0.0.1:8080" if using localhost
+
+# 3. Create client configuration
+client_cfg = RobotClientConfig(
+    robot=robot_cfg,
+    server_address=server_address,
+    policy_device="mps",
+    policy_type="act",
+    pretrained_name_or_path="fracapuano/robot_learning_tutorial_act",
+    chunk_size_threshold=0.5,  # request new actions when the queue falls below this fraction of a chunk
+    actions_per_chunk=50,  # make sure this is less than the max actions of the policy
+)
+
+# 4. Create and start client
+client = RobotClient(client_cfg)
+
+# 5. Provide a textual description of the task
+task = ...
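+# e.g. task = "pick up the cube and place it in the bin"  (hypothetical example; describe your own task)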
+ +if client.start(): + # Start action receiver thread + action_receiver_thread = threading.Thread(target=client.receive_actions, daemon=True) + action_receiver_thread.start() + + try: + # Run the control loop + client.control_loop(task) + except KeyboardInterrupt: + client.stop() + action_receiver_thread.join() + # (Optionally) plot the action queue size + visualize_action_queue_size(client.action_queue_size) diff --git a/examples/tutorial/diffusion/diffusion_training_example.py b/examples/tutorial/diffusion/diffusion_training_example.py new file mode 100644 index 0000000000000000000000000000000000000000..c34d00babf8eb89419226405b5ff3e569a6b1b48 --- /dev/null +++ b/examples/tutorial/diffusion/diffusion_training_example.py @@ -0,0 +1,99 @@ +"""This script demonstrates how to train Diffusion Policy on a real-world dataset.""" + +from pathlib import Path + +import torch + +from lerobot.configs.types import FeatureType +from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata +from lerobot.datasets.utils import dataset_to_policy_features +from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig +from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy +from lerobot.policies.factory import make_pre_post_processors + + +def make_delta_timestamps(delta_indices: list[int] | None, fps: int) -> list[float]: + if delta_indices is None: + return [0] + + return [i / fps for i in delta_indices] + + +output_directory = Path("outputs/robot_learning_tutorial/diffusion") +output_directory.mkdir(parents=True, exist_ok=True) + +# Select your device +device = torch.device("mps") # or "cuda" or "cpu" + +dataset_id = "lerobot/svla_so101_pickplace" + +# This specifies the inputs the model will be expecting and the outputs it will produce +dataset_metadata = LeRobotDatasetMetadata(dataset_id) +features = dataset_to_policy_features(dataset_metadata.features) + +output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} +input_features = {key: ft for key, ft in features.items() if key not in output_features} + +cfg = DiffusionConfig(input_features=input_features, output_features=output_features) +policy = DiffusionPolicy(cfg) +preprocessor, postprocessor = make_pre_post_processors(cfg, dataset_stats=dataset_metadata.stats) + +policy.train() +policy.to(device) + +# To perform action chunking, ACT expects a given number of actions as targets +delta_timestamps = { + "observation.state": make_delta_timestamps(cfg.observation_delta_indices, dataset_metadata.fps), + "action": make_delta_timestamps(cfg.action_delta_indices, dataset_metadata.fps), +} + +# add image features if they are present +delta_timestamps |= { + k: make_delta_timestamps(cfg.observation_delta_indices, dataset_metadata.fps) for k in cfg.image_features +} + +# Instantiate the dataset +dataset = LeRobotDataset(dataset_id, delta_timestamps=delta_timestamps) + +# Create the optimizer and dataloader for offline training +optimizer = cfg.get_optimizer_preset().build(policy.parameters()) +batch_size = 32 +dataloader = torch.utils.data.DataLoader( + dataset, + batch_size=batch_size, + shuffle=True, + pin_memory=device.type != "cpu", + drop_last=True, +) + +# Number of training steps and logging frequency +training_steps = 1 +log_freq = 1 + +# Run training loop +step = 0 +done = False +while not done: + for batch in dataloader: + batch = preprocessor(batch) + loss, _ = policy.forward(batch) + loss.backward() + optimizer.step() + optimizer.zero_grad() + + if step 
% log_freq == 0: + print(f"step: {step} loss: {loss.item():.3f}") + step += 1 + if step >= training_steps: + done = True + break + +# Save the policy checkpoint, alongside the pre/post processors +policy.save_pretrained(output_directory) +preprocessor.save_pretrained(output_directory) +postprocessor.save_pretrained(output_directory) + +# Save all assets to the Hub +policy.push_to_hub("fracapuano/robot_learning_tutorial_diffusion") +preprocessor.push_to_hub("fracapuano/robot_learning_tutorial_diffusion") +postprocessor.push_to_hub("fracapuano/robot_learning_tutorial_diffusion") diff --git a/examples/tutorial/diffusion/diffusion_using_example.py b/examples/tutorial/diffusion/diffusion_using_example.py new file mode 100644 index 0000000000000000000000000000000000000000..5caccfc995e62edacd0f81c83cf293da31c3e8cb --- /dev/null +++ b/examples/tutorial/diffusion/diffusion_using_example.py @@ -0,0 +1,60 @@ +import torch + +from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig +from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata +from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy +from lerobot.policies.factory import make_pre_post_processors +from lerobot.policies.utils import build_inference_frame, make_robot_action +from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig +from lerobot.robots.so100_follower.so100_follower import SO100Follower + +device = torch.device("mps") # or "cuda" or "cpu" +model_id = "fracapuano/robot_learning_tutorial_diffusion" + +model = DiffusionPolicy.from_pretrained(model_id) + +dataset_id = "lerobot/svla_so101_pickplace" +# This only downloads the metadata for the dataset, ~10s of MB even for large-scale datasets +dataset_metadata = LeRobotDatasetMetadata(dataset_id) +preprocess, postprocess = make_pre_post_processors( + model.config, model_id, dataset_stats=dataset_metadata.stats +) + +MAX_EPISODES = 5 +MAX_STEPS_PER_EPISODE = 20 + + +# # find ports using lerobot-find-port +follower_port = ... # something like "/dev/tty.usbmodem58760431631" + +# # the robot ids are used the load the right calibration files +follower_id = ... # something like "follower_so100" + +# Robot and environment configuration +# Camera keys must match the name and resolutions of the ones used for training! +# You can check the camera keys expected by a model in the info.json card on the model card on the Hub +camera_config = { + "side": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30), + "up": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30), +} + +robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id, cameras=camera_config) +robot = SO100Follower(robot_cfg) +robot.connect() + + +for _ in range(MAX_EPISODES): + for _ in range(MAX_STEPS_PER_EPISODE): + obs = robot.get_observation() + obs_frame = build_inference_frame( + observation=obs, ds_features=dataset_metadata.features, device=device + ) + + obs = preprocess(obs_frame) + + action = model.select_action(obs) + action = postprocess(action) + action = make_robot_action(action, dataset_metadata.features) + robot.send_action(action) + + print("Episode finished! 
Starting new episode...") diff --git a/examples/tutorial/pi0/using_pi0_example.py b/examples/tutorial/pi0/using_pi0_example.py new file mode 100644 index 0000000000000000000000000000000000000000..5a9305f668ee86434cbe69df2f18318b7d5b4a8d --- /dev/null +++ b/examples/tutorial/pi0/using_pi0_example.py @@ -0,0 +1,67 @@ +import torch + +from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig +from lerobot.datasets.utils import hw_to_dataset_features +from lerobot.policies.factory import make_pre_post_processors +from lerobot.policies.pi0.modeling_pi0 import PI0Policy +from lerobot.policies.utils import build_inference_frame, make_robot_action +from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig +from lerobot.robots.so100_follower.so100_follower import SO100Follower + +MAX_EPISODES = 5 +MAX_STEPS_PER_EPISODE = 20 + +device = torch.device("mps") # or "cuda" or "cpu" +model_id = "lerobot/pi0_base" + +model = PI0Policy.from_pretrained(model_id) + +preprocess, postprocess = make_pre_post_processors( + model.config, + model_id, + # This overrides allows to run on MPS, otherwise defaults to CUDA (if available) + preprocessor_overrides={"device_processor": {"device": str(device)}}, +) + +# find ports using lerobot-find-port +follower_port = ... # something like "/dev/tty.usbmodem58760431631" + +# the robot ids are used the load the right calibration files +follower_id = ... # something like "follower_so100" + +# Robot and environment configuration +# Camera keys must match the name and resolutions of the ones used for training! +# You can check the camera keys expected by a model in the info.json card on the model card on the Hub +camera_config = { + "base_0_rgb": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30), + "left_wrist_0_rgb": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30), + "right_wrist_0_rgb": OpenCVCameraConfig(index_or_path=2, width=640, height=480, fps=30), +} + +robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id, cameras=camera_config) +robot = SO100Follower(robot_cfg) +robot.connect() + +task = "" # something like "pick the red block" +robot_type = "" # something like "so100_follower" for multi-embodiment datasets + +# This is used to match the raw observation keys to the keys expected by the policy +action_features = hw_to_dataset_features(robot.action_features, "action") +obs_features = hw_to_dataset_features(robot.observation_features, "observation") +dataset_features = {**action_features, **obs_features} + +for _ in range(MAX_EPISODES): + for _ in range(MAX_STEPS_PER_EPISODE): + obs = robot.get_observation() + obs_frame = build_inference_frame( + observation=obs, ds_features=dataset_features, device=device, task=task, robot_type=robot_type + ) + + obs = preprocess(obs_frame) + + action = model.select_action(obs) + action = postprocess(action) + action = make_robot_action(action, dataset_features) + robot.send_action(action) + + print("Episode finished! 
Starting new episode...") diff --git a/examples/tutorial/rl/hilserl_example.py b/examples/tutorial/rl/hilserl_example.py new file mode 100644 index 0000000000000000000000000000000000000000..9be9dabc3acb7182969a71a5bc166824385e2d6c --- /dev/null +++ b/examples/tutorial/rl/hilserl_example.py @@ -0,0 +1,345 @@ +import multiprocessing as mp +import signal +from pathlib import Path +from queue import Empty, Full + +import torch +import torch.optim as optim + +from lerobot.datasets.lerobot_dataset import LeRobotDataset +from lerobot.datasets.utils import hw_to_dataset_features +from lerobot.envs.configs import HILSerlProcessorConfig, HILSerlRobotEnvConfig +from lerobot.policies.sac.configuration_sac import SACConfig +from lerobot.policies.sac.modeling_sac import SACPolicy +from lerobot.policies.sac.reward_model.modeling_classifier import Classifier +from lerobot.rl.buffer import ReplayBuffer +from lerobot.rl.gym_manipulator import make_robot_env +from lerobot.robots.so100_follower import SO100FollowerConfig +from lerobot.teleoperators.so100_leader import SO100LeaderConfig +from lerobot.teleoperators.utils import TeleopEvents + +LOG_EVERY = 10 +SEND_EVERY = 10 + + +def run_learner( + transitions_queue: mp.Queue, + parameters_queue: mp.Queue, + shutdown_event: mp.Event, + policy_learner: SACPolicy, + online_buffer: ReplayBuffer, + offline_buffer: ReplayBuffer, + lr: float = 3e-4, + batch_size: int = 32, + device: torch.device = "mps", +): + """The learner process - trains SAC policy on transitions streamed from the actor, updating parameters + for the actor to adopt.""" + policy_learner.train() + policy_learner.to(device) + + # Create Adam optimizer from scratch - simple and clean + optimizer = optim.Adam(policy_learner.parameters(), lr=lr) + + print(f"[LEARNER] Online buffer capacity: {online_buffer.capacity}") + print(f"[LEARNER] Offline buffer capacity: {offline_buffer.capacity}") + + training_step = 0 + + while not shutdown_event.is_set(): + # retrieve incoming transitions from the actor process + try: + transitions = transitions_queue.get(timeout=0.1) + for transition in transitions: + # HIL-SERL: Add ALL transitions to online buffer + online_buffer.add(**transition) + + # HIL-SERL: Add ONLY human intervention transitions to offline buffer + is_intervention = transition.get("complementary_info", {}).get("is_intervention", False) + if is_intervention: + offline_buffer.add(**transition) + print( + f"[LEARNER] Human intervention detected! Added to offline buffer (now {len(offline_buffer)} transitions)" + ) + + except Empty: + pass # No transitions available, continue + + # Train if we have enough data + if len(online_buffer) >= policy_learner.config.online_step_before_learning: + # Sample from online buffer (autonomous + human data) + online_batch = online_buffer.sample(batch_size // 2) + + # Sample from offline buffer (human demonstrations only, either precollected or at runtime) + offline_batch = offline_buffer.sample(batch_size // 2) + + # Combine batches - this is the key HIL-SERL mechanism! 
+ batch = {} + for key in online_batch: + if key in offline_batch: + batch[key] = torch.cat([online_batch[key], offline_batch[key]], dim=0) + else: + batch[key] = online_batch[key] + + loss, _ = policy_learner.forward(batch) + + optimizer.zero_grad() + loss.backward() + optimizer.step() + training_step += 1 + + if training_step % LOG_EVERY == 0: + print( + f"[LEARNER] Training step {training_step}, Loss: {loss.item():.4f}, " + f"Buffers: Online={len(online_buffer)}, Offline={len(offline_buffer)}" + ) + + # Send updated parameters to actor every 10 training steps + if training_step % SEND_EVERY == 0: + try: + state_dict = {k: v.cpu() for k, v in policy_learner.state_dict().items()} + parameters_queue.put_nowait(state_dict) + print("[LEARNER] Sent updated parameters to actor") + except Full: + # Missing write due to queue not being consumed (should happen rarely) + pass + + print("[LEARNER] Learner process finished") + + +def run_actor( + transitions_queue: mp.Queue, + parameters_queue: mp.Queue, + shutdown_event: mp.Event, + policy_actor: SACPolicy, + reward_classifier: Classifier, + env_cfg: HILSerlRobotEnvConfig, + device: torch.device = "mps", + output_directory: Path | None = None, +): + """The actor process - interacts with environment and collects data. + The policy is frozen and only the parameters are updated, popping the most recent ones from a queue.""" + policy_actor.eval() + policy_actor.to(device) + + reward_classifier.eval() + reward_classifier.to(device) + + # Create robot environment inside the actor process + env, teleop_device = make_robot_env(env_cfg) + + try: + for episode in range(MAX_EPISODES): + if shutdown_event.is_set(): + break + + obs, _info = env.reset() + episode_reward = 0.0 + step = 0 + episode_transitions = [] + + print(f"[ACTOR] Starting episode {episode + 1}") + + while step < MAX_STEPS_PER_EPISODE and not shutdown_event.is_set(): + try: + new_params = parameters_queue.get_nowait() + policy_actor.load_state_dict(new_params) + print("[ACTOR] Updated policy parameters from learner") + except Empty: # No new updated parameters available from learner, waiting + pass + + # Get action from policy + policy_obs = make_policy_obs(obs, device=device) + action_tensor = policy_actor.select_action(policy_obs) # predicts a single action + action = action_tensor.squeeze(0).cpu().numpy() + + # Step environment + next_obs, _env_reward, terminated, truncated, _info = env.step(action) + done = terminated or truncated + + # Predict reward + policy_next_obs = make_policy_obs(next_obs, device=device) + reward = reward_classifier.predict_reward(policy_next_obs) + + if reward >= 1.0 and not done: # success detected! 
halt episode + terminated = True + done = True + + # In HIL-SERL, human interventions come from the teleop device + is_intervention = False + if hasattr(teleop_device, "get_teleop_events"): + # Real intervention detection from teleop device + teleop_events = teleop_device.get_teleop_events() + is_intervention = teleop_events.get(TeleopEvents.IS_INTERVENTION, False) + + # Store transition with intervention metadata + transition = { + "state": policy_obs, + "action": action, + "reward": float(reward) if hasattr(reward, "item") else reward, + "next_state": policy_next_obs, + "done": done, + "truncated": truncated, + "complementary_info": { + "is_intervention": is_intervention, + }, + } + + episode_transitions.append(transition) + + episode_reward += reward + step += 1 + + obs = next_obs + + if done: + break + + # Send episode transitions to learner + transitions_queue.put_nowait(episode_transitions) + + except KeyboardInterrupt: + print("[ACTOR] Interrupted by user") + finally: + # Clean up + if hasattr(env, "robot") and env.robot.is_connected: + env.robot.disconnect() + if teleop_device and hasattr(teleop_device, "disconnect"): + teleop_device.disconnect() + if output_directory is not None: + policy_actor.save_pretrained(output_directory) + print(f"[ACTOR] Latest actor policy saved at: {output_directory}") + + print("[ACTOR] Actor process finished") + + +def make_policy_obs(obs, device: torch.device = "cpu"): + return { + "observation.state": torch.from_numpy(obs["agent_pos"]).float().unsqueeze(0).to(device), + **{ + f"observation.image.{k}": torch.from_numpy(obs["pixels"][k]).float().unsqueeze(0).to(device) + for k in obs["pixels"] + }, + } + + +"""Main function - coordinates actor and learner processes.""" + +device = "mps" # or "cuda" or "cpu" +output_directory = Path("outputs/robot_learning_tutorial/hil_serl") +output_directory.mkdir(parents=True, exist_ok=True) + +# find ports using lerobot-find-port +follower_port = ... +leader_port = ... + +# the robot ids are used the load the right calibration files +follower_id = ... +leader_id = ... + +# A pretrained model (to be used in-distribution!) 
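+# The classifier predicts a success reward from the current observation (see reward_classifier.predict_reward
+# in run_actor above); reward_classifier_example.py, added alongside this example, shows how to train one on your own data.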
+reward_classifier_id = "fracapuano/reward_classifier_hil_serl_example"
+reward_classifier = Classifier.from_pretrained(reward_classifier_id)
+
+reward_classifier.to(device)
+reward_classifier.eval()
+
+MAX_EPISODES = 5
+MAX_STEPS_PER_EPISODE = 20
+
+# Robot and environment configuration
+robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id)
+teleop_cfg = SO100LeaderConfig(port=leader_port, id=leader_id)
+processor_cfg = HILSerlProcessorConfig(control_mode="leader")
+
+env_cfg = HILSerlRobotEnvConfig(robot=robot_cfg, teleop=teleop_cfg, processor=processor_cfg)
+
+# Create robot environment
+env, teleop_device = make_robot_env(env_cfg)
+
+obs_features = hw_to_dataset_features(env.robot.observation_features, "observation")
+action_features = hw_to_dataset_features(env.robot.action_features, "action")
+
+# Create SAC policy for action selection
+policy_cfg = SACConfig(
+    device=device,
+    input_features=obs_features,
+    output_features=action_features,
+)
+
+policy_actor = SACPolicy(policy_cfg)
+policy_learner = SACPolicy(policy_cfg)
+
+demonstrations_repo_id = "lerobot/example_hil_serl_dataset"
+offline_dataset = LeRobotDataset(repo_id=demonstrations_repo_id)
+
+# Online buffer: initialized from scratch
+online_replay_buffer = ReplayBuffer(device=device, state_keys=list(obs_features.keys()))
+# Offline buffer: created from the dataset (pre-populated with demonstrations)
+offline_replay_buffer = ReplayBuffer.from_lerobot_dataset(
+    lerobot_dataset=offline_dataset, device=device, state_keys=list(obs_features.keys())
+)
+
+# Create communication channels between learner and actor processes
+transitions_queue = mp.Queue(maxsize=10)
+parameters_queue = mp.Queue(maxsize=2)
+shutdown_event = mp.Event()
+
+
+# Signal handler for graceful shutdown (signal handlers receive both the signal number and the current frame)
+def signal_handler(sig, frame):
+    print(f"\nSignal {sig} received, shutting down...")
+    shutdown_event.set()
+
+
+signal.signal(signal.SIGINT, signal_handler)
+signal.signal(signal.SIGTERM, signal_handler)
+
+# Create processes
+learner_process = mp.Process(
+    target=run_learner,
+    args=(
+        transitions_queue,
+        parameters_queue,
+        shutdown_event,
+        policy_learner,
+        online_replay_buffer,
+        offline_replay_buffer,
+    ),
+    kwargs={"device": device},  # can run on accelerated hardware for training
+)
+
+actor_process = mp.Process(
+    target=run_actor,
+    args=(
+        transitions_queue,
+        parameters_queue,
+        shutdown_event,
+        policy_actor,
+        reward_classifier,
+        env_cfg,
+    ),
+    # the actor is frozen, so it can run on CPU (or on an accelerator for faster inference)
+    kwargs={"device": "cpu", "output_directory": output_directory},
+)
+
+learner_process.start()
+actor_process.start()
+
+try:
+    # Wait for actor to finish (it controls the episode loop)
+    actor_process.join()
+    shutdown_event.set()
+    learner_process.join(timeout=10)
+
+except KeyboardInterrupt:
+    print("Main process interrupted")
+    shutdown_event.set()
+    actor_process.join(timeout=5)
+    learner_process.join(timeout=10)
+
+finally:
+    if learner_process.is_alive():
+        learner_process.terminate()
+    if actor_process.is_alive():
+        actor_process.terminate()
diff --git a/examples/tutorial/rl/reward_classifier_example.py b/examples/tutorial/rl/reward_classifier_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd1b676b73fdd9e71a8a6fd4ada2d387ef3d054f
--- /dev/null
+++ b/examples/tutorial/rl/reward_classifier_example.py
@@ -0,0 +1,62 @@
+import torch
+
+from lerobot.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.policies.factory import make_policy, make_pre_post_processors
+from
lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig + +# Device to use for training +device = "mps" # or "cuda", or "cpu" + +# Load the dataset used for training +repo_id = "lerobot/example_hil_serl_dataset" +dataset = LeRobotDataset(repo_id) + +# Configure the policy to extract features from the image frames +camera_keys = dataset.meta.camera_keys + +config = RewardClassifierConfig( + num_cameras=len(camera_keys), + device=device, + # backbone model to extract features from the image frames + model_name="microsoft/resnet-18", +) + +# Make policy, preprocessor, and optimizer +policy = make_policy(config, ds_meta=dataset.meta) +optimizer = config.get_optimizer_preset().build(policy.parameters()) +preprocessor, _ = make_pre_post_processors(policy_cfg=config, dataset_stats=dataset.meta.stats) + + +classifier_id = "fracapuano/reward_classifier_hil_serl_example" + +# Instantiate a dataloader +dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True) + +# Training loop +num_epochs = 5 +for epoch in range(num_epochs): + total_loss = 0 + total_accuracy = 0 + for batch in dataloader: + # Preprocess the batch and move it to the correct device. + batch = preprocessor(batch) + + # Forward pass + loss, output_dict = policy.forward(batch) + + # Backward pass and optimization + optimizer.zero_grad() + loss.backward() + optimizer.step() + + total_loss += loss.item() + total_accuracy += output_dict["accuracy"] + + avg_loss = total_loss / len(dataloader) + avg_accuracy = total_accuracy / len(dataloader) + print(f"Epoch {epoch + 1}/{num_epochs}, Loss: {avg_loss:.4f}, Accuracy: {avg_accuracy:.2f}%") + +print("Training finished!") + +# You can now save the trained policy. +policy.push_to_hub(classifier_id) diff --git a/examples/tutorial/smolvla/using_smolvla_example.py b/examples/tutorial/smolvla/using_smolvla_example.py new file mode 100644 index 0000000000000000000000000000000000000000..2da127f24760713297eabcb6a6a6f0f507da5a4a --- /dev/null +++ b/examples/tutorial/smolvla/using_smolvla_example.py @@ -0,0 +1,66 @@ +import torch + +from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig +from lerobot.datasets.utils import hw_to_dataset_features +from lerobot.policies.factory import make_pre_post_processors +from lerobot.policies.smolvla.modeling_smolvla import SmolVLAPolicy +from lerobot.policies.utils import build_inference_frame, make_robot_action +from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig +from lerobot.robots.so100_follower.so100_follower import SO100Follower + +MAX_EPISODES = 5 +MAX_STEPS_PER_EPISODE = 20 + +device = torch.device("mps") # or "cuda" or "cpu" +model_id = "lerobot/smolvla_base" + +model = SmolVLAPolicy.from_pretrained(model_id) + +preprocess, postprocess = make_pre_post_processors( + model.config, + model_id, + # This overrides allows to run on MPS, otherwise defaults to CUDA (if available) + preprocessor_overrides={"device_processor": {"device": str(device)}}, +) + +# find ports using lerobot-find-port +follower_port = ... # something like "/dev/tty.usbmodem58760431631" + +# the robot ids are used the load the right calibration files +follower_id = ... # something like "follower_so100" + +# Robot and environment configuration +# Camera keys must match the name and resolutions of the ones used for training! 
+# You can check the camera keys expected by a model in the info.json card on the model card on the Hub +camera_config = { + "camera1": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30), + "camera2": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30), +} + +robot_cfg = SO100FollowerConfig(port=follower_port, id=follower_id, cameras=camera_config) +robot = SO100Follower(robot_cfg) +robot.connect() + +task = "" # something like "pick the red block" +robot_type = "" # something like "so100_follower" for multi-embodiment datasets + +# This is used to match the raw observation keys to the keys expected by the policy +action_features = hw_to_dataset_features(robot.action_features, "action") +obs_features = hw_to_dataset_features(robot.observation_features, "observation") +dataset_features = {**action_features, **obs_features} + +for _ in range(MAX_EPISODES): + for _ in range(MAX_STEPS_PER_EPISODE): + obs = robot.get_observation() + obs_frame = build_inference_frame( + observation=obs, ds_features=dataset_features, device=device, task=task, robot_type=robot_type + ) + + obs = preprocess(obs_frame) + + action = model.select_action(obs) + action = postprocess(action) + action = make_robot_action(action, dataset_features) + robot.send_action(action) + + print("Episode finished! Starting new episode...") diff --git a/media/gym/aloha_act.gif b/media/gym/aloha_act.gif new file mode 100644 index 0000000000000000000000000000000000000000..18a7f3fe635760741f73561909be7f250a73677a --- /dev/null +++ b/media/gym/aloha_act.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f10f8a91a65c62a844a255c17dd30c820c1c805c13af00fb49c6499b7cbed849 +size 3027981 diff --git a/media/gym/pusht_diffusion.gif b/media/gym/pusht_diffusion.gif new file mode 100644 index 0000000000000000000000000000000000000000..2efff2a3b1ce7781a40ea247a7e4fcf2895b7e12 --- /dev/null +++ b/media/gym/pusht_diffusion.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4599f556ab12717698774d14781c1e5b9afbf1c57df0a9c6cc25ca2a3032fecc +size 189800 diff --git a/media/gym/simxarm_tdmpc.gif b/media/gym/simxarm_tdmpc.gif new file mode 100644 index 0000000000000000000000000000000000000000..191d9104f09857bfae198ec6482e2132af282f5b --- /dev/null +++ b/media/gym/simxarm_tdmpc.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11739ff20376cd0a3deee86b98b493f25b32ff03e975624044e2c3b80b35302c +size 475007 diff --git a/media/hope_jr/hopejr.png b/media/hope_jr/hopejr.png new file mode 100644 index 0000000000000000000000000000000000000000..d872985436687fcb2cc30349eab57505fe3885bd --- /dev/null +++ b/media/hope_jr/hopejr.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad955fee42d543c4fb9c2ada45106dd0268367f72330d1c81fbb50fadb6f4f4c +size 73277 diff --git a/media/lekiwi/kiwi.webp b/media/lekiwi/kiwi.webp new file mode 100644 index 0000000000000000000000000000000000000000..1d5596cfd0c1d876897b959bb9eb9d075277274c --- /dev/null +++ b/media/lekiwi/kiwi.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e17d0417db3f5481437814e31b469546fe4e9ae7b8aa69b2180c97e1f3393d6c +size 224412 diff --git a/media/lerobot-logo-light.png b/media/lerobot-logo-light.png new file mode 100644 index 0000000000000000000000000000000000000000..c9b1011d256dfd138817722a4ba9edf9b71d3aac --- /dev/null +++ b/media/lerobot-logo-light.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:5bf6cd4c35a017f0d498be07cd4d3f004ee3a023c43383abdeb13163a591be0b +size 204038 diff --git a/media/lerobot-logo-thumbnail.png b/media/lerobot-logo-thumbnail.png new file mode 100644 index 0000000000000000000000000000000000000000..d14daf6ada759f06fadc9684c4b5f5e01756d5ab --- /dev/null +++ b/media/lerobot-logo-thumbnail.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72ee48061c2528eb9f6a1f163622d4805476a52c813bd03f2f32e32d89afd63e +size 164066 diff --git a/media/so100/leader_follower.webp b/media/so100/leader_follower.webp new file mode 100644 index 0000000000000000000000000000000000000000..cbb688378cc5cf841f872242237304aefdcfc3f8 --- /dev/null +++ b/media/so100/leader_follower.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d62790391c7536c661cbd8eb2a4c5012d0fc44873f0811a75b7adea0c5d556a6 +size 120188 diff --git a/media/so101/so101-leader.webp b/media/so101/so101-leader.webp new file mode 100644 index 0000000000000000000000000000000000000000..29df0472c64769f9a464acaa4c73c23125affdec --- /dev/null +++ b/media/so101/so101-leader.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f60d1c14a0d322d077c5a239bf9ee3599a1488e7234706eed35f6986366961b +size 154650 diff --git a/media/so101/so101.webp b/media/so101/so101.webp new file mode 100644 index 0000000000000000000000000000000000000000..486f65bdb22b9adf6d651aa12d048ee0b1fa0a64 --- /dev/null +++ b/media/so101/so101.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:855809851ecf2ac5a28b2f0050b4baca3adc5a18c5175908399f9c6a52dd6877 +size 133522 diff --git a/media/wandb.png b/media/wandb.png new file mode 100644 index 0000000000000000000000000000000000000000..c0a834dfe1e0f2a36c58651f5348ac38daa11b83 --- /dev/null +++ b/media/wandb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd6f74a6d07d26d4252246ed85c6c6719e0b8011c6f070e51d5521d7bb24dc67 +size 416489 diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..4c644941ff014b61133b0a8d9d4c11cf7863a261 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,365 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +[build-system] +requires = ["setuptools"] +build-backend = "setuptools.build_meta" + +[project.urls] +homepage = "https://huggingface.co/lerobot" +documentation = "https://huggingface.co/docs/lerobot/index" +source = "https://github.com/huggingface/lerobot" +issues = "https://github.com/huggingface/lerobot/issues" +discord = "https://discord.gg/s3KuuzsPFb" + +[project] +name = "lerobot" +version = "0.4.2" +description = "🤗 LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch" +readme = "README.md" +license = { text = "Apache-2.0" } +requires-python = ">=3.10" +authors = [ + { name = "Rémi Cadène", email = "re.cadene@gmail.com" }, + { name = "Simon Alibert", email = "alibert.sim@gmail.com" }, + { name = "Alexander Soare", email = "alexander.soare159@gmail.com" }, + { name = "Quentin Gallouédec", email = "quentin.gallouedec@ec-lyon.fr" }, + { name = "Steven Palma", email = "imstevenpmwork@ieee.org" }, + { name = "Pepijn Kooijmans", email = "pepijnkooijmans@outlook.com"}, + { name = "Michel Aractingi", email = "michel.aractingi@gmail.com"}, + { name = "Adil Zouitine", email = "adilzouitinegm@gmail.com" }, + { name = "Dana Aubakirova", email = "danaaubakirova17@gmail.com"}, + { name = "Caroline Pascal", email = "caroline8.pascal@gmail.com"}, + { name = "Martino Russi", email = "nopyeps@gmail.com"}, + { name = "Thomas Wolf", email = "thomaswolfcontact@gmail.com" }, +] +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3.10", + "Topic :: Software Development :: Build Tools", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] +keywords = ["lerobot", "huggingface", "robotics", "machine learning", "artificial intelligence"] + +dependencies = [ + + # Hugging Face dependencies + "datasets>=4.0.0,<4.2.0", + "diffusers>=0.27.2,<0.36.0", + "huggingface-hub[hf-transfer,cli]>=0.34.2,<0.36.0", + "accelerate>=1.10.0,<2.0.0", + + # Core dependencies + "setuptools>=71.0.0,<81.0.0", + "cmake>=3.29.0.1,<4.2.0", + "einops>=0.8.0,<0.9.0", + "opencv-python-headless>=4.9.0,<4.13.0", + "av>=15.0.0,<16.0.0", + "jsonlines>=4.0.0,<5.0.0", + "packaging>=24.2,<26.0", + "pynput>=1.7.7,<1.9.0", + "pyserial>=3.5,<4.0", + "wandb>=0.20.0,<0.22.0", # TODO: Bumb dependency (compatible with protobuf) + + "torch>=2.2.1,<2.8.0", # TODO: Bumb dependency + "torchcodec>=0.2.1,<0.6.0; sys_platform != 'win32' and (sys_platform != 'linux' or (platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l')) and (sys_platform != 'darwin' or platform_machine != 'x86_64')", # TODO: Bumb dependency + "torchvision>=0.21.0,<0.23.0", # TODO: Bumb dependency + + "draccus==0.10.0", # TODO: Remove == + "gymnasium>=1.1.1,<2.0.0", + "rerun-sdk>=0.24.0,<0.27.0", + + # Support dependencies + "deepdiff>=7.0.1,<9.0.0", + "imageio[ffmpeg]>=2.34.0,<3.0.0", + "termcolor>=2.4.0,<4.0.0", +] + +# Optional dependencies +[project.optional-dependencies] + +# Common +pygame-dep = ["pygame>=2.5.1,<2.7.0"] +placo-dep = ["placo>=0.9.6,<0.10.0"] +transformers-dep = ["transformers>=4.53.0,<5.0.0"] +grpcio-dep = ["grpcio==1.73.1", "protobuf==6.31.0"] # TODO: Bumb dependency (compatible with wandb) + +# Motors +feetech = ["feetech-servo-sdk>=1.0.0,<2.0.0"] +dynamixel = ["dynamixel-sdk>=3.7.31,<3.9.0"] + +# Robots +gamepad = ["lerobot[pygame-dep]", "hidapi>=0.14.0,<0.15.0"] +hopejr = 
["lerobot[feetech]", "lerobot[pygame-dep]"] +lekiwi = ["lerobot[feetech]", "pyzmq>=26.2.1,<28.0.0"] +reachy2 = ["reachy2_sdk>=1.0.14,<1.1.0"] +kinematics = ["lerobot[placo-dep]"] +intelrealsense = [ + "pyrealsense2>=2.55.1.6486,<2.57.0 ; sys_platform != 'darwin'", + "pyrealsense2-macosx>=2.54,<2.55.0 ; sys_platform == 'darwin'", +] +phone = ["hebi-py>=2.8.0,<2.12.0", "teleop>=0.1.0,<0.2.0", "fastapi<1.0"] + +# Policies +pi = ["transformers @ git+https://github.com/huggingface/transformers.git@fix/lerobot_openpi"] +smolvla = ["lerobot[transformers-dep]", "num2words>=0.5.14,<0.6.0", "accelerate>=1.7.0,<2.0.0", "safetensors>=0.4.3,<1.0.0"] +groot = [ + "lerobot[transformers-dep]", + "peft>=0.13.0,<1.0.0", + "dm-tree>=0.1.8,<1.0.0", + "timm>=1.0.0,<1.1.0", + "safetensors>=0.4.3,<1.0.0", + "Pillow>=10.0.0,<13.0.0", + "decord>=0.6.0,<1.0.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64')", + "ninja>=1.11.1,<2.0.0", + "flash-attn>=2.5.9,<3.0.0 ; sys_platform != 'darwin'" +] +hilserl = ["lerobot[transformers-dep]", "gym-hil>=0.1.13,<0.2.0", "lerobot[grpcio-dep]", "lerobot[placo-dep]"] + +# Features +async = ["lerobot[grpcio-dep]", "matplotlib>=3.10.3,<4.0.0"] + +# Development +dev = ["pre-commit>=3.7.0,<5.0.0", "debugpy>=1.8.1,<1.9.0", "lerobot[grpcio-dep]", "grpcio-tools==1.73.1"] +test = ["pytest>=8.1.0,<9.0.0", "pytest-timeout>=2.4.0,<3.0.0", "pytest-cov>=5.0.0,<8.0.0", "mock-serial>=0.0.1,<0.1.0 ; sys_platform != 'win32'"] +video_benchmark = ["scikit-image>=0.23.2,<0.26.0", "pandas>=2.2.2,<2.4.0"] + +# Simulation +aloha = ["gym-aloha>=0.1.2,<0.2.0"] +pusht = ["gym-pusht>=0.1.5,<0.2.0", "pymunk>=6.6.0,<7.0.0"] # TODO: Fix pymunk version in gym-pusht instead +libero = ["lerobot[transformers-dep]", "hf-libero>=0.1.3,<0.2.0"] +metaworld = ["metaworld==3.0.0"] + +# All +all = [ + "lerobot[dynamixel]", + "lerobot[gamepad]", + "lerobot[hopejr]", + "lerobot[lekiwi]", + "lerobot[reachy2]", + "lerobot[kinematics]", + "lerobot[intelrealsense]", + "lerobot[pi]", + "lerobot[smolvla]", + # "lerobot[groot]", TODO(Steven): Gr00t requires specific installation instructions for flash-attn + "lerobot[hilserl]", + "lerobot[async]", + "lerobot[dev]", + "lerobot[test]", + "lerobot[video_benchmark]", + "lerobot[aloha]", + "lerobot[pusht]", + "lerobot[phone]", + "lerobot[libero]", + "lerobot[metaworld]", +] + +[project.scripts] +lerobot-calibrate="lerobot.scripts.lerobot_calibrate:main" +lerobot-find-cameras="lerobot.scripts.lerobot_find_cameras:main" +lerobot-find-port="lerobot.scripts.lerobot_find_port:main" +lerobot-record="lerobot.scripts.lerobot_record:main" +lerobot-replay="lerobot.scripts.lerobot_replay:main" +lerobot-setup-motors="lerobot.scripts.lerobot_setup_motors:main" +lerobot-teleoperate="lerobot.scripts.lerobot_teleoperate:main" +lerobot-eval="lerobot.scripts.lerobot_eval:main" +lerobot-train="lerobot.scripts.lerobot_train:main" +lerobot-dataset-viz="lerobot.scripts.lerobot_dataset_viz:main" +lerobot-info="lerobot.scripts.lerobot_info:main" +lerobot-find-joint-limits="lerobot.scripts.lerobot_find_joint_limits:main" +lerobot-imgtransform-viz="lerobot.scripts.lerobot_imgtransform_viz:main" +lerobot-edit-dataset="lerobot.scripts.lerobot_edit_dataset:main" + +# ---------------- Tool Configurations ---------------- +[tool.setuptools.packages.find] +where = ["src"] + +[tool.ruff] +target-version = "py310" +line-length = 110 +exclude = ["tests/artifacts/**/*.safetensors", "*_pb2.py", "*_pb2_grpc.py"] + +[tool.ruff.lint] +# E, W: pycodestyle errors and warnings +# F: PyFlakes +# I: isort +# UP: 
pyupgrade +# B: flake8-bugbear (good practices, potential bugs) +# C4: flake8-comprehensions (more concise comprehensions) +# A: flake8-builtins (shadowing builtins) +# SIM: flake8-simplify +# RUF: Ruff-specific rules +# D: pydocstyle (for docstring style/formatting) +# S: flake8-bandit (some security checks, complements Bandit) +# T20: flake8-print (discourage print statements in production code) +# N: pep8-naming +# TODO: Uncomment rules when ready to use +select = [ + "E", "W", "F", "I", "B", "C4", "T20", "N", "UP", "SIM" #, "A", "S", "D", "RUF" +] +ignore = [ + "E501", # Line too long + "T201", # Print statement found + "T203", # Pprint statement found + "B008", # Perform function call in argument defaults +] + +[tool.ruff.lint.per-file-ignores] +"__init__.py" = ["F401", "F403"] + +[tool.ruff.lint.isort] +combine-as-imports = true +known-first-party = ["lerobot"] + +[tool.ruff.lint.pydocstyle] +convention = "google" + +[tool.ruff.format] +quote-style = "double" +indent-style = "space" +skip-magic-trailing-comma = false +line-ending = "auto" +docstring-code-format = true + +[tool.bandit] +exclude_dirs = [ + "tests", + "benchmarks", + "src/lerobot/datasets/push_dataset_to_hub", +] +skips = ["B101", "B311", "B404", "B603", "B615"] + +[tool.typos] +default.extend-ignore-re = [ + "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # spellchecker:disable-line + "(?s)(#|//)\\s*spellchecker:off.*?\\n\\s*(#|//)\\s*spellchecker:on", # spellchecker: +] +default.extend-ignore-identifiers-re = [ + # Add individual words here to ignore them + "2nd", + "pn", + "ser", + "ein", + "thw", + "inpt", +] + +# TODO: Uncomment when ready to use +# [tool.interrogate] +# ignore-init-module = true +# ignore-init-method = true +# ignore-nested-functions = false +# ignore-magic = false +# ignore-semiprivate = false +# ignore-private = false +# ignore-property-decorators = false +# ignore-module = false +# ignore-setters = false +# fail-under = 80 +# output-format = "term-missing" +# color = true +# paths = ["src/lerobot"] + +# TODO: Enable mypy gradually module by module across multiple PRs +# Uncomment [tool.mypy] first, then uncomment individual module overrides as they get proper type annotations + +[tool.mypy] +python_version = "3.10" +ignore_missing_imports = true +follow_imports = "skip" +# warn_return_any = true +# warn_unused_configs = true +# strict = true +# disallow_untyped_defs = true +# disallow_incomplete_defs = true +# check_untyped_defs = true + +[[tool.mypy.overrides]] +module = "lerobot.*" +ignore_errors = true + +[[tool.mypy.overrides]] +module = "lerobot.envs.*" +ignore_errors = false + + +# [[tool.mypy.overrides]] +# module = "lerobot.utils.*" +# ignore_errors = false + +[[tool.mypy.overrides]] +module = "lerobot.configs.*" +ignore_errors = false + +# extra strictness for configs +disallow_untyped_defs = true +disallow_incomplete_defs = true +check_untyped_defs = true + +# [[tool.mypy.overrides]] +# module = "lerobot.optim.*" +# ignore_errors = false + +[[tool.mypy.overrides]] +module = "lerobot.model.*" +ignore_errors = false + +# [[tool.mypy.overrides]] +# module = "lerobot.processor.*" +# ignore_errors = false + +# [[tool.mypy.overrides]] +# module = "lerobot.datasets.*" +# ignore_errors = false + +[[tool.mypy.overrides]] +module = "lerobot.cameras.*" +ignore_errors = false + +# [[tool.mypy.overrides]] +# module = "lerobot.motors.*" +# ignore_errors = false + +# [[tool.mypy.overrides]] +# module = "lerobot.robots.*" +# ignore_errors = false + +# [[tool.mypy.overrides]] +# module = 
"lerobot.teleoperators.*" +# ignore_errors = false + +# [[tool.mypy.overrides]] +# module = "lerobot.policies.*" +# ignore_errors = false + +# [[tool.mypy.overrides]] +# module = "lerobot.rl.*" +# ignore_errors = false + + +# [[tool.mypy.overrides]] +# module = "lerobot.async_inference.*" +# ignore_errors = false + +# [[tool.mypy.overrides]] +# module = "lerobot.transport.*" +# ignore_errors = false + +# [[tool.mypy.overrides]] +# module = "lerobot.scripts.*" +# ignore_errors = false diff --git a/requirements-macos.txt b/requirements-macos.txt new file mode 100644 index 0000000000000000000000000000000000000000..cf45eb81ba49729636df4e9c94376ad039a23f62 --- /dev/null +++ b/requirements-macos.txt @@ -0,0 +1,830 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --output-file=requirements-macos.txt requirements.in +# +-e .[all] + # via -[all] +absl-py==2.3.1 + # via + # dm-control + # dm-env + # dm-tree + # labmaze + # mujoco + # tensorboard +accelerate==1.11.0 + # via + # lerobot + # peft +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.13.1 + # via fsspec +aiosignal==1.4.0 + # via aiohttp +annotated-types==0.7.0 + # via pydantic +antlr4-python3-runtime==4.9.3 + # via + # hydra-core + # omegaconf +anyio==4.11.0 + # via + # starlette + # watchfiles +asttokens==3.0.0 + # via stack-data +async-timeout==5.0.1 + # via aiohttp +attrs==25.4.0 + # via + # aiohttp + # dm-tree + # jsonlines + # jsonschema + # referencing + # rerun-sdk +av==15.1.0 + # via lerobot +bddl==1.0.1 + # via libero +certifi==2025.10.5 + # via + # requests + # sentry-sdk +cffi==2.0.0 + # via pymunk +cfgv==3.4.0 + # via pre-commit +charset-normalizer==3.4.4 + # via requests +click==8.3.0 + # via + # uvicorn + # wandb +cloudpickle==3.1.1 + # via + # gymnasium + # libero +cmake==4.1.0 + # via lerobot +cmeel==0.57.3 + # via + # cmeel-assimp + # cmeel-boost + # cmeel-console-bridge + # cmeel-octomap + # cmeel-qhull + # cmeel-tinyxml2 + # cmeel-urdfdom + # cmeel-zlib + # coal-library + # eigenpy + # eiquadprog + # pin + # placo + # rhoban-cmeel-jsoncpp +cmeel-assimp==5.4.3.1 + # via coal-library +cmeel-boost==1.87.0.1 + # via + # coal-library + # eigenpy + # eiquadprog + # pin +cmeel-console-bridge==1.0.2.3 + # via cmeel-urdfdom +cmeel-octomap==1.10.0 + # via coal-library +cmeel-qhull==8.0.2.1 + # via coal-library +cmeel-tinyxml2==10.0.0 + # via cmeel-urdfdom +cmeel-urdfdom==4.0.1 + # via pin +cmeel-zlib==1.3.1 + # via cmeel-assimp +coal-library==3.0.1 + # via pin +contourpy==1.3.2 + # via matplotlib +coverage[toml]==7.11.0 + # via pytest-cov +cycler==0.12.1 + # via matplotlib +datasets==4.1.1 + # via lerobot +debugpy==1.8.17 + # via lerobot +decorator==5.2.1 + # via ipython +deepdiff==8.6.1 + # via lerobot +diffusers==0.35.2 + # via lerobot +dill==0.4.0 + # via + # datasets + # multiprocess +distlib==0.4.0 + # via virtualenv +dm-control==1.0.34 + # via gym-aloha +dm-env==1.6 + # via dm-control +dm-tree==0.1.9 + # via + # dm-control + # dm-env + # lerobot +docopt==0.6.2 + # via num2words +draccus==0.10.0 + # via lerobot +dynamixel-sdk==3.8.4 + # via lerobot +easydict==1.13 + # via libero +egl-probe @ git+https://github.com/huggingface/egl_probe.git + # via + # libero + # robomimic +eigenpy==3.10.3 + # via coal-library +einops==0.8.1 + # via + # lerobot + # libero +eiquadprog==1.2.9 + # via placo +etils[epath,epy]==1.13.0 + # via mujoco +exceptiongroup==1.3.0 + # via + # anyio + # ipython + # pytest +executing==2.2.1 + # via stack-data +farama-notifications==0.0.4 + # 
via gymnasium +fastapi==0.119.1 + # via teleop +fastjsonschema==2.21.2 + # via nbformat +feetech-servo-sdk==1.0.0 + # via lerobot +filelock==3.20.0 + # via + # datasets + # diffusers + # huggingface-hub + # torch + # transformers + # virtualenv +fonttools==4.60.1 + # via matplotlib +frozenlist==1.8.0 + # via + # aiohttp + # aiosignal +fsspec[http]==2025.9.0 + # via + # datasets + # etils + # huggingface-hub + # torch +future==1.0.0 + # via libero +gitdb==4.0.12 + # via gitpython +gitpython==3.1.45 + # via wandb +glfw==2.10.0 + # via + # dm-control + # mujoco +grpcio==1.73.1 + # via + # grpcio-tools + # lerobot + # reachy2-sdk + # reachy2-sdk-api + # tensorboard +grpcio-tools==1.73.1 + # via + # lerobot + # reachy2-sdk-api +gym-aloha==0.1.3 + # via lerobot +gym-hil==0.1.13 + # via lerobot +gym-pusht==0.1.6 + # via lerobot +gymnasium==1.2.1 + # via + # gym-aloha + # gym-hil + # gym-pusht + # lerobot + # libero + # metaworld +h11==0.16.0 + # via uvicorn +h5py==3.15.1 + # via robomimic +hebi-py==2.11.0 + # via lerobot +hf-transfer==0.1.9 + # via huggingface-hub +hf-xet==1.1.10 + # via huggingface-hub +hidapi==0.14.0.post4 + # via + # gym-hil + # lerobot +httptools==0.7.1 + # via uvicorn +huggingface-hub[cli,hf-transfer]==0.35.3 + # via + # accelerate + # datasets + # diffusers + # lerobot + # peft + # timm + # tokenizers + # transformers +hydra-core==1.3.2 + # via libero +identify==2.6.15 + # via pre-commit +idna==3.11 + # via + # anyio + # requests + # yarl +imageio[ffmpeg]==2.37.0 + # via + # gym-aloha + # gym-hil + # lerobot + # metaworld + # robomimic + # scikit-image +imageio-ffmpeg==0.6.0 + # via + # imageio + # robomimic +importlib-metadata==8.7.0 + # via diffusers +importlib-resources==6.5.2 + # via etils +iniconfig==2.3.0 + # via pytest +inquirerpy==0.3.4 + # via huggingface-hub +ipython==8.37.0 + # via meshcat +ischedule==1.2.7 + # via placo +jedi==0.19.2 + # via ipython +jinja2==3.1.6 + # via torch +jsonlines==4.0.0 + # via lerobot +jsonschema==4.25.1 + # via nbformat +jsonschema-specifications==2025.9.1 + # via jsonschema +jupyter-core==5.9.1 + # via nbformat +jupytext==1.18.1 + # via bddl +kiwisolver==1.4.9 + # via matplotlib +labmaze==1.0.6 + # via dm-control +lazy-loader==0.4 + # via scikit-image +libero @ git+https://github.com/huggingface/lerobot-libero.git@main + # via lerobot +llvmlite==0.45.1 + # via numba +lxml==6.0.2 + # via dm-control +markdown==3.9 + # via tensorboard +markdown-it-py==4.0.0 + # via + # jupytext + # mdit-py-plugins +markupsafe==3.0.3 + # via + # jinja2 + # werkzeug +matplotlib==3.10.7 + # via + # lerobot + # libero +matplotlib-inline==0.2.1 + # via ipython +mdit-py-plugins==0.5.0 + # via jupytext +mdurl==0.1.2 + # via markdown-it-py +mergedeep==1.3.4 + # via draccus +meshcat==0.3.2 + # via placo +metaworld==3.0.0 + # via lerobot +mock-serial==0.0.1 + # via lerobot +mpmath==1.3.0 + # via sympy +mujoco==3.3.7 + # via + # dm-control + # gym-aloha + # gym-hil + # libero + # metaworld + # robosuite +multidict==6.7.0 + # via + # aiohttp + # yarl +multiprocess==0.70.16 + # via datasets +mypy-extensions==1.1.0 + # via typing-inspect +nbformat==5.10.4 + # via jupytext +networkx==3.4.2 + # via + # bddl + # scikit-image + # torch +ninja==1.13.0 + # via lerobot +nodeenv==1.9.1 + # via pre-commit +num2words==0.5.14 + # via lerobot +numba==0.62.1 + # via robosuite +numpy==2.2.6 + # via + # accelerate + # bddl + # cmeel-boost + # contourpy + # datasets + # diffusers + # dm-control + # dm-env + # dm-tree + # gymnasium + # h5py + # hebi-py + # imageio + # labmaze + # 
libero + # matplotlib + # meshcat + # metaworld + # mujoco + # numba + # opencv-python + # opencv-python-headless + # pandas + # peft + # pyquaternion + # reachy2-sdk + # rerun-sdk + # robomimic + # robosuite + # scikit-image + # scipy + # shapely + # teleop + # tensorboard + # tensorboardx + # tifffile + # torchvision + # transformers + # transforms3d +omegaconf==2.3.0 + # via hydra-core +opencv-python==4.12.0.88 + # via + # gym-pusht + # libero + # reachy2-sdk + # robosuite +opencv-python-headless==4.12.0.88 + # via lerobot +orderly-set==5.5.0 + # via deepdiff +packaging==25.0 + # via + # accelerate + # datasets + # huggingface-hub + # hydra-core + # jupytext + # lazy-loader + # lerobot + # matplotlib + # peft + # pytest + # reachy2-sdk + # scikit-image + # tensorboard + # tensorboardx + # transformers + # wandb +pandas==2.3.3 + # via + # datasets + # lerobot +parso==0.8.5 + # via jedi +peft==0.17.1 + # via lerobot +pexpect==4.9.0 + # via ipython +pfzy==0.3.4 + # via inquirerpy +pillow==12.0.0 + # via + # diffusers + # imageio + # lerobot + # matplotlib + # meshcat + # rerun-sdk + # robosuite + # scikit-image + # tensorboard + # torchvision +pin==3.4.0 + # via placo +placo==0.9.14 + # via lerobot +platformdirs==4.5.0 + # via + # jupyter-core + # virtualenv + # wandb +pluggy==1.6.0 + # via + # pytest + # pytest-cov +pre-commit==4.3.0 + # via lerobot +prompt-toolkit==3.0.52 + # via + # inquirerpy + # ipython +propcache==0.4.1 + # via + # aiohttp + # yarl +protobuf==6.31.0 + # via + # dm-control + # grpcio-tools + # lerobot + # reachy2-sdk + # reachy2-sdk-api + # tensorboard + # tensorboardx + # wandb +psutil==7.1.1 + # via + # accelerate + # imageio + # peft + # robomimic +ptyprocess==0.7.0 + # via pexpect +pure-eval==0.2.3 + # via stack-data +pyarrow==21.0.0 + # via + # datasets + # rerun-sdk +pycparser==2.23 + # via cffi +pydantic==2.12.3 + # via + # fastapi + # wandb +pydantic-core==2.41.4 + # via pydantic +pygame==2.6.1 + # via + # gym-hil + # gym-pusht + # lerobot +pygments==2.19.2 + # via + # ipython + # pytest +pymunk==6.11.1 + # via + # gym-pusht + # lerobot +pyngrok==7.4.1 + # via meshcat +pynput==1.8.1 + # via + # gym-hil + # lerobot +pyobjc-core==12.0 + # via + # pyobjc-framework-applicationservices + # pyobjc-framework-cocoa + # pyobjc-framework-coretext + # pyobjc-framework-quartz +pyobjc-framework-applicationservices==12.0 + # via pynput +pyobjc-framework-cocoa==12.0 + # via + # pyobjc-framework-applicationservices + # pyobjc-framework-coretext + # pyobjc-framework-quartz +pyobjc-framework-coretext==12.0 + # via pyobjc-framework-applicationservices +pyobjc-framework-quartz==12.0 + # via + # pynput + # pyobjc-framework-applicationservices + # pyobjc-framework-coretext +pyopengl==3.1.10 + # via + # dm-control + # mujoco +pyparsing==3.2.5 + # via + # dm-control + # matplotlib +pyquaternion==0.9.9 + # via reachy2-sdk +pyrealsense2-macosx==2.54.2 + # via lerobot +pyserial==3.5 + # via + # dynamixel-sdk + # feetech-servo-sdk + # lerobot +pytest==8.4.2 + # via + # bddl + # lerobot + # pytest-cov + # pytest-timeout + # teleop +pytest-cov==7.0.0 + # via lerobot +pytest-timeout==2.4.0 + # via lerobot +python-dateutil==2.9.0.post0 + # via + # matplotlib + # pandas +python-dotenv==1.1.1 + # via uvicorn +pytz==2025.2 + # via pandas +pyyaml==6.0.3 + # via + # accelerate + # datasets + # draccus + # hebi-py + # huggingface-hub + # jupytext + # omegaconf + # peft + # pre-commit + # pyngrok + # pyyaml-include + # timm + # transformers + # uvicorn + # wandb +pyyaml-include==1.4.1 + # via 
draccus +pyzmq==27.1.0 + # via + # lerobot + # meshcat +reachy2-sdk==1.0.14 + # via lerobot +reachy2-sdk-api==1.0.21 + # via reachy2-sdk +referencing==0.37.0 + # via + # jsonschema + # jsonschema-specifications +regex==2025.10.23 + # via + # diffusers + # transformers +requests==2.32.5 + # via + # datasets + # diffusers + # dm-control + # huggingface-hub + # teleop + # transformers + # wandb +rerun-sdk==0.26.1 + # via lerobot +rhoban-cmeel-jsoncpp==1.9.4.9 + # via placo +robomimic==0.2.0 + # via libero +robosuite==1.4.0 + # via libero +rpds-py==0.28.0 + # via + # jsonschema + # referencing +safetensors==0.6.2 + # via + # accelerate + # diffusers + # lerobot + # peft + # timm + # transformers +scikit-image==0.25.2 + # via + # gym-pusht + # lerobot +scipy==1.15.3 + # via + # dm-control + # metaworld + # robosuite + # scikit-image +sentry-sdk==2.42.1 + # via wandb +shapely==2.1.2 + # via gym-pusht +six==1.17.0 + # via + # pynput + # python-dateutil +smmap==5.0.2 + # via gitdb +sniffio==1.3.1 + # via anyio +stack-data==0.6.3 + # via ipython +starlette==0.48.0 + # via fastapi +sympy==1.14.0 + # via torch +teleop==0.1.2 + # via lerobot +tensorboard==2.20.0 + # via robomimic +tensorboard-data-server==0.7.2 + # via tensorboard +tensorboardx==2.6.4 + # via robomimic +termcolor==3.1.0 + # via + # lerobot + # robomimic +thop==0.1.1.post2209072238 + # via libero +tifffile==2025.5.10 + # via scikit-image +timm==1.0.20 + # via lerobot +tokenizers==0.22.1 + # via transformers +toml==0.10.2 + # via draccus +tomli==2.3.0 + # via + # cmeel + # coverage + # jupytext + # pytest +torch==2.7.1 + # via + # accelerate + # lerobot + # peft + # robomimic + # thop + # timm + # torchvision +torchcodec==0.5 + # via lerobot +torchvision==0.22.1 + # via + # lerobot + # robomimic + # timm +tornado==6.5.2 + # via meshcat +tqdm==4.67.1 + # via + # datasets + # dm-control + # huggingface-hub + # peft + # robomimic + # transformers +traitlets==5.14.3 + # via + # ipython + # jupyter-core + # matplotlib-inline + # nbformat +transformers==4.57.1 + # via + # lerobot + # libero + # peft +transforms3d==0.4.2 + # via teleop +typing-extensions==4.15.0 + # via + # aiosignal + # anyio + # etils + # exceptiongroup + # fastapi + # gymnasium + # huggingface-hub + # ipython + # multidict + # pydantic + # pydantic-core + # referencing + # rerun-sdk + # starlette + # torch + # typing-inspect + # typing-inspection + # uvicorn + # virtualenv + # wandb +typing-inspect==0.9.0 + # via draccus +typing-inspection==0.4.2 + # via pydantic +tzdata==2025.2 + # via pandas +u-msgpack-python==2.8.0 + # via meshcat +urllib3==2.5.0 + # via + # requests + # sentry-sdk +uvicorn[standard]==0.38.0 + # via teleop +uvloop==0.22.1 + # via uvicorn +virtualenv==20.35.3 + # via pre-commit +wandb==0.21.4 + # via + # lerobot + # libero +watchfiles==1.1.1 + # via uvicorn +wcwidth==0.2.14 + # via prompt-toolkit +websocket-client==1.9.0 + # via teleop +websockets==15.0.1 + # via uvicorn +werkzeug==3.1.3 + # via tensorboard +wrapt==2.0.0 + # via dm-tree +xxhash==3.6.0 + # via datasets +yarl==1.22.0 + # via aiohttp +zipp==3.23.0 + # via + # etils + # importlib-metadata + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/requirements-ubuntu.txt b/requirements-ubuntu.txt new file mode 100644 index 0000000000000000000000000000000000000000..c168c651ce5ddce78f0f39c5483f853ffda29d23 --- /dev/null +++ b/requirements-ubuntu.txt @@ -0,0 +1,861 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the 
following command: +# +# pip-compile --output-file=requirements-ubuntu.txt requirements.in +# +-e .[all] + # via -[all] +absl-py==2.3.1 + # via + # dm-control + # dm-env + # dm-tree + # labmaze + # mujoco + # tensorboard +accelerate==1.11.0 + # via + # lerobot + # peft +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.13.1 + # via fsspec +aiosignal==1.4.0 + # via aiohttp +annotated-types==0.7.0 + # via pydantic +antlr4-python3-runtime==4.9.3 + # via + # hydra-core + # omegaconf +anyio==4.11.0 + # via + # starlette + # watchfiles +asttokens==3.0.0 + # via stack-data +async-timeout==5.0.1 + # via aiohttp +attrs==25.4.0 + # via + # aiohttp + # dm-tree + # jsonlines + # jsonschema + # referencing + # rerun-sdk +av==15.1.0 + # via lerobot +bddl==1.0.1 + # via libero +certifi==2025.10.5 + # via + # requests + # sentry-sdk +cffi==2.0.0 + # via pymunk +cfgv==3.4.0 + # via pre-commit +charset-normalizer==3.4.4 + # via requests +click==8.3.0 + # via + # uvicorn + # wandb +cloudpickle==3.1.1 + # via + # gymnasium + # libero +cmake==4.1.0 + # via lerobot +cmeel==0.57.3 + # via + # cmeel-assimp + # cmeel-boost + # cmeel-console-bridge + # cmeel-octomap + # cmeel-qhull + # cmeel-tinyxml2 + # cmeel-urdfdom + # cmeel-zlib + # coal-library + # eigenpy + # eiquadprog + # pin + # placo + # rhoban-cmeel-jsoncpp +cmeel-assimp==5.4.3.1 + # via coal-library +cmeel-boost==1.87.0.1 + # via + # coal-library + # eigenpy + # eiquadprog + # pin +cmeel-console-bridge==1.0.2.3 + # via cmeel-urdfdom +cmeel-octomap==1.10.0 + # via coal-library +cmeel-qhull==8.0.2.1 + # via coal-library +cmeel-tinyxml2==10.0.0 + # via cmeel-urdfdom +cmeel-urdfdom==4.0.1 + # via pin +cmeel-zlib==1.3.1 + # via cmeel-assimp +coal-library==3.0.1 + # via pin +contourpy==1.3.2 + # via matplotlib +coverage[toml]==7.11.0 + # via pytest-cov +cycler==0.12.1 + # via matplotlib +datasets==4.1.1 + # via lerobot +debugpy==1.8.17 + # via lerobot +decorator==5.2.1 + # via ipython +decord==0.6.0 + # via lerobot +deepdiff==8.6.1 + # via lerobot +diffusers==0.35.2 + # via lerobot +dill==0.4.0 + # via + # datasets + # multiprocess +distlib==0.4.0 + # via virtualenv +dm-control==1.0.34 + # via gym-aloha +dm-env==1.6 + # via dm-control +dm-tree==0.1.9 + # via + # dm-control + # dm-env + # lerobot +docopt==0.6.2 + # via num2words +draccus==0.10.0 + # via lerobot +dynamixel-sdk==3.8.4 + # via lerobot +easydict==1.13 + # via libero +egl-probe @ git+https://github.com/huggingface/egl_probe.git + # via + # libero + # robomimic +eigenpy==3.10.3 + # via coal-library +einops==0.8.1 + # via + # flash-attn + # lerobot + # libero +eiquadprog==1.2.9 + # via placo +etils[epath,epy]==1.13.0 + # via mujoco +evdev==1.9.2 + # via pynput +exceptiongroup==1.3.0 + # via + # anyio + # ipython + # pytest +executing==2.2.1 + # via stack-data +farama-notifications==0.0.4 + # via gymnasium +fastapi==0.119.1 + # via teleop +fastjsonschema==2.21.2 + # via nbformat +feetech-servo-sdk==1.0.0 + # via lerobot +filelock==3.20.0 + # via + # datasets + # diffusers + # huggingface-hub + # torch + # transformers + # virtualenv +flash-attn==2.8.3 + # via lerobot +fonttools==4.60.1 + # via matplotlib +frozenlist==1.8.0 + # via + # aiohttp + # aiosignal +fsspec[http]==2025.9.0 + # via + # datasets + # etils + # huggingface-hub + # torch +future==1.0.0 + # via libero +gitdb==4.0.12 + # via gitpython +gitpython==3.1.45 + # via wandb +glfw==2.10.0 + # via + # dm-control + # mujoco +grpcio==1.73.1 + # via + # grpcio-tools + # lerobot + # reachy2-sdk + # reachy2-sdk-api + # tensorboard 
+grpcio-tools==1.73.1 + # via + # lerobot + # reachy2-sdk-api +gym-aloha==0.1.3 + # via lerobot +gym-hil==0.1.13 + # via lerobot +gym-pusht==0.1.6 + # via lerobot +gymnasium==1.2.1 + # via + # gym-aloha + # gym-hil + # gym-pusht + # lerobot + # libero + # metaworld +h11==0.16.0 + # via uvicorn +h5py==3.15.1 + # via robomimic +hebi-py==2.11.0 + # via lerobot +hf-transfer==0.1.9 + # via huggingface-hub +hf-xet==1.1.10 + # via huggingface-hub +hidapi==0.14.0.post4 + # via + # gym-hil + # lerobot +httptools==0.7.1 + # via uvicorn +huggingface-hub[cli,hf-transfer]==0.35.3 + # via + # accelerate + # datasets + # diffusers + # lerobot + # peft + # timm + # tokenizers + # transformers +hydra-core==1.3.2 + # via libero +identify==2.6.15 + # via pre-commit +idna==3.11 + # via + # anyio + # requests + # yarl +imageio[ffmpeg]==2.37.0 + # via + # gym-aloha + # gym-hil + # lerobot + # metaworld + # robomimic + # scikit-image +imageio-ffmpeg==0.6.0 + # via + # imageio + # robomimic +importlib-metadata==8.7.0 + # via diffusers +importlib-resources==6.5.2 + # via etils +iniconfig==2.3.0 + # via pytest +inquirerpy==0.3.4 + # via huggingface-hub +ipython==8.37.0 + # via meshcat +ischedule==1.2.7 + # via placo +jedi==0.19.2 + # via ipython +jinja2==3.1.6 + # via torch +jsonlines==4.0.0 + # via lerobot +jsonschema==4.25.1 + # via nbformat +jsonschema-specifications==2025.9.1 + # via jsonschema +jupyter-core==5.9.1 + # via nbformat +jupytext==1.18.1 + # via bddl +kiwisolver==1.4.9 + # via matplotlib +labmaze==1.0.6 + # via dm-control +lazy-loader==0.4 + # via scikit-image +libero @ git+https://github.com/huggingface/lerobot-libero.git@main + # via lerobot +llvmlite==0.45.1 + # via numba +lxml==6.0.2 + # via dm-control +markdown==3.9 + # via tensorboard +markdown-it-py==4.0.0 + # via + # jupytext + # mdit-py-plugins +markupsafe==3.0.3 + # via + # jinja2 + # werkzeug +matplotlib==3.10.7 + # via + # lerobot + # libero +matplotlib-inline==0.2.1 + # via ipython +mdit-py-plugins==0.5.0 + # via jupytext +mdurl==0.1.2 + # via markdown-it-py +mergedeep==1.3.4 + # via draccus +meshcat==0.3.2 + # via placo +metaworld==3.0.0 + # via lerobot +mock-serial==0.0.1 + # via lerobot +mpmath==1.3.0 + # via sympy +mujoco==3.3.7 + # via + # dm-control + # gym-aloha + # gym-hil + # libero + # metaworld + # robosuite +multidict==6.7.0 + # via + # aiohttp + # yarl +multiprocess==0.70.16 + # via datasets +mypy-extensions==1.1.0 + # via typing-inspect +nbformat==5.10.4 + # via jupytext +networkx==3.4.2 + # via + # bddl + # scikit-image + # torch +ninja==1.13.0 + # via lerobot +nodeenv==1.9.1 + # via pre-commit +num2words==0.5.14 + # via lerobot +numba==0.62.1 + # via robosuite +numpy==2.2.6 + # via + # accelerate + # bddl + # cmeel-boost + # contourpy + # datasets + # decord + # diffusers + # dm-control + # dm-env + # dm-tree + # gymnasium + # h5py + # hebi-py + # imageio + # labmaze + # libero + # matplotlib + # meshcat + # metaworld + # mujoco + # numba + # opencv-python + # opencv-python-headless + # pandas + # peft + # pyquaternion + # reachy2-sdk + # rerun-sdk + # robomimic + # robosuite + # scikit-image + # scipy + # shapely + # teleop + # tensorboard + # tensorboardx + # tifffile + # torchvision + # transformers + # transforms3d +nvidia-cublas-cu12==12.6.4.1 + # via + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.6.80 + # via torch +nvidia-cuda-nvrtc-cu12==12.6.77 + # via torch +nvidia-cuda-runtime-cu12==12.6.77 + # via torch +nvidia-cudnn-cu12==9.5.1.17 + # via torch 
+nvidia-cufft-cu12==11.3.0.4 + # via torch +nvidia-cufile-cu12==1.11.1.6 + # via torch +nvidia-curand-cu12==10.3.7.77 + # via torch +nvidia-cusolver-cu12==11.7.1.2 + # via torch +nvidia-cusparse-cu12==12.5.4.2 + # via + # nvidia-cusolver-cu12 + # torch +nvidia-cusparselt-cu12==0.6.3 + # via torch +nvidia-nccl-cu12==2.26.2 + # via torch +nvidia-nvjitlink-cu12==12.6.85 + # via + # nvidia-cufft-cu12 + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 + # torch +nvidia-nvtx-cu12==12.6.77 + # via torch +omegaconf==2.3.0 + # via hydra-core +opencv-python==4.12.0.88 + # via + # gym-pusht + # libero + # reachy2-sdk + # robosuite +opencv-python-headless==4.12.0.88 + # via lerobot +orderly-set==5.5.0 + # via deepdiff +packaging==25.0 + # via + # accelerate + # datasets + # huggingface-hub + # hydra-core + # jupytext + # lazy-loader + # lerobot + # matplotlib + # peft + # pytest + # reachy2-sdk + # scikit-image + # tensorboard + # tensorboardx + # transformers + # wandb +pandas==2.3.3 + # via + # datasets + # lerobot +parso==0.8.5 + # via jedi +peft==0.17.1 + # via lerobot +pexpect==4.9.0 + # via ipython +pfzy==0.3.4 + # via inquirerpy +pillow==12.0.0 + # via + # diffusers + # imageio + # lerobot + # matplotlib + # meshcat + # rerun-sdk + # robosuite + # scikit-image + # tensorboard + # torchvision +pin==3.4.0 + # via placo +placo==0.9.14 + # via lerobot +platformdirs==4.5.0 + # via + # jupyter-core + # virtualenv + # wandb +pluggy==1.6.0 + # via + # pytest + # pytest-cov +pre-commit==4.3.0 + # via lerobot +prompt-toolkit==3.0.52 + # via + # inquirerpy + # ipython +propcache==0.4.1 + # via + # aiohttp + # yarl +protobuf==6.31.0 + # via + # dm-control + # grpcio-tools + # lerobot + # reachy2-sdk + # reachy2-sdk-api + # tensorboard + # tensorboardx + # wandb +psutil==7.1.1 + # via + # accelerate + # imageio + # peft + # robomimic +ptyprocess==0.7.0 + # via pexpect +pure-eval==0.2.3 + # via stack-data +pyarrow==21.0.0 + # via + # datasets + # rerun-sdk +pycparser==2.23 + # via cffi +pydantic==2.12.3 + # via + # fastapi + # wandb +pydantic-core==2.41.4 + # via pydantic +pygame==2.6.1 + # via + # gym-hil + # gym-pusht + # lerobot +pygments==2.19.2 + # via + # ipython + # pytest +pymunk==6.11.1 + # via + # gym-pusht + # lerobot +pyngrok==7.4.1 + # via meshcat +pynput==1.8.1 + # via + # gym-hil + # lerobot +pyopengl==3.1.10 + # via + # dm-control + # mujoco +pyparsing==3.2.5 + # via + # dm-control + # matplotlib +pyquaternion==0.9.9 + # via reachy2-sdk +pyrealsense2==2.56.5.9235 + # via lerobot +pyserial==3.5 + # via + # dynamixel-sdk + # feetech-servo-sdk + # lerobot +pytest==8.4.2 + # via + # bddl + # lerobot + # pytest-cov + # pytest-timeout + # teleop +pytest-cov==7.0.0 + # via lerobot +pytest-timeout==2.4.0 + # via lerobot +python-dateutil==2.9.0.post0 + # via + # matplotlib + # pandas +python-dotenv==1.1.1 + # via uvicorn +python-xlib==0.33 + # via pynput +pytz==2025.2 + # via pandas +pyyaml==6.0.3 + # via + # accelerate + # datasets + # draccus + # hebi-py + # huggingface-hub + # jupytext + # omegaconf + # peft + # pre-commit + # pyngrok + # pyyaml-include + # timm + # transformers + # uvicorn + # wandb +pyyaml-include==1.4.1 + # via draccus +pyzmq==27.1.0 + # via + # lerobot + # meshcat +reachy2-sdk==1.0.14 + # via lerobot +reachy2-sdk-api==1.0.21 + # via reachy2-sdk +referencing==0.37.0 + # via + # jsonschema + # jsonschema-specifications +regex==2025.10.23 + # via + # diffusers + # transformers +requests==2.32.5 + # via + # datasets + # diffusers + # dm-control + # huggingface-hub + # teleop + # 
transformers + # wandb +rerun-sdk==0.26.1 + # via lerobot +rhoban-cmeel-jsoncpp==1.9.4.9 + # via placo +robomimic==0.2.0 + # via libero +robosuite==1.4.0 + # via libero +rpds-py==0.28.0 + # via + # jsonschema + # referencing +safetensors==0.6.2 + # via + # accelerate + # diffusers + # lerobot + # peft + # timm + # transformers +scikit-image==0.25.2 + # via + # gym-pusht + # lerobot +scipy==1.15.3 + # via + # dm-control + # metaworld + # robosuite + # scikit-image +sentry-sdk==2.42.1 + # via wandb +shapely==2.1.2 + # via gym-pusht +six==1.17.0 + # via + # pynput + # python-dateutil + # python-xlib +smmap==5.0.2 + # via gitdb +sniffio==1.3.1 + # via anyio +stack-data==0.6.3 + # via ipython +starlette==0.48.0 + # via fastapi +sympy==1.14.0 + # via torch +teleop==0.1.2 + # via lerobot +tensorboard==2.20.0 + # via robomimic +tensorboard-data-server==0.7.2 + # via tensorboard +tensorboardx==2.6.4 + # via robomimic +termcolor==3.1.0 + # via + # lerobot + # robomimic +thop==0.1.1.post2209072238 + # via libero +tifffile==2025.5.10 + # via scikit-image +timm==1.0.20 + # via lerobot +tokenizers==0.22.1 + # via transformers +toml==0.10.2 + # via draccus +tomli==2.3.0 + # via + # cmeel + # coverage + # jupytext + # pytest +torch==2.7.1 + # via + # accelerate + # flash-attn + # lerobot + # peft + # robomimic + # thop + # timm + # torchvision +torchcodec==0.5 + # via lerobot +torchvision==0.22.1 + # via + # lerobot + # robomimic + # timm +tornado==6.5.2 + # via meshcat +tqdm==4.67.1 + # via + # datasets + # dm-control + # huggingface-hub + # peft + # robomimic + # transformers +traitlets==5.14.3 + # via + # ipython + # jupyter-core + # matplotlib-inline + # nbformat +transformers==4.57.1 + # via + # lerobot + # libero + # peft +transforms3d==0.4.2 + # via teleop +triton==3.3.1 + # via torch +typing-extensions==4.15.0 + # via + # aiosignal + # anyio + # etils + # exceptiongroup + # fastapi + # gymnasium + # huggingface-hub + # ipython + # multidict + # pydantic + # pydantic-core + # referencing + # rerun-sdk + # starlette + # torch + # typing-inspect + # typing-inspection + # uvicorn + # virtualenv + # wandb +typing-inspect==0.9.0 + # via draccus +typing-inspection==0.4.2 + # via pydantic +tzdata==2025.2 + # via pandas +u-msgpack-python==2.8.0 + # via meshcat +urllib3==2.5.0 + # via + # requests + # sentry-sdk +uvicorn[standard]==0.38.0 + # via teleop +uvloop==0.22.1 + # via uvicorn +virtualenv==20.35.3 + # via pre-commit +wandb==0.21.4 + # via + # lerobot + # libero +watchfiles==1.1.1 + # via uvicorn +wcwidth==0.2.14 + # via prompt-toolkit +websocket-client==1.9.0 + # via teleop +websockets==15.0.1 + # via uvicorn +werkzeug==3.1.3 + # via tensorboard +wrapt==2.0.0 + # via dm-tree +xxhash==3.6.0 + # via datasets +yarl==1.22.0 + # via aiohttp +zipp==3.23.0 + # via + # etils + # importlib-metadata + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/requirements.in b/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..63c8e49d139aa5f75cd03ad1432e957f3895dc7d --- /dev/null +++ b/requirements.in @@ -0,0 +1,9 @@ +# requirements.in + +# requirements-macos.txt was generated on macOS and is platform-specific (macOS 26.0.1 25A362 arm64). +# Darwin MacBook-Pro.local 25.0.0 Darwin Kernel Version 25.0.0: Wed Sep 17 21:42:08 PDT 2025; root:xnu-12377.1.9~141/RELEASE_ARM64_T8132 arm64 + +# requirements-ubuntu.txt was generated on Linux and is platform-specific (Ubuntu 24.04.3 LTS x86_64). 
+# Linux mlerobot-linux 6.14.0-33-generic #33~24.04.1-Ubuntu SMP PREEMPT_DYNAMIC Fri Sep 19 17:02:30 UTC 2 x86_64 x86_64 x86_64 GNU/Linux + +-e .[all] diff --git a/src/lerobot.egg-info/PKG-INFO b/src/lerobot.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..f145b2a047cc64e672a594ed3dc40e20a9c82ec9 --- /dev/null +++ b/src/lerobot.egg-info/PKG-INFO @@ -0,0 +1,493 @@ +Metadata-Version: 2.4 +Name: lerobot +Version: 0.4.2 +Summary: 🤗 LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch +Author-email: Rémi Cadène , Simon Alibert , Alexander Soare , Quentin Gallouédec , Steven Palma , Pepijn Kooijmans , Michel Aractingi , Adil Zouitine , Dana Aubakirova , Caroline Pascal , Martino Russi , Thomas Wolf +License: Apache-2.0 +Project-URL: homepage, https://huggingface.co/lerobot +Project-URL: documentation, https://huggingface.co/docs/lerobot/index +Project-URL: source, https://github.com/huggingface/lerobot +Project-URL: issues, https://github.com/huggingface/lerobot/issues +Project-URL: discord, https://discord.gg/s3KuuzsPFb +Keywords: lerobot,huggingface,robotics,machine learning,artificial intelligence +Classifier: Development Status :: 3 - Alpha +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python :: 3.10 +Classifier: Topic :: Software Development :: Build Tools +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.10 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: datasets<4.2.0,>=4.0.0 +Requires-Dist: diffusers<0.36.0,>=0.27.2 +Requires-Dist: huggingface-hub[cli,hf-transfer]<0.36.0,>=0.34.2 +Requires-Dist: accelerate<2.0.0,>=1.10.0 +Requires-Dist: setuptools<81.0.0,>=71.0.0 +Requires-Dist: cmake<4.2.0,>=3.29.0.1 +Requires-Dist: einops<0.9.0,>=0.8.0 +Requires-Dist: opencv-python-headless<4.13.0,>=4.9.0 +Requires-Dist: av<16.0.0,>=15.0.0 +Requires-Dist: jsonlines<5.0.0,>=4.0.0 +Requires-Dist: packaging<26.0,>=24.2 +Requires-Dist: pynput<1.9.0,>=1.7.7 +Requires-Dist: pyserial<4.0,>=3.5 +Requires-Dist: wandb<0.22.0,>=0.20.0 +Requires-Dist: torch<2.8.0,>=2.2.1 +Requires-Dist: torchcodec<0.6.0,>=0.2.1; sys_platform != "win32" and (sys_platform != "linux" or (platform_machine != "aarch64" and platform_machine != "arm64" and platform_machine != "armv7l")) and (sys_platform != "darwin" or platform_machine != "x86_64") +Requires-Dist: torchvision<0.23.0,>=0.21.0 +Requires-Dist: draccus==0.10.0 +Requires-Dist: gymnasium<2.0.0,>=1.1.1 +Requires-Dist: rerun-sdk<0.27.0,>=0.24.0 +Requires-Dist: deepdiff<9.0.0,>=7.0.1 +Requires-Dist: imageio[ffmpeg]<3.0.0,>=2.34.0 +Requires-Dist: termcolor<4.0.0,>=2.4.0 +Provides-Extra: pygame-dep +Requires-Dist: pygame<2.7.0,>=2.5.1; extra == "pygame-dep" +Provides-Extra: placo-dep +Requires-Dist: placo<0.10.0,>=0.9.6; extra == "placo-dep" +Provides-Extra: transformers-dep +Requires-Dist: transformers<5.0.0,>=4.53.0; extra == "transformers-dep" +Provides-Extra: grpcio-dep +Requires-Dist: grpcio==1.73.1; extra == "grpcio-dep" +Requires-Dist: protobuf==6.31.0; extra == "grpcio-dep" +Provides-Extra: feetech +Requires-Dist: feetech-servo-sdk<2.0.0,>=1.0.0; extra == "feetech" +Provides-Extra: dynamixel +Requires-Dist: dynamixel-sdk<3.9.0,>=3.7.31; extra == "dynamixel" +Provides-Extra: gamepad +Requires-Dist: lerobot[pygame-dep]; extra == "gamepad" 
+Requires-Dist: hidapi<0.15.0,>=0.14.0; extra == "gamepad" +Provides-Extra: hopejr +Requires-Dist: lerobot[feetech]; extra == "hopejr" +Requires-Dist: lerobot[pygame-dep]; extra == "hopejr" +Provides-Extra: lekiwi +Requires-Dist: lerobot[feetech]; extra == "lekiwi" +Requires-Dist: pyzmq<28.0.0,>=26.2.1; extra == "lekiwi" +Provides-Extra: reachy2 +Requires-Dist: reachy2_sdk<1.1.0,>=1.0.14; extra == "reachy2" +Provides-Extra: kinematics +Requires-Dist: lerobot[placo-dep]; extra == "kinematics" +Provides-Extra: intelrealsense +Requires-Dist: pyrealsense2<2.57.0,>=2.55.1.6486; sys_platform != "darwin" and extra == "intelrealsense" +Requires-Dist: pyrealsense2-macosx<2.55.0,>=2.54; sys_platform == "darwin" and extra == "intelrealsense" +Provides-Extra: phone +Requires-Dist: hebi-py<2.12.0,>=2.8.0; extra == "phone" +Requires-Dist: teleop<0.2.0,>=0.1.0; extra == "phone" +Requires-Dist: fastapi<1.0; extra == "phone" +Provides-Extra: pi +Requires-Dist: transformers@ git+https://github.com/huggingface/transformers.git@fix/lerobot_openpi ; extra == "pi" +Provides-Extra: smolvla +Requires-Dist: lerobot[transformers-dep]; extra == "smolvla" +Requires-Dist: num2words<0.6.0,>=0.5.14; extra == "smolvla" +Requires-Dist: accelerate<2.0.0,>=1.7.0; extra == "smolvla" +Requires-Dist: safetensors<1.0.0,>=0.4.3; extra == "smolvla" +Provides-Extra: groot +Requires-Dist: lerobot[transformers-dep]; extra == "groot" +Requires-Dist: peft<1.0.0,>=0.13.0; extra == "groot" +Requires-Dist: dm-tree<1.0.0,>=0.1.8; extra == "groot" +Requires-Dist: timm<1.1.0,>=1.0.0; extra == "groot" +Requires-Dist: safetensors<1.0.0,>=0.4.3; extra == "groot" +Requires-Dist: Pillow<13.0.0,>=10.0.0; extra == "groot" +Requires-Dist: decord<1.0.0,>=0.6.0; (platform_machine == "AMD64" or platform_machine == "x86_64") and extra == "groot" +Requires-Dist: ninja<2.0.0,>=1.11.1; extra == "groot" +Requires-Dist: flash-attn<3.0.0,>=2.5.9; sys_platform != "darwin" and extra == "groot" +Provides-Extra: hilserl +Requires-Dist: lerobot[transformers-dep]; extra == "hilserl" +Requires-Dist: gym-hil<0.2.0,>=0.1.13; extra == "hilserl" +Requires-Dist: lerobot[grpcio-dep]; extra == "hilserl" +Requires-Dist: lerobot[placo-dep]; extra == "hilserl" +Provides-Extra: async +Requires-Dist: lerobot[grpcio-dep]; extra == "async" +Requires-Dist: matplotlib<4.0.0,>=3.10.3; extra == "async" +Provides-Extra: dev +Requires-Dist: pre-commit<5.0.0,>=3.7.0; extra == "dev" +Requires-Dist: debugpy<1.9.0,>=1.8.1; extra == "dev" +Requires-Dist: lerobot[grpcio-dep]; extra == "dev" +Requires-Dist: grpcio-tools==1.73.1; extra == "dev" +Provides-Extra: test +Requires-Dist: pytest<9.0.0,>=8.1.0; extra == "test" +Requires-Dist: pytest-timeout<3.0.0,>=2.4.0; extra == "test" +Requires-Dist: pytest-cov<8.0.0,>=5.0.0; extra == "test" +Requires-Dist: mock-serial<0.1.0,>=0.0.1; sys_platform != "win32" and extra == "test" +Provides-Extra: video-benchmark +Requires-Dist: scikit-image<0.26.0,>=0.23.2; extra == "video-benchmark" +Requires-Dist: pandas<2.4.0,>=2.2.2; extra == "video-benchmark" +Provides-Extra: aloha +Requires-Dist: gym-aloha<0.2.0,>=0.1.2; extra == "aloha" +Provides-Extra: pusht +Requires-Dist: gym-pusht<0.2.0,>=0.1.5; extra == "pusht" +Requires-Dist: pymunk<7.0.0,>=6.6.0; extra == "pusht" +Provides-Extra: libero +Requires-Dist: lerobot[transformers-dep]; extra == "libero" +Requires-Dist: hf-libero<0.2.0,>=0.1.3; extra == "libero" +Provides-Extra: metaworld +Requires-Dist: metaworld==3.0.0; extra == "metaworld" +Provides-Extra: all +Requires-Dist: lerobot[dynamixel]; extra == 
"all" +Requires-Dist: lerobot[gamepad]; extra == "all" +Requires-Dist: lerobot[hopejr]; extra == "all" +Requires-Dist: lerobot[lekiwi]; extra == "all" +Requires-Dist: lerobot[reachy2]; extra == "all" +Requires-Dist: lerobot[kinematics]; extra == "all" +Requires-Dist: lerobot[intelrealsense]; extra == "all" +Requires-Dist: lerobot[pi]; extra == "all" +Requires-Dist: lerobot[smolvla]; extra == "all" +Requires-Dist: lerobot[hilserl]; extra == "all" +Requires-Dist: lerobot[async]; extra == "all" +Requires-Dist: lerobot[dev]; extra == "all" +Requires-Dist: lerobot[test]; extra == "all" +Requires-Dist: lerobot[video_benchmark]; extra == "all" +Requires-Dist: lerobot[aloha]; extra == "all" +Requires-Dist: lerobot[pusht]; extra == "all" +Requires-Dist: lerobot[phone]; extra == "all" +Requires-Dist: lerobot[libero]; extra == "all" +Requires-Dist: lerobot[metaworld]; extra == "all" +Dynamic: license-file + +

[Image: LeRobot, Hugging Face Robotics Library]

[![Tests](https://github.com/huggingface/lerobot/actions/workflows/nightly.yml/badge.svg?branch=main)](https://github.com/huggingface/lerobot/actions/workflows/nightly.yml?query=branch%3Amain)
[![Python versions](https://img.shields.io/pypi/pyversions/lerobot)](https://www.python.org/downloads/)
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/huggingface/lerobot/blob/main/LICENSE)
[![Status](https://img.shields.io/pypi/status/lerobot)](https://pypi.org/project/lerobot/)
[![Version](https://img.shields.io/pypi/v/lerobot)](https://pypi.org/project/lerobot/)
[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.1-ff69b4.svg)](https://github.com/huggingface/lerobot/blob/main/CODE_OF_CONDUCT.md)
[![Discord](https://dcbadge.vercel.app/api/server/C5P34WJ68S?style=flat)](https://discord.gg/s3KuuzsPFb)

## Build Your Own HopeJR Robot!

[Image: HopeJR robot]

Meet HopeJR – A humanoid robot arm and hand for dexterous manipulation!

Control it with exoskeletons and gloves for precise hand movements.

Perfect for advanced manipulation tasks! 🤖

See the full HopeJR tutorial here.

## Build Your Own SO-101 Robot!

[Images: SO-101 follower arm | SO-101 leader arm]

Meet the updated SO100, the SO-101 – Just €114 per arm!

Train it in minutes with a few simple moves on your laptop.

Then sit back and watch your creation act autonomously! 🤯

See the full SO-101 tutorial here.

Want to take it to the next level? Make your SO-101 mobile by building LeKiwi!

Check out the LeKiwi tutorial and bring your robot to life on wheels.

[Image: LeKiwi mobile robot]

## LeRobot: State-of-the-art AI for real-world robotics

---

🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier to entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models.

🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real world, with a focus on imitation learning and reinforcement learning.

🤗 LeRobot already provides a set of pretrained models, datasets with human-collected demonstrations, and simulation environments to get started without assembling a robot. In the coming weeks, the plan is to add more and more support for real-world robotics on the most affordable and capable robots out there.

🤗 LeRobot hosts pretrained models and datasets on this Hugging Face community page: [huggingface.co/lerobot](https://huggingface.co/lerobot)

#### Examples of pretrained models on simulation environments

ACT policy on ALOHA env | TDMPC policy on SimXArm env | Diffusion policy on PushT env
+ +## Installation + +LeRobot works with Python 3.10+ and PyTorch 2.2+. + +### Environment Setup + +Create a virtual environment with Python 3.10 and activate it, e.g. with [`miniforge`](https://conda-forge.org/download/): + +```bash +conda create -y -n lerobot python=3.10 +conda activate lerobot +``` + +When using `conda`, install `ffmpeg` in your environment: + +```bash +conda install ffmpeg -c conda-forge +``` + +> **NOTE:** This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can: +> +> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using: +> +> ```bash +> conda install ffmpeg=7.1.1 -c conda-forge +> ``` +> +> - _[On Linux only]_ Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`. + +### Install LeRobot 🤗 + +#### From Source + +First, clone the repository and navigate into the directory: + +```bash +git clone https://github.com/huggingface/lerobot.git +cd lerobot +``` + +Then, install the library in editable mode. This is useful if you plan to contribute to the code. + +```bash +pip install -e . +``` + +> **NOTE:** If you encounter build errors, you may need to install additional dependencies (`cmake`, `build-essential`, and `ffmpeg libs`). On Linux, run: +> `sudo apt-get install cmake build-essential python3-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev`. For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg) + +For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras: + +- [aloha](https://github.com/huggingface/gym-aloha) +- [xarm](https://github.com/huggingface/gym-xarm) +- [pusht](https://github.com/huggingface/gym-pusht) + +For instance, to install 🤗 LeRobot with aloha and pusht, use: + +```bash +pip install -e ".[aloha, pusht]" +``` + +### Installation from PyPI + +**Core Library:** +Install the base package with: + +```bash +pip install lerobot +``` + +_This installs only the default dependencies._ + +**Extra Features:** +To install additional functionality, use one of the following: + +```bash +pip install 'lerobot[all]' # All available features +pip install 'lerobot[aloha,pusht]' # Specific features (Aloha & Pusht) +pip install 'lerobot[feetech]' # Feetech motor support +``` + +_Replace `[...]` with your desired features._ + +**Available Tags:** +For a full list of optional dependencies, see: +https://pypi.org/project/lerobot/ + +> [!NOTE] +> For lerobot 0.4.0, if you want to install pi tags, you will have to do: `pip install "lerobot[pi]@git+https://github.com/huggingface/lerobot.git"`. +> +> This will be solved in the next patch release + +### Weights & Biases + +To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with + +```bash +wandb login +``` + +(note: you will also need to enable WandB in the configuration. See below.) 
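For instance, a minimal sketch of launching a training run with W&B logging turned on might look like the following; the `--wandb.enable` flag is an assumption based on LeRobot's draccus-style CLI (the `--config_path` value reuses the Diffusion Policy example shown further below), so check `lerobot-train --help` for the exact option names:

```bash
# Hedged sketch: start a training run with Weights & Biases logging enabled.
# --wandb.enable is assumed from the draccus-style config; verify with `lerobot-train --help`.
lerobot-train \
  --config_path=lerobot/diffusion_pusht \
  --wandb.enable=true
```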
+ +### Visualize datasets + +Check out [example 1](https://github.com/huggingface/lerobot/blob/main/examples/dataset/load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically downloads data from the Hugging Face hub. + +You can also locally visualize episodes from a dataset on the hub by executing our script from the command line: + +```bash +lerobot-dataset-viz \ + --repo-id lerobot/pusht \ + --episode-index 0 +``` + +or from a dataset in a local folder with the `root` option and the `--mode local` (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`) + +```bash +lerobot-dataset-viz \ + --repo-id lerobot/pusht \ + --root ./my_local_data_dir \ + --mode local \ + --episode-index 0 +``` + +It will open `rerun.io` and display the camera streams, robot states and actions, like this: + +https://github-production-user-asset-6210df.s3.amazonaws.com/4681518/328035972-fd46b787-b532-47e2-bb6f-fd536a55a7ed.mov?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVCODYLSA53PQK4ZA%2F20240505%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240505T172924Z&X-Amz-Expires=300&X-Amz-Signature=d680b26c532eeaf80740f08af3320d22ad0b8a4e4da1bcc4f33142c15b509eda&X-Amz-SignedHeaders=host&actor_id=24889239&key_id=0&repo_id=748713144 + +Our script can also visualize datasets stored on a distant server. See `lerobot-dataset-viz --help` for more instructions. + +### The `LeRobotDataset` format + +A dataset in `LeRobotDataset` format is very simple to use. It can be loaded from a repository on the Hugging Face hub or a local folder simply with e.g. `dataset = LeRobotDataset("lerobot/aloha_static_coffee")` and can be indexed into like any Hugging Face and PyTorch dataset. For instance `dataset[0]` will retrieve a single temporal frame from the dataset containing observation(s) and an action as PyTorch tensors ready to be fed to a model. + +A specificity of `LeRobotDataset` is that, rather than retrieving a single frame by its index, we can retrieve several frames based on their temporal relationship with the indexed frame, by setting `delta_timestamps` to a list of relative times with respect to the indexed frame. For example, with `delta_timestamps = {"observation.image": [-1, -0.5, -0.2, 0]}` one can retrieve, for a given index, 4 frames: 3 "previous" frames 1 second, 0.5 seconds, and 0.2 seconds before the indexed frame, and the indexed frame itself (corresponding to the 0 entry). See example [1_load_lerobot_dataset.py](https://github.com/huggingface/lerobot/blob/main/examples/dataset/load_lerobot_dataset.py) for more details on `delta_timestamps`. + +Under the hood, the `LeRobotDataset` format makes use of several ways to serialize data which can be useful to understand if you plan to work more closely with this format. We tried to make a flexible yet simple dataset format that would cover most type of features and specificities present in reinforcement learning and robotics, in simulation and in real-world, with a focus on cameras and robot states but easily extended to other types of sensory inputs as long as they can be represented by a tensor. + +Here are the important details and internal structure organization of a typical `LeRobotDataset` instantiated with `dataset = LeRobotDataset("lerobot/aloha_static_coffee")`. The exact features will change from dataset to dataset but not the main aspects: + +``` +dataset attributes: + ├ hf_dataset: a Hugging Face dataset (backed by Arrow/parquet). 
Typical features example: + │ ├ observation.images.cam_high (VideoFrame): + │ │ VideoFrame = {'path': path to a mp4 video, 'timestamp' (float32): timestamp in the video} + │ ├ observation.state (list of float32): position of an arm joints (for instance) + │ ... (more observations) + │ ├ action (list of float32): goal position of an arm joints (for instance) + │ ├ episode_index (int64): index of the episode for this sample + │ ├ frame_index (int64): index of the frame for this sample in the episode ; starts at 0 for each episode + │ ├ timestamp (float32): timestamp in the episode + │ ├ next.done (bool): indicates the end of an episode ; True for the last frame in each episode + │ └ index (int64): general index in the whole dataset + ├ meta: a LeRobotDatasetMetadata object containing: + │ ├ info: a dictionary of metadata on the dataset + │ │ ├ codebase_version (str): this is to keep track of the codebase version the dataset was created with + │ │ ├ fps (int): frame per second the dataset is recorded/synchronized to + │ │ ├ features (dict): all features contained in the dataset with their shapes and types + │ │ ├ total_episodes (int): total number of episodes in the dataset + │ │ ├ total_frames (int): total number of frames in the dataset + │ │ ├ robot_type (str): robot type used for recording + │ │ ├ data_path (str): formattable string for the parquet files + │ │ └ video_path (str): formattable string for the video files (if using videos) + │ ├ episodes: a DataFrame containing episode metadata with columns: + │ │ ├ episode_index (int): index of the episode + │ │ ├ tasks (list): list of tasks for this episode + │ │ ├ length (int): number of frames in this episode + │ │ ├ dataset_from_index (int): start index of this episode in the dataset + │ │ └ dataset_to_index (int): end index of this episode in the dataset + │ ├ stats: a dictionary of statistics (max, mean, min, std) for each feature in the dataset, for instance + │ │ ├ observation.images.front_cam: {'max': tensor with same number of dimensions (e.g. `(c, 1, 1)` for images, `(c,)` for states), etc.} + │ │ └ ... + │ └ tasks: a DataFrame containing task information with task names as index and task_index as values + ├ root (Path): local directory where the dataset is stored + ├ image_transforms (Callable): optional image transformations to apply to visual modalities + └ delta_timestamps (dict): optional delta timestamps for temporal queries +``` + +A `LeRobotDataset` is serialised using several widespread file formats for each of its parts, namely: + +- hf_dataset stored using Hugging Face datasets library serialization to parquet +- videos are stored in mp4 format to save space +- metadata are stored in plain json/jsonl files + +Dataset can be uploaded/downloaded from the HuggingFace hub seamlessly. To work on a local dataset, you can specify its location with the `root` argument if it's not in the default `~/.cache/huggingface/lerobot` location. + +#### Reproduce state-of-the-art (SOTA) + +We provide some pretrained policies on our [hub page](https://huggingface.co/lerobot) that can achieve state-of-the-art performances. +You can reproduce their training by loading the config from their run. Simply running: + +```bash +lerobot-train --config_path=lerobot/diffusion_pusht +``` + +reproduces SOTA results for Diffusion Policy on the PushT task. + +## Contribute + +If you would like to contribute to 🤗 LeRobot, please check out our [contribution guide](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md). 
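As a hedged starting point (the contribution guide remains the authoritative reference), a typical contributor setup from a source checkout could combine the `dev` and `test` extras defined in `pyproject.toml`:

```bash
# Hedged sketch of a contributor environment; see CONTRIBUTING.md for the exact steps.
pip install -e ".[dev,test]"  # dev/test extras from pyproject.toml (pre-commit, debugpy, pytest, ...)
pre-commit install            # assumes the repo ships a pre-commit config; pre-commit comes from the dev extra
pytest tests                  # pytest comes from the test extra
```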
+ +### Add a pretrained policy + +Once you have trained a policy you may upload it to the Hugging Face hub using a hub id that looks like `${hf_user}/${repo_name}` (e.g. [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht)). + +You first need to find the checkpoint folder located inside your experiment directory (e.g. `outputs/train/2024-05-05/20-21-12_aloha_act_default/checkpoints/002500`). Within that there is a `pretrained_model` directory which should contain: + +- `config.json`: A serialized version of the policy configuration (following the policy's dataclass config). +- `model.safetensors`: A set of `torch.nn.Module` parameters, saved in [Hugging Face Safetensors](https://huggingface.co/docs/safetensors/index) format. +- `train_config.json`: A consolidated configuration containing all parameters used for training. The policy configuration should match `config.json` exactly. This is useful for anyone who wants to evaluate your policy or for reproducibility. + +To upload these to the hub, run the following: + +```bash +huggingface-cli upload ${hf_user}/${repo_name} path/to/pretrained_model +``` + +See [lerobot_eval.py](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/lerobot_eval.py) for an example of how other people may use your policy. + +### Acknowledgment + +- The LeRobot team 🤗 for building SmolVLA [Paper](https://arxiv.org/abs/2506.01844), [Blog](https://huggingface.co/blog/smolvla). +- Thanks to Tony Zhao, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io). +- Thanks to Cheng Chi, Zhenjia Xu and colleagues for open sourcing Diffusion policy, Pusht environment and datasets, as well as UMI datasets. Ours are adapted from [Diffusion Policy](https://diffusion-policy.cs.columbia.edu) and [UMI Gripper](https://umi-gripper.github.io). +- Thanks to Nicklas Hansen, Yunhai Feng and colleagues for open sourcing TDMPC policy, Simxarm environments and datasets. Ours are adapted from [TDMPC](https://github.com/nicklashansen/tdmpc) and [FOWM](https://www.yunhaifeng.com/FOWM). +- Thanks to Antonio Loquercio and Ashish Kumar for their early support. +- Thanks to [Seungjae (Jay) Lee](https://sjlee.cc/), [Mahi Shafiullah](https://mahis.life/) and colleagues for open sourcing [VQ-BeT](https://sjlee.cc/vq-bet/) policy and helping us adapt the codebase to our repository. The policy is adapted from [VQ-BeT repo](https://github.com/jayLEE0301/vq_bet_official). 
+ +## Citation + +If you want, you can cite this work with: + +```bibtex +@misc{cadene2024lerobot, + author = {Cadene, Remi and Alibert, Simon and Soare, Alexander and Gallouedec, Quentin and Zouitine, Adil and Palma, Steven and Kooijmans, Pepijn and Aractingi, Michel and Shukor, Mustafa and Aubakirova, Dana and Russi, Martino and Capuano, Francesco and Pascal, Caroline and Choghari, Jade and Moss, Jess and Wolf, Thomas}, + title = {LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch}, + howpublished = "\url{https://github.com/huggingface/lerobot}", + year = {2024} +} +``` + +## Star History + +[![Star History Chart](https://api.star-history.com/svg?repos=huggingface/lerobot&type=Timeline)](https://star-history.com/#huggingface/lerobot&Timeline) diff --git a/src/lerobot.egg-info/SOURCES.txt b/src/lerobot.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..05187dcc257b155a838b26f047fcd18c2e25ab45 --- /dev/null +++ b/src/lerobot.egg-info/SOURCES.txt @@ -0,0 +1,258 @@ +LICENSE +MANIFEST.in +README.md +pyproject.toml +src/lerobot/__init__.py +src/lerobot/__version__.py +src/lerobot.egg-info/PKG-INFO +src/lerobot.egg-info/SOURCES.txt +src/lerobot.egg-info/dependency_links.txt +src/lerobot.egg-info/entry_points.txt +src/lerobot.egg-info/requires.txt +src/lerobot.egg-info/top_level.txt +src/lerobot/async_inference/configs.py +src/lerobot/async_inference/constants.py +src/lerobot/async_inference/helpers.py +src/lerobot/async_inference/policy_server.py +src/lerobot/async_inference/robot_client.py +src/lerobot/cameras/__init__.py +src/lerobot/cameras/camera.py +src/lerobot/cameras/configs.py +src/lerobot/cameras/utils.py +src/lerobot/cameras/opencv/__init__.py +src/lerobot/cameras/opencv/camera_opencv.py +src/lerobot/cameras/opencv/configuration_opencv.py +src/lerobot/cameras/reachy2_camera/__init__.py +src/lerobot/cameras/reachy2_camera/configuration_reachy2_camera.py +src/lerobot/cameras/reachy2_camera/reachy2_camera.py +src/lerobot/cameras/realsense/__init__.py +src/lerobot/cameras/realsense/camera_realsense.py +src/lerobot/cameras/realsense/configuration_realsense.py +src/lerobot/configs/default.py +src/lerobot/configs/eval.py +src/lerobot/configs/parser.py +src/lerobot/configs/policies.py +src/lerobot/configs/train.py +src/lerobot/configs/types.py +src/lerobot/datasets/aggregate.py +src/lerobot/datasets/backward_compatibility.py +src/lerobot/datasets/card_template.md +src/lerobot/datasets/compute_stats.py +src/lerobot/datasets/dataset_tools.py +src/lerobot/datasets/factory.py +src/lerobot/datasets/image_writer.py +src/lerobot/datasets/lerobot_dataset.py +src/lerobot/datasets/online_buffer.py +src/lerobot/datasets/pipeline_features.py +src/lerobot/datasets/sampler.py +src/lerobot/datasets/streaming_dataset.py +src/lerobot/datasets/transforms.py +src/lerobot/datasets/utils.py +src/lerobot/datasets/video_utils.py +src/lerobot/datasets/push_dataset_to_hub/utils.py +src/lerobot/datasets/v30/augment_dataset_quantile_stats.py +src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py +src/lerobot/envs/__init__.py +src/lerobot/envs/configs.py +src/lerobot/envs/factory.py +src/lerobot/envs/libero.py +src/lerobot/envs/metaworld.py +src/lerobot/envs/utils.py +src/lerobot/model/kinematics.py +src/lerobot/motors/__init__.py +src/lerobot/motors/calibration_gui.py +src/lerobot/motors/encoding_utils.py +src/lerobot/motors/motors_bus.py +src/lerobot/motors/dynamixel/__init__.py +src/lerobot/motors/dynamixel/dynamixel.py 
+src/lerobot/motors/dynamixel/tables.py +src/lerobot/motors/feetech/__init__.py +src/lerobot/motors/feetech/feetech.py +src/lerobot/motors/feetech/tables.py +src/lerobot/optim/__init__.py +src/lerobot/optim/factory.py +src/lerobot/optim/optimizers.py +src/lerobot/optim/schedulers.py +src/lerobot/policies/__init__.py +src/lerobot/policies/factory.py +src/lerobot/policies/pretrained.py +src/lerobot/policies/utils.py +src/lerobot/policies/act/configuration_act.py +src/lerobot/policies/act/modeling_act.py +src/lerobot/policies/act/processor_act.py +src/lerobot/policies/diffusion/configuration_diffusion.py +src/lerobot/policies/diffusion/modeling_diffusion.py +src/lerobot/policies/diffusion/processor_diffusion.py +src/lerobot/policies/groot/__init__.py +src/lerobot/policies/groot/configuration_groot.py +src/lerobot/policies/groot/groot_n1.py +src/lerobot/policies/groot/modeling_groot.py +src/lerobot/policies/groot/processor_groot.py +src/lerobot/policies/groot/utils.py +src/lerobot/policies/groot/action_head/__init__.py +src/lerobot/policies/groot/action_head/action_encoder.py +src/lerobot/policies/groot/action_head/cross_attention_dit.py +src/lerobot/policies/groot/action_head/flow_matching_action_head.py +src/lerobot/policies/groot/eagle2_hg_model/configuration_eagle2_5_vl.py +src/lerobot/policies/groot/eagle2_hg_model/image_processing_eagle2_5_vl_fast.py +src/lerobot/policies/groot/eagle2_hg_model/modeling_eagle2_5_vl.py +src/lerobot/policies/groot/eagle2_hg_model/processing_eagle2_5_vl.py +src/lerobot/policies/pi0/__init__.py +src/lerobot/policies/pi0/configuration_pi0.py +src/lerobot/policies/pi0/modeling_pi0.py +src/lerobot/policies/pi0/processor_pi0.py +src/lerobot/policies/pi05/__init__.py +src/lerobot/policies/pi05/configuration_pi05.py +src/lerobot/policies/pi05/modeling_pi05.py +src/lerobot/policies/pi05/processor_pi05.py +src/lerobot/policies/rtc/action_queue.py +src/lerobot/policies/rtc/configuration_rtc.py +src/lerobot/policies/rtc/debug_tracker.py +src/lerobot/policies/rtc/debug_visualizer.py +src/lerobot/policies/rtc/latency_tracker.py +src/lerobot/policies/rtc/modeling_rtc.py +src/lerobot/policies/sac/configuration_sac.py +src/lerobot/policies/sac/modeling_sac.py +src/lerobot/policies/sac/processor_sac.py +src/lerobot/policies/sac/reward_model/configuration_classifier.py +src/lerobot/policies/sac/reward_model/modeling_classifier.py +src/lerobot/policies/sac/reward_model/processor_classifier.py +src/lerobot/policies/smolvla/configuration_smolvla.py +src/lerobot/policies/smolvla/modeling_smolvla.py +src/lerobot/policies/smolvla/processor_smolvla.py +src/lerobot/policies/smolvla/smolvlm_with_expert.py +src/lerobot/policies/tdmpc/configuration_tdmpc.py +src/lerobot/policies/tdmpc/modeling_tdmpc.py +src/lerobot/policies/tdmpc/processor_tdmpc.py +src/lerobot/policies/vqbet/configuration_vqbet.py +src/lerobot/policies/vqbet/modeling_vqbet.py +src/lerobot/policies/vqbet/processor_vqbet.py +src/lerobot/policies/vqbet/vqbet_utils.py +src/lerobot/processor/__init__.py +src/lerobot/processor/batch_processor.py +src/lerobot/processor/converters.py +src/lerobot/processor/core.py +src/lerobot/processor/delta_action_processor.py +src/lerobot/processor/device_processor.py +src/lerobot/processor/env_processor.py +src/lerobot/processor/factory.py +src/lerobot/processor/gym_action_processor.py +src/lerobot/processor/hil_processor.py +src/lerobot/processor/joint_observations_processor.py +src/lerobot/processor/migrate_policy_normalization.py +src/lerobot/processor/normalize_processor.py 
+src/lerobot/processor/observation_processor.py +src/lerobot/processor/pipeline.py +src/lerobot/processor/policy_robot_bridge.py +src/lerobot/processor/rename_processor.py +src/lerobot/processor/tokenizer_processor.py +src/lerobot/rl/actor.py +src/lerobot/rl/buffer.py +src/lerobot/rl/crop_dataset_roi.py +src/lerobot/rl/eval_policy.py +src/lerobot/rl/gym_manipulator.py +src/lerobot/rl/learner.py +src/lerobot/rl/learner_service.py +src/lerobot/rl/process.py +src/lerobot/rl/queue.py +src/lerobot/rl/wandb_utils.py +src/lerobot/robots/__init__.py +src/lerobot/robots/config.py +src/lerobot/robots/robot.py +src/lerobot/robots/utils.py +src/lerobot/robots/bi_so100_follower/__init__.py +src/lerobot/robots/bi_so100_follower/bi_so100_follower.py +src/lerobot/robots/bi_so100_follower/config_bi_so100_follower.py +src/lerobot/robots/hope_jr/__init__.py +src/lerobot/robots/hope_jr/config_hope_jr.py +src/lerobot/robots/hope_jr/hope_jr_arm.py +src/lerobot/robots/hope_jr/hope_jr_hand.py +src/lerobot/robots/koch_follower/__init__.py +src/lerobot/robots/koch_follower/config_koch_follower.py +src/lerobot/robots/koch_follower/koch_follower.py +src/lerobot/robots/lekiwi/__init__.py +src/lerobot/robots/lekiwi/config_lekiwi.py +src/lerobot/robots/lekiwi/lekiwi.py +src/lerobot/robots/lekiwi/lekiwi_client.py +src/lerobot/robots/lekiwi/lekiwi_host.py +src/lerobot/robots/reachy2/__init__.py +src/lerobot/robots/reachy2/configuration_reachy2.py +src/lerobot/robots/reachy2/robot_reachy2.py +src/lerobot/robots/so100_follower/__init__.py +src/lerobot/robots/so100_follower/config_so100_follower.py +src/lerobot/robots/so100_follower/robot_kinematic_processor.py +src/lerobot/robots/so100_follower/so100_follower.py +src/lerobot/robots/so101_follower/__init__.py +src/lerobot/robots/so101_follower/config_so101_follower.py +src/lerobot/robots/so101_follower/so101_follower.py +src/lerobot/scripts/PruebaDeCamara2.py +src/lerobot/scripts/lerobot_calibrate.py +src/lerobot/scripts/lerobot_dataset_viz.py +src/lerobot/scripts/lerobot_edit_dataset.py +src/lerobot/scripts/lerobot_eval.py +src/lerobot/scripts/lerobot_find_cameras.py +src/lerobot/scripts/lerobot_find_joint_limits.py +src/lerobot/scripts/lerobot_find_port.py +src/lerobot/scripts/lerobot_imgtransform_viz.py +src/lerobot/scripts/lerobot_info.py +src/lerobot/scripts/lerobot_record.py +src/lerobot/scripts/lerobot_replay.py +src/lerobot/scripts/lerobot_setup_motors.py +src/lerobot/scripts/lerobot_teleoperate.py +src/lerobot/scripts/lerobot_train.py +src/lerobot/scripts/test_camara.py +src/lerobot/teleoperators/__init__.py +src/lerobot/teleoperators/config.py +src/lerobot/teleoperators/teleoperator.py +src/lerobot/teleoperators/utils.py +src/lerobot/teleoperators/bi_so100_leader/__init__.py +src/lerobot/teleoperators/bi_so100_leader/bi_so100_leader.py +src/lerobot/teleoperators/bi_so100_leader/config_bi_so100_leader.py +src/lerobot/teleoperators/gamepad/__init__.py +src/lerobot/teleoperators/gamepad/configuration_gamepad.py +src/lerobot/teleoperators/gamepad/gamepad_utils.py +src/lerobot/teleoperators/gamepad/teleop_gamepad.py +src/lerobot/teleoperators/homunculus/__init__.py +src/lerobot/teleoperators/homunculus/config_homunculus.py +src/lerobot/teleoperators/homunculus/homunculus_arm.py +src/lerobot/teleoperators/homunculus/homunculus_glove.py +src/lerobot/teleoperators/homunculus/joints_translation.py +src/lerobot/teleoperators/keyboard/__init__.py +src/lerobot/teleoperators/keyboard/configuration_keyboard.py +src/lerobot/teleoperators/keyboard/teleop_keyboard.py 
+src/lerobot/teleoperators/koch_leader/__init__.py +src/lerobot/teleoperators/koch_leader/config_koch_leader.py +src/lerobot/teleoperators/koch_leader/koch_leader.py +src/lerobot/teleoperators/phone/__init__.py +src/lerobot/teleoperators/phone/config_phone.py +src/lerobot/teleoperators/phone/phone_processor.py +src/lerobot/teleoperators/phone/teleop_phone.py +src/lerobot/teleoperators/reachy2_teleoperator/__init__.py +src/lerobot/teleoperators/reachy2_teleoperator/config_reachy2_teleoperator.py +src/lerobot/teleoperators/reachy2_teleoperator/reachy2_teleoperator.py +src/lerobot/teleoperators/so100_leader/__init__.py +src/lerobot/teleoperators/so100_leader/config_so100_leader.py +src/lerobot/teleoperators/so100_leader/so100_leader.py +src/lerobot/teleoperators/so101_leader/__init__.py +src/lerobot/teleoperators/so101_leader/config_so101_leader.py +src/lerobot/teleoperators/so101_leader/so101_leader.py +src/lerobot/templates/lerobot_modelcard_template.md +src/lerobot/transport/services_pb2.py +src/lerobot/transport/services_pb2_grpc.py +src/lerobot/transport/utils.py +src/lerobot/utils/constants.py +src/lerobot/utils/control_utils.py +src/lerobot/utils/errors.py +src/lerobot/utils/hub.py +src/lerobot/utils/import_utils.py +src/lerobot/utils/io_utils.py +src/lerobot/utils/logging_utils.py +src/lerobot/utils/random_utils.py +src/lerobot/utils/robot_utils.py +src/lerobot/utils/rotation.py +src/lerobot/utils/train_utils.py +src/lerobot/utils/transition.py +src/lerobot/utils/utils.py +src/lerobot/utils/visualization_utils.py +tests/test_available.py +tests/test_control_robot.py \ No newline at end of file diff --git a/src/lerobot.egg-info/dependency_links.txt b/src/lerobot.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/src/lerobot.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/src/lerobot.egg-info/entry_points.txt b/src/lerobot.egg-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..66c9aa778d09a611b51f08bb8b6344253843158d --- /dev/null +++ b/src/lerobot.egg-info/entry_points.txt @@ -0,0 +1,15 @@ +[console_scripts] +lerobot-calibrate = lerobot.scripts.lerobot_calibrate:main +lerobot-dataset-viz = lerobot.scripts.lerobot_dataset_viz:main +lerobot-edit-dataset = lerobot.scripts.lerobot_edit_dataset:main +lerobot-eval = lerobot.scripts.lerobot_eval:main +lerobot-find-cameras = lerobot.scripts.lerobot_find_cameras:main +lerobot-find-joint-limits = lerobot.scripts.lerobot_find_joint_limits:main +lerobot-find-port = lerobot.scripts.lerobot_find_port:main +lerobot-imgtransform-viz = lerobot.scripts.lerobot_imgtransform_viz:main +lerobot-info = lerobot.scripts.lerobot_info:main +lerobot-record = lerobot.scripts.lerobot_record:main +lerobot-replay = lerobot.scripts.lerobot_replay:main +lerobot-setup-motors = lerobot.scripts.lerobot_setup_motors:main +lerobot-teleoperate = lerobot.scripts.lerobot_teleoperate:main +lerobot-train = lerobot.scripts.lerobot_train:main diff --git a/src/lerobot.egg-info/requires.txt b/src/lerobot.egg-info/requires.txt new file mode 100644 index 0000000000000000000000000000000000000000..f479b71ba86cab7d244267286fb273c0408decc8 --- /dev/null +++ b/src/lerobot.egg-info/requires.txt @@ -0,0 +1,162 @@ +datasets<4.2.0,>=4.0.0 +diffusers<0.36.0,>=0.27.2 +huggingface-hub[cli,hf-transfer]<0.36.0,>=0.34.2 +accelerate<2.0.0,>=1.10.0 +setuptools<81.0.0,>=71.0.0 +cmake<4.2.0,>=3.29.0.1 +einops<0.9.0,>=0.8.0 
+opencv-python-headless<4.13.0,>=4.9.0 +av<16.0.0,>=15.0.0 +jsonlines<5.0.0,>=4.0.0 +packaging<26.0,>=24.2 +pynput<1.9.0,>=1.7.7 +pyserial<4.0,>=3.5 +wandb<0.22.0,>=0.20.0 +torch<2.8.0,>=2.2.1 +torchvision<0.23.0,>=0.21.0 +draccus==0.10.0 +gymnasium<2.0.0,>=1.1.1 +rerun-sdk<0.27.0,>=0.24.0 +deepdiff<9.0.0,>=7.0.1 +imageio[ffmpeg]<3.0.0,>=2.34.0 +termcolor<4.0.0,>=2.4.0 + +[:sys_platform != "win32" and (sys_platform != "linux" or (platform_machine != "aarch64" and platform_machine != "arm64" and platform_machine != "armv7l")) and (sys_platform != "darwin" or platform_machine != "x86_64")] +torchcodec<0.6.0,>=0.2.1 + +[all] +lerobot[dynamixel] +lerobot[gamepad] +lerobot[hopejr] +lerobot[lekiwi] +lerobot[reachy2] +lerobot[kinematics] +lerobot[intelrealsense] +lerobot[pi] +lerobot[smolvla] +lerobot[hilserl] +lerobot[async] +lerobot[dev] +lerobot[test] +lerobot[video_benchmark] +lerobot[aloha] +lerobot[pusht] +lerobot[phone] +lerobot[libero] +lerobot[metaworld] + +[aloha] +gym-aloha<0.2.0,>=0.1.2 + +[async] +lerobot[grpcio-dep] +matplotlib<4.0.0,>=3.10.3 + +[dev] +pre-commit<5.0.0,>=3.7.0 +debugpy<1.9.0,>=1.8.1 +lerobot[grpcio-dep] +grpcio-tools==1.73.1 + +[dynamixel] +dynamixel-sdk<3.9.0,>=3.7.31 + +[feetech] +feetech-servo-sdk<2.0.0,>=1.0.0 + +[gamepad] +lerobot[pygame-dep] +hidapi<0.15.0,>=0.14.0 + +[groot] +lerobot[transformers-dep] +peft<1.0.0,>=0.13.0 +dm-tree<1.0.0,>=0.1.8 +timm<1.1.0,>=1.0.0 +safetensors<1.0.0,>=0.4.3 +Pillow<13.0.0,>=10.0.0 +ninja<2.0.0,>=1.11.1 + +[groot:platform_machine == "AMD64" or platform_machine == "x86_64"] +decord<1.0.0,>=0.6.0 + +[groot:sys_platform != "darwin"] +flash-attn<3.0.0,>=2.5.9 + +[grpcio-dep] +grpcio==1.73.1 +protobuf==6.31.0 + +[hilserl] +lerobot[transformers-dep] +gym-hil<0.2.0,>=0.1.13 +lerobot[grpcio-dep] +lerobot[placo-dep] + +[hopejr] +lerobot[feetech] +lerobot[pygame-dep] + +[intelrealsense] + +[intelrealsense:sys_platform != "darwin"] +pyrealsense2<2.57.0,>=2.55.1.6486 + +[intelrealsense:sys_platform == "darwin"] +pyrealsense2-macosx<2.55.0,>=2.54 + +[kinematics] +lerobot[placo-dep] + +[lekiwi] +lerobot[feetech] +pyzmq<28.0.0,>=26.2.1 + +[libero] +lerobot[transformers-dep] +hf-libero<0.2.0,>=0.1.3 + +[metaworld] +metaworld==3.0.0 + +[phone] +hebi-py<2.12.0,>=2.8.0 +teleop<0.2.0,>=0.1.0 +fastapi<1.0 + +[pi] +transformers@ git+https://github.com/huggingface/transformers.git@fix/lerobot_openpi + +[placo-dep] +placo<0.10.0,>=0.9.6 + +[pusht] +gym-pusht<0.2.0,>=0.1.5 +pymunk<7.0.0,>=6.6.0 + +[pygame-dep] +pygame<2.7.0,>=2.5.1 + +[reachy2] +reachy2_sdk<1.1.0,>=1.0.14 + +[smolvla] +lerobot[transformers-dep] +num2words<0.6.0,>=0.5.14 +accelerate<2.0.0,>=1.7.0 +safetensors<1.0.0,>=0.4.3 + +[test] +pytest<9.0.0,>=8.1.0 +pytest-timeout<3.0.0,>=2.4.0 +pytest-cov<8.0.0,>=5.0.0 + +[test:sys_platform != "win32"] +mock-serial<0.1.0,>=0.0.1 + +[transformers-dep] +transformers<5.0.0,>=4.53.0 + +[video_benchmark] +scikit-image<0.26.0,>=0.23.2 +pandas<2.4.0,>=2.2.2 diff --git a/src/lerobot.egg-info/top_level.txt b/src/lerobot.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..89bce37a19870be68c4bb2a70525c9c6c58f1023 --- /dev/null +++ b/src/lerobot.egg-info/top_level.txt @@ -0,0 +1 @@ +lerobot diff --git a/src/lerobot/__init__.py b/src/lerobot/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d461ee1b718dc651bfb4d4610f170ebcb4462f4a --- /dev/null +++ b/src/lerobot/__init__.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This file contains lists of available environments, dataset and policies to reflect the current state of LeRobot library. +We do not want to import all the dependencies, but instead we keep it lightweight to ensure fast access to these variables. + +Example: + ```python + import lerobot + print(lerobot.available_envs) + print(lerobot.available_tasks_per_env) + print(lerobot.available_datasets) + print(lerobot.available_datasets_per_env) + print(lerobot.available_real_world_datasets) + print(lerobot.available_policies) + print(lerobot.available_policies_per_env) + print(lerobot.available_robots) + print(lerobot.available_cameras) + print(lerobot.available_motors) + ``` + +When implementing a new dataset loadable with LeRobotDataset follow these steps: +- Update `available_datasets_per_env` in `lerobot/__init__.py` + +When implementing a new environment (e.g. `gym_aloha`), follow these steps: +- Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py` + +When implementing a new policy class (e.g. `DiffusionPolicy`) follow these steps: +- Update `available_policies` and `available_policies_per_env`, in `lerobot/__init__.py` +- Set the required `name` class attribute. +- Update variables in `tests/test_available.py` by importing your new Policy class +""" + +import itertools + +from lerobot.__version__ import __version__ # noqa: F401 + +# TODO(rcadene): Improve policies and envs. As of now, an item in `available_policies` +# refers to a yaml file AND a modeling name. Same for `available_envs` which refers to +# a yaml file AND a environment name. The difference should be more obvious. +available_tasks_per_env = { + "aloha": [ + "AlohaInsertion-v0", + "AlohaTransferCube-v0", + ], + "pusht": ["PushT-v0"], +} +available_envs = list(available_tasks_per_env.keys()) + +available_datasets_per_env = { + "aloha": [ + "lerobot/aloha_sim_insertion_human", + "lerobot/aloha_sim_insertion_scripted", + "lerobot/aloha_sim_transfer_cube_human", + "lerobot/aloha_sim_transfer_cube_scripted", + "lerobot/aloha_sim_insertion_human_image", + "lerobot/aloha_sim_insertion_scripted_image", + "lerobot/aloha_sim_transfer_cube_human_image", + "lerobot/aloha_sim_transfer_cube_scripted_image", + ], + # TODO(alexander-soare): Add "lerobot/pusht_keypoints". Right now we can't because this is too tightly + # coupled with tests. 
+ "pusht": ["lerobot/pusht", "lerobot/pusht_image"], +} + +available_real_world_datasets = [ + "lerobot/aloha_mobile_cabinet", + "lerobot/aloha_mobile_chair", + "lerobot/aloha_mobile_elevator", + "lerobot/aloha_mobile_shrimp", + "lerobot/aloha_mobile_wash_pan", + "lerobot/aloha_mobile_wipe_wine", + "lerobot/aloha_static_battery", + "lerobot/aloha_static_candy", + "lerobot/aloha_static_coffee", + "lerobot/aloha_static_coffee_new", + "lerobot/aloha_static_cups_open", + "lerobot/aloha_static_fork_pick_up", + "lerobot/aloha_static_pingpong_test", + "lerobot/aloha_static_pro_pencil", + "lerobot/aloha_static_screw_driver", + "lerobot/aloha_static_tape", + "lerobot/aloha_static_thread_velcro", + "lerobot/aloha_static_towel", + "lerobot/aloha_static_vinh_cup", + "lerobot/aloha_static_vinh_cup_left", + "lerobot/aloha_static_ziploc_slide", + "lerobot/umi_cup_in_the_wild", + "lerobot/unitreeh1_fold_clothes", + "lerobot/unitreeh1_rearrange_objects", + "lerobot/unitreeh1_two_robot_greeting", + "lerobot/unitreeh1_warehouse", + "lerobot/nyu_rot_dataset", + "lerobot/utokyo_saytap", + "lerobot/imperialcollege_sawyer_wrist_cam", + "lerobot/utokyo_xarm_bimanual", + "lerobot/tokyo_u_lsmo", + "lerobot/utokyo_pr2_opening_fridge", + "lerobot/cmu_franka_exploration_dataset", + "lerobot/cmu_stretch", + "lerobot/asu_table_top", + "lerobot/utokyo_pr2_tabletop_manipulation", + "lerobot/utokyo_xarm_pick_and_place", + "lerobot/ucsd_kitchen_dataset", + "lerobot/austin_buds_dataset", + "lerobot/dlr_sara_grid_clamp", + "lerobot/conq_hose_manipulation", + "lerobot/columbia_cairlab_pusht_real", + "lerobot/dlr_sara_pour", + "lerobot/dlr_edan_shared_control", + "lerobot/ucsd_pick_and_place_dataset", + "lerobot/berkeley_cable_routing", + "lerobot/nyu_franka_play_dataset", + "lerobot/austin_sirius_dataset", + "lerobot/cmu_play_fusion", + "lerobot/berkeley_gnm_sac_son", + "lerobot/nyu_door_opening_surprising_effectiveness", + "lerobot/berkeley_fanuc_manipulation", + "lerobot/jaco_play", + "lerobot/viola", + "lerobot/kaist_nonprehensile", + "lerobot/berkeley_mvp", + "lerobot/uiuc_d3field", + "lerobot/berkeley_gnm_recon", + "lerobot/austin_sailor_dataset", + "lerobot/utaustin_mutex", + "lerobot/roboturk", + "lerobot/stanford_hydra_dataset", + "lerobot/berkeley_autolab_ur5", + "lerobot/stanford_robocook", + "lerobot/toto", + "lerobot/fmb", + "lerobot/droid_100", + "lerobot/berkeley_rpt", + "lerobot/stanford_kuka_multimodal_dataset", + "lerobot/iamlab_cmu_pickup_insert", + "lerobot/taco_play", + "lerobot/berkeley_gnm_cory_hall", + "lerobot/usc_cloth_sim", +] + +available_datasets = sorted( + set(itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets)) +) + +# lists all available policies from `lerobot/policies` +available_policies = ["act", "diffusion", "tdmpc", "vqbet"] + +# lists all available robots from `lerobot/robots` +available_robots = [ + "koch", + "koch_bimanual", + "aloha", + "so100", + "so101", +] + +# lists all available cameras from `lerobot/cameras` +available_cameras = [ + "opencv", + "intelrealsense", +] + +# lists all available motors from `lerobot/motors` +available_motors = [ + "dynamixel", + "feetech", +] + +# keys and values refer to yaml files +available_policies_per_env = { + "aloha": ["act"], + "pusht": ["diffusion", "vqbet"], + "koch_real": ["act_koch_real"], + "aloha_real": ["act_aloha_real"], +} + +env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for task in tasks] +env_dataset_pairs = [ + (env, dataset) for env, datasets in 
available_datasets_per_env.items() for dataset in datasets +] +env_dataset_policy_triplets = [ + (env, dataset, policy) + for env, datasets in available_datasets_per_env.items() + for dataset in datasets + for policy in available_policies_per_env[env] +] diff --git a/src/lerobot/__version__.py b/src/lerobot/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..90ee361a0b410c532dcb6ce2b51cd507fc86c4a5 --- /dev/null +++ b/src/lerobot/__version__.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""To enable `lerobot.__version__`""" + +from importlib.metadata import PackageNotFoundError, version + +try: + __version__ = version("lerobot") +except PackageNotFoundError: + __version__ = "unknown" diff --git a/src/lerobot/async_inference/configs.py b/src/lerobot/async_inference/configs.py new file mode 100644 index 0000000000000000000000000000000000000000..7d0d68383f8dad42acd872b772fd46b0e7442e8f --- /dev/null +++ b/src/lerobot/async_inference/configs.py @@ -0,0 +1,193 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections.abc import Callable +from dataclasses import dataclass, field + +import torch + +from lerobot.robots.config import RobotConfig + +from .constants import ( + DEFAULT_FPS, + DEFAULT_INFERENCE_LATENCY, + DEFAULT_OBS_QUEUE_TIMEOUT, +) + +# Aggregate function registry for CLI usage +AGGREGATE_FUNCTIONS = { + "weighted_average": lambda old, new: 0.3 * old + 0.7 * new, + "latest_only": lambda old, new: new, + "average": lambda old, new: 0.5 * old + 0.5 * new, + "conservative": lambda old, new: 0.7 * old + 0.3 * new, +} + + +def get_aggregate_function(name: str) -> Callable[[torch.Tensor, torch.Tensor], torch.Tensor]: + """Get aggregate function by name from registry.""" + if name not in AGGREGATE_FUNCTIONS: + available = list(AGGREGATE_FUNCTIONS.keys()) + raise ValueError(f"Unknown aggregate function '{name}'. Available: {available}") + return AGGREGATE_FUNCTIONS[name] + + +@dataclass +class PolicyServerConfig: + """Configuration for PolicyServer. + + This class defines all configurable parameters for the PolicyServer, + including networking settings and action chunking specifications. 
+ """ + + # Networking configuration + host: str = field(default="localhost", metadata={"help": "Host address to bind the server to"}) + port: int = field(default=8080, metadata={"help": "Port number to bind the server to"}) + + # Timing configuration + fps: int = field(default=DEFAULT_FPS, metadata={"help": "Frames per second"}) + inference_latency: float = field( + default=DEFAULT_INFERENCE_LATENCY, metadata={"help": "Target inference latency in seconds"} + ) + + obs_queue_timeout: float = field( + default=DEFAULT_OBS_QUEUE_TIMEOUT, metadata={"help": "Timeout for observation queue in seconds"} + ) + + def __post_init__(self): + """Validate configuration after initialization.""" + if self.port < 1 or self.port > 65535: + raise ValueError(f"Port must be between 1 and 65535, got {self.port}") + + if self.environment_dt <= 0: + raise ValueError(f"environment_dt must be positive, got {self.environment_dt}") + + if self.inference_latency < 0: + raise ValueError(f"inference_latency must be non-negative, got {self.inference_latency}") + + if self.obs_queue_timeout < 0: + raise ValueError(f"obs_queue_timeout must be non-negative, got {self.obs_queue_timeout}") + + @classmethod + def from_dict(cls, config_dict: dict) -> "PolicyServerConfig": + """Create a PolicyServerConfig from a dictionary.""" + return cls(**config_dict) + + @property + def environment_dt(self) -> float: + """Environment time step, in seconds""" + return 1 / self.fps + + def to_dict(self) -> dict: + """Convert the configuration to a dictionary.""" + return { + "host": self.host, + "port": self.port, + "fps": self.fps, + "environment_dt": self.environment_dt, + "inference_latency": self.inference_latency, + } + + +@dataclass +class RobotClientConfig: + """Configuration for RobotClient. + + This class defines all configurable parameters for the RobotClient, + including network connection, policy settings, and control behavior. + """ + + # Policy configuration + policy_type: str = field(metadata={"help": "Type of policy to use"}) + pretrained_name_or_path: str = field(metadata={"help": "Pretrained model name or path"}) + + # Robot configuration (for CLI usage - robot instance will be created from this) + robot: RobotConfig = field(metadata={"help": "Robot configuration"}) + + # Policies typically output K actions at max, but we can use less to avoid wasting bandwidth (as actions + # would be aggregated on the client side anyway, depending on the value of `chunk_size_threshold`) + actions_per_chunk: int = field(metadata={"help": "Number of actions per chunk"}) + + # Task instruction for the robot to execute (e.g., 'fold my tshirt') + task: str = field(default="", metadata={"help": "Task instruction for the robot to execute"}) + + # Network configuration + server_address: str = field(default="localhost:8080", metadata={"help": "Server address to connect to"}) + + # Device configuration + policy_device: str = field(default="cpu", metadata={"help": "Device for policy inference"}) + + # Control behavior configuration + chunk_size_threshold: float = field(default=0.5, metadata={"help": "Threshold for chunk size control"}) + fps: int = field(default=DEFAULT_FPS, metadata={"help": "Frames per second"}) + + # Aggregate function configuration (CLI-compatible) + aggregate_fn_name: str = field( + default="weighted_average", + metadata={"help": f"Name of aggregate function to use. 
Options: {list(AGGREGATE_FUNCTIONS.keys())}"}, + ) + + # Debug configuration + debug_visualize_queue_size: bool = field( + default=False, metadata={"help": "Visualize the action queue size"} + ) + + @property + def environment_dt(self) -> float: + """Environment time step, in seconds""" + return 1 / self.fps + + def __post_init__(self): + """Validate configuration after initialization.""" + if not self.server_address: + raise ValueError("server_address cannot be empty") + + if not self.policy_type: + raise ValueError("policy_type cannot be empty") + + if not self.pretrained_name_or_path: + raise ValueError("pretrained_name_or_path cannot be empty") + + if not self.policy_device: + raise ValueError("policy_device cannot be empty") + + if self.chunk_size_threshold < 0 or self.chunk_size_threshold > 1: + raise ValueError(f"chunk_size_threshold must be between 0 and 1, got {self.chunk_size_threshold}") + + if self.fps <= 0: + raise ValueError(f"fps must be positive, got {self.fps}") + + if self.actions_per_chunk <= 0: + raise ValueError(f"actions_per_chunk must be positive, got {self.actions_per_chunk}") + + self.aggregate_fn = get_aggregate_function(self.aggregate_fn_name) + + @classmethod + def from_dict(cls, config_dict: dict) -> "RobotClientConfig": + """Create a RobotClientConfig from a dictionary.""" + return cls(**config_dict) + + def to_dict(self) -> dict: + """Convert the configuration to a dictionary.""" + return { + "server_address": self.server_address, + "policy_type": self.policy_type, + "pretrained_name_or_path": self.pretrained_name_or_path, + "policy_device": self.policy_device, + "chunk_size_threshold": self.chunk_size_threshold, + "fps": self.fps, + "actions_per_chunk": self.actions_per_chunk, + "task": self.task, + "debug_visualize_queue_size": self.debug_visualize_queue_size, + "aggregate_fn_name": self.aggregate_fn_name, + } diff --git a/src/lerobot/async_inference/constants.py b/src/lerobot/async_inference/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..27a76b8bf23ff04a5a7b9cc1d1a86df7d4fad0cf --- /dev/null +++ b/src/lerobot/async_inference/constants.py @@ -0,0 +1,29 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Client side: The environment evolves with a time resolution equal to 1/fps""" + +DEFAULT_FPS = 30 + +"""Server side: Running inference on (at most) 1/fps""" +DEFAULT_INFERENCE_LATENCY = 1 / DEFAULT_FPS + +"""Server side: Timeout for observation queue in seconds""" +DEFAULT_OBS_QUEUE_TIMEOUT = 2 + +# All action chunking policies +SUPPORTED_POLICIES = ["act", "smolvla", "diffusion", "tdmpc", "vqbet", "pi0", "pi05"] + +# TODO: Add all other robots +SUPPORTED_ROBOTS = ["so100_follower", "so101_follower", "bi_so100_follower"] diff --git a/src/lerobot/async_inference/helpers.py b/src/lerobot/async_inference/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..07786b0c3e2cab26d9c7c659e0101264b963644e --- /dev/null +++ b/src/lerobot/async_inference/helpers.py @@ -0,0 +1,296 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import logging.handlers +import os +import time +from dataclasses import dataclass, field +from pathlib import Path + +import torch + +from lerobot.configs.types import PolicyFeature +from lerobot.datasets.utils import build_dataset_frame, hw_to_dataset_features + +# NOTE: Configs need to be loaded for the client to be able to instantiate the policy config +from lerobot.policies import ( # noqa: F401 + ACTConfig, + DiffusionConfig, + PI0Config, + PI05Config, + SmolVLAConfig, + VQBeTConfig, +) +from lerobot.robots.robot import Robot +from lerobot.utils.constants import OBS_IMAGES, OBS_STATE, OBS_STR +from lerobot.utils.utils import init_logging + +Action = torch.Tensor + +# observation as received from the robot +RawObservation = dict[str, torch.Tensor] + +# observation as those recorded in LeRobot dataset (keys are different) +LeRobotObservation = dict[str, torch.Tensor] + +# observation, ready for policy inference (image keys resized) +Observation = dict[str, torch.Tensor] + + +def visualize_action_queue_size(action_queue_size: list[int]) -> None: + import matplotlib.pyplot as plt + + _, ax = plt.subplots() + ax.set_title("Action Queue Size Over Time") + ax.set_xlabel("Environment steps") + ax.set_ylabel("Action Queue Size") + ax.set_ylim(0, max(action_queue_size) * 1.1) + ax.grid(True, alpha=0.3) + ax.plot(range(len(action_queue_size)), action_queue_size) + plt.show() + + +def map_robot_keys_to_lerobot_features(robot: Robot) -> dict[str, dict]: + return hw_to_dataset_features(robot.observation_features, OBS_STR, use_video=False) + + +def is_image_key(k: str) -> bool: + return k.startswith(OBS_IMAGES) + + +def resize_robot_observation_image(image: torch.tensor, resize_dims: tuple[int, int, int]) -> torch.tensor: + assert image.ndim == 3, f"Image must be (C, H, W)! 
Received {image.shape}" + # (H, W, C) -> (C, H, W) for resizing from robot obsevation resolution to policy image resolution + image = image.permute(2, 0, 1) + dims = (resize_dims[1], resize_dims[2]) + # Add batch dimension for interpolate: (C, H, W) -> (1, C, H, W) + image_batched = image.unsqueeze(0) + # Interpolate and remove batch dimension: (1, C, H, W) -> (C, H, W) + resized = torch.nn.functional.interpolate(image_batched, size=dims, mode="bilinear", align_corners=False) + + return resized.squeeze(0) + + +# TODO(Steven): Consider implementing a pipeline step for this +def raw_observation_to_observation( + raw_observation: RawObservation, + lerobot_features: dict[str, dict], + policy_image_features: dict[str, PolicyFeature], +) -> Observation: + observation = {} + + observation = prepare_raw_observation(raw_observation, lerobot_features, policy_image_features) + for k, v in observation.items(): + if isinstance(v, torch.Tensor): # VLAs present natural-language instructions in observations + if "image" in k: + # Policy expects images in shape (B, C, H, W) + observation[k] = prepare_image(v).unsqueeze(0) + else: + observation[k] = v + + return observation + + +def prepare_image(image: torch.Tensor) -> torch.Tensor: + """Minimal preprocessing to turn int8 images to float32 in [0, 1], and create a memory-contiguous tensor""" + image = image.type(torch.float32) / 255 + image = image.contiguous() + + return image + + +def extract_state_from_raw_observation( + lerobot_obs: RawObservation, +) -> torch.Tensor: + """Extract the state from a raw observation.""" + state = torch.tensor(lerobot_obs[OBS_STATE]) + + if state.ndim == 1: + state = state.unsqueeze(0) + + return state + + +def extract_images_from_raw_observation( + lerobot_obs: RawObservation, + camera_key: str, +) -> dict[str, torch.Tensor]: + """Extract the images from a raw observation.""" + return torch.tensor(lerobot_obs[camera_key]) + + +def make_lerobot_observation( + robot_obs: RawObservation, + lerobot_features: dict[str, dict], +) -> LeRobotObservation: + """Make a lerobot observation from a raw observation.""" + return build_dataset_frame(lerobot_features, robot_obs, prefix=OBS_STR) + + +def prepare_raw_observation( + robot_obs: RawObservation, + lerobot_features: dict[str, dict], + policy_image_features: dict[str, PolicyFeature], +) -> Observation: + """Matches keys from the raw robot_obs dict to the keys expected by a given policy (passed as + policy_image_features).""" + # 1. {motor.pos1:value1, motor.pos2:value2, ..., laptop:np.ndarray} -> + # -> {observation.state:[value1,value2,...], observation.images.laptop:np.ndarray} + lerobot_obs = make_lerobot_observation(robot_obs, lerobot_features) + + # 2. Greps all observation.images.<> keys + image_keys = list(filter(is_image_key, lerobot_obs)) + # state's shape is expected as (B, state_dim) + state_dict = {OBS_STATE: extract_state_from_raw_observation(lerobot_obs)} + image_dict = { + image_k: extract_images_from_raw_observation(lerobot_obs, image_k) for image_k in image_keys + } + + # Turns the image features to (C, H, W) with H, W matching the policy image features. 
+ # This reduces the resolution of the images + image_dict = { + key: resize_robot_observation_image(torch.tensor(lerobot_obs[key]), policy_image_features[key].shape) + for key in image_keys + } + + if "task" in robot_obs: + state_dict["task"] = robot_obs["task"] + + return {**state_dict, **image_dict} + + +def get_logger(name: str, log_to_file: bool = True) -> logging.Logger: + """ + Get a logger using the standardized logging setup from utils.py. + + Args: + name: Logger name (e.g., 'policy_server', 'robot_client') + log_to_file: Whether to also log to a file + + Returns: + Configured logger instance + """ + # Create logs directory if logging to file + if log_to_file: + os.makedirs("logs", exist_ok=True) + log_file = Path(f"logs/{name}_{int(time.time())}.log") + else: + log_file = None + + # Initialize the standardized logging + init_logging(log_file=log_file, display_pid=False) + + # Return a named logger + return logging.getLogger(name) + + +@dataclass +class TimedData: + """A data object with timestamp and timestep information. + + Args: + timestamp: Unix timestamp relative to data's creation. + data: The actual data to wrap a timestamp around. + timestep: The timestep of the data. + """ + + timestamp: float + timestep: int + + def get_timestamp(self): + return self.timestamp + + def get_timestep(self): + return self.timestep + + +@dataclass +class TimedAction(TimedData): + action: Action + + def get_action(self): + return self.action + + +@dataclass +class TimedObservation(TimedData): + observation: RawObservation + must_go: bool = False + + def get_observation(self): + return self.observation + + +@dataclass +class FPSTracker: + """Utility class to track FPS metrics over time.""" + + target_fps: float + first_timestamp: float = None + total_obs_count: int = 0 + + def calculate_fps_metrics(self, current_timestamp: float) -> dict[str, float]: + """Calculate average FPS vs target""" + self.total_obs_count += 1 + + # Initialize first observation time + if self.first_timestamp is None: + self.first_timestamp = current_timestamp + + # Calculate overall average FPS (since start) + total_duration = current_timestamp - self.first_timestamp + avg_fps = (self.total_obs_count - 1) / total_duration if total_duration > 1e-6 else 0.0 + + return {"avg_fps": avg_fps, "target_fps": self.target_fps} + + def reset(self): + """Reset the FPS tracker state""" + self.first_timestamp = None + self.total_obs_count = 0 + + +@dataclass +class RemotePolicyConfig: + policy_type: str + pretrained_name_or_path: str + lerobot_features: dict[str, PolicyFeature] + actions_per_chunk: int + device: str = "cpu" + rename_map: dict[str, str] = field(default_factory=dict) + + +def _compare_observation_states(obs1_state: torch.Tensor, obs2_state: torch.Tensor, atol: float) -> bool: + """Check if two observation states are similar, under a tolerance threshold""" + return bool(torch.linalg.norm(obs1_state - obs2_state) < atol) + + +def observations_similar( + obs1: TimedObservation, obs2: TimedObservation, lerobot_features: dict[str, dict], atol: float = 1 +) -> bool: + """Check if two observations are similar, under a tolerance threshold. Measures distance between + observations as the difference in joint-space between the two observations. + + NOTE(fracapuano): This is a very simple check, and it is enough for the current use case. + An immediate next step is to use (fast) perceptual difference metrics comparing some camera views, + to surpass this joint-space similarity check. 
+ """ + obs1_state = extract_state_from_raw_observation( + make_lerobot_observation(obs1.get_observation(), lerobot_features) + ) + obs2_state = extract_state_from_raw_observation( + make_lerobot_observation(obs2.get_observation(), lerobot_features) + ) + + return _compare_observation_states(obs1_state, obs2_state, atol=atol) diff --git a/src/lerobot/async_inference/policy_server.py b/src/lerobot/async_inference/policy_server.py new file mode 100644 index 0000000000000000000000000000000000000000..61f5056a12cc05a35bd9d929ce50b197bcff1fc2 --- /dev/null +++ b/src/lerobot/async_inference/policy_server.py @@ -0,0 +1,439 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Example: +```shell +python -m lerobot.async_inference.policy_server \ + --host=127.0.0.1 \ + --port=8080 \ + --fps=30 \ + --inference_latency=0.033 \ + --obs_queue_timeout=1 +``` +""" + +import logging +import pickle # nosec +import threading +import time +from concurrent import futures +from dataclasses import asdict +from pprint import pformat +from queue import Empty, Queue +from typing import Any + +import draccus +import grpc +import torch + +from lerobot.policies.factory import get_policy_class, make_pre_post_processors +from lerobot.processor import ( + PolicyAction, + PolicyProcessorPipeline, +) +from lerobot.transport import ( + services_pb2, # type: ignore + services_pb2_grpc, # type: ignore +) +from lerobot.transport.utils import receive_bytes_in_chunks + +from .configs import PolicyServerConfig +from .constants import SUPPORTED_POLICIES +from .helpers import ( + FPSTracker, + Observation, + RemotePolicyConfig, + TimedAction, + TimedObservation, + get_logger, + observations_similar, + raw_observation_to_observation, +) + + +class PolicyServer(services_pb2_grpc.AsyncInferenceServicer): + prefix = "policy_server" + logger = get_logger(prefix) + + def __init__(self, config: PolicyServerConfig): + self.config = config + self.shutdown_event = threading.Event() + + # FPS measurement + self.fps_tracker = FPSTracker(target_fps=config.fps) + + self.observation_queue = Queue(maxsize=1) + + self._predicted_timesteps_lock = threading.Lock() + self._predicted_timesteps = set() + + self.last_processed_obs = None + + # Attributes will be set by SendPolicyInstructions + self.device = None + self.policy_type = None + self.lerobot_features = None + self.actions_per_chunk = None + self.policy = None + self.preprocessor: PolicyProcessorPipeline[dict[str, Any], dict[str, Any]] | None = None + self.postprocessor: PolicyProcessorPipeline[PolicyAction, PolicyAction] | None = None + + @property + def running(self): + return not self.shutdown_event.is_set() + + @property + def policy_image_features(self): + return self.policy.config.image_features + + def _reset_server(self) -> None: + """Flushes server state when new client connects.""" + # only running inference on the latest observation received by the server + self.shutdown_event.set() + self.observation_queue = 
Queue(maxsize=1) + + with self._predicted_timesteps_lock: + self._predicted_timesteps = set() + + def Ready(self, request, context): # noqa: N802 + client_id = context.peer() + self.logger.info(f"Client {client_id} connected and ready") + self._reset_server() + self.shutdown_event.clear() + + return services_pb2.Empty() + + def SendPolicyInstructions(self, request, context): # noqa: N802 + """Receive policy instructions from the robot client""" + + if not self.running: + self.logger.warning("Server is not running. Ignoring policy instructions.") + return services_pb2.Empty() + + client_id = context.peer() + + policy_specs = pickle.loads(request.data) # nosec + + if not isinstance(policy_specs, RemotePolicyConfig): + raise TypeError(f"Policy specs must be a RemotePolicyConfig. Got {type(policy_specs)}") + + if policy_specs.policy_type not in SUPPORTED_POLICIES: + raise ValueError( + f"Policy type {policy_specs.policy_type} not supported. " + f"Supported policies: {SUPPORTED_POLICIES}" + ) + + self.logger.info( + f"Receiving policy instructions from {client_id} | " + f"Policy type: {policy_specs.policy_type} | " + f"Pretrained name or path: {policy_specs.pretrained_name_or_path} | " + f"Actions per chunk: {policy_specs.actions_per_chunk} | " + f"Device: {policy_specs.device}" + ) + + self.device = policy_specs.device + self.policy_type = policy_specs.policy_type # act, pi0, etc. + self.lerobot_features = policy_specs.lerobot_features + self.actions_per_chunk = policy_specs.actions_per_chunk + + policy_class = get_policy_class(self.policy_type) + + start = time.perf_counter() + self.policy = policy_class.from_pretrained(policy_specs.pretrained_name_or_path) + self.policy.to(self.device) + + # Load preprocessor and postprocessor, overriding device to match requested device + device_override = {"device": self.device} + self.preprocessor, self.postprocessor = make_pre_post_processors( + self.policy.config, + pretrained_path=policy_specs.pretrained_name_or_path, + preprocessor_overrides={ + "device_processor": device_override, + "rename_observations_processor": {"rename_map": policy_specs.rename_map}, + }, + postprocessor_overrides={"device_processor": device_override}, + ) + + end = time.perf_counter() + + self.logger.info(f"Time taken to put policy on {self.device}: {end - start:.4f} seconds") + + return services_pb2.Empty() + + def SendObservations(self, request_iterator, context): # noqa: N802 + """Receive observations from the robot client""" + client_id = context.peer() + self.logger.debug(f"Receiving observations from {client_id}") + + receive_time = time.time() # comparing timestamps so need time.time() + start_deserialize = time.perf_counter() + received_bytes = receive_bytes_in_chunks( + request_iterator, None, self.shutdown_event, self.logger + ) # blocking call while looping over request_iterator + timed_observation = pickle.loads(received_bytes) # nosec + deserialize_time = time.perf_counter() - start_deserialize + + self.logger.debug(f"Received observation #{timed_observation.get_timestep()}") + + obs_timestep = timed_observation.get_timestep() + obs_timestamp = timed_observation.get_timestamp() + + # Calculate FPS metrics + fps_metrics = self.fps_tracker.calculate_fps_metrics(obs_timestamp) + + self.logger.debug( + f"Received observation #{obs_timestep} | " + f"Avg FPS: {fps_metrics['avg_fps']:.2f} | " # fps at which observations are received from client + f"Target: {fps_metrics['target_fps']:.2f} | " + f"One-way latency: {(receive_time - obs_timestamp) * 1000:.2f}ms" + ) + + 
self.logger.debug( + f"Server timestamp: {receive_time:.6f} | " + f"Client timestamp: {obs_timestamp:.6f} | " + f"Deserialization time: {deserialize_time:.6f}s" + ) + + if not self._enqueue_observation( + timed_observation # wrapping a RawObservation + ): + self.logger.debug(f"Observation #{obs_timestep} has been filtered out") + + return services_pb2.Empty() + + def GetActions(self, request, context): # noqa: N802 + """Returns actions to the robot client. Actions are sent as a single + chunk, containing multiple actions.""" + client_id = context.peer() + self.logger.debug(f"Client {client_id} connected for action streaming") + + # Generate action based on the most recent observation and its timestep + try: + getactions_starts = time.perf_counter() + obs = self.observation_queue.get(timeout=self.config.obs_queue_timeout) + self.logger.info( + f"Running inference for observation #{obs.get_timestep()} (must_go: {obs.must_go})" + ) + + with self._predicted_timesteps_lock: + self._predicted_timesteps.add(obs.get_timestep()) + + start_time = time.perf_counter() + action_chunk = self._predict_action_chunk(obs) + inference_time = time.perf_counter() - start_time + + start_time = time.perf_counter() + actions_bytes = pickle.dumps(action_chunk) # nosec + serialize_time = time.perf_counter() - start_time + + # Create and return the action chunk + actions = services_pb2.Actions(data=actions_bytes) + + self.logger.info( + f"Action chunk #{obs.get_timestep()} generated | " + f"Total time: {(inference_time + serialize_time) * 1000:.2f}ms" + ) + + self.logger.debug( + f"Action chunk #{obs.get_timestep()} generated | " + f"Inference time: {inference_time:.2f}s |" + f"Serialize time: {serialize_time:.2f}s |" + f"Total time: {inference_time + serialize_time:.2f}s" + ) + + time.sleep( + max(0, self.config.inference_latency - max(0, time.perf_counter() - getactions_starts)) + ) # sleep controls inference latency + + return actions + + except Empty: # no observation added to queue in obs_queue_timeout + return services_pb2.Empty() + + except Exception as e: + self.logger.error(f"Error in StreamActions: {e}") + + return services_pb2.Empty() + + def _obs_sanity_checks(self, obs: TimedObservation, previous_obs: TimedObservation) -> bool: + """Check if the observation is valid to be processed by the policy""" + with self._predicted_timesteps_lock: + predicted_timesteps = self._predicted_timesteps + + if obs.get_timestep() in predicted_timesteps: + self.logger.debug(f"Skipping observation #{obs.get_timestep()} - Timestep predicted already!") + return False + + elif observations_similar(obs, previous_obs, lerobot_features=self.lerobot_features): + self.logger.debug( + f"Skipping observation #{obs.get_timestep()} - Observation too similar to last obs predicted!" + ) + return False + + else: + return True + + def _enqueue_observation(self, obs: TimedObservation) -> bool: + """Enqueue an observation if it must go through processing, otherwise skip it. + Observations not in queue are never run through the policy network""" + + if ( + obs.must_go + or self.last_processed_obs is None + or self._obs_sanity_checks(obs, self.last_processed_obs) + ): + last_obs = self.last_processed_obs.get_timestep() if self.last_processed_obs else "None" + self.logger.debug( + f"Enqueuing observation. 
Must go: {obs.must_go} | Last processed obs: {last_obs}" + ) + + # If queue is full, get the old observation to make room + if self.observation_queue.full(): + # pops from queue + _ = self.observation_queue.get_nowait() + self.logger.debug("Observation queue was full, removed oldest observation") + + # Now put the new observation (never blocks as queue is non-full here) + self.observation_queue.put(obs) + return True + + return False + + def _time_action_chunk(self, t_0: float, action_chunk: list[torch.Tensor], i_0: int) -> list[TimedAction]: + """Turn a chunk of actions into a list of TimedAction instances, + with the first action corresponding to t_0 and the rest corresponding to + t_0 + i*environment_dt for i in range(len(action_chunk)) + """ + return [ + TimedAction(timestamp=t_0 + i * self.config.environment_dt, timestep=i_0 + i, action=action) + for i, action in enumerate(action_chunk) + ] + + def _get_action_chunk(self, observation: dict[str, torch.Tensor]) -> torch.Tensor: + """Get an action chunk from the policy. The chunk contains only""" + chunk = self.policy.predict_action_chunk(observation) + if chunk.ndim != 3: + chunk = chunk.unsqueeze(0) # adding batch dimension, now shape is (B, chunk_size, action_dim) + + return chunk[:, : self.actions_per_chunk, :] + + def _predict_action_chunk(self, observation_t: TimedObservation) -> list[TimedAction]: + """Predict an action chunk based on an observation. + + Pipeline: + 1. Convert raw observation to LeRobot format + 2. Apply preprocessor (tokenization, normalization, batching, device placement) + 3. Run policy inference to get action chunk + 4. Apply postprocessor (unnormalization, device movement) + 5. Convert to TimedAction list + """ + """1. Prepare observation""" + start_prepare = time.perf_counter() + observation: Observation = raw_observation_to_observation( + observation_t.get_observation(), + self.lerobot_features, + self.policy_image_features, + ) + prepare_time = time.perf_counter() - start_prepare + + """2. Apply preprocessor""" + start_preprocess = time.perf_counter() + observation = self.preprocessor(observation) + self.last_processed_obs: TimedObservation = observation_t + preprocessing_time = time.perf_counter() - start_preprocess + + """3. Get action chunk""" + start_inference = time.perf_counter() + action_tensor = self._get_action_chunk(observation) + inference_time = time.perf_counter() - start_inference + self.logger.info( + f"Preprocessing and inference took {inference_time:.4f}s, action shape: {action_tensor.shape}" + ) + + """4. Apply postprocessor""" + # Apply postprocessor (handles unnormalization and device movement) + # Postprocessor expects (B, action_dim) per action, but we have (B, chunk_size, action_dim) + # So we process each action in the chunk individually + start_postprocess = time.perf_counter() + _, chunk_size, _ = action_tensor.shape + + # Process each action in the chunk + processed_actions = [] + for i in range(chunk_size): + # Extract action at timestep i: (B, action_dim) + single_action = action_tensor[:, i, :] + processed_action = self.postprocessor(single_action) + processed_actions.append(processed_action) + + # Stack back to (B, chunk_size, action_dim), then remove batch dim + action_tensor = torch.stack(processed_actions, dim=1).squeeze(0) + self.logger.debug(f"Postprocessed action shape: {action_tensor.shape}") + + """5. 
Convert to TimedAction list""" + action_chunk = self._time_action_chunk( + observation_t.get_timestamp(), list(action_tensor), observation_t.get_timestep() + ) + postprocess_stops = time.perf_counter() + postprocessing_time = postprocess_stops - start_postprocess + + self.logger.info( + f"Observation {observation_t.get_timestep()} | " + f"Total time: {1000 * (postprocess_stops - start_prepare):.2f}ms" + ) + + self.logger.debug( + f"Observation {observation_t.get_timestep()} | " + f"Prepare time: {1000 * prepare_time:.2f}ms | " + f"Preprocessing time: {1000 * preprocessing_time:.2f}ms | " + f"Inference time: {1000 * inference_time:.2f}ms | " + f"Postprocessing time: {1000 * postprocessing_time:.2f}ms | " + f"Total time: {1000 * (postprocess_stops - start_prepare):.2f}ms" + ) + + return action_chunk + + def stop(self): + """Stop the server""" + self._reset_server() + self.logger.info("Server stopping...") + + +@draccus.wrap() +def serve(cfg: PolicyServerConfig): + """Start the PolicyServer with the given configuration. + + Args: + config: PolicyServerConfig instance. If None, uses default configuration. + """ + logging.info(pformat(asdict(cfg))) + + # Create the server instance first + policy_server = PolicyServer(cfg) + + # Setup and start gRPC server + server = grpc.server(futures.ThreadPoolExecutor(max_workers=4)) + services_pb2_grpc.add_AsyncInferenceServicer_to_server(policy_server, server) + server.add_insecure_port(f"{cfg.host}:{cfg.port}") + + policy_server.logger.info(f"PolicyServer started on {cfg.host}:{cfg.port}") + server.start() + + server.wait_for_termination() + + policy_server.logger.info("Server terminated") + + +if __name__ == "__main__": + serve() diff --git a/src/lerobot/async_inference/robot_client.py b/src/lerobot/async_inference/robot_client.py new file mode 100644 index 0000000000000000000000000000000000000000..e537d62b9cb99a2f6e3bf296d5a5e549ad72037e --- /dev/null +++ b/src/lerobot/async_inference/robot_client.py @@ -0,0 +1,499 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
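Before moving on to the robot client below, note that the `serve` entrypoint above can also be reproduced programmatically. The following is a minimal sketch and not part of this PR: it assumes `PolicyServerConfig` can be constructed with just `host` and `port`, and that `PolicyServer` and `PolicyServerConfig` are importable from the `lerobot.async_inference` package; the gRPC wiring itself mirrors `serve`.

```python
# Hedged sketch: programmatic equivalent of the draccus-wrapped `serve` entrypoint.
from concurrent import futures

import grpc

from lerobot.async_inference.configs import PolicyServerConfig  # assumed import path
from lerobot.async_inference.policy_server import PolicyServer  # assumed import path
from lerobot.transport import services_pb2_grpc

cfg = PolicyServerConfig(host="127.0.0.1", port=8080)  # assumes host/port kwargs suffice
policy_server = PolicyServer(cfg)

# Same wiring as in `serve`: thread pool, servicer registration, insecure port.
server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
services_pb2_grpc.add_AsyncInferenceServicer_to_server(policy_server, server)
server.add_insecure_port(f"{cfg.host}:{cfg.port}")
server.start()
server.wait_for_termination()
```

In practice the draccus-wrapped `serve` CLI remains the intended entrypoint; the sketch only makes the gRPC wiring explicit next to the client command shown below.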
+ +""" +Example command: +```shell +python src/lerobot/async_inference/robot_client.py \ + --robot.type=so100_follower \ + --robot.port=/dev/tty.usbmodem58760431541 \ + --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \ + --robot.id=black \ + --task="dummy" \ + --server_address=127.0.0.1:8080 \ + --policy_type=act \ + --pretrained_name_or_path=user/model \ + --policy_device=mps \ + --actions_per_chunk=50 \ + --chunk_size_threshold=0.5 \ + --aggregate_fn_name=weighted_average \ + --debug_visualize_queue_size=True +``` +""" + +import logging +import pickle # nosec +import threading +import time +from collections.abc import Callable +from dataclasses import asdict +from pprint import pformat +from queue import Queue +from typing import Any + +import draccus +import grpc +import torch + +from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig # noqa: F401 +from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig # noqa: F401 +from lerobot.robots import ( # noqa: F401 + Robot, + RobotConfig, + bi_so100_follower, + koch_follower, + make_robot_from_config, + so100_follower, + so101_follower, +) +from lerobot.transport import ( + services_pb2, # type: ignore + services_pb2_grpc, # type: ignore +) +from lerobot.transport.utils import grpc_channel_options, send_bytes_in_chunks + +from .configs import RobotClientConfig +from .constants import SUPPORTED_ROBOTS +from .helpers import ( + Action, + FPSTracker, + Observation, + RawObservation, + RemotePolicyConfig, + TimedAction, + TimedObservation, + get_logger, + map_robot_keys_to_lerobot_features, + visualize_action_queue_size, +) + + +class RobotClient: + prefix = "robot_client" + logger = get_logger(prefix) + + def __init__(self, config: RobotClientConfig): + """Initialize RobotClient with unified configuration. 
+ + Args: + config: RobotClientConfig containing all configuration parameters + """ + # Store configuration + self.config = config + self.robot = make_robot_from_config(config.robot) + self.robot.connect() + + lerobot_features = map_robot_keys_to_lerobot_features(self.robot) + + # Use environment variable if server_address is not provided in config + self.server_address = config.server_address + + self.policy_config = RemotePolicyConfig( + config.policy_type, + config.pretrained_name_or_path, + lerobot_features, + config.actions_per_chunk, + config.policy_device, + ) + self.channel = grpc.insecure_channel( + self.server_address, grpc_channel_options(initial_backoff=f"{config.environment_dt:.4f}s") + ) + self.stub = services_pb2_grpc.AsyncInferenceStub(self.channel) + self.logger.info(f"Initializing client to connect to server at {self.server_address}") + + self.shutdown_event = threading.Event() + + # Initialize client side variables + self.latest_action_lock = threading.Lock() + self.latest_action = -1 + self.action_chunk_size = -1 + + self._chunk_size_threshold = config.chunk_size_threshold + + self.action_queue = Queue() + self.action_queue_lock = threading.Lock() # Protect queue operations + self.action_queue_size = [] + self.start_barrier = threading.Barrier(2) # 2 threads: action receiver, control loop + + # FPS measurement + self.fps_tracker = FPSTracker(target_fps=self.config.fps) + + self.logger.info("Robot connected and ready") + + # Use an event for thread-safe coordination + self.must_go = threading.Event() + self.must_go.set() # Initially set - observations qualify for direct processing + + @property + def running(self): + return not self.shutdown_event.is_set() + + def start(self): + """Start the robot client and connect to the policy server""" + try: + # client-server handshake + start_time = time.perf_counter() + self.stub.Ready(services_pb2.Empty()) + end_time = time.perf_counter() + self.logger.debug(f"Connected to policy server in {end_time - start_time:.4f}s") + + # send policy instructions + policy_config_bytes = pickle.dumps(self.policy_config) + policy_setup = services_pb2.PolicySetup(data=policy_config_bytes) + + self.logger.info("Sending policy instructions to policy server") + self.logger.debug( + f"Policy type: {self.policy_config.policy_type} | " + f"Pretrained name or path: {self.policy_config.pretrained_name_or_path} | " + f"Device: {self.policy_config.device}" + ) + + self.stub.SendPolicyInstructions(policy_setup) + + self.shutdown_event.clear() + + return True + + except grpc.RpcError as e: + self.logger.error(f"Failed to connect to policy server: {e}") + return False + + def stop(self): + """Stop the robot client""" + self.shutdown_event.set() + + self.robot.disconnect() + self.logger.debug("Robot disconnected") + + self.channel.close() + self.logger.debug("Client stopped, channel closed") + + def send_observation( + self, + obs: TimedObservation, + ) -> bool: + """Send observation to the policy server. + Returns True if the observation was sent successfully, False otherwise.""" + if not self.running: + raise RuntimeError("Client not running. 
Run RobotClient.start() before sending observations.") + + if not isinstance(obs, TimedObservation): + raise ValueError("Input observation needs to be a TimedObservation!") + + start_time = time.perf_counter() + observation_bytes = pickle.dumps(obs) + serialize_time = time.perf_counter() - start_time + self.logger.debug(f"Observation serialization time: {serialize_time:.6f}s") + + try: + observation_iterator = send_bytes_in_chunks( + observation_bytes, + services_pb2.Observation, + log_prefix="[CLIENT] Observation", + silent=True, + ) + _ = self.stub.SendObservations(observation_iterator) + obs_timestep = obs.get_timestep() + self.logger.debug(f"Sent observation #{obs_timestep} | ") + + return True + + except grpc.RpcError as e: + self.logger.error(f"Error sending observation #{obs.get_timestep()}: {e}") + return False + + def _inspect_action_queue(self): + with self.action_queue_lock: + queue_size = self.action_queue.qsize() + timestamps = sorted([action.get_timestep() for action in self.action_queue.queue]) + self.logger.debug(f"Queue size: {queue_size}, Queue contents: {timestamps}") + return queue_size, timestamps + + def _aggregate_action_queues( + self, + incoming_actions: list[TimedAction], + aggregate_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] | None = None, + ): + """Finds the same timestep actions in the queue and aggregates them using the aggregate_fn""" + if aggregate_fn is None: + # default aggregate function: take the latest action + def aggregate_fn(x1, x2): + return x2 + + future_action_queue = Queue() + with self.action_queue_lock: + internal_queue = self.action_queue.queue + + current_action_queue = {action.get_timestep(): action.get_action() for action in internal_queue} + + for new_action in incoming_actions: + with self.latest_action_lock: + latest_action = self.latest_action + + # New action is older than the latest action in the queue, skip it + if new_action.get_timestep() <= latest_action: + continue + + # If the new action's timestep is not in the current action queue, add it directly + elif new_action.get_timestep() not in current_action_queue: + future_action_queue.put(new_action) + continue + + # If the new action's timestep is in the current action queue, aggregate it + # TODO: There is probably a way to do this with broadcasting of the two action tensors + future_action_queue.put( + TimedAction( + timestamp=new_action.get_timestamp(), + timestep=new_action.get_timestep(), + action=aggregate_fn( + current_action_queue[new_action.get_timestep()], new_action.get_action() + ), + ) + ) + + with self.action_queue_lock: + self.action_queue = future_action_queue + + def receive_actions(self, verbose: bool = False): + """Receive actions from the policy server""" + # Wait at barrier for synchronized start + self.start_barrier.wait() + self.logger.info("Action receiving thread starting") + + while self.running: + try: + # Use StreamActions to get a stream of actions from the server + actions_chunk = self.stub.GetActions(services_pb2.Empty()) + if len(actions_chunk.data) == 0: + continue # received `Empty` from server, wait for next call + + receive_time = time.time() + + # Deserialize bytes back into list[TimedAction] + deserialize_start = time.perf_counter() + timed_actions = pickle.loads(actions_chunk.data) # nosec + deserialize_time = time.perf_counter() - deserialize_start + + self.action_chunk_size = max(self.action_chunk_size, len(timed_actions)) + + # Calculate network latency if we have matching observations + if len(timed_actions) > 0 and verbose: 
+ with self.latest_action_lock: + latest_action = self.latest_action + + self.logger.debug(f"Current latest action: {latest_action}") + + # Get queue state before changes + old_size, old_timesteps = self._inspect_action_queue() + if not old_timesteps: + old_timesteps = [latest_action] # queue was empty + + # Log incoming actions + incoming_timesteps = [a.get_timestep() for a in timed_actions] + + first_action_timestep = timed_actions[0].get_timestep() + server_to_client_latency = (receive_time - timed_actions[0].get_timestamp()) * 1000 + + self.logger.info( + f"Received action chunk for step #{first_action_timestep} | " + f"Latest action: #{latest_action} | " + f"Incoming actions: {incoming_timesteps[0]}:{incoming_timesteps[-1]} | " + f"Network latency (server->client): {server_to_client_latency:.2f}ms | " + f"Deserialization time: {deserialize_time * 1000:.2f}ms" + ) + + # Update action queue + start_time = time.perf_counter() + self._aggregate_action_queues(timed_actions, self.config.aggregate_fn) + queue_update_time = time.perf_counter() - start_time + + self.must_go.set() # after receiving actions, next empty queue triggers must-go processing! + + if verbose: + # Get queue state after changes + new_size, new_timesteps = self._inspect_action_queue() + + with self.latest_action_lock: + latest_action = self.latest_action + + self.logger.info( + f"Latest action: {latest_action} | " + f"Old action steps: {old_timesteps[0]}:{old_timesteps[-1]} | " + f"Incoming action steps: {incoming_timesteps[0]}:{incoming_timesteps[-1]} | " + f"Updated action steps: {new_timesteps[0]}:{new_timesteps[-1]}" + ) + self.logger.debug( + f"Queue update complete ({queue_update_time:.6f}s) | " + f"Before: {old_size} items | " + f"After: {new_size} items | " + ) + + except grpc.RpcError as e: + self.logger.error(f"Error receiving actions: {e}") + + def actions_available(self): + """Check if there are actions available in the queue""" + with self.action_queue_lock: + return not self.action_queue.empty() + + def _action_tensor_to_action_dict(self, action_tensor: torch.Tensor) -> dict[str, float]: + action = {key: action_tensor[i].item() for i, key in enumerate(self.robot.action_features)} + return action + + def control_loop_action(self, verbose: bool = False) -> dict[str, Any]: + """Reading and performing actions in local queue""" + + # Lock only for queue operations + get_start = time.perf_counter() + with self.action_queue_lock: + self.action_queue_size.append(self.action_queue.qsize()) + # Get action from queue + timed_action = self.action_queue.get_nowait() + get_end = time.perf_counter() - get_start + + _performed_action = self.robot.send_action( + self._action_tensor_to_action_dict(timed_action.get_action()) + ) + with self.latest_action_lock: + self.latest_action = timed_action.get_timestep() + + if verbose: + with self.action_queue_lock: + current_queue_size = self.action_queue.qsize() + + self.logger.debug( + f"Ts={timed_action.get_timestamp()} | " + f"Action #{timed_action.get_timestep()} performed | " + f"Queue size: {current_queue_size}" + ) + + self.logger.debug( + f"Popping action from queue to perform took {get_end:.6f}s | Queue size: {current_queue_size}" + ) + + return _performed_action + + def _ready_to_send_observation(self): + """Flags when the client is ready to send an observation""" + with self.action_queue_lock: + return self.action_queue.qsize() / self.action_chunk_size <= self._chunk_size_threshold + + def control_loop_observation(self, task: str, verbose: bool = False) -> RawObservation: + 
try: + # Get serialized observation bytes from the function + start_time = time.perf_counter() + + raw_observation: RawObservation = self.robot.get_observation() + raw_observation["task"] = task + + with self.latest_action_lock: + latest_action = self.latest_action + + observation = TimedObservation( + timestamp=time.time(), # need time.time() to compare timestamps across client and server + observation=raw_observation, + timestep=max(latest_action, 0), + ) + + obs_capture_time = time.perf_counter() - start_time + + # If there are no actions left in the queue, the observation must go through processing! + with self.action_queue_lock: + observation.must_go = self.must_go.is_set() and self.action_queue.empty() + current_queue_size = self.action_queue.qsize() + + _ = self.send_observation(observation) + + self.logger.debug(f"QUEUE SIZE: {current_queue_size} (Must go: {observation.must_go})") + if observation.must_go: + # must-go event will be set again after receiving actions + self.must_go.clear() + + if verbose: + # Calculate comprehensive FPS metrics + fps_metrics = self.fps_tracker.calculate_fps_metrics(observation.get_timestamp()) + + self.logger.info( + f"Obs #{observation.get_timestep()} | " + f"Avg FPS: {fps_metrics['avg_fps']:.2f} | " + f"Target: {fps_metrics['target_fps']:.2f}" + ) + + self.logger.debug( + f"Ts={observation.get_timestamp():.6f} | Capturing observation took {obs_capture_time:.6f}s" + ) + + return raw_observation + + except Exception as e: + self.logger.error(f"Error in observation sender: {e}") + + def control_loop(self, task: str, verbose: bool = False) -> tuple[Observation, Action]: + """Combined function for executing actions and streaming observations""" + # Wait at barrier for synchronized start + self.start_barrier.wait() + self.logger.info("Control loop thread starting") + + _performed_action = None + _captured_observation = None + + while self.running: + control_loop_start = time.perf_counter() + """Control loop: (1) Performing actions, when available""" + if self.actions_available(): + _performed_action = self.control_loop_action(verbose) + + """Control loop: (2) Streaming observations to the remote policy server""" + if self._ready_to_send_observation(): + _captured_observation = self.control_loop_observation(task, verbose) + + self.logger.debug(f"Control loop (ms): {(time.perf_counter() - control_loop_start) * 1000:.2f}") + # Dynamically adjust sleep time to maintain the desired control frequency + time.sleep(max(0, self.config.environment_dt - (time.perf_counter() - control_loop_start))) + + return _captured_observation, _performed_action + + +@draccus.wrap() +def async_client(cfg: RobotClientConfig): + logging.info(pformat(asdict(cfg))) + + if cfg.robot.type not in SUPPORTED_ROBOTS: + raise ValueError(f"Robot {cfg.robot.type} not yet supported!") + + client = RobotClient(cfg) + + if client.start(): + client.logger.info("Starting action receiver thread...") + + # Create and start action receiver thread + action_receiver_thread = threading.Thread(target=client.receive_actions, daemon=True) + + # Start action receiver thread + action_receiver_thread.start() + + try: + # The main thread runs the control loop + client.control_loop(task=cfg.task) + + finally: + client.stop() + action_receiver_thread.join() + if cfg.debug_visualize_queue_size: + visualize_action_queue_size(client.action_queue_size) + client.logger.info("Client stopped") + + +if __name__ == "__main__": + async_client() # run the client diff --git a/src/lerobot/cameras/__init__.py 
b/src/lerobot/cameras/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..44cfbb7aaea3603881724a4a541ff8c7c657b010 --- /dev/null +++ b/src/lerobot/cameras/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .camera import Camera +from .configs import CameraConfig, ColorMode, Cv2Rotation +from .utils import make_cameras_from_configs diff --git a/src/lerobot/cameras/camera.py b/src/lerobot/cameras/camera.py new file mode 100644 index 0000000000000000000000000000000000000000..da014967d4ae3dae680a52338920eae0b7f69301 --- /dev/null +++ b/src/lerobot/cameras/camera.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +from typing import Any + +from numpy.typing import NDArray # type: ignore # TODO: add type stubs for numpy.typing + +from .configs import CameraConfig, ColorMode + + +class Camera(abc.ABC): + """Base class for camera implementations. + + Defines a standard interface for camera operations across different backends. + Subclasses must implement all abstract methods. + + Manages basic camera properties (FPS, resolution) and core operations: + - Connection/disconnection + - Frame capture (sync/async) + + Attributes: + fps (int | None): Configured frames per second + width (int | None): Frame width in pixels + height (int | None): Frame height in pixels + + Example: + class MyCamera(Camera): + def __init__(self, config): ... + @property + def is_connected(self) -> bool: ... + def connect(self, warmup=True): ... + # Plus other required methods + """ + + def __init__(self, config: CameraConfig): + """Initialize the camera with the given configuration. + + Args: + config: Camera configuration containing FPS and resolution. + """ + self.fps: int | None = config.fps + self.width: int | None = config.width + self.height: int | None = config.height + + @property + @abc.abstractmethod + def is_connected(self) -> bool: + """Check if the camera is currently connected. + + Returns: + bool: True if the camera is connected and ready to capture frames, + False otherwise. + """ + pass + + @staticmethod + @abc.abstractmethod + def find_cameras() -> list[dict[str, Any]]: + """Detects available cameras connected to the system. + Returns: + List[Dict[str, Any]]: A list of dictionaries, + where each dictionary contains information about a detected camera. 
+ """ + pass + + @abc.abstractmethod + def connect(self, warmup: bool = True) -> None: + """Establish connection to the camera. + + Args: + warmup: If True (default), captures a warmup frame before returning. Useful + for cameras that require time to adjust capture settings. + If False, skips the warmup frame. + """ + pass + + @abc.abstractmethod + def read(self, color_mode: ColorMode | None = None) -> NDArray[Any]: + """Capture and return a single frame from the camera. + + Args: + color_mode: Desired color mode for the output frame. If None, + uses the camera's default color mode. + + Returns: + np.ndarray: Captured frame as a numpy array. + """ + pass + + @abc.abstractmethod + def async_read(self, timeout_ms: float = ...) -> NDArray[Any]: + """Asynchronously capture and return a single frame from the camera. + + Args: + timeout_ms: Maximum time to wait for a frame in milliseconds. + Defaults to implementation-specific timeout. + + Returns: + np.ndarray: Captured frame as a numpy array. + """ + pass + + @abc.abstractmethod + def disconnect(self) -> None: + """Disconnect from the camera and release resources.""" + pass diff --git a/src/lerobot/cameras/configs.py b/src/lerobot/cameras/configs.py new file mode 100644 index 0000000000000000000000000000000000000000..5f0c34b8d6d4a7cc7bf25a7428401680bebb1a0a --- /dev/null +++ b/src/lerobot/cameras/configs.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +from dataclasses import dataclass +from enum import Enum + +import draccus # type: ignore # TODO: add type stubs for draccus + + +class ColorMode(str, Enum): + RGB = "rgb" + BGR = "bgr" + + +class Cv2Rotation(int, Enum): + NO_ROTATION = 0 + ROTATE_90 = 90 + ROTATE_180 = 180 + ROTATE_270 = -90 + + +@dataclass(kw_only=True) +class CameraConfig(draccus.ChoiceRegistry, abc.ABC): # type: ignore # TODO: add type stubs for draccus + fps: int | None = None + width: int | None = None + height: int | None = None + + @property + def type(self) -> str: + return str(self.get_choice_name(self.__class__)) diff --git a/src/lerobot/cameras/opencv/__init__.py b/src/lerobot/cameras/opencv/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bb7c12a7aa99a2f615ebe326dcc72226d7f48485 --- /dev/null +++ b/src/lerobot/cameras/opencv/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
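To make the abstract `Camera` interface and the `CameraConfig` choice registry above concrete, here is a minimal sketch of a hypothetical third-party backend. The `Dummy*` names are illustrative only and not part of this PR; the sketch assumes the modules are importable as `lerobot.cameras.camera` and `lerobot.cameras.configs`, consistent with the package `__init__` shown earlier.

```python
# Hedged sketch: a hypothetical camera backend implementing the Camera interface.
from dataclasses import dataclass
from typing import Any

import numpy as np
from numpy.typing import NDArray

from lerobot.cameras.camera import Camera
from lerobot.cameras.configs import CameraConfig, ColorMode


@CameraConfig.register_subclass("dummy")  # registers the config under the "dummy" choice name
@dataclass
class DummyCameraConfig(CameraConfig):
    pass  # fps, width, height are inherited from CameraConfig


class DummyCamera(Camera):
    def __init__(self, config: DummyCameraConfig):
        super().__init__(config)  # stores fps / width / height
        self._connected = False

    @property
    def is_connected(self) -> bool:
        return self._connected

    @staticmethod
    def find_cameras() -> list[dict[str, Any]]:
        return [{"type": "Dummy", "id": 0}]

    def connect(self, warmup: bool = True) -> None:
        self._connected = True

    def read(self, color_mode: ColorMode | None = None) -> NDArray[Any]:
        # Return a black frame at the configured (or a fallback) resolution.
        return np.zeros((self.height or 480, self.width or 640, 3), dtype=np.uint8)

    def async_read(self, timeout_ms: float = 200) -> NDArray[Any]:
        return self.read()

    def disconnect(self) -> None:
        self._connected = False
```

Real backends such as the OpenCV implementation below additionally handle warmup, background read threads, and error reporting; the sketch only shows the minimum surface a subclass must provide.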
+ +from .camera_opencv import OpenCVCamera +from .configuration_opencv import OpenCVCameraConfig + +__all__ = ["OpenCVCamera", "OpenCVCameraConfig"] diff --git a/src/lerobot/cameras/opencv/camera_opencv.py b/src/lerobot/cameras/opencv/camera_opencv.py new file mode 100644 index 0000000000000000000000000000000000000000..2026bad42e9ff325560d748669742ffb00ee5168 --- /dev/null +++ b/src/lerobot/cameras/opencv/camera_opencv.py @@ -0,0 +1,541 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Provides the OpenCVCamera class for capturing frames from cameras using OpenCV. +""" + +import logging +import math +import os +import platform +import time +from pathlib import Path +from threading import Event, Lock, Thread +from typing import Any + +from numpy.typing import NDArray # type: ignore # TODO: add type stubs for numpy.typing + +# Fix MSMF hardware transform compatibility for Windows before importing cv2 +if platform.system() == "Windows" and "OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS" not in os.environ: + os.environ["OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS"] = "0" +import cv2 # type: ignore # TODO: add type stubs for OpenCV + +from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError + +from ..camera import Camera +from ..utils import get_cv2_backend, get_cv2_rotation +from .configuration_opencv import ColorMode, OpenCVCameraConfig + +# NOTE(Steven): The maximum opencv device index depends on your operating system. For instance, +# if you have 3 cameras, they should be associated to index 0, 1, and 2. This is the case +# on MacOS. However, on Ubuntu, the indices are different like 6, 16, 23. +# When you change the USB port or reboot the computer, the operating system might +# treat the same cameras as new devices. Thus we select a higher bound to search indices. +MAX_OPENCV_INDEX = 60 + +logger = logging.getLogger(__name__) + + +class OpenCVCamera(Camera): + """ + Manages camera interactions using OpenCV for efficient frame recording. + + This class provides a high-level interface to connect to, configure, and read + frames from cameras compatible with OpenCV's VideoCapture. It supports both + synchronous and asynchronous frame reading. + + An OpenCVCamera instance requires a camera index (e.g., 0) or a device path + (e.g., '/dev/video0' on Linux). Camera indices can be unstable across reboots + or port changes, especially on Linux. Use the provided utility script to find + available camera indices or paths: + ```bash + lerobot-find-cameras opencv + ``` + + The camera's default settings (FPS, resolution, color mode) are used unless + overridden in the configuration. 
+ + Example: + ```python + from lerobot.cameras.opencv import OpenCVCamera + from lerobot.cameras.configuration_opencv import OpenCVCameraConfig, ColorMode, Cv2Rotation + + # Basic usage with camera index 0 + config = OpenCVCameraConfig(index_or_path=0) + camera = OpenCVCamera(config) + camera.connect() + + # Read 1 frame synchronously + color_image = camera.read() + print(color_image.shape) + + # Read 1 frame asynchronously + async_image = camera.async_read() + + # When done, properly disconnect the camera using + camera.disconnect() + + # Example with custom settings + custom_config = OpenCVCameraConfig( + index_or_path='/dev/video0', # Or use an index + fps=30, + width=1280, + height=720, + color_mode=ColorMode.RGB, + rotation=Cv2Rotation.ROTATE_90 + ) + custom_camera = OpenCVCamera(custom_config) + # ... connect, read, disconnect ... + ``` + """ + + def __init__(self, config: OpenCVCameraConfig): + """ + Initializes the OpenCVCamera instance. + + Args: + config: The configuration settings for the camera. + """ + super().__init__(config) + + self.config = config + self.index_or_path = config.index_or_path + + self.fps = config.fps + self.color_mode = config.color_mode + self.warmup_s = config.warmup_s + + self.videocapture: cv2.VideoCapture | None = None + + self.thread: Thread | None = None + self.stop_event: Event | None = None + self.frame_lock: Lock = Lock() + self.latest_frame: NDArray[Any] | None = None + self.new_frame_event: Event = Event() + + self.rotation: int | None = get_cv2_rotation(config.rotation) + self.backend: int = get_cv2_backend() + + if self.height and self.width: + self.capture_width, self.capture_height = self.width, self.height + if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]: + self.capture_width, self.capture_height = self.height, self.width + + def __str__(self) -> str: + return f"{self.__class__.__name__}({self.index_or_path})" + + @property + def is_connected(self) -> bool: + """Checks if the camera is currently connected and opened.""" + return isinstance(self.videocapture, cv2.VideoCapture) and self.videocapture.isOpened() + + def connect(self, warmup: bool = True) -> None: + """ + Connects to the OpenCV camera specified in the configuration. + + Initializes the OpenCV VideoCapture object, sets desired camera properties + (FPS, width, height), and performs initial checks. + + Raises: + DeviceAlreadyConnectedError: If the camera is already connected. + ConnectionError: If the specified camera index/path is not found or the camera is found but fails to open. + RuntimeError: If the camera opens but fails to apply requested FPS/resolution settings. + """ + if self.is_connected: + raise DeviceAlreadyConnectedError(f"{self} is already connected.") + + # Use 1 thread for OpenCV operations to avoid potential conflicts or + # blocking in multi-threaded applications, especially during data collection. + cv2.setNumThreads(1) + + self.videocapture = cv2.VideoCapture(self.index_or_path, self.backend) + + if not self.videocapture.isOpened(): + self.videocapture.release() + self.videocapture = None + raise ConnectionError( + f"Failed to open {self}.Run `lerobot-find-cameras opencv` to find available cameras." 
+ ) + + self._configure_capture_settings() + + if warmup: + start_time = time.time() + while time.time() - start_time < self.warmup_s: + self.read() + time.sleep(0.1) + + logger.info(f"{self} connected.") + + def _configure_capture_settings(self) -> None: + """ + Applies the specified FOURCC, FPS, width, and height settings to the connected camera. + + This method attempts to set the camera properties via OpenCV. It checks if + the camera successfully applied the settings and raises an error if not. + FOURCC is set first (if specified) as it can affect the available FPS and resolution options. + + Args: + fourcc: The desired FOURCC code (e.g., "MJPG", "YUYV"). If None, auto-detect. + fps: The desired frames per second. If None, the setting is skipped. + width: The desired capture width. If None, the setting is skipped. + height: The desired capture height. If None, the setting is skipped. + + Raises: + RuntimeError: If the camera fails to set any of the specified properties + to the requested value. + DeviceNotConnectedError: If the camera is not connected when attempting + to configure settings. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"Cannot configure settings for {self} as it is not connected.") + + # Set FOURCC first (if specified) as it can affect available FPS/resolution options + if self.config.fourcc is not None: + self._validate_fourcc() + if self.videocapture is None: + raise DeviceNotConnectedError(f"{self} videocapture is not initialized") + + default_width = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_WIDTH))) + default_height = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_HEIGHT))) + + if self.width is None or self.height is None: + self.width, self.height = default_width, default_height + self.capture_width, self.capture_height = default_width, default_height + if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]: + self.width, self.height = default_height, default_width + self.capture_width, self.capture_height = default_width, default_height + else: + self._validate_width_and_height() + + if self.fps is None: + self.fps = self.videocapture.get(cv2.CAP_PROP_FPS) + else: + self._validate_fps() + + def _validate_fps(self) -> None: + """Validates and sets the camera's frames per second (FPS).""" + + if self.videocapture is None: + raise DeviceNotConnectedError(f"{self} videocapture is not initialized") + + if self.fps is None: + raise ValueError(f"{self} FPS is not set") + + success = self.videocapture.set(cv2.CAP_PROP_FPS, float(self.fps)) + actual_fps = self.videocapture.get(cv2.CAP_PROP_FPS) + # Use math.isclose for robust float comparison + if not success or not math.isclose(self.fps, actual_fps, rel_tol=1e-3): + raise RuntimeError(f"{self} failed to set fps={self.fps} ({actual_fps=}).") + + def _validate_fourcc(self) -> None: + """Validates and sets the camera's FOURCC code.""" + + fourcc_code = cv2.VideoWriter_fourcc(*self.config.fourcc) + + if self.videocapture is None: + raise DeviceNotConnectedError(f"{self} videocapture is not initialized") + + success = self.videocapture.set(cv2.CAP_PROP_FOURCC, fourcc_code) + actual_fourcc_code = self.videocapture.get(cv2.CAP_PROP_FOURCC) + + # Convert actual FOURCC code back to string for comparison + actual_fourcc_code_int = int(actual_fourcc_code) + actual_fourcc = "".join([chr((actual_fourcc_code_int >> 8 * i) & 0xFF) for i in range(4)]) + + if not success or actual_fourcc != self.config.fourcc: + logger.warning( + f"{self} failed to set 
fourcc={self.config.fourcc} (actual={actual_fourcc}, success={success}). " + f"Continuing with default format." + ) + + def _validate_width_and_height(self) -> None: + """Validates and sets the camera's frame capture width and height.""" + + if self.videocapture is None: + raise DeviceNotConnectedError(f"{self} videocapture is not initialized") + + if self.capture_width is None or self.capture_height is None: + raise ValueError(f"{self} capture_width or capture_height is not set") + + width_success = self.videocapture.set(cv2.CAP_PROP_FRAME_WIDTH, float(self.capture_width)) + height_success = self.videocapture.set(cv2.CAP_PROP_FRAME_HEIGHT, float(self.capture_height)) + + actual_width = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_WIDTH))) + if not width_success or self.capture_width != actual_width: + raise RuntimeError( + f"{self} failed to set capture_width={self.capture_width} ({actual_width=}, {width_success=})." + ) + + actual_height = int(round(self.videocapture.get(cv2.CAP_PROP_FRAME_HEIGHT))) + if not height_success or self.capture_height != actual_height: + raise RuntimeError( + f"{self} failed to set capture_height={self.capture_height} ({actual_height=}, {height_success=})." + ) + + @staticmethod + def find_cameras() -> list[dict[str, Any]]: + """ + Detects available OpenCV cameras connected to the system. + + On Linux, it scans '/dev/video*' paths. On other systems (like macOS, Windows), + it checks indices from 0 up to `MAX_OPENCV_INDEX`. + + Returns: + List[Dict[str, Any]]: A list of dictionaries, + where each dictionary contains 'type', 'id' (port index or path), + and the default profile properties (width, height, fps, format). + """ + found_cameras_info = [] + + targets_to_scan: list[str | int] + if platform.system() == "Linux": + possible_paths = sorted(Path("/dev").glob("video*"), key=lambda p: p.name) + targets_to_scan = [str(p) for p in possible_paths] + else: + targets_to_scan = [int(i) for i in range(MAX_OPENCV_INDEX)] + + for target in targets_to_scan: + camera = cv2.VideoCapture(target) + if camera.isOpened(): + default_width = int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)) + default_height = int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)) + default_fps = camera.get(cv2.CAP_PROP_FPS) + default_format = camera.get(cv2.CAP_PROP_FORMAT) + + # Get FOURCC code and convert to string + default_fourcc_code = camera.get(cv2.CAP_PROP_FOURCC) + default_fourcc_code_int = int(default_fourcc_code) + default_fourcc = "".join([chr((default_fourcc_code_int >> 8 * i) & 0xFF) for i in range(4)]) + + camera_info = { + "name": f"OpenCV Camera @ {target}", + "type": "OpenCV", + "id": target, + "backend_api": camera.getBackendName(), + "default_stream_profile": { + "format": default_format, + "fourcc": default_fourcc, + "width": default_width, + "height": default_height, + "fps": default_fps, + }, + } + + found_cameras_info.append(camera_info) + camera.release() + + return found_cameras_info + + def read(self, color_mode: ColorMode | None = None) -> NDArray[Any]: + """ + Reads a single frame synchronously from the camera. + + This is a blocking call. It waits for the next available frame from the + camera hardware via OpenCV. + + Args: + color_mode (Optional[ColorMode]): If specified, overrides the default + color mode (`self.color_mode`) for this read operation (e.g., + request RGB even if default is BGR). 
+ + Returns: + np.ndarray: The captured frame as a NumPy array in the format + (height, width, channels), using the specified or default + color mode and applying any configured rotation. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + RuntimeError: If reading the frame from the camera fails or if the + received frame dimensions don't match expectations before rotation. + ValueError: If an invalid `color_mode` is requested. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + start_time = time.perf_counter() + + if self.videocapture is None: + raise DeviceNotConnectedError(f"{self} videocapture is not initialized") + + ret, frame = self.videocapture.read() + + if not ret or frame is None: + raise RuntimeError(f"{self} read failed (status={ret}).") + + processed_frame = self._postprocess_image(frame, color_mode) + + read_duration_ms = (time.perf_counter() - start_time) * 1e3 + logger.debug(f"{self} read took: {read_duration_ms:.1f}ms") + + return processed_frame + + def _postprocess_image(self, image: NDArray[Any], color_mode: ColorMode | None = None) -> NDArray[Any]: + """ + Applies color conversion, dimension validation, and rotation to a raw frame. + + Args: + image (np.ndarray): The raw image frame (expected BGR format from OpenCV). + color_mode (Optional[ColorMode]): The target color mode (RGB or BGR). If None, + uses the instance's default `self.color_mode`. + + Returns: + np.ndarray: The processed image frame. + + Raises: + ValueError: If the requested `color_mode` is invalid. + RuntimeError: If the raw frame dimensions do not match the configured + `width` and `height`. + """ + requested_color_mode = self.color_mode if color_mode is None else color_mode + + if requested_color_mode not in (ColorMode.RGB, ColorMode.BGR): + raise ValueError( + f"Invalid color mode '{requested_color_mode}'. Expected {ColorMode.RGB} or {ColorMode.BGR}." + ) + + h, w, c = image.shape + + if h != self.capture_height or w != self.capture_width: + raise RuntimeError( + f"{self} frame width={w} or height={h} do not match configured width={self.capture_width} or height={self.capture_height}." + ) + + if c != 3: + raise RuntimeError(f"{self} frame channels={c} do not match expected 3 channels (RGB/BGR).") + + processed_image = image + if requested_color_mode == ColorMode.RGB: + processed_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + + if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_180]: + processed_image = cv2.rotate(processed_image, self.rotation) + + return processed_image + + def _read_loop(self) -> None: + """ + Internal loop run by the background thread for asynchronous reading. + + On each iteration: + 1. Reads a color frame + 2. Stores result in latest_frame (thread-safe) + 3. Sets new_frame_event to notify listeners + + Stops on DeviceNotConnectedError, logs other errors and continues. 
+ """ + if self.stop_event is None: + raise RuntimeError(f"{self}: stop_event is not initialized before starting read loop.") + + while not self.stop_event.is_set(): + try: + color_image = self.read() + + with self.frame_lock: + self.latest_frame = color_image + self.new_frame_event.set() + + except DeviceNotConnectedError: + break + except Exception as e: + logger.warning(f"Error reading frame in background thread for {self}: {e}") + + def _start_read_thread(self) -> None: + """Starts or restarts the background read thread if it's not running.""" + if self.thread is not None and self.thread.is_alive(): + self.thread.join(timeout=0.1) + if self.stop_event is not None: + self.stop_event.set() + + self.stop_event = Event() + self.thread = Thread(target=self._read_loop, args=(), name=f"{self}_read_loop") + self.thread.daemon = True + self.thread.start() + + def _stop_read_thread(self) -> None: + """Signals the background read thread to stop and waits for it to join.""" + if self.stop_event is not None: + self.stop_event.set() + + if self.thread is not None and self.thread.is_alive(): + self.thread.join(timeout=2.0) + + self.thread = None + self.stop_event = None + + def async_read(self, timeout_ms: float = 200) -> NDArray[Any]: + """ + Reads the latest available frame asynchronously. + + This method retrieves the most recent frame captured by the background + read thread. It does not block waiting for the camera hardware directly, + but may wait up to timeout_ms for the background thread to provide a frame. + + Args: + timeout_ms (float): Maximum time in milliseconds to wait for a frame + to become available. Defaults to 200ms (0.2 seconds). + + Returns: + np.ndarray: The latest captured frame as a NumPy array in the format + (height, width, channels), processed according to configuration. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + TimeoutError: If no frame becomes available within the specified timeout. + RuntimeError: If an unexpected error occurs. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + if self.thread is None or not self.thread.is_alive(): + self._start_read_thread() + + if not self.new_frame_event.wait(timeout=timeout_ms / 1000.0): + thread_alive = self.thread is not None and self.thread.is_alive() + raise TimeoutError( + f"Timed out waiting for frame from camera {self} after {timeout_ms} ms. " + f"Read thread alive: {thread_alive}." + ) + + with self.frame_lock: + frame = self.latest_frame + self.new_frame_event.clear() + + if frame is None: + raise RuntimeError(f"Internal error: Event set but no frame available for {self}.") + + return frame + + def disconnect(self) -> None: + """ + Disconnects from the camera and cleans up resources. + + Stops the background read thread (if running) and releases the OpenCV + VideoCapture object. + + Raises: + DeviceNotConnectedError: If the camera is already disconnected. 
+ """ + if not self.is_connected and self.thread is None: + raise DeviceNotConnectedError(f"{self} not connected.") + + if self.thread is not None: + self._stop_read_thread() + + if self.videocapture is not None: + self.videocapture.release() + self.videocapture = None + + logger.info(f"{self} disconnected.") diff --git a/src/lerobot/cameras/opencv/configuration_opencv.py b/src/lerobot/cameras/opencv/configuration_opencv.py new file mode 100644 index 0000000000000000000000000000000000000000..88ce873432972b561fa7d68062d4b50c7d3efd04 --- /dev/null +++ b/src/lerobot/cameras/opencv/configuration_opencv.py @@ -0,0 +1,85 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from pathlib import Path + +from ..configs import CameraConfig, ColorMode, Cv2Rotation + +__all__ = ["OpenCVCameraConfig", "ColorMode", "Cv2Rotation"] + + +@CameraConfig.register_subclass("opencv") +@dataclass +class OpenCVCameraConfig(CameraConfig): + """Configuration class for OpenCV-based camera devices or video files. + + This class provides configuration options for cameras accessed through OpenCV, + supporting both physical camera devices and video files. It includes settings + for resolution, frame rate, color mode, and image rotation. + + Example configurations: + ```python + # Basic configurations + OpenCVCameraConfig(0, 30, 1280, 720) # 1280x720 @ 30FPS + OpenCVCameraConfig(/dev/video4, 60, 640, 480) # 640x480 @ 60FPS + + # Advanced configurations with FOURCC format + OpenCVCameraConfig(128422271347, 30, 640, 480, rotation=Cv2Rotation.ROTATE_90, fourcc="MJPG") # With 90° rotation and MJPG format + OpenCVCameraConfig(0, 30, 1280, 720, fourcc="YUYV") # With YUYV format + ``` + + Attributes: + index_or_path: Either an integer representing the camera device index, + or a Path object pointing to a video file. + fps: Requested frames per second for the color stream. + width: Requested frame width in pixels for the color stream. + height: Requested frame height in pixels for the color stream. + color_mode: Color mode for image output (RGB or BGR). Defaults to RGB. + rotation: Image rotation setting (0°, 90°, 180°, or 270°). Defaults to no rotation. + warmup_s: Time reading frames before returning from connect (in seconds) + fourcc: FOURCC code for video format (e.g., "MJPG", "YUYV", "I420"). Defaults to None (auto-detect). + + Note: + - Only 3-channel color output (RGB/BGR) is currently supported. + - FOURCC codes must be 4-character strings (e.g., "MJPG", "YUYV"). Some common FOUCC codes: https://learn.microsoft.com/en-us/windows/win32/medfound/video-fourccs#fourcc-constants + - Setting FOURCC can help achieve higher frame rates on some cameras. 
+ """ + + index_or_path: int | Path + color_mode: ColorMode = ColorMode.RGB + rotation: Cv2Rotation = Cv2Rotation.NO_ROTATION + warmup_s: int = 1 + fourcc: str | None = None + + def __post_init__(self) -> None: + if self.color_mode not in (ColorMode.RGB, ColorMode.BGR): + raise ValueError( + f"`color_mode` is expected to be {ColorMode.RGB.value} or {ColorMode.BGR.value}, but {self.color_mode} is provided." + ) + + if self.rotation not in ( + Cv2Rotation.NO_ROTATION, + Cv2Rotation.ROTATE_90, + Cv2Rotation.ROTATE_180, + Cv2Rotation.ROTATE_270, + ): + raise ValueError( + f"`rotation` is expected to be in {(Cv2Rotation.NO_ROTATION, Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_180, Cv2Rotation.ROTATE_270)}, but {self.rotation} is provided." + ) + + if self.fourcc is not None and (not isinstance(self.fourcc, str) or len(self.fourcc) != 4): + raise ValueError( + f"`fourcc` must be a 4-character string (e.g., 'MJPG', 'YUYV'), but '{self.fourcc}' is provided." + ) diff --git a/src/lerobot/cameras/reachy2_camera/__init__.py b/src/lerobot/cameras/reachy2_camera/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc9d87f781dd48b14349d5d22fa5d2cf31367430 --- /dev/null +++ b/src/lerobot/cameras/reachy2_camera/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .configuration_reachy2_camera import Reachy2CameraConfig +from .reachy2_camera import Reachy2Camera diff --git a/src/lerobot/cameras/reachy2_camera/configuration_reachy2_camera.py b/src/lerobot/cameras/reachy2_camera/configuration_reachy2_camera.py new file mode 100644 index 0000000000000000000000000000000000000000..cdfe16e630ca406e42d610b639d3a4689d75a9c7 --- /dev/null +++ b/src/lerobot/cameras/reachy2_camera/configuration_reachy2_camera.py @@ -0,0 +1,80 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +from ..configs import CameraConfig, ColorMode + +__all__ = ["CameraConfig", "ColorMode", "Reachy2CameraConfig"] + + +@CameraConfig.register_subclass("reachy2_camera") +@dataclass +class Reachy2CameraConfig(CameraConfig): + """Configuration class for Reachy 2 camera devices. + + This class provides configuration options for Reachy 2 cameras, + supporting both the teleop and depth cameras. It includes settings + for resolution, frame rate, color mode, and the selection of the cameras. 
+ + Example configurations: + ```python + # Basic configurations + Reachy2CameraConfig( + name="teleop", + image_type="left", + ip_address="192.168.0.200", # IP address of the robot + fps=15, + width=640, + height=480, + color_mode=ColorMode.RGB, + ) # Left teleop camera, 640x480 @ 15FPS + ``` + + Attributes: + name: Name of the camera device. Can be "teleop" or "depth". + image_type: Type of image stream. For "teleop" camera, can be "left" or "right". + For "depth" camera, can be "rgb" or "depth". (depth is not supported yet) + fps: Requested frames per second for the color stream. + width: Requested frame width in pixels for the color stream. + height: Requested frame height in pixels for the color stream. + color_mode: Color mode for image output (RGB or BGR). Defaults to RGB. + ip_address: IP address of the robot. Defaults to "localhost". + port: Port number for the camera server. Defaults to 50065. + + Note: + - Only 3-channel color output (RGB/BGR) is currently supported. + """ + + name: str + image_type: str + color_mode: ColorMode = ColorMode.RGB + ip_address: str | None = "localhost" + port: int = 50065 + # use_depth: bool = False + + def __post_init__(self) -> None: + if self.name not in ["teleop", "depth"]: + raise ValueError(f"`name` is expected to be 'teleop' or 'depth', but {self.name} is provided.") + if (self.name == "teleop" and self.image_type not in ["left", "right"]) or ( + self.name == "depth" and self.image_type not in ["rgb", "depth"] + ): + raise ValueError( + f"`image_type` is expected to be 'left' or 'right' for teleop camera, and 'rgb' or 'depth' for depth camera, but {self.image_type} is provided." + ) + + if self.color_mode not in ["rgb", "bgr"]: + raise ValueError( + f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided." + ) diff --git a/src/lerobot/cameras/reachy2_camera/reachy2_camera.py b/src/lerobot/cameras/reachy2_camera/reachy2_camera.py new file mode 100644 index 0000000000000000000000000000000000000000..9d7f6058d51b11bd0ab5a13b2b465faf6c170b1d --- /dev/null +++ b/src/lerobot/cameras/reachy2_camera/reachy2_camera.py @@ -0,0 +1,299 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Provides the Reachy2Camera class for capturing frames from Reachy 2 cameras using Reachy 2's CameraManager. 
+""" + +import logging +import os +import platform +import time +from threading import Event, Lock, Thread +from typing import Any + +from numpy.typing import NDArray # type: ignore # TODO: add type stubs for numpy.typing + +# Fix MSMF hardware transform compatibility for Windows before importing cv2 +if platform.system() == "Windows" and "OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS" not in os.environ: + os.environ["OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS"] = "0" +import cv2 # type: ignore # TODO: add type stubs for OpenCV +import numpy as np # type: ignore # TODO: add type stubs for numpy +from reachy2_sdk.media.camera import CameraView # type: ignore # TODO: add type stubs for reachy2_sdk +from reachy2_sdk.media.camera_manager import ( # type: ignore # TODO: add type stubs for reachy2_sdk + CameraManager, +) + +from lerobot.utils.errors import DeviceNotConnectedError + +from ..camera import Camera +from .configuration_reachy2_camera import ColorMode, Reachy2CameraConfig + +logger = logging.getLogger(__name__) + + +class Reachy2Camera(Camera): + """ + Manages Reachy 2 camera using Reachy 2 CameraManager. + + This class provides a high-level interface to connect to, configure, and read + frames from Reachy 2 cameras. It supports both synchronous and asynchronous + frame reading. + + An Reachy2Camera instance requires a camera name (e.g., "teleop") and an image + type (e.g., "left") to be specified in the configuration. + + The camera's default settings (FPS, resolution, color mode) are used unless + overridden in the configuration. + """ + + def __init__(self, config: Reachy2CameraConfig): + """ + Initializes the Reachy2Camera instance. + + Args: + config: The configuration settings for the camera. + """ + super().__init__(config) + + self.config = config + + self.fps = config.fps + self.color_mode = config.color_mode + + self.cam_manager: CameraManager | None = None + + self.thread: Thread | None = None + self.stop_event: Event | None = None + self.frame_lock: Lock = Lock() + self.latest_frame: NDArray[Any] | None = None + self.new_frame_event: Event = Event() + + def __str__(self) -> str: + return f"{self.__class__.__name__}({self.config.name}, {self.config.image_type})" + + @property + def is_connected(self) -> bool: + """Checks if the camera is currently connected and opened.""" + if self.config.name == "teleop": + return bool( + self.cam_manager._grpc_connected and self.cam_manager.teleop if self.cam_manager else False + ) + elif self.config.name == "depth": + return bool( + self.cam_manager._grpc_connected and self.cam_manager.depth if self.cam_manager else False + ) + else: + raise ValueError(f"Invalid camera name '{self.config.name}'. Expected 'teleop' or 'depth'.") + + def connect(self, warmup: bool = True) -> None: + """ + Connects to the Reachy2 CameraManager as specified in the configuration. + """ + self.cam_manager = CameraManager(host=self.config.ip_address, port=self.config.port) + self.cam_manager.initialize_cameras() + + logger.info(f"{self} connected.") + + @staticmethod + def find_cameras(ip_address: str = "localhost", port: int = 50065) -> list[dict[str, Any]]: + """ + Detects available Reachy 2 cameras. + + Returns: + List[Dict[str, Any]]: A list of dictionaries, + where each dictionary contains 'name', 'stereo', + and the default profile properties (width, height, fps). 
+ """ + initialized_cameras = [] + camera_manager = CameraManager(host=ip_address, port=port) + + for camera in [camera_manager.teleop, camera_manager.depth]: + if camera is None: + continue + + height, width, _, _, _, _, _ = camera.get_parameters() + + camera_info = { + "name": camera._cam_info.name, + "stereo": camera._cam_info.stereo, + "default_profile": { + "width": width, + "height": height, + "fps": 30, + }, + } + initialized_cameras.append(camera_info) + + camera_manager.disconnect() + return initialized_cameras + + def read(self, color_mode: ColorMode | None = None) -> NDArray[Any]: + """ + Reads a single frame synchronously from the camera. + + This is a blocking call. + + Args: + color_mode (Optional[ColorMode]): If specified, overrides the default + color mode (`self.color_mode`) for this read operation (e.g., + request RGB even if default is BGR). + + Returns: + np.ndarray: The captured frame as a NumPy array in the format + (height, width, channels), using the specified or default + color mode and applying any configured rotation. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + start_time = time.perf_counter() + + frame: NDArray[Any] = np.empty((0, 0, 3), dtype=np.uint8) + + if self.cam_manager is None: + raise DeviceNotConnectedError(f"{self} is not connected.") + else: + if self.config.name == "teleop" and hasattr(self.cam_manager, "teleop"): + if self.config.image_type == "left": + frame = self.cam_manager.teleop.get_frame(CameraView.LEFT, size=(640, 480))[0] + elif self.config.image_type == "right": + frame = self.cam_manager.teleop.get_frame(CameraView.RIGHT, size=(640, 480))[0] + elif self.config.name == "depth" and hasattr(self.cam_manager, "depth"): + if self.config.image_type == "depth": + frame = self.cam_manager.depth.get_depth_frame()[0] + elif self.config.image_type == "rgb": + frame = self.cam_manager.depth.get_frame(size=(640, 480))[0] + + if frame is None: + return np.empty((0, 0, 3), dtype=np.uint8) + + if self.config.color_mode == "rgb": + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + + read_duration_ms = (time.perf_counter() - start_time) * 1e3 + logger.debug(f"{self} read took: {read_duration_ms:.1f}ms") + + return frame + + def _read_loop(self) -> None: + """ + Internal loop run by the background thread for asynchronous reading. + + On each iteration: + 1. Reads a color frame + 2. Stores result in latest_frame (thread-safe) + 3. Sets new_frame_event to notify listeners + + Stops on DeviceNotConnectedError, logs other errors and continues. 
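+
+        The consumer side is `async_read()`: it waits on `new_frame_event` (with a timeout) and then
+        copies `latest_frame` while holding `frame_lock`.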
+ """ + if self.stop_event is None: + raise RuntimeError(f"{self}: stop_event is not initialized before starting read loop.") + + while not self.stop_event.is_set(): + try: + color_image = self.read() + + with self.frame_lock: + self.latest_frame = color_image + self.new_frame_event.set() + + except DeviceNotConnectedError: + break + except Exception as e: + logger.warning(f"Error reading frame in background thread for {self}: {e}") + + def _start_read_thread(self) -> None: + """Starts or restarts the background read thread if it's not running.""" + if self.thread is not None and self.thread.is_alive(): + self.thread.join(timeout=0.1) + if self.stop_event is not None: + self.stop_event.set() + + self.stop_event = Event() + self.thread = Thread(target=self._read_loop, args=(), name=f"{self}_read_loop") + self.thread.daemon = True + self.thread.start() + + def _stop_read_thread(self) -> None: + """Signals the background read thread to stop and waits for it to join.""" + if self.stop_event is not None: + self.stop_event.set() + + if self.thread is not None and self.thread.is_alive(): + self.thread.join(timeout=2.0) + + self.thread = None + self.stop_event = None + + def async_read(self, timeout_ms: float = 200) -> NDArray[Any]: + """ + Reads the latest available frame asynchronously. + + This method retrieves the most recent frame captured by the background + read thread. It does not block waiting for the camera hardware directly, + but may wait up to timeout_ms for the background thread to provide a frame. + + Args: + timeout_ms (float): Maximum time in milliseconds to wait for a frame + to become available. Defaults to 200ms (0.2 seconds). + + Returns: + np.ndarray: The latest captured frame as a NumPy array in the format + (height, width, channels), processed according to configuration. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + TimeoutError: If no frame becomes available within the specified timeout. + RuntimeError: If an unexpected error occurs. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + if self.thread is None or not self.thread.is_alive(): + self._start_read_thread() + + if not self.new_frame_event.wait(timeout=timeout_ms / 1000.0): + thread_alive = self.thread is not None and self.thread.is_alive() + raise TimeoutError( + f"Timed out waiting for frame from camera {self} after {timeout_ms} ms. " + f"Read thread alive: {thread_alive}." + ) + + with self.frame_lock: + frame = self.latest_frame + self.new_frame_event.clear() + + if frame is None: + raise RuntimeError(f"Internal error: Event set but no frame available for {self}.") + + return frame + + def disconnect(self) -> None: + """ + Stops the background read thread (if running). + + Raises: + DeviceNotConnectedError: If the camera is already disconnected. + """ + if not self.is_connected and self.thread is None: + raise DeviceNotConnectedError(f"{self} not connected.") + + if self.thread is not None: + self._stop_read_thread() + + if self.cam_manager is not None: + self.cam_manager.disconnect() + + logger.info(f"{self} disconnected.") diff --git a/src/lerobot/cameras/realsense/__init__.py b/src/lerobot/cameras/realsense/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bc5184a99bc33c17d0c759dc5a561ce800f5a278 --- /dev/null +++ b/src/lerobot/cameras/realsense/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .camera_realsense import RealSenseCamera +from .configuration_realsense import RealSenseCameraConfig diff --git a/src/lerobot/cameras/realsense/camera_realsense.py b/src/lerobot/cameras/realsense/camera_realsense.py new file mode 100644 index 0000000000000000000000000000000000000000..e4b8c3164c7f9b44bd2ed24a73ddfdf7d1961d6d --- /dev/null +++ b/src/lerobot/cameras/realsense/camera_realsense.py @@ -0,0 +1,568 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Provides the RealSenseCamera class for capturing frames from Intel RealSense cameras. +""" + +import logging +import time +from threading import Event, Lock, Thread +from typing import Any + +import cv2 # type: ignore # TODO: add type stubs for OpenCV +import numpy as np # type: ignore # TODO: add type stubs for numpy +from numpy.typing import NDArray # type: ignore # TODO: add type stubs for numpy.typing + +try: + import pyrealsense2 as rs # type: ignore # TODO: add type stubs for pyrealsense2 +except Exception as e: + logging.info(f"Could not import realsense: {e}") + +from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError + +from ..camera import Camera +from ..configs import ColorMode +from ..utils import get_cv2_rotation +from .configuration_realsense import RealSenseCameraConfig + +logger = logging.getLogger(__name__) + + +class RealSenseCamera(Camera): + """ + Manages interactions with Intel RealSense cameras for frame and depth recording. + + This class provides an interface similar to `OpenCVCamera` but tailored for + RealSense devices, leveraging the `pyrealsense2` library. It uses the camera's + unique serial number for identification, offering more stability than device + indices, especially on Linux. It also supports capturing depth maps alongside + color frames. + + Use the provided utility script to find available camera indices and default profiles: + ```bash + lerobot-find-cameras realsense + ``` + + A `RealSenseCamera` instance requires a configuration object specifying the + camera's serial number or a unique device name. If using the name, ensure only + one camera with that name is connected. + + The camera's default settings (FPS, resolution, color mode) from the stream + profile are used unless overridden in the configuration. 
+ + Example: + ```python + from lerobot.cameras.realsense import RealSenseCamera, RealSenseCameraConfig + from lerobot.cameras import ColorMode, Cv2Rotation + + # Basic usage with serial number + config = RealSenseCameraConfig(serial_number_or_name="0123456789") # Replace with actual SN + camera = RealSenseCamera(config) + camera.connect() + + # Read 1 frame synchronously + color_image = camera.read() + print(color_image.shape) + + # Read 1 frame asynchronously + async_image = camera.async_read() + + # When done, properly disconnect the camera using + camera.disconnect() + + # Example with depth capture and custom settings + custom_config = RealSenseCameraConfig( + serial_number_or_name="0123456789", # Replace with actual SN + fps=30, + width=1280, + height=720, + color_mode=ColorMode.BGR, # Request BGR output + rotation=Cv2Rotation.NO_ROTATION, + use_depth=True + ) + depth_camera = RealSenseCamera(custom_config) + depth_camera.connect() + + # Read 1 depth frame + depth_map = depth_camera.read_depth() + + # Example using a unique camera name + name_config = RealSenseCameraConfig(serial_number_or_name="Intel RealSense D435") # If unique + name_camera = RealSenseCamera(name_config) + # ... connect, read, disconnect ... + ``` + """ + + def __init__(self, config: RealSenseCameraConfig): + """ + Initializes the RealSenseCamera instance. + + Args: + config: The configuration settings for the camera. + """ + + super().__init__(config) + + self.config = config + + if config.serial_number_or_name.isdigit(): + self.serial_number = config.serial_number_or_name + else: + self.serial_number = self._find_serial_number_from_name(config.serial_number_or_name) + + self.fps = config.fps + self.color_mode = config.color_mode + self.use_depth = config.use_depth + self.warmup_s = config.warmup_s + + self.rs_pipeline: rs.pipeline | None = None + self.rs_profile: rs.pipeline_profile | None = None + + self.thread: Thread | None = None + self.stop_event: Event | None = None + self.frame_lock: Lock = Lock() + self.latest_frame: NDArray[Any] | None = None + self.new_frame_event: Event = Event() + + self.rotation: int | None = get_cv2_rotation(config.rotation) + + if self.height and self.width: + self.capture_width, self.capture_height = self.width, self.height + if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]: + self.capture_width, self.capture_height = self.height, self.width + + def __str__(self) -> str: + return f"{self.__class__.__name__}({self.serial_number})" + + @property + def is_connected(self) -> bool: + """Checks if the camera pipeline is started and streams are active.""" + return self.rs_pipeline is not None and self.rs_profile is not None + + def connect(self, warmup: bool = True) -> None: + """ + Connects to the RealSense camera specified in the configuration. + + Initializes the RealSense pipeline, configures the required streams (color + and optionally depth), starts the pipeline, and validates the actual stream settings. + + Raises: + DeviceAlreadyConnectedError: If the camera is already connected. + ValueError: If the configuration is invalid (e.g., missing serial/name, name not unique). + ConnectionError: If the camera is found but fails to start the pipeline or no RealSense devices are detected at all. + RuntimeError: If the pipeline starts but fails to apply requested settings. 
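+
+        Example (a minimal sketch; the serial number is illustrative):
+            ```python
+            from lerobot.cameras.realsense import RealSenseCamera, RealSenseCameraConfig
+
+            camera = RealSenseCamera(RealSenseCameraConfig(serial_number_or_name="0123456789"))
+            camera.connect(warmup=False)  # skipping warmup: the very first read() may raise before the sensor settles
+            ```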
+ """ + if self.is_connected: + raise DeviceAlreadyConnectedError(f"{self} is already connected.") + + self.rs_pipeline = rs.pipeline() + rs_config = rs.config() + self._configure_rs_pipeline_config(rs_config) + + try: + self.rs_profile = self.rs_pipeline.start(rs_config) + except RuntimeError as e: + self.rs_profile = None + self.rs_pipeline = None + raise ConnectionError( + f"Failed to open {self}.Run `lerobot-find-cameras realsense` to find available cameras." + ) from e + + self._configure_capture_settings() + + if warmup: + time.sleep( + 1 + ) # NOTE(Steven): RS cameras need a bit of time to warm up before the first read. If we don't wait, the first read from the warmup will raise. + start_time = time.time() + while time.time() - start_time < self.warmup_s: + self.read() + time.sleep(0.1) + + logger.info(f"{self} connected.") + + @staticmethod + def find_cameras() -> list[dict[str, Any]]: + """ + Detects available Intel RealSense cameras connected to the system. + + Returns: + List[Dict[str, Any]]: A list of dictionaries, + where each dictionary contains 'type', 'id' (serial number), 'name', + firmware version, USB type, and other available specs, and the default profile properties (width, height, fps, format). + + Raises: + OSError: If pyrealsense2 is not installed. + ImportError: If pyrealsense2 is not installed. + """ + found_cameras_info = [] + context = rs.context() + devices = context.query_devices() + + for device in devices: + camera_info = { + "name": device.get_info(rs.camera_info.name), + "type": "RealSense", + "id": device.get_info(rs.camera_info.serial_number), + "firmware_version": device.get_info(rs.camera_info.firmware_version), + "usb_type_descriptor": device.get_info(rs.camera_info.usb_type_descriptor), + "physical_port": device.get_info(rs.camera_info.physical_port), + "product_id": device.get_info(rs.camera_info.product_id), + "product_line": device.get_info(rs.camera_info.product_line), + } + + # Get stream profiles for each sensor + sensors = device.query_sensors() + for sensor in sensors: + profiles = sensor.get_stream_profiles() + + for profile in profiles: + if profile.is_video_stream_profile() and profile.is_default(): + vprofile = profile.as_video_stream_profile() + stream_info = { + "stream_type": vprofile.stream_name(), + "format": vprofile.format().name, + "width": vprofile.width(), + "height": vprofile.height(), + "fps": vprofile.fps(), + } + camera_info["default_stream_profile"] = stream_info + + found_cameras_info.append(camera_info) + + return found_cameras_info + + def _find_serial_number_from_name(self, name: str) -> str: + """Finds the serial number for a given unique camera name.""" + camera_infos = self.find_cameras() + found_devices = [cam for cam in camera_infos if str(cam["name"]) == name] + + if not found_devices: + available_names = [cam["name"] for cam in camera_infos] + raise ValueError( + f"No RealSense camera found with name '{name}'. Available camera names: {available_names}" + ) + + if len(found_devices) > 1: + serial_numbers = [dev["serial_number"] for dev in found_devices] + raise ValueError( + f"Multiple RealSense cameras found with name '{name}'. " + f"Please use a unique serial number instead. 
Found SNs: {serial_numbers}" + ) + + serial_number = str(found_devices[0]["serial_number"]) + return serial_number + + def _configure_rs_pipeline_config(self, rs_config: Any) -> None: + """Creates and configures the RealSense pipeline configuration object.""" + rs.config.enable_device(rs_config, self.serial_number) + + if self.width and self.height and self.fps: + rs_config.enable_stream( + rs.stream.color, self.capture_width, self.capture_height, rs.format.rgb8, self.fps + ) + if self.use_depth: + rs_config.enable_stream( + rs.stream.depth, self.capture_width, self.capture_height, rs.format.z16, self.fps + ) + else: + rs_config.enable_stream(rs.stream.color) + if self.use_depth: + rs_config.enable_stream(rs.stream.depth) + + def _configure_capture_settings(self) -> None: + """Sets fps, width, and height from device stream if not already configured. + + Uses the color stream profile to update unset attributes. Handles rotation by + swapping width/height when needed. Original capture dimensions are always stored. + + Raises: + DeviceNotConnectedError: If device is not connected. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"Cannot validate settings for {self} as it is not connected.") + + if self.rs_profile is None: + raise RuntimeError(f"{self}: rs_profile must be initialized before use.") + + stream = self.rs_profile.get_stream(rs.stream.color).as_video_stream_profile() + + if self.fps is None: + self.fps = stream.fps() + + if self.width is None or self.height is None: + actual_width = int(round(stream.width())) + actual_height = int(round(stream.height())) + if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]: + self.width, self.height = actual_height, actual_width + self.capture_width, self.capture_height = actual_width, actual_height + else: + self.width, self.height = actual_width, actual_height + self.capture_width, self.capture_height = actual_width, actual_height + + def read_depth(self, timeout_ms: int = 200) -> NDArray[Any]: + """ + Reads a single frame (depth) synchronously from the camera. + + This is a blocking call. It waits for a coherent set of frames (depth) + from the camera hardware via the RealSense pipeline. + + Args: + timeout_ms (int): Maximum time in milliseconds to wait for a frame. Defaults to 200ms. + + Returns: + np.ndarray: The depth map as a NumPy array (height, width) + of type `np.uint16` (raw depth values in millimeters) and rotation. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + RuntimeError: If reading frames from the pipeline fails or frames are invalid. + """ + + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + if not self.use_depth: + raise RuntimeError( + f"Failed to capture depth frame '.read_depth()'. Depth stream is not enabled for {self}." 
+ ) + + start_time = time.perf_counter() + + if self.rs_pipeline is None: + raise RuntimeError(f"{self}: rs_pipeline must be initialized before use.") + + ret, frame = self.rs_pipeline.try_wait_for_frames(timeout_ms=timeout_ms) + + if not ret or frame is None: + raise RuntimeError(f"{self} read_depth failed (status={ret}).") + + depth_frame = frame.get_depth_frame() + depth_map = np.asanyarray(depth_frame.get_data()) + + depth_map_processed = self._postprocess_image(depth_map, depth_frame=True) + + read_duration_ms = (time.perf_counter() - start_time) * 1e3 + logger.debug(f"{self} read took: {read_duration_ms:.1f}ms") + + return depth_map_processed + + def read(self, color_mode: ColorMode | None = None, timeout_ms: int = 200) -> NDArray[Any]: + """ + Reads a single frame (color) synchronously from the camera. + + This is a blocking call. It waits for a coherent set of frames (color) + from the camera hardware via the RealSense pipeline. + + Args: + timeout_ms (int): Maximum time in milliseconds to wait for a frame. Defaults to 200ms. + + Returns: + np.ndarray: The captured color frame as a NumPy array + (height, width, channels), processed according to `color_mode` and rotation. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + RuntimeError: If reading frames from the pipeline fails or frames are invalid. + ValueError: If an invalid `color_mode` is requested. + """ + + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + start_time = time.perf_counter() + + if self.rs_pipeline is None: + raise RuntimeError(f"{self}: rs_pipeline must be initialized before use.") + + ret, frame = self.rs_pipeline.try_wait_for_frames(timeout_ms=timeout_ms) + + if not ret or frame is None: + raise RuntimeError(f"{self} read failed (status={ret}).") + + color_frame = frame.get_color_frame() + color_image_raw = np.asanyarray(color_frame.get_data()) + + color_image_processed = self._postprocess_image(color_image_raw, color_mode) + + read_duration_ms = (time.perf_counter() - start_time) * 1e3 + logger.debug(f"{self} read took: {read_duration_ms:.1f}ms") + + return color_image_processed + + def _postprocess_image( + self, image: NDArray[Any], color_mode: ColorMode | None = None, depth_frame: bool = False + ) -> NDArray[Any]: + """ + Applies color conversion, dimension validation, and rotation to a raw color frame. + + Args: + image (np.ndarray): The raw image frame (expected RGB format from RealSense). + color_mode (Optional[ColorMode]): The target color mode (RGB or BGR). If None, + uses the instance's default `self.color_mode`. + + Returns: + np.ndarray: The processed image frame according to `self.color_mode` and `self.rotation`. + + Raises: + ValueError: If the requested `color_mode` is invalid. + RuntimeError: If the raw frame dimensions do not match the configured + `width` and `height`. + """ + + if color_mode and color_mode not in (ColorMode.RGB, ColorMode.BGR): + raise ValueError( + f"Invalid requested color mode '{color_mode}'. Expected {ColorMode.RGB} or {ColorMode.BGR}." + ) + + if depth_frame: + h, w = image.shape + else: + h, w, c = image.shape + + if c != 3: + raise RuntimeError(f"{self} frame channels={c} do not match expected 3 channels (RGB/BGR).") + + if h != self.capture_height or w != self.capture_width: + raise RuntimeError( + f"{self} frame width={w} or height={h} do not match configured width={self.capture_width} or height={self.capture_height}." 
+ ) + + processed_image = image + if self.color_mode == ColorMode.BGR: + processed_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + + if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_180]: + processed_image = cv2.rotate(processed_image, self.rotation) + + return processed_image + + def _read_loop(self) -> None: + """ + Internal loop run by the background thread for asynchronous reading. + + On each iteration: + 1. Reads a color frame with 500ms timeout + 2. Stores result in latest_frame (thread-safe) + 3. Sets new_frame_event to notify listeners + + Stops on DeviceNotConnectedError, logs other errors and continues. + """ + if self.stop_event is None: + raise RuntimeError(f"{self}: stop_event is not initialized before starting read loop.") + + while not self.stop_event.is_set(): + try: + color_image = self.read(timeout_ms=500) + + with self.frame_lock: + self.latest_frame = color_image + self.new_frame_event.set() + + except DeviceNotConnectedError: + break + except Exception as e: + logger.warning(f"Error reading frame in background thread for {self}: {e}") + + def _start_read_thread(self) -> None: + """Starts or restarts the background read thread if it's not running.""" + if self.thread is not None and self.thread.is_alive(): + self.thread.join(timeout=0.1) + if self.stop_event is not None: + self.stop_event.set() + + self.stop_event = Event() + self.thread = Thread(target=self._read_loop, args=(), name=f"{self}_read_loop") + self.thread.daemon = True + self.thread.start() + + def _stop_read_thread(self) -> None: + """Signals the background read thread to stop and waits for it to join.""" + if self.stop_event is not None: + self.stop_event.set() + + if self.thread is not None and self.thread.is_alive(): + self.thread.join(timeout=2.0) + + self.thread = None + self.stop_event = None + + # NOTE(Steven): Missing implementation for depth for now + def async_read(self, timeout_ms: float = 200) -> NDArray[Any]: + """ + Reads the latest available frame data (color) asynchronously. + + This method retrieves the most recent color frame captured by the background + read thread. It does not block waiting for the camera hardware directly, + but may wait up to timeout_ms for the background thread to provide a frame. + + Args: + timeout_ms (float): Maximum time in milliseconds to wait for a frame + to become available. Defaults to 200ms (0.2 seconds). + + Returns: + np.ndarray: + The latest captured frame data (color image), processed according to configuration. + + Raises: + DeviceNotConnectedError: If the camera is not connected. + TimeoutError: If no frame data becomes available within the specified timeout. + RuntimeError: If the background thread died unexpectedly or another error occurs. + """ + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + if self.thread is None or not self.thread.is_alive(): + self._start_read_thread() + + if not self.new_frame_event.wait(timeout=timeout_ms / 1000.0): + thread_alive = self.thread is not None and self.thread.is_alive() + raise TimeoutError( + f"Timed out waiting for frame from camera {self} after {timeout_ms} ms. " + f"Read thread alive: {thread_alive}." + ) + + with self.frame_lock: + frame = self.latest_frame + self.new_frame_event.clear() + + if frame is None: + raise RuntimeError(f"Internal error: Event set but no frame available for {self}.") + + return frame + + def disconnect(self) -> None: + """ + Disconnects from the camera, stops the pipeline, and cleans up resources. 
+ + Stops the background read thread (if running) and stops the RealSense pipeline. + + Raises: + DeviceNotConnectedError: If the camera is already disconnected (pipeline not running). + """ + + if not self.is_connected and self.thread is None: + raise DeviceNotConnectedError( + f"Attempted to disconnect {self}, but it appears already disconnected." + ) + + if self.thread is not None: + self._stop_read_thread() + + if self.rs_pipeline is not None: + self.rs_pipeline.stop() + self.rs_pipeline = None + self.rs_profile = None + + logger.info(f"{self} disconnected.") diff --git a/src/lerobot/cameras/realsense/configuration_realsense.py b/src/lerobot/cameras/realsense/configuration_realsense.py new file mode 100644 index 0000000000000000000000000000000000000000..e981b35341e004a528c8bfeac9ef2c0f0542fdd4 --- /dev/null +++ b/src/lerobot/cameras/realsense/configuration_realsense.py @@ -0,0 +1,82 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +from ..configs import CameraConfig, ColorMode, Cv2Rotation + + +@CameraConfig.register_subclass("intelrealsense") +@dataclass +class RealSenseCameraConfig(CameraConfig): + """Configuration class for Intel RealSense cameras. + + This class provides specialized configuration options for Intel RealSense cameras, + including support for depth sensing and device identification via serial number or name. + + Example configurations for Intel RealSense D405: + ```python + # Basic configurations + RealSenseCameraConfig("0123456789", 30, 1280, 720) # 1280x720 @ 30FPS + RealSenseCameraConfig("0123456789", 60, 640, 480) # 640x480 @ 60FPS + + # Advanced configurations + RealSenseCameraConfig("0123456789", 30, 640, 480, use_depth=True) # With depth sensing + RealSenseCameraConfig("0123456789", 30, 640, 480, rotation=Cv2Rotation.ROTATE_90) # With 90° rotation + ``` + + Attributes: + fps: Requested frames per second for the color stream. + width: Requested frame width in pixels for the color stream. + height: Requested frame height in pixels for the color stream. + serial_number_or_name: Unique serial number or human-readable name to identify the camera. + color_mode: Color mode for image output (RGB or BGR). Defaults to RGB. + use_depth: Whether to enable depth stream. Defaults to False. + rotation: Image rotation setting (0°, 90°, 180°, or 270°). Defaults to no rotation. + warmup_s: Time reading frames before returning from connect (in seconds) + + Note: + - Either name or serial_number must be specified. + - Depth stream configuration (if enabled) will use the same FPS as the color stream. + - The actual resolution and FPS may be adjusted by the camera to the nearest supported mode. + - For `fps`, `width` and `height`, either all of them need to be set, or none of them. 
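+
+    Partially specifying the capture mode fails fast (a sketch of the `__post_init__` validation,
+    assuming `fps`/`width`/`height` are passed as keywords):
+    ```python
+    RealSenseCameraConfig("0123456789", fps=30)
+    # ValueError: For `fps`, `width` and `height`, either all of them need to be set, or none of them.
+    ```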
+ """ + + serial_number_or_name: str + color_mode: ColorMode = ColorMode.RGB + use_depth: bool = False + rotation: Cv2Rotation = Cv2Rotation.NO_ROTATION + warmup_s: int = 1 + + def __post_init__(self) -> None: + if self.color_mode not in (ColorMode.RGB, ColorMode.BGR): + raise ValueError( + f"`color_mode` is expected to be {ColorMode.RGB.value} or {ColorMode.BGR.value}, but {self.color_mode} is provided." + ) + + if self.rotation not in ( + Cv2Rotation.NO_ROTATION, + Cv2Rotation.ROTATE_90, + Cv2Rotation.ROTATE_180, + Cv2Rotation.ROTATE_270, + ): + raise ValueError( + f"`rotation` is expected to be in {(Cv2Rotation.NO_ROTATION, Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_180, Cv2Rotation.ROTATE_270)}, but {self.rotation} is provided." + ) + + values = (self.fps, self.width, self.height) + if any(v is not None for v in values) and any(v is None for v in values): + raise ValueError( + "For `fps`, `width` and `height`, either all of them need to be set, or none of them." + ) diff --git a/src/lerobot/cameras/utils.py b/src/lerobot/cameras/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0b64c3cdb6f86362cf4b9b11b37370a0c75b3b9a --- /dev/null +++ b/src/lerobot/cameras/utils.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
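+
+# Example usage of `make_cameras_from_configs` (a sketch; the dict key and serial number are illustrative):
+#
+#     from lerobot.cameras.realsense import RealSenseCameraConfig
+#     from lerobot.cameras.utils import make_cameras_from_configs
+#
+#     cameras = make_cameras_from_configs({"wrist": RealSenseCameraConfig("0123456789", 30, 640, 480)})
+#     cameras["wrist"].connect()
+#     frame = cameras["wrist"].read()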
+ +import platform +from typing import cast + +from lerobot.utils.import_utils import make_device_from_device_class + +from .camera import Camera +from .configs import CameraConfig, Cv2Rotation + + +def make_cameras_from_configs(camera_configs: dict[str, CameraConfig]) -> dict[str, Camera]: + cameras: dict[str, Camera] = {} + + for key, cfg in camera_configs.items(): + # TODO(Steven): Consider just using the make_device_from_device_class for all types + if cfg.type == "opencv": + from .opencv import OpenCVCamera + + cameras[key] = OpenCVCamera(cfg) + + elif cfg.type == "intelrealsense": + from .realsense.camera_realsense import RealSenseCamera + + cameras[key] = RealSenseCamera(cfg) + + elif cfg.type == "reachy2_camera": + from .reachy2_camera.reachy2_camera import Reachy2Camera + + cameras[key] = Reachy2Camera(cfg) + + else: + try: + cameras[key] = cast(Camera, make_device_from_device_class(cfg)) + except Exception as e: + raise ValueError(f"Error creating camera {key} with config {cfg}: {e}") from e + + return cameras + + +def get_cv2_rotation(rotation: Cv2Rotation) -> int | None: + import cv2 # type: ignore # TODO: add type stubs for OpenCV + + if rotation == Cv2Rotation.ROTATE_90: + return int(cv2.ROTATE_90_CLOCKWISE) + elif rotation == Cv2Rotation.ROTATE_180: + return int(cv2.ROTATE_180) + elif rotation == Cv2Rotation.ROTATE_270: + return int(cv2.ROTATE_90_COUNTERCLOCKWISE) + else: + return None + + +def get_cv2_backend() -> int: + import cv2 + + if platform.system() == "Windows": + return int(cv2.CAP_MSMF) # Use MSMF for Windows instead of AVFOUNDATION + # elif platform.system() == "Darwin": # macOS + # return cv2.CAP_AVFOUNDATION + else: # Linux and others + return int(cv2.CAP_ANY) diff --git a/src/lerobot/configs/default.py b/src/lerobot/configs/default.py new file mode 100644 index 0000000000000000000000000000000000000000..6f2f5b11ba01f7f9242be92b11c2b8a1259b5e42 --- /dev/null +++ b/src/lerobot/configs/default.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field + +from lerobot.datasets.transforms import ImageTransformsConfig +from lerobot.datasets.video_utils import get_safe_default_codec + + +@dataclass +class DatasetConfig: + # You may provide a list of datasets here. `train.py` creates them all and concatenates them. Note: only data + # keys common between the datasets are kept. Each dataset gets and additional transform that inserts the + # "dataset_index" into the returned item. The index mapping is made according to the order in which the + # datasets are provided. + repo_id: str + # Root directory where the dataset will be stored (e.g. 'dataset/path'). 
+ root: str | None = None + episodes: list[int] | None = None + image_transforms: ImageTransformsConfig = field(default_factory=ImageTransformsConfig) + revision: str | None = None + use_imagenet_stats: bool = True + video_backend: str = field(default_factory=get_safe_default_codec) + streaming: bool = False + + +@dataclass +class WandBConfig: + enable: bool = False + # Set to true to disable saving an artifact despite training.save_checkpoint=True + disable_artifact: bool = False + project: str = "lerobot" + entity: str | None = None + notes: str | None = None + run_id: str | None = None + mode: str | None = None # Allowed values: 'online', 'offline' 'disabled'. Defaults to 'online' + + +@dataclass +class EvalConfig: + n_episodes: int = 50 + # `batch_size` specifies the number of environments to use in a gym.vector.VectorEnv. + batch_size: int = 50 + # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing). + use_async_envs: bool = False + + def __post_init__(self) -> None: + if self.batch_size > self.n_episodes: + raise ValueError( + "The eval batch size is greater than the number of eval episodes " + f"({self.batch_size} > {self.n_episodes}). As a result, {self.batch_size} " + f"eval environments will be instantiated, but only {self.n_episodes} will be used. " + "This might significantly slow down evaluation. To fix this, you should update your command " + f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={self.batch_size}`), " + f"or lower the batch size (e.g. `eval.batch_size={self.n_episodes}`)." + ) diff --git a/src/lerobot/configs/eval.py b/src/lerobot/configs/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..1a79e96f696631443b7242a3adcf69cd57cb6717 --- /dev/null +++ b/src/lerobot/configs/eval.py @@ -0,0 +1,73 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime as dt +from dataclasses import dataclass, field +from logging import getLogger +from pathlib import Path + +from lerobot import envs, policies # noqa: F401 +from lerobot.configs import parser +from lerobot.configs.default import EvalConfig +from lerobot.configs.policies import PreTrainedConfig + +logger = getLogger(__name__) + + +@dataclass +class EvalPipelineConfig: + # Either the repo ID of a model hosted on the Hub or a path to a directory containing weights + # saved using `Policy.save_pretrained`. If not provided, the policy is initialized from scratch + # (useful for debugging). This argument is mutually exclusive with `--config`. 
+ env: envs.EnvConfig + eval: EvalConfig = field(default_factory=EvalConfig) + policy: PreTrainedConfig | None = None + output_dir: Path | None = None + job_name: str | None = None + seed: int | None = 1000 + # Rename map for the observation to override the image and state keys + rename_map: dict[str, str] = field(default_factory=dict) + + def __post_init__(self) -> None: + # HACK: We parse again the cli args here to get the pretrained path if there was one. + policy_path = parser.get_path_arg("policy") + if policy_path: + cli_overrides = parser.get_cli_overrides("policy") + self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) + self.policy.pretrained_path = Path(policy_path) + + else: + logger.warning( + "No pretrained path was provided, evaluated policy will be built from scratch (random weights)." + ) + + if not self.job_name: + if self.env is None: + self.job_name = f"{self.policy.type if self.policy is not None else 'scratch'}" + else: + self.job_name = ( + f"{self.env.type}_{self.policy.type if self.policy is not None else 'scratch'}" + ) + + logger.warning(f"No job name provided, using '{self.job_name}' as job name.") + + if not self.output_dir: + now = dt.datetime.now() + eval_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}" + self.output_dir = Path("outputs/eval") / eval_dir + + @classmethod + def __get_path_fields__(cls) -> list[str]: + """This enables the parser to load config from the policy using `--policy.path=local/dir`""" + return ["policy"] diff --git a/src/lerobot/configs/parser.py b/src/lerobot/configs/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..df3ce9e626b678e66e427f74156844a4a9e85349 --- /dev/null +++ b/src/lerobot/configs/parser.py @@ -0,0 +1,238 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +import inspect +import pkgutil +import sys +from argparse import ArgumentError +from collections.abc import Callable, Iterable, Sequence +from functools import wraps +from pathlib import Path +from pkgutil import ModuleInfo +from types import ModuleType +from typing import Any, TypeVar, cast + +import draccus + +from lerobot.utils.utils import has_method + +F = TypeVar("F", bound=Callable[..., object]) + +PATH_KEY = "path" +PLUGIN_DISCOVERY_SUFFIX = "discover_packages_path" + + +def get_cli_overrides(field_name: str, args: Sequence[str] | None = None) -> list[str] | None: + """Parses arguments from cli at a given nested attribute level. + + For example, supposing the main script was called with: + python myscript.py --arg1=1 --arg2.subarg1=abc --arg2.subarg2=some/path + + If called during execution of myscript.py, get_cli_overrides("arg2") will return: + ["--subarg1=abc" "--subarg2=some/path"] + """ + if args is None: + args = sys.argv[1:] + attr_level_args = [] + detect_string = f"--{field_name}." 
+ exclude_strings = (f"--{field_name}.{draccus.CHOICE_TYPE_KEY}=", f"--{field_name}.{PATH_KEY}=") + for arg in args: + if arg.startswith(detect_string) and not arg.startswith(exclude_strings): + denested_arg = f"--{arg.removeprefix(detect_string)}" + attr_level_args.append(denested_arg) + + return attr_level_args + + +def parse_arg(arg_name: str, args: Sequence[str] | None = None) -> str | None: + if args is None: + args = sys.argv[1:] + prefix = f"--{arg_name}=" + for arg in args: + if arg.startswith(prefix): + return arg[len(prefix) :] + return None + + +def parse_plugin_args(plugin_arg_suffix: str, args: Sequence[str]) -> dict[str, str]: + """Parse plugin-related arguments from command-line arguments. + + This function extracts arguments from command-line arguments that match a specified suffix pattern. + It processes arguments in the format '--key=value' and returns them as a dictionary. + + Args: + plugin_arg_suffix (str): The suffix to identify plugin-related arguments. + cli_args (Sequence[str]): A sequence of command-line arguments to parse. + + Returns: + dict: A dictionary containing the parsed plugin arguments where: + - Keys are the argument names (with '--' prefix removed if present) + - Values are the corresponding argument values + + Example: + >>> args = ["--env.discover_packages_path=my_package", "--other_arg=value"] + >>> parse_plugin_args("discover_packages_path", args) + {'env.discover_packages_path': 'my_package'} + """ + plugin_args = {} + for arg in args: + if "=" in arg and plugin_arg_suffix in arg: + key, value = arg.split("=", 1) + # Remove leading '--' if present + if key.startswith("--"): + key = key[2:] + plugin_args[key] = value + return plugin_args + + +class PluginLoadError(Exception): + """Raised when a plugin fails to load.""" + + +def load_plugin(plugin_path: str) -> None: + """Load and initialize a plugin from a given Python package path. + + This function attempts to load a plugin by importing its package and any submodules. + Plugin registration is expected to happen during package initialization, i.e. when + the package is imported the gym environment should be registered and the config classes + registered with their parents using the `register_subclass` decorator. + + Args: + plugin_path (str): The Python package path to the plugin (e.g. "mypackage.plugins.myplugin") + + Raises: + PluginLoadError: If the plugin cannot be loaded due to import errors or if the package path is invalid. + + Examples: + >>> load_plugin("external_plugin.core") # Loads plugin from external package + + Notes: + - The plugin package should handle its own registration during import + - All submodules in the plugin package will be imported + - Implementation follows the plugin discovery pattern from Python packaging guidelines + + See Also: + https://packaging.python.org/en/latest/guides/creating-and-discovering-plugins/ + """ + try: + package_module = importlib.import_module(plugin_path, __package__) + except (ImportError, ModuleNotFoundError) as e: + raise PluginLoadError( + f"Failed to load plugin '{plugin_path}'. Verify the path and installation: {str(e)}" + ) from e + + def iter_namespace(ns_pkg: ModuleType) -> Iterable[ModuleInfo]: + return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".") + + try: + for _finder, pkg_name, _ispkg in iter_namespace(package_module): + importlib.import_module(pkg_name) + except ImportError as e: + raise PluginLoadError( + f"Failed to load plugin '{plugin_path}'. 
Verify the path and installation: {str(e)}" + ) from e + + +def get_path_arg(field_name: str, args: Sequence[str] | None = None) -> str | None: + return parse_arg(f"{field_name}.{PATH_KEY}", args) + + +def get_type_arg(field_name: str, args: Sequence[str] | None = None) -> str | None: + return parse_arg(f"{field_name}.{draccus.CHOICE_TYPE_KEY}", args) + + +def filter_arg(field_to_filter: str, args: Sequence[str] | None = None) -> list[str]: + if args is None: + return [] + return [arg for arg in args if not arg.startswith(f"--{field_to_filter}=")] + + +def filter_path_args(fields_to_filter: str | list[str], args: Sequence[str] | None = None) -> list[str]: + """ + Filters command-line arguments related to fields with specific path arguments. + + Args: + fields_to_filter (str | list[str]): A single str or a list of str whose arguments need to be filtered. + args (Sequence[str] | None): The sequence of command-line arguments to be filtered. + Defaults to None. + + Returns: + list[str]: A filtered list of arguments, with arguments related to the specified + fields removed. + + Raises: + ArgumentError: If both a path argument (e.g., `--field_name.path`) and a type + argument (e.g., `--field_name.type`) are specified for the same field. + """ + if isinstance(fields_to_filter, str): + fields_to_filter = [fields_to_filter] + + filtered_args = [] if args is None else list(args) + + for field in fields_to_filter: + if get_path_arg(field, args): + if get_type_arg(field, args): + raise ArgumentError( + argument=None, + message=f"Cannot specify both --{field}.{PATH_KEY} and --{field}.{draccus.CHOICE_TYPE_KEY}", + ) + filtered_args = [arg for arg in filtered_args if not arg.startswith(f"--{field}.")] + + return filtered_args + + +def wrap(config_path: Path | None = None) -> Callable[[F], F]: + """ + HACK: Similar to draccus.wrap but does three additional things: + - Will remove '.path' arguments from CLI in order to process them later on. + - If a 'config_path' is passed and the main config class has a 'from_pretrained' method, will + initialize it from there to allow to fetch configs from the hub directly + - Will load plugins specified in the CLI arguments. 
These plugins will typically register + their own subclasses of config classes, so that draccus can find the right class to instantiate + from the CLI '.type' arguments + """ + + def wrapper_outer(fn: F) -> F: + @wraps(fn) + def wrapper_inner(*args: Any, **kwargs: Any) -> Any: + argspec = inspect.getfullargspec(fn) + argtype = argspec.annotations[argspec.args[0]] + if len(args) > 0 and type(args[0]) is argtype: + cfg = args[0] + args = args[1:] + else: + cli_args = sys.argv[1:] + plugin_args = parse_plugin_args(PLUGIN_DISCOVERY_SUFFIX, cli_args) + for plugin_cli_arg, plugin_path in plugin_args.items(): + try: + load_plugin(plugin_path) + except PluginLoadError as e: + # add the relevant CLI arg to the error message + raise PluginLoadError(f"{e}\nFailed plugin CLI Arg: {plugin_cli_arg}") from e + cli_args = filter_arg(plugin_cli_arg, cli_args) + config_path_cli = parse_arg("config_path", cli_args) + if has_method(argtype, "__get_path_fields__"): + path_fields = argtype.__get_path_fields__() + cli_args = filter_path_args(path_fields, cli_args) + if has_method(argtype, "from_pretrained") and config_path_cli: + cli_args = filter_arg("config_path", cli_args) + cfg = argtype.from_pretrained(config_path_cli, cli_args=cli_args) + else: + cfg = draccus.parse(config_class=argtype, config_path=config_path, args=cli_args) + response = fn(cfg, *args, **kwargs) + return response + + return cast(F, wrapper_inner) + + return cast(Callable[[F], F], wrapper_outer) diff --git a/src/lerobot/configs/policies.py b/src/lerobot/configs/policies.py new file mode 100644 index 0000000000000000000000000000000000000000..ad3a43b74ffcf20b7ddd48b655bae4237acea852 --- /dev/null +++ b/src/lerobot/configs/policies.py @@ -0,0 +1,214 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import abc +import builtins +import json +import os +import tempfile +from dataclasses import dataclass, field +from logging import getLogger +from pathlib import Path +from typing import Any, TypeVar + +import draccus +from huggingface_hub import hf_hub_download +from huggingface_hub.constants import CONFIG_NAME +from huggingface_hub.errors import HfHubHTTPError + +from lerobot.configs.types import FeatureType, PolicyFeature +from lerobot.optim.optimizers import OptimizerConfig +from lerobot.optim.schedulers import LRSchedulerConfig +from lerobot.utils.constants import ACTION, OBS_STATE +from lerobot.utils.hub import HubMixin +from lerobot.utils.utils import auto_select_torch_device, is_amp_available, is_torch_device_available + +T = TypeVar("T", bound="PreTrainedConfig") +logger = getLogger(__name__) + + +@dataclass +class PreTrainedConfig(draccus.ChoiceRegistry, HubMixin, abc.ABC): # type: ignore[misc,name-defined] #TODO: draccus issue + """ + Base configuration class for policy models. + + Args: + n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the + current step and additional steps going back). 
+ input_shapes: A dictionary defining the shapes of the input data for the policy. + output_shapes: A dictionary defining the shapes of the output data for the policy. + input_normalization_modes: A dictionary with key representing the modality and the value specifies the + normalization mode to apply. + output_normalization_modes: Similar dictionary as `input_normalization_modes`, but to unnormalize to + the original scale. + """ + + n_obs_steps: int = 1 + + input_features: dict[str, PolicyFeature] = field(default_factory=dict) + output_features: dict[str, PolicyFeature] = field(default_factory=dict) + + device: str | None = None # e.g. "cuda", "cuda:0", "cpu", or "mps" + # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP, + # automatic gradient scaling is used. + use_amp: bool = False + + push_to_hub: bool = True # type: ignore[assignment] # TODO: use a different name to avoid override + repo_id: str | None = None + + # Upload on private repository on the Hugging Face hub. + private: bool | None = None + # Add tags to your policy on the hub. + tags: list[str] | None = None + # Add tags to your policy on the hub. + license: str | None = None + # Either the repo ID of a model hosted on the Hub or a path to a directory containing weights + # saved using `Policy.save_pretrained`. If not provided, the policy is initialized from scratch. + pretrained_path: Path | None = None + + def __post_init__(self) -> None: + if not self.device or not is_torch_device_available(self.device): + auto_device = auto_select_torch_device() + logger.warning(f"Device '{self.device}' is not available. Switching to '{auto_device}'.") + self.device = auto_device.type + + # Automatically deactivate AMP if necessary + if self.use_amp and not is_amp_available(self.device): + logger.warning( + f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP." 
+ ) + self.use_amp = False + + @property + def type(self) -> str: + choice_name = self.get_choice_name(self.__class__) + if not isinstance(choice_name, str): + raise TypeError(f"Expected string from get_choice_name, got {type(choice_name)}") + return choice_name + + @property + @abc.abstractmethod + def observation_delta_indices(self) -> list | None: # type: ignore[type-arg] #TODO: No implementation + raise NotImplementedError + + @property + @abc.abstractmethod + def action_delta_indices(self) -> list | None: # type: ignore[type-arg] #TODO: No implementation + raise NotImplementedError + + @property + @abc.abstractmethod + def reward_delta_indices(self) -> list | None: # type: ignore[type-arg] #TODO: No implementation + raise NotImplementedError + + @abc.abstractmethod + def get_optimizer_preset(self) -> OptimizerConfig: + raise NotImplementedError + + @abc.abstractmethod + def get_scheduler_preset(self) -> LRSchedulerConfig | None: + raise NotImplementedError + + @abc.abstractmethod + def validate_features(self) -> None: + raise NotImplementedError + + @property + def robot_state_feature(self) -> PolicyFeature | None: + for ft_name, ft in self.input_features.items(): + if ft.type is FeatureType.STATE and ft_name == OBS_STATE: + return ft + return None + + @property + def env_state_feature(self) -> PolicyFeature | None: + for _, ft in self.input_features.items(): + if ft.type is FeatureType.ENV: + return ft + return None + + @property + def image_features(self) -> dict[str, PolicyFeature]: + return {key: ft for key, ft in self.input_features.items() if ft.type is FeatureType.VISUAL} + + @property + def action_feature(self) -> PolicyFeature | None: + for ft_name, ft in self.output_features.items(): + if ft.type is FeatureType.ACTION and ft_name == ACTION: + return ft + return None + + def _save_pretrained(self, save_directory: Path) -> None: + with open(save_directory / CONFIG_NAME, "w") as f, draccus.config_type("json"): + draccus.dump(self, f, indent=4) + + @classmethod + def from_pretrained( + cls: builtins.type[T], + pretrained_name_or_path: str | Path, + *, + force_download: bool = False, + resume_download: bool | None = None, + proxies: dict[Any, Any] | None = None, + token: str | bool | None = None, + cache_dir: str | Path | None = None, + local_files_only: bool = False, + revision: str | None = None, + **policy_kwargs: Any, + ) -> T: + model_id = str(pretrained_name_or_path) + config_file: str | None = None + if Path(model_id).is_dir(): + if CONFIG_NAME in os.listdir(model_id): + config_file = os.path.join(model_id, CONFIG_NAME) + else: + logger.error(f"{CONFIG_NAME} not found in {Path(model_id).resolve()}") + else: + try: + config_file = hf_hub_download( + repo_id=model_id, + filename=CONFIG_NAME, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + except HfHubHTTPError as e: + raise FileNotFoundError( + f"{CONFIG_NAME} not found on the HuggingFace Hub in {model_id}" + ) from e + + # HACK: Parse the original config to get the config subclass, so that we can + # apply cli overrides. 
+ # This is very ugly, ideally we'd like to be able to do that natively with draccus + # something like --policy.path (in addition to --policy.type) + with draccus.config_type("json"): + orig_config = draccus.parse(cls, config_file, args=[]) + + if config_file is None: + raise FileNotFoundError(f"{CONFIG_NAME} not found in {model_id}") + + with open(config_file) as f: + config = json.load(f) + + config.pop("type") + with tempfile.NamedTemporaryFile("w+", delete=False, suffix=".json") as f: + json.dump(config, f) + config_file = f.name + + cli_overrides = policy_kwargs.pop("cli_overrides", []) + with draccus.config_type("json"): + return draccus.parse(orig_config.__class__, config_file, args=cli_overrides) diff --git a/src/lerobot/configs/train.py b/src/lerobot/configs/train.py new file mode 100644 index 0000000000000000000000000000000000000000..1f470eb3ae62af49b46ef053b69996171c2a1ece --- /dev/null +++ b/src/lerobot/configs/train.py @@ -0,0 +1,195 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import builtins +import datetime as dt +import os +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any + +import draccus +from huggingface_hub import hf_hub_download +from huggingface_hub.errors import HfHubHTTPError + +from lerobot import envs +from lerobot.configs import parser +from lerobot.configs.default import DatasetConfig, EvalConfig, WandBConfig +from lerobot.configs.policies import PreTrainedConfig +from lerobot.optim import OptimizerConfig +from lerobot.optim.schedulers import LRSchedulerConfig +from lerobot.utils.hub import HubMixin + +TRAIN_CONFIG_NAME = "train_config.json" + + +@dataclass +class TrainPipelineConfig(HubMixin): + dataset: DatasetConfig + env: envs.EnvConfig | None = None + policy: PreTrainedConfig | None = None + # Set `dir` to where you would like to save all of the run outputs. If you run another training session + # with the same value for `dir` its contents will be overwritten unless you set `resume` to true. + output_dir: Path | None = None + job_name: str | None = None + # Set `resume` to true to resume a previous run. In order for this to work, you will need to make sure + # `dir` is the directory of an existing run with at least one checkpoint in it. + # Note that when resuming a run, the default behavior is to use the configuration from the checkpoint, + # regardless of what's provided with the training command at the time of resumption. + resume: bool = False + # `seed` is used for training (eg: model initialization, dataset shuffling) + # AND for the evaluation environments. + seed: int | None = 1000 + # Number of workers for the dataloader. + num_workers: int = 4 + batch_size: int = 8 + steps: int = 100_000 + eval_freq: int = 20_000 + log_freq: int = 200 + save_checkpoint: bool = True + # Checkpoint is saved every `save_freq` training iterations and after the last training step. 
+ save_freq: int = 20_000 + use_policy_training_preset: bool = True + optimizer: OptimizerConfig | None = None + scheduler: LRSchedulerConfig | None = None + eval: EvalConfig = field(default_factory=EvalConfig) + wandb: WandBConfig = field(default_factory=WandBConfig) + checkpoint_path: Path | None = field(init=False, default=None) + # Rename map for the observation to override the image and state keys + rename_map: dict[str, str] = field(default_factory=dict) + + def validate(self) -> None: + # HACK: We parse again the cli args here to get the pretrained paths if there was some. + policy_path = parser.get_path_arg("policy") + if policy_path: + # Only load the policy config + cli_overrides = parser.get_cli_overrides("policy") + self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) + self.policy.pretrained_path = Path(policy_path) + elif self.resume: + # The entire train config is already loaded, we just need to get the checkpoint dir + config_path = parser.parse_arg("config_path") + if not config_path: + raise ValueError( + f"A config_path is expected when resuming a run. Please specify path to {TRAIN_CONFIG_NAME}" + ) + + if not Path(config_path).resolve().exists(): + raise NotADirectoryError( + f"{config_path=} is expected to be a local path. " + "Resuming from the hub is not supported for now." + ) + + policy_dir = Path(config_path).parent + if self.policy is not None: + self.policy.pretrained_path = policy_dir + self.checkpoint_path = policy_dir.parent + + if self.policy is None: + raise ValueError( + "Policy is not configured. Please specify a pretrained policy with `--policy.path`." + ) + + if not self.job_name: + if self.env is None: + self.job_name = f"{self.policy.type}" + else: + self.job_name = f"{self.env.type}_{self.policy.type}" + + if not self.resume and isinstance(self.output_dir, Path) and self.output_dir.is_dir(): + raise FileExistsError( + f"Output directory {self.output_dir} already exists and resume is {self.resume}. " + f"Please change your output directory so that {self.output_dir} is not overwritten." + ) + elif not self.output_dir: + now = dt.datetime.now() + train_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}" + self.output_dir = Path("outputs/train") / train_dir + + if isinstance(self.dataset.repo_id, list): + raise NotImplementedError("LeRobotMultiDataset is not currently implemented.") + + if not self.use_policy_training_preset and (self.optimizer is None or self.scheduler is None): + raise ValueError("Optimizer and Scheduler must be set when the policy presets are not used.") + elif self.use_policy_training_preset and not self.resume: + self.optimizer = self.policy.get_optimizer_preset() + self.scheduler = self.policy.get_scheduler_preset() + + if self.policy.push_to_hub and not self.policy.repo_id: + raise ValueError( + "'policy.repo_id' argument missing. Please specify it to push the model to the hub." 
+ )
+
+ @classmethod
+ def __get_path_fields__(cls) -> list[str]:
+ """This enables the parser to load config from the policy using `--policy.path=local/dir`"""
+ return ["policy"]
+
+ def to_dict(self) -> dict[str, Any]:
+ return draccus.encode(self) # type: ignore[no-any-return] # because draccus, the third-party parsing library, types the return as Any
+
+ def _save_pretrained(self, save_directory: Path) -> None:
+ with open(save_directory / TRAIN_CONFIG_NAME, "w") as f, draccus.config_type("json"):
+ draccus.dump(self, f, indent=4)
+
+ @classmethod
+ def from_pretrained(
+ cls: builtins.type["TrainPipelineConfig"],
+ pretrained_name_or_path: str | Path,
+ *,
+ force_download: bool = False,
+ resume_download: bool | None = None,
+ proxies: dict[Any, Any] | None = None,
+ token: str | bool | None = None,
+ cache_dir: str | Path | None = None,
+ local_files_only: bool = False,
+ revision: str | None = None,
+ **kwargs: Any,
+ ) -> "TrainPipelineConfig":
+ model_id = str(pretrained_name_or_path)
+ config_file: str | None = None
+ if Path(model_id).is_dir():
+ if TRAIN_CONFIG_NAME in os.listdir(model_id):
+ config_file = os.path.join(model_id, TRAIN_CONFIG_NAME)
+ else:
+ raise FileNotFoundError(f"{TRAIN_CONFIG_NAME} not found in {Path(model_id).resolve()}")
+ elif Path(model_id).is_file():
+ config_file = model_id
+ else:
+ try:
+ config_file = hf_hub_download(
+ repo_id=model_id,
+ filename=TRAIN_CONFIG_NAME,
+ revision=revision,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ proxies=proxies,
+ resume_download=resume_download,
+ token=token,
+ local_files_only=local_files_only,
+ )
+ except HfHubHTTPError as e:
+ raise FileNotFoundError(
+ f"{TRAIN_CONFIG_NAME} not found on the HuggingFace Hub in {model_id}"
+ ) from e
+
+ cli_args = kwargs.pop("cli_args", [])
+ with draccus.config_type("json"):
+ return draccus.parse(cls, config_file, args=cli_args)
+
+
+@dataclass(kw_only=True)
+class TrainRLServerPipelineConfig(TrainPipelineConfig):
+ # NOTE: In RL, we don't need an offline dataset
+ # TODO: Make `TrainPipelineConfig.dataset` optional
+ dataset: DatasetConfig | None = None # type: ignore[assignment] # because the parent class has made its type non-optional
diff --git a/src/lerobot/configs/types.py b/src/lerobot/configs/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..33244ab630e83557fd1bee2828fe405097cb1532
--- /dev/null
+++ b/src/lerobot/configs/types.py
@@ -0,0 +1,52 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Note: We subclass str so that serialization is straightforward +# https://stackoverflow.com/questions/24481852/serialising-an-enum-member-to-json +from dataclasses import dataclass +from enum import Enum + + +class FeatureType(str, Enum): + STATE = "STATE" + VISUAL = "VISUAL" + ENV = "ENV" + ACTION = "ACTION" + REWARD = "REWARD" + LANGUAGE = "LANGUAGE" + + +class PipelineFeatureType(str, Enum): + ACTION = "ACTION" + OBSERVATION = "OBSERVATION" + + +class NormalizationMode(str, Enum): + MIN_MAX = "MIN_MAX" + MEAN_STD = "MEAN_STD" + IDENTITY = "IDENTITY" + QUANTILES = "QUANTILES" + QUANTILE10 = "QUANTILE10" + + +@dataclass +class PolicyFeature: + type: FeatureType + shape: tuple[int, ...] + + +class RTCAttentionSchedule(str, Enum): + ZEROS = "ZEROS" + ONES = "ONES" + LINEAR = "LINEAR" + EXP = "EXP" diff --git a/src/lerobot/datasets/aggregate.py b/src/lerobot/datasets/aggregate.py new file mode 100644 index 0000000000000000000000000000000000000000..b059744feb7d64536cfa67eaf30b39b403c6091e --- /dev/null +++ b/src/lerobot/datasets/aggregate.py @@ -0,0 +1,531 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import shutil +from pathlib import Path + +import pandas as pd +import tqdm + +from lerobot.datasets.compute_stats import aggregate_stats +from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata +from lerobot.datasets.utils import ( + DEFAULT_CHUNK_SIZE, + DEFAULT_DATA_FILE_SIZE_IN_MB, + DEFAULT_DATA_PATH, + DEFAULT_EPISODES_PATH, + DEFAULT_VIDEO_FILE_SIZE_IN_MB, + DEFAULT_VIDEO_PATH, + get_file_size_in_mb, + get_parquet_file_size_in_mb, + to_parquet_with_hf_images, + update_chunk_file_indices, + write_info, + write_stats, + write_tasks, +) +from lerobot.datasets.video_utils import concatenate_video_files, get_video_duration_in_s + + +def validate_all_metadata(all_metadata: list[LeRobotDatasetMetadata]): + """Validates that all dataset metadata have consistent properties. + + Ensures all datasets have the same fps, robot_type, and features to guarantee + compatibility when aggregating them into a single dataset. + + Args: + all_metadata: List of LeRobotDatasetMetadata objects to validate. + + Returns: + tuple: A tuple containing (fps, robot_type, features) from the first metadata. + + Raises: + ValueError: If any metadata has different fps, robot_type, or features + than the first metadata in the list. + """ + + fps = all_metadata[0].fps + robot_type = all_metadata[0].robot_type + features = all_metadata[0].features + + for meta in tqdm.tqdm(all_metadata, desc="Validate all meta data"): + if fps != meta.fps: + raise ValueError(f"Same fps is expected, but got fps={meta.fps} instead of {fps}.") + if robot_type != meta.robot_type: + raise ValueError( + f"Same robot_type is expected, but got robot_type={meta.robot_type} instead of {robot_type}." 
+ ) + if features != meta.features: + raise ValueError( + f"Same features is expected, but got features={meta.features} instead of {features}." + ) + + return fps, robot_type, features + + +def update_data_df(df, src_meta, dst_meta): + """Updates a data DataFrame with new indices and task mappings for aggregation. + + Adjusts episode indices, frame indices, and task indices to account for + previously aggregated data in the destination dataset. + + Args: + df: DataFrame containing the data to be updated. + src_meta: Source dataset metadata. + dst_meta: Destination dataset metadata. + + Returns: + pd.DataFrame: Updated DataFrame with adjusted indices. + """ + + df["episode_index"] = df["episode_index"] + dst_meta.info["total_episodes"] + df["index"] = df["index"] + dst_meta.info["total_frames"] + + src_task_names = src_meta.tasks.index.take(df["task_index"].to_numpy()) + df["task_index"] = dst_meta.tasks.loc[src_task_names, "task_index"].to_numpy() + + return df + + +def update_meta_data( + df, + dst_meta, + meta_idx, + data_idx, + videos_idx, +): + """Updates metadata DataFrame with new chunk, file, and timestamp indices. + + Adjusts all indices and timestamps to account for previously aggregated + data and videos in the destination dataset. + + Args: + df: DataFrame containing the metadata to be updated. + dst_meta: Destination dataset metadata. + meta_idx: Dictionary containing current metadata chunk and file indices. + data_idx: Dictionary containing current data chunk and file indices. + videos_idx: Dictionary containing current video indices and timestamps. + + Returns: + pd.DataFrame: Updated DataFrame with adjusted indices and timestamps. + """ + + df["meta/episodes/chunk_index"] = df["meta/episodes/chunk_index"] + meta_idx["chunk"] + df["meta/episodes/file_index"] = df["meta/episodes/file_index"] + meta_idx["file"] + df["data/chunk_index"] = df["data/chunk_index"] + data_idx["chunk"] + df["data/file_index"] = df["data/file_index"] + data_idx["file"] + for key, video_idx in videos_idx.items(): + # Store original video file indices before updating + orig_chunk_col = f"videos/{key}/chunk_index" + orig_file_col = f"videos/{key}/file_index" + df["_orig_chunk"] = df[orig_chunk_col].copy() + df["_orig_file"] = df[orig_file_col].copy() + + # Update chunk and file indices to point to destination + df[orig_chunk_col] = video_idx["chunk"] + df[orig_file_col] = video_idx["file"] + + # Apply per-source-file timestamp offsets + src_to_offset = video_idx.get("src_to_offset", {}) + if src_to_offset: + # Apply offset based on original source file + for idx in df.index: + src_key = (df.at[idx, "_orig_chunk"], df.at[idx, "_orig_file"]) + offset = src_to_offset.get(src_key, 0) + df.at[idx, f"videos/{key}/from_timestamp"] += offset + df.at[idx, f"videos/{key}/to_timestamp"] += offset + else: + # Fallback to simple offset (for backward compatibility) + df[f"videos/{key}/from_timestamp"] = ( + df[f"videos/{key}/from_timestamp"] + video_idx["latest_duration"] + ) + df[f"videos/{key}/to_timestamp"] = df[f"videos/{key}/to_timestamp"] + video_idx["latest_duration"] + + # Clean up temporary columns + df = df.drop(columns=["_orig_chunk", "_orig_file"]) + + df["dataset_from_index"] = df["dataset_from_index"] + dst_meta.info["total_frames"] + df["dataset_to_index"] = df["dataset_to_index"] + dst_meta.info["total_frames"] + df["episode_index"] = df["episode_index"] + dst_meta.info["total_episodes"] + + return df + + +def aggregate_datasets( + repo_ids: list[str], + aggr_repo_id: str, + roots: list[Path] | None = 
None, + aggr_root: Path | None = None, + data_files_size_in_mb: float | None = None, + video_files_size_in_mb: float | None = None, + chunk_size: int | None = None, +): + """Aggregates multiple LeRobot datasets into a single unified dataset. + + This is the main function that orchestrates the aggregation process by: + 1. Loading and validating all source dataset metadata + 2. Creating a new destination dataset with unified tasks + 3. Aggregating videos, data, and metadata from all source datasets + 4. Finalizing the aggregated dataset with proper statistics + + Args: + repo_ids: List of repository IDs for the datasets to aggregate. + aggr_repo_id: Repository ID for the aggregated output dataset. + roots: Optional list of root paths for the source datasets. + aggr_root: Optional root path for the aggregated dataset. + data_files_size_in_mb: Maximum size for data files in MB (defaults to DEFAULT_DATA_FILE_SIZE_IN_MB) + video_files_size_in_mb: Maximum size for video files in MB (defaults to DEFAULT_VIDEO_FILE_SIZE_IN_MB) + chunk_size: Maximum number of files per chunk (defaults to DEFAULT_CHUNK_SIZE) + """ + logging.info("Start aggregate_datasets") + + if data_files_size_in_mb is None: + data_files_size_in_mb = DEFAULT_DATA_FILE_SIZE_IN_MB + if video_files_size_in_mb is None: + video_files_size_in_mb = DEFAULT_VIDEO_FILE_SIZE_IN_MB + if chunk_size is None: + chunk_size = DEFAULT_CHUNK_SIZE + + all_metadata = ( + [LeRobotDatasetMetadata(repo_id) for repo_id in repo_ids] + if roots is None + else [ + LeRobotDatasetMetadata(repo_id, root=root) for repo_id, root in zip(repo_ids, roots, strict=False) + ] + ) + fps, robot_type, features = validate_all_metadata(all_metadata) + video_keys = [key for key in features if features[key]["dtype"] == "video"] + + dst_meta = LeRobotDatasetMetadata.create( + repo_id=aggr_repo_id, + fps=fps, + robot_type=robot_type, + features=features, + root=aggr_root, + use_videos=len(video_keys) > 0, + chunks_size=chunk_size, + data_files_size_in_mb=data_files_size_in_mb, + video_files_size_in_mb=video_files_size_in_mb, + ) + + logging.info("Find all tasks") + unique_tasks = pd.concat([m.tasks for m in all_metadata]).index.unique() + dst_meta.tasks = pd.DataFrame({"task_index": range(len(unique_tasks))}, index=unique_tasks) + + meta_idx = {"chunk": 0, "file": 0} + data_idx = {"chunk": 0, "file": 0} + videos_idx = { + key: {"chunk": 0, "file": 0, "latest_duration": 0, "episode_duration": 0} for key in video_keys + } + + dst_meta.episodes = {} + + for src_meta in tqdm.tqdm(all_metadata, desc="Copy data and videos"): + videos_idx = aggregate_videos(src_meta, dst_meta, videos_idx, video_files_size_in_mb, chunk_size) + data_idx = aggregate_data(src_meta, dst_meta, data_idx, data_files_size_in_mb, chunk_size) + + meta_idx = aggregate_metadata(src_meta, dst_meta, meta_idx, data_idx, videos_idx) + + dst_meta.info["total_episodes"] += src_meta.total_episodes + dst_meta.info["total_frames"] += src_meta.total_frames + + finalize_aggregation(dst_meta, all_metadata) + logging.info("Aggregation complete.") + + +def aggregate_videos(src_meta, dst_meta, videos_idx, video_files_size_in_mb, chunk_size): + """Aggregates video chunks from a source dataset into the destination dataset. + + Handles video file concatenation and rotation based on file size limits. + Creates new video files when size limits are exceeded. + + Args: + src_meta: Source dataset metadata. + dst_meta: Destination dataset metadata. + videos_idx: Dictionary tracking video chunk and file indices. 
+ video_files_size_in_mb: Maximum size for video files in MB (defaults to DEFAULT_VIDEO_FILE_SIZE_IN_MB) + chunk_size: Maximum number of files per chunk (defaults to DEFAULT_CHUNK_SIZE) + + Returns: + dict: Updated videos_idx with current chunk and file indices. + """ + for key in videos_idx: + videos_idx[key]["episode_duration"] = 0 + # Track offset for each source (chunk, file) pair + videos_idx[key]["src_to_offset"] = {} + + for key, video_idx in videos_idx.items(): + unique_chunk_file_pairs = { + (chunk, file) + for chunk, file in zip( + src_meta.episodes[f"videos/{key}/chunk_index"], + src_meta.episodes[f"videos/{key}/file_index"], + strict=False, + ) + } + unique_chunk_file_pairs = sorted(unique_chunk_file_pairs) + + chunk_idx = video_idx["chunk"] + file_idx = video_idx["file"] + current_offset = video_idx["latest_duration"] + + for src_chunk_idx, src_file_idx in unique_chunk_file_pairs: + src_path = src_meta.root / DEFAULT_VIDEO_PATH.format( + video_key=key, + chunk_index=src_chunk_idx, + file_index=src_file_idx, + ) + + dst_path = dst_meta.root / DEFAULT_VIDEO_PATH.format( + video_key=key, + chunk_index=chunk_idx, + file_index=file_idx, + ) + + src_duration = get_video_duration_in_s(src_path) + + if not dst_path.exists(): + # Store offset before incrementing + videos_idx[key]["src_to_offset"][(src_chunk_idx, src_file_idx)] = current_offset + dst_path.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(str(src_path), str(dst_path)) + videos_idx[key]["episode_duration"] += src_duration + current_offset += src_duration + continue + + # Check file sizes before appending + src_size = get_file_size_in_mb(src_path) + dst_size = get_file_size_in_mb(dst_path) + + if dst_size + src_size >= video_files_size_in_mb: + # Rotate to a new file, this source becomes start of new destination + # So its offset should be 0 + videos_idx[key]["src_to_offset"][(src_chunk_idx, src_file_idx)] = 0 + chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, chunk_size) + dst_path = dst_meta.root / DEFAULT_VIDEO_PATH.format( + video_key=key, + chunk_index=chunk_idx, + file_index=file_idx, + ) + dst_path.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(str(src_path), str(dst_path)) + # Reset offset for next file + current_offset = src_duration + else: + # Append to existing video file - use current accumulated offset + videos_idx[key]["src_to_offset"][(src_chunk_idx, src_file_idx)] = current_offset + concatenate_video_files( + [dst_path, src_path], + dst_path, + ) + current_offset += src_duration + + videos_idx[key]["episode_duration"] += src_duration + + videos_idx[key]["chunk"] = chunk_idx + videos_idx[key]["file"] = file_idx + + return videos_idx + + +def aggregate_data(src_meta, dst_meta, data_idx, data_files_size_in_mb, chunk_size): + """Aggregates data chunks from a source dataset into the destination dataset. + + Reads source data files, updates indices to match the aggregated dataset, + and writes them to the destination with proper file rotation. + + Args: + src_meta: Source dataset metadata. + dst_meta: Destination dataset metadata. + data_idx: Dictionary tracking data chunk and file indices. + + Returns: + dict: Updated data_idx with current chunk and file indices. 
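+
+ Note:
+ `data_files_size_in_mb` and `chunk_size` bound the output files: when
+ appending a source file would push the destination parquet past the
+ size limit, writing rotates to a new chunk/file pair.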
+ """ + unique_chunk_file_ids = { + (c, f) + for c, f in zip( + src_meta.episodes["data/chunk_index"], src_meta.episodes["data/file_index"], strict=False + ) + } + + unique_chunk_file_ids = sorted(unique_chunk_file_ids) + + for src_chunk_idx, src_file_idx in unique_chunk_file_ids: + src_path = src_meta.root / DEFAULT_DATA_PATH.format( + chunk_index=src_chunk_idx, file_index=src_file_idx + ) + df = pd.read_parquet(src_path) + df = update_data_df(df, src_meta, dst_meta) + + data_idx = append_or_create_parquet_file( + df, + src_path, + data_idx, + data_files_size_in_mb, + chunk_size, + DEFAULT_DATA_PATH, + contains_images=len(dst_meta.image_keys) > 0, + aggr_root=dst_meta.root, + ) + + return data_idx + + +def aggregate_metadata(src_meta, dst_meta, meta_idx, data_idx, videos_idx): + """Aggregates metadata from a source dataset into the destination dataset. + + Reads source metadata files, updates all indices and timestamps, + and writes them to the destination with proper file rotation. + + Args: + src_meta: Source dataset metadata. + dst_meta: Destination dataset metadata. + meta_idx: Dictionary tracking metadata chunk and file indices. + data_idx: Dictionary tracking data chunk and file indices. + videos_idx: Dictionary tracking video indices and timestamps. + + Returns: + dict: Updated meta_idx with current chunk and file indices. + """ + chunk_file_ids = { + (c, f) + for c, f in zip( + src_meta.episodes["meta/episodes/chunk_index"], + src_meta.episodes["meta/episodes/file_index"], + strict=False, + ) + } + + chunk_file_ids = sorted(chunk_file_ids) + for chunk_idx, file_idx in chunk_file_ids: + src_path = src_meta.root / DEFAULT_EPISODES_PATH.format(chunk_index=chunk_idx, file_index=file_idx) + df = pd.read_parquet(src_path) + df = update_meta_data( + df, + dst_meta, + meta_idx, + data_idx, + videos_idx, + ) + + meta_idx = append_or_create_parquet_file( + df, + src_path, + meta_idx, + DEFAULT_DATA_FILE_SIZE_IN_MB, + DEFAULT_CHUNK_SIZE, + DEFAULT_EPISODES_PATH, + contains_images=False, + aggr_root=dst_meta.root, + ) + + # Increment latest_duration by the total duration added from this source dataset + for k in videos_idx: + videos_idx[k]["latest_duration"] += videos_idx[k]["episode_duration"] + + return meta_idx + + +def append_or_create_parquet_file( + df: pd.DataFrame, + src_path: Path, + idx: dict[str, int], + max_mb: float, + chunk_size: int, + default_path: str, + contains_images: bool = False, + aggr_root: Path = None, +): + """Appends data to an existing parquet file or creates a new one based on size constraints. + + Manages file rotation when size limits are exceeded to prevent individual files + from becoming too large. Handles both regular parquet files and those containing images. + + Args: + df: DataFrame to write to the parquet file. + src_path: Path to the source file (used for size estimation). + idx: Dictionary containing current 'chunk' and 'file' indices. + max_mb: Maximum allowed file size in MB before rotation. + chunk_size: Maximum number of files per chunk before incrementing chunk index. + default_path: Format string for generating file paths. + contains_images: Whether the data contains images requiring special handling. + aggr_root: Root path for the aggregated dataset. + + Returns: + dict: Updated index dictionary with current chunk and file indices. 
+ """ + dst_path = aggr_root / default_path.format(chunk_index=idx["chunk"], file_index=idx["file"]) + + if not dst_path.exists(): + dst_path.parent.mkdir(parents=True, exist_ok=True) + if contains_images: + to_parquet_with_hf_images(df, dst_path) + else: + df.to_parquet(dst_path) + return idx + + src_size = get_parquet_file_size_in_mb(src_path) + dst_size = get_parquet_file_size_in_mb(dst_path) + + if dst_size + src_size >= max_mb: + idx["chunk"], idx["file"] = update_chunk_file_indices(idx["chunk"], idx["file"], chunk_size) + new_path = aggr_root / default_path.format(chunk_index=idx["chunk"], file_index=idx["file"]) + new_path.parent.mkdir(parents=True, exist_ok=True) + final_df = df + target_path = new_path + else: + existing_df = pd.read_parquet(dst_path) + final_df = pd.concat([existing_df, df], ignore_index=True) + target_path = dst_path + + if contains_images: + to_parquet_with_hf_images(final_df, target_path) + else: + final_df.to_parquet(target_path) + + return idx + + +def finalize_aggregation(aggr_meta, all_metadata): + """Finalizes the dataset aggregation by writing summary files and statistics. + + Writes the tasks file, info file with total counts and splits, and + aggregated statistics from all source datasets. + + Args: + aggr_meta: Aggregated dataset metadata. + all_metadata: List of all source dataset metadata objects. + """ + logging.info("write tasks") + write_tasks(aggr_meta.tasks, aggr_meta.root) + + logging.info("write info") + aggr_meta.info.update( + { + "total_tasks": len(aggr_meta.tasks), + "total_episodes": sum(m.total_episodes for m in all_metadata), + "total_frames": sum(m.total_frames for m in all_metadata), + "splits": {"train": f"0:{sum(m.total_episodes for m in all_metadata)}"}, + } + ) + write_info(aggr_meta.info, aggr_meta.root) + + logging.info("write stats") + aggr_meta.stats = aggregate_stats([m.stats for m in all_metadata]) + write_stats(aggr_meta.stats, aggr_meta.root) diff --git a/src/lerobot/datasets/backward_compatibility.py b/src/lerobot/datasets/backward_compatibility.py new file mode 100644 index 0000000000000000000000000000000000000000..b86f361289b6566dc125b9ddf6a7de71482ec9fc --- /dev/null +++ b/src/lerobot/datasets/backward_compatibility.py @@ -0,0 +1,56 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import packaging.version + +V30_MESSAGE = """ +The dataset you requested ({repo_id}) is in {version} format. + +We introduced a new format since v3.0 which is not backward compatible with v2.1. +Please, update your dataset to the new format using this command: +``` +python -m lerobot.datasets.v30.convert_dataset_v21_to_v30 --repo-id={repo_id} +``` + +If you already have a converted version uploaded to the hub, then this error might be because of +an older version in your local cache. Consider deleting the cached version and retrying. 
+ +If you encounter a problem, contact LeRobot maintainers on [Discord](https://discord.com/invite/s3KuuzsPFb) +or open an [issue on GitHub](https://github.com/huggingface/lerobot/issues/new/choose). +""" + +FUTURE_MESSAGE = """ +The dataset you requested ({repo_id}) is only available in {version} format. +As we cannot ensure forward compatibility with it, please update your current version of lerobot. +""" + + +class CompatibilityError(Exception): ... + + +class BackwardCompatibilityError(CompatibilityError): + def __init__(self, repo_id: str, version: packaging.version.Version): + if version.major == 2 and version.minor == 1: + message = V30_MESSAGE.format(repo_id=repo_id, version=version) + else: + raise NotImplementedError( + "Contact the maintainer on [Discord](https://discord.com/invite/s3KuuzsPFb)." + ) + super().__init__(message) + + +class ForwardCompatibilityError(CompatibilityError): + def __init__(self, repo_id: str, version: packaging.version.Version): + message = FUTURE_MESSAGE.format(repo_id=repo_id, version=version) + super().__init__(message) diff --git a/src/lerobot/datasets/card_template.md b/src/lerobot/datasets/card_template.md new file mode 100644 index 0000000000000000000000000000000000000000..39828ae6a557ba8e649b8ec5cb515fe4875da26e --- /dev/null +++ b/src/lerobot/datasets/card_template.md @@ -0,0 +1,28 @@ +--- +# For reference on dataset card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1 +# Doc / guide: https://huggingface.co/docs/hub/datasets-cards +# prettier-ignore +{{card_data}} +--- + +This dataset was created using [LeRobot](https://github.com/huggingface/lerobot). + +## Dataset Description + +{{ dataset_description | default("", true) }} + +- **Homepage:** {{ url | default("[More Information Needed]", true)}} +- **Paper:** {{ paper | default("[More Information Needed]", true)}} +- **License:** {{ license | default("[More Information Needed]", true)}} + +## Dataset Structure + +{{ dataset_structure | default("[More Information Needed]", true)}} + +## Citation + +**BibTeX:** + +```bibtex +{{ citation_bibtex | default("[More Information Needed]", true)}} +``` diff --git a/src/lerobot/datasets/compute_stats.py b/src/lerobot/datasets/compute_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..93975615af21e143ba489906926fdcd7321d38c8 --- /dev/null +++ b/src/lerobot/datasets/compute_stats.py @@ -0,0 +1,626 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy as np + +from lerobot.datasets.utils import load_image_as_numpy + +DEFAULT_QUANTILES = [0.01, 0.10, 0.50, 0.90, 0.99] + + +class RunningQuantileStats: + """ + Maintains running statistics for batches of vectors, including mean, + standard deviation, min, max, and approximate quantiles. + + Statistics are computed per feature dimension and updated incrementally + as new batches are observed. 
Quantiles are estimated using histograms,
+ which adapt dynamically if the observed data range expands.
+ """
+
+ def __init__(self, quantile_list: list[float] | None = None, num_quantile_bins: int = 5000):
+ self._count = 0
+ self._mean = None
+ self._mean_of_squares = None
+ self._min = None
+ self._max = None
+ self._histograms = None
+ self._bin_edges = None
+ self._num_quantile_bins = num_quantile_bins
+
+ self._quantile_list = quantile_list
+ if self._quantile_list is None:
+ self._quantile_list = DEFAULT_QUANTILES
+ self._quantile_keys = [f"q{int(q * 100):02d}" for q in self._quantile_list]
+
+ def update(self, batch: np.ndarray) -> None:
+ """Update the running statistics with a batch of vectors.
+
+ Args:
+ batch: An array where all dimensions except the last are batch dimensions.
+ """
+ batch = batch.reshape(-1, batch.shape[-1])
+ num_elements, vector_length = batch.shape
+
+ if self._count == 0:
+ self._mean = np.mean(batch, axis=0)
+ self._mean_of_squares = np.mean(batch**2, axis=0)
+ self._min = np.min(batch, axis=0)
+ self._max = np.max(batch, axis=0)
+ self._histograms = [np.zeros(self._num_quantile_bins) for _ in range(vector_length)]
+ self._bin_edges = [
+ np.linspace(self._min[i] - 1e-10, self._max[i] + 1e-10, self._num_quantile_bins + 1)
+ for i in range(vector_length)
+ ]
+ else:
+ if vector_length != self._mean.size:
+ raise ValueError("The length of new vectors does not match the initialized vector length.")
+
+ new_max = np.max(batch, axis=0)
+ new_min = np.min(batch, axis=0)
+ max_changed = np.any(new_max > self._max)
+ min_changed = np.any(new_min < self._min)
+ self._max = np.maximum(self._max, new_max)
+ self._min = np.minimum(self._min, new_min)
+
+ if max_changed or min_changed:
+ self._adjust_histograms()
+
+ self._count += num_elements
+
+ batch_mean = np.mean(batch, axis=0)
+ batch_mean_of_squares = np.mean(batch**2, axis=0)
+
+ # Update running mean and mean of squares
+ self._mean += (batch_mean - self._mean) * (num_elements / self._count)
+ self._mean_of_squares += (batch_mean_of_squares - self._mean_of_squares) * (
+ num_elements / self._count
+ )
+
+ self._update_histograms(batch)
+
+ def get_statistics(self) -> dict[str, np.ndarray]:
+ """Compute and return the statistics of the vectors processed so far.
+
+ Quantiles are those configured via `quantile_list` in the constructor
+ (DEFAULT_QUANTILES if none were given).
+
+ Returns:
+ Dictionary containing the computed statistics.
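+
+ Example (illustrative):
+ rs = RunningQuantileStats()
+ rs.update(np.random.rand(1000, 3))
+ stats = rs.get_statistics()
+ sorted(stats) # ['count', 'max', 'mean', 'min', 'q01', 'q10', 'q50', 'q90', 'q99', 'std']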
+ """ + if self._count < 2: + raise ValueError("Cannot compute statistics for less than 2 vectors.") + + variance = self._mean_of_squares - self._mean**2 + + stddev = np.sqrt(np.maximum(0, variance)) + + stats = { + "min": self._min.copy(), + "max": self._max.copy(), + "mean": self._mean.copy(), + "std": stddev, + "count": np.array([self._count]), + } + + quantile_results = self._compute_quantiles() + for i, q in enumerate(self._quantile_keys): + stats[q] = quantile_results[i] + + return stats + + def _adjust_histograms(self): + """Adjust histograms when min or max changes.""" + for i in range(len(self._histograms)): + old_edges = self._bin_edges[i] + old_hist = self._histograms[i] + + # Create new edges with small padding to ensure range coverage + padding = (self._max[i] - self._min[i]) * 1e-10 + new_edges = np.linspace( + self._min[i] - padding, self._max[i] + padding, self._num_quantile_bins + 1 + ) + + # Redistribute existing histogram counts to new bins + # We need to map each old bin center to the new bins + old_centers = (old_edges[:-1] + old_edges[1:]) / 2 + new_hist = np.zeros(self._num_quantile_bins) + + for old_center, count in zip(old_centers, old_hist, strict=False): + if count > 0: + # Find which new bin this old center belongs to + bin_idx = np.searchsorted(new_edges, old_center) - 1 + bin_idx = max(0, min(bin_idx, self._num_quantile_bins - 1)) + new_hist[bin_idx] += count + + self._histograms[i] = new_hist + self._bin_edges[i] = new_edges + + def _update_histograms(self, batch: np.ndarray) -> None: + """Update histograms with new vectors.""" + for i in range(batch.shape[1]): + hist, _ = np.histogram(batch[:, i], bins=self._bin_edges[i]) + self._histograms[i] += hist + + def _compute_quantiles(self) -> list[np.ndarray]: + """Compute quantiles based on histograms.""" + results = [] + for q in self._quantile_list: + target_count = q * self._count + q_values = [] + + for hist, edges in zip(self._histograms, self._bin_edges, strict=True): + q_value = self._compute_single_quantile(hist, edges, target_count) + q_values.append(q_value) + + results.append(np.array(q_values)) + return results + + def _compute_single_quantile(self, hist: np.ndarray, edges: np.ndarray, target_count: float) -> float: + """Compute a single quantile value from histogram and bin edges.""" + cumsum = np.cumsum(hist) + idx = np.searchsorted(cumsum, target_count) + + if idx == 0: + return edges[0] + if idx >= len(cumsum): + return edges[-1] + + # If not edge case, interpolate within the bin + count_before = cumsum[idx - 1] + count_in_bin = cumsum[idx] - count_before + + # If no samples in this bin, use the bin edge + if count_in_bin == 0: + return edges[idx] + + # Linear interpolation within the bin + fraction = (target_count - count_before) / count_in_bin + return edges[idx] + fraction * (edges[idx + 1] - edges[idx]) + + +def estimate_num_samples( + dataset_len: int, min_num_samples: int = 100, max_num_samples: int = 10_000, power: float = 0.75 +) -> int: + """Heuristic to estimate the number of samples based on dataset size. + The power controls the sample growth relative to dataset size. + Lower the power for less number of samples. 
+ + For default arguments, we have: + - from 1 to ~500, num_samples=100 + - at 1000, num_samples=177 + - at 2000, num_samples=299 + - at 5000, num_samples=594 + - at 10000, num_samples=1000 + - at 20000, num_samples=1681 + """ + if dataset_len < min_num_samples: + min_num_samples = dataset_len + return max(min_num_samples, min(int(dataset_len**power), max_num_samples)) + + +def sample_indices(data_len: int) -> list[int]: + num_samples = estimate_num_samples(data_len) + return np.round(np.linspace(0, data_len - 1, num_samples)).astype(int).tolist() + + +def auto_downsample_height_width(img: np.ndarray, target_size: int = 150, max_size_threshold: int = 300): + _, height, width = img.shape + + if max(width, height) < max_size_threshold: + # no downsampling needed + return img + + downsample_factor = int(width / target_size) if width > height else int(height / target_size) + return img[:, ::downsample_factor, ::downsample_factor] + + +def sample_images(image_paths: list[str]) -> np.ndarray: + sampled_indices = sample_indices(len(image_paths)) + + images = None + for i, idx in enumerate(sampled_indices): + path = image_paths[idx] + # we load as uint8 to reduce memory usage + img = load_image_as_numpy(path, dtype=np.uint8, channel_first=True) + img = auto_downsample_height_width(img) + + if images is None: + images = np.empty((len(sampled_indices), *img.shape), dtype=np.uint8) + + images[i] = img + + return images + + +def _reshape_stats_by_axis( + stats: dict[str, np.ndarray], + axis: int | tuple[int, ...] | None, + keepdims: bool, + original_shape: tuple[int, ...], +) -> dict[str, np.ndarray]: + """Reshape all statistics to match NumPy's output conventions. + + Applies consistent reshaping to all statistics (except 'count') based on the + axis and keepdims parameters. This ensures statistics have the correct shape + for broadcasting with the original data. + + Args: + stats: Dictionary of computed statistics + axis: Axis or axes along which statistics were computed + keepdims: Whether to keep reduced dimensions as size-1 dimensions + original_shape: Shape of the original array + + Returns: + Dictionary with reshaped statistics + + Note: + The 'count' statistic is never reshaped as it represents metadata + rather than per-feature statistics. + """ + if axis == (1,) and not keepdims: + return stats + + result = {} + for key, value in stats.items(): + if key == "count": + result[key] = value + else: + result[key] = _reshape_single_stat(value, axis, keepdims, original_shape) + + return result + + +def _reshape_for_image_stats(value: np.ndarray, keepdims: bool) -> np.ndarray: + """Reshape statistics for image data (axis=(0,2,3)).""" + if keepdims and value.ndim == 1: + return value.reshape(1, -1, 1, 1) + return value + + +def _reshape_for_vector_stats( + value: np.ndarray, keepdims: bool, original_shape: tuple[int, ...] 
+) -> np.ndarray: + """Reshape statistics for vector data (axis=0 or axis=(0,)).""" + if not keepdims: + return value + + if len(original_shape) == 1 and value.ndim > 0: + return value.reshape(1) + elif len(original_shape) >= 2 and value.ndim == 1: + return value.reshape(1, -1) + return value + + +def _reshape_for_feature_stats(value: np.ndarray, keepdims: bool) -> np.ndarray: + """Reshape statistics for feature-wise computation (axis=(1,)).""" + if not keepdims: + return value + + if value.ndim == 0: + return value.reshape(1, 1) + elif value.ndim == 1: + return value.reshape(-1, 1) + return value + + +def _reshape_for_global_stats( + value: np.ndarray, keepdims: bool, original_shape: tuple[int, ...] +) -> np.ndarray | float: + """Reshape statistics for global reduction (axis=None).""" + if keepdims: + target_shape = tuple(1 for _ in original_shape) + return value.reshape(target_shape) + # Keep at least 1-D arrays to satisfy validator + return np.atleast_1d(value) + + +def _reshape_single_stat( + value: np.ndarray, axis: int | tuple[int, ...] | None, keepdims: bool, original_shape: tuple[int, ...] +) -> np.ndarray | float: + """Apply appropriate reshaping to a single statistic array. + + This function transforms statistic arrays to match expected output shapes + based on the axis configuration and keepdims parameter. + + Args: + value: The statistic array to reshape + axis: Axis or axes that were reduced during computation + keepdims: Whether to maintain reduced dimensions as size-1 dimensions + original_shape: Shape of the original data before reduction + + Returns: + Reshaped array following NumPy broadcasting conventions + + """ + if axis == (0, 2, 3): + return _reshape_for_image_stats(value, keepdims) + + if axis in [0, (0,)]: + return _reshape_for_vector_stats(value, keepdims, original_shape) + + if axis == (1,): + return _reshape_for_feature_stats(value, keepdims) + + if axis is None: + return _reshape_for_global_stats(value, keepdims, original_shape) + + return value + + +def _prepare_array_for_stats(array: np.ndarray, axis: int | tuple[int, ...] | None) -> tuple[np.ndarray, int]: + """Prepare array for statistics computation by reshaping according to axis. + + Args: + array: Input data array + axis: Axis or axes along which to compute statistics + + Returns: + Tuple of (reshaped_array, sample_count) + """ + if axis == (0, 2, 3): # Image data + batch_size, channels, height, width = array.shape + reshaped = array.transpose(0, 2, 3, 1).reshape(-1, channels) + return reshaped, batch_size + + if axis == 0 or axis == (0,): # Vector data + reshaped = array + if array.ndim == 1: + reshaped = array.reshape(-1, 1) + return reshaped, array.shape[0] + + if axis == (1,): # Feature-wise statistics + return array.T, array.shape[1] + + if axis is None: # Global statistics + reshaped = array.reshape(-1, 1) + # For backward compatibility, count represents the first dimension size + return reshaped, array.shape[0] if array.ndim > 0 else 1 + + raise ValueError(f"Unsupported axis configuration: {axis}") + + +def _compute_basic_stats( + array: np.ndarray, sample_count: int, quantile_list: list[float] | None = None +) -> dict[str, np.ndarray]: + """Compute basic statistics for arrays with insufficient samples for quantiles. 
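+ Quantile entries are filled with copies of the mean, since too few
+ samples carry no distributional information.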
+ + Args: + array: Reshaped array ready for statistics computation + sample_count: Number of samples represented in the data + + Returns: + Dictionary with basic statistics and quantiles set to mean values + """ + if quantile_list is None: + quantile_list = DEFAULT_QUANTILES + quantile_list_keys = [f"q{int(q * 100):02d}" for q in quantile_list] + + stats = { + "min": np.min(array, axis=0), + "max": np.max(array, axis=0), + "mean": np.mean(array, axis=0), + "std": np.std(array, axis=0), + "count": np.array([sample_count]), + } + + for q in quantile_list_keys: + stats[q] = stats["mean"].copy() + + return stats + + +def get_feature_stats( + array: np.ndarray, + axis: int | tuple[int, ...] | None, + keepdims: bool, + quantile_list: list[float] | None = None, +) -> dict[str, np.ndarray]: + """Compute comprehensive statistics for array features along specified axes. + + This function calculates min, max, mean, std, and quantiles (1%, 10%, 50%, 90%, 99%) + for the input array along the specified axes. It handles different data layouts: + - Image data: axis=(0,2,3) computes per-channel statistics + - Vector data: axis=0 computes per-feature statistics + - Feature-wise: axis=1 computes statistics across features + - Global: axis=None computes statistics over entire array + + Args: + array: Input data array with shape appropriate for the specified axis + axis: Axis or axes along which to compute statistics + - (0, 2, 3): For image data (batch, channels, height, width) + - 0 or (0,): For vector/tabular data (samples, features) + - (1,): For computing across features + - None: For global statistics over entire array + keepdims: If True, reduced axes are kept as dimensions with size 1 + + Returns: + Dictionary containing: + - 'min': Minimum values + - 'max': Maximum values + - 'mean': Mean values + - 'std': Standard deviation + - 'count': Number of samples (always shape (1,)) + - 'q01', 'q10', 'q50', 'q90', 'q99': Quantile values + + """ + if quantile_list is None: + quantile_list = DEFAULT_QUANTILES + + original_shape = array.shape + reshaped, sample_count = _prepare_array_for_stats(array, axis) + + if reshaped.shape[0] < 2: + stats = _compute_basic_stats(reshaped, sample_count, quantile_list) + else: + running_stats = RunningQuantileStats() + running_stats.update(reshaped) + stats = running_stats.get_statistics() + stats["count"] = np.array([sample_count]) + + stats = _reshape_stats_by_axis(stats, axis, keepdims, original_shape) + return stats + + +def compute_episode_stats( + episode_data: dict[str, list[str] | np.ndarray], + features: dict, + quantile_list: list[float] | None = None, +) -> dict: + """Compute comprehensive statistics for all features in an episode. + + Processes different data types appropriately: + - Images/videos: Samples from paths, computes per-channel stats, normalizes to [0,1] + - Numerical arrays: Computes per-feature statistics + - Strings: Skipped (no statistics computed) + + Args: + episode_data: Dictionary mapping feature names to data + - For images/videos: list of file paths + - For numerical data: numpy arrays + features: Dictionary describing each feature's dtype and shape + + Returns: + Dictionary mapping feature names to their statistics dictionaries. + Each statistics dictionary contains min, max, mean, std, count, and quantiles. + + Note: + Image statistics are normalized to [0,1] range and have shape (3,1,1) for + per-channel values when dtype is 'image' or 'video'. 
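+
+ Example (illustrative):
+ episode_data = {"action": np.random.rand(100, 6)}
+ features = {"action": {"dtype": "float32", "shape": (6,), "names": None}}
+ stats = compute_episode_stats(episode_data, features)
+ stats["action"]["mean"].shape # (6,)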
+ """ + if quantile_list is None: + quantile_list = DEFAULT_QUANTILES + + ep_stats = {} + for key, data in episode_data.items(): + if features[key]["dtype"] == "string": + continue + + if features[key]["dtype"] in ["image", "video"]: + ep_ft_array = sample_images(data) + axes_to_reduce = (0, 2, 3) + keepdims = True + else: + ep_ft_array = data + axes_to_reduce = 0 + keepdims = data.ndim == 1 + + ep_stats[key] = get_feature_stats( + ep_ft_array, axis=axes_to_reduce, keepdims=keepdims, quantile_list=quantile_list + ) + + if features[key]["dtype"] in ["image", "video"]: + ep_stats[key] = { + k: v if k == "count" else np.squeeze(v / 255.0, axis=0) for k, v in ep_stats[key].items() + } + + return ep_stats + + +def _validate_stat_value(value: np.ndarray, key: str, feature_key: str) -> None: + """Validate a single statistic value.""" + if not isinstance(value, np.ndarray): + raise ValueError( + f"Stats must be composed of numpy array, but key '{key}' of feature '{feature_key}' " + f"is of type '{type(value)}' instead." + ) + + if value.ndim == 0: + raise ValueError("Number of dimensions must be at least 1, and is 0 instead.") + + if key == "count" and value.shape != (1,): + raise ValueError(f"Shape of 'count' must be (1), but is {value.shape} instead.") + + if "image" in feature_key and key != "count" and value.shape != (3, 1, 1): + raise ValueError(f"Shape of quantile '{key}' must be (3,1,1), but is {value.shape} instead.") + + +def _assert_type_and_shape(stats_list: list[dict[str, dict]]): + """Validate that all statistics have correct types and shapes. + + Args: + stats_list: List of statistics dictionaries to validate + + Raises: + ValueError: If any statistic has incorrect type or shape + """ + for stats in stats_list: + for feature_key, feature_stats in stats.items(): + for stat_key, stat_value in feature_stats.items(): + _validate_stat_value(stat_value, stat_key, feature_key) + + +def aggregate_feature_stats(stats_ft_list: list[dict[str, dict]]) -> dict[str, dict[str, np.ndarray]]: + """Aggregates stats for a single feature.""" + means = np.stack([s["mean"] for s in stats_ft_list]) + variances = np.stack([s["std"] ** 2 for s in stats_ft_list]) + counts = np.stack([s["count"] for s in stats_ft_list]) + total_count = counts.sum(axis=0) + + # Prepare weighted mean by matching number of dimensions + while counts.ndim < means.ndim: + counts = np.expand_dims(counts, axis=-1) + + # Compute the weighted mean + weighted_means = means * counts + total_mean = weighted_means.sum(axis=0) / total_count + + # Compute the variance using the parallel algorithm + delta_means = means - total_mean + weighted_variances = (variances + delta_means**2) * counts + total_variance = weighted_variances.sum(axis=0) / total_count + + aggregated = { + "min": np.min(np.stack([s["min"] for s in stats_ft_list]), axis=0), + "max": np.max(np.stack([s["max"] for s in stats_ft_list]), axis=0), + "mean": total_mean, + "std": np.sqrt(total_variance), + "count": total_count, + } + + if stats_ft_list: + quantile_keys = [k for k in stats_ft_list[0] if k.startswith("q") and k[1:].isdigit()] + + for q_key in quantile_keys: + if all(q_key in s for s in stats_ft_list): + quantile_values = np.stack([s[q_key] for s in stats_ft_list]) + weighted_quantiles = quantile_values * counts + aggregated[q_key] = weighted_quantiles.sum(axis=0) / total_count + + return aggregated + + +def aggregate_stats(stats_list: list[dict[str, dict]]) -> dict[str, dict[str, np.ndarray]]: + """Aggregate stats from multiple compute_stats outputs into a single set 
of stats. + + The final stats will have the union of all data keys from each of the stats dicts. + + For instance: + - new_min = min(min_dataset_0, min_dataset_1, ...) + - new_max = max(max_dataset_0, max_dataset_1, ...) + - new_mean = (mean of all data, weighted by counts) + - new_std = (std of all data) + """ + + _assert_type_and_shape(stats_list) + + data_keys = {key for stats in stats_list for key in stats} + aggregated_stats = {key: {} for key in data_keys} + + for key in data_keys: + stats_with_key = [stats[key] for stats in stats_list if key in stats] + aggregated_stats[key] = aggregate_feature_stats(stats_with_key) + + return aggregated_stats diff --git a/src/lerobot/datasets/dataset_tools.py b/src/lerobot/datasets/dataset_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..ba0ff1d6ae5218b21d2435b5a22b600e2a256c5e --- /dev/null +++ b/src/lerobot/datasets/dataset_tools.py @@ -0,0 +1,1085 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Dataset tools utilities for LeRobotDataset. + +This module provides utilities for: +- Deleting episodes from datasets +- Splitting datasets into multiple smaller datasets +- Adding/removing features from datasets +- Merging datasets (wrapper around aggregate functionality) +""" + +import logging +import shutil +from collections.abc import Callable +from pathlib import Path + +import datasets +import numpy as np +import pandas as pd +import pyarrow.parquet as pq +import torch +from tqdm import tqdm + +from lerobot.datasets.aggregate import aggregate_datasets +from lerobot.datasets.compute_stats import aggregate_stats +from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata +from lerobot.datasets.utils import ( + DATA_DIR, + DEFAULT_CHUNK_SIZE, + DEFAULT_DATA_FILE_SIZE_IN_MB, + DEFAULT_DATA_PATH, + DEFAULT_EPISODES_PATH, + get_parquet_file_size_in_mb, + load_episodes, + update_chunk_file_indices, + write_info, + write_stats, + write_tasks, +) +from lerobot.utils.constants import HF_LEROBOT_HOME + + +def _load_episode_with_stats(src_dataset: LeRobotDataset, episode_idx: int) -> dict: + """Load a single episode's metadata including stats from parquet file. 
+ + Args: + src_dataset: Source dataset + episode_idx: Episode index to load + + Returns: + dict containing episode metadata and stats + """ + ep_meta = src_dataset.meta.episodes[episode_idx] + chunk_idx = ep_meta["meta/episodes/chunk_index"] + file_idx = ep_meta["meta/episodes/file_index"] + + parquet_path = src_dataset.root / DEFAULT_EPISODES_PATH.format(chunk_index=chunk_idx, file_index=file_idx) + df = pd.read_parquet(parquet_path) + + episode_row = df[df["episode_index"] == episode_idx].iloc[0] + + return episode_row.to_dict() + + +def delete_episodes( + dataset: LeRobotDataset, + episode_indices: list[int], + output_dir: str | Path | None = None, + repo_id: str | None = None, +) -> LeRobotDataset: + """Delete episodes from a LeRobotDataset and create a new dataset. + + Args: + dataset: The source LeRobotDataset. + episode_indices: List of episode indices to delete. + output_dir: Directory to save the new dataset. If None, uses default location. + repo_id: Repository ID for the new dataset. If None, appends "_modified" to original. + """ + if not episode_indices: + raise ValueError("No episodes to delete") + + valid_indices = set(range(dataset.meta.total_episodes)) + invalid = set(episode_indices) - valid_indices + if invalid: + raise ValueError(f"Invalid episode indices: {invalid}") + + logging.info(f"Deleting {len(episode_indices)} episodes from dataset") + + if repo_id is None: + repo_id = f"{dataset.repo_id}_modified" + output_dir = Path(output_dir) if output_dir is not None else HF_LEROBOT_HOME / repo_id + + episodes_to_keep = [i for i in range(dataset.meta.total_episodes) if i not in episode_indices] + if not episodes_to_keep: + raise ValueError("Cannot delete all episodes from dataset") + + new_meta = LeRobotDatasetMetadata.create( + repo_id=repo_id, + fps=dataset.meta.fps, + features=dataset.meta.features, + robot_type=dataset.meta.robot_type, + root=output_dir, + use_videos=len(dataset.meta.video_keys) > 0, + ) + + episode_mapping = {old_idx: new_idx for new_idx, old_idx in enumerate(episodes_to_keep)} + + video_metadata = None + if dataset.meta.video_keys: + video_metadata = _copy_and_reindex_videos(dataset, new_meta, episode_mapping) + + data_metadata = _copy_and_reindex_data(dataset, new_meta, episode_mapping) + + _copy_and_reindex_episodes_metadata(dataset, new_meta, episode_mapping, data_metadata, video_metadata) + + new_dataset = LeRobotDataset( + repo_id=repo_id, + root=output_dir, + image_transforms=dataset.image_transforms, + delta_timestamps=dataset.delta_timestamps, + tolerance_s=dataset.tolerance_s, + ) + + logging.info(f"Created new dataset with {len(episodes_to_keep)} episodes") + return new_dataset + + +def split_dataset( + dataset: LeRobotDataset, + splits: dict[str, float | list[int]], + output_dir: str | Path | None = None, +) -> dict[str, LeRobotDataset]: + """Split a LeRobotDataset into multiple smaller datasets. + + Args: + dataset: The source LeRobotDataset to split. + splits: Either a dict mapping split names to episode indices, or a dict mapping + split names to fractions (must sum to <= 1.0). + output_dir: Base directory for output datasets. If None, uses default location. 
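+
+ Returns:
+ Dict mapping each split name to its newly created LeRobotDataset.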
+ + Examples: + Split by specific episodes + splits = {"train": [0, 1, 2], "val": [3, 4]} + datasets = split_dataset(dataset, splits) + + Split by fractions + splits = {"train": 0.8, "val": 0.2} + datasets = split_dataset(dataset, splits) + """ + if not splits: + raise ValueError("No splits provided") + + if all(isinstance(v, float) for v in splits.values()): + splits = _fractions_to_episode_indices(dataset.meta.total_episodes, splits) + + all_episodes = set() + for split_name, episodes in splits.items(): + if not episodes: + raise ValueError(f"Split '{split_name}' has no episodes") + episode_set = set(episodes) + if episode_set & all_episodes: + raise ValueError("Episodes cannot appear in multiple splits") + all_episodes.update(episode_set) + + valid_indices = set(range(dataset.meta.total_episodes)) + invalid = all_episodes - valid_indices + if invalid: + raise ValueError(f"Invalid episode indices: {invalid}") + + if output_dir is not None: + output_dir = Path(output_dir) + + result_datasets = {} + + for split_name, episodes in splits.items(): + logging.info(f"Creating split '{split_name}' with {len(episodes)} episodes") + + split_repo_id = f"{dataset.repo_id}_{split_name}" + + split_output_dir = ( + output_dir / split_name if output_dir is not None else HF_LEROBOT_HOME / split_repo_id + ) + + episode_mapping = {old_idx: new_idx for new_idx, old_idx in enumerate(sorted(episodes))} + + new_meta = LeRobotDatasetMetadata.create( + repo_id=split_repo_id, + fps=dataset.meta.fps, + features=dataset.meta.features, + robot_type=dataset.meta.robot_type, + root=split_output_dir, + use_videos=len(dataset.meta.video_keys) > 0, + chunks_size=dataset.meta.chunks_size, + data_files_size_in_mb=dataset.meta.data_files_size_in_mb, + video_files_size_in_mb=dataset.meta.video_files_size_in_mb, + ) + + video_metadata = None + if dataset.meta.video_keys: + video_metadata = _copy_and_reindex_videos(dataset, new_meta, episode_mapping) + + data_metadata = _copy_and_reindex_data(dataset, new_meta, episode_mapping) + + _copy_and_reindex_episodes_metadata(dataset, new_meta, episode_mapping, data_metadata, video_metadata) + + new_dataset = LeRobotDataset( + repo_id=split_repo_id, + root=split_output_dir, + image_transforms=dataset.image_transforms, + delta_timestamps=dataset.delta_timestamps, + tolerance_s=dataset.tolerance_s, + ) + + result_datasets[split_name] = new_dataset + + return result_datasets + + +def merge_datasets( + datasets: list[LeRobotDataset], + output_repo_id: str, + output_dir: str | Path | None = None, +) -> LeRobotDataset: + """Merge multiple LeRobotDatasets into a single dataset. + + This is a wrapper around the aggregate_datasets functionality with a cleaner API. + + Args: + datasets: List of LeRobotDatasets to merge. + output_repo_id: Repository ID for the merged dataset. + output_dir: Directory to save the merged dataset. If None, uses default location. 
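+
+ Returns:
+ The merged LeRobotDataset.
+
+ Example (illustrative; the repo id is hypothetical):
+ merged = merge_datasets([ds_a, ds_b], output_repo_id="user/merged_dataset")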
+ """ + if not datasets: + raise ValueError("No datasets to merge") + + output_dir = Path(output_dir) if output_dir is not None else HF_LEROBOT_HOME / output_repo_id + + repo_ids = [ds.repo_id for ds in datasets] + roots = [ds.root for ds in datasets] + + aggregate_datasets( + repo_ids=repo_ids, + aggr_repo_id=output_repo_id, + roots=roots, + aggr_root=output_dir, + ) + + merged_dataset = LeRobotDataset( + repo_id=output_repo_id, + root=output_dir, + image_transforms=datasets[0].image_transforms, + delta_timestamps=datasets[0].delta_timestamps, + tolerance_s=datasets[0].tolerance_s, + ) + + return merged_dataset + + +def modify_features( + dataset: LeRobotDataset, + add_features: dict[str, tuple[np.ndarray | torch.Tensor | Callable, dict]] | None = None, + remove_features: str | list[str] | None = None, + output_dir: str | Path | None = None, + repo_id: str | None = None, +) -> LeRobotDataset: + """Modify a LeRobotDataset by adding and/or removing features in a single pass. + + This is the most efficient way to modify features, as it only copies the dataset once + regardless of how many features are being added or removed. + + Args: + dataset: The source LeRobotDataset. + add_features: Optional dict mapping feature names to (feature_values, feature_info) tuples. + remove_features: Optional feature name(s) to remove. Can be a single string or list. + output_dir: Directory to save the new dataset. If None, uses default location. + repo_id: Repository ID for the new dataset. If None, appends "_modified" to original. + + Returns: + New dataset with features modified. + + Example: + new_dataset = modify_features( + dataset, + add_features={ + "reward": (reward_array, {"dtype": "float32", "shape": [1], "names": None}), + }, + remove_features=["old_feature"], + output_dir="./output", + ) + """ + if add_features is None and remove_features is None: + raise ValueError("Must specify at least one of add_features or remove_features") + + remove_features_list: list[str] = [] + if remove_features is not None: + remove_features_list = [remove_features] if isinstance(remove_features, str) else remove_features + + if add_features: + required_keys = {"dtype", "shape"} + for feature_name, (_, feature_info) in add_features.items(): + if feature_name in dataset.meta.features: + raise ValueError(f"Feature '{feature_name}' already exists in dataset") + + if not required_keys.issubset(feature_info.keys()): + raise ValueError(f"feature_info for '{feature_name}' must contain keys: {required_keys}") + + if remove_features_list: + for name in remove_features_list: + if name not in dataset.meta.features: + raise ValueError(f"Feature '{name}' not found in dataset") + + required_features = {"timestamp", "frame_index", "episode_index", "index", "task_index"} + if any(name in required_features for name in remove_features_list): + raise ValueError(f"Cannot remove required features: {required_features}") + + if repo_id is None: + repo_id = f"{dataset.repo_id}_modified" + output_dir = Path(output_dir) if output_dir is not None else HF_LEROBOT_HOME / repo_id + + new_features = dataset.meta.features.copy() + + if remove_features_list: + for name in remove_features_list: + new_features.pop(name, None) + + if add_features: + for feature_name, (_, feature_info) in add_features.items(): + new_features[feature_name] = feature_info + + video_keys_to_remove = [name for name in remove_features_list if name in dataset.meta.video_keys] + remaining_video_keys = [k for k in dataset.meta.video_keys if k not in video_keys_to_remove] + + 
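+    # Build the destination metadata: videos stay enabled only if at least one
+    # video feature survives the removals computed above.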
new_meta = LeRobotDatasetMetadata.create( + repo_id=repo_id, + fps=dataset.meta.fps, + features=new_features, + robot_type=dataset.meta.robot_type, + root=output_dir, + use_videos=len(remaining_video_keys) > 0, + ) + + _copy_data_with_feature_changes( + dataset=dataset, + new_meta=new_meta, + add_features=add_features, + remove_features=remove_features_list if remove_features_list else None, + ) + + if new_meta.video_keys: + _copy_videos(dataset, new_meta, exclude_keys=video_keys_to_remove if video_keys_to_remove else None) + + new_dataset = LeRobotDataset( + repo_id=repo_id, + root=output_dir, + image_transforms=dataset.image_transforms, + delta_timestamps=dataset.delta_timestamps, + tolerance_s=dataset.tolerance_s, + ) + + return new_dataset + + +def add_features( + dataset: LeRobotDataset, + features: dict[str, tuple[np.ndarray | torch.Tensor | Callable, dict]], + output_dir: str | Path | None = None, + repo_id: str | None = None, +) -> LeRobotDataset: + """Add multiple features to a LeRobotDataset in a single pass. + + This is more efficient than calling add_feature() multiple times, as it only + copies the dataset once regardless of how many features are being added. + + Args: + dataset: The source LeRobotDataset. + features: Dictionary mapping feature names to (feature_values, feature_info) tuples. + output_dir: Directory to save the new dataset. If None, uses default location. + repo_id: Repository ID for the new dataset. If None, appends "_modified" to original. + + Returns: + New dataset with all features added. + + Example: + features = { + "task_embedding": (task_emb_array, {"dtype": "float32", "shape": [384], "names": None}), + "cam1_embedding": (cam1_emb_array, {"dtype": "float32", "shape": [768], "names": None}), + "cam2_embedding": (cam2_emb_array, {"dtype": "float32", "shape": [768], "names": None}), + } + new_dataset = add_features(dataset, features, output_dir="./output", repo_id="my_dataset") + """ + if not features: + raise ValueError("No features provided") + + return modify_features( + dataset=dataset, + add_features=features, + remove_features=None, + output_dir=output_dir, + repo_id=repo_id, + ) + + +def remove_feature( + dataset: LeRobotDataset, + feature_names: str | list[str], + output_dir: str | Path | None = None, + repo_id: str | None = None, +) -> LeRobotDataset: + """Remove features from a LeRobotDataset. + + Args: + dataset: The source LeRobotDataset. + feature_names: Name(s) of features to remove. Can be a single string or list. + output_dir: Directory to save the new dataset. If None, uses default location. + repo_id: Repository ID for the new dataset. If None, appends "_modified" to original. + + Returns: + New dataset with features removed. 
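+
+    Example (illustrative; the feature name and output path are placeholders):
+        new_dataset = remove_feature(dataset, feature_names=["observation.depth"], output_dir="./output")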
+ """ + return modify_features( + dataset=dataset, + add_features=None, + remove_features=feature_names, + output_dir=output_dir, + repo_id=repo_id, + ) + + +def _fractions_to_episode_indices( + total_episodes: int, + splits: dict[str, float], +) -> dict[str, list[int]]: + """Convert split fractions to episode indices.""" + if sum(splits.values()) > 1.0: + raise ValueError("Split fractions must sum to <= 1.0") + + indices = list(range(total_episodes)) + result = {} + start_idx = 0 + + for split_name, fraction in splits.items(): + num_episodes = int(total_episodes * fraction) + if num_episodes == 0: + logging.warning(f"Split '{split_name}' has no episodes, skipping...") + continue + end_idx = start_idx + num_episodes + if split_name == list(splits.keys())[-1]: + end_idx = total_episodes + result[split_name] = indices[start_idx:end_idx] + start_idx = end_idx + + return result + + +def _copy_and_reindex_data( + src_dataset: LeRobotDataset, + dst_meta: LeRobotDatasetMetadata, + episode_mapping: dict[int, int], +) -> dict[int, dict]: + """Copy and filter data files, only modifying files with deleted episodes. + + Args: + src_dataset: Source dataset to copy from + dst_meta: Destination metadata object + episode_mapping: Mapping from old episode indices to new indices + + Returns: + dict mapping episode index to its data file metadata (chunk_index, file_index, etc.) + """ + if src_dataset.meta.episodes is None: + src_dataset.meta.episodes = load_episodes(src_dataset.meta.root) + + file_to_episodes: dict[Path, set[int]] = {} + for old_idx in episode_mapping: + file_path = src_dataset.meta.get_data_file_path(old_idx) + if file_path not in file_to_episodes: + file_to_episodes[file_path] = set() + file_to_episodes[file_path].add(old_idx) + + global_index = 0 + episode_data_metadata: dict[int, dict] = {} + + if dst_meta.tasks is None: + all_task_indices = set() + for src_path in file_to_episodes: + df = pd.read_parquet(src_dataset.root / src_path) + mask = df["episode_index"].isin(list(episode_mapping.keys())) + task_series: pd.Series = df[mask]["task_index"] + all_task_indices.update(task_series.unique().tolist()) + tasks = [src_dataset.meta.tasks.iloc[idx].name for idx in all_task_indices] + dst_meta.save_episode_tasks(list(set(tasks))) + + task_mapping = {} + for old_task_idx in range(len(src_dataset.meta.tasks)): + task_name = src_dataset.meta.tasks.iloc[old_task_idx].name + new_task_idx = dst_meta.get_task_index(task_name) + if new_task_idx is not None: + task_mapping[old_task_idx] = new_task_idx + + for src_path in tqdm(sorted(file_to_episodes.keys()), desc="Processing data files"): + df = pd.read_parquet(src_dataset.root / src_path) + + all_episodes_in_file = set(df["episode_index"].unique()) + episodes_to_keep = file_to_episodes[src_path] + + if all_episodes_in_file == episodes_to_keep: + df["episode_index"] = df["episode_index"].replace(episode_mapping) + df["index"] = range(global_index, global_index + len(df)) + df["task_index"] = df["task_index"].replace(task_mapping) + + first_ep_old_idx = min(episodes_to_keep) + src_ep = src_dataset.meta.episodes[first_ep_old_idx] + chunk_idx = src_ep["data/chunk_index"] + file_idx = src_ep["data/file_index"] + else: + mask = df["episode_index"].isin(list(episode_mapping.keys())) + df = df[mask].copy().reset_index(drop=True) + + if len(df) == 0: + continue + + df["episode_index"] = df["episode_index"].replace(episode_mapping) + df["index"] = range(global_index, global_index + len(df)) + df["task_index"] = df["task_index"].replace(task_mapping) + + 
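+            # Reuse the chunk/file indices of the first kept episode so the filtered
+            # parquet file keeps the same relative chunk/file path as its source file.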
first_ep_old_idx = min(episodes_to_keep) + src_ep = src_dataset.meta.episodes[first_ep_old_idx] + chunk_idx = src_ep["data/chunk_index"] + file_idx = src_ep["data/file_index"] + + dst_path = dst_meta.root / DEFAULT_DATA_PATH.format(chunk_index=chunk_idx, file_index=file_idx) + dst_path.parent.mkdir(parents=True, exist_ok=True) + + _write_parquet(df, dst_path, dst_meta) + + for ep_old_idx in episodes_to_keep: + ep_new_idx = episode_mapping[ep_old_idx] + ep_df = df[df["episode_index"] == ep_new_idx] + episode_data_metadata[ep_new_idx] = { + "data/chunk_index": chunk_idx, + "data/file_index": file_idx, + "dataset_from_index": int(ep_df["index"].min()), + "dataset_to_index": int(ep_df["index"].max() + 1), + } + + global_index += len(df) + + return episode_data_metadata + + +def _keep_episodes_from_video_with_av( + input_path: Path, + output_path: Path, + episodes_to_keep: list[tuple[float, float]], + fps: float, + vcodec: str = "libsvtav1", + pix_fmt: str = "yuv420p", +) -> None: + """Keep only specified episodes from a video file using PyAV. + + This function decodes frames from specified time ranges and re-encodes them with + properly reset timestamps to ensure monotonic progression. + + Args: + input_path: Source video file path. + output_path: Destination video file path. + episodes_to_keep: List of (start_time, end_time) tuples for episodes to keep. + fps: Frame rate of the video. + vcodec: Video codec to use for encoding. + pix_fmt: Pixel format for output video. + """ + from fractions import Fraction + + import av + + if not episodes_to_keep: + raise ValueError("No episodes to keep") + + in_container = av.open(str(input_path)) + + # Check if video stream exists. + if not in_container.streams.video: + raise ValueError( + f"No video streams found in {input_path}. " + "The video file may be corrupted or empty. " + "Try re-downloading the dataset or checking the video file." + ) + + v_in = in_container.streams.video[0] + + out = av.open(str(output_path), mode="w") + + # Convert fps to Fraction for PyAV compatibility. + fps_fraction = Fraction(fps).limit_denominator(1000) + v_out = out.add_stream(vcodec, rate=fps_fraction) + + # PyAV type stubs don't distinguish video streams from audio/subtitle streams. + v_out.width = v_in.codec_context.width + v_out.height = v_in.codec_context.height + v_out.pix_fmt = pix_fmt + + # Set time_base to match the frame rate for proper timestamp handling. + v_out.time_base = Fraction(1, int(fps)) + + out.start_encoding() + + # Create set of (start, end) ranges for fast lookup. + # Convert to a sorted list for efficient checking. + time_ranges = sorted(episodes_to_keep) + + # Track frame index for setting PTS and current range being processed. + frame_count = 0 + range_idx = 0 + + # Read through entire video once and filter frames. + for packet in in_container.demux(v_in): + for frame in packet.decode(): + if frame is None: + continue + + # Get frame timestamp. + frame_time = float(frame.pts * frame.time_base) if frame.pts is not None else 0.0 + + # Check if frame is in any of our desired time ranges. + # Skip ranges that have already passed. + while range_idx < len(time_ranges) and frame_time >= time_ranges[range_idx][1]: + range_idx += 1 + + # If we've passed all ranges, stop processing. + if range_idx >= len(time_ranges): + break + + # Check if frame is in current range. + start_ts, end_ts = time_ranges[range_idx] + if frame_time < start_ts: + continue + + # Frame is in range - create a new frame with reset timestamps. 
+ # We need to create a copy to avoid modifying the original. + new_frame = frame.reformat(width=v_out.width, height=v_out.height, format=v_out.pix_fmt) + new_frame.pts = frame_count + new_frame.time_base = Fraction(1, int(fps)) + + # Encode and mux the frame. + for pkt in v_out.encode(new_frame): + out.mux(pkt) + + frame_count += 1 + + # Flush encoder. + for pkt in v_out.encode(): + out.mux(pkt) + + out.close() + in_container.close() + + +def _copy_and_reindex_videos( + src_dataset: LeRobotDataset, + dst_meta: LeRobotDatasetMetadata, + episode_mapping: dict[int, int], + vcodec: str = "libsvtav1", + pix_fmt: str = "yuv420p", +) -> dict[int, dict]: + """Copy and filter video files, only re-encoding files with deleted episodes. + + For video files that only contain kept episodes, we copy them directly. + For files with mixed kept/deleted episodes, we use PyAV filters to efficiently + re-encode only the desired segments. + + Args: + src_dataset: Source dataset to copy from + dst_meta: Destination metadata object + episode_mapping: Mapping from old episode indices to new indices + + Returns: + dict mapping episode index to its video metadata (chunk_index, file_index, timestamps) + """ + if src_dataset.meta.episodes is None: + src_dataset.meta.episodes = load_episodes(src_dataset.meta.root) + + episodes_video_metadata: dict[int, dict] = {new_idx: {} for new_idx in episode_mapping.values()} + + for video_key in src_dataset.meta.video_keys: + logging.info(f"Processing videos for {video_key}") + + if dst_meta.video_path is None: + raise ValueError("Destination metadata has no video_path defined") + + file_to_episodes: dict[tuple[int, int], list[int]] = {} + for old_idx in episode_mapping: + src_ep = src_dataset.meta.episodes[old_idx] + chunk_idx = src_ep[f"videos/{video_key}/chunk_index"] + file_idx = src_ep[f"videos/{video_key}/file_index"] + file_key = (chunk_idx, file_idx) + if file_key not in file_to_episodes: + file_to_episodes[file_key] = [] + file_to_episodes[file_key].append(old_idx) + + for (src_chunk_idx, src_file_idx), episodes_in_file in tqdm( + sorted(file_to_episodes.items()), desc=f"Processing {video_key} video files" + ): + all_episodes_in_file = [ + ep_idx + for ep_idx in range(src_dataset.meta.total_episodes) + if src_dataset.meta.episodes[ep_idx].get(f"videos/{video_key}/chunk_index") == src_chunk_idx + and src_dataset.meta.episodes[ep_idx].get(f"videos/{video_key}/file_index") == src_file_idx + ] + + episodes_to_keep_set = set(episodes_in_file) + all_in_file_set = set(all_episodes_in_file) + + if all_in_file_set == episodes_to_keep_set: + assert src_dataset.meta.video_path is not None + src_video_path = src_dataset.root / src_dataset.meta.video_path.format( + video_key=video_key, chunk_index=src_chunk_idx, file_index=src_file_idx + ) + dst_video_path = dst_meta.root / dst_meta.video_path.format( + video_key=video_key, chunk_index=src_chunk_idx, file_index=src_file_idx + ) + dst_video_path.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(src_video_path, dst_video_path) + + for old_idx in episodes_in_file: + new_idx = episode_mapping[old_idx] + src_ep = src_dataset.meta.episodes[old_idx] + episodes_video_metadata[new_idx][f"videos/{video_key}/chunk_index"] = src_chunk_idx + episodes_video_metadata[new_idx][f"videos/{video_key}/file_index"] = src_file_idx + episodes_video_metadata[new_idx][f"videos/{video_key}/from_timestamp"] = src_ep[ + f"videos/{video_key}/from_timestamp" + ] + episodes_video_metadata[new_idx][f"videos/{video_key}/to_timestamp"] = src_ep[ + 
f"videos/{video_key}/to_timestamp" + ] + else: + # Build list of time ranges to keep, in sorted order. + sorted_keep_episodes = sorted(episodes_in_file, key=lambda x: episode_mapping[x]) + episodes_to_keep_ranges: list[tuple[float, float]] = [] + + for old_idx in sorted_keep_episodes: + src_ep = src_dataset.meta.episodes[old_idx] + from_ts = src_ep[f"videos/{video_key}/from_timestamp"] + to_ts = src_ep[f"videos/{video_key}/to_timestamp"] + episodes_to_keep_ranges.append((from_ts, to_ts)) + + # Use PyAV filters to efficiently re-encode only the desired segments. + assert src_dataset.meta.video_path is not None + src_video_path = src_dataset.root / src_dataset.meta.video_path.format( + video_key=video_key, chunk_index=src_chunk_idx, file_index=src_file_idx + ) + dst_video_path = dst_meta.root / dst_meta.video_path.format( + video_key=video_key, chunk_index=src_chunk_idx, file_index=src_file_idx + ) + dst_video_path.parent.mkdir(parents=True, exist_ok=True) + + logging.info( + f"Re-encoding {video_key} (chunk {src_chunk_idx}, file {src_file_idx}) " + f"with {len(episodes_to_keep_ranges)} episodes" + ) + _keep_episodes_from_video_with_av( + src_video_path, + dst_video_path, + episodes_to_keep_ranges, + src_dataset.meta.fps, + vcodec, + pix_fmt, + ) + + cumulative_ts = 0.0 + for old_idx in sorted_keep_episodes: + new_idx = episode_mapping[old_idx] + src_ep = src_dataset.meta.episodes[old_idx] + ep_length = src_ep["length"] + ep_duration = ep_length / src_dataset.meta.fps + + episodes_video_metadata[new_idx][f"videos/{video_key}/chunk_index"] = src_chunk_idx + episodes_video_metadata[new_idx][f"videos/{video_key}/file_index"] = src_file_idx + episodes_video_metadata[new_idx][f"videos/{video_key}/from_timestamp"] = cumulative_ts + episodes_video_metadata[new_idx][f"videos/{video_key}/to_timestamp"] = ( + cumulative_ts + ep_duration + ) + + cumulative_ts += ep_duration + + return episodes_video_metadata + + +def _copy_and_reindex_episodes_metadata( + src_dataset: LeRobotDataset, + dst_meta: LeRobotDatasetMetadata, + episode_mapping: dict[int, int], + data_metadata: dict[int, dict], + video_metadata: dict[int, dict] | None = None, +) -> None: + """Copy and reindex episodes metadata using provided data and video metadata. + + Args: + src_dataset: Source dataset to copy from + dst_meta: Destination metadata object + episode_mapping: Mapping from old episode indices to new indices + data_metadata: Dict mapping new episode index to its data file metadata + video_metadata: Optional dict mapping new episode index to its video metadata + """ + from lerobot.datasets.utils import flatten_dict + + if src_dataset.meta.episodes is None: + src_dataset.meta.episodes = load_episodes(src_dataset.meta.root) + + all_stats = [] + total_frames = 0 + + for old_idx, new_idx in tqdm( + sorted(episode_mapping.items(), key=lambda x: x[1]), desc="Processing episodes metadata" + ): + src_episode_full = _load_episode_with_stats(src_dataset, old_idx) + + src_episode = src_dataset.meta.episodes[old_idx] + + episode_meta = data_metadata[new_idx].copy() + + if video_metadata and new_idx in video_metadata: + episode_meta.update(video_metadata[new_idx]) + + # Extract episode statistics from parquet metadata. + # Note (maractingi): When pandas/pyarrow serializes numpy arrays with shape (3, 1, 1) to parquet, + # they are being deserialized as nested object arrays like: + # array([array([array([0.])]), array([array([0.])]), array([array([0.])])]) + # This happens particularly with image/video statistics. 
We need to detect and flatten + # these nested structures back to proper (3, 1, 1) arrays so aggregate_stats can process them. + episode_stats = {} + for key in src_episode_full: + if key.startswith("stats/"): + stat_key = key.replace("stats/", "") + parts = stat_key.split("/") + if len(parts) == 2: + feature_name, stat_name = parts + if feature_name not in episode_stats: + episode_stats[feature_name] = {} + + value = src_episode_full[key] + + if feature_name in src_dataset.meta.features: + feature_dtype = src_dataset.meta.features[feature_name]["dtype"] + if feature_dtype in ["image", "video"] and stat_name != "count": + if isinstance(value, np.ndarray) and value.dtype == object: + flat_values = [] + for item in value: + while isinstance(item, np.ndarray): + item = item.flatten()[0] + flat_values.append(item) + value = np.array(flat_values, dtype=np.float64).reshape(3, 1, 1) + elif isinstance(value, np.ndarray) and value.shape == (3,): + value = value.reshape(3, 1, 1) + + episode_stats[feature_name][stat_name] = value + + all_stats.append(episode_stats) + + episode_dict = { + "episode_index": new_idx, + "tasks": src_episode["tasks"], + "length": src_episode["length"], + } + episode_dict.update(episode_meta) + episode_dict.update(flatten_dict({"stats": episode_stats})) + dst_meta._save_episode_metadata(episode_dict) + + total_frames += src_episode["length"] + + dst_meta._close_writer() + + dst_meta.info.update( + { + "total_episodes": len(episode_mapping), + "total_frames": total_frames, + "total_tasks": len(dst_meta.tasks) if dst_meta.tasks is not None else 0, + "splits": {"train": f"0:{len(episode_mapping)}"}, + } + ) + write_info(dst_meta.info, dst_meta.root) + + if not all_stats: + logging.warning("No statistics found to aggregate") + return + + logging.info(f"Aggregating statistics for {len(all_stats)} episodes") + aggregated_stats = aggregate_stats(all_stats) + filtered_stats = {k: v for k, v in aggregated_stats.items() if k in dst_meta.features} + write_stats(filtered_stats, dst_meta.root) + + +def _write_parquet(df: pd.DataFrame, path: Path, meta: LeRobotDatasetMetadata) -> None: + """Write DataFrame to parquet + + This ensures images are properly embedded and the file can be loaded correctly by HF datasets. + """ + from lerobot.datasets.utils import embed_images, get_hf_features_from_features + + hf_features = get_hf_features_from_features(meta.features) + ep_dataset = datasets.Dataset.from_dict(df.to_dict(orient="list"), features=hf_features, split="train") + + if len(meta.image_keys) > 0: + ep_dataset = embed_images(ep_dataset) + + table = ep_dataset.with_format("arrow")[:] + writer = pq.ParquetWriter(path, schema=table.schema, compression="snappy", use_dictionary=True) + writer.write_table(table) + writer.close() + + +def _save_data_chunk( + df: pd.DataFrame, + meta: LeRobotDatasetMetadata, + chunk_idx: int = 0, + file_idx: int = 0, +) -> tuple[int, int, dict[int, dict]]: + """Save a data chunk and return updated indices and episode metadata. 
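+
+    The returned chunk/file indices are advanced once the written parquet file reaches
+    roughly 90% of the default data file size limit, so the next call starts a new file.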
+ + Returns: + tuple: (next_chunk_idx, next_file_idx, episode_metadata_dict) + where episode_metadata_dict maps episode_index to its data file metadata + """ + path = meta.root / DEFAULT_DATA_PATH.format(chunk_index=chunk_idx, file_index=file_idx) + path.parent.mkdir(parents=True, exist_ok=True) + + _write_parquet(df, path, meta) + + episode_metadata = {} + for ep_idx in df["episode_index"].unique(): + ep_df = df[df["episode_index"] == ep_idx] + episode_metadata[ep_idx] = { + "data/chunk_index": chunk_idx, + "data/file_index": file_idx, + "dataset_from_index": int(ep_df["index"].min()), + "dataset_to_index": int(ep_df["index"].max() + 1), + } + + file_size = get_parquet_file_size_in_mb(path) + if file_size >= DEFAULT_DATA_FILE_SIZE_IN_MB * 0.9: + chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, DEFAULT_CHUNK_SIZE) + + return chunk_idx, file_idx, episode_metadata + + +def _copy_data_with_feature_changes( + dataset: LeRobotDataset, + new_meta: LeRobotDatasetMetadata, + add_features: dict[str, tuple] | None = None, + remove_features: list[str] | None = None, +) -> None: + """Copy data while adding or removing features.""" + data_dir = dataset.root / DATA_DIR + parquet_files = sorted(data_dir.glob("*/*.parquet")) + + if not parquet_files: + raise ValueError(f"No parquet files found in {data_dir}") + + frame_idx = 0 + + for src_path in tqdm(parquet_files, desc="Processing data files"): + df = pd.read_parquet(src_path).reset_index(drop=True) + + relative_path = src_path.relative_to(dataset.root) + chunk_dir = relative_path.parts[1] + file_name = relative_path.parts[2] + + chunk_idx = int(chunk_dir.split("-")[1]) + file_idx = int(file_name.split("-")[1].split(".")[0]) + + if remove_features: + df = df.drop(columns=remove_features, errors="ignore") + + if add_features: + end_idx = frame_idx + len(df) + for feature_name, (values, _) in add_features.items(): + if callable(values): + feature_values = [] + for _, row in df.iterrows(): + ep_idx = row["episode_index"] + frame_in_ep = row["frame_index"] + value = values(row.to_dict(), ep_idx, frame_in_ep) + if isinstance(value, np.ndarray) and value.size == 1: + value = value.item() + feature_values.append(value) + df[feature_name] = feature_values + else: + feature_slice = values[frame_idx:end_idx] + if len(feature_slice.shape) > 1 and feature_slice.shape[1] == 1: + df[feature_name] = feature_slice.flatten() + else: + df[feature_name] = feature_slice + frame_idx = end_idx + + # Write using the same chunk/file structure as source + dst_path = new_meta.root / DEFAULT_DATA_PATH.format(chunk_index=chunk_idx, file_index=file_idx) + dst_path.parent.mkdir(parents=True, exist_ok=True) + + _write_parquet(df, dst_path, new_meta) + + _copy_episodes_metadata_and_stats(dataset, new_meta) + + +def _copy_videos( + src_dataset: LeRobotDataset, + dst_meta: LeRobotDatasetMetadata, + exclude_keys: list[str] | None = None, +) -> None: + """Copy video files, optionally excluding certain keys.""" + if exclude_keys is None: + exclude_keys = [] + + for video_key in src_dataset.meta.video_keys: + if video_key in exclude_keys: + continue + + video_files = set() + for ep_idx in range(len(src_dataset.meta.episodes)): + try: + video_files.add(src_dataset.meta.get_video_file_path(ep_idx, video_key)) + except KeyError: + continue + + for src_path in tqdm(sorted(video_files), desc=f"Copying {video_key} videos"): + dst_path = dst_meta.root / src_path + dst_path.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(src_dataset.root / src_path, dst_path) + + +def 
_copy_episodes_metadata_and_stats( + src_dataset: LeRobotDataset, + dst_meta: LeRobotDatasetMetadata, +) -> None: + """Copy episodes metadata and recalculate stats.""" + if src_dataset.meta.tasks is not None: + write_tasks(src_dataset.meta.tasks, dst_meta.root) + dst_meta.tasks = src_dataset.meta.tasks.copy() + + episodes_dir = src_dataset.root / "meta/episodes" + dst_episodes_dir = dst_meta.root / "meta/episodes" + if episodes_dir.exists(): + shutil.copytree(episodes_dir, dst_episodes_dir, dirs_exist_ok=True) + + dst_meta.info.update( + { + "total_episodes": src_dataset.meta.total_episodes, + "total_frames": src_dataset.meta.total_frames, + "total_tasks": src_dataset.meta.total_tasks, + "splits": src_dataset.meta.info.get("splits", {"train": f"0:{src_dataset.meta.total_episodes}"}), + } + ) + + if dst_meta.video_keys and src_dataset.meta.video_keys: + for key in dst_meta.video_keys: + if key in src_dataset.meta.features: + dst_meta.info["features"][key]["info"] = src_dataset.meta.info["features"][key].get( + "info", {} + ) + + write_info(dst_meta.info, dst_meta.root) + + if set(dst_meta.features.keys()) != set(src_dataset.meta.features.keys()): + logging.info("Recalculating dataset statistics...") + if src_dataset.meta.stats: + new_stats = {} + for key in dst_meta.features: + if key in src_dataset.meta.stats: + new_stats[key] = src_dataset.meta.stats[key] + write_stats(new_stats, dst_meta.root) + else: + if src_dataset.meta.stats: + write_stats(src_dataset.meta.stats, dst_meta.root) diff --git a/src/lerobot/datasets/factory.py b/src/lerobot/datasets/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..a477d505ed1b1b834bef0dbf923746981778080d --- /dev/null +++ b/src/lerobot/datasets/factory.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +from pprint import pformat + +import torch + +from lerobot.configs.policies import PreTrainedConfig +from lerobot.configs.train import TrainPipelineConfig +from lerobot.datasets.lerobot_dataset import ( + LeRobotDataset, + LeRobotDatasetMetadata, + MultiLeRobotDataset, +) +from lerobot.datasets.streaming_dataset import StreamingLeRobotDataset +from lerobot.datasets.transforms import ImageTransforms +from lerobot.utils.constants import ACTION, OBS_PREFIX, REWARD + +IMAGENET_STATS = { + "mean": [[[0.485]], [[0.456]], [[0.406]]], # (c,1,1) + "std": [[[0.229]], [[0.224]], [[0.225]]], # (c,1,1) +} + + +def resolve_delta_timestamps( + cfg: PreTrainedConfig, ds_meta: LeRobotDatasetMetadata +) -> dict[str, list] | None: + """Resolves delta_timestamps by reading from the 'delta_indices' properties of the PreTrainedConfig. + + Args: + cfg (PreTrainedConfig): The PreTrainedConfig to read delta_indices from. + ds_meta (LeRobotDatasetMetadata): The dataset from which features and fps are used to build + delta_timestamps against. 
+ + Returns: + dict[str, list] | None: A dictionary of delta_timestamps, e.g.: + { + "observation.state": [-0.04, -0.02, 0] + "observation.action": [-0.02, 0, 0.02] + } + returns `None` if the resulting dict is empty. + """ + delta_timestamps = {} + for key in ds_meta.features: + if key == REWARD and cfg.reward_delta_indices is not None: + delta_timestamps[key] = [i / ds_meta.fps for i in cfg.reward_delta_indices] + if key == ACTION and cfg.action_delta_indices is not None: + delta_timestamps[key] = [i / ds_meta.fps for i in cfg.action_delta_indices] + if key.startswith(OBS_PREFIX) and cfg.observation_delta_indices is not None: + delta_timestamps[key] = [i / ds_meta.fps for i in cfg.observation_delta_indices] + + if len(delta_timestamps) == 0: + delta_timestamps = None + + return delta_timestamps + + +def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDataset: + """Handles the logic of setting up delta timestamps and image transforms before creating a dataset. + + Args: + cfg (TrainPipelineConfig): A TrainPipelineConfig config which contains a DatasetConfig and a PreTrainedConfig. + + Raises: + NotImplementedError: The MultiLeRobotDataset is currently deactivated. + + Returns: + LeRobotDataset | MultiLeRobotDataset + """ + image_transforms = ( + ImageTransforms(cfg.dataset.image_transforms) if cfg.dataset.image_transforms.enable else None + ) + + if isinstance(cfg.dataset.repo_id, str): + ds_meta = LeRobotDatasetMetadata( + cfg.dataset.repo_id, root=cfg.dataset.root, revision=cfg.dataset.revision + ) + delta_timestamps = resolve_delta_timestamps(cfg.policy, ds_meta) + if not cfg.dataset.streaming: + dataset = LeRobotDataset( + cfg.dataset.repo_id, + root=cfg.dataset.root, + episodes=cfg.dataset.episodes, + delta_timestamps=delta_timestamps, + image_transforms=image_transforms, + revision=cfg.dataset.revision, + video_backend=cfg.dataset.video_backend, + ) + else: + dataset = StreamingLeRobotDataset( + cfg.dataset.repo_id, + root=cfg.dataset.root, + episodes=cfg.dataset.episodes, + delta_timestamps=delta_timestamps, + image_transforms=image_transforms, + revision=cfg.dataset.revision, + max_num_shards=cfg.num_workers, + ) + else: + raise NotImplementedError("The MultiLeRobotDataset isn't supported for now.") + dataset = MultiLeRobotDataset( + cfg.dataset.repo_id, + # TODO(aliberts): add proper support for multi dataset + # delta_timestamps=delta_timestamps, + image_transforms=image_transforms, + video_backend=cfg.dataset.video_backend, + ) + logging.info( + "Multiple datasets were provided. Applied the following index mapping to the provided datasets: " + f"{pformat(dataset.repo_id_to_index, indent=2)}" + ) + + if cfg.dataset.use_imagenet_stats: + for key in dataset.meta.camera_keys: + for stats_type, stats in IMAGENET_STATS.items(): + dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) + + return dataset diff --git a/src/lerobot/datasets/image_writer.py b/src/lerobot/datasets/image_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..218bb15bf41f7583704dd08b43cd145cf9e761d9 --- /dev/null +++ b/src/lerobot/datasets/image_writer.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import multiprocessing +import queue +import threading +from pathlib import Path + +import numpy as np +import PIL.Image +import torch + + +def safe_stop_image_writer(func): + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + dataset = kwargs.get("dataset") + image_writer = getattr(dataset, "image_writer", None) if dataset else None + if image_writer is not None: + print("Waiting for image writer to terminate...") + image_writer.stop() + raise e + + return wrapper + + +def image_array_to_pil_image(image_array: np.ndarray, range_check: bool = True) -> PIL.Image.Image: + # TODO(aliberts): handle 1 channel and 4 for depth images + if image_array.ndim != 3: + raise ValueError(f"The array has {image_array.ndim} dimensions, but 3 is expected for an image.") + + if image_array.shape[0] == 3: + # Transpose from pytorch convention (C, H, W) to (H, W, C) + image_array = image_array.transpose(1, 2, 0) + + elif image_array.shape[-1] != 3: + raise NotImplementedError( + f"The image has {image_array.shape[-1]} channels, but 3 is required for now." + ) + + if image_array.dtype != np.uint8: + if range_check: + max_ = image_array.max().item() + min_ = image_array.min().item() + if max_ > 1.0 or min_ < 0.0: + raise ValueError( + "The image data type is float, which requires values in the range [0.0, 1.0]. " + f"However, the provided range is [{min_}, {max_}]. Please adjust the range or " + "provide a uint8 image with values in the range [0, 255]." + ) + + image_array = (image_array * 255).astype(np.uint8) + + return PIL.Image.fromarray(image_array) + + +def write_image(image: np.ndarray | PIL.Image.Image, fpath: Path, compress_level: int = 1): + """ + Saves a NumPy array or PIL Image to a file. + + This function handles both NumPy arrays and PIL Image objects, converting + the former to a PIL Image before saving. It includes error handling for + the save operation. + + Args: + image (np.ndarray | PIL.Image.Image): The image data to save. + fpath (Path): The destination file path for the image. + compress_level (int, optional): The compression level for the saved + image, as used by PIL.Image.save(). Defaults to 1. + Refer to: https://github.com/huggingface/lerobot/pull/2135 + for more details on the default value rationale. + + Raises: + TypeError: If the input 'image' is not a NumPy array or a + PIL.Image.Image object. + + Side Effects: + Prints an error message to the console if the image writing process + fails for any reason. 
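+
+    Example (illustrative; float images must lie in [0.0, 1.0]):
+        frame = np.zeros((3, 96, 96), dtype=np.float32)
+        write_image(frame, Path("frame_000000.png"))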
+ """ + try: + if isinstance(image, np.ndarray): + img = image_array_to_pil_image(image) + elif isinstance(image, PIL.Image.Image): + img = image + else: + raise TypeError(f"Unsupported image type: {type(image)}") + img.save(fpath, compress_level=compress_level) + except Exception as e: + print(f"Error writing image {fpath}: {e}") + + +def worker_thread_loop(queue: queue.Queue): + while True: + item = queue.get() + if item is None: + queue.task_done() + break + image_array, fpath = item + write_image(image_array, fpath) + queue.task_done() + + +def worker_process(queue: queue.Queue, num_threads: int): + threads = [] + for _ in range(num_threads): + t = threading.Thread(target=worker_thread_loop, args=(queue,)) + t.daemon = True + t.start() + threads.append(t) + for t in threads: + t.join() + + +class AsyncImageWriter: + """ + This class abstract away the initialisation of processes or/and threads to + save images on disk asynchronously, which is critical to control a robot and record data + at a high frame rate. + + When `num_processes=0`, it creates a threads pool of size `num_threads`. + When `num_processes>0`, it creates processes pool of size `num_processes`, where each subprocess starts + their own threads pool of size `num_threads`. + + The optimal number of processes and threads depends on your computer capabilities. + We advise to use 4 threads per camera with 0 processes. If the fps is not stable, try to increase or lower + the number of threads. If it is still not stable, try to use 1 subprocess, or more. + """ + + def __init__(self, num_processes: int = 0, num_threads: int = 1): + self.num_processes = num_processes + self.num_threads = num_threads + self.queue = None + self.threads = [] + self.processes = [] + self._stopped = False + + if num_threads <= 0 and num_processes <= 0: + raise ValueError("Number of threads and processes must be greater than zero.") + + if self.num_processes == 0: + # Use threading + self.queue = queue.Queue() + for _ in range(self.num_threads): + t = threading.Thread(target=worker_thread_loop, args=(self.queue,)) + t.daemon = True + t.start() + self.threads.append(t) + else: + # Use multiprocessing + self.queue = multiprocessing.JoinableQueue() + for _ in range(self.num_processes): + p = multiprocessing.Process(target=worker_process, args=(self.queue, self.num_threads)) + p.daemon = True + p.start() + self.processes.append(p) + + def save_image(self, image: torch.Tensor | np.ndarray | PIL.Image.Image, fpath: Path): + if isinstance(image, torch.Tensor): + # Convert tensor to numpy array to minimize main process time + image = image.cpu().numpy() + self.queue.put((image, fpath)) + + def wait_until_done(self): + self.queue.join() + + def stop(self): + if self._stopped: + return + + if self.num_processes == 0: + for _ in self.threads: + self.queue.put(None) + for t in self.threads: + t.join() + else: + num_nones = self.num_processes * self.num_threads + for _ in range(num_nones): + self.queue.put(None) + for p in self.processes: + p.join() + if p.is_alive(): + p.terminate() + self.queue.close() + self.queue.join_thread() + + self._stopped = True diff --git a/src/lerobot/datasets/lerobot_dataset.py b/src/lerobot/datasets/lerobot_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d392f98a84f90ae05d4e6f18f858208aff30f9 --- /dev/null +++ b/src/lerobot/datasets/lerobot_dataset.py @@ -0,0 +1,1707 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import contextlib +import logging +import shutil +import tempfile +from collections.abc import Callable +from pathlib import Path + +import datasets +import numpy as np +import packaging.version +import pandas as pd +import PIL.Image +import pyarrow as pa +import pyarrow.parquet as pq +import torch +import torch.utils +from huggingface_hub import HfApi, snapshot_download +from huggingface_hub.errors import RevisionNotFoundError + +from lerobot.datasets.compute_stats import aggregate_stats, compute_episode_stats +from lerobot.datasets.image_writer import AsyncImageWriter, write_image +from lerobot.datasets.utils import ( + DEFAULT_EPISODES_PATH, + DEFAULT_FEATURES, + DEFAULT_IMAGE_PATH, + INFO_PATH, + _validate_feature_names, + check_delta_timestamps, + check_version_compatibility, + create_empty_dataset_info, + create_lerobot_dataset_card, + embed_images, + flatten_dict, + get_delta_indices, + get_file_size_in_mb, + get_hf_features_from_features, + get_safe_version, + hf_transform_to_torch, + is_valid_version, + load_episodes, + load_info, + load_nested_dataset, + load_stats, + load_tasks, + update_chunk_file_indices, + validate_episode_buffer, + validate_frame, + write_info, + write_json, + write_stats, + write_tasks, +) +from lerobot.datasets.video_utils import ( + VideoFrame, + concatenate_video_files, + decode_video_frames, + encode_video_frames, + get_safe_default_codec, + get_video_duration_in_s, + get_video_info, +) +from lerobot.utils.constants import HF_LEROBOT_HOME + +CODEBASE_VERSION = "v3.0" + + +class LeRobotDatasetMetadata: + def __init__( + self, + repo_id: str, + root: str | Path | None = None, + revision: str | None = None, + force_cache_sync: bool = False, + metadata_buffer_size: int = 10, + ): + self.repo_id = repo_id + self.revision = revision if revision else CODEBASE_VERSION + self.root = Path(root) if root is not None else HF_LEROBOT_HOME / repo_id + self.writer = None + self.latest_episode = None + self.metadata_buffer: list[dict] = [] + self.metadata_buffer_size = metadata_buffer_size + + try: + if force_cache_sync: + raise FileNotFoundError + self.load_metadata() + except (FileNotFoundError, NotADirectoryError): + if is_valid_version(self.revision): + self.revision = get_safe_version(self.repo_id, self.revision) + + (self.root / "meta").mkdir(exist_ok=True, parents=True) + self.pull_from_repo(allow_patterns="meta/") + self.load_metadata() + + def _flush_metadata_buffer(self) -> None: + """Write all buffered episode metadata to parquet file.""" + if not hasattr(self, "metadata_buffer") or len(self.metadata_buffer) == 0: + return + + combined_dict = {} + for episode_dict in self.metadata_buffer: + for key, value in episode_dict.items(): + if key not in combined_dict: + combined_dict[key] = [] + # Extract value and serialize numpy arrays + # because PyArrow's from_pydict function doesn't support numpy arrays + val = value[0] if isinstance(value, list) else value + combined_dict[key].append(val.tolist() if isinstance(val, 
np.ndarray) else val) + + first_ep = self.metadata_buffer[0] + chunk_idx = first_ep["meta/episodes/chunk_index"][0] + file_idx = first_ep["meta/episodes/file_index"][0] + + table = pa.Table.from_pydict(combined_dict) + + if not self.writer: + path = Path(self.root / DEFAULT_EPISODES_PATH.format(chunk_index=chunk_idx, file_index=file_idx)) + path.parent.mkdir(parents=True, exist_ok=True) + self.writer = pq.ParquetWriter( + path, schema=table.schema, compression="snappy", use_dictionary=True + ) + + self.writer.write_table(table) + + self.latest_episode = self.metadata_buffer[-1] + self.metadata_buffer.clear() + + def _close_writer(self) -> None: + """Close and cleanup the parquet writer if it exists.""" + self._flush_metadata_buffer() + + writer = getattr(self, "writer", None) + if writer is not None: + writer.close() + self.writer = None + + def __del__(self): + """ + Trust the user to call .finalize() but as an added safety check call the parquet writer to stop when calling the destructor + """ + self._close_writer() + + def load_metadata(self): + self.info = load_info(self.root) + check_version_compatibility(self.repo_id, self._version, CODEBASE_VERSION) + self.tasks = load_tasks(self.root) + self.episodes = load_episodes(self.root) + self.stats = load_stats(self.root) + + def pull_from_repo( + self, + allow_patterns: list[str] | str | None = None, + ignore_patterns: list[str] | str | None = None, + ) -> None: + snapshot_download( + self.repo_id, + repo_type="dataset", + revision=self.revision, + local_dir=self.root, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + ) + + @property + def url_root(self) -> str: + return f"hf://datasets/{self.repo_id}" + + @property + def _version(self) -> packaging.version.Version: + """Codebase version used to create this dataset.""" + return packaging.version.parse(self.info["codebase_version"]) + + def get_data_file_path(self, ep_index: int) -> Path: + if self.episodes is None: + self.episodes = load_episodes(self.root) + if ep_index >= len(self.episodes): + raise IndexError( + f"Episode index {ep_index} out of range. Episodes: {len(self.episodes) if self.episodes else 0}" + ) + ep = self.episodes[ep_index] + chunk_idx = ep["data/chunk_index"] + file_idx = ep["data/file_index"] + fpath = self.data_path.format(chunk_index=chunk_idx, file_index=file_idx) + return Path(fpath) + + def get_video_file_path(self, ep_index: int, vid_key: str) -> Path: + if self.episodes is None: + self.episodes = load_episodes(self.root) + if ep_index >= len(self.episodes): + raise IndexError( + f"Episode index {ep_index} out of range. 
Episodes: {len(self.episodes) if self.episodes else 0}" + ) + ep = self.episodes[ep_index] + chunk_idx = ep[f"videos/{vid_key}/chunk_index"] + file_idx = ep[f"videos/{vid_key}/file_index"] + fpath = self.video_path.format(video_key=vid_key, chunk_index=chunk_idx, file_index=file_idx) + return Path(fpath) + + @property + def data_path(self) -> str: + """Formattable string for the parquet files.""" + return self.info["data_path"] + + @property + def video_path(self) -> str | None: + """Formattable string for the video files.""" + return self.info["video_path"] + + @property + def robot_type(self) -> str | None: + """Robot type used in recording this dataset.""" + return self.info["robot_type"] + + @property + def fps(self) -> int: + """Frames per second used during data collection.""" + return self.info["fps"] + + @property + def features(self) -> dict[str, dict]: + """All features contained in the dataset.""" + return self.info["features"] + + @property + def image_keys(self) -> list[str]: + """Keys to access visual modalities stored as images.""" + return [key for key, ft in self.features.items() if ft["dtype"] == "image"] + + @property + def video_keys(self) -> list[str]: + """Keys to access visual modalities stored as videos.""" + return [key for key, ft in self.features.items() if ft["dtype"] == "video"] + + @property + def camera_keys(self) -> list[str]: + """Keys to access visual modalities (regardless of their storage method).""" + return [key for key, ft in self.features.items() if ft["dtype"] in ["video", "image"]] + + @property + def names(self) -> dict[str, list | dict]: + """Names of the various dimensions of vector modalities.""" + return {key: ft["names"] for key, ft in self.features.items()} + + @property + def shapes(self) -> dict: + """Shapes for the different features.""" + return {key: tuple(ft["shape"]) for key, ft in self.features.items()} + + @property + def total_episodes(self) -> int: + """Total number of episodes available.""" + return self.info["total_episodes"] + + @property + def total_frames(self) -> int: + """Total number of frames saved in this dataset.""" + return self.info["total_frames"] + + @property + def total_tasks(self) -> int: + """Total number of different tasks performed in this dataset.""" + return self.info["total_tasks"] + + @property + def chunks_size(self) -> int: + """Max number of files per chunk.""" + return self.info["chunks_size"] + + @property + def data_files_size_in_mb(self) -> int: + """Max size of data file in mega bytes.""" + return self.info["data_files_size_in_mb"] + + @property + def video_files_size_in_mb(self) -> int: + """Max size of video file in mega bytes.""" + return self.info["video_files_size_in_mb"] + + def get_task_index(self, task: str) -> int | None: + """ + Given a task in natural language, returns its task_index if the task already exists in the dataset, + otherwise return None. 
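+
+        Example (illustrative; the task string is a placeholder):
+            task_idx = meta.get_task_index("pick up the red cube")  # None if the task is unknown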
+ """ + if task in self.tasks.index: + return int(self.tasks.loc[task].task_index) + else: + return None + + def save_episode_tasks(self, tasks: list[str]): + if len(set(tasks)) != len(tasks): + raise ValueError(f"Tasks are not unique: {tasks}") + + if self.tasks is None: + new_tasks = tasks + task_indices = range(len(tasks)) + self.tasks = pd.DataFrame({"task_index": task_indices}, index=tasks) + else: + new_tasks = [task for task in tasks if task not in self.tasks.index] + new_task_indices = range(len(self.tasks), len(self.tasks) + len(new_tasks)) + for task_idx, task in zip(new_task_indices, new_tasks, strict=False): + self.tasks.loc[task] = task_idx + + if len(new_tasks) > 0: + # Update on disk + write_tasks(self.tasks, self.root) + + def _save_episode_metadata(self, episode_dict: dict) -> None: + """Buffer episode metadata and write to parquet in batches for efficiency. + + This function accumulates episode metadata in a buffer and flushes it when the buffer + reaches the configured size. This reduces I/O overhead by writing multiple episodes + at once instead of one row at a time. + + Notes: We both need to update parquet files and HF dataset: + - `pandas` loads parquet file in RAM + - `datasets` relies on a memory mapping from pyarrow (no RAM). It either converts parquet files to a pyarrow cache on disk, + or loads directly from pyarrow cache. + """ + # Convert to list format for each value + episode_dict = {key: [value] for key, value in episode_dict.items()} + num_frames = episode_dict["length"][0] + + if self.latest_episode is None: + # Initialize indices and frame count for a new dataset made of the first episode data + chunk_idx, file_idx = 0, 0 + if self.episodes is not None and len(self.episodes) > 0: + # It means we are resuming recording, so we need to load the latest episode + # Update the indices to avoid overwriting the latest episode + chunk_idx = self.episodes[-1]["meta/episodes/chunk_index"] + file_idx = self.episodes[-1]["meta/episodes/file_index"] + latest_num_frames = self.episodes[-1]["dataset_to_index"] + episode_dict["dataset_from_index"] = [latest_num_frames] + episode_dict["dataset_to_index"] = [latest_num_frames + num_frames] + + # When resuming, move to the next file + chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, self.chunks_size) + else: + episode_dict["dataset_from_index"] = [0] + episode_dict["dataset_to_index"] = [num_frames] + + episode_dict["meta/episodes/chunk_index"] = [chunk_idx] + episode_dict["meta/episodes/file_index"] = [file_idx] + else: + chunk_idx = self.latest_episode["meta/episodes/chunk_index"][0] + file_idx = self.latest_episode["meta/episodes/file_index"][0] + + latest_path = ( + self.root / DEFAULT_EPISODES_PATH.format(chunk_index=chunk_idx, file_index=file_idx) + if self.writer is None + else self.writer.where + ) + + if Path(latest_path).exists(): + latest_size_in_mb = get_file_size_in_mb(Path(latest_path)) + latest_num_frames = self.latest_episode["episode_index"][0] + + av_size_per_frame = latest_size_in_mb / latest_num_frames if latest_num_frames > 0 else 0.0 + + if latest_size_in_mb + av_size_per_frame * num_frames >= self.data_files_size_in_mb: + # Size limit is reached, flush buffer and prepare new parquet file + self._flush_metadata_buffer() + chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, self.chunks_size) + self._close_writer() + + # Update the existing pandas dataframe with new row + episode_dict["meta/episodes/chunk_index"] = [chunk_idx] + 
episode_dict["meta/episodes/file_index"] = [file_idx] + episode_dict["dataset_from_index"] = [self.latest_episode["dataset_to_index"][0]] + episode_dict["dataset_to_index"] = [self.latest_episode["dataset_to_index"][0] + num_frames] + + # Add to buffer + self.metadata_buffer.append(episode_dict) + self.latest_episode = episode_dict + + if len(self.metadata_buffer) >= self.metadata_buffer_size: + self._flush_metadata_buffer() + + def save_episode( + self, + episode_index: int, + episode_length: int, + episode_tasks: list[str], + episode_stats: dict[str, dict], + episode_metadata: dict, + ) -> None: + episode_dict = { + "episode_index": episode_index, + "tasks": episode_tasks, + "length": episode_length, + } + episode_dict.update(episode_metadata) + episode_dict.update(flatten_dict({"stats": episode_stats})) + self._save_episode_metadata(episode_dict) + + # Update info + self.info["total_episodes"] += 1 + self.info["total_frames"] += episode_length + self.info["total_tasks"] = len(self.tasks) + self.info["splits"] = {"train": f"0:{self.info['total_episodes']}"} + + write_info(self.info, self.root) + + self.stats = aggregate_stats([self.stats, episode_stats]) if self.stats is not None else episode_stats + write_stats(self.stats, self.root) + + def update_video_info(self, video_key: str | None = None) -> None: + """ + Warning: this function writes info from first episode videos, implicitly assuming that all videos have + been encoded the same way. Also, this means it assumes the first episode exists. + """ + if video_key is not None and video_key not in self.video_keys: + raise ValueError(f"Video key {video_key} not found in dataset") + + video_keys = [video_key] if video_key is not None else self.video_keys + for key in video_keys: + if not self.features[key].get("info", None): + video_path = self.root / self.video_path.format(video_key=key, chunk_index=0, file_index=0) + self.info["features"][key]["info"] = get_video_info(video_path) + + def update_chunk_settings( + self, + chunks_size: int | None = None, + data_files_size_in_mb: int | None = None, + video_files_size_in_mb: int | None = None, + ) -> None: + """Update chunk and file size settings after dataset creation. + + This allows users to customize storage organization without modifying the constructor. + These settings control how episodes are chunked and how large files can grow before + creating new ones. + + Args: + chunks_size: Maximum number of files per chunk directory. If None, keeps current value. + data_files_size_in_mb: Maximum size for data parquet files in MB. If None, keeps current value. + video_files_size_in_mb: Maximum size for video files in MB. If None, keeps current value. + """ + if chunks_size is not None: + if chunks_size <= 0: + raise ValueError(f"chunks_size must be positive, got {chunks_size}") + self.info["chunks_size"] = chunks_size + + if data_files_size_in_mb is not None: + if data_files_size_in_mb <= 0: + raise ValueError(f"data_files_size_in_mb must be positive, got {data_files_size_in_mb}") + self.info["data_files_size_in_mb"] = data_files_size_in_mb + + if video_files_size_in_mb is not None: + if video_files_size_in_mb <= 0: + raise ValueError(f"video_files_size_in_mb must be positive, got {video_files_size_in_mb}") + self.info["video_files_size_in_mb"] = video_files_size_in_mb + + # Update the info file on disk + write_info(self.info, self.root) + + def get_chunk_settings(self) -> dict[str, int]: + """Get current chunk and file size settings. 
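+
+        The returned keys mirror the arguments of `update_chunk_settings`, so the two
+        methods can be used together to inspect and then adjust the storage limits.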
+ + Returns: + Dict containing chunks_size, data_files_size_in_mb, and video_files_size_in_mb. + """ + return { + "chunks_size": self.chunks_size, + "data_files_size_in_mb": self.data_files_size_in_mb, + "video_files_size_in_mb": self.video_files_size_in_mb, + } + + def __repr__(self): + feature_keys = list(self.features) + return ( + f"{self.__class__.__name__}({{\n" + f" Repository ID: '{self.repo_id}',\n" + f" Total episodes: '{self.total_episodes}',\n" + f" Total frames: '{self.total_frames}',\n" + f" Features: '{feature_keys}',\n" + "})',\n" + ) + + @classmethod + def create( + cls, + repo_id: str, + fps: int, + features: dict, + robot_type: str | None = None, + root: str | Path | None = None, + use_videos: bool = True, + metadata_buffer_size: int = 10, + chunks_size: int | None = None, + data_files_size_in_mb: int | None = None, + video_files_size_in_mb: int | None = None, + ) -> "LeRobotDatasetMetadata": + """Creates metadata for a LeRobotDataset.""" + obj = cls.__new__(cls) + obj.repo_id = repo_id + obj.root = Path(root) if root is not None else HF_LEROBOT_HOME / repo_id + + obj.root.mkdir(parents=True, exist_ok=False) + + features = {**features, **DEFAULT_FEATURES} + _validate_feature_names(features) + + obj.tasks = None + obj.episodes = None + obj.stats = None + obj.info = create_empty_dataset_info( + CODEBASE_VERSION, + fps, + features, + use_videos, + robot_type, + chunks_size, + data_files_size_in_mb, + video_files_size_in_mb, + ) + if len(obj.video_keys) > 0 and not use_videos: + raise ValueError() + write_json(obj.info, obj.root / INFO_PATH) + obj.revision = None + obj.writer = None + obj.latest_episode = None + obj.metadata_buffer = [] + obj.metadata_buffer_size = metadata_buffer_size + return obj + + +class LeRobotDataset(torch.utils.data.Dataset): + def __init__( + self, + repo_id: str, + root: str | Path | None = None, + episodes: list[int] | None = None, + image_transforms: Callable | None = None, + delta_timestamps: dict[str, list[float]] | None = None, + tolerance_s: float = 1e-4, + revision: str | None = None, + force_cache_sync: bool = False, + download_videos: bool = True, + video_backend: str | None = None, + batch_encoding_size: int = 1, + ): + """ + 2 modes are available for instantiating this class, depending on 2 different use cases: + + 1. Your dataset already exists: + - On your local disk in the 'root' folder. This is typically the case when you recorded your + dataset locally and you may or may not have pushed it to the hub yet. Instantiating this class + with 'root' will load your dataset directly from disk. This can happen while you're offline (no + internet connection). + + - On the Hugging Face Hub at the address https://huggingface.co/datasets/{repo_id} and not on + your local disk in the 'root' folder. Instantiating this class with this 'repo_id' will download + the dataset from that address and load it, pending your dataset is compliant with + codebase_version v3.0. If your dataset has been created before this new format, you will be + prompted to convert it using our conversion script from v2.1 to v3.0, which you can find at + lerobot/datasets/v30/convert_dataset_v21_to_v30.py. + + + 2. Your dataset doesn't already exists (either on local disk or on the Hub): you can create an empty + LeRobotDataset with the 'create' classmethod. This can be used for recording a dataset or port an + existing dataset to the LeRobotDataset format. 
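+
+        Example (illustrative; the repo id is a placeholder):
+            # Mode 1: load an existing dataset, keeping only a subset of episodes.
+            dataset = LeRobotDataset("user/my_dataset", episodes=[0, 1, 2])
+            frame = dataset[0]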
+ + + In terms of files, LeRobotDataset encapsulates 3 main things: + - metadata: + - info contains various information about the dataset like shapes, keys, fps etc. + - stats stores the dataset statistics of the different modalities for normalization + - tasks contains the prompts for each task of the dataset, which can be used for + task-conditioned training. + - hf_dataset (from datasets.Dataset), which will read any values from parquet files. + - videos (optional) from which frames are loaded to be synchronous with data from parquet files. + + A typical LeRobotDataset looks like this from its root path: + . + ├── data + │ ├── chunk-000 + │ │ ├── file-000.parquet + │ │ ├── file-001.parquet + │ │ └── ... + │ ├── chunk-001 + │ │ ├── file-000.parquet + │ │ ├── file-001.parquet + │ │ └── ... + │ └── ... + ├── meta + │ ├── episodes + │ │ ├── chunk-000 + │ │ │ ├── file-000.parquet + │ │ │ ├── file-001.parquet + │ │ │ └── ... + │ │ ├── chunk-001 + │ │ │ └── ... + │ │ └── ... + │ ├── info.json + │ ├── stats.json + │ └── tasks.parquet + └── videos + ├── observation.images.laptop + │ ├── chunk-000 + │ │ ├── file-000.mp4 + │ │ ├── file-001.mp4 + │ │ └── ... + │ ├── chunk-001 + │ │ └── ... + │ └── ... + ├── observation.images.phone + │ ├── chunk-000 + │ │ ├── file-000.mp4 + │ │ ├── file-001.mp4 + │ │ └── ... + │ ├── chunk-001 + │ │ └── ... + │ └── ... + └── ... + + Note that this file-based structure is designed to be as versatile as possible. Multiple episodes are + consolidated into chunked files which improves storage efficiency and loading performance. The + structure of the dataset is entirely described in the info.json file, which can be easily downloaded + or viewed directly on the hub before downloading any actual data. The type of files used are very + simple and do not need complex tools to be read, it only uses .parquet, .json and .mp4 files (and .md + for the README). + + Args: + repo_id (str): This is the repo id that will be used to fetch the dataset. Locally, the dataset + will be stored under root/repo_id. + root (Path | None, optional): Local directory to use for downloading/writing files. You can also + set the LEROBOT_HOME environment variable to point to a different location. Defaults to + '~/.cache/huggingface/lerobot'. + episodes (list[int] | None, optional): If specified, this will only load episodes specified by + their episode_index in this list. Defaults to None. + image_transforms (Callable | None, optional): You can pass standard v2 image transforms from + torchvision.transforms.v2 here which will be applied to visual modalities (whether they come + from videos or images). Defaults to None. + delta_timestamps (dict[list[float]] | None, optional): _description_. Defaults to None. + tolerance_s (float, optional): Tolerance in seconds used to ensure data timestamps are actually in + sync with the fps value. It is used at the init of the dataset to make sure that each + timestamps is separated to the next by 1/fps +/- tolerance_s. This also applies to frames + decoded from video files. It is also used to check that `delta_timestamps` (when provided) are + multiples of 1/fps. Defaults to 1e-4. + revision (str, optional): An optional Git revision id which can be a branch name, a tag, or a + commit hash. Defaults to current codebase version tag. + force_cache_sync (bool, optional): Flag to sync and refresh local files first. If True and files + are already present in the local cache, this will be faster. 
However, files loaded might not + be in sync with the version on the hub, especially if you specified 'revision'. Defaults to + False. + download_videos (bool, optional): Flag to download the videos. Note that when set to True but the + video files are already present on local disk, they won't be downloaded again. Defaults to + True. + video_backend (str | None, optional): Video backend to use for decoding videos. Defaults to torchcodec when available int the platform; otherwise, defaults to 'pyav'. + You can also use the 'pyav' decoder used by Torchvision, which used to be the default option, or 'video_reader' which is another decoder of Torchvision. + batch_encoding_size (int, optional): Number of episodes to accumulate before batch encoding videos. + Set to 1 for immediate encoding (default), or higher for batched encoding. Defaults to 1. + """ + super().__init__() + self.repo_id = repo_id + self.root = Path(root) if root else HF_LEROBOT_HOME / repo_id + self.image_transforms = image_transforms + self.delta_timestamps = delta_timestamps + self.episodes = episodes + self.tolerance_s = tolerance_s + self.revision = revision if revision else CODEBASE_VERSION + self.video_backend = video_backend if video_backend else get_safe_default_codec() + self.delta_indices = None + self.batch_encoding_size = batch_encoding_size + self.episodes_since_last_encoding = 0 + + # Unused attributes + self.image_writer = None + self.episode_buffer = None + self.writer = None + self.latest_episode = None + self._current_file_start_frame = None # Track the starting frame index of the current parquet file + + self.root.mkdir(exist_ok=True, parents=True) + + # Load metadata + self.meta = LeRobotDatasetMetadata( + self.repo_id, self.root, self.revision, force_cache_sync=force_cache_sync + ) + + # Track dataset state for efficient incremental writing + self._lazy_loading = False + self._recorded_frames = self.meta.total_frames + self._writer_closed_for_reading = False + + # Load actual data + try: + if force_cache_sync: + raise FileNotFoundError + self.hf_dataset = self.load_hf_dataset() + # Check if cached dataset contains all requested episodes + if not self._check_cached_episodes_sufficient(): + raise FileNotFoundError("Cached dataset doesn't contain all requested episodes") + except (AssertionError, FileNotFoundError, NotADirectoryError): + if is_valid_version(self.revision): + self.revision = get_safe_version(self.repo_id, self.revision) + self.download(download_videos) + self.hf_dataset = self.load_hf_dataset() + + # Create mapping from absolute indices to relative indices when only a subset of the episodes are loaded + # Build a mapping: absolute_index -> relative_index_in_filtered_dataset + self._absolute_to_relative_idx = None + if self.episodes is not None: + self._absolute_to_relative_idx = { + abs_idx.item() if isinstance(abs_idx, torch.Tensor) else abs_idx: rel_idx + for rel_idx, abs_idx in enumerate(self.hf_dataset["index"]) + } + + # Setup delta_indices + if self.delta_timestamps is not None: + check_delta_timestamps(self.delta_timestamps, self.fps, self.tolerance_s) + self.delta_indices = get_delta_indices(self.delta_timestamps, self.fps) + + def _close_writer(self) -> None: + """Close and cleanup the parquet writer if it exists.""" + writer = getattr(self, "writer", None) + if writer is not None: + writer.close() + self.writer = None + + def __del__(self): + """ + Trust the user to call .finalize() but as an added safety check call the parquet writer to stop when calling the destructor + """ + 
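As a usage sketch of the first mode described in this docstring (loading an existing dataset), with a placeholder repo id, episode list, and `delta_timestamps` values:

```python
from lerobot.datasets.lerobot_dataset import LeRobotDataset

# Load an existing dataset from the local cache or the Hub (repo id is illustrative).
ds = LeRobotDataset(
    "lerobot/pusht",
    episodes=[0, 1, 2],                             # only load these three episodes
    delta_timestamps={"action": [0.0, 0.1, 0.2]},   # also fetch the next two actions per frame
)
print(ds.num_episodes, ds.num_frames)
item = ds[0]   # dict of torch tensors, plus a natural-language "task" string
```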
self._close_writer() + + def push_to_hub( + self, + branch: str | None = None, + tags: list | None = None, + license: str | None = "apache-2.0", + tag_version: bool = True, + push_videos: bool = True, + private: bool = False, + allow_patterns: list[str] | str | None = None, + upload_large_folder: bool = False, + **card_kwargs, + ) -> None: + ignore_patterns = ["images/"] + if not push_videos: + ignore_patterns.append("videos/") + + hub_api = HfApi() + hub_api.create_repo( + repo_id=self.repo_id, + private=private, + repo_type="dataset", + exist_ok=True, + ) + if branch: + hub_api.create_branch( + repo_id=self.repo_id, + branch=branch, + revision=self.revision, + repo_type="dataset", + exist_ok=True, + ) + + upload_kwargs = { + "repo_id": self.repo_id, + "folder_path": self.root, + "repo_type": "dataset", + "revision": branch, + "allow_patterns": allow_patterns, + "ignore_patterns": ignore_patterns, + } + if upload_large_folder: + hub_api.upload_large_folder(**upload_kwargs) + else: + hub_api.upload_folder(**upload_kwargs) + + card = create_lerobot_dataset_card( + tags=tags, dataset_info=self.meta.info, license=license, **card_kwargs + ) + card.push_to_hub(repo_id=self.repo_id, repo_type="dataset", revision=branch) + + if tag_version: + with contextlib.suppress(RevisionNotFoundError): + hub_api.delete_tag(self.repo_id, tag=CODEBASE_VERSION, repo_type="dataset") + hub_api.create_tag(self.repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset") + + def pull_from_repo( + self, + allow_patterns: list[str] | str | None = None, + ignore_patterns: list[str] | str | None = None, + ) -> None: + snapshot_download( + self.repo_id, + repo_type="dataset", + revision=self.revision, + local_dir=self.root, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + ) + + def download(self, download_videos: bool = True) -> None: + """Downloads the dataset from the given 'repo_id' at the provided version. If 'episodes' is given, this + will only download those episodes (selected by their episode_index). If 'episodes' is None, the whole + dataset will be downloaded. Thanks to the behavior of snapshot_download, if the files are already present + in 'local_dir', they won't be downloaded again. 
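A hedged sketch of uploading a locally recorded dataset with `push_to_hub`; the branch name and tags are placeholders.

```python
ds.finalize()                      # close parquet writers so all file footers are written
ds.push_to_hub(
    branch="my-upload-branch",     # omit to push to the default branch
    tags=["robotics"],
    private=False,
    upload_large_folder=True,      # use HfApi.upload_large_folder for very large datasets
)
```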
+ """ + # TODO(rcadene, aliberts): implement faster transfer + # https://huggingface.co/docs/huggingface_hub/en/guides/download#faster-downloads + ignore_patterns = None if download_videos else "videos/" + files = None + if self.episodes is not None: + files = self.get_episodes_file_paths() + self.pull_from_repo(allow_patterns=files, ignore_patterns=ignore_patterns) + + def get_episodes_file_paths(self) -> list[Path]: + episodes = self.episodes if self.episodes is not None else list(range(self.meta.total_episodes)) + fpaths = [str(self.meta.get_data_file_path(ep_idx)) for ep_idx in episodes] + if len(self.meta.video_keys) > 0: + video_files = [ + str(self.meta.get_video_file_path(ep_idx, vid_key)) + for vid_key in self.meta.video_keys + for ep_idx in episodes + ] + fpaths += video_files + # episodes are stored in the same files, so we return unique paths only + fpaths = list(set(fpaths)) + return fpaths + + def load_hf_dataset(self) -> datasets.Dataset: + """hf_dataset contains all the observations, states, actions, rewards, etc.""" + features = get_hf_features_from_features(self.features) + hf_dataset = load_nested_dataset(self.root / "data", features=features, episodes=self.episodes) + hf_dataset.set_transform(hf_transform_to_torch) + return hf_dataset + + def _check_cached_episodes_sufficient(self) -> bool: + """Check if the cached dataset contains all requested episodes and their video files.""" + if self.hf_dataset is None or len(self.hf_dataset) == 0: + return False + + # Get available episode indices from cached dataset + available_episodes = { + ep_idx.item() if isinstance(ep_idx, torch.Tensor) else ep_idx + for ep_idx in self.hf_dataset.unique("episode_index") + } + + # Determine requested episodes + if self.episodes is None: + requested_episodes = set(range(self.meta.total_episodes)) + else: + requested_episodes = set(self.episodes) + + # Check if all requested episodes are available in cached data + if not requested_episodes.issubset(available_episodes): + return False + + # Check if all required video files exist + if len(self.meta.video_keys) > 0: + for ep_idx in requested_episodes: + for vid_key in self.meta.video_keys: + video_path = self.root / self.meta.get_video_file_path(ep_idx, vid_key) + if not video_path.exists(): + return False + + return True + + def create_hf_dataset(self) -> datasets.Dataset: + features = get_hf_features_from_features(self.features) + ft_dict = {col: [] for col in features} + hf_dataset = datasets.Dataset.from_dict(ft_dict, features=features, split="train") + hf_dataset.set_transform(hf_transform_to_torch) + return hf_dataset + + @property + def fps(self) -> int: + """Frames per second used during data collection.""" + return self.meta.fps + + @property + def num_frames(self) -> int: + """Number of frames in selected episodes. + + Note: When episodes a subset of the full dataset is requested, we must return the + actual loaded data length (len(self.hf_dataset)) rather than metadata total_frames. + self.meta.total_frames is the total number of frames in the full dataset. 
+ """ + if self.episodes is not None and self.hf_dataset is not None: + return len(self.hf_dataset) + return self.meta.total_frames + + @property + def num_episodes(self) -> int: + """Number of episodes selected.""" + return len(self.episodes) if self.episodes is not None else self.meta.total_episodes + + @property + def features(self) -> dict[str, dict]: + return self.meta.features + + @property + def hf_features(self) -> datasets.Features: + """Features of the hf_dataset.""" + if self.hf_dataset is not None: + return self.hf_dataset.features + else: + return get_hf_features_from_features(self.features) + + def _get_query_indices(self, idx: int, ep_idx: int) -> tuple[dict[str, list[int | bool]]]: + ep = self.meta.episodes[ep_idx] + ep_start = ep["dataset_from_index"] + ep_end = ep["dataset_to_index"] + query_indices = { + key: [max(ep_start, min(ep_end - 1, idx + delta)) for delta in delta_idx] + for key, delta_idx in self.delta_indices.items() + } + padding = { # Pad values outside of current episode range + f"{key}_is_pad": torch.BoolTensor( + [(idx + delta < ep_start) | (idx + delta >= ep_end) for delta in delta_idx] + ) + for key, delta_idx in self.delta_indices.items() + } + return query_indices, padding + + def _get_query_timestamps( + self, + current_ts: float, + query_indices: dict[str, list[int]] | None = None, + ) -> dict[str, list[float]]: + query_timestamps = {} + for key in self.meta.video_keys: + if query_indices is not None and key in query_indices: + if self._absolute_to_relative_idx is not None: + relative_indices = [self._absolute_to_relative_idx[idx] for idx in query_indices[key]] + timestamps = self.hf_dataset[relative_indices]["timestamp"] + else: + timestamps = self.hf_dataset[query_indices[key]]["timestamp"] + query_timestamps[key] = torch.stack(timestamps).tolist() + else: + query_timestamps[key] = [current_ts] + + return query_timestamps + + def _query_hf_dataset(self, query_indices: dict[str, list[int]]) -> dict: + """ + Query dataset for indices across keys, skipping video keys. + + Tries column-first [key][indices] for speed, falls back to row-first. + + Args: + query_indices: Dict mapping keys to index lists to retrieve + + Returns: + Dict with stacked tensors of queried data (video keys excluded) + """ + result: dict = {} + for key, q_idx in query_indices.items(): + if key in self.meta.video_keys: + continue + # Map absolute indices to relative indices if needed + relative_indices = ( + q_idx + if self._absolute_to_relative_idx is None + else [self._absolute_to_relative_idx[idx] for idx in q_idx] + ) + try: + result[key] = torch.stack(self.hf_dataset[key][relative_indices]) + except (KeyError, TypeError, IndexError): + result[key] = torch.stack(self.hf_dataset[relative_indices][key]) + return result + + def _query_videos(self, query_timestamps: dict[str, list[float]], ep_idx: int) -> dict[str, torch.Tensor]: + """Note: When using data workers (e.g. DataLoader with num_workers>0), do not call this function + in the main process (e.g. by using a second Dataloader with num_workers=0). It will result in a + Segmentation Fault. This probably happens because a memory reference to the video loader is created in + the main process and a subprocess fails to access it. + """ + ep = self.meta.episodes[ep_idx] + item = {} + for vid_key, query_ts in query_timestamps.items(): + # Episodes are stored sequentially on a single mp4 to reduce the number of files. + # Thus we load the start timestamp of the episode on this mp4 and, + # shift the query timestamp accordingly. 
+ from_timestamp = ep[f"videos/{vid_key}/from_timestamp"] + shifted_query_ts = [from_timestamp + ts for ts in query_ts] + + video_path = self.root / self.meta.get_video_file_path(ep_idx, vid_key) + frames = decode_video_frames(video_path, shifted_query_ts, self.tolerance_s, self.video_backend) + item[vid_key] = frames.squeeze(0) + + return item + + def _ensure_hf_dataset_loaded(self): + """Lazy load the HF dataset only when needed for reading.""" + if self._lazy_loading or self.hf_dataset is None: + # Close the writer before loading to ensure parquet file is properly finalized + if self.writer is not None: + self._close_writer() + self._writer_closed_for_reading = True + self.hf_dataset = self.load_hf_dataset() + self._lazy_loading = False + + def __len__(self): + return self.num_frames + + def __getitem__(self, idx) -> dict: + # Ensure dataset is loaded when we actually need to read from it + self._ensure_hf_dataset_loaded() + item = self.hf_dataset[idx] + ep_idx = item["episode_index"].item() + + query_indices = None + if self.delta_indices is not None: + query_indices, padding = self._get_query_indices(idx, ep_idx) + query_result = self._query_hf_dataset(query_indices) + item = {**item, **padding} + for key, val in query_result.items(): + item[key] = val + + if len(self.meta.video_keys) > 0: + current_ts = item["timestamp"].item() + query_timestamps = self._get_query_timestamps(current_ts, query_indices) + video_frames = self._query_videos(query_timestamps, ep_idx) + item = {**video_frames, **item} + + if self.image_transforms is not None: + image_keys = self.meta.camera_keys + for cam in image_keys: + item[cam] = self.image_transforms(item[cam]) + + # Add task as a string + task_idx = item["task_index"].item() + item["task"] = self.meta.tasks.iloc[task_idx].name + return item + + def __repr__(self): + feature_keys = list(self.features) + return ( + f"{self.__class__.__name__}({{\n" + f" Repository ID: '{self.repo_id}',\n" + f" Number of selected episodes: '{self.num_episodes}',\n" + f" Number of selected samples: '{self.num_frames}',\n" + f" Features: '{feature_keys}',\n" + "})',\n" + ) + + def finalize(self): + """ + Close the parquet writers. This function needs to be called after data collection/conversion, else footer metadata won't be written to the parquet files. 
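To make the `delta_timestamps` padding concrete, a small sketch of what `__getitem__` returns near an episode boundary; the feature key, fps, and shapes are assumptions.

```python
# Assume the dataset was built with delta_timestamps={"observation.state": [-0.1, 0.0]} at fps=10.
item = ds[0]                             # first frame of an episode
item["observation.state"].shape          # (2, state_dim): previous frame + current frame
item["observation.state_is_pad"]         # tensor([True, False]): the -0.1 query fell before the episode
```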
+ The dataset won't be valid and can't be loaded as ds = LeRobotDataset(repo_id=repo, root=HF_LEROBOT_HOME.joinpath(repo)) + """ + self._close_writer() + self.meta._close_writer() + + def create_episode_buffer(self, episode_index: int | None = None) -> dict: + current_ep_idx = self.meta.total_episodes if episode_index is None else episode_index + ep_buffer = {} + # size and task are special cases that are not in self.features + ep_buffer["size"] = 0 + ep_buffer["task"] = [] + for key in self.features: + ep_buffer[key] = current_ep_idx if key == "episode_index" else [] + return ep_buffer + + def _get_image_file_path(self, episode_index: int, image_key: str, frame_index: int) -> Path: + fpath = DEFAULT_IMAGE_PATH.format( + image_key=image_key, episode_index=episode_index, frame_index=frame_index + ) + return self.root / fpath + + def _get_image_file_dir(self, episode_index: int, image_key: str) -> Path: + return self._get_image_file_path(episode_index, image_key, frame_index=0).parent + + def _save_image(self, image: torch.Tensor | np.ndarray | PIL.Image.Image, fpath: Path) -> None: + if self.image_writer is None: + if isinstance(image, torch.Tensor): + image = image.cpu().numpy() + write_image(image, fpath) + else: + self.image_writer.save_image(image=image, fpath=fpath) + + def add_frame(self, frame: dict) -> None: + """ + This function only adds the frame to the episode_buffer. Apart from images — which are written in a + temporary directory — nothing is written to disk. To save those frames, the 'save_episode()' method + then needs to be called. + """ + # Convert torch to numpy if needed + for name in frame: + if isinstance(frame[name], torch.Tensor): + frame[name] = frame[name].numpy() + + validate_frame(frame, self.features) + + if self.episode_buffer is None: + self.episode_buffer = self.create_episode_buffer() + + # Automatically add frame_index and timestamp to episode buffer + frame_index = self.episode_buffer["size"] + timestamp = frame.pop("timestamp") if "timestamp" in frame else frame_index / self.fps + self.episode_buffer["frame_index"].append(frame_index) + self.episode_buffer["timestamp"].append(timestamp) + self.episode_buffer["task"].append(frame.pop("task")) # Remove task from frame after processing + + # Add frame features to episode_buffer + for key in frame: + if key not in self.features: + raise ValueError( + f"An element of the frame is not in the features. '{key}' not in '{self.features.keys()}'." + ) + + if self.features[key]["dtype"] in ["image", "video"]: + img_path = self._get_image_file_path( + episode_index=self.episode_buffer["episode_index"], image_key=key, frame_index=frame_index + ) + if frame_index == 0: + img_path.parent.mkdir(parents=True, exist_ok=True) + self._save_image(frame[key], img_path) + self.episode_buffer[key].append(str(img_path)) + else: + self.episode_buffer[key].append(frame[key]) + + self.episode_buffer["size"] += 1 + + def save_episode(self, episode_data: dict | None = None) -> None: + """ + This will save to disk the current episode in self.episode_buffer. + + Video encoding is handled automatically based on batch_encoding_size: + - If batch_encoding_size == 1: Videos are encoded immediately after each episode + - If batch_encoding_size > 1: Videos are encoded in batches. + + Args: + episode_data (dict | None, optional): Dict containing the episode data to save. If None, this will + save the current episode in self.episode_buffer, which is filled with 'add_frame'. Defaults to + None. 
+ """ + episode_buffer = episode_data if episode_data is not None else self.episode_buffer + + validate_episode_buffer(episode_buffer, self.meta.total_episodes, self.features) + + # size and task are special cases that won't be added to hf_dataset + episode_length = episode_buffer.pop("size") + tasks = episode_buffer.pop("task") + episode_tasks = list(set(tasks)) + episode_index = episode_buffer["episode_index"] + + episode_buffer["index"] = np.arange(self.meta.total_frames, self.meta.total_frames + episode_length) + episode_buffer["episode_index"] = np.full((episode_length,), episode_index) + + # Update tasks and task indices with new tasks if any + self.meta.save_episode_tasks(episode_tasks) + + # Given tasks in natural language, find their corresponding task indices + episode_buffer["task_index"] = np.array([self.meta.get_task_index(task) for task in tasks]) + + for key, ft in self.features.items(): + # index, episode_index, task_index are already processed above, and image and video + # are processed separately by storing image path and frame info as meta data + if key in ["index", "episode_index", "task_index"] or ft["dtype"] in ["image", "video"]: + continue + episode_buffer[key] = np.stack(episode_buffer[key]) + + # Wait for image writer to end, so that episode stats over images can be computed + self._wait_image_writer() + ep_stats = compute_episode_stats(episode_buffer, self.features) + + ep_metadata = self._save_episode_data(episode_buffer) + has_video_keys = len(self.meta.video_keys) > 0 + use_batched_encoding = self.batch_encoding_size > 1 + + if has_video_keys and not use_batched_encoding: + for video_key in self.meta.video_keys: + ep_metadata.update(self._save_episode_video(video_key, episode_index)) + + # `meta.save_episode` need to be executed after encoding the videos + self.meta.save_episode(episode_index, episode_length, episode_tasks, ep_stats, ep_metadata) + + if has_video_keys and use_batched_encoding: + # Check if we should trigger batch encoding + self.episodes_since_last_encoding += 1 + if self.episodes_since_last_encoding == self.batch_encoding_size: + start_ep = self.num_episodes - self.batch_encoding_size + end_ep = self.num_episodes + self._batch_save_episode_video(start_ep, end_ep) + self.episodes_since_last_encoding = 0 + + if not episode_data: + # Reset episode buffer and clean up temporary images (if not already deleted during video encoding) + self.clear_episode_buffer(delete_images=len(self.meta.image_keys) > 0) + + def _batch_save_episode_video(self, start_episode: int, end_episode: int | None = None) -> None: + """ + Batch save videos for multiple episodes. + + Args: + start_episode: Starting episode index (inclusive) + end_episode: Ending episode index (exclusive). If None, encodes all episodes from start_episode to the current episode. 
+ """ + if end_episode is None: + end_episode = self.num_episodes + + logging.info( + f"Batch encoding {self.batch_encoding_size} videos for episodes {start_episode} to {end_episode - 1}" + ) + + chunk_idx = self.meta.episodes[start_episode]["data/chunk_index"] + file_idx = self.meta.episodes[start_episode]["data/file_index"] + episode_df_path = self.root / DEFAULT_EPISODES_PATH.format(chunk_index=chunk_idx, file_index=file_idx) + episode_df = pd.read_parquet(episode_df_path) + + for ep_idx in range(start_episode, end_episode): + logging.info(f"Encoding videos for episode {ep_idx}") + + if ( + self.meta.episodes[ep_idx]["data/chunk_index"] != chunk_idx + or self.meta.episodes[ep_idx]["data/file_index"] != file_idx + ): + # The current episode is in a new chunk or file. + # Save previous episode dataframe and update the Hugging Face dataset by reloading it. + episode_df.to_parquet(episode_df_path) + self.meta.episodes = load_episodes(self.root) + + # Load new episode dataframe + chunk_idx = self.meta.episodes[ep_idx]["data/chunk_index"] + file_idx = self.meta.episodes[ep_idx]["data/file_index"] + episode_df_path = self.root / DEFAULT_EPISODES_PATH.format( + chunk_index=chunk_idx, file_index=file_idx + ) + episode_df = pd.read_parquet(episode_df_path) + + # Save the current episode's video metadata to the dataframe + video_ep_metadata = {} + for video_key in self.meta.video_keys: + video_ep_metadata.update(self._save_episode_video(video_key, ep_idx)) + video_ep_metadata.pop("episode_index") + video_ep_df = pd.DataFrame(video_ep_metadata, index=[ep_idx]).convert_dtypes( + dtype_backend="pyarrow" + ) # allows NaN values along with integers + + episode_df = episode_df.combine_first(video_ep_df) + episode_df.to_parquet(episode_df_path) + self.meta.episodes = load_episodes(self.root) + + def _save_episode_data(self, episode_buffer: dict) -> dict: + """Save episode data to a parquet file and update the Hugging Face dataset of frames data. + + This function processes episodes data from a buffer, converts it into a Hugging Face dataset, + and saves it as a parquet file. It handles both the creation of new parquet files and the + updating of existing ones based on size constraints. After saving the data, it reloads + the Hugging Face dataset to ensure it is up-to-date. + + Notes: We both need to update parquet files and HF dataset: + - `pandas` loads parquet file in RAM + - `datasets` relies on a memory mapping from pyarrow (no RAM). It either converts parquet files to a pyarrow cache on disk, + or loads directly from pyarrow cache. 
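A worked example of the size-based rollover implemented in the method body below; every number is made up and `data_files_size_in_mb` is assumed to be 100 MB here.

```python
latest_size_in_mb = 95.0                 # size of the parquet file currently being appended to
frames_in_current_file = 1900
av_size_per_frame = latest_size_in_mb / frames_in_current_file      # ~0.05 MB per frame
ep_num_frames = 300                      # frames in the episode being saved
projected = latest_size_in_mb + av_size_per_frame * ep_num_frames   # ~110 MB
# projected >= data_files_size_in_mb (assumed 100 MB) -> allocate a new chunk/file index with
# update_chunk_file_indices() and re-create the parquet writer for the new file.
```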
+ """ + # Convert buffer into HF Dataset + ep_dict = {key: episode_buffer[key] for key in self.hf_features} + ep_dataset = datasets.Dataset.from_dict(ep_dict, features=self.hf_features, split="train") + ep_dataset = embed_images(ep_dataset) + ep_num_frames = len(ep_dataset) + + if self.latest_episode is None: + # Initialize indices and frame count for a new dataset made of the first episode data + chunk_idx, file_idx = 0, 0 + global_frame_index = 0 + self._current_file_start_frame = 0 + # However, if the episodes already exists + # It means we are resuming recording, so we need to load the latest episode + # Update the indices to avoid overwriting the latest episode + if self.meta.episodes is not None and len(self.meta.episodes) > 0: + latest_ep = self.meta.episodes[-1] + global_frame_index = latest_ep["dataset_to_index"] + chunk_idx = latest_ep["data/chunk_index"] + file_idx = latest_ep["data/file_index"] + + # When resuming, move to the next file + chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, self.meta.chunks_size) + self._current_file_start_frame = global_frame_index + else: + # Retrieve information from the latest parquet file + latest_ep = self.latest_episode + chunk_idx = latest_ep["data/chunk_index"] + file_idx = latest_ep["data/file_index"] + global_frame_index = latest_ep["index"][-1] + 1 + + latest_path = self.root / self.meta.data_path.format(chunk_index=chunk_idx, file_index=file_idx) + latest_size_in_mb = get_file_size_in_mb(latest_path) + + frames_in_current_file = global_frame_index - self._current_file_start_frame + av_size_per_frame = ( + latest_size_in_mb / frames_in_current_file if frames_in_current_file > 0 else 0 + ) + + # Determine if a new parquet file is needed + if ( + latest_size_in_mb + av_size_per_frame * ep_num_frames >= self.meta.data_files_size_in_mb + or self._writer_closed_for_reading + ): + # Size limit is reached or writer was closed for reading, prepare new parquet file + chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, self.meta.chunks_size) + self._close_writer() + self._writer_closed_for_reading = False + self._current_file_start_frame = global_frame_index + + ep_dict["data/chunk_index"] = chunk_idx + ep_dict["data/file_index"] = file_idx + + # Write the resulting dataframe from RAM to disk + path = self.root / self.meta.data_path.format(chunk_index=chunk_idx, file_index=file_idx) + path.parent.mkdir(parents=True, exist_ok=True) + + table = ep_dataset.with_format("arrow")[:] + if not self.writer: + self.writer = pq.ParquetWriter( + path, schema=table.schema, compression="snappy", use_dictionary=True + ) + self.writer.write_table(table) + + metadata = { + "data/chunk_index": chunk_idx, + "data/file_index": file_idx, + "dataset_from_index": global_frame_index, + "dataset_to_index": global_frame_index + ep_num_frames, + } + + # Store metadata with episode data for next episode + self.latest_episode = {**ep_dict, **metadata} + + # Mark that the HF dataset needs reloading (lazy loading approach) + # This avoids expensive reloading during sequential recording + self._lazy_loading = True + # Update recorded frames count for efficient length tracking + self._recorded_frames += ep_num_frames + + return metadata + + def _save_episode_video(self, video_key: str, episode_index: int) -> dict: + # Encode episode frames into a temporary video + ep_path = self._encode_temporary_episode_video(video_key, episode_index) + ep_size_in_mb = get_file_size_in_mb(ep_path) + ep_duration_in_s = get_video_duration_in_s(ep_path) + + if 
( + episode_index == 0 + or self.meta.latest_episode is None + or f"videos/{video_key}/chunk_index" not in self.meta.latest_episode + ): + # Initialize indices for a new dataset made of the first episode data + chunk_idx, file_idx = 0, 0 + if self.meta.episodes is not None and len(self.meta.episodes) > 0: + # It means we are resuming recording, so we need to load the latest episode + # Update the indices to avoid overwriting the latest episode + old_chunk_idx = self.meta.episodes[-1][f"videos/{video_key}/chunk_index"] + old_file_idx = self.meta.episodes[-1][f"videos/{video_key}/file_index"] + chunk_idx, file_idx = update_chunk_file_indices( + old_chunk_idx, old_file_idx, self.meta.chunks_size + ) + latest_duration_in_s = 0.0 + new_path = self.root / self.meta.video_path.format( + video_key=video_key, chunk_index=chunk_idx, file_index=file_idx + ) + new_path.parent.mkdir(parents=True, exist_ok=True) + shutil.move(str(ep_path), str(new_path)) + else: + # Retrieve information from the latest updated video file using latest_episode + latest_ep = self.meta.latest_episode + chunk_idx = latest_ep[f"videos/{video_key}/chunk_index"][0] + file_idx = latest_ep[f"videos/{video_key}/file_index"][0] + + latest_path = self.root / self.meta.video_path.format( + video_key=video_key, chunk_index=chunk_idx, file_index=file_idx + ) + latest_size_in_mb = get_file_size_in_mb(latest_path) + latest_duration_in_s = latest_ep[f"videos/{video_key}/to_timestamp"][0] + + if latest_size_in_mb + ep_size_in_mb >= self.meta.video_files_size_in_mb: + # Move temporary episode video to a new video file in the dataset + chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, self.meta.chunks_size) + new_path = self.root / self.meta.video_path.format( + video_key=video_key, chunk_index=chunk_idx, file_index=file_idx + ) + new_path.parent.mkdir(parents=True, exist_ok=True) + shutil.move(str(ep_path), str(new_path)) + latest_duration_in_s = 0.0 + else: + # Update latest video file + concatenate_video_files( + [latest_path, ep_path], + latest_path, + ) + + # Remove temporary directory + shutil.rmtree(str(ep_path.parent)) + + # Update video info (only needed when first episode is encoded since it reads from episode 0) + if episode_index == 0: + self.meta.update_video_info(video_key) + write_info(self.meta.info, self.meta.root) # ensure video info always written properly + + metadata = { + "episode_index": episode_index, + f"videos/{video_key}/chunk_index": chunk_idx, + f"videos/{video_key}/file_index": file_idx, + f"videos/{video_key}/from_timestamp": latest_duration_in_s, + f"videos/{video_key}/to_timestamp": latest_duration_in_s + ep_duration_in_s, + } + return metadata + + def clear_episode_buffer(self, delete_images: bool = True) -> None: + # Clean up image files for the current episode buffer + if delete_images: + # Wait for the async image writer to finish + if self.image_writer is not None: + self._wait_image_writer() + episode_index = self.episode_buffer["episode_index"] + if isinstance(episode_index, np.ndarray): + episode_index = episode_index.item() if episode_index.size == 1 else episode_index[0] + for cam_key in self.meta.camera_keys: + img_dir = self._get_image_file_dir(episode_index, cam_key) + if img_dir.is_dir(): + shutil.rmtree(img_dir) + + # Reset the buffer + self.episode_buffer = self.create_episode_buffer() + + def start_image_writer(self, num_processes: int = 0, num_threads: int = 4) -> None: + if isinstance(self.image_writer, AsyncImageWriter): + logging.warning( + "You are starting a new 
AsyncImageWriter that is replacing an already existing one in the dataset." + ) + + self.image_writer = AsyncImageWriter( + num_processes=num_processes, + num_threads=num_threads, + ) + + def stop_image_writer(self) -> None: + """ + Whenever wrapping this dataset inside a parallelized DataLoader, this needs to be called first to + remove the image_writer in order for the LeRobotDataset object to be pickleable and parallelized. + """ + if self.image_writer is not None: + self.image_writer.stop() + self.image_writer = None + + def _wait_image_writer(self) -> None: + """Wait for asynchronous image writer to finish.""" + if self.image_writer is not None: + self.image_writer.wait_until_done() + + def _encode_temporary_episode_video(self, video_key: str, episode_index: int) -> Path: + """ + Use ffmpeg to convert frames stored as png into mp4 videos. + Note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speedup encoding, + since video encoding with ffmpeg is already using multithreading. + """ + temp_path = Path(tempfile.mkdtemp(dir=self.root)) / f"{video_key}_{episode_index:03d}.mp4" + img_dir = self._get_image_file_dir(episode_index, video_key) + encode_video_frames(img_dir, temp_path, self.fps, overwrite=True) + shutil.rmtree(img_dir) + return temp_path + + @classmethod + def create( + cls, + repo_id: str, + fps: int, + features: dict, + root: str | Path | None = None, + robot_type: str | None = None, + use_videos: bool = True, + tolerance_s: float = 1e-4, + image_writer_processes: int = 0, + image_writer_threads: int = 0, + video_backend: str | None = None, + batch_encoding_size: int = 1, + ) -> "LeRobotDataset": + """Create a LeRobot Dataset from scratch in order to record data.""" + obj = cls.__new__(cls) + obj.meta = LeRobotDatasetMetadata.create( + repo_id=repo_id, + fps=fps, + robot_type=robot_type, + features=features, + root=root, + use_videos=use_videos, + ) + obj.repo_id = obj.meta.repo_id + obj.root = obj.meta.root + obj.revision = None + obj.tolerance_s = tolerance_s + obj.image_writer = None + obj.batch_encoding_size = batch_encoding_size + obj.episodes_since_last_encoding = 0 + + if image_writer_processes or image_writer_threads: + obj.start_image_writer(image_writer_processes, image_writer_threads) + + # TODO(aliberts, rcadene, alexander-soare): Merge this with OnlineBuffer/DataBuffer + obj.episode_buffer = obj.create_episode_buffer() + + obj.episodes = None + obj.hf_dataset = obj.create_hf_dataset() + obj.image_transforms = None + obj.delta_timestamps = None + obj.delta_indices = None + obj._absolute_to_relative_idx = None + obj.video_backend = video_backend if video_backend is not None else get_safe_default_codec() + obj.writer = None + obj.latest_episode = None + obj._current_file_start_frame = None + # Initialize tracking for incremental recording + obj._lazy_loading = False + obj._recorded_frames = 0 + obj._writer_closed_for_reading = False + return obj + + +class MultiLeRobotDataset(torch.utils.data.Dataset): + """A dataset consisting of multiple underlying `LeRobotDataset`s. + + The underlying `LeRobotDataset`s are effectively concatenated, and this class adopts much of the API + structure of `LeRobotDataset`. 
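Putting the recording API together, a hedged end-to-end sketch using `create()`, `add_frame()`, `save_episode()`, and `finalize()`; the repo id, feature specs, and dummy frame values are illustrative only.

```python
import numpy as np
from lerobot.datasets.lerobot_dataset import LeRobotDataset

# Hypothetical feature spec; real specs come from your robot/teleop configuration.
features = {
    "observation.state": {"dtype": "float32", "shape": (6,), "names": None},
    "action": {"dtype": "float32", "shape": (6,), "names": None},
    "observation.images.laptop": {"dtype": "video", "shape": (480, 640, 3), "names": ["height", "width", "channels"]},
}
ds = LeRobotDataset.create("user/my_dataset", fps=30, features=features, batch_encoding_size=1)

for _ in range(300):  # one 300-frame episode filled with dummy values
    ds.add_frame({
        "observation.state": np.zeros(6, dtype=np.float32),
        "observation.images.laptop": np.zeros((480, 640, 3), dtype=np.uint8),
        "action": np.zeros(6, dtype=np.float32),
        "task": "pick up the cube",
    })
ds.save_episode()   # writes parquet data + stats and encodes the episode video
ds.finalize()       # close the parquet writers so the dataset can be reloaded later
```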
+ """ + + def __init__( + self, + repo_ids: list[str], + root: str | Path | None = None, + episodes: dict | None = None, + image_transforms: Callable | None = None, + delta_timestamps: dict[str, list[float]] | None = None, + tolerances_s: dict | None = None, + download_videos: bool = True, + video_backend: str | None = None, + ): + super().__init__() + self.repo_ids = repo_ids + self.root = Path(root) if root else HF_LEROBOT_HOME + self.tolerances_s = tolerances_s if tolerances_s else dict.fromkeys(repo_ids, 0.0001) + # Construct the underlying datasets passing everything but `transform` and `delta_timestamps` which + # are handled by this class. + self._datasets = [ + LeRobotDataset( + repo_id, + root=self.root / repo_id, + episodes=episodes[repo_id] if episodes else None, + image_transforms=image_transforms, + delta_timestamps=delta_timestamps, + tolerance_s=self.tolerances_s[repo_id], + download_videos=download_videos, + video_backend=video_backend, + ) + for repo_id in repo_ids + ] + + # Disable any data keys that are not common across all of the datasets. Note: we may relax this + # restriction in future iterations of this class. For now, this is necessary at least for being able + # to use PyTorch's default DataLoader collate function. + self.disabled_features = set() + intersection_features = set(self._datasets[0].features) + for ds in self._datasets: + intersection_features.intersection_update(ds.features) + if len(intersection_features) == 0: + raise RuntimeError( + "Multiple datasets were provided but they had no keys common to all of them. " + "The multi-dataset functionality currently only keeps common keys." + ) + for repo_id, ds in zip(self.repo_ids, self._datasets, strict=True): + extra_keys = set(ds.features).difference(intersection_features) + logging.warning( + f"keys {extra_keys} of {repo_id} were disabled as they are not contained in all the " + "other datasets." + ) + self.disabled_features.update(extra_keys) + + self.image_transforms = image_transforms + self.delta_timestamps = delta_timestamps + # TODO(rcadene, aliberts): We should not perform this aggregation for datasets + # with multiple robots of different ranges. Instead we should have one normalization + # per robot. + self.stats = aggregate_stats([dataset.meta.stats for dataset in self._datasets]) + + @property + def repo_id_to_index(self): + """Return a mapping from dataset repo_id to a dataset index automatically created by this class. + + This index is incorporated as a data key in the dictionary returned by `__getitem__`. + """ + return {repo_id: i for i, repo_id in enumerate(self.repo_ids)} + + @property + def fps(self) -> int: + """Frames per second used during data collection. + + NOTE: Fow now, this relies on a check in __init__ to make sure all sub-datasets have the same info. + """ + return self._datasets[0].meta.info["fps"] + + @property + def video(self) -> bool: + """Returns True if this dataset loads video frames from mp4 files. + + Returns False if it only loads images from png files. + + NOTE: Fow now, this relies on a check in __init__ to make sure all sub-datasets have the same info. 
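A short usage sketch of `MultiLeRobotDataset`; the repo ids and `delta_timestamps` are placeholders. Only feature keys common to all sub-datasets are kept.

```python
multi = MultiLeRobotDataset(
    ["lerobot/pusht", "lerobot/aloha_sim_insertion_human"],
    delta_timestamps={"action": [0.0, 0.1]},
)
sample = multi[0]
sample["dataset_index"]   # tensor(0): index of the underlying dataset this item came from
```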
+ """ + return self._datasets[0].meta.info.get("video", False) + + @property + def features(self) -> datasets.Features: + features = {} + for dataset in self._datasets: + features.update({k: v for k, v in dataset.hf_features.items() if k not in self.disabled_features}) + return features + + @property + def camera_keys(self) -> list[str]: + """Keys to access image and video stream from cameras.""" + keys = [] + for key, feats in self.features.items(): + if isinstance(feats, (datasets.Image | VideoFrame)): + keys.append(key) + return keys + + @property + def video_frame_keys(self) -> list[str]: + """Keys to access video frames that requires to be decoded into images. + + Note: It is empty if the dataset contains images only, + or equal to `self.cameras` if the dataset contains videos only, + or can even be a subset of `self.cameras` in a case of a mixed image/video dataset. + """ + video_frame_keys = [] + for key, feats in self.features.items(): + if isinstance(feats, VideoFrame): + video_frame_keys.append(key) + return video_frame_keys + + @property + def num_frames(self) -> int: + """Number of samples/frames.""" + return sum(d.num_frames for d in self._datasets) + + @property + def num_episodes(self) -> int: + """Number of episodes.""" + return sum(d.num_episodes for d in self._datasets) + + @property + def tolerance_s(self) -> float: + """Tolerance in seconds used to discard loaded frames when their timestamps + are not close enough from the requested frames. It is only used when `delta_timestamps` + is provided or when loading video frames from mp4 files. + """ + # 1e-4 to account for possible numerical error + return 1 / self.fps - 1e-4 + + def __len__(self): + return self.num_frames + + def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: + if idx >= len(self): + raise IndexError(f"Index {idx} out of bounds.") + # Determine which dataset to get an item from based on the index. + start_idx = 0 + dataset_idx = 0 + for dataset in self._datasets: + if idx >= start_idx + dataset.num_frames: + start_idx += dataset.num_frames + dataset_idx += 1 + continue + break + else: + raise AssertionError("We expect the loop to break out as long as the index is within bounds.") + item = self._datasets[dataset_idx][idx - start_idx] + item["dataset_index"] = torch.tensor(dataset_idx) + for data_key in self.disabled_features: + if data_key in item: + del item[data_key] + + return item + + def __repr__(self): + return ( + f"{self.__class__.__name__}(\n" + f" Repository IDs: '{self.repo_ids}',\n" + f" Number of Samples: {self.num_frames},\n" + f" Number of Episodes: {self.num_episodes},\n" + f" Type: {'video (.mp4)' if self.video else 'image (.png)'},\n" + f" Recorded Frames per Second: {self.fps},\n" + f" Camera Keys: {self.camera_keys},\n" + f" Video Frame Keys: {self.video_frame_keys if self.video else 'N/A'},\n" + f" Transformations: {self.image_transforms},\n" + f")" + ) diff --git a/src/lerobot/datasets/online_buffer.py b/src/lerobot/datasets/online_buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..d7cc3a118a3f981f55fbb678c107094c2477deb5 --- /dev/null +++ b/src/lerobot/datasets/online_buffer.py @@ -0,0 +1,382 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""An online buffer for the online training loop in train.py + +Note to maintainers: This duplicates some logic from LeRobotDataset and EpisodeAwareSampler. We should +consider converging to one approach. Here we have opted to use numpy.memmap to back the data buffer. It's much +faster than using HuggingFace Datasets as there's no conversion to an intermediate non-python object. Also it +supports in-place slicing and mutation which is very handy for a dynamic buffer. +""" + +import os +from pathlib import Path +from typing import Any + +import numpy as np +import torch + +from lerobot.datasets.lerobot_dataset import LeRobotDataset + + +def _make_memmap_safe(**kwargs) -> np.memmap: + """Make a numpy memmap with checks on available disk space first. + + Expected kwargs are: "filename", "dtype" (must by np.dtype), "mode" and "shape" + + For information on dtypes: + https://numpy.org/doc/stable/reference/arrays.dtypes.html#arrays-dtypes-constructing + """ + if kwargs["mode"].startswith("w"): + required_space = kwargs["dtype"].itemsize * np.prod(kwargs["shape"]) # bytes + stats = os.statvfs(Path(kwargs["filename"]).parent) + available_space = stats.f_bavail * stats.f_frsize # bytes + if required_space >= available_space * 0.8: + raise RuntimeError( + f"You're about to take up {required_space} of {available_space} bytes available." + ) + return np.memmap(**kwargs) + + +class OnlineBuffer(torch.utils.data.Dataset): + """FIFO data buffer for the online training loop in train.py. + + Follows the protocol of LeRobotDataset as much as is required to have it be used by the online training + loop in the same way that a LeRobotDataset would be used. + + The underlying data structure will have data inserted in a circular fashion. Always insert after the + last index, and when you reach the end, wrap around to the start. + + The data is stored in a numpy memmap. + """ + + NEXT_INDEX_KEY = "_next_index" + OCCUPANCY_MASK_KEY = "_occupancy_mask" + INDEX_KEY = "index" + FRAME_INDEX_KEY = "frame_index" + EPISODE_INDEX_KEY = "episode_index" + TIMESTAMP_KEY = "timestamp" + IS_PAD_POSTFIX = "_is_pad" + + def __init__( + self, + write_dir: str | Path, + data_spec: dict[str, Any] | None, + buffer_capacity: int | None, + fps: float | None = None, + delta_timestamps: dict[str, list[float]] | dict[str, np.ndarray] | None = None, + ): + """ + The online buffer can be provided from scratch or you can load an existing online buffer by passing + a `write_dir` associated with an existing buffer. + + Args: + write_dir: Where to keep the numpy memmap files. One memmap file will be stored for each data key. + Note that if the files already exist, they are opened in read-write mode (used for training + resumption.) + data_spec: A mapping from data key to data specification, like {data_key: {"shape": tuple[int], + "dtype": np.dtype}}. This should include all the data that you wish to record into the buffer, + but note that "index", "frame_index" and "episode_index" are already accounted for by this + class, so you don't need to include them. 
+ buffer_capacity: How many frames should be stored in the buffer as a maximum. Be aware of your + system's available disk space when choosing this. + fps: Same as the fps concept in LeRobot dataset. Here it needs to be provided for the + delta_timestamps logic. You can pass None if you are not using delta_timestamps. + delta_timestamps: Same as the delta_timestamps concept in LeRobotDataset. This is internally + converted to dict[str, np.ndarray] for optimization purposes. + + """ + self.set_delta_timestamps(delta_timestamps) + self._fps = fps + # Tolerance in seconds used to discard loaded frames when their timestamps are not close enough from + # the requested frames. It is only used when `delta_timestamps` is provided. + # minus 1e-4 to account for possible numerical error + self.tolerance_s = 1 / self.fps - 1e-4 if fps is not None else None + self._buffer_capacity = buffer_capacity + data_spec = self._make_data_spec(data_spec, buffer_capacity) + Path(write_dir).mkdir(parents=True, exist_ok=True) + self._data = {} + for k, v in data_spec.items(): + self._data[k] = _make_memmap_safe( + filename=Path(write_dir) / k, + dtype=v["dtype"] if v is not None else None, + mode="r+" if (Path(write_dir) / k).exists() else "w+", + shape=tuple(v["shape"]) if v is not None else None, + ) + + @property + def delta_timestamps(self) -> dict[str, np.ndarray] | None: + return self._delta_timestamps + + def set_delta_timestamps(self, value: dict[str, list[float]] | None): + """Set delta_timestamps converting the values to numpy arrays. + + The conversion is for an optimization in the __getitem__. The loop is much slower if the arrays + need to be converted into numpy arrays. + """ + if value is not None: + self._delta_timestamps = {k: np.array(v) for k, v in value.items()} + else: + self._delta_timestamps = None + + def _make_data_spec(self, data_spec: dict[str, Any], buffer_capacity: int) -> dict[str, dict[str, Any]]: + """Makes the data spec for np.memmap.""" + if any(k.startswith("_") for k in data_spec): + raise ValueError( + "data_spec keys should not start with '_'. This prefix is reserved for internal logic." + ) + preset_keys = { + OnlineBuffer.INDEX_KEY, + OnlineBuffer.FRAME_INDEX_KEY, + OnlineBuffer.EPISODE_INDEX_KEY, + OnlineBuffer.TIMESTAMP_KEY, + } + if len(intersection := set(data_spec).intersection(preset_keys)) > 0: + raise ValueError( + f"data_spec should not contain any of {preset_keys} as these are handled internally. " + f"The provided data_spec has {intersection}." + ) + complete_data_spec = { + # _next_index will be a pointer to the next index that we should start filling from when we add + # more data. + OnlineBuffer.NEXT_INDEX_KEY: {"dtype": np.dtype("int64"), "shape": ()}, + # Since the memmap is initialized with all-zeros, this keeps track of which indices are occupied + # with real data rather than the dummy initialization. 
+ OnlineBuffer.OCCUPANCY_MASK_KEY: {"dtype": np.dtype("?"), "shape": (buffer_capacity,)}, + OnlineBuffer.INDEX_KEY: {"dtype": np.dtype("int64"), "shape": (buffer_capacity,)}, + OnlineBuffer.FRAME_INDEX_KEY: {"dtype": np.dtype("int64"), "shape": (buffer_capacity,)}, + OnlineBuffer.EPISODE_INDEX_KEY: {"dtype": np.dtype("int64"), "shape": (buffer_capacity,)}, + OnlineBuffer.TIMESTAMP_KEY: {"dtype": np.dtype("float64"), "shape": (buffer_capacity,)}, + } + for k, v in data_spec.items(): + complete_data_spec[k] = {"dtype": v["dtype"], "shape": (buffer_capacity, *v["shape"])} + return complete_data_spec + + def add_data(self, data: dict[str, np.ndarray]): + """Add new data to the buffer, which could potentially mean shifting old data out. + + The new data should contain all the frames (in order) of any number of episodes. The indices should + start from 0 (note to the developer: this can easily be generalized). See the `rollout` and + `eval_policy` functions in `eval.py` for more information on how the data is constructed. + + Shift the incoming data index and episode_index to continue on from the last frame. Note that this + will be done in place! + """ + if len(missing_keys := (set(self.data_keys).difference(set(data)))) > 0: + raise ValueError(f"Missing data keys: {missing_keys}") + new_data_length = len(data[self.data_keys[0]]) + if not all(len(data[k]) == new_data_length for k in self.data_keys): + raise ValueError("All data items should have the same length") + + next_index = self._data[OnlineBuffer.NEXT_INDEX_KEY] + + # Sanity check to make sure that the new data indices start from 0. + assert data[OnlineBuffer.EPISODE_INDEX_KEY][0].item() == 0 + assert data[OnlineBuffer.INDEX_KEY][0].item() == 0 + + # Shift the incoming indices if necessary. + if self.num_frames > 0: + last_episode_index = self._data[OnlineBuffer.EPISODE_INDEX_KEY][next_index - 1] + last_data_index = self._data[OnlineBuffer.INDEX_KEY][next_index - 1] + data[OnlineBuffer.EPISODE_INDEX_KEY] += last_episode_index + 1 + data[OnlineBuffer.INDEX_KEY] += last_data_index + 1 + + # Insert the new data starting from next_index. It may be necessary to wrap around to the start. 
+ n_surplus = max(0, new_data_length - (self._buffer_capacity - next_index)) + for k in self.data_keys: + if n_surplus == 0: + slc = slice(next_index, next_index + new_data_length) + self._data[k][slc] = data[k] + self._data[OnlineBuffer.OCCUPANCY_MASK_KEY][slc] = True + else: + self._data[k][next_index:] = data[k][:-n_surplus] + self._data[OnlineBuffer.OCCUPANCY_MASK_KEY][next_index:] = True + self._data[k][:n_surplus] = data[k][-n_surplus:] + if n_surplus == 0: + self._data[OnlineBuffer.NEXT_INDEX_KEY] = next_index + new_data_length + else: + self._data[OnlineBuffer.NEXT_INDEX_KEY] = n_surplus + + @property + def data_keys(self) -> list[str]: + keys = set(self._data) + keys.remove(OnlineBuffer.OCCUPANCY_MASK_KEY) + keys.remove(OnlineBuffer.NEXT_INDEX_KEY) + return sorted(keys) + + @property + def fps(self) -> float | None: + return self._fps + + @property + def num_episodes(self) -> int: + return len( + np.unique(self._data[OnlineBuffer.EPISODE_INDEX_KEY][self._data[OnlineBuffer.OCCUPANCY_MASK_KEY]]) + ) + + @property + def num_frames(self) -> int: + return np.count_nonzero(self._data[OnlineBuffer.OCCUPANCY_MASK_KEY]) + + def __len__(self): + return self.num_frames + + def _item_to_tensors(self, item: dict) -> dict: + item_ = {} + for k, v in item.items(): + if isinstance(v, torch.Tensor): + item_[k] = v + elif isinstance(v, np.ndarray): + item_[k] = torch.from_numpy(v) + else: + item_[k] = torch.tensor(v) + return item_ + + def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: + if idx >= len(self) or idx < -len(self): + raise IndexError + + item = {k: v[idx] for k, v in self._data.items() if not k.startswith("_")} + + if self.delta_timestamps is None: + return self._item_to_tensors(item) + + episode_index = item[OnlineBuffer.EPISODE_INDEX_KEY] + current_ts = item[OnlineBuffer.TIMESTAMP_KEY] + episode_data_indices = np.where( + np.bitwise_and( + self._data[OnlineBuffer.EPISODE_INDEX_KEY] == episode_index, + self._data[OnlineBuffer.OCCUPANCY_MASK_KEY], + ) + )[0] + episode_timestamps = self._data[OnlineBuffer.TIMESTAMP_KEY][episode_data_indices] + + for data_key in self.delta_timestamps: + # Note: The logic in this loop is copied from `load_previous_and_future_frames`. + # Get timestamps used as query to retrieve data of previous/future frames. + query_ts = current_ts + self.delta_timestamps[data_key] + + # Compute distances between each query timestamp and all timestamps of all the frames belonging to + # the episode. + dist = np.abs(query_ts[:, None] - episode_timestamps[None, :]) + argmin_ = np.argmin(dist, axis=1) + min_ = dist[np.arange(dist.shape[0]), argmin_] + + is_pad = min_ > self.tolerance_s + + # Check violated query timestamps are all outside the episode range. + assert ( + (query_ts[is_pad] < episode_timestamps[0]) | (episode_timestamps[-1] < query_ts[is_pad]) + ).all(), ( + f"One or several timestamps unexpectedly violate the tolerance ({min_} > {self.tolerance_s=}" + ") inside the episode range." + ) + + # Load frames for this data key. 
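A compact usage sketch of `OnlineBuffer` covering construction, `add_data`, and indexing; the directory, shapes, and dtypes are illustrative.

```python
import numpy as np

buffer = OnlineBuffer(
    write_dir="/tmp/online_buffer",
    data_spec={
        "observation.state": {"shape": (6,), "dtype": np.dtype("float32")},
        "action": {"shape": (6,), "dtype": np.dtype("float32")},
    },
    buffer_capacity=10_000,
    fps=10,
)

n = 100  # one 100-frame episode; indices must start at 0, as asserted in add_data()
buffer.add_data({
    "index": np.arange(n),
    "frame_index": np.arange(n),
    "episode_index": np.zeros(n, dtype=np.int64),
    "timestamp": np.arange(n) / 10.0,
    "observation.state": np.zeros((n, 6), dtype=np.float32),
    "action": np.zeros((n, 6), dtype=np.float32),
})
len(buffer)        # 100
frame = buffer[0]  # dict of torch tensors
```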
+ item[data_key] = self._data[data_key][episode_data_indices[argmin_]] + + item[f"{data_key}{OnlineBuffer.IS_PAD_POSTFIX}"] = is_pad + + return self._item_to_tensors(item) + + def get_data_by_key(self, key: str) -> torch.Tensor: + """Returns all data for a given data key as a Tensor.""" + return torch.from_numpy(self._data[key][self._data[OnlineBuffer.OCCUPANCY_MASK_KEY]]) + + +def compute_sampler_weights( + offline_dataset: LeRobotDataset, + offline_drop_n_last_frames: int = 0, + online_dataset: OnlineBuffer | None = None, + online_sampling_ratio: float | None = None, + online_drop_n_last_frames: int = 0, +) -> torch.Tensor: + """Compute the sampling weights for the online training dataloader in train.py. + + Args: + offline_dataset: The LeRobotDataset used for offline pre-training. + online_drop_n_last_frames: Number of frames to drop from the end of each offline dataset episode. + online_dataset: The OnlineBuffer used in online training. + online_sampling_ratio: The proportion of data that should be sampled from the online dataset. If an + online dataset is provided, this value must also be provided. + online_drop_n_first_frames: See `offline_drop_n_last_frames`. This is the same, but for the online + dataset. + Returns: + Tensor of weights for [offline_dataset; online_dataset], normalized to 1. + + Notes to maintainers: + - This duplicates some logic from EpisodeAwareSampler. We should consider converging to one approach. + - When used with `torch.utils.data.WeightedRandomSampler`, it could completely replace + `EpisodeAwareSampler` as the online dataset related arguments are optional. The only missing feature + is the ability to turn shuffling off. + - Options `drop_first_n_frames` and `episode_indices_to_use` can be added easily. They were not + included here to avoid adding complexity. + """ + if len(offline_dataset) == 0 and (online_dataset is None or len(online_dataset) == 0): + raise ValueError("At least one of `offline_dataset` or `online_dataset` should be contain data.") + if (online_dataset is None) ^ (online_sampling_ratio is None): + raise ValueError( + "`online_dataset` and `online_sampling_ratio` must be provided together or not at all." 
+ ) + offline_sampling_ratio = 0 if online_sampling_ratio is None else 1 - online_sampling_ratio + + weights = [] + + if len(offline_dataset) > 0: + offline_data_mask_indices = [] + for start_index, end_index in zip( + offline_dataset.meta.episodes["dataset_from_index"], + offline_dataset.meta.episodes["dataset_to_index"], + strict=True, + ): + offline_data_mask_indices.extend(range(start_index, end_index - offline_drop_n_last_frames)) + offline_data_mask = torch.zeros(len(offline_dataset), dtype=torch.bool) + offline_data_mask[torch.tensor(offline_data_mask_indices)] = True + weights.append( + torch.full( + size=(len(offline_dataset),), + fill_value=offline_sampling_ratio / offline_data_mask.sum(), + ) + * offline_data_mask + ) + + if online_dataset is not None and len(online_dataset) > 0: + online_data_mask_indices = [] + episode_indices = online_dataset.get_data_by_key("episode_index") + for episode_idx in torch.unique(episode_indices): + where_episode = torch.where(episode_indices == episode_idx) + start_index = where_episode[0][0] + end_index = where_episode[0][-1] + 1 + online_data_mask_indices.extend( + range(start_index.item(), end_index.item() - online_drop_n_last_frames) + ) + online_data_mask = torch.zeros(len(online_dataset), dtype=torch.bool) + online_data_mask[torch.tensor(online_data_mask_indices)] = True + weights.append( + torch.full( + size=(len(online_dataset),), + fill_value=online_sampling_ratio / online_data_mask.sum(), + ) + * online_data_mask + ) + + weights = torch.cat(weights) + + if weights.sum() == 0: + weights += 1 / len(weights) + else: + weights /= weights.sum() + + return weights diff --git a/src/lerobot/datasets/pipeline_features.py b/src/lerobot/datasets/pipeline_features.py new file mode 100644 index 0000000000000000000000000000000000000000..6d9a21e2c4453fcc5e3ddb8374e25f66efbfade4 --- /dev/null +++ b/src/lerobot/datasets/pipeline_features.py @@ -0,0 +1,139 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from collections.abc import Sequence +from typing import Any + +from lerobot.configs.types import PipelineFeatureType +from lerobot.datasets.utils import hw_to_dataset_features +from lerobot.processor import DataProcessorPipeline +from lerobot.utils.constants import ACTION, OBS_IMAGES, OBS_STATE, OBS_STR + + +def create_initial_features( + action: dict[str, Any] | None = None, observation: dict[str, Any] | None = None +) -> dict[PipelineFeatureType, dict[str, Any]]: + """ + Creates the initial features dict for the dataset from action and observation specs. + + Args: + action: A dictionary of action feature names to their types/shapes. + observation: A dictionary of observation feature names to their types/shapes. + + Returns: + The initial features dictionary structured by PipelineFeatureType. 
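+
+    Example (illustrative sketch; the feature names and shapes below are placeholders):
+        >>> feats = create_initial_features(
+        ...     action={"shoulder_pan.pos": float},
+        ...     observation={"front": (480, 640, 3)},
+        ... )
+        >>> feats[PipelineFeatureType.ACTION]
+        {'shoulder_pan.pos': <class 'float'>}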
+ """ + features = {PipelineFeatureType.ACTION: {}, PipelineFeatureType.OBSERVATION: {}} + if action: + features[PipelineFeatureType.ACTION] = action + if observation: + features[PipelineFeatureType.OBSERVATION] = observation + return features + + +# Helper to filter state/action keys based on regex patterns. +def should_keep(key: str, patterns: tuple[str]) -> bool: + if patterns is None: + return True + return any(re.search(pat, key) for pat in patterns) + + +def strip_prefix(key: str, prefixes_to_strip: tuple[str]) -> str: + for prefix in prefixes_to_strip: + if key.startswith(prefix): + return key[len(prefix) :] + return key + + +# Define prefixes to strip from feature keys for clean names. +# Handles both fully qualified (e.g., "action.state") and short (e.g., "state") forms. +PREFIXES_TO_STRIP = tuple( + f"{token}." for const in (ACTION, OBS_STATE, OBS_IMAGES) for token in (const, const.split(".")[-1]) +) + + +def aggregate_pipeline_dataset_features( + pipeline: DataProcessorPipeline, + initial_features: dict[PipelineFeatureType, dict[str, Any]], + *, + use_videos: bool = True, + patterns: Sequence[str] | None = None, +) -> dict[str, dict]: + """ + Aggregates and filters pipeline features to create a dataset-ready features dictionary. + + This function transforms initial features using the pipeline, categorizes them as action or observations + (image or state), filters them based on `use_videos` and `patterns`, and finally + formats them for use with a Hugging Face LeRobot Dataset. + + Args: + pipeline: The DataProcessorPipeline to apply. + initial_features: A dictionary of raw feature specs for actions and observations. + use_videos: If False, image features are excluded. + patterns: A sequence of regex patterns to filter action and state features. + Image features are not affected by this filter. + + Returns: + A dictionary of features formatted for a Hugging Face LeRobot Dataset. + """ + all_features = pipeline.transform_features(initial_features) + + # Intermediate storage for categorized and filtered features. + processed_features: dict[str, dict[str, Any]] = { + ACTION: {}, + OBS_STR: {}, + } + images_token = OBS_IMAGES.split(".")[-1] + + # Iterate through all features transformed by the pipeline. + for ptype, feats in all_features.items(): + if ptype not in [PipelineFeatureType.ACTION, PipelineFeatureType.OBSERVATION]: + continue + + for key, value in feats.items(): + # 1. Categorize the feature. + is_action = ptype == PipelineFeatureType.ACTION + # Observations are classified as images if their key matches image-related tokens or if the shape of the feature is 3. + # All other observations are treated as state. + is_image = not is_action and ( + (isinstance(value, tuple) and len(value) == 3) + or ( + key.startswith(f"{OBS_IMAGES}.") + or key.startswith(f"{images_token}.") + or f".{images_token}." in key + ) + ) + + # 2. Apply filtering rules. + if is_image and not use_videos: + continue + if not is_image and not should_keep(key, patterns): + continue + + # 3. Add the feature to the appropriate group with a clean name. + name = strip_prefix(key, PREFIXES_TO_STRIP) + if is_action: + processed_features[ACTION][name] = value + else: + processed_features[OBS_STR][name] = value + + # Convert the processed features into the final dataset format. 
+ dataset_features = {} + if processed_features[ACTION]: + dataset_features.update(hw_to_dataset_features(processed_features[ACTION], ACTION, use_videos)) + if processed_features[OBS_STR]: + dataset_features.update(hw_to_dataset_features(processed_features[OBS_STR], OBS_STR, use_videos)) + + return dataset_features diff --git a/src/lerobot/datasets/push_dataset_to_hub/utils.py b/src/lerobot/datasets/push_dataset_to_hub/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..970196d378b033c34b56a6750a87f3611ba3f968 --- /dev/null +++ b/src/lerobot/datasets/push_dataset_to_hub/utils.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datasets +import torch + + +# TODO(aliberts): remove +def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> dict[str, torch.Tensor]: + """ + Calculate episode data index for the provided HuggingFace Dataset. Relies on episode_index column of hf_dataset. + + Parameters: + - hf_dataset (datasets.Dataset): A HuggingFace dataset containing the episode index. + + Returns: + - episode_data_index: A dictionary containing the data index for each episode. The dictionary has two keys: + - "from": A tensor containing the starting index of each episode. + - "to": A tensor containing the ending index of each episode. + """ + episode_data_index = {"from": [], "to": []} + + current_episode = None + """ + The episode_index is a list of integers, each representing the episode index of the corresponding example. + For instance, the following is a valid episode_index: + [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2] + + Below, we iterate through the episode_index and populate the episode_data_index dictionary with the starting and + ending index of each episode. 
For the episode_index above, the episode_data_index dictionary will look like this: + { + "from": [0, 3, 7], + "to": [3, 7, 12] + } + """ + if len(hf_dataset) == 0: + episode_data_index = { + "from": torch.tensor([]), + "to": torch.tensor([]), + } + return episode_data_index + for idx, episode_idx in enumerate(hf_dataset["episode_index"]): + if episode_idx != current_episode: + # We encountered a new episode, so we append its starting location to the "from" list + episode_data_index["from"].append(idx) + # If this is not the first episode, we append the ending location of the previous episode to the "to" list + if current_episode is not None: + episode_data_index["to"].append(idx) + # Let's keep track of the current episode index + current_episode = episode_idx + else: + # We are still in the same episode, so there is nothing for us to do here + pass + # We have reached the end of the dataset, so we append the ending location of the last episode to the "to" list + episode_data_index["to"].append(idx + 1) + + for k in ["from", "to"]: + episode_data_index[k] = torch.tensor(episode_data_index[k]) + + return episode_data_index diff --git a/src/lerobot/datasets/sampler.py b/src/lerobot/datasets/sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..816559fbd20d31f60d9d3730d6a2b030733a35b6 --- /dev/null +++ b/src/lerobot/datasets/sampler.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections.abc import Iterator + +import torch + + +class EpisodeAwareSampler: + def __init__( + self, + dataset_from_indices: list[int], + dataset_to_indices: list[int], + episode_indices_to_use: list | None = None, + drop_n_first_frames: int = 0, + drop_n_last_frames: int = 0, + shuffle: bool = False, + ): + """Sampler that optionally incorporates episode boundary information. + + Args: + dataset_from_indices: List of indices containing the start of each episode in the dataset. + dataset_to_indices: List of indices containing the end of each episode in the dataset. + episode_indices_to_use: List of episode indices to use. If None, all episodes are used. + Assumes that episodes are indexed from 0 to N-1. + drop_n_first_frames: Number of frames to drop from the start of each episode. + drop_n_last_frames: Number of frames to drop from the end of each episode. + shuffle: Whether to shuffle the indices. 
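+
+        Example (illustrative sketch; the episode boundary lists and the dataset are placeholders):
+            >>> sampler = EpisodeAwareSampler(
+            ...     dataset_from_indices=[0, 100],
+            ...     dataset_to_indices=[100, 180],
+            ...     drop_n_last_frames=7,
+            ...     shuffle=True,
+            ... )
+            >>> # dataloader = torch.utils.data.DataLoader(dataset, sampler=sampler, batch_size=32)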
+ """ + indices = [] + for episode_idx, (start_index, end_index) in enumerate( + zip(dataset_from_indices, dataset_to_indices, strict=True) + ): + if episode_indices_to_use is None or episode_idx in episode_indices_to_use: + indices.extend(range(start_index + drop_n_first_frames, end_index - drop_n_last_frames)) + + self.indices = indices + self.shuffle = shuffle + + def __iter__(self) -> Iterator[int]: + if self.shuffle: + for i in torch.randperm(len(self.indices)): + yield self.indices[i] + else: + for i in self.indices: + yield i + + def __len__(self) -> int: + return len(self.indices) diff --git a/src/lerobot/datasets/streaming_dataset.py b/src/lerobot/datasets/streaming_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..85b2af30dbaf461e567015cc6c2cad5b131a063e --- /dev/null +++ b/src/lerobot/datasets/streaming_dataset.py @@ -0,0 +1,533 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections.abc import Callable, Generator, Iterator +from pathlib import Path + +import datasets +import numpy as np +import torch +from datasets import load_dataset + +from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDatasetMetadata +from lerobot.datasets.utils import ( + Backtrackable, + LookAheadError, + LookBackError, + check_version_compatibility, + find_float_index, + get_delta_indices, + is_float_in_list, + item_to_torch, + safe_shard, +) +from lerobot.datasets.video_utils import ( + VideoDecoderCache, + decode_video_frames_torchcodec, +) +from lerobot.utils.constants import HF_LEROBOT_HOME, LOOKAHEAD_BACKTRACKTABLE, LOOKBACK_BACKTRACKTABLE + + +class StreamingLeRobotDataset(torch.utils.data.IterableDataset): + """LeRobotDataset with streaming capabilities. + + This class extends LeRobotDataset to add streaming functionality, allowing data to be streamed + rather than loaded entirely into memory. This is especially useful for large datasets that may + not fit in memory or when you want to quickly explore a dataset without downloading it completely. + + The key innovation is using a Backtrackable iterator that maintains a bounded buffer of recent + items, allowing us to access previous frames for delta timestamps without loading the entire + dataset into memory. 
+
+    Example:
+        Basic usage:
+        ```python
+        from lerobot.datasets.streaming_dataset import StreamingLeRobotDataset
+
+        # Create a streaming dataset with delta timestamps
+        delta_timestamps = {
+            "observation.image": [-1.0, -0.5, 0.0],  # 1 sec ago, 0.5 sec ago, current
+            "action": [0.0, 0.1, 0.2],  # current, 0.1 sec future, 0.2 sec future
+        }
+
+        dataset = StreamingLeRobotDataset(
+            repo_id="your-dataset-repo-id",
+            delta_timestamps=delta_timestamps,
+            streaming=True,
+            buffer_size=1000,
+        )
+
+        # Iterate over the dataset
+        for i, item in enumerate(dataset):
+            print(f"Sample {i}: Episode {item['episode_index']} Frame {item['frame_index']}")
+            # item will contain stacked frames according to delta_timestamps
+            if i >= 10:
+                break
+        ```
+    """
+
+    def __init__(
+        self,
+        repo_id: str,
+        root: str | Path | None = None,
+        episodes: list[int] | None = None,
+        image_transforms: Callable | None = None,
+        delta_timestamps: dict[str, list[float]] | None = None,
+        tolerance_s: float = 1e-4,
+        revision: str | None = None,
+        force_cache_sync: bool = False,
+        streaming: bool = True,
+        buffer_size: int = 1000,
+        max_num_shards: int = 16,
+        seed: int = 42,
+        rng: np.random.Generator | None = None,
+        shuffle: bool = True,
+    ):
+        """Initialize a StreamingLeRobotDataset.
+
+        Args:
+            repo_id (str): Repository ID of the dataset to fetch (from the Hugging Face Hub, unless `root`
+                points to a local copy).
+            root (Path | None, optional): Local directory to use for downloading/writing files.
+            episodes (list[int] | None, optional): If specified, this will only load episodes specified by
+                their episode_index in this list.
+            image_transforms (Callable | None, optional): Transform to apply to image data.
+            delta_timestamps (dict[str, list[float]] | None, optional): Mapping from feature keys to lists of
+                relative timestamps (in seconds) used to retrieve past/future frames for each sample.
+            tolerance_s (float, optional): Tolerance in seconds for timestamp matching.
+            revision (str, optional): Git revision id (branch name, tag, or commit hash).
+            force_cache_sync (bool, optional): Flag to sync and refresh local files first.
+            streaming (bool, optional): Whether to stream the dataset or load it all. Defaults to True.
+            buffer_size (int, optional): Buffer size for shuffling when streaming. Defaults to 1000.
+            max_num_shards (int, optional): Number of shards to re-shard the input dataset into. Defaults to 16.
+            seed (int, optional): Random seed for reproducibility.
+            rng (np.random.Generator | None, optional): Random number generator.
+            shuffle (bool, optional): Whether to shuffle the dataset across exhaustions. Defaults to True.
+ """ + super().__init__() + self.repo_id = repo_id + self.root = Path(root) if root else HF_LEROBOT_HOME / repo_id + self.streaming_from_local = root is not None + + self.image_transforms = image_transforms + self.episodes = episodes + self.tolerance_s = tolerance_s + self.revision = revision if revision else CODEBASE_VERSION + self.seed = seed + self.rng = rng if rng is not None else np.random.default_rng(seed) + self.shuffle = shuffle + + self.streaming = streaming + self.buffer_size = buffer_size + + # We cache the video decoders to avoid re-initializing them at each frame (avoiding a ~10x slowdown) + self.video_decoder_cache = None + + self.root.mkdir(exist_ok=True, parents=True) + + # Load metadata + self.meta = LeRobotDatasetMetadata( + self.repo_id, self.root, self.revision, force_cache_sync=force_cache_sync + ) + # Check version + check_version_compatibility(self.repo_id, self.meta._version, CODEBASE_VERSION) + + self.delta_timestamps = None + self.delta_indices = None + + if delta_timestamps is not None: + self._validate_delta_timestamp_keys(delta_timestamps) # raises ValueError if invalid + self.delta_timestamps = delta_timestamps + self.delta_indices = get_delta_indices(self.delta_timestamps, self.fps) + + self.hf_dataset: datasets.IterableDataset = load_dataset( + self.repo_id if not self.streaming_from_local else str(self.root), + split="train", + streaming=self.streaming, + data_files="data/*/*.parquet", + revision=self.revision, + ) + + self.num_shards = min(self.hf_dataset.num_shards, max_num_shards) + + @property + def num_frames(self): + return self.meta.total_frames + + @property + def num_episodes(self): + return self.meta.total_episodes + + @property + def fps(self): + return self.meta.fps + + @staticmethod + def _iter_random_indices( + rng: np.random.Generator, buffer_size: int, random_batch_size=100 + ) -> Iterator[int]: + while True: + yield from (int(i) for i in rng.integers(0, buffer_size, size=random_batch_size)) + + @staticmethod + def _infinite_generator_over_elements(rng: np.random.Generator, elements: list[int]) -> Iterator[int]: + while True: + yield rng.choice(elements) + + # TODO(fracapuano): Implement multi-threaded prefetching to accelerate data loading. + # The current sequential iteration is a bottleneck. A producer-consumer pattern + # could be used with a ThreadPoolExecutor to run `make_frame` (especially video decoding) + # in parallel, feeding a queue from which this iterator will yield processed items. 
+ def __iter__(self) -> Iterator[dict[str, torch.Tensor]]: + if self.video_decoder_cache is None: + self.video_decoder_cache = VideoDecoderCache() + + # keep the same seed across exhaustions if shuffle is False, otherwise shuffle data across exhaustions + rng = np.random.default_rng(self.seed) if not self.shuffle else self.rng + + buffer_indices_generator = self._iter_random_indices(rng, self.buffer_size) + + idx_to_backtrack_dataset = { + idx: self._make_backtrackable_dataset(safe_shard(self.hf_dataset, idx, self.num_shards)) + for idx in range(self.num_shards) + } + + # This buffer is populated while iterating on the dataset's shards + # the logic is to add 2 levels of randomness: + # (1) sample one shard at random from the ones available, and + # (2) sample one frame from the shard sampled at (1) + frames_buffer = [] + while available_shards := list(idx_to_backtrack_dataset.keys()): + shard_key = next(self._infinite_generator_over_elements(rng, available_shards)) + backtrack_dataset = idx_to_backtrack_dataset[shard_key] # selects which shard to iterate on + + try: + for frame in self.make_frame(backtrack_dataset): + if len(frames_buffer) == self.buffer_size: + i = next(buffer_indices_generator) # samples a element from the buffer + yield frames_buffer[i] + frames_buffer[i] = frame + else: + frames_buffer.append(frame) + break # random shard sampled, switch shard + except ( + RuntimeError, + StopIteration, + ): # NOTE: StopIteration inside a generator throws a RuntimeError since python 3.7 + del idx_to_backtrack_dataset[shard_key] # Remove exhausted shard, onto another shard + + # Once shards are all exhausted, shuffle the buffer and yield the remaining frames + rng.shuffle(frames_buffer) + yield from frames_buffer + + def _get_window_steps( + self, delta_timestamps: dict[str, list[float]] | None = None, dynamic_bounds: bool = False + ) -> tuple[int, int]: + if delta_timestamps is None: + return 1, 1 + + if not dynamic_bounds: + # Fix the windows + lookback = LOOKBACK_BACKTRACKTABLE + lookahead = LOOKAHEAD_BACKTRACKTABLE + else: + # Dynamically adjust the windows based on the given delta_timesteps + all_timestamps = sum(delta_timestamps.values(), []) + lookback = min(all_timestamps) * self.fps + lookahead = max(all_timestamps) * self.fps + + # When lookback is >=0 it means no negative timesteps have been provided + lookback = 0 if lookback >= 0 else (lookback * -1) + + return lookback, lookahead + + def _make_backtrackable_dataset(self, dataset: datasets.IterableDataset) -> Backtrackable: + lookback, lookahead = self._get_window_steps(self.delta_timestamps) + return Backtrackable(dataset, history=lookback, lookahead=lookahead) + + def _make_timestamps_from_indices( + self, start_ts: float, indices: dict[str, list[int]] | None = None + ) -> dict[str, list[float]]: + if indices is not None: + return { + key: ( + start_ts + torch.tensor(indices[key]) / self.fps + ).tolist() # NOTE: why not delta_timestamps directly? 
+ for key in self.delta_timestamps + } + else: + return dict.fromkeys(self.meta.video_keys, [start_ts]) + + def _make_padding_camera_frame(self, camera_key: str): + """Variable-shape padding frame for given camera keys, given in (H, W, C)""" + return torch.zeros(self.meta.info["features"][camera_key]["shape"]).permute(-1, 0, 1) + + def _get_video_frame_padding_mask( + self, + video_frames: dict[str, torch.Tensor], + query_timestamps: dict[str, list[float]], + original_timestamps: dict[str, list[float]], + ) -> dict[str, torch.BoolTensor]: + padding_mask = {} + + for video_key, timestamps in original_timestamps.items(): + if video_key not in video_frames: + continue # only padding on video keys that are available + frames = [] + mask = [] + padding_frame = self._make_padding_camera_frame(video_key) + for ts in timestamps: + if is_float_in_list(ts, query_timestamps[video_key]): + idx = find_float_index(ts, query_timestamps[video_key]) + frames.append(video_frames[video_key][idx, :]) + mask.append(False) + else: + frames.append(padding_frame) + mask.append(True) + + padding_mask[f"{video_key}_is_pad"] = torch.BoolTensor(mask) + + return padding_mask + + def make_frame(self, dataset_iterator: Backtrackable) -> Generator: + """Makes a frame starting from a dataset iterator""" + item = next(dataset_iterator) + item = item_to_torch(item) + + updates = [] # list of "updates" to apply to the item retrieved from hf_dataset (w/o camera features) + + # Get episode index from the item + ep_idx = item["episode_index"] + + # "timestamp" restarts from 0 for each episode, whereas we need a global timestep within the single .mp4 file (given by index/fps) + current_ts = item["index"] / self.fps + + episode_boundaries_ts = { + key: ( + self.meta.episodes[ep_idx][f"videos/{key}/from_timestamp"], + self.meta.episodes[ep_idx][f"videos/{key}/to_timestamp"], + ) + for key in self.meta.video_keys + } + + # Apply delta querying logic if necessary + if self.delta_indices is not None: + query_result, padding = self._get_delta_frames(dataset_iterator, item) + updates.append(query_result) + updates.append(padding) + + # Load video frames, when needed + if len(self.meta.video_keys) > 0: + original_timestamps = self._make_timestamps_from_indices(current_ts, self.delta_indices) + + # Some timestamps might not result available considering the episode's boundaries + query_timestamps = self._get_query_timestamps( + current_ts, self.delta_indices, episode_boundaries_ts + ) + video_frames = self._query_videos(query_timestamps, ep_idx) + + if self.image_transforms is not None: + image_keys = self.meta.camera_keys + for cam in image_keys: + video_frames[cam] = self.image_transforms(video_frames[cam]) + + updates.append(video_frames) + + if self.delta_indices is not None: + # We always return the same number of frames. Unavailable frames are padded. 
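+                # Query timestamps that had to be clamped to the episode boundaries are flagged in
+                # `<video_key>_is_pad`, so consumers can tell real frames from padded ones.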
+ padding_mask = self._get_video_frame_padding_mask( + video_frames, query_timestamps, original_timestamps + ) + updates.append(padding_mask) + + result = item.copy() + for update in updates: + result.update(update) + + result["task"] = self.meta.tasks.iloc[item["task_index"]].name + + yield result + + def _get_query_timestamps( + self, + current_ts: float, + query_indices: dict[str, list[int]] | None = None, + episode_boundaries_ts: dict[str, tuple[float, float]] | None = None, + ) -> dict[str, list[float]]: + query_timestamps = {} + keys_to_timestamps = self._make_timestamps_from_indices(current_ts, query_indices) + for key in self.meta.video_keys: + if query_indices is not None and key in query_indices: + timestamps = keys_to_timestamps[key] + # Clamp out timesteps outside of episode boundaries + query_timestamps[key] = torch.clamp( + torch.tensor(timestamps), *episode_boundaries_ts[key] + ).tolist() + + else: + query_timestamps[key] = [current_ts] + + return query_timestamps + + def _query_videos(self, query_timestamps: dict[str, list[float]], ep_idx: int) -> dict: + """Note: When using data workers (e.g. DataLoader with num_workers>0), do not call this function + in the main process (e.g. by using a second Dataloader with num_workers=0). It will result in a + Segmentation Fault. This probably happens because a memory reference to the video loader is created in + the main process and a subprocess fails to access it. + """ + + item = {} + for video_key, query_ts in query_timestamps.items(): + root = self.meta.url_root if self.streaming and not self.streaming_from_local else self.root + video_path = f"{root}/{self.meta.get_video_file_path(ep_idx, video_key)}" + frames = decode_video_frames_torchcodec( + video_path, query_ts, self.tolerance_s, decoder_cache=self.video_decoder_cache + ) + + item[video_key] = frames.squeeze(0) if len(query_ts) == 1 else frames + + return item + + def _get_delta_frames(self, dataset_iterator: Backtrackable, current_item: dict): + # TODO(fracapuano): Modularize this function, refactor the code + """Get frames with delta offsets using the backtrackable iterator. + + Args: + current_item (dict): Current item from the iterator. + ep_idx (int): Episode index. + + Returns: + tuple: (query_result, padding) - frames at delta offsets and padding info. + """ + current_episode_idx = current_item["episode_index"] + + # Prepare results + query_result = {} + padding = {} + + for key, delta_indices in self.delta_indices.items(): + if key in self.meta.video_keys: + continue # visual frames are decoded separately + + target_frames = [] + is_pad = [] + + # Create a results dictionary to store frames in processing order, then reconstruct original order for stacking + delta_results = {} + + # Separate and sort deltas by difficulty (easier operations first) + negative_deltas = sorted([d for d in delta_indices if d < 0], reverse=True) # [-1, -2, -3, ...] + positive_deltas = sorted([d for d in delta_indices if d > 0]) # [1, 2, 3, ...] 
+ zero_deltas = [d for d in delta_indices if d == 0] + + # Process zero deltas (current frame) + for delta in zero_deltas: + delta_results[delta] = ( + current_item[key], + False, + ) + + # Process negative deltas in order of increasing difficulty + lookback_failed = False + + last_successful_frame = current_item[key] + + for delta in negative_deltas: + if lookback_failed: + delta_results[delta] = (last_successful_frame, True) + continue + + try: + steps_back = abs(delta) + if dataset_iterator.can_peek_back(steps_back): + past_item = dataset_iterator.peek_back(steps_back) + past_item = item_to_torch(past_item) + + if past_item["episode_index"] == current_episode_idx: + delta_results[delta] = (past_item[key], False) + last_successful_frame = past_item[key] + + else: + raise LookBackError("Retrieved frame is from different episode!") + else: + raise LookBackError("Cannot go back further than the history buffer!") + + except LookBackError: + delta_results[delta] = (last_successful_frame, True) + lookback_failed = True # All subsequent negative deltas will also fail + + # Process positive deltas in order of increasing difficulty + lookahead_failed = False + last_successful_frame = current_item[key] + + for delta in positive_deltas: + if lookahead_failed: + delta_results[delta] = (last_successful_frame, True) + continue + + try: + if dataset_iterator.can_peek_ahead(delta): + future_item = dataset_iterator.peek_ahead(delta) + future_item = item_to_torch(future_item) + + if future_item["episode_index"] == current_episode_idx: + delta_results[delta] = (future_item[key], False) + last_successful_frame = future_item[key] + + else: + raise LookAheadError("Retrieved frame is from different episode!") + else: + raise LookAheadError("Cannot go ahead further than the lookahead buffer!") + + except LookAheadError: + delta_results[delta] = (last_successful_frame, True) + lookahead_failed = True # All subsequent positive deltas will also fail + + # Reconstruct original order for stacking + for delta in delta_indices: + frame, is_padded = delta_results[delta] + + # add batch dimension for stacking + target_frames.append(frame) # frame.unsqueeze(0)) + is_pad.append(is_padded) + + # Stack frames and add to results + if target_frames: + query_result[key] = torch.stack(target_frames) + padding[f"{key}_is_pad"] = torch.BoolTensor(is_pad) + + return query_result, padding + + def _validate_delta_timestamp_keys(self, delta_timestamps: dict[list[float]]) -> None: + """ + Validate that all keys in delta_timestamps correspond to actual features in the dataset. + + Raises: + ValueError: If any delta timestamp key doesn't correspond to a dataset feature. + """ + if delta_timestamps is None: + return + + # Get all available feature keys from the dataset metadata + available_features = set(self.meta.features.keys()) + + # Get all keys from delta_timestamps + delta_keys = set(delta_timestamps.keys()) + + # Find any keys that don't correspond to features + invalid_keys = delta_keys - available_features + + if invalid_keys: + raise ValueError( + f"The following delta_timestamp keys do not correspond to dataset features: {invalid_keys}. " + f"Available features are: {sorted(available_features)}" + ) diff --git a/src/lerobot/datasets/transforms.py b/src/lerobot/datasets/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..f573300d1ff9144799dd6287c9db19116e4d3426 --- /dev/null +++ b/src/lerobot/datasets/transforms.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. 
team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import collections +from collections.abc import Callable, Sequence +from dataclasses import dataclass, field +from typing import Any + +import torch +from torchvision.transforms import v2 +from torchvision.transforms.v2 import ( + Transform, + functional as F, # noqa: N812 +) + + +class RandomSubsetApply(Transform): + """Apply a random subset of N transformations from a list of transformations. + + Args: + transforms: list of transformations. + p: represents the multinomial probabilities (with no replacement) used for sampling the transform. + If the sum of the weights is not 1, they will be normalized. If ``None`` (default), all transforms + have the same probability. + n_subset: number of transformations to apply. If ``None``, all transforms are applied. + Must be in [1, len(transforms)]. + random_order: apply transformations in a random order. + """ + + def __init__( + self, + transforms: Sequence[Callable], + p: list[float] | None = None, + n_subset: int | None = None, + random_order: bool = False, + ) -> None: + super().__init__() + if not isinstance(transforms, Sequence): + raise TypeError("Argument transforms should be a sequence of callables") + if p is None: + p = [1] * len(transforms) + elif len(p) != len(transforms): + raise ValueError( + f"Length of p doesn't match the number of transforms: {len(p)} != {len(transforms)}" + ) + + if n_subset is None: + n_subset = len(transforms) + elif not isinstance(n_subset, int): + raise TypeError("n_subset should be an int or None") + elif not (1 <= n_subset <= len(transforms)): + raise ValueError(f"n_subset should be in the interval [1, {len(transforms)}]") + + self.transforms = transforms + total = sum(p) + self.p = [prob / total for prob in p] + self.n_subset = n_subset + self.random_order = random_order + + self.selected_transforms = None + + def forward(self, *inputs: Any) -> Any: + needs_unpacking = len(inputs) > 1 + + selected_indices = torch.multinomial(torch.tensor(self.p), self.n_subset) + if not self.random_order: + selected_indices = selected_indices.sort().values + + self.selected_transforms = [self.transforms[i] for i in selected_indices] + + for transform in self.selected_transforms: + outputs = transform(*inputs) + inputs = outputs if needs_unpacking else (outputs,) + + return outputs + + def extra_repr(self) -> str: + return ( + f"transforms={self.transforms}, " + f"p={self.p}, " + f"n_subset={self.n_subset}, " + f"random_order={self.random_order}" + ) + + +class SharpnessJitter(Transform): + """Randomly change the sharpness of an image or video. + + Similar to a v2.RandomAdjustSharpness with p=1 and a sharpness_factor sampled randomly. + While v2.RandomAdjustSharpness applies — with a given probability — a fixed sharpness_factor to an image, + SharpnessJitter applies a random sharpness_factor each time. This is to have a more diverse set of + augmentations as a result. 
+ + A sharpness_factor of 0 gives a blurred image, 1 gives the original image while 2 increases the sharpness + by a factor of 2. + + If the input is a :class:`torch.Tensor`, + it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + sharpness: How much to jitter sharpness. sharpness_factor is chosen uniformly from + [max(0, 1 - sharpness), 1 + sharpness] or the given + [min, max]. Should be non negative numbers. + """ + + def __init__(self, sharpness: float | Sequence[float]) -> None: + super().__init__() + self.sharpness = self._check_input(sharpness) + + def _check_input(self, sharpness): + if isinstance(sharpness, (int | float)): + if sharpness < 0: + raise ValueError("If sharpness is a single number, it must be non negative.") + sharpness = [1.0 - sharpness, 1.0 + sharpness] + sharpness[0] = max(sharpness[0], 0.0) + elif isinstance(sharpness, collections.abc.Sequence) and len(sharpness) == 2: + sharpness = [float(v) for v in sharpness] + else: + raise TypeError(f"{sharpness=} should be a single number or a sequence with length 2.") + + if not 0.0 <= sharpness[0] <= sharpness[1]: + raise ValueError(f"sharpness values should be between (0., inf), but got {sharpness}.") + + return float(sharpness[0]), float(sharpness[1]) + + def make_params(self, flat_inputs: list[Any]) -> dict[str, Any]: + sharpness_factor = torch.empty(1).uniform_(self.sharpness[0], self.sharpness[1]).item() + return {"sharpness_factor": sharpness_factor} + + def transform(self, inpt: Any, params: dict[str, Any]) -> Any: + sharpness_factor = params["sharpness_factor"] + return self._call_kernel(F.adjust_sharpness, inpt, sharpness_factor=sharpness_factor) + + +@dataclass +class ImageTransformConfig: + """ + For each transform, the following parameters are available: + weight: This represents the multinomial probability (with no replacement) + used for sampling the transform. If the sum of the weights is not 1, + they will be normalized. + type: The name of the class used. This is either a class available under torchvision.transforms.v2 or a + custom transform defined here. + kwargs: Lower & upper bound respectively used for sampling the transform's parameter + (following uniform distribution) when it's applied. + """ + + weight: float = 1.0 + type: str = "Identity" + kwargs: dict[str, Any] = field(default_factory=dict) + + +@dataclass +class ImageTransformsConfig: + """ + These transforms are all using standard torchvision.transforms.v2 + You can find out how these transformations affect images here: + https://pytorch.org/vision/0.18/auto_examples/transforms/plot_transforms_illustrations.html + We use a custom RandomSubsetApply container to sample them. + """ + + # Set this flag to `true` to enable transforms during training + enable: bool = False + # This is the maximum number of transforms (sampled from these below) that will be applied to each frame. + # It's an integer in the interval [1, number_of_available_transforms]. + max_num_transforms: int = 3 + # By default, transforms are applied in Torchvision's suggested order (shown below). + # Set this to True to apply them in a random order. 
+ random_order: bool = False + tfs: dict[str, ImageTransformConfig] = field( + default_factory=lambda: { + "brightness": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"brightness": (0.8, 1.2)}, + ), + "contrast": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"contrast": (0.8, 1.2)}, + ), + "saturation": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"saturation": (0.5, 1.5)}, + ), + "hue": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"hue": (-0.05, 0.05)}, + ), + "sharpness": ImageTransformConfig( + weight=1.0, + type="SharpnessJitter", + kwargs={"sharpness": (0.5, 1.5)}, + ), + "affine": ImageTransformConfig( + weight=1.0, + type="RandomAffine", + kwargs={"degrees": (-5.0, 5.0), "translate": (0.05, 0.05)}, + ), + } + ) + + +def make_transform_from_config(cfg: ImageTransformConfig): + if cfg.type == "Identity": + return v2.Identity(**cfg.kwargs) + elif cfg.type == "ColorJitter": + return v2.ColorJitter(**cfg.kwargs) + elif cfg.type == "SharpnessJitter": + return SharpnessJitter(**cfg.kwargs) + elif cfg.type == "RandomAffine": + return v2.RandomAffine(**cfg.kwargs) + else: + raise ValueError(f"Transform '{cfg.type}' is not valid.") + + +class ImageTransforms(Transform): + """A class to compose image transforms based on configuration.""" + + def __init__(self, cfg: ImageTransformsConfig) -> None: + super().__init__() + self._cfg = cfg + + self.weights = [] + self.transforms = {} + for tf_name, tf_cfg in cfg.tfs.items(): + if tf_cfg.weight <= 0.0: + continue + + self.transforms[tf_name] = make_transform_from_config(tf_cfg) + self.weights.append(tf_cfg.weight) + + n_subset = min(len(self.transforms), cfg.max_num_transforms) + if n_subset == 0 or not cfg.enable: + self.tf = v2.Identity() + else: + self.tf = RandomSubsetApply( + transforms=list(self.transforms.values()), + p=self.weights, + n_subset=n_subset, + random_order=cfg.random_order, + ) + + def forward(self, *inputs: Any) -> Any: + return self.tf(*inputs) diff --git a/src/lerobot/datasets/utils.py b/src/lerobot/datasets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e0fd280820d8e32b212410f8a11d5919376596a3 --- /dev/null +++ b/src/lerobot/datasets/utils.py @@ -0,0 +1,1376 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import contextlib +import importlib.resources +import json +import logging +from collections import deque +from collections.abc import Iterable, Iterator +from pathlib import Path +from pprint import pformat +from typing import Any, Generic, TypeVar + +import datasets +import numpy as np +import packaging.version +import pandas +import pandas as pd +import pyarrow.dataset as pa_ds +import pyarrow.parquet as pq +import torch +from datasets import Dataset +from datasets.table import embed_table_storage +from huggingface_hub import DatasetCard, DatasetCardData, HfApi +from huggingface_hub.errors import RevisionNotFoundError +from PIL import Image as PILImage +from torchvision import transforms + +from lerobot.configs.types import FeatureType, PolicyFeature +from lerobot.datasets.backward_compatibility import ( + FUTURE_MESSAGE, + BackwardCompatibilityError, + ForwardCompatibilityError, +) +from lerobot.utils.constants import ACTION, OBS_ENV_STATE, OBS_STR +from lerobot.utils.utils import SuppressProgressBars, is_valid_numpy_dtype_string + +DEFAULT_CHUNK_SIZE = 1000 # Max number of files per chunk +DEFAULT_DATA_FILE_SIZE_IN_MB = 100 # Max size per file +DEFAULT_VIDEO_FILE_SIZE_IN_MB = 500 # Max size per file + +INFO_PATH = "meta/info.json" +STATS_PATH = "meta/stats.json" + +EPISODES_DIR = "meta/episodes" +DATA_DIR = "data" +VIDEO_DIR = "videos" + +CHUNK_FILE_PATTERN = "chunk-{chunk_index:03d}/file-{file_index:03d}" +DEFAULT_TASKS_PATH = "meta/tasks.parquet" +DEFAULT_EPISODES_PATH = EPISODES_DIR + "/" + CHUNK_FILE_PATTERN + ".parquet" +DEFAULT_DATA_PATH = DATA_DIR + "/" + CHUNK_FILE_PATTERN + ".parquet" +DEFAULT_VIDEO_PATH = VIDEO_DIR + "/{video_key}/" + CHUNK_FILE_PATTERN + ".mp4" +DEFAULT_IMAGE_PATH = "images/{image_key}/episode-{episode_index:06d}/frame-{frame_index:06d}.png" + +LEGACY_EPISODES_PATH = "meta/episodes.jsonl" +LEGACY_EPISODES_STATS_PATH = "meta/episodes_stats.jsonl" +LEGACY_TASKS_PATH = "meta/tasks.jsonl" + +DEFAULT_FEATURES = { + "timestamp": {"dtype": "float32", "shape": (1,), "names": None}, + "frame_index": {"dtype": "int64", "shape": (1,), "names": None}, + "episode_index": {"dtype": "int64", "shape": (1,), "names": None}, + "index": {"dtype": "int64", "shape": (1,), "names": None}, + "task_index": {"dtype": "int64", "shape": (1,), "names": None}, +} + +T = TypeVar("T") + + +def get_parquet_file_size_in_mb(parquet_path: str | Path) -> float: + metadata = pq.read_metadata(parquet_path) + total_uncompressed_size = 0 + for row_group in range(metadata.num_row_groups): + rg_metadata = metadata.row_group(row_group) + for column in range(rg_metadata.num_columns): + col_metadata = rg_metadata.column(column) + total_uncompressed_size += col_metadata.total_uncompressed_size + return total_uncompressed_size / (1024**2) + + +def get_hf_dataset_size_in_mb(hf_ds: Dataset) -> int: + return hf_ds.data.nbytes // (1024**2) + + +def update_chunk_file_indices(chunk_idx: int, file_idx: int, chunks_size: int) -> tuple[int, int]: + if file_idx == chunks_size - 1: + file_idx = 0 + chunk_idx += 1 + else: + file_idx += 1 + return chunk_idx, file_idx + + +def load_nested_dataset( + pq_dir: Path, features: datasets.Features | None = None, episodes: list[int] | None = None +) -> Dataset: + """Find parquet files in provided directory {pq_dir}/chunk-xxx/file-xxx.parquet + Convert parquet files to pyarrow memory mapped in a cache folder for efficient RAM usage + Concatenate all pyarrow references to return HF Dataset format + + Args: + pq_dir: Directory containing parquet files + features: Optional 
features schema to ensure consistent loading of complex types like images + episodes: Optional list of episode indices to filter. Uses PyArrow predicate pushdown for efficiency. + """ + paths = sorted(pq_dir.glob("*/*.parquet")) + if len(paths) == 0: + raise FileNotFoundError(f"Provided directory does not contain any parquet file: {pq_dir}") + + with SuppressProgressBars(): + # When no filtering needed, Dataset uses memory-mapped loading for efficiency + # PyArrow loads the entire dataset into memory + if episodes is None: + return Dataset.from_parquet([str(path) for path in paths], features=features) + + arrow_dataset = pa_ds.dataset(paths, format="parquet") + filter_expr = pa_ds.field("episode_index").isin(episodes) + table = arrow_dataset.to_table(filter=filter_expr) + + if features is not None: + table = table.cast(features.arrow_schema) + + return Dataset(table) + + +def get_parquet_num_frames(parquet_path: str | Path) -> int: + metadata = pq.read_metadata(parquet_path) + return metadata.num_rows + + +def get_file_size_in_mb(file_path: Path) -> float: + """Get file size on disk in megabytes. + + Args: + file_path (Path): Path to the file. + """ + file_size_bytes = file_path.stat().st_size + return file_size_bytes / (1024**2) + + +def flatten_dict(d: dict, parent_key: str = "", sep: str = "/") -> dict: + """Flatten a nested dictionary by joining keys with a separator. + + Example: + >>> dct = {"a": {"b": 1, "c": {"d": 2}}, "e": 3} + >>> print(flatten_dict(dct)) + {'a/b': 1, 'a/c/d': 2, 'e': 3} + + Args: + d (dict): The dictionary to flatten. + parent_key (str): The base key to prepend to the keys in this level. + sep (str): The separator to use between keys. + + Returns: + dict: A flattened dictionary. + """ + items = [] + for k, v in d.items(): + new_key = f"{parent_key}{sep}{k}" if parent_key else k + if isinstance(v, dict): + items.extend(flatten_dict(v, new_key, sep=sep).items()) + else: + items.append((new_key, v)) + return dict(items) + + +def unflatten_dict(d: dict, sep: str = "/") -> dict: + """Unflatten a dictionary with delimited keys into a nested dictionary. + + Example: + >>> flat_dct = {"a/b": 1, "a/c/d": 2, "e": 3} + >>> print(unflatten_dict(flat_dct)) + {'a': {'b': 1, 'c': {'d': 2}}, 'e': 3} + + Args: + d (dict): A dictionary with flattened keys. + sep (str): The separator used in the keys. + + Returns: + dict: A nested dictionary. + """ + outdict = {} + for key, value in d.items(): + parts = key.split(sep) + d = outdict + for part in parts[:-1]: + if part not in d: + d[part] = {} + d = d[part] + d[parts[-1]] = value + return outdict + + +def serialize_dict(stats: dict[str, torch.Tensor | np.ndarray | dict]) -> dict: + """Serialize a dictionary containing tensors or numpy arrays to be JSON-compatible. + + Converts torch.Tensor, np.ndarray, and np.generic types to lists or native Python types. + + Args: + stats (dict): A dictionary that may contain non-serializable numeric types. + + Returns: + dict: A dictionary with all values converted to JSON-serializable types. + + Raises: + NotImplementedError: If a value has an unsupported type. 
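+
+    Example (illustrative sketch):
+        >>> serialize_dict({"a": {"mean": np.array([1.0, 2.0])}, "b": 3})
+        {'a': {'mean': [1.0, 2.0]}, 'b': 3}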
+ """ + serialized_dict = {} + for key, value in flatten_dict(stats).items(): + if isinstance(value, (torch.Tensor | np.ndarray)): + serialized_dict[key] = value.tolist() + elif isinstance(value, list) and isinstance(value[0], (int | float | list)): + serialized_dict[key] = value + elif isinstance(value, np.generic): + serialized_dict[key] = value.item() + elif isinstance(value, (int | float)): + serialized_dict[key] = value + else: + raise NotImplementedError(f"The value '{value}' of type '{type(value)}' is not supported.") + return unflatten_dict(serialized_dict) + + +def embed_images(dataset: datasets.Dataset) -> datasets.Dataset: + """Embed image bytes into the dataset table before saving to Parquet. + + This function prepares a Hugging Face dataset for serialization by converting + image objects into an embedded format that can be stored in Arrow/Parquet. + + Args: + dataset (datasets.Dataset): The input dataset, possibly containing image features. + + Returns: + datasets.Dataset: The dataset with images embedded in the table storage. + """ + # Embed image bytes into the table before saving to parquet + format = dataset.format + dataset = dataset.with_format("arrow") + dataset = dataset.map(embed_table_storage, batched=False) + dataset = dataset.with_format(**format) + return dataset + + +def load_json(fpath: Path) -> Any: + """Load data from a JSON file. + + Args: + fpath (Path): Path to the JSON file. + + Returns: + Any: The data loaded from the JSON file. + """ + with open(fpath) as f: + return json.load(f) + + +def write_json(data: dict, fpath: Path) -> None: + """Write data to a JSON file. + + Creates parent directories if they don't exist. + + Args: + data (dict): The dictionary to write. + fpath (Path): The path to the output JSON file. + """ + fpath.parent.mkdir(exist_ok=True, parents=True) + with open(fpath, "w") as f: + json.dump(data, f, indent=4, ensure_ascii=False) + + +def write_info(info: dict, local_dir: Path) -> None: + write_json(info, local_dir / INFO_PATH) + + +def load_info(local_dir: Path) -> dict: + """Load dataset info metadata from its standard file path. + + Also converts shape lists to tuples for consistency. + + Args: + local_dir (Path): The root directory of the dataset. + + Returns: + dict: The dataset information dictionary. + """ + info = load_json(local_dir / INFO_PATH) + for ft in info["features"].values(): + ft["shape"] = tuple(ft["shape"]) + return info + + +def write_stats(stats: dict, local_dir: Path) -> None: + """Serialize and write dataset statistics to their standard file path. + + Args: + stats (dict): The statistics dictionary (can contain tensors/numpy arrays). + local_dir (Path): The root directory of the dataset. + """ + serialized_stats = serialize_dict(stats) + write_json(serialized_stats, local_dir / STATS_PATH) + + +def cast_stats_to_numpy(stats: dict) -> dict[str, dict[str, np.ndarray]]: + """Recursively cast numerical values in a stats dictionary to numpy arrays. + + Args: + stats (dict): The statistics dictionary. + + Returns: + dict: The statistics dictionary with values cast to numpy arrays. + """ + stats = {key: np.array(value) for key, value in flatten_dict(stats).items()} + return unflatten_dict(stats) + + +def load_stats(local_dir: Path) -> dict[str, dict[str, np.ndarray]] | None: + """Load dataset statistics and cast numerical values to numpy arrays. + + Returns None if the stats file doesn't exist. + + Args: + local_dir (Path): The root directory of the dataset. 
+ + Returns: + A dictionary of statistics or None if the file is not found. + """ + if not (local_dir / STATS_PATH).exists(): + return None + stats = load_json(local_dir / STATS_PATH) + return cast_stats_to_numpy(stats) + + +def write_tasks(tasks: pandas.DataFrame, local_dir: Path) -> None: + path = local_dir / DEFAULT_TASKS_PATH + path.parent.mkdir(parents=True, exist_ok=True) + tasks.to_parquet(path) + + +def load_tasks(local_dir: Path) -> pandas.DataFrame: + tasks = pd.read_parquet(local_dir / DEFAULT_TASKS_PATH) + return tasks + + +def write_episodes(episodes: Dataset, local_dir: Path) -> None: + """Write episode metadata to a parquet file in the LeRobot v3.0 format. + This function writes episode-level metadata to a single parquet file. + Used primarily during dataset conversion (v2.1 → v3.0) and in test fixtures. + + Args: + episodes: HuggingFace Dataset containing episode metadata + local_dir: Root directory where the dataset will be stored + """ + episode_size_mb = get_hf_dataset_size_in_mb(episodes) + if episode_size_mb > DEFAULT_DATA_FILE_SIZE_IN_MB: + raise NotImplementedError( + f"Episodes dataset is too large ({episode_size_mb} MB) to write to a single file. " + f"The current limit is {DEFAULT_DATA_FILE_SIZE_IN_MB} MB. " + "This function only supports single-file episode metadata. " + ) + + fpath = local_dir / DEFAULT_EPISODES_PATH.format(chunk_index=0, file_index=0) + fpath.parent.mkdir(parents=True, exist_ok=True) + episodes.to_parquet(fpath) + + +def load_episodes(local_dir: Path) -> datasets.Dataset: + episodes = load_nested_dataset(local_dir / EPISODES_DIR) + # Select episode features/columns containing references to episode data and videos + # (e.g. tasks, dataset_from_index, dataset_to_index, data/chunk_index, data/file_index, etc.) + # This is to speedup access to these data, instead of having to load episode stats. + episodes = episodes.select_columns([key for key in episodes.features if not key.startswith("stats/")]) + return episodes + + +def load_image_as_numpy( + fpath: str | Path, dtype: np.dtype = np.float32, channel_first: bool = True +) -> np.ndarray: + """Load an image from a file into a numpy array. + + Args: + fpath (str | Path): Path to the image file. + dtype (np.dtype): The desired data type of the output array. If floating, + pixels are scaled to [0, 1]. + channel_first (bool): If True, converts the image to (C, H, W) format. + Otherwise, it remains in (H, W, C) format. + + Returns: + np.ndarray: The image as a numpy array. + """ + img = PILImage.open(fpath).convert("RGB") + img_array = np.array(img, dtype=dtype) + if channel_first: # (H, W, C) -> (C, H, W) + img_array = np.transpose(img_array, (2, 0, 1)) + if np.issubdtype(dtype, np.floating): + img_array /= 255.0 + return img_array + + +def hf_transform_to_torch(items_dict: dict[str, list[Any]]) -> dict[str, list[torch.Tensor | str]]: + """Convert a batch from a Hugging Face dataset to torch tensors. + + This transform function converts items from Hugging Face dataset format (pyarrow) + to torch tensors. Importantly, images are converted from PIL objects (H, W, C, uint8) + to a torch image representation (C, H, W, float32) in the range [0, 1]. Other + types are converted to torch.tensor. + + Args: + items_dict (dict): A dictionary representing a batch of data from a + Hugging Face dataset. + + Returns: + dict: The batch with items converted to torch tensors. 
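+
+    Example (illustrative sketch):
+        >>> batch = {"action": [[0.1, 0.2], [0.3, 0.4]], "task": ["pick", "place"]}
+        >>> out = hf_transform_to_torch(batch)
+        >>> out["action"][0]
+        tensor([0.1000, 0.2000])
+        >>> out["task"]
+        ['pick', 'place']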
+ """ + for key in items_dict: + first_item = items_dict[key][0] + if isinstance(first_item, PILImage.Image): + to_tensor = transforms.ToTensor() + items_dict[key] = [to_tensor(img) for img in items_dict[key]] + elif first_item is None: + pass + else: + items_dict[key] = [x if isinstance(x, str) else torch.tensor(x) for x in items_dict[key]] + return items_dict + + +def is_valid_version(version: str) -> bool: + """Check if a string is a valid PEP 440 version. + + Args: + version (str): The version string to check. + + Returns: + bool: True if the version string is valid, False otherwise. + """ + try: + packaging.version.parse(version) + return True + except packaging.version.InvalidVersion: + return False + + +def check_version_compatibility( + repo_id: str, + version_to_check: str | packaging.version.Version, + current_version: str | packaging.version.Version, + enforce_breaking_major: bool = True, +) -> None: + """Check for version compatibility between a dataset and the current codebase. + + Args: + repo_id (str): The repository ID for logging purposes. + version_to_check (str | packaging.version.Version): The version of the dataset. + current_version (str | packaging.version.Version): The current version of the codebase. + enforce_breaking_major (bool): If True, raise an error on major version mismatch. + + Raises: + BackwardCompatibilityError: If the dataset version is from a newer, incompatible + major version of the codebase. + """ + v_check = ( + packaging.version.parse(version_to_check) + if not isinstance(version_to_check, packaging.version.Version) + else version_to_check + ) + v_current = ( + packaging.version.parse(current_version) + if not isinstance(current_version, packaging.version.Version) + else current_version + ) + if v_check.major < v_current.major and enforce_breaking_major: + raise BackwardCompatibilityError(repo_id, v_check) + elif v_check.minor < v_current.minor: + logging.warning(FUTURE_MESSAGE.format(repo_id=repo_id, version=v_check)) + + +def get_repo_versions(repo_id: str) -> list[packaging.version.Version]: + """Return available valid versions (branches and tags) on a given Hub repo. + + Args: + repo_id (str): The repository ID on the Hugging Face Hub. + + Returns: + list[packaging.version.Version]: A list of valid versions found. + """ + api = HfApi() + repo_refs = api.list_repo_refs(repo_id, repo_type="dataset") + repo_refs = [b.name for b in repo_refs.branches + repo_refs.tags] + repo_versions = [] + for ref in repo_refs: + with contextlib.suppress(packaging.version.InvalidVersion): + repo_versions.append(packaging.version.parse(ref)) + + return repo_versions + + +def get_safe_version(repo_id: str, version: str | packaging.version.Version) -> str: + """Return the specified version if available on repo, or the latest compatible one. + + If the exact version is not found, it looks for the latest version with the + same major version number that is less than or equal to the target minor version. + + Args: + repo_id (str): The repository ID on the Hugging Face Hub. + version (str | packaging.version.Version): The target version. + + Returns: + str: The safe version string (e.g., "v1.2.3") to use as a revision. + + Raises: + RevisionNotFoundError: If the repo has no version tags. + BackwardCompatibilityError: If only older major versions are available. + ForwardCompatibilityError: If only newer major versions are available. 
+ """ + target_version = ( + packaging.version.parse(version) if not isinstance(version, packaging.version.Version) else version + ) + hub_versions = get_repo_versions(repo_id) + + if not hub_versions: + raise RevisionNotFoundError( + f"""Your dataset must be tagged with a codebase version. + Assuming _version_ is the codebase_version value in the info.json, you can run this: + ```python + from huggingface_hub import HfApi + + hub_api = HfApi() + hub_api.create_tag("{repo_id}", tag="_version_", repo_type="dataset") + ``` + """ + ) + + if target_version in hub_versions: + return f"v{target_version}" + + compatibles = [ + v for v in hub_versions if v.major == target_version.major and v.minor <= target_version.minor + ] + if compatibles: + return_version = max(compatibles) + if return_version < target_version: + logging.warning(f"Revision {version} for {repo_id} not found, using version v{return_version}") + return f"v{return_version}" + + lower_major = [v for v in hub_versions if v.major < target_version.major] + if lower_major: + raise BackwardCompatibilityError(repo_id, max(lower_major)) + + upper_versions = [v for v in hub_versions if v > target_version] + assert len(upper_versions) > 0 + raise ForwardCompatibilityError(repo_id, min(upper_versions)) + + +def get_hf_features_from_features(features: dict) -> datasets.Features: + """Convert a LeRobot features dictionary to a `datasets.Features` object. + + Args: + features (dict): A LeRobot-style feature dictionary. + + Returns: + datasets.Features: The corresponding Hugging Face `datasets.Features` object. + + Raises: + ValueError: If a feature has an unsupported shape. + """ + hf_features = {} + for key, ft in features.items(): + if ft["dtype"] == "video": + continue + elif ft["dtype"] == "image": + hf_features[key] = datasets.Image() + elif ft["shape"] == (1,): + hf_features[key] = datasets.Value(dtype=ft["dtype"]) + elif len(ft["shape"]) == 1: + hf_features[key] = datasets.Sequence( + length=ft["shape"][0], feature=datasets.Value(dtype=ft["dtype"]) + ) + elif len(ft["shape"]) == 2: + hf_features[key] = datasets.Array2D(shape=ft["shape"], dtype=ft["dtype"]) + elif len(ft["shape"]) == 3: + hf_features[key] = datasets.Array3D(shape=ft["shape"], dtype=ft["dtype"]) + elif len(ft["shape"]) == 4: + hf_features[key] = datasets.Array4D(shape=ft["shape"], dtype=ft["dtype"]) + elif len(ft["shape"]) == 5: + hf_features[key] = datasets.Array5D(shape=ft["shape"], dtype=ft["dtype"]) + else: + raise ValueError(f"Corresponding feature is not valid: {ft}") + + return datasets.Features(hf_features) + + +def _validate_feature_names(features: dict[str, dict]) -> None: + """Validate that feature names do not contain invalid characters. + + Args: + features (dict): The LeRobot features dictionary. + + Raises: + ValueError: If any feature name contains '/'. + """ + invalid_features = {name: ft for name, ft in features.items() if "/" in name} + if invalid_features: + raise ValueError(f"Feature names should not contain '/'. Found '/' in '{invalid_features}'.") + + +def hw_to_dataset_features( + hw_features: dict[str, type | tuple], prefix: str, use_video: bool = True +) -> dict[str, dict]: + """Convert hardware-specific features to a LeRobot dataset feature dictionary. + + This function takes a dictionary describing hardware outputs (like joint states + or camera image shapes) and formats it into the standard LeRobot feature + specification. 
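+
+    For illustration (hypothetical motor and camera names):
+
+    ```python
+    action_ft = hw_to_dataset_features({"shoulder_pan.pos": float, "gripper.pos": float}, "action")
+    obs_ft = hw_to_dataset_features({"wrist": (480, 640, 3)}, "observation", use_video=True)
+    # action_ft["action"]["shape"] == (2,)
+    # obs_ft["observation.images.wrist"]["dtype"] == "video"
+    ```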
+ + Args: + hw_features (dict): Dictionary mapping feature names to their type (float for + joints) or shape (tuple for images). + prefix (str): The prefix to add to the feature keys (e.g., "observation" + or "action"). + use_video (bool): If True, image features are marked as "video", otherwise "image". + + Returns: + dict: A LeRobot features dictionary. + """ + features = {} + joint_fts = { + key: ftype + for key, ftype in hw_features.items() + if ftype is float or (isinstance(ftype, PolicyFeature) and ftype.type != FeatureType.VISUAL) + } + cam_fts = {key: shape for key, shape in hw_features.items() if isinstance(shape, tuple)} + + if joint_fts and prefix == ACTION: + features[prefix] = { + "dtype": "float32", + "shape": (len(joint_fts),), + "names": list(joint_fts), + } + + if joint_fts and prefix == OBS_STR: + features[f"{prefix}.state"] = { + "dtype": "float32", + "shape": (len(joint_fts),), + "names": list(joint_fts), + } + + for key, shape in cam_fts.items(): + features[f"{prefix}.images.{key}"] = { + "dtype": "video" if use_video else "image", + "shape": shape, + "names": ["height", "width", "channels"], + } + + _validate_feature_names(features) + return features + + +def build_dataset_frame( + ds_features: dict[str, dict], values: dict[str, Any], prefix: str +) -> dict[str, np.ndarray]: + """Construct a single data frame from raw values based on dataset features. + + A "frame" is a dictionary containing all the data for a single timestep, + formatted as numpy arrays according to the feature specification. + + Args: + ds_features (dict): The LeRobot dataset features dictionary. + values (dict): A dictionary of raw values from the hardware/environment. + prefix (str): The prefix to filter features by (e.g., "observation" + or "action"). + + Returns: + dict: A dictionary representing a single frame of data. + """ + frame = {} + for key, ft in ds_features.items(): + if key in DEFAULT_FEATURES or not key.startswith(prefix): + continue + elif ft["dtype"] == "float32" and len(ft["shape"]) == 1: + frame[key] = np.array([values[name] for name in ft["names"]], dtype=np.float32) + elif ft["dtype"] in ["image", "video"]: + frame[key] = values[key.removeprefix(f"{prefix}.images.")] + + return frame + + +def dataset_to_policy_features(features: dict[str, dict]) -> dict[str, PolicyFeature]: + """Convert dataset features to policy features. + + This function transforms the dataset's feature specification into a format + that a policy can use, classifying features by type (e.g., visual, state, + action) and ensuring correct shapes (e.g., channel-first for images). + + Args: + features (dict): The LeRobot dataset features dictionary. + + Returns: + dict: A dictionary mapping feature keys to `PolicyFeature` objects. + + Raises: + ValueError: If an image feature does not have a 3D shape. + """ + # TODO(aliberts): Implement "type" in dataset features and simplify this + policy_features = {} + for key, ft in features.items(): + shape = ft["shape"] + if ft["dtype"] in ["image", "video"]: + type = FeatureType.VISUAL + if len(shape) != 3: + raise ValueError(f"Number of dimensions of {key} != 3 (shape={shape})") + + names = ft["names"] + # Backward compatibility for "channel" which is an error introduced in LeRobotDataset v2.0 for ported datasets. 
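+            # Policies consume channel-first image tensors, so a channel-last spec is reordered below.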
+ if names[2] in ["channel", "channels"]: # (h, w, c) -> (c, h, w) + shape = (shape[2], shape[0], shape[1]) + elif key == OBS_ENV_STATE: + type = FeatureType.ENV + elif key.startswith(OBS_STR): + type = FeatureType.STATE + elif key.startswith(ACTION): + type = FeatureType.ACTION + else: + continue + + policy_features[key] = PolicyFeature( + type=type, + shape=shape, + ) + + return policy_features + + +def combine_feature_dicts(*dicts: dict) -> dict: + """Merge LeRobot grouped feature dicts. + + - For 1D numeric specs (dtype not image/video/string) with "names": we merge the names and recompute the shape. + - For others (e.g. `observation.images.*`), the last one wins (if they are identical). + + Args: + *dicts: A variable number of LeRobot feature dictionaries to merge. + + Returns: + dict: A single merged feature dictionary. + + Raises: + ValueError: If there's a dtype mismatch for a feature being merged. + """ + out: dict = {} + for d in dicts: + for key, value in d.items(): + if not isinstance(value, dict): + out[key] = value + continue + + dtype = value.get("dtype") + shape = value.get("shape") + is_vector = ( + dtype not in ("image", "video", "string") + and isinstance(shape, tuple) + and len(shape) == 1 + and "names" in value + ) + + if is_vector: + # Initialize or retrieve the accumulating dict for this feature key + target = out.setdefault(key, {"dtype": dtype, "names": [], "shape": (0,)}) + # Ensure consistent data types across merged entries + if "dtype" in target and dtype != target["dtype"]: + raise ValueError(f"dtype mismatch for '{key}': {target['dtype']} vs {dtype}") + + # Merge feature names: append only new ones to preserve order without duplicates + seen = set(target["names"]) + for n in value["names"]: + if n not in seen: + target["names"].append(n) + seen.add(n) + # Recompute the shape to reflect the updated number of features + target["shape"] = (len(target["names"]),) + else: + # For images/videos and non-1D entries: override with the latest definition + out[key] = value + return out + + +def create_empty_dataset_info( + codebase_version: str, + fps: int, + features: dict, + use_videos: bool, + robot_type: str | None = None, + chunks_size: int | None = None, + data_files_size_in_mb: int | None = None, + video_files_size_in_mb: int | None = None, +) -> dict: + """Create a template dictionary for a new dataset's `info.json`. + + Args: + codebase_version (str): The version of the LeRobot codebase. + fps (int): The frames per second of the data. + features (dict): The LeRobot features dictionary for the dataset. + use_videos (bool): Whether the dataset will store videos. + robot_type (str | None): The type of robot used, if any. + + Returns: + dict: A dictionary with the initial dataset metadata. + """ + return { + "codebase_version": codebase_version, + "robot_type": robot_type, + "total_episodes": 0, + "total_frames": 0, + "total_tasks": 0, + "chunks_size": chunks_size or DEFAULT_CHUNK_SIZE, + "data_files_size_in_mb": data_files_size_in_mb or DEFAULT_DATA_FILE_SIZE_IN_MB, + "video_files_size_in_mb": video_files_size_in_mb or DEFAULT_VIDEO_FILE_SIZE_IN_MB, + "fps": fps, + "splits": {}, + "data_path": DEFAULT_DATA_PATH, + "video_path": DEFAULT_VIDEO_PATH if use_videos else None, + "features": features, + } + + +def check_delta_timestamps( + delta_timestamps: dict[str, list[float]], fps: int, tolerance_s: float, raise_value_error: bool = True +) -> bool: + """Check if delta timestamps are multiples of 1/fps +/- tolerance. 
+ + This ensures that adding these delta timestamps to any existing timestamp in + the dataset will result in a value that aligns with the dataset's frame rate. + + Args: + delta_timestamps (dict): A dictionary where values are lists of time + deltas in seconds. + fps (int): The frames per second of the dataset. + tolerance_s (float): The allowed tolerance in seconds. + raise_value_error (bool): If True, raises an error on failure. + + Returns: + bool: True if all deltas are valid, False otherwise. + + Raises: + ValueError: If any delta is outside the tolerance and `raise_value_error` is True. + """ + outside_tolerance = {} + for key, delta_ts in delta_timestamps.items(): + within_tolerance = [abs(ts * fps - round(ts * fps)) / fps <= tolerance_s for ts in delta_ts] + if not all(within_tolerance): + outside_tolerance[key] = [ + ts for ts, is_within in zip(delta_ts, within_tolerance, strict=True) if not is_within + ] + + if len(outside_tolerance) > 0: + if raise_value_error: + raise ValueError( + f""" + The following delta_timestamps are found outside of tolerance range. + Please make sure they are multiples of 1/{fps} +/- tolerance and adjust + their values accordingly. + \n{pformat(outside_tolerance)} + """ + ) + return False + + return True + + +def get_delta_indices(delta_timestamps: dict[str, list[float]], fps: int) -> dict[str, list[int]]: + """Convert delta timestamps in seconds to delta indices in frames. + + Args: + delta_timestamps (dict): A dictionary of time deltas in seconds. + fps (int): The frames per second of the dataset. + + Returns: + dict: A dictionary of frame delta indices. + """ + delta_indices = {} + for key, delta_ts in delta_timestamps.items(): + delta_indices[key] = [round(d * fps) for d in delta_ts] + + return delta_indices + + +def cycle(iterable: Any) -> Iterator[Any]: + """Create a dataloader-safe cyclical iterator. + + This is an equivalent of `itertools.cycle` but is safe for use with + PyTorch DataLoaders with multiple workers. + See https://github.com/pytorch/pytorch/issues/23900 for details. + + Args: + iterable: The iterable to cycle over. + + Yields: + Items from the iterable, restarting from the beginning when exhausted. + """ + iterator = iter(iterable) + while True: + try: + yield next(iterator) + except StopIteration: + iterator = iter(iterable) + + +def create_branch(repo_id: str, *, branch: str, repo_type: str | None = None) -> None: + """Create a branch on an existing Hugging Face repo. + + Deletes the branch if it already exists before creating it. + + Args: + repo_id (str): The ID of the repository. + branch (str): The name of the branch to create. + repo_type (str | None): The type of the repository (e.g., "dataset"). + """ + api = HfApi() + + branches = api.list_repo_refs(repo_id, repo_type=repo_type).branches + refs = [branch.ref for branch in branches] + ref = f"refs/heads/{branch}" + if ref in refs: + api.delete_branch(repo_id, repo_type=repo_type, branch=branch) + + api.create_branch(repo_id, repo_type=repo_type, branch=branch) + + +def create_lerobot_dataset_card( + tags: list | None = None, + dataset_info: dict | None = None, + **kwargs, +) -> DatasetCard: + """Create a `DatasetCard` for a LeRobot dataset. + + Keyword arguments are used to replace values in the card template. + Note: If specified, `license` must be a valid license identifier from + https://huggingface.co/docs/hub/repositories-licenses. + + Args: + tags (list | None): A list of tags to add to the dataset card. 
+ dataset_info (dict | None): The dataset's info dictionary, which will + be displayed on the card. + **kwargs: Additional keyword arguments to populate the card template. + + Returns: + DatasetCard: The generated dataset card object. + """ + card_tags = ["LeRobot"] + + if tags: + card_tags += tags + if dataset_info: + dataset_structure = "[meta/info.json](meta/info.json):\n" + dataset_structure += f"```json\n{json.dumps(dataset_info, indent=4)}\n```\n" + kwargs = {**kwargs, "dataset_structure": dataset_structure} + card_data = DatasetCardData( + license=kwargs.get("license"), + tags=card_tags, + task_categories=["robotics"], + configs=[ + { + "config_name": "default", + "data_files": "data/*/*.parquet", + } + ], + ) + + card_template = (importlib.resources.files("lerobot.datasets") / "card_template.md").read_text() + + return DatasetCard.from_template( + card_data=card_data, + template_str=card_template, + **kwargs, + ) + + +def validate_frame(frame: dict, features: dict) -> None: + expected_features = set(features) - set(DEFAULT_FEATURES) + actual_features = set(frame) + + # task is a special required field that's not part of regular features + if "task" not in actual_features: + raise ValueError("Feature mismatch in `frame` dictionary:\nMissing features: {'task'}\n") + + # Remove task from actual_features for regular feature validation + actual_features_for_validation = actual_features - {"task"} + + error_message = validate_features_presence(actual_features_for_validation, expected_features) + + common_features = actual_features_for_validation & expected_features + for name in common_features: + error_message += validate_feature_dtype_and_shape(name, features[name], frame[name]) + + if error_message: + raise ValueError(error_message) + + +def validate_features_presence(actual_features: set[str], expected_features: set[str]) -> str: + """Check for missing or extra features in a frame. + + Args: + actual_features (set[str]): The set of feature names present in the frame. + expected_features (set[str]): The set of feature names expected in the frame. + + Returns: + str: An error message string if there's a mismatch, otherwise an empty string. + """ + error_message = "" + missing_features = expected_features - actual_features + extra_features = actual_features - expected_features + + if missing_features or extra_features: + error_message += "Feature mismatch in `frame` dictionary:\n" + if missing_features: + error_message += f"Missing features: {missing_features}\n" + if extra_features: + error_message += f"Extra features: {extra_features}\n" + + return error_message + + +def validate_feature_dtype_and_shape( + name: str, feature: dict, value: np.ndarray | PILImage.Image | str +) -> str: + """Validate the dtype and shape of a single feature's value. + + Args: + name (str): The name of the feature. + feature (dict): The feature specification from the LeRobot features dictionary. + value: The value of the feature to validate. + + Returns: + str: An error message if validation fails, otherwise an empty string. + + Raises: + NotImplementedError: If the feature dtype is not supported for validation. 
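+
+    Example (illustrative; the feature spec and value are hypothetical):
+
+    ```python
+    spec = {"dtype": "float32", "shape": (6,)}
+    error = validate_feature_dtype_and_shape("action", spec, np.zeros(6, dtype=np.float32))
+    # error == "" when both the dtype and the shape match the spec
+    ```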
+ """ + expected_dtype = feature["dtype"] + expected_shape = feature["shape"] + if is_valid_numpy_dtype_string(expected_dtype): + return validate_feature_numpy_array(name, expected_dtype, expected_shape, value) + elif expected_dtype in ["image", "video"]: + return validate_feature_image_or_video(name, expected_shape, value) + elif expected_dtype == "string": + return validate_feature_string(name, value) + else: + raise NotImplementedError(f"The feature dtype '{expected_dtype}' is not implemented yet.") + + +def validate_feature_numpy_array( + name: str, expected_dtype: str, expected_shape: list[int], value: np.ndarray +) -> str: + """Validate a feature that is expected to be a numpy array. + + Args: + name (str): The name of the feature. + expected_dtype (str): The expected numpy dtype as a string. + expected_shape (list[int]): The expected shape. + value (np.ndarray): The numpy array to validate. + + Returns: + str: An error message if validation fails, otherwise an empty string. + """ + error_message = "" + if isinstance(value, np.ndarray): + actual_dtype = value.dtype + actual_shape = value.shape + + if actual_dtype != np.dtype(expected_dtype): + error_message += f"The feature '{name}' of dtype '{actual_dtype}' is not of the expected dtype '{expected_dtype}'.\n" + + if actual_shape != expected_shape: + error_message += f"The feature '{name}' of shape '{actual_shape}' does not have the expected shape '{expected_shape}'.\n" + else: + error_message += f"The feature '{name}' is not a 'np.ndarray'. Expected type is '{expected_dtype}', but type '{type(value)}' provided instead.\n" + + return error_message + + +def validate_feature_image_or_video( + name: str, expected_shape: list[str], value: np.ndarray | PILImage.Image +) -> str: + """Validate a feature that is expected to be an image or video frame. + + Accepts `np.ndarray` (channel-first or channel-last) or `PIL.Image.Image`. + + Args: + name (str): The name of the feature. + expected_shape (list[str]): The expected shape (C, H, W). + value: The image data to validate. + + Returns: + str: An error message if validation fails, otherwise an empty string. + """ + # Note: The check of pixels range ([0,1] for float and [0,255] for uint8) is done by the image writer threads. + error_message = "" + if isinstance(value, np.ndarray): + actual_shape = value.shape + c, h, w = expected_shape + if len(actual_shape) != 3 or (actual_shape != (c, h, w) and actual_shape != (h, w, c)): + error_message += f"The feature '{name}' of shape '{actual_shape}' does not have the expected shape '{(c, h, w)}' or '{(h, w, c)}'.\n" + elif isinstance(value, PILImage.Image): + pass + else: + error_message += f"The feature '{name}' is expected to be of type 'PIL.Image' or 'np.ndarray' channel first or channel last, but type '{type(value)}' provided instead.\n" + + return error_message + + +def validate_feature_string(name: str, value: str) -> str: + """Validate a feature that is expected to be a string. + + Args: + name (str): The name of the feature. + value (str): The value to validate. + + Returns: + str: An error message if validation fails, otherwise an empty string. + """ + if not isinstance(value, str): + return f"The feature '{name}' is expected to be of type 'str', but type '{type(value)}' provided instead.\n" + return "" + + +def validate_episode_buffer(episode_buffer: dict, total_episodes: int, features: dict) -> None: + """Validate the episode buffer before it's written to disk. 
+ + Ensures the buffer has the required keys, contains at least one frame, and + has features consistent with the dataset's specification. + + Args: + episode_buffer (dict): The buffer containing data for a single episode. + total_episodes (int): The current total number of episodes in the dataset. + features (dict): The LeRobot features dictionary for the dataset. + + Raises: + ValueError: If the buffer is invalid. + NotImplementedError: If the episode index is manually set and doesn't match. + """ + if "size" not in episode_buffer: + raise ValueError("size key not found in episode_buffer") + + if "task" not in episode_buffer: + raise ValueError("task key not found in episode_buffer") + + if episode_buffer["episode_index"] != total_episodes: + # TODO(aliberts): Add option to use existing episode_index + raise NotImplementedError( + "You might have manually provided the episode_buffer with an episode_index that doesn't " + "match the total number of episodes already in the dataset. This is not supported for now." + ) + + if episode_buffer["size"] == 0: + raise ValueError("You must add one or several frames with `add_frame` before calling `add_episode`.") + + buffer_keys = set(episode_buffer.keys()) - {"task", "size"} + if not buffer_keys == set(features): + raise ValueError( + f"Features from `episode_buffer` don't match the ones in `features`." + f"In episode_buffer not in features: {buffer_keys - set(features)}" + f"In features not in episode_buffer: {set(features) - buffer_keys}" + ) + + +def to_parquet_with_hf_images(df: pandas.DataFrame, path: Path) -> None: + """This function correctly writes to parquet a panda DataFrame that contains images encoded by HF dataset. + This way, it can be loaded by HF dataset and correctly formatted images are returned. + """ + # TODO(qlhoest): replace this weird synthax by `df.to_parquet(path)` only + datasets.Dataset.from_dict(df.to_dict(orient="list")).to_parquet(path) + + +def item_to_torch(item: dict) -> dict: + """Convert all items in a dictionary to PyTorch tensors where appropriate. + + This function is used to convert an item from a streaming dataset to PyTorch tensors. + + Args: + item (dict): Dictionary of items from a dataset. + + Returns: + dict: Dictionary with all tensor-like items converted to torch.Tensor. + """ + for key, val in item.items(): + if isinstance(val, (np.ndarray | list)) and key not in ["task"]: + # Convert numpy arrays and lists to torch tensors + item[key] = torch.tensor(val) + return item + + +def is_float_in_list(target, float_list, threshold=1e-6): + return any(abs(target - x) <= threshold for x in float_list) + + +def find_float_index(target, float_list, threshold=1e-6): + for i, x in enumerate(float_list): + if abs(target - x) <= threshold: + return i + return -1 + + +class LookBackError(Exception): + """ + Exception raised when trying to look back in the history of a Backtrackable object. + """ + + pass + + +class LookAheadError(Exception): + """ + Exception raised when trying to look ahead in the future of a Backtrackable object. + """ + + pass + + +class Backtrackable(Generic[T]): + """ + Wrap any iterator/iterable so you can step back up to `history` items + and look ahead up to `lookahead` items. + + This is useful for streaming datasets where you need to access previous and future items + but can't load the entire dataset into memory. 
+ + Example: + ------- + ```python + ds = load_dataset("c4", "en", streaming=True, split="train") + rev = Backtrackable(ds, history=3, lookahead=2) + + x0 = next(rev) # forward + x1 = next(rev) + x2 = next(rev) + + # Look ahead + x3_peek = rev.peek_ahead(1) # next item without moving cursor + x4_peek = rev.peek_ahead(2) # two items ahead + + # Look back + x1_again = rev.peek_back(1) # previous item without moving cursor + x0_again = rev.peek_back(2) # two items back + + # Move backward + x1_back = rev.prev() # back one step + next(rev) # returns x2, continues forward from where we were + ``` + """ + + __slots__ = ("_source", "_back_buf", "_ahead_buf", "_cursor", "_history", "_lookahead") + + def __init__(self, iterable: Iterable[T], *, history: int = 1, lookahead: int = 0): + if history < 1: + raise ValueError("history must be >= 1") + if lookahead <= 0: + raise ValueError("lookahead must be > 0") + + self._source: Iterator[T] = iter(iterable) + self._back_buf: deque[T] = deque(maxlen=history) + self._ahead_buf: deque[T] = deque(maxlen=lookahead) if lookahead > 0 else deque() + self._cursor: int = 0 + self._history = history + self._lookahead = lookahead + + def __iter__(self) -> "Backtrackable[T]": + return self + + def __next__(self) -> T: + # If we've stepped back, consume from back buffer first + if self._cursor < 0: # -1 means "last item", etc. + self._cursor += 1 + return self._back_buf[self._cursor] + + # If we have items in the ahead buffer, use them first + item = self._ahead_buf.popleft() if self._ahead_buf else next(self._source) + + # Add current item to back buffer and reset cursor + self._back_buf.append(item) + self._cursor = 0 + return item + + def prev(self) -> T: + """ + Step one item back in history and return it. + Raises IndexError if already at the oldest buffered item. + """ + if len(self._back_buf) + self._cursor <= 1: + raise LookBackError("At start of history") + + self._cursor -= 1 + return self._back_buf[self._cursor] + + def peek_back(self, n: int = 1) -> T: + """ + Look `n` items back (n=1 == previous item) without moving the cursor. + """ + if n < 0 or n + 1 > len(self._back_buf) + self._cursor: + raise LookBackError("peek_back distance out of range") + + return self._back_buf[self._cursor - (n + 1)] + + def peek_ahead(self, n: int = 1) -> T: + """ + Look `n` items ahead (n=1 == next item) without moving the cursor. + Fills the ahead buffer if necessary. + """ + if n < 1: + raise LookAheadError("peek_ahead distance must be 1 or more") + elif n > self._lookahead: + raise LookAheadError("peek_ahead distance exceeds lookahead limit") + + # Fill ahead buffer if we don't have enough items + while len(self._ahead_buf) < n: + try: + item = next(self._source) + self._ahead_buf.append(item) + + except StopIteration as err: + raise LookAheadError("peek_ahead: not enough items in source") from err + + return self._ahead_buf[n - 1] + + def history(self) -> list[T]: + """ + Return a copy of the buffered history (most recent last). + The list length ≤ `history` argument passed at construction. + """ + if self._cursor == 0: + return list(self._back_buf) + + # When cursor<0, slice so the order remains chronological + return list(self._back_buf)[: self._cursor or None] + + def can_peek_back(self, steps: int = 1) -> bool: + """ + Check if we can go back `steps` items without raising an IndexError. + """ + return steps <= len(self._back_buf) + self._cursor + + def can_peek_ahead(self, steps: int = 1) -> bool: + """ + Check if we can peek ahead `steps` items. 
+ This may involve trying to fill the ahead buffer. + """ + if self._lookahead > 0 and steps > self._lookahead: + return False + + # Try to fill ahead buffer to check if we can peek that far + try: + while len(self._ahead_buf) < steps: + if self._lookahead > 0 and len(self._ahead_buf) >= self._lookahead: + return False + item = next(self._source) + self._ahead_buf.append(item) + return True + except StopIteration: + return False + + +def safe_shard(dataset: datasets.IterableDataset, index: int, num_shards: int) -> datasets.Dataset: + """ + Safe shards the dataset. + """ + shard_idx = min(dataset.num_shards, index + 1) - 1 + + return dataset.shard(num_shards, index=shard_idx) diff --git a/src/lerobot/datasets/v30/augment_dataset_quantile_stats.py b/src/lerobot/datasets/v30/augment_dataset_quantile_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..83a60c7442f4db0a9173f716ac765dcc208f5e66 --- /dev/null +++ b/src/lerobot/datasets/v30/augment_dataset_quantile_stats.py @@ -0,0 +1,260 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This script augments existing LeRobot datasets with quantile statistics. + +Most datasets created before the quantile feature was added do not contain +quantile statistics (q01, q10, q50, q90, q99) in their metadata. This script: + +1. Loads an existing LeRobot dataset in v3.0 format +2. Checks if it already contains quantile statistics +3. If missing, computes quantile statistics for all features +4. Updates the dataset metadata with the new quantile statistics + +Usage: + +```bash +python src/lerobot/datasets/v30/augment_dataset_quantile_stats.py \ + --repo-id=lerobot/pusht \ +``` +""" + +import argparse +import concurrent.futures +import logging +from pathlib import Path + +import numpy as np +import torch +from huggingface_hub import HfApi +from requests import HTTPError +from tqdm import tqdm + +from lerobot.datasets.compute_stats import DEFAULT_QUANTILES, aggregate_stats, get_feature_stats +from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset +from lerobot.datasets.utils import write_stats +from lerobot.utils.utils import init_logging + + +def has_quantile_stats(stats: dict[str, dict] | None, quantile_list_keys: list[str] | None = None) -> bool: + """Check if dataset statistics already contain quantile information. + + Args: + stats: Dataset statistics dictionary + + Returns: + True if quantile statistics are present, False otherwise + """ + if quantile_list_keys is None: + quantile_list_keys = [f"q{int(q * 100):02d}" for q in DEFAULT_QUANTILES] + + if stats is None: + return False + + for feature_stats in stats.values(): + if any(q_key in feature_stats for q_key in quantile_list_keys): + return True + + return False + + +def process_single_episode(dataset: LeRobotDataset, episode_idx: int) -> dict: + """Process a single episode and return its statistics. 
+ + Args: + dataset: The LeRobot dataset + episode_idx: Index of the episode to process + + Returns: + Dictionary containing episode statistics + """ + logging.info(f"Computing stats for episode {episode_idx}") + + start_idx = dataset.meta.episodes[episode_idx]["dataset_from_index"] + end_idx = dataset.meta.episodes[episode_idx]["dataset_to_index"] + + collected_data: dict[str, list] = {} + for idx in range(start_idx, end_idx): + item = dataset[idx] + for key, value in item.items(): + if key not in dataset.features: + continue + + if key not in collected_data: + collected_data[key] = [] + collected_data[key].append(value) + + ep_stats = {} + for key, data_list in collected_data.items(): + if dataset.features[key]["dtype"] == "string": + continue + + data = torch.stack(data_list).cpu().numpy() + if dataset.features[key]["dtype"] in ["image", "video"]: + if data.dtype == np.uint8: + data = data.astype(np.float32) / 255.0 + + axes_to_reduce = (0, 2, 3) + keepdims = True + else: + axes_to_reduce = 0 + keepdims = data.ndim == 1 + + ep_stats[key] = get_feature_stats( + data, axis=axes_to_reduce, keepdims=keepdims, quantile_list=DEFAULT_QUANTILES + ) + + if dataset.features[key]["dtype"] in ["image", "video"]: + ep_stats[key] = { + k: v if k == "count" else np.squeeze(v, axis=0) for k, v in ep_stats[key].items() + } + + return ep_stats + + +def compute_quantile_stats_for_dataset(dataset: LeRobotDataset) -> dict[str, dict]: + """Compute quantile statistics for all episodes in the dataset. + + Args: + dataset: The LeRobot dataset to compute statistics for + + Returns: + Dictionary containing aggregated statistics with quantiles + + Note: + Video decoding operations are not thread-safe, so we process episodes sequentially + when video keys are present. For datasets without videos, we use parallel processing + with ThreadPoolExecutor for better performance. 
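+
+    Example (illustrative; assumes the dataset exposes an "action" feature):
+
+    ```python
+    dataset = LeRobotDataset("lerobot/pusht")
+    stats = compute_quantile_stats_for_dataset(dataset)
+    print(stats["action"]["q01"], stats["action"]["q99"])
+    ```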
+ """ + logging.info(f"Computing quantile statistics for dataset with {dataset.num_episodes} episodes") + + episode_stats_list = [] + has_videos = len(dataset.meta.video_keys) > 0 + + if has_videos: + logging.info("Dataset contains video keys - using sequential processing for thread safety") + for episode_idx in tqdm(range(dataset.num_episodes), desc="Processing episodes"): + ep_stats = process_single_episode(dataset, episode_idx) + episode_stats_list.append(ep_stats) + else: + logging.info("Dataset has no video keys - using parallel processing for better performance") + max_workers = min(dataset.num_episodes, 16) + + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_episode = { + executor.submit(process_single_episode, dataset, episode_idx): episode_idx + for episode_idx in range(dataset.num_episodes) + } + + episode_results = {} + with tqdm(total=dataset.num_episodes, desc="Processing episodes") as pbar: + for future in concurrent.futures.as_completed(future_to_episode): + episode_idx = future_to_episode[future] + ep_stats = future.result() + episode_results[episode_idx] = ep_stats + pbar.update(1) + + for episode_idx in range(dataset.num_episodes): + if episode_idx in episode_results: + episode_stats_list.append(episode_results[episode_idx]) + + if not episode_stats_list: + raise ValueError("No episode data found for computing statistics") + + logging.info(f"Aggregating statistics from {len(episode_stats_list)} episodes") + return aggregate_stats(episode_stats_list) + + +def augment_dataset_with_quantile_stats( + repo_id: str, + root: str | Path | None = None, + overwrite: bool = False, +) -> None: + """Augment a dataset with quantile statistics if they are missing. + + Args: + repo_id: Repository ID of the dataset + root: Local root directory for the dataset + overwrite: Overwrite existing quantile statistics if they already exist + """ + logging.info(f"Loading dataset: {repo_id}") + dataset = LeRobotDataset( + repo_id=repo_id, + root=root, + ) + + if not overwrite and has_quantile_stats(dataset.meta.stats): + logging.info("Dataset already contains quantile statistics. No action needed.") + return + + logging.info("Dataset does not contain quantile statistics. Computing them now...") + + new_stats = compute_quantile_stats_for_dataset(dataset) + + logging.info("Updating dataset metadata with new quantile statistics") + dataset.meta.stats = new_stats + + write_stats(new_stats, dataset.meta.root) + + logging.info("Successfully updated dataset with quantile statistics") + dataset.push_to_hub() + + hub_api = HfApi() + try: + hub_api.delete_tag(repo_id, tag=CODEBASE_VERSION, repo_type="dataset") + except HTTPError as e: + logging.info(f"tag={CODEBASE_VERSION} probably doesn't exist. 
Skipping exception ({e})") + pass + hub_api.create_tag(repo_id, tag=CODEBASE_VERSION, revision=None, repo_type="dataset") + + +def main(): + """Main function to run the augmentation script.""" + parser = argparse.ArgumentParser(description="Augment LeRobot dataset with quantile statistics") + + parser.add_argument( + "--repo-id", + type=str, + required=True, + help="Repository ID of the dataset (e.g., 'lerobot/pusht')", + ) + + parser.add_argument( + "--root", + type=str, + help="Local root directory for the dataset", + ) + parser.add_argument( + "--overwrite", + action="store_true", + help="Overwrite existing quantile statistics if they already exist", + ) + + args = parser.parse_args() + root = Path(args.root) if args.root else None + + init_logging() + + augment_dataset_with_quantile_stats( + repo_id=args.repo_id, + root=root, + overwrite=args.overwrite, + ) + + +if __name__ == "__main__": + main() diff --git a/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py b/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py new file mode 100644 index 0000000000000000000000000000000000000000..b3198053ba7c6773fbdc93ef4dd6bdb4ba50c524 --- /dev/null +++ b/src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py @@ -0,0 +1,571 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 2.1 to +3.0. It will: + +- Generate per-episodes stats and writes them in `episodes_stats.jsonl` +- Check consistency between these new stats and the old ones. +- Remove the deprecated `stats.json`. +- Update codebase_version in `info.json`. +- Push this new version to the hub on the 'main' branch and tags it with "v3.0". 
+ +Usage: + +Convert a dataset from the hub: +```bash +python src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py \ + --repo-id=lerobot/pusht +``` + +Convert a local dataset (works in place): +```bash +python src/lerobot/datasets/v30/convert_dataset_v21_to_v30.py \ + --repo-id=lerobot/pusht \ + --root=/path/to/local/dataset/directory + --push-to-hub=false +``` + +""" + +import argparse +import logging +import shutil +from pathlib import Path +from typing import Any + +import jsonlines +import pandas as pd +import pyarrow as pa +import tqdm +from datasets import Dataset, Features, Image +from huggingface_hub import HfApi, snapshot_download +from requests import HTTPError + +from lerobot.datasets.compute_stats import aggregate_stats +from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset +from lerobot.datasets.utils import ( + DEFAULT_CHUNK_SIZE, + DEFAULT_DATA_FILE_SIZE_IN_MB, + DEFAULT_DATA_PATH, + DEFAULT_VIDEO_FILE_SIZE_IN_MB, + DEFAULT_VIDEO_PATH, + LEGACY_EPISODES_PATH, + LEGACY_EPISODES_STATS_PATH, + LEGACY_TASKS_PATH, + cast_stats_to_numpy, + flatten_dict, + get_file_size_in_mb, + get_parquet_file_size_in_mb, + get_parquet_num_frames, + load_info, + update_chunk_file_indices, + write_episodes, + write_info, + write_stats, + write_tasks, +) +from lerobot.datasets.video_utils import concatenate_video_files, get_video_duration_in_s +from lerobot.utils.constants import HF_LEROBOT_HOME +from lerobot.utils.utils import init_logging + +V21 = "v2.1" +V30 = "v3.0" + +""" +------------------------- +OLD +data/chunk-000/episode_000000.parquet + +NEW +data/chunk-000/file_000.parquet +------------------------- +OLD +videos/chunk-000/CAMERA/episode_000000.mp4 + +NEW +videos/CAMERA/chunk-000/file_000.mp4 +------------------------- +OLD +episodes.jsonl +{"episode_index": 1, "tasks": ["Put the blue block in the green bowl"], "length": 266} + +NEW +meta/episodes/chunk-000/episodes_000.parquet +episode_index | video_chunk_index | video_file_index | data_chunk_index | data_file_index | tasks | length +------------------------- +OLD +tasks.jsonl +{"task_index": 1, "task": "Put the blue block in the green bowl"} + +NEW +meta/tasks/chunk-000/file_000.parquet +task_index | task +------------------------- +OLD +episodes_stats.jsonl + +NEW +meta/episodes_stats/chunk-000/file_000.parquet +episode_index | mean | std | min | max +------------------------- +UPDATE +meta/info.json +------------------------- +""" + + +def load_jsonlines(fpath: Path) -> list[Any]: + with jsonlines.open(fpath, "r") as reader: + return list(reader) + + +def legacy_load_episodes(local_dir: Path) -> dict: + episodes = load_jsonlines(local_dir / LEGACY_EPISODES_PATH) + return {item["episode_index"]: item for item in sorted(episodes, key=lambda x: x["episode_index"])} + + +def legacy_load_episodes_stats(local_dir: Path) -> dict: + episodes_stats = load_jsonlines(local_dir / LEGACY_EPISODES_STATS_PATH) + return { + item["episode_index"]: cast_stats_to_numpy(item["stats"]) + for item in sorted(episodes_stats, key=lambda x: x["episode_index"]) + } + + +def legacy_load_tasks(local_dir: Path) -> tuple[dict, dict]: + tasks = load_jsonlines(local_dir / LEGACY_TASKS_PATH) + tasks = {item["task_index"]: item["task"] for item in sorted(tasks, key=lambda x: x["task_index"])} + task_to_task_index = {task: task_index for task_index, task in tasks.items()} + return tasks, task_to_task_index + + +def validate_local_dataset_version(local_path: Path) -> None: + """Validate that the local dataset has the expected v2.1 version.""" 
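+    # Note: "codebase_version" is read from meta/info.json; a v2.1 dataset should report "v2.1" here.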
+ info = load_info(local_path) + dataset_version = info.get("codebase_version", "unknown") + if dataset_version != V21: + raise ValueError( + f"Local dataset has codebase version '{dataset_version}', expected '{V21}'. " + f"This script is specifically for converting v2.1 datasets to v3.0." + ) + + +def convert_tasks(root, new_root): + logging.info(f"Converting tasks from {root} to {new_root}") + tasks, _ = legacy_load_tasks(root) + task_indices = tasks.keys() + task_strings = tasks.values() + df_tasks = pd.DataFrame({"task_index": task_indices}, index=task_strings) + write_tasks(df_tasks, new_root) + + +def concat_data_files(paths_to_cat, new_root, chunk_idx, file_idx, image_keys): + # TODO(rcadene): to save RAM use Dataset.from_parquet(file) and concatenate_datasets + dataframes = [pd.read_parquet(file) for file in paths_to_cat] + # Concatenate all DataFrames along rows + concatenated_df = pd.concat(dataframes, ignore_index=True) + + path = new_root / DEFAULT_DATA_PATH.format(chunk_index=chunk_idx, file_index=file_idx) + path.parent.mkdir(parents=True, exist_ok=True) + + if len(image_keys) > 0: + schema = pa.Schema.from_pandas(concatenated_df) + features = Features.from_arrow_schema(schema) + for key in image_keys: + features[key] = Image() + schema = features.arrow_schema + else: + schema = None + + concatenated_df.to_parquet(path, index=False, schema=schema) + + +def convert_data(root: Path, new_root: Path, data_file_size_in_mb: int): + data_dir = root / "data" + ep_paths = sorted(data_dir.glob("*/*.parquet")) + + image_keys = get_image_keys(root) + + ep_idx = 0 + chunk_idx = 0 + file_idx = 0 + size_in_mb = 0 + num_frames = 0 + paths_to_cat = [] + episodes_metadata = [] + + logging.info(f"Converting data files from {len(ep_paths)} episodes") + + for ep_path in tqdm.tqdm(ep_paths, desc="convert data files"): + ep_size_in_mb = get_parquet_file_size_in_mb(ep_path) + ep_num_frames = get_parquet_num_frames(ep_path) + ep_metadata = { + "episode_index": ep_idx, + "data/chunk_index": chunk_idx, + "data/file_index": file_idx, + "dataset_from_index": num_frames, + "dataset_to_index": num_frames + ep_num_frames, + } + size_in_mb += ep_size_in_mb + num_frames += ep_num_frames + episodes_metadata.append(ep_metadata) + ep_idx += 1 + + if size_in_mb < data_file_size_in_mb: + paths_to_cat.append(ep_path) + continue + + if paths_to_cat: + concat_data_files(paths_to_cat, new_root, chunk_idx, file_idx, image_keys) + + # Reset for the next file + size_in_mb = ep_size_in_mb + paths_to_cat = [ep_path] + + chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, DEFAULT_CHUNK_SIZE) + + # Write remaining data if any + if paths_to_cat: + concat_data_files(paths_to_cat, new_root, chunk_idx, file_idx, image_keys) + + return episodes_metadata + + +def get_video_keys(root): + info = load_info(root) + features = info["features"] + video_keys = [key for key, ft in features.items() if ft["dtype"] == "video"] + return video_keys + + +def get_image_keys(root): + info = load_info(root) + features = info["features"] + image_keys = [key for key, ft in features.items() if ft["dtype"] == "image"] + return image_keys + + +def convert_videos(root: Path, new_root: Path, video_file_size_in_mb: int): + logging.info(f"Converting videos from {root} to {new_root}") + + video_keys = get_video_keys(root) + if len(video_keys) == 0: + return None + + video_keys = sorted(video_keys) + + eps_metadata_per_cam = [] + for camera in video_keys: + eps_metadata = convert_videos_of_camera(root, new_root, camera, 
video_file_size_in_mb) + eps_metadata_per_cam.append(eps_metadata) + + num_eps_per_cam = [len(eps_cam_map) for eps_cam_map in eps_metadata_per_cam] + if len(set(num_eps_per_cam)) != 1: + raise ValueError(f"All cams dont have same number of episodes ({num_eps_per_cam}).") + + episods_metadata = [] + num_cameras = len(video_keys) + num_episodes = num_eps_per_cam[0] + for ep_idx in tqdm.tqdm(range(num_episodes), desc="convert videos"): + # Sanity check + ep_ids = [eps_metadata_per_cam[cam_idx][ep_idx]["episode_index"] for cam_idx in range(num_cameras)] + ep_ids += [ep_idx] + if len(set(ep_ids)) != 1: + raise ValueError(f"All episode indices need to match ({ep_ids}).") + + ep_dict = {} + for cam_idx in range(num_cameras): + ep_dict.update(eps_metadata_per_cam[cam_idx][ep_idx]) + episods_metadata.append(ep_dict) + + return episods_metadata + + +def convert_videos_of_camera(root: Path, new_root: Path, video_key: str, video_file_size_in_mb: int): + # Access old paths to mp4 + videos_dir = root / "videos" + ep_paths = sorted(videos_dir.glob(f"*/{video_key}/*.mp4")) + + ep_idx = 0 + chunk_idx = 0 + file_idx = 0 + size_in_mb = 0 + duration_in_s = 0.0 + paths_to_cat = [] + episodes_metadata = [] + + for ep_path in tqdm.tqdm(ep_paths, desc=f"convert videos of {video_key}"): + ep_size_in_mb = get_file_size_in_mb(ep_path) + ep_duration_in_s = get_video_duration_in_s(ep_path) + + # Check if adding this episode would exceed the limit + if size_in_mb + ep_size_in_mb >= video_file_size_in_mb and len(paths_to_cat) > 0: + # Size limit would be exceeded, save current accumulation WITHOUT this episode + concatenate_video_files( + paths_to_cat, + new_root + / DEFAULT_VIDEO_PATH.format(video_key=video_key, chunk_index=chunk_idx, file_index=file_idx), + ) + + # Update episodes metadata for the file we just saved + for i, _ in enumerate(paths_to_cat): + past_ep_idx = ep_idx - len(paths_to_cat) + i + episodes_metadata[past_ep_idx][f"videos/{video_key}/chunk_index"] = chunk_idx + episodes_metadata[past_ep_idx][f"videos/{video_key}/file_index"] = file_idx + + # Move to next file and start fresh with current episode + chunk_idx, file_idx = update_chunk_file_indices(chunk_idx, file_idx, DEFAULT_CHUNK_SIZE) + size_in_mb = 0 + duration_in_s = 0.0 + paths_to_cat = [] + + # Add current episode metadata + ep_metadata = { + "episode_index": ep_idx, + f"videos/{video_key}/chunk_index": chunk_idx, # Will be updated when file is saved + f"videos/{video_key}/file_index": file_idx, # Will be updated when file is saved + f"videos/{video_key}/from_timestamp": duration_in_s, + f"videos/{video_key}/to_timestamp": duration_in_s + ep_duration_in_s, + } + episodes_metadata.append(ep_metadata) + + # Add current episode to accumulation + paths_to_cat.append(ep_path) + size_in_mb += ep_size_in_mb + duration_in_s += ep_duration_in_s + ep_idx += 1 + + # Write remaining videos if any + if paths_to_cat: + concatenate_video_files( + paths_to_cat, + new_root + / DEFAULT_VIDEO_PATH.format(video_key=video_key, chunk_index=chunk_idx, file_index=file_idx), + ) + + # Update episodes metadata for the final file + for i, _ in enumerate(paths_to_cat): + past_ep_idx = ep_idx - len(paths_to_cat) + i + episodes_metadata[past_ep_idx][f"videos/{video_key}/chunk_index"] = chunk_idx + episodes_metadata[past_ep_idx][f"videos/{video_key}/file_index"] = file_idx + + return episodes_metadata + + +def generate_episode_metadata_dict( + episodes_legacy_metadata, episodes_metadata, episodes_stats, episodes_videos=None +): + num_episodes = len(episodes_metadata) + 
episodes_legacy_metadata_vals = list(episodes_legacy_metadata.values()) + episodes_stats_vals = list(episodes_stats.values()) + episodes_stats_keys = list(episodes_stats.keys()) + + for i in range(num_episodes): + ep_legacy_metadata = episodes_legacy_metadata_vals[i] + ep_metadata = episodes_metadata[i] + ep_stats = episodes_stats_vals[i] + + ep_ids_set = { + ep_legacy_metadata["episode_index"], + ep_metadata["episode_index"], + episodes_stats_keys[i], + } + + if episodes_videos is None: + ep_video = {} + else: + ep_video = episodes_videos[i] + ep_ids_set.add(ep_video["episode_index"]) + + if len(ep_ids_set) != 1: + raise ValueError(f"Number of episodes is not the same ({ep_ids_set}).") + + ep_dict = {**ep_metadata, **ep_video, **ep_legacy_metadata, **flatten_dict({"stats": ep_stats})} + ep_dict["meta/episodes/chunk_index"] = 0 + ep_dict["meta/episodes/file_index"] = 0 + yield ep_dict + + +def convert_episodes_metadata(root, new_root, episodes_metadata, episodes_video_metadata=None): + logging.info(f"Converting episodes metadata from {root} to {new_root}") + + episodes_legacy_metadata = legacy_load_episodes(root) + episodes_stats = legacy_load_episodes_stats(root) + + num_eps_set = {len(episodes_legacy_metadata), len(episodes_metadata)} + if episodes_video_metadata is not None: + num_eps_set.add(len(episodes_video_metadata)) + + if len(num_eps_set) != 1: + raise ValueError(f"Number of episodes is not the same ({num_eps_set}).") + + ds_episodes = Dataset.from_generator( + lambda: generate_episode_metadata_dict( + episodes_legacy_metadata, episodes_metadata, episodes_stats, episodes_video_metadata + ) + ) + write_episodes(ds_episodes, new_root) + + stats = aggregate_stats(list(episodes_stats.values())) + write_stats(stats, new_root) + + +def convert_info(root, new_root, data_file_size_in_mb, video_file_size_in_mb): + info = load_info(root) + info["codebase_version"] = V30 + del info["total_chunks"] + del info["total_videos"] + info["data_files_size_in_mb"] = data_file_size_in_mb + info["video_files_size_in_mb"] = video_file_size_in_mb + info["data_path"] = DEFAULT_DATA_PATH + info["video_path"] = DEFAULT_VIDEO_PATH if info["video_path"] is not None else None + info["fps"] = int(info["fps"]) + logging.info(f"Converting info from {root} to {new_root}") + for key in info["features"]: + if info["features"][key]["dtype"] == "video": + # already has fps in video_info + continue + info["features"][key]["fps"] = info["fps"] + write_info(info, new_root) + + +def convert_dataset( + repo_id: str, + branch: str | None = None, + data_file_size_in_mb: int | None = None, + video_file_size_in_mb: int | None = None, + root: str | Path | None = None, + push_to_hub: bool = True, + force_conversion: bool = False, +): + if data_file_size_in_mb is None: + data_file_size_in_mb = DEFAULT_DATA_FILE_SIZE_IN_MB + if video_file_size_in_mb is None: + video_file_size_in_mb = DEFAULT_VIDEO_FILE_SIZE_IN_MB + + # First check if the dataset already has a v3.0 version + if root is None and not force_conversion: + try: + print("Trying to download v3.0 version of the dataset from the hub...") + snapshot_download(repo_id, repo_type="dataset", revision=V30, local_dir=HF_LEROBOT_HOME / repo_id) + return + except Exception: + print("Dataset does not have an uploaded v3.0 version. 
Continuing with conversion.") + + # Set root based on whether local dataset path is provided + use_local_dataset = False + root = HF_LEROBOT_HOME / repo_id if root is None else Path(root) / repo_id + if root.exists(): + validate_local_dataset_version(root) + use_local_dataset = True + print(f"Using local dataset at {root}") + + old_root = root.parent / f"{root.name}_old" + new_root = root.parent / f"{root.name}_v30" + + # Handle old_root cleanup if both old_root and root exist + if old_root.is_dir() and root.is_dir(): + shutil.rmtree(str(root)) + shutil.move(str(old_root), str(root)) + + if new_root.is_dir(): + shutil.rmtree(new_root) + + if not use_local_dataset: + snapshot_download( + repo_id, + repo_type="dataset", + revision=V21, + local_dir=root, + ) + + convert_info(root, new_root, data_file_size_in_mb, video_file_size_in_mb) + convert_tasks(root, new_root) + episodes_metadata = convert_data(root, new_root, data_file_size_in_mb) + episodes_videos_metadata = convert_videos(root, new_root, video_file_size_in_mb) + convert_episodes_metadata(root, new_root, episodes_metadata, episodes_videos_metadata) + + shutil.move(str(root), str(old_root)) + shutil.move(str(new_root), str(root)) + + if push_to_hub: + hub_api = HfApi() + try: + hub_api.delete_tag(repo_id, tag=CODEBASE_VERSION, repo_type="dataset") + except HTTPError as e: + print(f"tag={CODEBASE_VERSION} probably doesn't exist. Skipping exception ({e})") + pass + hub_api.delete_files( + delete_patterns=["data/chunk*/episode_*", "meta/*.jsonl", "videos/chunk*"], + repo_id=repo_id, + revision=branch, + repo_type="dataset", + ) + hub_api.create_tag(repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset") + + LeRobotDataset(repo_id).push_to_hub() + + +if __name__ == "__main__": + init_logging() + parser = argparse.ArgumentParser() + parser.add_argument( + "--repo-id", + type=str, + required=True, + help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset " + "(e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).", + ) + parser.add_argument( + "--branch", + type=str, + default=None, + help="Repo branch to push your dataset. Defaults to the main branch.", + ) + parser.add_argument( + "--data-file-size-in-mb", + type=int, + default=None, + help="File size in MB. Defaults to 100 for data and 500 for videos.", + ) + parser.add_argument( + "--video-file-size-in-mb", + type=int, + default=None, + help="File size in MB. Defaults to 100 for data and 500 for videos.", + ) + parser.add_argument( + "--root", + type=str, + default=None, + help="Local directory to use for downloading/writing the dataset.", + ) + parser.add_argument( + "--push-to-hub", + type=lambda input: input.lower() == "true", + default=True, + help="Push the converted dataset to the hub.", + ) + parser.add_argument( + "--force-conversion", + action="store_true", + help="Force conversion even if the dataset already has a v3.0 version.", + ) + + args = parser.parse_args() + convert_dataset(**vars(args)) diff --git a/src/lerobot/datasets/video_utils.py b/src/lerobot/datasets/video_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..70a4ca9c1a124b815866e87ba16253346815e785 --- /dev/null +++ b/src/lerobot/datasets/video_utils.py @@ -0,0 +1,674 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import glob +import importlib +import logging +import shutil +import tempfile +import warnings +from dataclasses import dataclass, field +from pathlib import Path +from threading import Lock +from typing import Any, ClassVar + +import av +import fsspec +import pyarrow as pa +import torch +import torchvision +from datasets.features.features import register_feature +from PIL import Image + + +def get_safe_default_codec(): + if importlib.util.find_spec("torchcodec"): + return "torchcodec" + else: + logging.warning( + "'torchcodec' is not available in your platform, falling back to 'pyav' as a default decoder" + ) + return "pyav" + + +def decode_video_frames( + video_path: Path | str, + timestamps: list[float], + tolerance_s: float, + backend: str | None = None, +) -> torch.Tensor: + """ + Decodes video frames using the specified backend. + + Args: + video_path (Path): Path to the video file. + timestamps (list[float]): List of timestamps to extract frames. + tolerance_s (float): Allowed deviation in seconds for frame retrieval. + backend (str, optional): Backend to use for decoding. Defaults to "torchcodec" when available in the platform; otherwise, defaults to "pyav".. + + Returns: + torch.Tensor: Decoded frames. + + Currently supports torchcodec on cpu and pyav. + """ + if backend is None: + backend = get_safe_default_codec() + if backend == "torchcodec": + return decode_video_frames_torchcodec(video_path, timestamps, tolerance_s) + elif backend in ["pyav", "video_reader"]: + return decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend) + else: + raise ValueError(f"Unsupported video backend: {backend}") + + +def decode_video_frames_torchvision( + video_path: Path | str, + timestamps: list[float], + tolerance_s: float, + backend: str = "pyav", + log_loaded_timestamps: bool = False, +) -> torch.Tensor: + """Loads frames associated to the requested timestamps of a video + + The backend can be either "pyav" (default) or "video_reader". + "video_reader" requires installing torchvision from source, see: + https://github.com/pytorch/vision/blob/main/torchvision/csrc/io/decoder/gpu/README.rst + (note that you need to compile against ffmpeg<4.3) + + While both use cpu, "video_reader" is supposedly faster than "pyav" but requires additional setup. + For more info on video decoding, see `benchmark/video/README.md` + + See torchvision doc for more info on these two backends: + https://pytorch.org/vision/0.18/index.html?highlight=backend#torchvision.set_video_backend + + Note: Video benefits from inter-frame compression. Instead of storing every frame individually, + the encoder stores a reference frame (or a key frame) and subsequent frames as differences relative to + that key frame. As a consequence, to access a requested frame, we need to load the preceding key frame, + and all subsequent frames until reaching the requested frame. The number of key frames in a video + can be adjusted during encoding to take into account decoding time and video size in bytes. 
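+
+    Example (illustrative; the file path and timestamps are placeholders):
+
+    ```python
+    frames = decode_video_frames_torchvision("episode_000000.mp4", timestamps=[0.0, 0.1], tolerance_s=1e-4)
+    print(frames.shape)  # (2, C, H, W), float32 in [0, 1]
+    ```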
+    """
+    video_path = str(video_path)
+
+    # set backend
+    keyframes_only = False
+    torchvision.set_video_backend(backend)
+    if backend == "pyav":
+        keyframes_only = True  # pyav doesn't support accurate seek
+
+    # set a video stream reader
+    # TODO(rcadene): also load audio stream at the same time
+    reader = torchvision.io.VideoReader(video_path, "video")
+
+    # set the first and last requested timestamps
+    # Note: previous timestamps are usually loaded, since we need to access the previous key frame
+    first_ts = min(timestamps)
+    last_ts = max(timestamps)
+
+    # access closest key frame of the first requested frame
+    # Note: closest key frame timestamp is usually smaller than `first_ts` (e.g. key frame can be the first frame of the video)
+    # for details on what `seek` is doing see: https://pyav.basswood-io.com/docs/stable/api/container.html?highlight=inputcontainer#av.container.InputContainer.seek
+    reader.seek(first_ts, keyframes_only=keyframes_only)
+
+    # load all frames until last requested frame
+    loaded_frames = []
+    loaded_ts = []
+    for frame in reader:
+        current_ts = frame["pts"]
+        if log_loaded_timestamps:
+            logging.info(f"frame loaded at timestamp={current_ts:.4f}")
+        loaded_frames.append(frame["data"])
+        loaded_ts.append(current_ts)
+        if current_ts >= last_ts:
+            break
+
+    if backend == "pyav":
+        reader.container.close()
+
+    reader = None
+
+    query_ts = torch.tensor(timestamps)
+    loaded_ts = torch.tensor(loaded_ts)
+
+    # compute distances between each query timestamp and timestamps of all loaded frames
+    dist = torch.cdist(query_ts[:, None], loaded_ts[:, None], p=1)
+    min_, argmin_ = dist.min(1)
+
+    is_within_tol = min_ < tolerance_s
+    assert is_within_tol.all(), (
+        f"One or several query timestamps unexpectedly violate the tolerance ({min_[~is_within_tol]} > {tolerance_s=})."
+        " It means that the closest frame that can be loaded from the video is too far away in time."
+        " This might be due to synchronization issues with timestamps during data collection."
+        " To be safe, we advise ignoring this item during training."
+ f"\nqueried timestamps: {query_ts}" + f"\nloaded timestamps: {loaded_ts}" + f"\nvideo: {video_path}" + f"\nbackend: {backend}" + ) + + # get closest frames to the query timestamps + closest_frames = torch.stack([loaded_frames[idx] for idx in argmin_]) + closest_ts = loaded_ts[argmin_] + + if log_loaded_timestamps: + logging.info(f"{closest_ts=}") + + # convert to the pytorch format which is float32 in [0,1] range (and channel first) + closest_frames = closest_frames.type(torch.float32) / 255 + + assert len(timestamps) == len(closest_frames) + return closest_frames + + +class VideoDecoderCache: + """Thread-safe cache for video decoders to avoid expensive re-initialization.""" + + def __init__(self): + self._cache: dict[str, tuple[Any, Any]] = {} + self._lock = Lock() + + def get_decoder(self, video_path: str): + """Get a cached decoder or create a new one.""" + if importlib.util.find_spec("torchcodec"): + from torchcodec.decoders import VideoDecoder + else: + raise ImportError("torchcodec is required but not available.") + + video_path = str(video_path) + + with self._lock: + if video_path not in self._cache: + file_handle = fsspec.open(video_path).__enter__() + decoder = VideoDecoder(file_handle, seek_mode="approximate") + self._cache[video_path] = (decoder, file_handle) + + return self._cache[video_path][0] + + def clear(self): + """Clear the cache and close file handles.""" + with self._lock: + for _, file_handle in self._cache.values(): + file_handle.close() + self._cache.clear() + + def size(self) -> int: + """Return the number of cached decoders.""" + with self._lock: + return len(self._cache) + + +class FrameTimestampError(ValueError): + """Helper error to indicate the retrieved timestamps exceed the queried ones""" + + pass + + +_default_decoder_cache = VideoDecoderCache() + + +def decode_video_frames_torchcodec( + video_path: Path | str, + timestamps: list[float], + tolerance_s: float, + log_loaded_timestamps: bool = False, + decoder_cache: VideoDecoderCache | None = None, +) -> torch.Tensor: + """Loads frames associated with the requested timestamps of a video using torchcodec. + + Args: + video_path: Path to the video file. + timestamps: List of timestamps to extract frames. + tolerance_s: Allowed deviation in seconds for frame retrieval. + log_loaded_timestamps: Whether to log loaded timestamps. + decoder_cache: Optional decoder cache instance. Uses default if None. + + Note: Setting device="cuda" outside the main process, e.g. in data loader workers, will lead to CUDA initialization errors. + + Note: Video benefits from inter-frame compression. Instead of storing every frame individually, + the encoder stores a reference frame (or a key frame) and subsequent frames as differences relative to + that key frame. As a consequence, to access a requested frame, we need to load the preceding key frame, + and all subsequent frames until reaching the requested frame. The number of key frames in a video + can be adjusted during encoding to take into account decoding time and video size in bytes. 
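+
+    Example (a minimal usage sketch; the video path and tolerance are placeholders):
+
+    ```python
+    frames = decode_video_frames_torchcodec(
+        "videos/episode_000000.mp4",  # hypothetical local path
+        timestamps=[0.0, 0.5, 1.0],
+        tolerance_s=1e-4,
+    )
+    # Decoders are cached per path in the module-level `_default_decoder_cache`;
+    # pass a dedicated `VideoDecoderCache()` via `decoder_cache` to isolate caching (e.g. in tests).
+    ```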
+ """ + if decoder_cache is None: + decoder_cache = _default_decoder_cache + + # Use cached decoder instead of creating new one each time + decoder = decoder_cache.get_decoder(str(video_path)) + + loaded_ts = [] + loaded_frames = [] + + # get metadata for frame information + metadata = decoder.metadata + average_fps = metadata.average_fps + # convert timestamps to frame indices + frame_indices = [round(ts * average_fps) for ts in timestamps] + # retrieve frames based on indices + frames_batch = decoder.get_frames_at(indices=frame_indices) + + for frame, pts in zip(frames_batch.data, frames_batch.pts_seconds, strict=True): + loaded_frames.append(frame) + loaded_ts.append(pts.item()) + if log_loaded_timestamps: + logging.info(f"Frame loaded at timestamp={pts:.4f}") + + query_ts = torch.tensor(timestamps) + loaded_ts = torch.tensor(loaded_ts) + + # compute distances between each query timestamp and loaded timestamps + dist = torch.cdist(query_ts[:, None], loaded_ts[:, None], p=1) + min_, argmin_ = dist.min(1) + + is_within_tol = min_ < tolerance_s + assert is_within_tol.all(), ( + f"One or several query timestamps unexpectedly violate the tolerance ({min_[~is_within_tol]} > {tolerance_s=})." + "It means that the closest frame that can be loaded from the video is too far away in time." + "This might be due to synchronization issues with timestamps during data collection." + "To be safe, we advise to ignore this item during training." + f"\nqueried timestamps: {query_ts}" + f"\nloaded timestamps: {loaded_ts}" + f"\nvideo: {video_path}" + ) + + # get closest frames to the query timestamps + closest_frames = torch.stack([loaded_frames[idx] for idx in argmin_]) + closest_ts = loaded_ts[argmin_] + + if log_loaded_timestamps: + logging.info(f"{closest_ts=}") + + # convert to float32 in [0,1] range + closest_frames = (closest_frames / 255.0).type(torch.float32) + + if not len(timestamps) == len(closest_frames): + raise FrameTimestampError( + f"Retrieved timestamps differ from queried {set(closest_frames) - set(timestamps)}" + ) + + return closest_frames + + +def encode_video_frames( + imgs_dir: Path | str, + video_path: Path | str, + fps: int, + vcodec: str = "libsvtav1", + pix_fmt: str = "yuv420p", + g: int | None = 2, + crf: int | None = 30, + fast_decode: int = 0, + log_level: int | None = av.logging.ERROR, + overwrite: bool = False, +) -> None: + """More info on ffmpeg arguments tuning on `benchmark/video/README.md`""" + # Check encoder availability + if vcodec not in ["h264", "hevc", "libsvtav1"]: + raise ValueError(f"Unsupported video codec: {vcodec}. Supported codecs are: h264, hevc, libsvtav1.") + + video_path = Path(video_path) + imgs_dir = Path(imgs_dir) + + if video_path.exists() and not overwrite: + logging.warning(f"Video file already exists: {video_path}. 
Skipping encoding.") + return + + video_path.parent.mkdir(parents=True, exist_ok=True) + + # Encoders/pixel formats incompatibility check + if (vcodec == "libsvtav1" or vcodec == "hevc") and pix_fmt == "yuv444p": + logging.warning( + f"Incompatible pixel format 'yuv444p' for codec {vcodec}, auto-selecting format 'yuv420p'" + ) + pix_fmt = "yuv420p" + + # Get input frames + template = "frame-" + ("[0-9]" * 6) + ".png" + input_list = sorted( + glob.glob(str(imgs_dir / template)), key=lambda x: int(x.split("-")[-1].split(".")[0]) + ) + + # Define video output frame size (assuming all input frames are the same size) + if len(input_list) == 0: + raise FileNotFoundError(f"No images found in {imgs_dir}.") + with Image.open(input_list[0]) as dummy_image: + width, height = dummy_image.size + + # Define video codec options + video_options = {} + + if g is not None: + video_options["g"] = str(g) + + if crf is not None: + video_options["crf"] = str(crf) + + if fast_decode: + key = "svtav1-params" if vcodec == "libsvtav1" else "tune" + value = f"fast-decode={fast_decode}" if vcodec == "libsvtav1" else "fastdecode" + video_options[key] = value + + # Set logging level + if log_level is not None: + # "While less efficient, it is generally preferable to modify logging with Python's logging" + logging.getLogger("libav").setLevel(log_level) + + # Create and open output file (overwrite by default) + with av.open(str(video_path), "w") as output: + output_stream = output.add_stream(vcodec, fps, options=video_options) + output_stream.pix_fmt = pix_fmt + output_stream.width = width + output_stream.height = height + + # Loop through input frames and encode them + for input_data in input_list: + with Image.open(input_data) as input_image: + input_image = input_image.convert("RGB") + input_frame = av.VideoFrame.from_image(input_image) + packet = output_stream.encode(input_frame) + if packet: + output.mux(packet) + + # Flush the encoder + packet = output_stream.encode() + if packet: + output.mux(packet) + + # Reset logging level + if log_level is not None: + av.logging.restore_default_callback() + + if not video_path.exists(): + raise OSError(f"Video encoding did not work. File not found: {video_path}.") + + +def concatenate_video_files( + input_video_paths: list[Path | str], output_video_path: Path, overwrite: bool = True +): + """ + Concatenate multiple video files into a single video file using pyav. + + This function takes a list of video input file paths and concatenates them into a single + output video file. It uses ffmpeg's concat demuxer with stream copy mode for fast + concatenation without re-encoding. + + Args: + input_video_paths: Ordered list of input video file paths to concatenate. + output_video_path: Path to the output video file. + overwrite: Whether to overwrite the output video file if it already exists. Default is True. + + Note: + - Creates a temporary directory for intermediate files that is cleaned up after use. + - Uses ffmpeg's concat demuxer which requires all input videos to have the same + codec, resolution, and frame rate for proper concatenation. + """ + + output_video_path = Path(output_video_path) + + if output_video_path.exists() and not overwrite: + logging.warning(f"Video file already exists: {output_video_path}. 
Skipping concatenation.") + return + + output_video_path.parent.mkdir(parents=True, exist_ok=True) + + if len(input_video_paths) == 0: + raise FileNotFoundError("No input video paths provided.") + + # Create a temporary .ffconcat file to list the input video paths + with tempfile.NamedTemporaryFile(mode="w", suffix=".ffconcat", delete=False) as tmp_concatenate_file: + tmp_concatenate_file.write("ffconcat version 1.0\n") + for input_path in input_video_paths: + tmp_concatenate_file.write(f"file '{str(input_path.resolve())}'\n") + tmp_concatenate_file.flush() + tmp_concatenate_path = tmp_concatenate_file.name + + # Create input and output containers + input_container = av.open( + tmp_concatenate_path, mode="r", format="concat", options={"safe": "0"} + ) # safe = 0 allows absolute paths as well as relative paths + + with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp_named_file: + tmp_output_video_path = tmp_named_file.name + + output_container = av.open( + tmp_output_video_path, mode="w", options={"movflags": "faststart"} + ) # faststart is to move the metadata to the beginning of the file to speed up loading + + # Replicate input streams in output container + stream_map = {} + for input_stream in input_container.streams: + if input_stream.type in ("video", "audio", "subtitle"): # only copy compatible streams + stream_map[input_stream.index] = output_container.add_stream_from_template( + template=input_stream, opaque=True + ) + + # set the time base to the input stream time base (missing in the codec context) + stream_map[input_stream.index].time_base = input_stream.time_base + + # Demux + remux packets (no re-encode) + for packet in input_container.demux(): + # Skip packets from un-mapped streams + if packet.stream.index not in stream_map: + continue + + # Skip demux flushing packets + if packet.dts is None: + continue + + output_stream = stream_map[packet.stream.index] + packet.stream = output_stream + output_container.mux(packet) + + input_container.close() + output_container.close() + shutil.move(tmp_output_video_path, output_video_path) + Path(tmp_concatenate_path).unlink() + + +@dataclass +class VideoFrame: + # TODO(rcadene, lhoestq): move to Hugging Face `datasets` repo + """ + Provides a type for a dataset containing video frames. 
+ + Example: + + ```python + data_dict = [{"image": {"path": "videos/episode_0.mp4", "timestamp": 0.3}}] + features = {"image": VideoFrame()} + Dataset.from_dict(data_dict, features=Features(features)) + ``` + """ + + pa_type: ClassVar[Any] = pa.struct({"path": pa.string(), "timestamp": pa.float32()}) + _type: str = field(default="VideoFrame", init=False, repr=False) + + def __call__(self): + return self.pa_type + + +with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "'register_feature' is experimental and might be subject to breaking changes in the future.", + category=UserWarning, + ) + # to make VideoFrame available in HuggingFace `datasets` + register_feature(VideoFrame, "VideoFrame") + + +def get_audio_info(video_path: Path | str) -> dict: + # Set logging level + logging.getLogger("libav").setLevel(av.logging.ERROR) + + # Getting audio stream information + audio_info = {} + with av.open(str(video_path), "r") as audio_file: + try: + audio_stream = audio_file.streams.audio[0] + except IndexError: + # Reset logging level + av.logging.restore_default_callback() + return {"has_audio": False} + + audio_info["audio.channels"] = audio_stream.channels + audio_info["audio.codec"] = audio_stream.codec.canonical_name + # In an ideal loseless case : bit depth x sample rate x channels = bit rate. + # In an actual compressed case, the bit rate is set according to the compression level : the lower the bit rate, the more compression is applied. + audio_info["audio.bit_rate"] = audio_stream.bit_rate + audio_info["audio.sample_rate"] = audio_stream.sample_rate # Number of samples per second + # In an ideal loseless case : fixed number of bits per sample. + # In an actual compressed case : variable number of bits per sample (often reduced to match a given depth rate). + audio_info["audio.bit_depth"] = audio_stream.format.bits + audio_info["audio.channel_layout"] = audio_stream.layout.name + audio_info["has_audio"] = True + + # Reset logging level + av.logging.restore_default_callback() + + return audio_info + + +def get_video_info(video_path: Path | str) -> dict: + # Set logging level + logging.getLogger("libav").setLevel(av.logging.ERROR) + + # Getting video stream information + video_info = {} + with av.open(str(video_path), "r") as video_file: + try: + video_stream = video_file.streams.video[0] + except IndexError: + # Reset logging level + av.logging.restore_default_callback() + return {} + + video_info["video.height"] = video_stream.height + video_info["video.width"] = video_stream.width + video_info["video.codec"] = video_stream.codec.canonical_name + video_info["video.pix_fmt"] = video_stream.pix_fmt + video_info["video.is_depth_map"] = False + + # Calculate fps from r_frame_rate + video_info["video.fps"] = int(video_stream.base_rate) + + pixel_channels = get_video_pixel_channels(video_stream.pix_fmt) + video_info["video.channels"] = pixel_channels + + # Reset logging level + av.logging.restore_default_callback() + + # Adding audio stream information + video_info.update(**get_audio_info(video_path)) + + return video_info + + +def get_video_pixel_channels(pix_fmt: str) -> int: + if "gray" in pix_fmt or "depth" in pix_fmt or "monochrome" in pix_fmt: + return 1 + elif "rgba" in pix_fmt or "yuva" in pix_fmt: + return 4 + elif "rgb" in pix_fmt or "yuv" in pix_fmt: + return 3 + else: + raise ValueError("Unknown format") + + +def get_video_duration_in_s(video_path: Path | str) -> float: + """ + Get the duration of a video file in seconds using PyAV. 
+ + Args: + video_path: Path to the video file. + + Returns: + Duration of the video in seconds. + """ + with av.open(str(video_path)) as container: + # Get the first video stream + video_stream = container.streams.video[0] + # Calculate duration: stream.duration * stream.time_base gives duration in seconds + if video_stream.duration is not None: + duration = float(video_stream.duration * video_stream.time_base) + else: + # Fallback to container duration if stream duration is not available + duration = float(container.duration / av.time_base) + return duration + + +class VideoEncodingManager: + """ + Context manager that ensures proper video encoding and data cleanup even if exceptions occur. + + This manager handles: + - Batch encoding for any remaining episodes when recording interrupted + - Cleaning up temporary image files from interrupted episodes + - Removing empty image directories + + Args: + dataset: The LeRobotDataset instance + """ + + def __init__(self, dataset): + self.dataset = dataset + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + # Handle any remaining episodes that haven't been batch encoded + if self.dataset.episodes_since_last_encoding > 0: + if exc_type is not None: + logging.info("Exception occurred. Encoding remaining episodes before exit...") + else: + logging.info("Recording stopped. Encoding remaining episodes...") + + start_ep = self.dataset.num_episodes - self.dataset.episodes_since_last_encoding + end_ep = self.dataset.num_episodes + logging.info( + f"Encoding remaining {self.dataset.episodes_since_last_encoding} episodes, " + f"from episode {start_ep} to {end_ep - 1}" + ) + self.dataset._batch_save_episode_video(start_ep, end_ep) + + # Finalize the dataset to properly close all writers + self.dataset.finalize() + + # Clean up episode images if recording was interrupted + if exc_type is not None: + interrupted_episode_index = self.dataset.num_episodes + for key in self.dataset.meta.video_keys: + img_dir = self.dataset._get_image_file_path( + episode_index=interrupted_episode_index, image_key=key, frame_index=0 + ).parent + if img_dir.exists(): + logging.debug( + f"Cleaning up interrupted episode images for episode {interrupted_episode_index}, camera {key}" + ) + shutil.rmtree(img_dir) + + # Clean up any remaining images directory if it's empty + img_dir = self.dataset.root / "images" + # Check for any remaining PNG files + png_files = list(img_dir.rglob("*.png")) + if len(png_files) == 0: + # Only remove the images directory if no PNG files remain + if img_dir.exists(): + shutil.rmtree(img_dir) + logging.debug("Cleaned up empty images directory") + else: + logging.debug(f"Images directory is not empty, containing {len(png_files)} PNG files") + + return False # Don't suppress the original exception diff --git a/src/lerobot/envs/__init__.py b/src/lerobot/envs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..16e1ae75f3e2eb852fbbd5e0ed48f6653205a98b --- /dev/null +++ b/src/lerobot/envs/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .configs import AlohaEnv, EnvConfig, PushtEnv # noqa: F401 diff --git a/src/lerobot/envs/configs.py b/src/lerobot/envs/configs.py new file mode 100644 index 0000000000000000000000000000000000000000..56b2ec4f26859f44580fb00d338e719a5bf92312 --- /dev/null +++ b/src/lerobot/envs/configs.py @@ -0,0 +1,369 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +from dataclasses import dataclass, field +from typing import Any + +import draccus + +from lerobot.configs.types import FeatureType, PolicyFeature +from lerobot.robots import RobotConfig +from lerobot.teleoperators.config import TeleoperatorConfig +from lerobot.utils.constants import ( + ACTION, + LIBERO_KEY_EEF_MAT, + LIBERO_KEY_EEF_POS, + LIBERO_KEY_EEF_QUAT, + LIBERO_KEY_GRIPPER_QPOS, + LIBERO_KEY_GRIPPER_QVEL, + LIBERO_KEY_JOINTS_POS, + LIBERO_KEY_JOINTS_VEL, + LIBERO_KEY_PIXELS_AGENTVIEW, + LIBERO_KEY_PIXELS_EYE_IN_HAND, + OBS_ENV_STATE, + OBS_IMAGE, + OBS_IMAGES, + OBS_STATE, +) + + +@dataclass +class EnvConfig(draccus.ChoiceRegistry, abc.ABC): + task: str | None = None + fps: int = 30 + features: dict[str, PolicyFeature] = field(default_factory=dict) + features_map: dict[str, str] = field(default_factory=dict) + max_parallel_tasks: int = 1 + disable_env_checker: bool = True + + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + @property + def package_name(self) -> str: + """Package name to import if environment not found in gym registry""" + return f"gym_{self.type}" + + @property + def gym_id(self) -> str: + """ID string used in gym.make() to instantiate the environment""" + return f"{self.package_name}/{self.task}" + + @property + @abc.abstractmethod + def gym_kwargs(self) -> dict: + raise NotImplementedError() + + +@EnvConfig.register_subclass("aloha") +@dataclass +class AlohaEnv(EnvConfig): + task: str | None = "AlohaInsertion-v0" + fps: int = 50 + episode_length: int = 400 + obs_type: str = "pixels_agent_pos" + observation_height: int = 480 + observation_width: int = 640 + render_mode: str = "rgb_array" + features: dict[str, PolicyFeature] = field( + default_factory=lambda: { + ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(14,)), + } + ) + features_map: dict[str, str] = field( + default_factory=lambda: { + ACTION: ACTION, + "agent_pos": OBS_STATE, + "top": f"{OBS_IMAGE}.top", + "pixels/top": f"{OBS_IMAGES}.top", + } + ) + + def __post_init__(self): + if self.obs_type == "pixels": + self.features["top"] = PolicyFeature( + type=FeatureType.VISUAL, shape=(self.observation_height, 
self.observation_width, 3) + ) + elif self.obs_type == "pixels_agent_pos": + self.features["agent_pos"] = PolicyFeature(type=FeatureType.STATE, shape=(14,)) + self.features["pixels/top"] = PolicyFeature( + type=FeatureType.VISUAL, shape=(self.observation_height, self.observation_width, 3) + ) + + @property + def gym_kwargs(self) -> dict: + return { + "obs_type": self.obs_type, + "render_mode": self.render_mode, + "max_episode_steps": self.episode_length, + } + + +@EnvConfig.register_subclass("pusht") +@dataclass +class PushtEnv(EnvConfig): + task: str | None = "PushT-v0" + fps: int = 10 + episode_length: int = 300 + obs_type: str = "pixels_agent_pos" + render_mode: str = "rgb_array" + visualization_width: int = 384 + visualization_height: int = 384 + observation_height: int = 384 + observation_width: int = 384 + features: dict[str, PolicyFeature] = field( + default_factory=lambda: { + ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(2,)), + "agent_pos": PolicyFeature(type=FeatureType.STATE, shape=(2,)), + } + ) + features_map: dict[str, str] = field( + default_factory=lambda: { + ACTION: ACTION, + "agent_pos": OBS_STATE, + "environment_state": OBS_ENV_STATE, + "pixels": OBS_IMAGE, + } + ) + + def __post_init__(self): + if self.obs_type == "pixels_agent_pos": + self.features["pixels"] = PolicyFeature( + type=FeatureType.VISUAL, shape=(self.observation_height, self.observation_width, 3) + ) + elif self.obs_type == "environment_state_agent_pos": + self.features["environment_state"] = PolicyFeature(type=FeatureType.ENV, shape=(16,)) + + @property + def gym_kwargs(self) -> dict: + return { + "obs_type": self.obs_type, + "render_mode": self.render_mode, + "visualization_width": self.visualization_width, + "visualization_height": self.visualization_height, + "max_episode_steps": self.episode_length, + } + + +@dataclass +class ImagePreprocessingConfig: + crop_params_dict: dict[str, tuple[int, int, int, int]] | None = None + resize_size: tuple[int, int] | None = None + + +@dataclass +class RewardClassifierConfig: + """Configuration for reward classification.""" + + pretrained_path: str | None = None + success_threshold: float = 0.5 + success_reward: float = 1.0 + + +@dataclass +class InverseKinematicsConfig: + """Configuration for inverse kinematics processing.""" + + urdf_path: str | None = None + target_frame_name: str | None = None + end_effector_bounds: dict[str, list[float]] | None = None + end_effector_step_sizes: dict[str, float] | None = None + + +@dataclass +class ObservationConfig: + """Configuration for observation processing.""" + + add_joint_velocity_to_observation: bool = False + add_current_to_observation: bool = False + display_cameras: bool = False + + +@dataclass +class GripperConfig: + """Configuration for gripper control and penalties.""" + + use_gripper: bool = True + gripper_penalty: float = 0.0 + + +@dataclass +class ResetConfig: + """Configuration for environment reset behavior.""" + + fixed_reset_joint_positions: Any | None = None + reset_time_s: float = 5.0 + control_time_s: float = 20.0 + terminate_on_success: bool = True + + +@dataclass +class HILSerlProcessorConfig: + """Configuration for environment processing pipeline.""" + + control_mode: str = "gamepad" + observation: ObservationConfig | None = None + image_preprocessing: ImagePreprocessingConfig | None = None + gripper: GripperConfig | None = None + reset: ResetConfig | None = None + inverse_kinematics: InverseKinematicsConfig | None = None + reward_classifier: RewardClassifierConfig | None = None + 
max_gripper_pos: float | None = 100.0 + + +@EnvConfig.register_subclass(name="gym_manipulator") +@dataclass +class HILSerlRobotEnvConfig(EnvConfig): + """Configuration for the HILSerlRobotEnv environment.""" + + robot: RobotConfig | None = None + teleop: TeleoperatorConfig | None = None + processor: HILSerlProcessorConfig = field(default_factory=HILSerlProcessorConfig) + + name: str = "real_robot" + + @property + def gym_kwargs(self) -> dict: + return {} + + +@EnvConfig.register_subclass("libero") +@dataclass +class LiberoEnv(EnvConfig): + task: str = "libero_10" # can also choose libero_spatial, libero_object, etc. + fps: int = 30 + episode_length: int = 520 + obs_type: str = "pixels_agent_pos" + render_mode: str = "rgb_array" + camera_name: str = "agentview_image,robot0_eye_in_hand_image" + init_states: bool = True + camera_name_mapping: dict[str, str] | None = None + observation_height: int = 360 + observation_width: int = 360 + features: dict[str, PolicyFeature] = field( + default_factory=lambda: { + ACTION: PolicyFeature(type=FeatureType.ACTION, shape=(7,)), + } + ) + features_map: dict[str, str] = field( + default_factory=lambda: { + ACTION: ACTION, + LIBERO_KEY_EEF_POS: f"{OBS_STATE}.eef_pos", + LIBERO_KEY_EEF_QUAT: f"{OBS_STATE}.eef_quat", + LIBERO_KEY_EEF_MAT: f"{OBS_STATE}.eef_mat", + LIBERO_KEY_GRIPPER_QPOS: f"{OBS_STATE}.gripper_qpos", + LIBERO_KEY_GRIPPER_QVEL: f"{OBS_STATE}.gripper_qvel", + LIBERO_KEY_JOINTS_POS: f"{OBS_STATE}.joint_pos", + LIBERO_KEY_JOINTS_VEL: f"{OBS_STATE}.joint_vel", + LIBERO_KEY_PIXELS_AGENTVIEW: f"{OBS_IMAGES}.image", + LIBERO_KEY_PIXELS_EYE_IN_HAND: f"{OBS_IMAGES}.image2", + } + ) + + def __post_init__(self): + if self.obs_type == "pixels": + self.features[LIBERO_KEY_PIXELS_AGENTVIEW] = PolicyFeature( + type=FeatureType.VISUAL, shape=(self.observation_height, self.observation_width, 3) + ) + self.features[LIBERO_KEY_PIXELS_EYE_IN_HAND] = PolicyFeature( + type=FeatureType.VISUAL, shape=(self.observation_height, self.observation_width, 3) + ) + elif self.obs_type == "pixels_agent_pos": + self.features[LIBERO_KEY_PIXELS_AGENTVIEW] = PolicyFeature( + type=FeatureType.VISUAL, shape=(self.observation_height, self.observation_width, 3) + ) + self.features[LIBERO_KEY_PIXELS_EYE_IN_HAND] = PolicyFeature( + type=FeatureType.VISUAL, shape=(self.observation_height, self.observation_width, 3) + ) + self.features[LIBERO_KEY_EEF_POS] = PolicyFeature( + type=FeatureType.STATE, + shape=(3,), + ) + self.features[LIBERO_KEY_EEF_QUAT] = PolicyFeature( + type=FeatureType.STATE, + shape=(4,), + ) + self.features[LIBERO_KEY_EEF_MAT] = PolicyFeature( + type=FeatureType.STATE, + shape=(3, 3), + ) + self.features[LIBERO_KEY_GRIPPER_QPOS] = PolicyFeature( + type=FeatureType.STATE, + shape=(2,), + ) + self.features[LIBERO_KEY_GRIPPER_QVEL] = PolicyFeature( + type=FeatureType.STATE, + shape=(2,), + ) + self.features[LIBERO_KEY_JOINTS_POS] = PolicyFeature( + type=FeatureType.STATE, + shape=(7,), + ) + self.features[LIBERO_KEY_JOINTS_VEL] = PolicyFeature( + type=FeatureType.STATE, + shape=(7,), + ) + else: + raise ValueError(f"Unsupported obs_type: {self.obs_type}") + + @property + def gym_kwargs(self) -> dict: + return { + "obs_type": self.obs_type, + "render_mode": self.render_mode, + } + + +@EnvConfig.register_subclass("metaworld") +@dataclass +class MetaworldEnv(EnvConfig): + task: str = "metaworld-push-v2" # add all tasks + fps: int = 80 + episode_length: int = 400 + obs_type: str = "pixels_agent_pos" + render_mode: str = "rgb_array" + multitask_eval: bool = True + features: 
dict[str, PolicyFeature] = field( + default_factory=lambda: { + "action": PolicyFeature(type=FeatureType.ACTION, shape=(4,)), + } + ) + features_map: dict[str, str] = field( + default_factory=lambda: { + "action": ACTION, + "agent_pos": OBS_STATE, + "top": f"{OBS_IMAGE}", + "pixels/top": f"{OBS_IMAGE}", + } + ) + + def __post_init__(self): + if self.obs_type == "pixels": + self.features["top"] = PolicyFeature(type=FeatureType.VISUAL, shape=(480, 480, 3)) + + elif self.obs_type == "pixels_agent_pos": + self.features["agent_pos"] = PolicyFeature(type=FeatureType.STATE, shape=(4,)) + self.features["pixels/top"] = PolicyFeature(type=FeatureType.VISUAL, shape=(480, 480, 3)) + + else: + raise ValueError(f"Unsupported obs_type: {self.obs_type}") + + @property + def gym_kwargs(self) -> dict: + return { + "obs_type": self.obs_type, + "render_mode": self.render_mode, + } diff --git a/src/lerobot/envs/factory.py b/src/lerobot/envs/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..0d0752f3ea2d67e3ad564be91edee64b0a7d14fe --- /dev/null +++ b/src/lerobot/envs/factory.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +from typing import Any + +import gymnasium as gym +from gymnasium.envs.registration import registry as gym_registry + +from lerobot.envs.configs import AlohaEnv, EnvConfig, LiberoEnv, PushtEnv +from lerobot.envs.utils import _call_make_env, _download_hub_file, _import_hub_module, _normalize_hub_result +from lerobot.processor import ProcessorStep +from lerobot.processor.env_processor import LiberoProcessorStep +from lerobot.processor.pipeline import PolicyProcessorPipeline + + +def make_env_config(env_type: str, **kwargs) -> EnvConfig: + if env_type == "aloha": + return AlohaEnv(**kwargs) + elif env_type == "pusht": + return PushtEnv(**kwargs) + elif env_type == "libero": + return LiberoEnv(**kwargs) + else: + raise ValueError(f"Policy type '{env_type}' is not available.") + + +def make_env_pre_post_processors( + env_cfg: EnvConfig, +) -> tuple[ + PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], + PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], +]: + """ + Create preprocessor and postprocessor pipelines for environment observations. + + This function creates processor pipelines that transform raw environment + observations and actions. By default, it returns identity processors that do nothing. + For specific environments like LIBERO, it adds environment-specific processing steps. + + Args: + env_cfg: The configuration of the environment. 
+ + Returns: + A tuple containing: + - preprocessor: Pipeline that processes environment observations + - postprocessor: Pipeline that processes environment outputs (currently identity) + """ + # Preprocessor and Postprocessor steps are Identity for most environments + preprocessor_steps: list[ProcessorStep] = [] + postprocessor_steps: list[ProcessorStep] = [] + + # For LIBERO environments, add the LiberoProcessorStep to preprocessor + if isinstance(env_cfg, LiberoEnv) or "libero" in env_cfg.type: + preprocessor_steps.append(LiberoProcessorStep()) + + preprocessor = PolicyProcessorPipeline(steps=preprocessor_steps) + postprocessor = PolicyProcessorPipeline(steps=postprocessor_steps) + + return preprocessor, postprocessor + + +def make_env( + cfg: EnvConfig | str, + n_envs: int = 1, + use_async_envs: bool = False, + hub_cache_dir: str | None = None, + trust_remote_code: bool = False, +) -> dict[str, dict[int, gym.vector.VectorEnv]]: + """Makes a gym vector environment according to the config or Hub reference. + + Args: + cfg (EnvConfig | str): Either an `EnvConfig` object describing the environment to build locally, + or a Hugging Face Hub repository identifier (e.g. `"username/repo"`). In the latter case, + the repo must include a Python file (usually `env.py`). + n_envs (int, optional): The number of parallelized env to return. Defaults to 1. + use_async_envs (bool, optional): Whether to return an AsyncVectorEnv or a SyncVectorEnv. Defaults to + False. + hub_cache_dir (str | None): Optional cache path for downloaded hub files. + trust_remote_code (bool): **Explicit consent** to execute remote code from the Hub. + Default False — must be set to True to import/exec hub `env.py`. + + Raises: + ValueError: if n_envs < 1 + ModuleNotFoundError: If the requested env package is not installed + + Returns: + dict[str, dict[int, gym.vector.VectorEnv]]: + A mapping from suite name to indexed vectorized environments. + - For multi-task benchmarks (e.g., LIBERO): one entry per suite, and one vec env per task_id. + - For single-task environments: a single suite entry (cfg.type) with task_id=0. 
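+
+    Example (a minimal sketch for a single-task env; the values are illustrative):
+
+    ```python
+    cfg = make_env_config("pusht")
+    envs = make_env(cfg, n_envs=4)
+    vec_env = envs["pusht"][0]  # suite name -> task_id -> vectorized env
+    obs, info = vec_env.reset(seed=0)
+    ```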
+ + """ + # if user passed a hub id string (e.g., "username/repo", "username/repo@main:env.py") + # simplified: only support hub-provided `make_env` + if isinstance(cfg, str): + # _download_hub_file will raise the same RuntimeError if trust_remote_code is False + repo_id, file_path, local_file, revision = _download_hub_file(cfg, trust_remote_code, hub_cache_dir) + + # import and surface clear import errors + module = _import_hub_module(local_file, repo_id) + + # call the hub-provided make_env + raw_result = _call_make_env(module, n_envs=n_envs, use_async_envs=use_async_envs) + + # normalize the return into {suite: {task_id: vec_env}} + return _normalize_hub_result(raw_result) + + if n_envs < 1: + raise ValueError("`n_envs` must be at least 1") + + env_cls = gym.vector.AsyncVectorEnv if use_async_envs else gym.vector.SyncVectorEnv + + if "libero" in cfg.type: + from lerobot.envs.libero import create_libero_envs + + if cfg.task is None: + raise ValueError("LiberoEnv requires a task to be specified") + + return create_libero_envs( + task=cfg.task, + n_envs=n_envs, + camera_name=cfg.camera_name, + init_states=cfg.init_states, + gym_kwargs=cfg.gym_kwargs, + env_cls=env_cls, + ) + elif "metaworld" in cfg.type: + from lerobot.envs.metaworld import create_metaworld_envs + + if cfg.task is None: + raise ValueError("MetaWorld requires a task to be specified") + + return create_metaworld_envs( + task=cfg.task, + n_envs=n_envs, + gym_kwargs=cfg.gym_kwargs, + env_cls=env_cls, + ) + + if cfg.gym_id not in gym_registry: + print(f"gym id '{cfg.gym_id}' not found, attempting to import '{cfg.package_name}'...") + try: + importlib.import_module(cfg.package_name) + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"Package '{cfg.package_name}' required for env '{cfg.type}' not found. " + f"Please install it or check PYTHONPATH." + ) from e + + if cfg.gym_id not in gym_registry: + raise gym.error.NameNotFound( + f"Environment '{cfg.gym_id}' not registered even after importing '{cfg.package_name}'." + ) + + def _make_one(): + return gym.make(cfg.gym_id, disable_env_checker=cfg.disable_env_checker, **(cfg.gym_kwargs or {})) + + vec = env_cls([_make_one for _ in range(n_envs)], autoreset_mode=gym.vector.AutoresetMode.SAME_STEP) + + # normalize to {suite: {task_id: vec_env}} for consistency + suite_name = cfg.type # e.g., "pusht", "aloha" + return {suite_name: {0: vec}} diff --git a/src/lerobot/envs/libero.py b/src/lerobot/envs/libero.py new file mode 100644 index 0000000000000000000000000000000000000000..2c0488087b83c5b8ab02e7350b54e154d231e0de --- /dev/null +++ b/src/lerobot/envs/libero.py @@ -0,0 +1,429 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import os +from collections import defaultdict +from collections.abc import Callable, Iterable, Mapping, Sequence +from functools import partial +from pathlib import Path +from typing import Any + +import gymnasium as gym +import numpy as np +import torch +from gymnasium import spaces +from libero.libero import benchmark, get_libero_path +from libero.libero.envs import OffScreenRenderEnv + + +def _parse_camera_names(camera_name: str | Sequence[str]) -> list[str]: + """Normalize camera_name into a non-empty list of strings.""" + if isinstance(camera_name, str): + cams = [c.strip() for c in camera_name.split(",") if c.strip()] + elif isinstance(camera_name, (list | tuple)): + cams = [str(c).strip() for c in camera_name if str(c).strip()] + else: + raise TypeError(f"camera_name must be str or sequence[str], got {type(camera_name).__name__}") + if not cams: + raise ValueError("camera_name resolved to an empty list.") + return cams + + +def _get_suite(name: str) -> benchmark.Benchmark: + """Instantiate a LIBERO suite by name with clear validation.""" + bench = benchmark.get_benchmark_dict() + if name not in bench: + raise ValueError(f"Unknown LIBERO suite '{name}'. Available: {', '.join(sorted(bench.keys()))}") + suite = bench[name]() + if not getattr(suite, "tasks", None): + raise ValueError(f"Suite '{name}' has no tasks.") + return suite + + +def _select_task_ids(total_tasks: int, task_ids: Iterable[int] | None) -> list[int]: + """Validate/normalize task ids. If None → all tasks.""" + if task_ids is None: + return list(range(total_tasks)) + ids = sorted({int(t) for t in task_ids}) + for t in ids: + if t < 0 or t >= total_tasks: + raise ValueError(f"task_id {t} out of range [0, {total_tasks - 1}].") + return ids + + +def get_task_init_states(task_suite: Any, i: int) -> np.ndarray: + init_states_path = ( + Path(get_libero_path("init_states")) + / task_suite.tasks[i].problem_folder + / task_suite.tasks[i].init_states_file + ) + init_states = torch.load(init_states_path, weights_only=False) # nosec B614 + return init_states + + +def get_libero_dummy_action(): + """Get dummy/no-op action, used to roll out the simulation while the robot does nothing.""" + return [0, 0, 0, 0, 0, 0, -1] + + +OBS_STATE_DIM = 8 +ACTION_DIM = 7 +AGENT_POS_LOW = -1000.0 +AGENT_POS_HIGH = 1000.0 +ACTION_LOW = -1.0 +ACTION_HIGH = 1.0 +TASK_SUITE_MAX_STEPS: dict[str, int] = { + "libero_spatial": 280, # longest training demo has 193 steps + "libero_object": 280, # longest training demo has 254 steps + "libero_goal": 300, # longest training demo has 270 steps + "libero_10": 520, # longest training demo has 505 steps + "libero_90": 400, # longest training demo has 373 steps +} + + +class LiberoEnv(gym.Env): + metadata = {"render_modes": ["rgb_array"], "render_fps": 80} + + def __init__( + self, + task_suite: Any, + task_id: int, + task_suite_name: str, + camera_name: str | Sequence[str] = "agentview_image,robot0_eye_in_hand_image", + obs_type: str = "pixels", + render_mode: str = "rgb_array", + observation_width: int = 256, + observation_height: int = 256, + visualization_width: int = 640, + visualization_height: int = 480, + init_states: bool = True, + episode_index: int = 0, + camera_name_mapping: dict[str, str] | None = None, + num_steps_wait: int = 10, + ): + super().__init__() + self.task_id = task_id + self.obs_type = obs_type + self.render_mode = render_mode + self.observation_width = observation_width + self.observation_height = observation_height + self.visualization_width = 
visualization_width + self.visualization_height = visualization_height + self.init_states = init_states + self.camera_name = _parse_camera_names( + camera_name + ) # agentview_image (main) or robot0_eye_in_hand_image (wrist) + + # Map raw camera names to "image1" and "image2". + # The preprocessing step `preprocess_observation` will then prefix these with `.images.*`, + # following the LeRobot convention (e.g., `observation.images.image`, `observation.images.image2`). + # This ensures the policy consistently receives observations in the + # expected format regardless of the original camera naming. + if camera_name_mapping is None: + camera_name_mapping = { + "agentview_image": "image", + "robot0_eye_in_hand_image": "image2", + } + self.camera_name_mapping = camera_name_mapping + self.num_steps_wait = num_steps_wait + self.episode_index = episode_index + # Load once and keep + self._init_states = get_task_init_states(task_suite, self.task_id) if self.init_states else None + self._init_state_id = self.episode_index # tie each sub-env to a fixed init state + + self._env = self._make_envs_task(task_suite, self.task_id) + default_steps = 500 + self._max_episode_steps = TASK_SUITE_MAX_STEPS.get(task_suite_name, default_steps) + + images = {} + for cam in self.camera_name: + images[self.camera_name_mapping[cam]] = spaces.Box( + low=0, + high=255, + shape=(self.observation_height, self.observation_width, 3), + dtype=np.uint8, + ) + + if self.obs_type == "state": + raise NotImplementedError( + "The 'state' observation type is not supported in LiberoEnv. " + "Please switch to an image-based obs_type (e.g. 'pixels', 'pixels_agent_pos')." + ) + + elif self.obs_type == "pixels": + self.observation_space = spaces.Dict( + { + "pixels": spaces.Dict(images), + } + ) + elif self.obs_type == "pixels_agent_pos": + self.observation_space = spaces.Dict( + { + "pixels": spaces.Dict(images), + "robot_state": spaces.Dict( + { + "eef": spaces.Dict( + { + "pos": spaces.Box(low=-np.inf, high=np.inf, shape=(3,), dtype=np.float64), + "quat": spaces.Box( + low=-np.inf, high=np.inf, shape=(4,), dtype=np.float64 + ), + "mat": spaces.Box( + low=-np.inf, high=np.inf, shape=(3, 3), dtype=np.float64 + ), + } + ), + "gripper": spaces.Dict( + { + "qpos": spaces.Box( + low=-np.inf, high=np.inf, shape=(2,), dtype=np.float64 + ), + "qvel": spaces.Box( + low=-np.inf, high=np.inf, shape=(2,), dtype=np.float64 + ), + } + ), + "joints": spaces.Dict( + { + "pos": spaces.Box(low=-np.inf, high=np.inf, shape=(7,), dtype=np.float64), + "vel": spaces.Box(low=-np.inf, high=np.inf, shape=(7,), dtype=np.float64), + } + ), + } + ), + } + ) + + self.action_space = spaces.Box( + low=ACTION_LOW, high=ACTION_HIGH, shape=(ACTION_DIM,), dtype=np.float32 + ) + + def render(self): + raw_obs = self._env.env._get_observations() + image = self._format_raw_obs(raw_obs)["pixels"]["image"] + image = image[::-1, ::-1] # flip both H and W for visualization + return image + + def _make_envs_task(self, task_suite: Any, task_id: int = 0): + task = task_suite.get_task(task_id) + self.task = task.name + self.task_description = task.language + task_bddl_file = os.path.join(get_libero_path("bddl_files"), task.problem_folder, task.bddl_file) + + env_args = { + "bddl_file_name": task_bddl_file, + "camera_heights": self.observation_height, + "camera_widths": self.observation_width, + } + env = OffScreenRenderEnv(**env_args) + env.reset() + return env + + def _format_raw_obs(self, raw_obs: dict[str, Any]) -> dict[str, Any]: + images = {} + for camera_name in 
self.camera_name: + image = raw_obs[camera_name] + images[self.camera_name_mapping[camera_name]] = image + + eef_pos = raw_obs.get("robot0_eef_pos") + eef_quat = raw_obs.get("robot0_eef_quat") + + # rotation matrix from controller + eef_mat = self._env.robots[0].controller.ee_ori_mat if eef_pos is not None else None + gripper_qpos = raw_obs.get("robot0_gripper_qpos") + gripper_qvel = raw_obs.get("robot0_gripper_qvel") + joint_pos = raw_obs.get("robot0_joint_pos") + joint_vel = raw_obs.get("robot0_joint_vel") + obs = { + "pixels": images, + "robot_state": { + "eef": { + "pos": eef_pos, # (3,) + "quat": eef_quat, # (4,) + "mat": eef_mat, # (3, 3) + }, + "gripper": { + "qpos": gripper_qpos, # (2,) + "qvel": gripper_qvel, # (2,) + }, + "joints": { + "pos": joint_pos, # (7,) + "vel": joint_vel, # (7,) + }, + }, + } + if self.obs_type == "pixels": + return {"pixels": images.copy()} + + if self.obs_type == "pixels_agent_pos": + # Validate required fields are present + if eef_pos is None or eef_quat is None or gripper_qpos is None: + raise ValueError( + f"Missing required robot state fields in raw observation. " + f"Got eef_pos={eef_pos is not None}, eef_quat={eef_quat is not None}, " + f"gripper_qpos={gripper_qpos is not None}" + ) + return obs + + raise NotImplementedError( + f"The observation type '{self.obs_type}' is not supported in LiberoEnv. " + "Please switch to an image-based obs_type (e.g. 'pixels', 'pixels_agent_pos')." + ) + + def reset(self, seed=None, **kwargs): + super().reset(seed=seed) + self._env.seed(seed) + if self.init_states and self._init_states is not None: + self._env.set_init_state(self._init_states[self._init_state_id]) + raw_obs = self._env.reset() + + # After reset, objects may be unstable (slightly floating, intersecting, etc.). + # Step the simulator with a no-op action for a few frames so everything settles. + # Increasing this value can improve determinism and reproducibility across resets. 
+ for _ in range(self.num_steps_wait): + raw_obs, _, _, _ = self._env.step(get_libero_dummy_action()) + observation = self._format_raw_obs(raw_obs) + info = {"is_success": False} + return observation, info + + def step(self, action: np.ndarray) -> tuple[dict[str, Any], float, bool, bool, dict[str, Any]]: + if action.ndim != 1: + raise ValueError( + f"Expected action to be 1-D (shape (action_dim,)), " + f"but got shape {action.shape} with ndim={action.ndim}" + ) + raw_obs, reward, done, info = self._env.step(action) + + is_success = self._env.check_success() + terminated = done or is_success + info.update( + { + "task": self.task, + "task_id": self.task_id, + "done": done, + "is_success": is_success, + } + ) + observation = self._format_raw_obs(raw_obs) + if terminated: + info["final_info"] = { + "task": self.task, + "task_id": self.task_id, + "done": bool(done), + "is_success": bool(is_success), + } + self.reset() + truncated = False + return observation, reward, terminated, truncated, info + + def close(self): + self._env.close() + + +def _make_env_fns( + *, + suite, + suite_name: str, + task_id: int, + n_envs: int, + camera_names: list[str], + init_states: bool, + gym_kwargs: Mapping[str, Any], +) -> list[Callable[[], LiberoEnv]]: + """Build n_envs factory callables for a single (suite, task_id).""" + + def _make_env(episode_index: int, **kwargs) -> LiberoEnv: + local_kwargs = dict(kwargs) + return LiberoEnv( + task_suite=suite, + task_id=task_id, + task_suite_name=suite_name, + camera_name=camera_names, + init_states=init_states, + episode_index=episode_index, + **local_kwargs, + ) + + fns: list[Callable[[], LiberoEnv]] = [] + for episode_index in range(n_envs): + fns.append(partial(_make_env, episode_index, **gym_kwargs)) + return fns + + +# ---- Main API ---------------------------------------------------------------- + + +def create_libero_envs( + task: str, + n_envs: int, + gym_kwargs: dict[str, Any] | None = None, + camera_name: str | Sequence[str] = "agentview_image,robot0_eye_in_hand_image", + init_states: bool = True, + env_cls: Callable[[Sequence[Callable[[], Any]]], Any] | None = None, +) -> dict[str, dict[int, Any]]: + """ + Create vectorized LIBERO environments with a consistent return shape. + + Returns: + dict[suite_name][task_id] -> vec_env (env_cls([...]) with exactly n_envs factories) + Notes: + - n_envs is the number of rollouts *per task* (episode_index = 0..n_envs-1). + - `task` can be a single suite or a comma-separated list of suites. + - You may pass `task_ids` (list[int]) inside `gym_kwargs` to restrict tasks per suite. 
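+
+    Example (a minimal sketch; the suite name and counts are illustrative, and LIBERO must be installed):
+
+    ```python
+    import gymnasium as gym
+
+    envs = create_libero_envs(
+        task="libero_object",
+        n_envs=2,
+        env_cls=gym.vector.SyncVectorEnv,
+    )
+    vec_env = envs["libero_object"][0]  # suite -> task_id -> vec env with 2 rollouts
+    ```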
+ """ + if env_cls is None or not callable(env_cls): + raise ValueError("env_cls must be a callable that wraps a list of environment factory callables.") + if not isinstance(n_envs, int) or n_envs <= 0: + raise ValueError(f"n_envs must be a positive int; got {n_envs}.") + + gym_kwargs = dict(gym_kwargs or {}) + task_ids_filter = gym_kwargs.pop("task_ids", None) # optional: limit to specific tasks + + camera_names = _parse_camera_names(camera_name) + suite_names = [s.strip() for s in str(task).split(",") if s.strip()] + if not suite_names: + raise ValueError("`task` must contain at least one LIBERO suite name.") + + print( + f"Creating LIBERO envs | suites={suite_names} | n_envs(per task)={n_envs} | init_states={init_states}" + ) + if task_ids_filter is not None: + print(f"Restricting to task_ids={task_ids_filter}") + + out: dict[str, dict[int, Any]] = defaultdict(dict) + for suite_name in suite_names: + suite = _get_suite(suite_name) + total = len(suite.tasks) + selected = _select_task_ids(total, task_ids_filter) + if not selected: + raise ValueError(f"No tasks selected for suite '{suite_name}' (available: {total}).") + + for tid in selected: + fns = _make_env_fns( + suite=suite, + suite_name=suite_name, + task_id=tid, + n_envs=n_envs, + camera_names=camera_names, + init_states=init_states, + gym_kwargs=gym_kwargs, + ) + out[suite_name][tid] = env_cls(fns) + print(f"Built vec env | suite={suite_name} | task_id={tid} | n_envs={n_envs}") + + # return plain dicts for predictability + return {suite: dict(task_map) for suite, task_map in out.items()} diff --git a/src/lerobot/envs/metaworld.py b/src/lerobot/envs/metaworld.py new file mode 100644 index 0000000000000000000000000000000000000000..1f64a9a3bae31a731b7cc9677ec3bb7bc7b06b69 --- /dev/null +++ b/src/lerobot/envs/metaworld.py @@ -0,0 +1,313 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +from collections import defaultdict +from collections.abc import Callable, Sequence +from pathlib import Path +from typing import Any + +import gymnasium as gym +import metaworld +import metaworld.policies as policies +import numpy as np +from gymnasium import spaces + +# ---- Load configuration data from the external JSON file ---- +CONFIG_PATH = Path(__file__).parent / "metaworld_config.json" +try: + with open(CONFIG_PATH) as f: + data = json.load(f) +except FileNotFoundError as err: + raise FileNotFoundError( + "Could not find 'metaworld_config.json'. " + "Please ensure the configuration file is in the same directory as the script." + ) from err +except json.JSONDecodeError as err: + raise ValueError( + "Failed to decode 'metaworld_config.json'. Please ensure it is a valid JSON file." 
+ ) from err + +# ---- Process the loaded data ---- + +# extract and type-check top-level dicts +task_descriptions_obj = data.get("TASK_DESCRIPTIONS") +if not isinstance(task_descriptions_obj, dict): + raise TypeError("Expected TASK_DESCRIPTIONS to be a dict[str, str]") +TASK_DESCRIPTIONS: dict[str, str] = task_descriptions_obj + +task_name_to_id_obj = data.get("TASK_NAME_TO_ID") +if not isinstance(task_name_to_id_obj, dict): + raise TypeError("Expected TASK_NAME_TO_ID to be a dict[str, int]") +TASK_NAME_TO_ID: dict[str, int] = task_name_to_id_obj + +# difficulty -> tasks mapping +difficulty_to_tasks = data.get("DIFFICULTY_TO_TASKS") +if not isinstance(difficulty_to_tasks, dict): + raise TypeError("Expected 'DIFFICULTY_TO_TASKS' to be a dict[str, list[str]]") +DIFFICULTY_TO_TASKS: dict[str, list[str]] = difficulty_to_tasks + +# convert policy strings -> actual policy classes +task_policy_mapping = data.get("TASK_POLICY_MAPPING") +if not isinstance(task_policy_mapping, dict): + raise TypeError("Expected 'TASK_POLICY_MAPPING' to be a dict[str, str]") +TASK_POLICY_MAPPING: dict[str, Any] = { + task_name: getattr(policies, policy_class_name) + for task_name, policy_class_name in task_policy_mapping.items() +} +ACTION_DIM = 4 +OBS_DIM = 4 + + +class MetaworldEnv(gym.Env): + metadata = {"render_modes": ["rgb_array"], "render_fps": 80} + + def __init__( + self, + task, + camera_name="corner2", + obs_type="pixels", + render_mode="rgb_array", + observation_width=480, + observation_height=480, + visualization_width=640, + visualization_height=480, + ): + super().__init__() + self.task = task.replace("metaworld-", "") + self.obs_type = obs_type + self.render_mode = render_mode + self.observation_width = observation_width + self.observation_height = observation_height + self.visualization_width = visualization_width + self.visualization_height = visualization_height + self.camera_name = camera_name + + self._env = self._make_envs_task(self.task) + self._max_episode_steps = self._env.max_path_length + self.task_description = TASK_DESCRIPTIONS[self.task] + + self.expert_policy = TASK_POLICY_MAPPING[self.task]() + + if self.obs_type == "state": + raise NotImplementedError() + elif self.obs_type == "pixels": + self.observation_space = spaces.Dict( + { + "pixels": spaces.Box( + low=0, + high=255, + shape=(self.observation_height, self.observation_width, 3), + dtype=np.uint8, + ) + } + ) + elif self.obs_type == "pixels_agent_pos": + self.observation_space = spaces.Dict( + { + "pixels": spaces.Box( + low=0, + high=255, + shape=(self.observation_height, self.observation_width, 3), + dtype=np.uint8, + ), + "agent_pos": spaces.Box( + low=-1000.0, + high=1000.0, + shape=(OBS_DIM,), + dtype=np.float64, + ), + } + ) + + self.action_space = spaces.Box(low=-1, high=1, shape=(ACTION_DIM,), dtype=np.float32) + + def render(self) -> np.ndarray: + """ + Render the current environment frame. + + Returns: + np.ndarray: The rendered RGB image from the environment. 
+ """ + image = self._env.render() + if self.camera_name == "corner2": + # Images from this camera are flipped — correct them + image = np.flip(image, (0, 1)) + return image + + def _make_envs_task(self, env_name: str): + mt1 = metaworld.MT1(env_name, seed=42) + env = mt1.train_classes[env_name](render_mode="rgb_array", camera_name=self.camera_name) + env.set_task(mt1.train_tasks[0]) + if self.camera_name == "corner2": + env.model.cam_pos[2] = [ + 0.75, + 0.075, + 0.7, + ] # corner2 position, similar to https://arxiv.org/pdf/2206.14244 + env.reset() + env._freeze_rand_vec = False # otherwise no randomization + return env + + def _format_raw_obs(self, raw_obs: np.ndarray) -> dict[str, Any]: + image = None + if self._env is not None: + image = self._env.render() + if self.camera_name == "corner2": + # NOTE: The "corner2" camera in MetaWorld environments outputs images with both axes inverted. + image = np.flip(image, (0, 1)) + agent_pos = raw_obs[:4] + if self.obs_type == "state": + raise NotImplementedError( + "'state' obs_type not implemented for MetaWorld. Use pixel modes instead." + ) + + elif self.obs_type in ("pixels", "pixels_agent_pos"): + assert image is not None, ( + "Expected `image` to be rendered before constructing pixel-based observations. " + "This likely means `env.render()` returned None or the environment was not provided." + ) + + if self.obs_type == "pixels": + obs = {"pixels": image.copy()} + + else: # pixels_agent_pos + obs = { + "pixels": image.copy(), + "agent_pos": agent_pos, + } + else: + raise ValueError(f"Unknown obs_type: {self.obs_type}") + return obs + + def reset( + self, + seed: int | None = None, + **kwargs, + ) -> tuple[dict[str, Any], dict[str, Any]]: + """ + Reset the environment to its initial state. + + Args: + seed (Optional[int]): Random seed for environment initialization. + + Returns: + observation (Dict[str, Any]): The initial formatted observation. + info (Dict[str, Any]): Additional info about the reset state. + """ + super().reset(seed=seed) + + raw_obs, info = self._env.reset(seed=seed) + + observation = self._format_raw_obs(raw_obs) + + info = {"is_success": False} + return observation, info + + def step(self, action: np.ndarray) -> tuple[dict[str, Any], float, bool, bool, dict[str, Any]]: + """ + Perform one environment step. + + Args: + action (np.ndarray): The action to execute, must be 1-D with shape (action_dim,). + + Returns: + observation (Dict[str, Any]): The formatted observation after the step. + reward (float): The scalar reward for this step. + terminated (bool): Whether the episode terminated successfully. + truncated (bool): Whether the episode was truncated due to a time limit. + info (Dict[str, Any]): Additional environment info. 
+ """ + if action.ndim != 1: + raise ValueError( + f"Expected action to be 1-D (shape (action_dim,)), " + f"but got shape {action.shape} with ndim={action.ndim}" + ) + raw_obs, reward, done, truncated, info = self._env.step(action) + + # Determine whether the task was successful + is_success = bool(info.get("success", 0)) + terminated = done or is_success + info.update( + { + "task": self.task, + "done": done, + "is_success": is_success, + } + ) + + # Format the raw observation into the expected structure + observation = self._format_raw_obs(raw_obs) + if terminated: + info["final_info"] = { + "task": self.task, + "done": bool(done), + "is_success": bool(is_success), + } + self.reset() + + return observation, reward, terminated, truncated, info + + def close(self): + self._env.close() + + +# ---- Main API ---------------------------------------------------------------- + + +def create_metaworld_envs( + task: str, + n_envs: int, + gym_kwargs: dict[str, Any] | None = None, + env_cls: Callable[[Sequence[Callable[[], Any]]], Any] | None = None, +) -> dict[str, dict[int, Any]]: + """ + Create vectorized Meta-World environments with a consistent return shape. + + Returns: + dict[task_group][task_id] -> vec_env (env_cls([...]) with exactly n_envs factories) + Notes: + - n_envs is the number of rollouts *per task* (episode_index = 0..n_envs-1). + - `task` can be a single difficulty group (e.g., "easy", "medium", "hard") or a comma-separated list. + - If a task name is not in DIFFICULTY_TO_TASKS, we treat it as a single custom task. + """ + if env_cls is None or not callable(env_cls): + raise ValueError("env_cls must be a callable that wraps a list of environment factory callables.") + if not isinstance(n_envs, int) or n_envs <= 0: + raise ValueError(f"n_envs must be a positive int; got {n_envs}.") + + gym_kwargs = dict(gym_kwargs or {}) + task_groups = [t.strip() for t in task.split(",") if t.strip()] + if not task_groups: + raise ValueError("`task` must contain at least one Meta-World task or difficulty group.") + + print(f"Creating Meta-World envs | task_groups={task_groups} | n_envs(per task)={n_envs}") + + out: dict[str, dict[int, Any]] = defaultdict(dict) + + for group in task_groups: + # if not in difficulty presets, treat it as a single custom task + tasks = DIFFICULTY_TO_TASKS.get(group, [group]) + + for tid, task_name in enumerate(tasks): + print(f"Building vec env | group={group} | task_id={tid} | task={task_name}") + + # build n_envs factories + fns = [(lambda tn=task_name: MetaworldEnv(task=tn, **gym_kwargs)) for _ in range(n_envs)] + + out[group][tid] = env_cls(fns) + + # return a plain dict for consistency + return {group: dict(task_map) for group, task_map in out.items()} diff --git a/src/lerobot/envs/metaworld_config.json b/src/lerobot/envs/metaworld_config.json new file mode 100644 index 0000000000000000000000000000000000000000..716df877eb889627d7c914f5698476eaf004236b --- /dev/null +++ b/src/lerobot/envs/metaworld_config.json @@ -0,0 +1,121 @@ +{ + "TASK_DESCRIPTIONS": { + "assembly-v3": "Pick up a nut and place it onto a peg", + "basketball-v3": "Dunk the basketball into the basket", + "bin-picking-v3": "Grasp the puck from one bin and place it into another bin", + "box-close-v3": "Grasp the cover and close the box with it", + "button-press-topdown-v3": "Press a button from the top", + "button-press-topdown-wall-v3": "Bypass a wall and press a button from the top", + "button-press-v3": "Press a button", + "button-press-wall-v3": "Bypass a wall and press a button", + 
"coffee-button-v3": "Push a button on the coffee machine", + "coffee-pull-v3": "Pull a mug from a coffee machine", + "coffee-push-v3": "Push a mug under a coffee machine", + "dial-turn-v3": "Rotate a dial 180 degrees", + "disassemble-v3": "Pick a nut out of a peg", + "door-close-v3": "Close a door with a revolving joint", + "door-lock-v3": "Lock the door by rotating the lock clockwise", + "door-open-v3": "Open a door with a revolving joint", + "door-unlock-v3": "Unlock the door by rotating the lock counter-clockwise", + "hand-insert-v3": "Insert the gripper into a hole", + "drawer-close-v3": "Push and close a drawer", + "drawer-open-v3": "Open a drawer", + "faucet-open-v3": "Rotate the faucet counter-clockwise", + "faucet-close-v3": "Rotate the faucet clockwise", + "hammer-v3": "Hammer a screw on the wall", + "handle-press-side-v3": "Press a handle down sideways", + "handle-press-v3": "Press a handle down", + "handle-pull-side-v3": "Pull a handle up sideways", + "handle-pull-v3": "Pull a handle up", + "lever-pull-v3": "Pull a lever down 90 degrees", + "peg-insert-side-v3": "Insert a peg sideways", + "pick-place-wall-v3": "Pick a puck, bypass a wall and place the puck", + "pick-out-of-hole-v3": "Pick up a puck from a hole", + "reach-v3": "Reach a goal position", + "push-back-v3": "Push the puck to a goal", + "push-v3": "Push the puck to a goal", + "pick-place-v3": "Pick and place a puck to a goal", + "plate-slide-v3": "Slide a plate into a cabinet", + "plate-slide-side-v3": "Slide a plate into a cabinet sideways", + "plate-slide-back-v3": "Get a plate from the cabinet", + "plate-slide-back-side-v3": "Get a plate from the cabinet sideways", + "peg-unplug-side-v3": "Unplug a peg sideways", + "soccer-v3": "Kick a soccer into the goal", + "stick-push-v3": "Grasp a stick and push a box using the stick", + "stick-pull-v3": "Grasp a stick and pull a box with the stick", + "push-wall-v3": "Bypass a wall and push a puck to a goal", + "reach-wall-v3": "Bypass a wall and reach a goal", + "shelf-place-v3": "Pick and place a puck onto a shelf", + "sweep-into-v3": "Sweep a puck into a hole", + "sweep-v3": "Sweep a puck off the table", + "window-open-v3": "Push and open a window", + "window-close-v3": "Push and close a window" + }, + "TASK_NAME_TO_ID": { + "assembly-v3": 0, "basketball-v3": 1, "bin-picking-v3": 2, "box-close-v3": 3, + "button-press-topdown-v3": 4, "button-press-topdown-wall-v3": 5, "button-press-v3": 6, + "button-press-wall-v3": 7, "coffee-button-v3": 8, "coffee-pull-v3": 9, "coffee-push-v3": 10, + "dial-turn-v3": 11, "disassemble-v3": 12, "door-close-v3": 13, "door-lock-v3": 14, + "door-open-v3": 15, "door-unlock-v3": 16, "drawer-close-v3": 17, "drawer-open-v3": 18, + "faucet-close-v3": 19, "faucet-open-v3": 20, "hammer-v3": 21, "hand-insert-v3": 22, + "handle-press-side-v3": 23, "handle-press-v3": 24, "handle-pull-side-v3": 25, + "handle-pull-v3": 26, "lever-pull-v3": 27, "peg-insert-side-v3": 28, "peg-unplug-side-v3": 29, + "pick-out-of-hole-v3": 30, "pick-place-v3": 31, "pick-place-wall-v3": 32, + "plate-slide-back-side-v3": 33, "plate-slide-back-v3": 34, "plate-slide-side-v3": 35, + "plate-slide-v3": 36, "push-back-v3": 37, "push-v3": 38, "push-wall-v3": 39, "reach-v3": 40, + "reach-wall-v3": 41, "shelf-place-v3": 42, "soccer-v3": 43, "stick-pull-v3": 44, + "stick-push-v3": 45, "sweep-into-v3": 46, "sweep-v3": 47, "window-open-v3": 48, + "window-close-v3": 49 + }, + "DIFFICULTY_TO_TASKS": { + "easy": [ + "button-press-v3", "button-press-topdown-v3", "button-press-topdown-wall-v3", + 
"button-press-wall-v3", "coffee-button-v3", "dial-turn-v3", "door-close-v3", + "door-lock-v3", "door-open-v3", "door-unlock-v3", "drawer-close-v3", "drawer-open-v3", + "faucet-close-v3", "faucet-open-v3", "handle-press-v3", "handle-press-side-v3", + "handle-pull-v3", "handle-pull-side-v3", "lever-pull-v3", "plate-slide-v3", + "plate-slide-back-v3", "plate-slide-back-side-v3", "plate-slide-side-v3", "reach-v3", + "reach-wall-v3", "window-close-v3", "window-open-v3", "peg-unplug-side-v3" + ], + "medium": [ + "basketball-v3", "bin-picking-v3", "box-close-v3", "coffee-pull-v3", "coffee-push-v3", + "hammer-v3", "peg-insert-side-v3", "push-wall-v3", "soccer-v3", "sweep-v3", "sweep-into-v3" + ], + "hard": [ + "assembly-v3", "hand-insert-v3", "pick-out-of-hole-v3", "pick-place-v3", "push-v3", "push-back-v3" + ], + "very_hard": [ + "shelf-place-v3", "disassemble-v3", "stick-pull-v3", "stick-push-v3", "pick-place-wall-v3" + ] + }, + "TASK_POLICY_MAPPING": { + "assembly-v3": "SawyerAssemblyV3Policy", "basketball-v3": "SawyerBasketballV3Policy", + "bin-picking-v3": "SawyerBinPickingV3Policy", "box-close-v3": "SawyerBoxCloseV3Policy", + "button-press-topdown-v3": "SawyerButtonPressTopdownV3Policy", + "button-press-topdown-wall-v3": "SawyerButtonPressTopdownWallV3Policy", + "button-press-v3": "SawyerButtonPressV3Policy", "button-press-wall-v3": "SawyerButtonPressWallV3Policy", + "coffee-button-v3": "SawyerCoffeeButtonV3Policy", "coffee-pull-v3": "SawyerCoffeePullV3Policy", + "coffee-push-v3": "SawyerCoffeePushV3Policy", "dial-turn-v3": "SawyerDialTurnV3Policy", + "disassemble-v3": "SawyerDisassembleV3Policy", "door-close-v3": "SawyerDoorCloseV3Policy", + "door-lock-v3": "SawyerDoorLockV3Policy", "door-open-v3": "SawyerDoorOpenV3Policy", + "door-unlock-v3": "SawyerDoorUnlockV3Policy", "drawer-close-v3": "SawyerDrawerCloseV3Policy", + "drawer-open-v3": "SawyerDrawerOpenV3Policy", "faucet-close-v3": "SawyerFaucetCloseV3Policy", + "faucet-open-v3": "SawyerFaucetOpenV3Policy", "hammer-v3": "SawyerHammerV3Policy", + "hand-insert-v3": "SawyerHandInsertV3Policy", "handle-press-side-v3": "SawyerHandlePressSideV3Policy", + "handle-press-v3": "SawyerHandlePressV3Policy", "handle-pull-side-v3": "SawyerHandlePullSideV3Policy", + "handle-pull-v3": "SawyerHandlePullV3Policy", "lever-pull-v3": "SawyerLeverPullV3Policy", + "peg-insert-side-v3": "SawyerPegInsertionSideV3Policy", "peg-unplug-side-v3": "SawyerPegUnplugSideV3Policy", + "pick-out-of-hole-v3": "SawyerPickOutOfHoleV3Policy", "pick-place-v3": "SawyerPickPlaceV3Policy", + "pick-place-wall-v3": "SawyerPickPlaceWallV3Policy", + "plate-slide-back-side-v3": "SawyerPlateSlideBackSideV3Policy", + "plate-slide-back-v3": "SawyerPlateSlideBackV3Policy", + "plate-slide-side-v3": "SawyerPlateSlideSideV3Policy", "plate-slide-v3": "SawyerPlateSlideV3Policy", + "push-back-v3": "SawyerPushBackV3Policy", "push-v3": "SawyerPushV3Policy", + "push-wall-v3": "SawyerPushWallV3Policy", "reach-v3": "SawyerReachV3Policy", + "reach-wall-v3": "SawyerReachWallV3Policy", "shelf-place-v3": "SawyerShelfPlaceV3Policy", + "soccer-v3": "SawyerSoccerV3Policy", "stick-pull-v3": "SawyerStickPullV3Policy", + "stick-push-v3": "SawyerStickPushV3Policy", "sweep-into-v3": "SawyerSweepIntoV3Policy", + "sweep-v3": "SawyerSweepV3Policy", "window-open-v3": "SawyerWindowOpenV3Policy", + "window-close-v3": "SawyerWindowCloseV3Policy" + } +} diff --git a/src/lerobot/envs/utils.py b/src/lerobot/envs/utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..ea89b5dd7b8458ed92de5ef4a8f0b699a0a9fb2b --- /dev/null +++ b/src/lerobot/envs/utils.py @@ -0,0 +1,343 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib.util +import os +import warnings +from collections.abc import Mapping, Sequence +from functools import singledispatch +from typing import Any + +import einops +import gymnasium as gym +import numpy as np +import torch +from huggingface_hub import hf_hub_download, snapshot_download +from torch import Tensor + +from lerobot.configs.types import FeatureType, PolicyFeature +from lerobot.envs.configs import EnvConfig +from lerobot.utils.constants import OBS_ENV_STATE, OBS_IMAGE, OBS_IMAGES, OBS_STATE, OBS_STR +from lerobot.utils.utils import get_channel_first_image_shape + + +def _convert_nested_dict(d): + result = {} + for k, v in d.items(): + if isinstance(v, dict): + result[k] = _convert_nested_dict(v) + elif isinstance(v, np.ndarray): + result[k] = torch.from_numpy(v) + else: + result[k] = v + return result + + +def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Tensor]: + # TODO(aliberts, rcadene): refactor this to use features from the environment (no hardcoding) + """Convert environment observation to LeRobot format observation. + Args: + observation: Dictionary of observation batches from a Gym vector environment. + Returns: + Dictionary of observation batches with keys renamed to LeRobot format and values as tensors. + """ + # map to expected inputs for the policy + return_observations = {} + if "pixels" in observations: + if isinstance(observations["pixels"], dict): + imgs = {f"{OBS_IMAGES}.{key}": img for key, img in observations["pixels"].items()} + else: + imgs = {OBS_IMAGE: observations["pixels"]} + + for imgkey, img in imgs.items(): + # TODO(aliberts, rcadene): use transforms.ToTensor()? + img_tensor = torch.from_numpy(img) + + # When preprocessing observations in a non-vectorized environment, we need to add a batch dimension. + # This is the case for human-in-the-loop RL where there is only one environment. 
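+            # Illustrative shapes (assuming, e.g., 480x480 RGB frames):
+            #   single env:     (480, 480, 3) uint8 -> unsqueezed to (1, 480, 480, 3)
+            #   vectorized env: (num_envs, 480, 480, 3) uint8, already batched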
+ if img_tensor.ndim == 3: + img_tensor = img_tensor.unsqueeze(0) + # sanity check that images are channel last + _, h, w, c = img_tensor.shape + assert c < h and c < w, f"expect channel last images, but instead got {img_tensor.shape=}" + + # sanity check that images are uint8 + assert img_tensor.dtype == torch.uint8, f"expect torch.uint8, but instead {img_tensor.dtype=}" + + # convert to channel first of type float32 in range [0,1] + img_tensor = einops.rearrange(img_tensor, "b h w c -> b c h w").contiguous() + img_tensor = img_tensor.type(torch.float32) + img_tensor /= 255 + + return_observations[imgkey] = img_tensor + + if "environment_state" in observations: + env_state = torch.from_numpy(observations["environment_state"]).float() + if env_state.dim() == 1: + env_state = env_state.unsqueeze(0) + + return_observations[OBS_ENV_STATE] = env_state + + if "agent_pos" in observations: + agent_pos = torch.from_numpy(observations["agent_pos"]).float() + if agent_pos.dim() == 1: + agent_pos = agent_pos.unsqueeze(0) + return_observations[OBS_STATE] = agent_pos + + if "robot_state" in observations: + return_observations[f"{OBS_STR}.robot_state"] = _convert_nested_dict(observations["robot_state"]) + return return_observations + + +def env_to_policy_features(env_cfg: EnvConfig) -> dict[str, PolicyFeature]: + # TODO(aliberts, rcadene): remove this hardcoding of keys and just use the nested keys as is + # (need to also refactor preprocess_observation and externalize normalization from policies) + policy_features = {} + for key, ft in env_cfg.features.items(): + if ft.type is FeatureType.VISUAL: + if len(ft.shape) != 3: + raise ValueError(f"Number of dimensions of {key} != 3 (shape={ft.shape})") + + shape = get_channel_first_image_shape(ft.shape) + feature = PolicyFeature(type=ft.type, shape=shape) + else: + feature = ft + + policy_key = env_cfg.features_map[key] + policy_features[policy_key] = feature + + return policy_features + + +def are_all_envs_same_type(env: gym.vector.VectorEnv) -> bool: + first_type = type(env.envs[0]) # Get type of first env + return all(type(e) is first_type for e in env.envs) # Fast type check + + +def check_env_attributes_and_types(env: gym.vector.VectorEnv) -> None: + with warnings.catch_warnings(): + warnings.simplefilter("once", UserWarning) # Apply filter only in this function + + if not (hasattr(env.envs[0], "task_description") and hasattr(env.envs[0], "task")): + warnings.warn( + "The environment does not have 'task_description' and 'task'. Some policies require these features.", + UserWarning, + stacklevel=2, + ) + if not are_all_envs_same_type(env): + warnings.warn( + "The environments have different types. Make sure you infer the right task from each environment. 
Empty task will be passed instead.", + UserWarning, + stacklevel=2, + ) + + +def add_envs_task(env: gym.vector.VectorEnv, observation: dict[str, Any]) -> dict[str, Any]: + """Adds task feature to the observation dict with respect to the first environment attribute.""" + if hasattr(env.envs[0], "task_description"): + task_result = env.call("task_description") + + if isinstance(task_result, tuple): + task_result = list(task_result) + + if not isinstance(task_result, list): + raise TypeError(f"Expected task_description to return a list, got {type(task_result)}") + if not all(isinstance(item, str) for item in task_result): + raise TypeError("All items in task_description result must be strings") + + observation["task"] = task_result + elif hasattr(env.envs[0], "task"): + task_result = env.call("task") + + if isinstance(task_result, tuple): + task_result = list(task_result) + + if not isinstance(task_result, list): + raise TypeError(f"Expected task to return a list, got {type(task_result)}") + if not all(isinstance(item, str) for item in task_result): + raise TypeError("All items in task result must be strings") + + observation["task"] = task_result + else: # For envs without language instructions, e.g. aloha transfer cube and etc. + num_envs = observation[list(observation.keys())[0]].shape[0] + observation["task"] = ["" for _ in range(num_envs)] + return observation + + +def _close_single_env(env: Any) -> None: + try: + env.close() + except Exception as exc: + print(f"Exception while closing env {env}: {exc}") + + +@singledispatch +def close_envs(obj: Any) -> None: + """Default: raise if the type is not recognized.""" + raise NotImplementedError(f"close_envs not implemented for type {type(obj).__name__}") + + +@close_envs.register +def _(env: Mapping) -> None: + for v in env.values(): + if isinstance(v, Mapping): + close_envs(v) + elif hasattr(v, "close"): + _close_single_env(v) + + +@close_envs.register +def _(envs: Sequence) -> None: + if isinstance(envs, (str | bytes)): + return + for v in envs: + if isinstance(v, Mapping) or isinstance(v, Sequence) and not isinstance(v, (str | bytes)): + close_envs(v) + elif hasattr(v, "close"): + _close_single_env(v) + + +@close_envs.register +def _(env: gym.Env) -> None: + _close_single_env(env) + + +# helper to safely load a python file as a module +def _load_module_from_path(path: str, module_name: str | None = None): + module_name = module_name or f"hub_env_{os.path.basename(path).replace('.', '_')}" + spec = importlib.util.spec_from_file_location(module_name, path) + if spec is None: + raise ImportError(f"Could not load module spec for {module_name} from {path}") + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) # type: ignore + return module + + +# helper to parse hub string (supports "user/repo", "user/repo@rev", optional path) +# examples: +# "user/repo" -> will look for env.py at repo root +# "user/repo@main:envs/my_env.py" -> explicit revision and path +def _parse_hub_url(hub_uri: str): + # very small parser: [repo_id][@revision][:path] + # repo_id is required (user/repo or org/repo) + revision = None + file_path = "env.py" + if "@" in hub_uri: + repo_and_rev, *rest = hub_uri.split(":", 1) + repo_id, rev = repo_and_rev.split("@", 1) + revision = rev + if rest: + file_path = rest[0] + else: + repo_id, *rest = hub_uri.split(":", 1) + if rest: + file_path = rest[0] + return repo_id, revision, file_path + + +def _download_hub_file( + cfg_str: str, + trust_remote_code: bool, + hub_cache_dir: str | None, +) -> 
tuple[str, str, str, str]: + """ + Parse `cfg_str` (hub URL), enforce `trust_remote_code`, and return + (repo_id, file_path, local_file, revision). + """ + if not trust_remote_code: + raise RuntimeError( + f"Refusing to execute remote code from the Hub for '{cfg_str}'. " + "Executing hub env modules runs arbitrary Python code from third-party repositories. " + "If you trust this repo and understand the risks, call `make_env(..., trust_remote_code=True)` " + "and prefer pinning to a specific revision: 'user/repo@:env.py'." + ) + + repo_id, revision, file_path = _parse_hub_url(cfg_str) + + try: + local_file = hf_hub_download( + repo_id=repo_id, filename=file_path, revision=revision, cache_dir=hub_cache_dir + ) + except Exception as e: + # fallback to snapshot download + snapshot_dir = snapshot_download(repo_id=repo_id, revision=revision, cache_dir=hub_cache_dir) + local_file = os.path.join(snapshot_dir, file_path) + if not os.path.exists(local_file): + raise FileNotFoundError( + f"Could not find {file_path} in repository {repo_id}@{revision or 'main'}" + ) from e + + return repo_id, file_path, local_file, revision + + +def _import_hub_module(local_file: str, repo_id: str) -> Any: + """ + Import the downloaded file as a module and surface helpful import error messages. + """ + module_name = f"hub_env_{repo_id.replace('/', '_')}" + try: + module = _load_module_from_path(local_file, module_name=module_name) + except ModuleNotFoundError as e: + missing = getattr(e, "name", None) or str(e) + raise ModuleNotFoundError( + f"Hub env '{repo_id}:{os.path.basename(local_file)}' failed to import because the dependency " + f"'{missing}' is not installed locally.\n\n" + ) from e + except ImportError as e: + raise ImportError( + f"Failed to load hub env module '{repo_id}:{os.path.basename(local_file)}'. Import error: {e}\n\n" + ) from e + return module + + +def _call_make_env(module: Any, n_envs: int, use_async_envs: bool) -> Any: + """ + Ensure module exposes make_env and call it. + """ + if not hasattr(module, "make_env"): + raise AttributeError( + f"The hub module {getattr(module, '__name__', 'hub_module')} must expose `make_env(n_envs=int, use_async_envs=bool)`." + ) + entry_fn = module.make_env + return entry_fn(n_envs=n_envs, use_async_envs=use_async_envs) + + +def _normalize_hub_result(result: Any) -> dict[str, dict[int, gym.vector.VectorEnv]]: + """ + Normalize possible return types from hub `make_env` into the mapping: + { suite_name: { task_id: vector_env } } + Accepts: + - dict (assumed already correct) + - gym.vector.VectorEnv + - gym.Env (will be wrapped into SyncVectorEnv) + """ + if isinstance(result, dict): + return result + + # VectorEnv: use its spec.id if available + if isinstance(result, gym.vector.VectorEnv): + suite_name = getattr(result, "spec", None) and getattr(result.spec, "id", None) or "hub_env" + return {suite_name: {0: result}} + + # Single Env: wrap into SyncVectorEnv + if isinstance(result, gym.Env): + vec = gym.vector.SyncVectorEnv([lambda: result]) + suite_name = getattr(result, "spec", None) and getattr(result.spec, "id", None) or "hub_env" + return {suite_name: {0: vec}} + + raise ValueError( + "Hub `make_env` must return either a mapping {suite: {task_id: vec_env}}, " + "a gym.vector.VectorEnv, or a single gym.Env." 
+ ) diff --git a/src/lerobot/model/kinematics.py b/src/lerobot/model/kinematics.py new file mode 100644 index 0000000000000000000000000000000000000000..d5e1a9e941b332bf212ea254b50257f9dbbb340e --- /dev/null +++ b/src/lerobot/model/kinematics.py @@ -0,0 +1,132 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np + + +class RobotKinematics: + """Robot kinematics using placo library for forward and inverse kinematics.""" + + def __init__( + self, + urdf_path: str, + target_frame_name: str = "gripper_frame_link", + joint_names: list[str] | None = None, + ): + """ + Initialize placo-based kinematics solver. + + Args: + urdf_path (str): Path to the robot URDF file + target_frame_name (str): Name of the end-effector frame in the URDF + joint_names (list[str] | None): List of joint names to use for the kinematics solver + """ + try: + import placo # type: ignore[import-not-found] # C++ library with Python bindings, no type stubs available. TODO: Create stub file or request upstream typing support. + except ImportError as e: + raise ImportError( + "placo is required for RobotKinematics. " + "Please install the optional dependencies of `kinematics` in the package." + ) from e + + self.robot = placo.RobotWrapper(urdf_path) + self.solver = placo.KinematicsSolver(self.robot) + self.solver.mask_fbase(True) # Fix the base + + self.target_frame_name = target_frame_name + + # Set joint names + self.joint_names = list(self.robot.joint_names()) if joint_names is None else joint_names + + # Initialize frame task for IK + self.tip_frame = self.solver.add_frame_task(self.target_frame_name, np.eye(4)) + + def forward_kinematics(self, joint_pos_deg: np.ndarray) -> np.ndarray: + """ + Compute forward kinematics for given joint configuration given the target frame name in the constructor. + + Args: + joint_pos_deg: Joint positions in degrees (numpy array) + + Returns: + 4x4 transformation matrix of the end-effector pose + """ + + # Convert degrees to radians + joint_pos_rad = np.deg2rad(joint_pos_deg[: len(self.joint_names)]) + + # Update joint positions in placo robot + for i, joint_name in enumerate(self.joint_names): + self.robot.set_joint(joint_name, joint_pos_rad[i]) + + # Update kinematics + self.robot.update_kinematics() + + # Get the transformation matrix + return self.robot.get_T_world_frame(self.target_frame_name) + + def inverse_kinematics( + self, + current_joint_pos: np.ndarray, + desired_ee_pose: np.ndarray, + position_weight: float = 1.0, + orientation_weight: float = 0.01, + ) -> np.ndarray: + """ + Compute inverse kinematics using placo solver. 
+ + Args: + current_joint_pos: Current joint positions in degrees (used as initial guess) + desired_ee_pose: Target end-effector pose as a 4x4 transformation matrix + position_weight: Weight for position constraint in IK + orientation_weight: Weight for orientation constraint in IK, set to 0.0 to only constrain position + + Returns: + Joint positions in degrees that achieve the desired end-effector pose + """ + + # Convert current joint positions to radians for initial guess + current_joint_rad = np.deg2rad(current_joint_pos[: len(self.joint_names)]) + + # Set current joint positions as initial guess + for i, joint_name in enumerate(self.joint_names): + self.robot.set_joint(joint_name, current_joint_rad[i]) + + # Update the target pose for the frame task + self.tip_frame.T_world_frame = desired_ee_pose + + # Configure the task based on position_only flag + self.tip_frame.configure(self.target_frame_name, "soft", position_weight, orientation_weight) + + # Solve IK + self.solver.solve(True) + self.robot.update_kinematics() + + # Extract joint positions + joint_pos_rad = [] + for joint_name in self.joint_names: + joint = self.robot.get_joint(joint_name) + joint_pos_rad.append(joint) + + # Convert back to degrees + joint_pos_deg = np.rad2deg(joint_pos_rad) + + # Preserve gripper position if present in current_joint_pos + if len(current_joint_pos) > len(self.joint_names): + result = np.zeros_like(current_joint_pos) + result[: len(self.joint_names)] = joint_pos_deg + result[len(self.joint_names) :] = current_joint_pos[len(self.joint_names) :] + return result + else: + return joint_pos_deg diff --git a/src/lerobot/motors/__init__.py b/src/lerobot/motors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..233b6c6b4e008b8c4d9602870dc6c393e8e02335 --- /dev/null +++ b/src/lerobot/motors/__init__.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .motors_bus import Motor, MotorCalibration, MotorNormMode, MotorsBus diff --git a/src/lerobot/motors/calibration_gui.py b/src/lerobot/motors/calibration_gui.py new file mode 100644 index 0000000000000000000000000000000000000000..97525229b4e66a2a0fc9b580622e2a83ab2826b8 --- /dev/null +++ b/src/lerobot/motors/calibration_gui.py @@ -0,0 +1,401 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
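+
+# Minimal usage sketch (the `bus` object and motor names below are hypothetical,
+# for illustration only; any connected MotorsBus works the same way):
+#
+#     gui = RangeFinderGUI(bus, groups={"arm": ["shoulder_pan", "elbow_flex"]})
+#     calibration = gui.run()  # blocks until the window is closed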
+ +import math +import os +from dataclasses import dataclass + +os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "1" + +from lerobot.motors import MotorCalibration, MotorsBus + +BAR_LEN, BAR_THICKNESS = 450, 8 +HANDLE_R = 10 +BRACKET_W, BRACKET_H = 6, 14 +TRI_W, TRI_H = 12, 14 + +BTN_W, BTN_H = 60, 22 +SAVE_W, SAVE_H = 80, 28 +LOAD_W = 80 +DD_W, DD_H = 160, 28 + +TOP_GAP = 50 +PADDING_Y, TOP_OFFSET = 70, 60 +FONT_SIZE, FPS = 20, 60 + +BG_COLOR = (30, 30, 30) +BAR_RED, BAR_GREEN = (200, 60, 60), (60, 200, 60) +HANDLE_COLOR, TEXT_COLOR = (240, 240, 240), (250, 250, 250) +TICK_COLOR = (250, 220, 40) +BTN_COLOR, BTN_COLOR_HL = (80, 80, 80), (110, 110, 110) +DD_COLOR, DD_COLOR_HL = (70, 70, 70), (100, 100, 100) + + +def dist(a, b): + return math.hypot(a[0] - b[0], a[1] - b[1]) + + +@dataclass +class RangeValues: + min_v: int + pos_v: int + max_v: int + + +class RangeSlider: + """One motor = one slider row""" + + def __init__(self, motor, idx, res, calibration, present, label_pad, base_y): + import pygame + + self.motor = motor + self.res = res + self.x0 = 40 + label_pad + self.x1 = self.x0 + BAR_LEN + self.y = base_y + idx * PADDING_Y + + self.min_v = calibration.range_min + self.max_v = calibration.range_max + self.pos_v = max(self.min_v, min(present, self.max_v)) + + self.min_x = self._pos_from_val(self.min_v) + self.max_x = self._pos_from_val(self.max_v) + self.pos_x = self._pos_from_val(self.pos_v) + + self.min_btn = pygame.Rect(self.x0 - BTN_W - 6, self.y - BTN_H // 2, BTN_W, BTN_H) + self.max_btn = pygame.Rect(self.x1 + 6, self.y - BTN_H // 2, BTN_W, BTN_H) + + self.drag_min = self.drag_max = self.drag_pos = False + self.tick_val = present + self.font = pygame.font.Font(None, FONT_SIZE) + + def _val_from_pos(self, x): + return round((x - self.x0) / BAR_LEN * self.res) + + def _pos_from_val(self, v): + return self.x0 + (v / self.res) * BAR_LEN + + def set_tick(self, v): + self.tick_val = max(0, min(v, self.res)) + + def _triangle_hit(self, pos): + import pygame + + tri_top = self.y - BAR_THICKNESS // 2 - 2 + return pygame.Rect(self.pos_x - TRI_W // 2, tri_top - TRI_H, TRI_W, TRI_H).collidepoint(pos) + + def handle_event(self, e): + import pygame + + if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1: + if self.min_btn.collidepoint(e.pos): + self.min_x, self.min_v = self.pos_x, self.pos_v + return + if self.max_btn.collidepoint(e.pos): + self.max_x, self.max_v = self.pos_x, self.pos_v + return + if dist(e.pos, (self.min_x, self.y)) <= HANDLE_R: + self.drag_min = True + elif dist(e.pos, (self.max_x, self.y)) <= HANDLE_R: + self.drag_max = True + elif self._triangle_hit(e.pos): + self.drag_pos = True + + elif e.type == pygame.MOUSEBUTTONUP and e.button == 1: + self.drag_min = self.drag_max = self.drag_pos = False + + elif e.type == pygame.MOUSEMOTION: + x = e.pos[0] + if self.drag_min: + self.min_x = max(self.x0, min(x, self.pos_x)) + elif self.drag_max: + self.max_x = min(self.x1, max(x, self.pos_x)) + elif self.drag_pos: + self.pos_x = max(self.min_x, min(x, self.max_x)) + + self.min_v = self._val_from_pos(self.min_x) + self.max_v = self._val_from_pos(self.max_x) + self.pos_v = self._val_from_pos(self.pos_x) + + def _draw_button(self, surf, rect, text): + import pygame + + clr = BTN_COLOR_HL if rect.collidepoint(pygame.mouse.get_pos()) else BTN_COLOR + pygame.draw.rect(surf, clr, rect, border_radius=4) + t = self.font.render(text, True, TEXT_COLOR) + surf.blit(t, (rect.centerx - t.get_width() // 2, rect.centery - t.get_height() // 2)) + + def draw(self, surf): + import pygame + + # motor name 
above set-min button (right-aligned)
+        name_surf = self.font.render(self.motor, True, TEXT_COLOR)
+        surf.blit(
+            name_surf,
+            (self.min_btn.right - name_surf.get_width(), self.min_btn.y - name_surf.get_height() - 4),
+        )
+
+        # bar + active section
+        pygame.draw.rect(surf, BAR_RED, (self.x0, self.y - BAR_THICKNESS // 2, BAR_LEN, BAR_THICKNESS))
+        pygame.draw.rect(
+            surf, BAR_GREEN, (self.min_x, self.y - BAR_THICKNESS // 2, self.max_x - self.min_x, BAR_THICKNESS)
+        )
+
+        # tick
+        tick_x = self._pos_from_val(self.tick_val)
+        pygame.draw.line(
+            surf,
+            TICK_COLOR,
+            (tick_x, self.y - BAR_THICKNESS // 2 - 4),
+            (tick_x, self.y + BAR_THICKNESS // 2 + 4),
+            2,
+        )
+
+        # brackets
+        for x, sign in ((self.min_x, +1), (self.max_x, -1)):
+            pygame.draw.line(
+                surf, HANDLE_COLOR, (x, self.y - BRACKET_H // 2), (x, self.y + BRACKET_H // 2), 2
+            )
+            pygame.draw.line(
+                surf,
+                HANDLE_COLOR,
+                (x, self.y - BRACKET_H // 2),
+                (x + sign * BRACKET_W, self.y - BRACKET_H // 2),
+                2,
+            )
+            pygame.draw.line(
+                surf,
+                HANDLE_COLOR,
+                (x, self.y + BRACKET_H // 2),
+                (x + sign * BRACKET_W, self.y + BRACKET_H // 2),
+                2,
+            )
+
+        # triangle ▼
+        tri_top = self.y - BAR_THICKNESS // 2 - 2
+        pygame.draw.polygon(
+            surf,
+            HANDLE_COLOR,
+            [
+                (self.pos_x, tri_top),
+                (self.pos_x - TRI_W // 2, tri_top - TRI_H),
+                (self.pos_x + TRI_W // 2, tri_top - TRI_H),
+            ],
+        )
+
+        # numeric labels
+        fh = self.font.get_height()
+        pos_y = tri_top - TRI_H - 4 - fh
+        txts = [
+            (self.min_v, self.min_x, self.y - BRACKET_H // 2 - 4 - fh),
+            (self.max_v, self.max_x, self.y - BRACKET_H // 2 - 4 - fh),
+            (self.pos_v, self.pos_x, pos_y),
+        ]
+        for v, x, y in txts:
+            s = self.font.render(str(v), True, TEXT_COLOR)
+            surf.blit(s, (x - s.get_width() // 2, y))
+
+        # buttons
+        self._draw_button(surf, self.min_btn, "set min")
+        self._draw_button(surf, self.max_btn, "set max")
+
+    # external
+    def values(self) -> RangeValues:
+        return RangeValues(self.min_v, self.pos_v, self.max_v)
+
+
+class RangeFinderGUI:
+    def __init__(self, bus: MotorsBus, groups: dict[str, list[str]] | None = None):
+        import pygame
+
+        self.bus = bus
+        self.groups = groups if groups is not None else {"all": list(bus.motors)}
+        self.group_names = list(self.groups)
+        self.current_group = self.group_names[0]
+
+        if not bus.is_connected:
+            bus.connect()
+
+        self.calibration = bus.read_calibration()
+        self.res_table = bus.model_resolution_table
+        self.present_cache = {
+            m: bus.read("Present_Position", m, normalize=False)
+            for motors in self.groups.values()
+            for m in motors
+        }
+
+        pygame.init()
+        self.font = pygame.font.Font(None, FONT_SIZE)
+
+        label_pad = max(self.font.size(m)[0] for ms in self.groups.values() for m in ms)
+        self.label_pad = label_pad
+        width = 40 + label_pad + BAR_LEN + 6 + BTN_W + 10 + SAVE_W + 10
+        self.controls_bottom = 10 + SAVE_H
+        self.base_y = self.controls_bottom + TOP_GAP
+        height = self.base_y + PADDING_Y * len(self.groups[self.current_group]) + 40
+
+        self.screen = pygame.display.set_mode((width, height))
+        pygame.display.set_caption("Motors range finder")
+
+        # ui rects
+        self.save_btn = pygame.Rect(width - SAVE_W - 10, 10, SAVE_W, SAVE_H)
+        self.load_btn = pygame.Rect(self.save_btn.left - LOAD_W - 10, 10, LOAD_W, SAVE_H)
+        self.dd_btn = pygame.Rect(width // 2 - DD_W // 2, 10, DD_W, DD_H)
+        self.dd_open = False  # dropdown expanded?
+ + self.clock = pygame.time.Clock() + self._build_sliders() + self._adjust_height() + + def _adjust_height(self): + import pygame + + motors = self.groups[self.current_group] + new_h = self.base_y + PADDING_Y * len(motors) + 40 + if new_h != self.screen.get_height(): + w = self.screen.get_width() + self.screen = pygame.display.set_mode((w, new_h)) + + def _build_sliders(self): + self.sliders: list[RangeSlider] = [] + motors = self.groups[self.current_group] + for i, m in enumerate(motors): + self.sliders.append( + RangeSlider( + motor=m, + idx=i, + res=self.res_table[self.bus.motors[m].model] - 1, + calibration=self.calibration[m], + present=self.present_cache[m], + label_pad=self.label_pad, + base_y=self.base_y, + ) + ) + + def _draw_dropdown(self): + import pygame + + # collapsed box + hover = self.dd_btn.collidepoint(pygame.mouse.get_pos()) + pygame.draw.rect(self.screen, DD_COLOR_HL if hover else DD_COLOR, self.dd_btn, border_radius=6) + + txt = self.font.render(self.current_group, True, TEXT_COLOR) + self.screen.blit( + txt, (self.dd_btn.centerx - txt.get_width() // 2, self.dd_btn.centery - txt.get_height() // 2) + ) + + tri_w, tri_h = 12, 6 + cx = self.dd_btn.right - 14 + cy = self.dd_btn.centery + 1 + pygame.draw.polygon( + self.screen, + TEXT_COLOR, + [(cx - tri_w // 2, cy - tri_h // 2), (cx + tri_w // 2, cy - tri_h // 2), (cx, cy + tri_h // 2)], + ) + + if not self.dd_open: + return + + # expanded list + for i, name in enumerate(self.group_names): + item_rect = pygame.Rect(self.dd_btn.left, self.dd_btn.bottom + i * DD_H, DD_W, DD_H) + clr = DD_COLOR_HL if item_rect.collidepoint(pygame.mouse.get_pos()) else DD_COLOR + pygame.draw.rect(self.screen, clr, item_rect) + t = self.font.render(name, True, TEXT_COLOR) + self.screen.blit( + t, (item_rect.centerx - t.get_width() // 2, item_rect.centery - t.get_height() // 2) + ) + + def _handle_dropdown_event(self, e): + import pygame + + if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1: + if self.dd_btn.collidepoint(e.pos): + self.dd_open = not self.dd_open + return True + if self.dd_open: + for i, name in enumerate(self.group_names): + item_rect = pygame.Rect(self.dd_btn.left, self.dd_btn.bottom + i * DD_H, DD_W, DD_H) + if item_rect.collidepoint(e.pos): + if name != self.current_group: + self.current_group = name + self._build_sliders() + self._adjust_height() + self.dd_open = False + return True + self.dd_open = False + return False + + def _save_current(self): + for s in self.sliders: + self.calibration[s.motor].range_min = s.min_v + self.calibration[s.motor].range_max = s.max_v + + with self.bus.torque_disabled(): + self.bus.write_calibration(self.calibration) + + def _load_current(self): + self.calibration = self.bus.read_calibration() + for s in self.sliders: + s.min_v = self.calibration[s.motor].range_min + s.max_v = self.calibration[s.motor].range_max + s.min_x = s._pos_from_val(s.min_v) + s.max_x = s._pos_from_val(s.max_v) + + def run(self) -> dict[str, MotorCalibration]: + import pygame + + while True: + for e in pygame.event.get(): + if e.type == pygame.QUIT: + pygame.quit() + return self.calibration + + if self._handle_dropdown_event(e): + continue + + if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1: + if self.save_btn.collidepoint(e.pos): + self._save_current() + elif self.load_btn.collidepoint(e.pos): + self._load_current() + + for s in self.sliders: + s.handle_event(e) + + # live goal write while dragging + for s in self.sliders: + if s.drag_pos: + self.bus.write("Goal_Position", s.motor, s.pos_v, normalize=False) 
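+                    # NOTE: normalize=False here (and in the read below) keeps values in
+                    # raw encoder ticks, the same scale the sliders operate on.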
+ + # tick update + for s in self.sliders: + pos = self.bus.read("Present_Position", s.motor, normalize=False) + s.set_tick(pos) + self.present_cache[s.motor] = pos + + # ─ drawing + self.screen.fill(BG_COLOR) + for s in self.sliders: + s.draw(self.screen) + + self._draw_dropdown() + + # load / save buttons + for rect, text in ((self.load_btn, "LOAD"), (self.save_btn, "SAVE")): + clr = BTN_COLOR_HL if rect.collidepoint(pygame.mouse.get_pos()) else BTN_COLOR + pygame.draw.rect(self.screen, clr, rect, border_radius=6) + t = self.font.render(text, True, TEXT_COLOR) + self.screen.blit(t, (rect.centerx - t.get_width() // 2, rect.centery - t.get_height() // 2)) + + pygame.display.flip() + self.clock.tick(FPS) diff --git a/src/lerobot/motors/dynamixel/__init__.py b/src/lerobot/motors/dynamixel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..38b770cd6a0947395f4c0f6e2f37bbdc8120d965 --- /dev/null +++ b/src/lerobot/motors/dynamixel/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .dynamixel import DriveMode, DynamixelMotorsBus, OperatingMode, TorqueMode +from .tables import * diff --git a/src/lerobot/motors/dynamixel/dynamixel.py b/src/lerobot/motors/dynamixel/dynamixel.py new file mode 100644 index 0000000000000000000000000000000000000000..fbc63fef8e999e1756dd8c05784f0c4dce545d39 --- /dev/null +++ b/src/lerobot/motors/dynamixel/dynamixel.py @@ -0,0 +1,264 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO(aliberts): Should we implement FastSyncRead/Write? 
+# https://github.com/ROBOTIS-GIT/DynamixelSDK/pull/643
+# https://github.com/ROBOTIS-GIT/DynamixelSDK/releases/tag/3.8.2
+# https://emanual.robotis.com/docs/en/dxl/protocol2/#fast-sync-read-0x8a
+# -> Need to check compatibility across models
+
+import logging
+from copy import deepcopy
+from enum import Enum
+
+from lerobot.motors.encoding_utils import decode_twos_complement, encode_twos_complement
+
+from ..motors_bus import Motor, MotorCalibration, MotorsBus, NameOrID, Value, get_address
+from .tables import (
+    AVAILABLE_BAUDRATES,
+    MODEL_BAUDRATE_TABLE,
+    MODEL_CONTROL_TABLE,
+    MODEL_ENCODING_TABLE,
+    MODEL_NUMBER_TABLE,
+    MODEL_RESOLUTION,
+)
+
+PROTOCOL_VERSION = 2.0
+DEFAULT_BAUDRATE = 1_000_000
+DEFAULT_TIMEOUT_MS = 1000
+
+NORMALIZED_DATA = ["Goal_Position", "Present_Position"]
+
+logger = logging.getLogger(__name__)
+
+
+class OperatingMode(Enum):
+    # DYNAMIXEL only controls current (torque) regardless of speed and position. This mode is ideal for a
+    # gripper or a system that only uses current (torque) control, or a system that has additional
+    # velocity/position controllers.
+    CURRENT = 0
+
+    # This mode controls velocity and is identical to the Wheel Mode (endless) of existing DYNAMIXEL.
+    # It is ideal for wheel-type robots.
+    VELOCITY = 1
+
+    # This mode controls position and is identical to the Joint Mode of existing DYNAMIXEL. The operating
+    # position range is limited by the Max Position Limit (48) and the Min Position Limit (52). It is
+    # ideal for articulated robots in which each joint rotates less than 360 degrees.
+    POSITION = 3
+
+    # This mode controls position and is identical to the Multi-turn Position Control of existing
+    # DYNAMIXEL. 512 turns are supported (-256[rev] ~ 256[rev]). It is ideal for multi-turn wrists,
+    # conveyor systems, or systems that require an additional reduction gear. Note that the Max Position
+    # Limit (48) and Min Position Limit (52) are not used in Extended Position Control Mode.
+    EXTENDED_POSITION = 4
+
+    # This mode controls both position and current (torque). Up to 512 turns are supported (-256[rev] ~
+    # 256[rev]). It is ideal for systems that require both position and current control, such as
+    # articulated robots or grippers.
+    CURRENT_POSITION = 5
+
+    # This mode directly controls the PWM output (Voltage Control Mode).
+    PWM = 16
+
+
+class DriveMode(Enum):
+    NON_INVERTED = 0
+    INVERTED = 1
+
+
+class TorqueMode(Enum):
+    ENABLED = 1
+    DISABLED = 0
+
+
+def _split_into_byte_chunks(value: int, length: int) -> list[int]:
+    import dynamixel_sdk as dxl
+
+    if length == 1:
+        data = [value]
+    elif length == 2:
+        data = [dxl.DXL_LOBYTE(value), dxl.DXL_HIBYTE(value)]
+    elif length == 4:
+        data = [
+            dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
+            dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)),
+            dxl.DXL_LOBYTE(dxl.DXL_HIWORD(value)),
+            dxl.DXL_HIBYTE(dxl.DXL_HIWORD(value)),
+        ]
+    else:
+        raise ValueError(f"Unsupported byte length: {length}. Expected 1, 2 or 4.")
+    return data
+
+
+class DynamixelMotorsBus(MotorsBus):
+    """
+    The Dynamixel implementation for a MotorsBus. It relies on the Python dynamixel_sdk package to
+    communicate with the motors. For more info, see the Dynamixel SDK Documentation:
+    https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20
+    """
+
+    apply_drive_mode = False
+    available_baudrates = deepcopy(AVAILABLE_BAUDRATES)
+    default_baudrate = DEFAULT_BAUDRATE
+    default_timeout = DEFAULT_TIMEOUT_MS
+    model_baudrate_table = deepcopy(MODEL_BAUDRATE_TABLE)
+    model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE)
+    model_encoding_table = deepcopy(MODEL_ENCODING_TABLE)
+    model_number_table = deepcopy(MODEL_NUMBER_TABLE)
+    model_resolution_table = deepcopy(MODEL_RESOLUTION)
+    normalized_data = deepcopy(NORMALIZED_DATA)
+
+    def __init__(
+        self,
+        port: str,
+        motors: dict[str, Motor],
+        calibration: dict[str, MotorCalibration] | None = None,
+    ):
+        super().__init__(port, motors, calibration)
+        import dynamixel_sdk as dxl
+
+        self.port_handler = dxl.PortHandler(self.port)
+        self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION)
+        self.sync_reader = dxl.GroupSyncRead(self.port_handler, self.packet_handler, 0, 0)
+        self.sync_writer = dxl.GroupSyncWrite(self.port_handler, self.packet_handler, 0, 0)
+        self._comm_success = dxl.COMM_SUCCESS
+        self._no_error = 0x00
+
+    def _assert_protocol_is_compatible(self, instruction_name: str) -> None:
+        pass
+
+    def _handshake(self) -> None:
+        self._assert_motors_exist()
+
+    def _find_single_motor(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]:
+        model = self.motors[motor].model
+        search_baudrates = (
+            [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model]
+        )
+
+        for baudrate in search_baudrates:
+            self.set_baudrate(baudrate)
+            id_model = self.broadcast_ping()
+            if id_model:
+                found_id, found_model = next(iter(id_model.items()))
+                expected_model_nb = self.model_number_table[model]
+                if found_model != expected_model_nb:
+                    raise RuntimeError(
+                        f"Found one motor on {baudrate=} with id={found_id} but it has a "
+                        f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. "
+                        f"Make sure you are only connected to the '{motor}' motor (model '{model}')."
+                    )
+                return baudrate, found_id
+
+        raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. Make sure it is connected.")
+
+    def configure_motors(self, return_delay_time=0) -> None:
+        # By default, Dynamixel motors have a 500µs response delay (corresponding to a value of 250 in
+        # the 'Return_Delay_Time' register). We ensure this is reduced to the minimum of 2µs (value of 0).
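+        # (The register counts in units of 2µs, so the default of 250 gives 250 * 2µs = 500µs.)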
+ for motor in self.motors: + self.write("Return_Delay_Time", motor, return_delay_time) + + @property + def is_calibrated(self) -> bool: + return self.calibration == self.read_calibration() + + def read_calibration(self) -> dict[str, MotorCalibration]: + offsets = self.sync_read("Homing_Offset", normalize=False) + mins = self.sync_read("Min_Position_Limit", normalize=False) + maxes = self.sync_read("Max_Position_Limit", normalize=False) + drive_modes = self.sync_read("Drive_Mode", normalize=False) + + calibration = {} + for motor, m in self.motors.items(): + calibration[motor] = MotorCalibration( + id=m.id, + drive_mode=drive_modes[motor], + homing_offset=offsets[motor], + range_min=mins[motor], + range_max=maxes[motor], + ) + + return calibration + + def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None: + for motor, calibration in calibration_dict.items(): + self.write("Homing_Offset", motor, calibration.homing_offset) + self.write("Min_Position_Limit", motor, calibration.range_min) + self.write("Max_Position_Limit", motor, calibration.range_max) + + if cache: + self.calibration = calibration_dict + + def disable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: + for motor in self._get_motors_list(motors): + self.write("Torque_Enable", motor, TorqueMode.DISABLED.value, num_retry=num_retry) + + def _disable_torque(self, motor_id: int, model: str, num_retry: int = 0) -> None: + addr, length = get_address(self.model_ctrl_table, model, "Torque_Enable") + self._write(addr, length, motor_id, TorqueMode.DISABLED.value, num_retry=num_retry) + + def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: + for motor in self._get_motors_list(motors): + self.write("Torque_Enable", motor, TorqueMode.ENABLED.value, num_retry=num_retry) + + def _encode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]: + for id_ in ids_values: + model = self._id_to_model(id_) + encoding_table = self.model_encoding_table.get(model) + if encoding_table and data_name in encoding_table: + n_bytes = encoding_table[data_name] + ids_values[id_] = encode_twos_complement(ids_values[id_], n_bytes) + + return ids_values + + def _decode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]: + for id_ in ids_values: + model = self._id_to_model(id_) + encoding_table = self.model_encoding_table.get(model) + if encoding_table and data_name in encoding_table: + n_bytes = encoding_table[data_name] + ids_values[id_] = decode_twos_complement(ids_values[id_], n_bytes) + + return ids_values + + def _get_half_turn_homings(self, positions: dict[NameOrID, Value]) -> dict[NameOrID, Value]: + """ + On Dynamixel Motors: + Present_Position = Actual_Position + Homing_Offset + """ + half_turn_homings = {} + for motor, pos in positions.items(): + model = self._get_motor_model(motor) + max_res = self.model_resolution_table[model] - 1 + half_turn_homings[motor] = int(max_res / 2) - pos + + return half_turn_homings + + def _split_into_byte_chunks(self, value: int, length: int) -> list[int]: + return _split_into_byte_chunks(value, length) + + def broadcast_ping(self, num_retry: int = 0, raise_on_error: bool = False) -> dict[int, int] | None: + for n_try in range(1 + num_retry): + data_list, comm = self.packet_handler.broadcastPing(self.port_handler) + if self._is_comm_success(comm): + break + logger.debug(f"Broadcast ping failed on port '{self.port}' ({n_try=})") + 
logger.debug(self.packet_handler.getTxRxResult(comm))
+
+        if not self._is_comm_success(comm):
+            if raise_on_error:
+                raise ConnectionError(self.packet_handler.getTxRxResult(comm))
+
+            return
+
+        return {id_: data[0] for id_, data in data_list.items()}

diff --git a/src/lerobot/motors/dynamixel/tables.py b/src/lerobot/motors/dynamixel/tables.py
new file mode 100644
index 0000000000000000000000000000000000000000..904cc3ae1a529017c0b7d33a47cd41c6806e1524
--- /dev/null
+++ b/src/lerobot/motors/dynamixel/tables.py
@@ -0,0 +1,199 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(Steven): Consider doing the following:
+# from enum import Enum
+# class MyControlTableKey(Enum):
+#     ID = "ID"
+#     GOAL_SPEED = "Goal_Speed"
+#     ...
+#
+# MY_CONTROL_TABLE = {
+#     MyControlTableKey.ID.value: (5, 1)
+#     MyControlTableKey.GOAL_SPEED.value: (46, 2)
+#     ...
+# }
+# This allows me to do:
+# bus.write(MyControlTableKey.GOAL_SPEED, ...)
+# Instead of:
+# bus.write("Goal_Speed", ...)
+# This is important for two reasons:
+# 1. The linter will tell me if I'm trying to use an invalid key, instead of me only realizing it when I
+#    get the RuntimeError
+# 2. We can change the value of the MyControlTableKey enums without impacting the client code
+
+
+# {data_name: (address, size_byte)}
+# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#control-table
+X_SERIES_CONTROL_TABLE = {
+    "Model_Number": (0, 2),
+    "Model_Information": (2, 4),
+    "Firmware_Version": (6, 1),
+    "ID": (7, 1),
+    "Baud_Rate": (8, 1),
+    "Return_Delay_Time": (9, 1),
+    "Drive_Mode": (10, 1),
+    "Operating_Mode": (11, 1),
+    "Secondary_ID": (12, 1),
+    "Protocol_Type": (13, 1),
+    "Homing_Offset": (20, 4),
+    "Moving_Threshold": (24, 4),
+    "Temperature_Limit": (31, 1),
+    "Max_Voltage_Limit": (32, 2),
+    "Min_Voltage_Limit": (34, 2),
+    "PWM_Limit": (36, 2),
+    "Current_Limit": (38, 2),
+    "Acceleration_Limit": (40, 4),
+    "Velocity_Limit": (44, 4),
+    "Max_Position_Limit": (48, 4),
+    "Min_Position_Limit": (52, 4),
+    "Shutdown": (63, 1),
+    "Torque_Enable": (64, 1),
+    "LED": (65, 1),
+    "Status_Return_Level": (68, 1),
+    "Registered_Instruction": (69, 1),
+    "Hardware_Error_Status": (70, 1),
+    "Velocity_I_Gain": (76, 2),
+    "Velocity_P_Gain": (78, 2),
+    "Position_D_Gain": (80, 2),
+    "Position_I_Gain": (82, 2),
+    "Position_P_Gain": (84, 2),
+    "Feedforward_2nd_Gain": (88, 2),
+    "Feedforward_1st_Gain": (90, 2),
+    "Bus_Watchdog": (98, 1),
+    "Goal_PWM": (100, 2),
+    "Goal_Current": (102, 2),
+    "Goal_Velocity": (104, 4),
+    "Profile_Acceleration": (108, 4),
+    "Profile_Velocity": (112, 4),
+    "Goal_Position": (116, 4),
+    "Realtime_Tick": (120, 2),
+    "Moving": (122, 1),
+    "Moving_Status": (123, 1),
+    "Present_PWM": (124, 2),
+    "Present_Current": (126, 2),
+    "Present_Velocity": (128, 4),
+    "Present_Position": (132, 4),
+    "Velocity_Trajectory": (136, 4),
+    "Position_Trajectory": (140, 4),
+    "Present_Input_Voltage": (144, 2),
+    "Present_Temperature": (146, 1),
+}
+
+# 
https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#baud-rate8 +X_SERIES_BAUDRATE_TABLE = { + 9_600: 0, + 57_600: 1, + 115_200: 2, + 1_000_000: 3, + 2_000_000: 4, + 3_000_000: 5, + 4_000_000: 6, +} + +# {data_name: size_byte} +X_SERIES_ENCODINGS_TABLE = { + "Homing_Offset": X_SERIES_CONTROL_TABLE["Homing_Offset"][1], + "Goal_PWM": X_SERIES_CONTROL_TABLE["Goal_PWM"][1], + "Goal_Current": X_SERIES_CONTROL_TABLE["Goal_Current"][1], + "Goal_Velocity": X_SERIES_CONTROL_TABLE["Goal_Velocity"][1], + "Goal_Position": X_SERIES_CONTROL_TABLE["Goal_Position"][1], + "Present_Position": X_SERIES_CONTROL_TABLE["Present_Position"][1], + "Present_PWM": X_SERIES_CONTROL_TABLE["Present_PWM"][1], + "Present_Current": X_SERIES_CONTROL_TABLE["Present_Current"][1], + "Present_Velocity": X_SERIES_CONTROL_TABLE["Present_Velocity"][1], +} + +MODEL_ENCODING_TABLE = { + "x_series": X_SERIES_ENCODINGS_TABLE, + "xl330-m077": X_SERIES_ENCODINGS_TABLE, + "xl330-m288": X_SERIES_ENCODINGS_TABLE, + "xl430-w250": X_SERIES_ENCODINGS_TABLE, + "xm430-w350": X_SERIES_ENCODINGS_TABLE, + "xm540-w270": X_SERIES_ENCODINGS_TABLE, + "xc430-w150": X_SERIES_ENCODINGS_TABLE, +} + +# {model: model_resolution} +# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#specifications +MODEL_RESOLUTION = { + "x_series": 4096, + "xl330-m077": 4096, + "xl330-m288": 4096, + "xl430-w250": 4096, + "xm430-w350": 4096, + "xm540-w270": 4096, + "xc430-w150": 4096, +} + +# {model: model_number} +# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#control-table-of-eeprom-area +MODEL_NUMBER_TABLE = { + "xl330-m077": 1190, + "xl330-m288": 1200, + "xl430-w250": 1060, + "xm430-w350": 1020, + "xm540-w270": 1120, + "xc430-w150": 1070, +} + +# {model: available_operating_modes} +# https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#operating-mode11 +MODEL_OPERATING_MODES = { + "xl330-m077": [0, 1, 3, 4, 5, 16], + "xl330-m288": [0, 1, 3, 4, 5, 16], + "xl430-w250": [1, 3, 4, 16], + "xm430-w350": [0, 1, 3, 4, 5, 16], + "xm540-w270": [0, 1, 3, 4, 5, 16], + "xc430-w150": [1, 3, 4, 16], +} + +MODEL_CONTROL_TABLE = { + "x_series": X_SERIES_CONTROL_TABLE, + "xl330-m077": X_SERIES_CONTROL_TABLE, + "xl330-m288": X_SERIES_CONTROL_TABLE, + "xl430-w250": X_SERIES_CONTROL_TABLE, + "xm430-w350": X_SERIES_CONTROL_TABLE, + "xm540-w270": X_SERIES_CONTROL_TABLE, + "xc430-w150": X_SERIES_CONTROL_TABLE, +} + +MODEL_BAUDRATE_TABLE = { + "x_series": X_SERIES_BAUDRATE_TABLE, + "xl330-m077": X_SERIES_BAUDRATE_TABLE, + "xl330-m288": X_SERIES_BAUDRATE_TABLE, + "xl430-w250": X_SERIES_BAUDRATE_TABLE, + "xm430-w350": X_SERIES_BAUDRATE_TABLE, + "xm540-w270": X_SERIES_BAUDRATE_TABLE, + "xc430-w150": X_SERIES_BAUDRATE_TABLE, +} + +AVAILABLE_BAUDRATES = [ + 9_600, + 19_200, + 38_400, + 57_600, + 115_200, + 230_400, + 460_800, + 500_000, + 576_000, + 921_600, + 1_000_000, + 1_152_000, + 2_000_000, + 2_500_000, + 3_000_000, + 3_500_000, + 4_000_000, +] diff --git a/src/lerobot/motors/encoding_utils.py b/src/lerobot/motors/encoding_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..15365855a41de0bd6d3c51212908d2a7db445f15 --- /dev/null +++ b/src/lerobot/motors/encoding_utils.py @@ -0,0 +1,67 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
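As a worked example of the half-turn homing formula used by the Dynamixel bus above (Present_Position = Actual_Position + Homing_Offset), here is a minimal standalone sketch; the 4096-tick resolution comes from MODEL_RESOLUTION, and the helper name below is hypothetical.

```python
# Minimal sketch (not part of this diff) mirroring `_get_half_turn_homings` for one motor.
# Writing offset = int((resolution - 1) / 2) - actual_position makes the motor report
# roughly half a turn (2047 ticks on a 4096-tick encoder) at its current position.

RESOLUTION = 4096  # from MODEL_RESOLUTION for the X-series models above


def half_turn_homing(actual_position: int, resolution: int = RESOLUTION) -> int:
    """Hypothetical helper: homing offset that recentres the present position."""
    max_res = resolution - 1
    return int(max_res / 2) - actual_position


raw = 3000                       # raw encoder ticks read from the motor
offset = half_turn_homing(raw)   # -953
assert raw + offset == 2047      # reported Present_Position is now ~half a turn
```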
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def encode_sign_magnitude(value: int, sign_bit_index: int): + """ + https://en.wikipedia.org/wiki/Signed_number_representations#Sign%E2%80%93magnitude + """ + max_magnitude = (1 << sign_bit_index) - 1 + magnitude = abs(value) + if magnitude > max_magnitude: + raise ValueError(f"Magnitude {magnitude} exceeds {max_magnitude} (max for {sign_bit_index=})") + + direction_bit = 1 if value < 0 else 0 + return (direction_bit << sign_bit_index) | magnitude + + +def decode_sign_magnitude(encoded_value: int, sign_bit_index: int): + """ + https://en.wikipedia.org/wiki/Signed_number_representations#Sign%E2%80%93magnitude + """ + direction_bit = (encoded_value >> sign_bit_index) & 1 + magnitude_mask = (1 << sign_bit_index) - 1 + magnitude = encoded_value & magnitude_mask + return -magnitude if direction_bit else magnitude + + +def encode_twos_complement(value: int, n_bytes: int): + """ + https://en.wikipedia.org/wiki/Signed_number_representations#Two%27s_complement + """ + + bit_width = n_bytes * 8 + min_val = -(1 << (bit_width - 1)) + max_val = (1 << (bit_width - 1)) - 1 + + if not (min_val <= value <= max_val): + raise ValueError( + f"Value {value} out of range for {n_bytes}-byte two's complement: [{min_val}, {max_val}]" + ) + + if value >= 0: + return value + + return (1 << bit_width) + value + + +def decode_twos_complement(value: int, n_bytes: int) -> int: + """ + https://en.wikipedia.org/wiki/Signed_number_representations#Two%27s_complement + """ + bits = n_bytes * 8 + sign_bit = 1 << (bits - 1) + if value & sign_bit: + value -= 1 << bits + return value diff --git a/src/lerobot/motors/feetech/__init__.py b/src/lerobot/motors/feetech/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..33992c51d2ceb32f2480d8d3a727f9a9d75bab5d --- /dev/null +++ b/src/lerobot/motors/feetech/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .feetech import DriveMode, FeetechMotorsBus, OperatingMode, TorqueMode +from .tables import * diff --git a/src/lerobot/motors/feetech/feetech.py b/src/lerobot/motors/feetech/feetech.py new file mode 100644 index 0000000000000000000000000000000000000000..98cde209c44158a244e4da03e756d2774fa0fb93 --- /dev/null +++ b/src/lerobot/motors/feetech/feetech.py @@ -0,0 +1,455 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
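A quick round-trip sketch for the two's-complement helpers defined just above (the encoding the Dynamixel bus uses for signed registers such as Homing_Offset), assuming the module is importable as `lerobot.motors.encoding_utils` per its file path:

```python
# Round-trip sketch for the helpers above; the values are purely illustrative.
from lerobot.motors.encoding_utils import decode_twos_complement, encode_twos_complement

# A 2-byte register holds values in [-32768, 32767].
enc = encode_twos_complement(-5, n_bytes=2)
assert enc == 65531  # (1 << 16) - 5
assert decode_twos_complement(enc, n_bytes=2) == -5

# Non-negative values pass through unchanged.
assert encode_twos_complement(1000, n_bytes=2) == 1000
assert decode_twos_complement(1000, n_bytes=2) == 1000

# Out-of-range values raise ValueError, e.g. encode_twos_complement(40_000, n_bytes=2).
```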
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from copy import deepcopy +from enum import Enum +from pprint import pformat + +from lerobot.motors.encoding_utils import decode_sign_magnitude, encode_sign_magnitude + +from ..motors_bus import Motor, MotorCalibration, MotorsBus, NameOrID, Value, get_address +from .tables import ( + FIRMWARE_MAJOR_VERSION, + FIRMWARE_MINOR_VERSION, + MODEL_BAUDRATE_TABLE, + MODEL_CONTROL_TABLE, + MODEL_ENCODING_TABLE, + MODEL_NUMBER, + MODEL_NUMBER_TABLE, + MODEL_PROTOCOL, + MODEL_RESOLUTION, + SCAN_BAUDRATES, +) + +DEFAULT_PROTOCOL_VERSION = 0 +DEFAULT_BAUDRATE = 1_000_000 +DEFAULT_TIMEOUT_MS = 1000 + +NORMALIZED_DATA = ["Goal_Position", "Present_Position"] + +logger = logging.getLogger(__name__) + + +class OperatingMode(Enum): + # position servo mode + POSITION = 0 + # The motor is in constant speed mode, which is controlled by parameter 0x2e, and the highest bit 15 is + # the direction bit + VELOCITY = 1 + # PWM open-loop speed regulation mode, with parameter 0x2c running time parameter control, bit11 as + # direction bit + PWM = 2 + # In step servo mode, the number of step progress is represented by parameter 0x2a, and the highest bit 15 + # is the direction bit + STEP = 3 + + +class DriveMode(Enum): + NON_INVERTED = 0 + INVERTED = 1 + + +class TorqueMode(Enum): + ENABLED = 1 + DISABLED = 0 + + +def _split_into_byte_chunks(value: int, length: int) -> list[int]: + import scservo_sdk as scs + + if length == 1: + data = [value] + elif length == 2: + data = [scs.SCS_LOBYTE(value), scs.SCS_HIBYTE(value)] + elif length == 4: + data = [ + scs.SCS_LOBYTE(scs.SCS_LOWORD(value)), + scs.SCS_HIBYTE(scs.SCS_LOWORD(value)), + scs.SCS_LOBYTE(scs.SCS_HIWORD(value)), + scs.SCS_HIBYTE(scs.SCS_HIWORD(value)), + ] + return data + + +def patch_setPacketTimeout(self, packet_length): # noqa: N802 + """ + HACK: This patches the PortHandler behavior to set the correct packet timeouts. + + It fixes https://gitee.com/ftservo/SCServoSDK/issues/IBY2S6 + The bug is fixed on the official Feetech SDK repo (https://gitee.com/ftservo/FTServo_Python) + but because that version is not published on PyPI, we rely on the (unofficial) on that is, which needs + patching. + """ + self.packet_start_time = self.getCurrentTime() + self.packet_timeout = (self.tx_time_per_byte * packet_length) + (self.tx_time_per_byte * 3.0) + 50 + + +class FeetechMotorsBus(MotorsBus): + """ + The FeetechMotorsBus class allows to efficiently read and write to the attached motors. It relies on the + python feetech sdk to communicate with the motors, which is itself based on the dynamixel sdk. 
+ """ + + apply_drive_mode = True + available_baudrates = deepcopy(SCAN_BAUDRATES) + default_baudrate = DEFAULT_BAUDRATE + default_timeout = DEFAULT_TIMEOUT_MS + model_baudrate_table = deepcopy(MODEL_BAUDRATE_TABLE) + model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE) + model_encoding_table = deepcopy(MODEL_ENCODING_TABLE) + model_number_table = deepcopy(MODEL_NUMBER_TABLE) + model_resolution_table = deepcopy(MODEL_RESOLUTION) + normalized_data = deepcopy(NORMALIZED_DATA) + + def __init__( + self, + port: str, + motors: dict[str, Motor], + calibration: dict[str, MotorCalibration] | None = None, + protocol_version: int = DEFAULT_PROTOCOL_VERSION, + ): + super().__init__(port, motors, calibration) + self.protocol_version = protocol_version + self._assert_same_protocol() + import scservo_sdk as scs + + self.port_handler = scs.PortHandler(self.port) + # HACK: monkeypatch + self.port_handler.setPacketTimeout = patch_setPacketTimeout.__get__( + self.port_handler, scs.PortHandler + ) + self.packet_handler = scs.PacketHandler(protocol_version) + self.sync_reader = scs.GroupSyncRead(self.port_handler, self.packet_handler, 0, 0) + self.sync_writer = scs.GroupSyncWrite(self.port_handler, self.packet_handler, 0, 0) + self._comm_success = scs.COMM_SUCCESS + self._no_error = 0x00 + + if any(MODEL_PROTOCOL[model] != self.protocol_version for model in self.models): + raise ValueError(f"Some motors are incompatible with protocol_version={self.protocol_version}") + + def _assert_same_protocol(self) -> None: + if any(MODEL_PROTOCOL[model] != self.protocol_version for model in self.models): + raise RuntimeError("Some motors use an incompatible protocol.") + + def _assert_protocol_is_compatible(self, instruction_name: str) -> None: + if instruction_name == "sync_read" and self.protocol_version == 1: + raise NotImplementedError( + "'Sync Read' is not available with Feetech motors using Protocol 1. Use 'Read' sequentially instead." + ) + if instruction_name == "broadcast_ping" and self.protocol_version == 1: + raise NotImplementedError( + "'Broadcast Ping' is not available with Feetech motors using Protocol 1. Use 'Ping' sequentially instead." + ) + + def _assert_same_firmware(self) -> None: + firmware_versions = self._read_firmware_version(self.ids, raise_on_error=True) + if len(set(firmware_versions.values())) != 1: + raise RuntimeError( + "Some Motors use different firmware versions:" + f"\n{pformat(firmware_versions)}\n" + "Update their firmware first using Feetech's software. " + "Visit https://www.feetechrc.com/software." 
+ ) + + def _handshake(self) -> None: + self._assert_motors_exist() + self._assert_same_firmware() + + def _find_single_motor(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]: + if self.protocol_version == 0: + return self._find_single_motor_p0(motor, initial_baudrate) + else: + return self._find_single_motor_p1(motor, initial_baudrate) + + def _find_single_motor_p0(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]: + model = self.motors[motor].model + search_baudrates = ( + [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model] + ) + expected_model_nb = self.model_number_table[model] + + for baudrate in search_baudrates: + self.set_baudrate(baudrate) + id_model = self.broadcast_ping() + if id_model: + found_id, found_model = next(iter(id_model.items())) + if found_model != expected_model_nb: + raise RuntimeError( + f"Found one motor on {baudrate=} with id={found_id} but it has a " + f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. " + f"Make sure you are connected only connected to the '{motor}' motor (model '{model}')." + ) + return baudrate, found_id + + raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. Make sure it is connected.") + + def _find_single_motor_p1(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]: + import scservo_sdk as scs + + model = self.motors[motor].model + search_baudrates = ( + [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model] + ) + expected_model_nb = self.model_number_table[model] + + for baudrate in search_baudrates: + self.set_baudrate(baudrate) + for id_ in range(scs.MAX_ID + 1): + found_model = self.ping(id_) + if found_model is not None: + if found_model != expected_model_nb: + raise RuntimeError( + f"Found one motor on {baudrate=} with id={id_} but it has a " + f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. " + f"Make sure you are connected only connected to the '{motor}' motor (model '{model}')." + ) + return baudrate, id_ + + raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. Make sure it is connected.") + + def configure_motors(self, return_delay_time=0, maximum_acceleration=254, acceleration=254) -> None: + for motor in self.motors: + # By default, Feetech motors have a 500µs delay response time (corresponding to a value of 250 on + # the 'Return_Delay_Time' address). We ensure this is reduced to the minimum of 2µs (value of 0). + self.write("Return_Delay_Time", motor, return_delay_time) + # Set 'Maximum_Acceleration' to 254 to speedup acceleration and deceleration of the motors. 
+ if self.protocol_version == 0: + self.write("Maximum_Acceleration", motor, maximum_acceleration) + self.write("Acceleration", motor, acceleration) + + @property + def is_calibrated(self) -> bool: + motors_calibration = self.read_calibration() + if set(motors_calibration) != set(self.calibration): + return False + + same_ranges = all( + self.calibration[motor].range_min == cal.range_min + and self.calibration[motor].range_max == cal.range_max + for motor, cal in motors_calibration.items() + ) + if self.protocol_version == 1: + return same_ranges + + same_offsets = all( + self.calibration[motor].homing_offset == cal.homing_offset + for motor, cal in motors_calibration.items() + ) + return same_ranges and same_offsets + + def read_calibration(self) -> dict[str, MotorCalibration]: + offsets, mins, maxes = {}, {}, {} + for motor in self.motors: + mins[motor] = self.read("Min_Position_Limit", motor, normalize=False) + maxes[motor] = self.read("Max_Position_Limit", motor, normalize=False) + offsets[motor] = ( + self.read("Homing_Offset", motor, normalize=False) if self.protocol_version == 0 else 0 + ) + + calibration = {} + for motor, m in self.motors.items(): + calibration[motor] = MotorCalibration( + id=m.id, + drive_mode=0, + homing_offset=offsets[motor], + range_min=mins[motor], + range_max=maxes[motor], + ) + + return calibration + + def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None: + for motor, calibration in calibration_dict.items(): + if self.protocol_version == 0: + self.write("Homing_Offset", motor, calibration.homing_offset) + self.write("Min_Position_Limit", motor, calibration.range_min) + self.write("Max_Position_Limit", motor, calibration.range_max) + + if cache: + self.calibration = calibration_dict + + def _get_half_turn_homings(self, positions: dict[NameOrID, Value]) -> dict[NameOrID, Value]: + """ + On Feetech Motors: + Present_Position = Actual_Position - Homing_Offset + """ + half_turn_homings = {} + for motor, pos in positions.items(): + model = self._get_motor_model(motor) + max_res = self.model_resolution_table[model] - 1 + half_turn_homings[motor] = pos - int(max_res / 2) + + return half_turn_homings + + def disable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: + for motor in self._get_motors_list(motors): + self.write("Torque_Enable", motor, TorqueMode.DISABLED.value, num_retry=num_retry) + self.write("Lock", motor, 0, num_retry=num_retry) + + def _disable_torque(self, motor_id: int, model: str, num_retry: int = 0) -> None: + addr, length = get_address(self.model_ctrl_table, model, "Torque_Enable") + self._write(addr, length, motor_id, TorqueMode.DISABLED.value, num_retry=num_retry) + addr, length = get_address(self.model_ctrl_table, model, "Lock") + self._write(addr, length, motor_id, 0, num_retry=num_retry) + + def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: + for motor in self._get_motors_list(motors): + self.write("Torque_Enable", motor, TorqueMode.ENABLED.value, num_retry=num_retry) + self.write("Lock", motor, 1, num_retry=num_retry) + + def _encode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]: + for id_ in ids_values: + model = self._id_to_model(id_) + encoding_table = self.model_encoding_table.get(model) + if encoding_table and data_name in encoding_table: + sign_bit = encoding_table[data_name] + ids_values[id_] = encode_sign_magnitude(ids_values[id_], sign_bit) + + return ids_values + + def 
_decode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]: + for id_ in ids_values: + model = self._id_to_model(id_) + encoding_table = self.model_encoding_table.get(model) + if encoding_table and data_name in encoding_table: + sign_bit = encoding_table[data_name] + ids_values[id_] = decode_sign_magnitude(ids_values[id_], sign_bit) + + return ids_values + + def _split_into_byte_chunks(self, value: int, length: int) -> list[int]: + return _split_into_byte_chunks(value, length) + + def _broadcast_ping(self) -> tuple[dict[int, int], int]: + import scservo_sdk as scs + + data_list = {} + + status_length = 6 + + rx_length = 0 + wait_length = status_length * scs.MAX_ID + + txpacket = [0] * 6 + + tx_time_per_byte = (1000.0 / self.port_handler.getBaudRate()) * 10.0 + + txpacket[scs.PKT_ID] = scs.BROADCAST_ID + txpacket[scs.PKT_LENGTH] = 2 + txpacket[scs.PKT_INSTRUCTION] = scs.INST_PING + + result = self.packet_handler.txPacket(self.port_handler, txpacket) + if result != scs.COMM_SUCCESS: + self.port_handler.is_using = False + return data_list, result + + # set rx timeout + self.port_handler.setPacketTimeoutMillis((wait_length * tx_time_per_byte) + (3.0 * scs.MAX_ID) + 16.0) + + rxpacket = [] + while not self.port_handler.isPacketTimeout() and rx_length < wait_length: + rxpacket += self.port_handler.readPort(wait_length - rx_length) + rx_length = len(rxpacket) + + self.port_handler.is_using = False + + if rx_length == 0: + return data_list, scs.COMM_RX_TIMEOUT + + while True: + if rx_length < status_length: + return data_list, scs.COMM_RX_CORRUPT + + # find packet header + for idx in range(0, (rx_length - 1)): + if (rxpacket[idx] == 0xFF) and (rxpacket[idx + 1] == 0xFF): + break + + if idx == 0: # found at the beginning of the packet + # calculate checksum + checksum = 0 + for idx in range(2, status_length - 1): # except header & checksum + checksum += rxpacket[idx] + + checksum = ~checksum & 0xFF + if rxpacket[status_length - 1] == checksum: + result = scs.COMM_SUCCESS + data_list[rxpacket[scs.PKT_ID]] = rxpacket[scs.PKT_ERROR] + + del rxpacket[0:status_length] + rx_length = rx_length - status_length + + if rx_length == 0: + return data_list, result + else: + result = scs.COMM_RX_CORRUPT + # remove header (0xFF 0xFF) + del rxpacket[0:2] + rx_length = rx_length - 2 + else: + # remove unnecessary packets + del rxpacket[0:idx] + rx_length = rx_length - idx + + def broadcast_ping(self, num_retry: int = 0, raise_on_error: bool = False) -> dict[int, int] | None: + self._assert_protocol_is_compatible("broadcast_ping") + for n_try in range(1 + num_retry): + ids_status, comm = self._broadcast_ping() + if self._is_comm_success(comm): + break + logger.debug(f"Broadcast ping failed on port '{self.port}' ({n_try=})") + logger.debug(self.packet_handler.getTxRxResult(comm)) + + if not self._is_comm_success(comm): + if raise_on_error: + raise ConnectionError(self.packet_handler.getTxRxResult(comm)) + return + + ids_errors = {id_: status for id_, status in ids_status.items() if self._is_error(status)} + if ids_errors: + display_dict = {id_: self.packet_handler.getRxPacketError(err) for id_, err in ids_errors.items()} + logger.error(f"Some motors found returned an error status:\n{pformat(display_dict, indent=4)}") + + return self._read_model_number(list(ids_status), raise_on_error) + + def _read_firmware_version(self, motor_ids: list[int], raise_on_error: bool = False) -> dict[int, str]: + firmware_versions = {} + for id_ in motor_ids: + firm_ver_major, comm, error = self._read( + 
*FIRMWARE_MAJOR_VERSION, id_, raise_on_error=raise_on_error + ) + if not self._is_comm_success(comm) or self._is_error(error): + continue + + firm_ver_minor, comm, error = self._read( + *FIRMWARE_MINOR_VERSION, id_, raise_on_error=raise_on_error + ) + if not self._is_comm_success(comm) or self._is_error(error): + continue + + firmware_versions[id_] = f"{firm_ver_major}.{firm_ver_minor}" + + return firmware_versions + + def _read_model_number(self, motor_ids: list[int], raise_on_error: bool = False) -> dict[int, int]: + model_numbers = {} + for id_ in motor_ids: + model_nb, comm, error = self._read(*MODEL_NUMBER, id_, raise_on_error=raise_on_error) + if not self._is_comm_success(comm) or self._is_error(error): + continue + + model_numbers[id_] = model_nb + + return model_numbers diff --git a/src/lerobot/motors/feetech/tables.py b/src/lerobot/motors/feetech/tables.py new file mode 100644 index 0000000000000000000000000000000000000000..e26d24226275d0330254ca4b1ab028d7b7bfa850 --- /dev/null +++ b/src/lerobot/motors/feetech/tables.py @@ -0,0 +1,256 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FIRMWARE_MAJOR_VERSION = (0, 1) +FIRMWARE_MINOR_VERSION = (1, 1) +MODEL_NUMBER = (3, 2) + +# TODO(Steven): Consider doing the following: +# from enum import Enum +# class MyControlTableKey(Enum): +# ID = "ID" +# GOAL_SPEED = "Goal_Speed" +# ... +# +# MY_CONTROL_TABLE ={ +# MyControlTableKey.ID.value: (5,1) +# MyControlTableKey.GOAL_SPEED.value: (46, 2) +# ... +# } +# This allows me do to: +# bus.write(MyControlTableKey.GOAL_SPEED, ...) +# Instead of: +# bus.write("Goal_Speed", ...) +# This is important for two reasons: +# 1. The linter will tell me if I'm trying to use an invalid key, instead of me realizing when I get the RunTimeError +# 2. 
We can change the value of the MyControlTableKey enums without impacting the client code + +# data_name: (address, size_byte) +# http://doc.feetech.cn/#/prodinfodownload?srcType=FT-SMS-STS-emanual-229f4476422d4059abfb1cb0 +STS_SMS_SERIES_CONTROL_TABLE = { + # EPROM + "Firmware_Major_Version": FIRMWARE_MAJOR_VERSION, # read-only + "Firmware_Minor_Version": FIRMWARE_MINOR_VERSION, # read-only + "Model_Number": MODEL_NUMBER, # read-only + "ID": (5, 1), + "Baud_Rate": (6, 1), + "Return_Delay_Time": (7, 1), + "Response_Status_Level": (8, 1), + "Min_Position_Limit": (9, 2), + "Max_Position_Limit": (11, 2), + "Max_Temperature_Limit": (13, 1), + "Max_Voltage_Limit": (14, 1), + "Min_Voltage_Limit": (15, 1), + "Max_Torque_Limit": (16, 2), + "Phase": (18, 1), + "Unloading_Condition": (19, 1), + "LED_Alarm_Condition": (20, 1), + "P_Coefficient": (21, 1), + "D_Coefficient": (22, 1), + "I_Coefficient": (23, 1), + "Minimum_Startup_Force": (24, 2), + "CW_Dead_Zone": (26, 1), + "CCW_Dead_Zone": (27, 1), + "Protection_Current": (28, 2), + "Angular_Resolution": (30, 1), + "Homing_Offset": (31, 2), + "Operating_Mode": (33, 1), + "Protective_Torque": (34, 1), + "Protection_Time": (35, 1), + "Overload_Torque": (36, 1), + "Velocity_closed_loop_P_proportional_coefficient": (37, 1), + "Over_Current_Protection_Time": (38, 1), + "Velocity_closed_loop_I_integral_coefficient": (39, 1), + # SRAM + "Torque_Enable": (40, 1), + "Acceleration": (41, 1), + "Goal_Position": (42, 2), + "Goal_Time": (44, 2), + "Goal_Velocity": (46, 2), + "Torque_Limit": (48, 2), + "Lock": (55, 1), + "Present_Position": (56, 2), # read-only + "Present_Velocity": (58, 2), # read-only + "Present_Load": (60, 2), # read-only + "Present_Voltage": (62, 1), # read-only + "Present_Temperature": (63, 1), # read-only + "Status": (65, 1), # read-only + "Moving": (66, 1), # read-only + "Present_Current": (69, 2), # read-only + "Goal_Position_2": (71, 2), # read-only + # Factory + "Moving_Velocity": (80, 1), + "Moving_Velocity_Threshold": (80, 1), + "DTs": (81, 1), # (ms) + "Velocity_Unit_factor": (82, 1), + "Hts": (83, 1), # (ns) valid for firmware >= 2.54, other versions keep 0 + "Maximum_Velocity_Limit": (84, 1), + "Maximum_Acceleration": (85, 1), + "Acceleration_Multiplier ": (86, 1), # Acceleration multiplier in effect when acceleration is 0 +} + +# http://doc.feetech.cn/#/prodinfodownload?srcType=FT-SCSCL-emanual-cbcc8ab2e3384282a01d4bf3 +SCS_SERIES_CONTROL_TABLE = { + # EPROM + "Firmware_Major_Version": FIRMWARE_MAJOR_VERSION, # read-only + "Firmware_Minor_Version": FIRMWARE_MINOR_VERSION, # read-only + "Model_Number": MODEL_NUMBER, # read-only + "ID": (5, 1), + "Baud_Rate": (6, 1), + "Return_Delay_Time": (7, 1), + "Response_Status_Level": (8, 1), + "Min_Position_Limit": (9, 2), + "Max_Position_Limit": (11, 2), + "Max_Temperature_Limit": (13, 1), + "Max_Voltage_Limit": (14, 1), + "Min_Voltage_Limit": (15, 1), + "Max_Torque_Limit": (16, 2), + "Phase": (18, 1), + "Unloading_Condition": (19, 1), + "LED_Alarm_Condition": (20, 1), + "P_Coefficient": (21, 1), + "D_Coefficient": (22, 1), + "I_Coefficient": (23, 1), + "Minimum_Startup_Force": (24, 2), + "CW_Dead_Zone": (26, 1), + "CCW_Dead_Zone": (27, 1), + "Protective_Torque": (37, 1), + "Protection_Time": (38, 1), + # SRAM + "Torque_Enable": (40, 1), + "Acceleration": (41, 1), + "Goal_Position": (42, 2), + "Running_Time": (44, 2), + "Goal_Velocity": (46, 2), + "Lock": (48, 1), + "Present_Position": (56, 2), # read-only + "Present_Velocity": (58, 2), # read-only + "Present_Load": (60, 2), # read-only + 
"Present_Voltage": (62, 1), # read-only + "Present_Temperature": (63, 1), # read-only + "Sync_Write_Flag": (64, 1), # read-only + "Status": (65, 1), # read-only + "Moving": (66, 1), # read-only + # Factory + "PWM_Maximum_Step": (78, 1), + "Moving_Velocity_Threshold*50": (79, 1), + "DTs": (80, 1), # (ms) + "Minimum_Velocity_Limit*50": (81, 1), + "Maximum_Velocity_Limit*50": (82, 1), + "Acceleration_2": (83, 1), # don't know what that is +} + +STS_SMS_SERIES_BAUDRATE_TABLE = { + 1_000_000: 0, + 500_000: 1, + 250_000: 2, + 128_000: 3, + 115_200: 4, + 57_600: 5, + 38_400: 6, + 19_200: 7, +} + +SCS_SERIES_BAUDRATE_TABLE = { + 1_000_000: 0, + 500_000: 1, + 250_000: 2, + 128_000: 3, + 115_200: 4, + 57_600: 5, + 38_400: 6, + 19_200: 7, +} + +MODEL_CONTROL_TABLE = { + "sts_series": STS_SMS_SERIES_CONTROL_TABLE, + "scs_series": SCS_SERIES_CONTROL_TABLE, + "sms_series": STS_SMS_SERIES_CONTROL_TABLE, + "sts3215": STS_SMS_SERIES_CONTROL_TABLE, + "sts3250": STS_SMS_SERIES_CONTROL_TABLE, + "scs0009": SCS_SERIES_CONTROL_TABLE, + "sm8512bl": STS_SMS_SERIES_CONTROL_TABLE, +} + +MODEL_RESOLUTION = { + "sts_series": 4096, + "sms_series": 4096, + "scs_series": 1024, + "sts3215": 4096, + "sts3250": 4096, + "sm8512bl": 4096, + "scs0009": 1024, +} + +MODEL_BAUDRATE_TABLE = { + "sts_series": STS_SMS_SERIES_BAUDRATE_TABLE, + "sms_series": STS_SMS_SERIES_BAUDRATE_TABLE, + "scs_series": SCS_SERIES_BAUDRATE_TABLE, + "sm8512bl": STS_SMS_SERIES_BAUDRATE_TABLE, + "sts3215": STS_SMS_SERIES_BAUDRATE_TABLE, + "sts3250": STS_SMS_SERIES_BAUDRATE_TABLE, + "scs0009": SCS_SERIES_BAUDRATE_TABLE, +} + +# Sign-Magnitude encoding bits +STS_SMS_SERIES_ENCODINGS_TABLE = { + "Homing_Offset": 11, + "Goal_Position": 15, + "Goal_Velocity": 15, + "Goal_Speed": 15, + "Present_Position": 15, + "Present_Velocity": 15, + "Present_Speed": 15, +} + +MODEL_ENCODING_TABLE = { + "sts_series": STS_SMS_SERIES_ENCODINGS_TABLE, + "sms_series": STS_SMS_SERIES_ENCODINGS_TABLE, + "scs_series": {}, + "sts3215": STS_SMS_SERIES_ENCODINGS_TABLE, + "sts3250": STS_SMS_SERIES_ENCODINGS_TABLE, + "sm8512bl": STS_SMS_SERIES_ENCODINGS_TABLE, + "scs0009": {}, +} + +SCAN_BAUDRATES = [ + 4_800, + 9_600, + 14_400, + 19_200, + 38_400, + 57_600, + 115_200, + 128_000, + 250_000, + 500_000, + 1_000_000, +] + +MODEL_NUMBER_TABLE = { + "sts3215": 777, + "sts3250": 2825, + "sm8512bl": 11272, + "scs0009": 1284, +} + +MODEL_PROTOCOL = { + "sts_series": 0, + "sms_series": 0, + "scs_series": 1, + "sts3215": 0, + "sts3250": 0, + "sm8512bl": 0, + "scs0009": 1, +} diff --git a/src/lerobot/motors/motors_bus.py b/src/lerobot/motors/motors_bus.py new file mode 100644 index 0000000000000000000000000000000000000000..11763cae815f63d41c613276041201c5f3fec0b6 --- /dev/null +++ b/src/lerobot/motors/motors_bus.py @@ -0,0 +1,1214 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# ruff: noqa: N802 +# This noqa is for the Protocols classes: PortHandler, PacketHandler GroupSyncRead/Write +# TODO(aliberts): Add block noqa when feature below is available +# https://github.com/astral-sh/ruff/issues/3711 + +import abc +import logging +from contextlib import contextmanager +from dataclasses import dataclass +from enum import Enum +from functools import cached_property +from pprint import pformat +from typing import Protocol, TypeAlias + +import serial +from deepdiff import DeepDiff +from tqdm import tqdm + +from lerobot.utils.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError +from lerobot.utils.utils import enter_pressed, move_cursor_up + +NameOrID: TypeAlias = str | int +Value: TypeAlias = int | float + +logger = logging.getLogger(__name__) + + +def get_ctrl_table(model_ctrl_table: dict[str, dict], model: str) -> dict[str, tuple[int, int]]: + ctrl_table = model_ctrl_table.get(model) + if ctrl_table is None: + raise KeyError(f"Control table for {model=} not found.") + return ctrl_table + + +def get_address(model_ctrl_table: dict[str, dict], model: str, data_name: str) -> tuple[int, int]: + ctrl_table = get_ctrl_table(model_ctrl_table, model) + addr_bytes = ctrl_table.get(data_name) + if addr_bytes is None: + raise KeyError(f"Address for '{data_name}' not found in {model} control table.") + return addr_bytes + + +def assert_same_address(model_ctrl_table: dict[str, dict], motor_models: list[str], data_name: str) -> None: + all_addr = [] + all_bytes = [] + for model in motor_models: + addr, bytes = get_address(model_ctrl_table, model, data_name) + all_addr.append(addr) + all_bytes.append(bytes) + + if len(set(all_addr)) != 1: + raise NotImplementedError( + f"At least two motor models use a different address for `data_name`='{data_name}'" + f"({list(zip(motor_models, all_addr, strict=False))})." + ) + + if len(set(all_bytes)) != 1: + raise NotImplementedError( + f"At least two motor models use a different bytes representation for `data_name`='{data_name}'" + f"({list(zip(motor_models, all_bytes, strict=False))})." + ) + + +class MotorNormMode(str, Enum): + RANGE_0_100 = "range_0_100" + RANGE_M100_100 = "range_m100_100" + DEGREES = "degrees" + + +@dataclass +class MotorCalibration: + id: int + drive_mode: int + homing_offset: int + range_min: int + range_max: int + + +@dataclass +class Motor: + id: int + model: str + norm_mode: MotorNormMode + + +class PortHandler(Protocol): + def __init__(self, port_name): + self.is_open: bool + self.baudrate: int + self.packet_start_time: float + self.packet_timeout: float + self.tx_time_per_byte: float + self.is_using: bool + self.port_name: str + self.ser: serial.Serial + + def openPort(self): ... + def closePort(self): ... + def clearPort(self): ... + def setPortName(self, port_name): ... + def getPortName(self): ... + def setBaudRate(self, baudrate): ... + def getBaudRate(self): ... + def getBytesAvailable(self): ... + def readPort(self, length): ... + def writePort(self, packet): ... + def setPacketTimeout(self, packet_length): ... + def setPacketTimeoutMillis(self, msec): ... + def isPacketTimeout(self): ... + def getCurrentTime(self): ... + def getTimeSinceStart(self): ... + def setupPort(self, cflag_baud): ... + def getCFlagBaud(self, baudrate): ... + + +class PacketHandler(Protocol): + def getTxRxResult(self, result): ... + def getRxPacketError(self, error): ... + def txPacket(self, port, txpacket): ... + def rxPacket(self, port): ... + def txRxPacket(self, port, txpacket): ... + def ping(self, port, id): ... 
+ def action(self, port, id): ... + def readTx(self, port, id, address, length): ... + def readRx(self, port, id, length): ... + def readTxRx(self, port, id, address, length): ... + def read1ByteTx(self, port, id, address): ... + def read1ByteRx(self, port, id): ... + def read1ByteTxRx(self, port, id, address): ... + def read2ByteTx(self, port, id, address): ... + def read2ByteRx(self, port, id): ... + def read2ByteTxRx(self, port, id, address): ... + def read4ByteTx(self, port, id, address): ... + def read4ByteRx(self, port, id): ... + def read4ByteTxRx(self, port, id, address): ... + def writeTxOnly(self, port, id, address, length, data): ... + def writeTxRx(self, port, id, address, length, data): ... + def write1ByteTxOnly(self, port, id, address, data): ... + def write1ByteTxRx(self, port, id, address, data): ... + def write2ByteTxOnly(self, port, id, address, data): ... + def write2ByteTxRx(self, port, id, address, data): ... + def write4ByteTxOnly(self, port, id, address, data): ... + def write4ByteTxRx(self, port, id, address, data): ... + def regWriteTxOnly(self, port, id, address, length, data): ... + def regWriteTxRx(self, port, id, address, length, data): ... + def syncReadTx(self, port, start_address, data_length, param, param_length): ... + def syncWriteTxOnly(self, port, start_address, data_length, param, param_length): ... + + +class GroupSyncRead(Protocol): + def __init__(self, port, ph, start_address, data_length): + self.port: str + self.ph: PortHandler + self.start_address: int + self.data_length: int + self.last_result: bool + self.is_param_changed: bool + self.param: list + self.data_dict: dict + + def makeParam(self): ... + def addParam(self, id): ... + def removeParam(self, id): ... + def clearParam(self): ... + def txPacket(self): ... + def rxPacket(self): ... + def txRxPacket(self): ... + def isAvailable(self, id, address, data_length): ... + def getData(self, id, address, data_length): ... + + +class GroupSyncWrite(Protocol): + def __init__(self, port, ph, start_address, data_length): + self.port: str + self.ph: PortHandler + self.start_address: int + self.data_length: int + self.is_param_changed: bool + self.param: list + self.data_dict: dict + + def makeParam(self): ... + def addParam(self, id, data): ... + def removeParam(self, id): ... + def changeParam(self, id, data): ... + def clearParam(self): ... + def txPacket(self): ... + + +class MotorsBus(abc.ABC): + """ + A MotorsBus allows to efficiently read and write to the attached motors. + It represents several motors daisy-chained together and connected through a serial port. + There are currently two implementations of this abstract class: + - DynamixelMotorsBus + - FeetechMotorsBus + + Note: This class may evolve in the future should we add support for other types of bus. + + A MotorsBus subclass instance requires a port (e.g. `FeetechMotorsBus(port="/dev/tty.usbmodem575E0031751"`)). + To find the port, you can run our utility script: + ```bash + lerobot-find-port.py + >>> Finding all available ports for the MotorsBus. + >>> ["/dev/tty.usbmodem575E0032081", "/dev/tty.usbmodem575E0031751"] + >>> Remove the usb cable from your MotorsBus and press Enter when done. + >>> The port of this MotorsBus is /dev/tty.usbmodem575E0031751. + >>> Reconnect the usb cable. 
+ ``` + + Example of usage for 1 Feetech sts3215 motor connected to the bus: + ```python + bus = FeetechMotorsBus( + port="/dev/tty.usbmodem575E0031751", + motors={"my_motor": (1, "sts3215")}, + ) + bus.connect() + + position = bus.read("Present_Position", "my_motor", normalize=False) + + # Move from a few motor steps as an example + few_steps = 30 + bus.write("Goal_Position", "my_motor", position + few_steps, normalize=False) + + # When done, properly disconnect the port using + bus.disconnect() + ``` + """ + + apply_drive_mode: bool + available_baudrates: list[int] + default_baudrate: int + default_timeout: int + model_baudrate_table: dict[str, dict] + model_ctrl_table: dict[str, dict] + model_encoding_table: dict[str, dict] + model_number_table: dict[str, int] + model_resolution_table: dict[str, int] + normalized_data: list[str] + + def __init__( + self, + port: str, + motors: dict[str, Motor], + calibration: dict[str, MotorCalibration] | None = None, + ): + self.port = port + self.motors = motors + self.calibration = calibration if calibration else {} + + self.port_handler: PortHandler + self.packet_handler: PacketHandler + self.sync_reader: GroupSyncRead + self.sync_writer: GroupSyncWrite + self._comm_success: int + self._no_error: int + + self._id_to_model_dict = {m.id: m.model for m in self.motors.values()} + self._id_to_name_dict = {m.id: motor for motor, m in self.motors.items()} + self._model_nb_to_model_dict = {v: k for k, v in self.model_number_table.items()} + + self._validate_motors() + + def __len__(self): + return len(self.motors) + + def __repr__(self): + return ( + f"{self.__class__.__name__}(\n" + f" Port: '{self.port}',\n" + f" Motors: \n{pformat(self.motors, indent=8, sort_dicts=False)},\n" + ")',\n" + ) + + @cached_property + def _has_different_ctrl_tables(self) -> bool: + if len(self.models) < 2: + return False + + first_table = self.model_ctrl_table[self.models[0]] + return any( + DeepDiff(first_table, get_ctrl_table(self.model_ctrl_table, model)) for model in self.models[1:] + ) + + @cached_property + def models(self) -> list[str]: + return [m.model for m in self.motors.values()] + + @cached_property + def ids(self) -> list[int]: + return [m.id for m in self.motors.values()] + + def _model_nb_to_model(self, motor_nb: int) -> str: + return self._model_nb_to_model_dict[motor_nb] + + def _id_to_model(self, motor_id: int) -> str: + return self._id_to_model_dict[motor_id] + + def _id_to_name(self, motor_id: int) -> str: + return self._id_to_name_dict[motor_id] + + def _get_motor_id(self, motor: NameOrID) -> int: + if isinstance(motor, str): + return self.motors[motor].id + elif isinstance(motor, int): + return motor + else: + raise TypeError(f"'{motor}' should be int, str.") + + def _get_motor_model(self, motor: NameOrID) -> int: + if isinstance(motor, str): + return self.motors[motor].model + elif isinstance(motor, int): + return self._id_to_model_dict[motor] + else: + raise TypeError(f"'{motor}' should be int, str.") + + def _get_motors_list(self, motors: str | list[str] | None) -> list[str]: + if motors is None: + return list(self.motors) + elif isinstance(motors, str): + return [motors] + elif isinstance(motors, list): + return motors.copy() + else: + raise TypeError(motors) + + def _get_ids_values_dict(self, values: Value | dict[str, Value] | None) -> list[str]: + if isinstance(values, (int | float)): + return dict.fromkeys(self.ids, values) + elif isinstance(values, dict): + return {self.motors[motor].id: val for motor, val in values.items()} + else: + raise 
TypeError(f"'values' is expected to be a single value or a dict. Got {values}") + + def _validate_motors(self) -> None: + if len(self.ids) != len(set(self.ids)): + raise ValueError(f"Some motors have the same id!\n{self}") + + # Ensure ctrl table available for all models + for model in self.models: + get_ctrl_table(self.model_ctrl_table, model) + + def _is_comm_success(self, comm: int) -> bool: + return comm == self._comm_success + + def _is_error(self, error: int) -> bool: + return error != self._no_error + + def _assert_motors_exist(self) -> None: + expected_models = {m.id: self.model_number_table[m.model] for m in self.motors.values()} + + found_models = {} + for id_ in self.ids: + model_nb = self.ping(id_) + if model_nb is not None: + found_models[id_] = model_nb + + missing_ids = [id_ for id_ in self.ids if id_ not in found_models] + wrong_models = { + id_: (expected_models[id_], found_models[id_]) + for id_ in found_models + if expected_models.get(id_) != found_models[id_] + } + + if missing_ids or wrong_models: + error_lines = [f"{self.__class__.__name__} motor check failed on port '{self.port}':"] + + if missing_ids: + error_lines.append("\nMissing motor IDs:") + error_lines.extend( + f" - {id_} (expected model: {expected_models[id_]})" for id_ in missing_ids + ) + + if wrong_models: + error_lines.append("\nMotors with incorrect model numbers:") + error_lines.extend( + f" - {id_} ({self._id_to_name(id_)}): expected {expected}, found {found}" + for id_, (expected, found) in wrong_models.items() + ) + + error_lines.append("\nFull expected motor list (id: model_number):") + error_lines.append(pformat(expected_models, indent=4, sort_dicts=False)) + error_lines.append("\nFull found motor list (id: model_number):") + error_lines.append(pformat(found_models, indent=4, sort_dicts=False)) + + raise RuntimeError("\n".join(error_lines)) + + @abc.abstractmethod + def _assert_protocol_is_compatible(self, instruction_name: str) -> None: + pass + + @property + def is_connected(self) -> bool: + """bool: `True` if the underlying serial port is open.""" + return self.port_handler.is_open + + def connect(self, handshake: bool = True) -> None: + """Open the serial port and initialise communication. + + Args: + handshake (bool, optional): Pings every expected motor and performs additional + integrity checks specific to the implementation. Defaults to `True`. + + Raises: + DeviceAlreadyConnectedError: The port is already open. + ConnectionError: The underlying SDK failed to open the port or the handshake did not succeed. + """ + if self.is_connected: + raise DeviceAlreadyConnectedError( + f"{self.__class__.__name__}('{self.port}') is already connected. Do not call `{self.__class__.__name__}.connect()` twice." + ) + + self._connect(handshake) + self.set_timeout() + logger.debug(f"{self.__class__.__name__} connected.") + + def _connect(self, handshake: bool = True) -> None: + try: + if not self.port_handler.openPort(): + raise OSError(f"Failed to open port '{self.port}'.") + elif handshake: + self._handshake() + except (FileNotFoundError, OSError, serial.SerialException) as e: + raise ConnectionError( + f"\nCould not connect on port '{self.port}'. Make sure you are using the correct port." + "\nTry running `lerobot-find-port`\n" + ) from e + + @abc.abstractmethod + def _handshake(self) -> None: + pass + + def disconnect(self, disable_torque: bool = True) -> None: + """Close the serial port (optionally disabling torque first). 
+ + Args: + disable_torque (bool, optional): If `True` (default) torque is disabled on every motor before + closing the port. This can prevent damaging motors if they are left applying resisting torque + after disconnect. + """ + if not self.is_connected: + raise DeviceNotConnectedError( + f"{self.__class__.__name__}('{self.port}') is not connected. Try running `{self.__class__.__name__}.connect()` first." + ) + + if disable_torque: + self.port_handler.clearPort() + self.port_handler.is_using = False + self.disable_torque(num_retry=5) + + self.port_handler.closePort() + logger.debug(f"{self.__class__.__name__} disconnected.") + + @classmethod + def scan_port(cls, port: str, *args, **kwargs) -> dict[int, list[int]]: + """Probe *port* at every supported baud-rate and list responding IDs. + + Args: + port (str): Serial/USB port to scan (e.g. ``"/dev/ttyUSB0"``). + *args, **kwargs: Forwarded to the subclass constructor. + + Returns: + dict[int, list[int]]: Mapping *baud-rate → list of motor IDs* + for every baud-rate that produced at least one response. + """ + bus = cls(port, {}, *args, **kwargs) + bus._connect(handshake=False) + baudrate_ids = {} + for baudrate in tqdm(bus.available_baudrates, desc="Scanning port"): + bus.set_baudrate(baudrate) + ids_models = bus.broadcast_ping() + if ids_models: + tqdm.write(f"Motors found for {baudrate=}: {pformat(ids_models, indent=4)}") + baudrate_ids[baudrate] = list(ids_models) + + bus.port_handler.closePort() + return baudrate_ids + + def setup_motor( + self, motor: str, initial_baudrate: int | None = None, initial_id: int | None = None + ) -> None: + """Assign the correct ID and baud-rate to a single motor. + + This helper temporarily switches to the motor's current settings, disables torque, sets the desired + ID, and finally programs the bus' default baud-rate. + + Args: + motor (str): Key of the motor in :pyattr:`motors`. + initial_baudrate (int | None, optional): Current baud-rate (skips scanning when provided). + Defaults to None. + initial_id (int | None, optional): Current ID (skips scanning when provided). Defaults to None. + + Raises: + RuntimeError: The motor could not be found or its model number + does not match the expected one. + ConnectionError: Communication with the motor failed. + """ + if not self.is_connected: + self._connect(handshake=False) + + if initial_baudrate is None: + initial_baudrate, initial_id = self._find_single_motor(motor) + + if initial_id is None: + _, initial_id = self._find_single_motor(motor, initial_baudrate) + + model = self.motors[motor].model + target_id = self.motors[motor].id + self.set_baudrate(initial_baudrate) + self._disable_torque(initial_id, model) + + # Set ID + addr, length = get_address(self.model_ctrl_table, model, "ID") + self._write(addr, length, initial_id, target_id) + + # Set Baudrate + addr, length = get_address(self.model_ctrl_table, model, "Baud_Rate") + baudrate_value = self.model_baudrate_table[model][self.default_baudrate] + self._write(addr, length, target_id, baudrate_value) + + self.set_baudrate(self.default_baudrate) + + @abc.abstractmethod + def _find_single_motor(self, motor: str, initial_baudrate: int | None) -> tuple[int, int]: + pass + + @abc.abstractmethod + def configure_motors(self) -> None: + """Write implementation-specific recommended settings to every motor. + + Typical changes include shortening the return delay, increasing + acceleration limits or disabling safety locks. 
+ """ + pass + + @abc.abstractmethod + def disable_torque(self, motors: int | str | list[str] | None = None, num_retry: int = 0) -> None: + """Disable torque on selected motors. + + Disabling Torque allows to write to the motors' permanent memory area (EPROM/EEPROM). + + Args: + motors (int | str | list[str] | None, optional): Target motors. Accepts a motor name, an ID, a + list of names or `None` to affect every registered motor. Defaults to `None`. + num_retry (int, optional): Number of additional retry attempts on communication failure. + Defaults to 0. + """ + pass + + @abc.abstractmethod + def _disable_torque(self, motor: int, model: str, num_retry: int = 0) -> None: + pass + + @abc.abstractmethod + def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: + """Enable torque on selected motors. + + Args: + motor (int): Same semantics as :pymeth:`disable_torque`. Defaults to `None`. + num_retry (int, optional): Number of additional retry attempts on communication failure. + Defaults to 0. + """ + pass + + @contextmanager + def torque_disabled(self, motors: int | str | list[str] | None = None): + """Context-manager that guarantees torque is re-enabled. + + This helper is useful to temporarily disable torque when configuring motors. + + Examples: + >>> with bus.torque_disabled(): + ... # Safe operations here + ... pass + """ + self.disable_torque(motors) + try: + yield + finally: + self.enable_torque(motors) + + def set_timeout(self, timeout_ms: int | None = None): + """Change the packet timeout used by the SDK. + + Args: + timeout_ms (int | None, optional): Timeout in *milliseconds*. If `None` (default) the method falls + back to :pyattr:`default_timeout`. + """ + timeout_ms = timeout_ms if timeout_ms is not None else self.default_timeout + self.port_handler.setPacketTimeoutMillis(timeout_ms) + + def get_baudrate(self) -> int: + """Return the current baud-rate configured on the port. + + Returns: + int: Baud-rate in bits / second. + """ + return self.port_handler.getBaudRate() + + def set_baudrate(self, baudrate: int) -> None: + """Set a new UART baud-rate on the port. + + Args: + baudrate (int): Desired baud-rate in bits / second. + + Raises: + RuntimeError: The SDK failed to apply the change. + """ + present_bus_baudrate = self.port_handler.getBaudRate() + if present_bus_baudrate != baudrate: + logger.info(f"Setting bus baud rate to {baudrate}. Previously {present_bus_baudrate}.") + self.port_handler.setBaudRate(baudrate) + + if self.port_handler.getBaudRate() != baudrate: + raise RuntimeError("Failed to write bus baud rate.") + + @property + @abc.abstractmethod + def is_calibrated(self) -> bool: + """bool: ``True`` if the cached calibration matches the motors.""" + pass + + @abc.abstractmethod + def read_calibration(self) -> dict[str, MotorCalibration]: + """Read calibration parameters from the motors. + + Returns: + dict[str, MotorCalibration]: Mapping *motor name → calibration*. + """ + pass + + @abc.abstractmethod + def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None: + """Write calibration parameters to the motors and optionally cache them. + + Args: + calibration_dict (dict[str, MotorCalibration]): Calibration obtained from + :pymeth:`read_calibration` or crafted by the user. + cache (bool, optional): Save the calibration to :pyattr:`calibration`. Defaults to True. 
+ """ + pass + + def reset_calibration(self, motors: NameOrID | list[NameOrID] | None = None) -> None: + """Restore factory calibration for the selected motors. + + Homing offset is set to ``0`` and min/max position limits are set to the full usable range. + The in-memory :pyattr:`calibration` is cleared. + + Args: + motors (NameOrID | list[NameOrID] | None, optional): Selection of motors. `None` (default) + resets every motor. + """ + if motors is None: + motors = list(self.motors) + elif isinstance(motors, (str | int)): + motors = [motors] + elif not isinstance(motors, list): + raise TypeError(motors) + + for motor in motors: + model = self._get_motor_model(motor) + max_res = self.model_resolution_table[model] - 1 + self.write("Homing_Offset", motor, 0, normalize=False) + self.write("Min_Position_Limit", motor, 0, normalize=False) + self.write("Max_Position_Limit", motor, max_res, normalize=False) + + self.calibration = {} + + def set_half_turn_homings(self, motors: NameOrID | list[NameOrID] | None = None) -> dict[NameOrID, Value]: + """Centre each motor range around its current position. + + The function computes and writes a homing offset such that the present position becomes exactly one + half-turn (e.g. `2047` on a 12-bit encoder). + + Args: + motors (NameOrID | list[NameOrID] | None, optional): Motors to adjust. Defaults to all motors (`None`). + + Returns: + dict[NameOrID, Value]: Mapping *motor → written homing offset*. + """ + if motors is None: + motors = list(self.motors) + elif isinstance(motors, (str | int)): + motors = [motors] + elif not isinstance(motors, list): + raise TypeError(motors) + + self.reset_calibration(motors) + actual_positions = self.sync_read("Present_Position", motors, normalize=False) + homing_offsets = self._get_half_turn_homings(actual_positions) + for motor, offset in homing_offsets.items(): + self.write("Homing_Offset", motor, offset) + + return homing_offsets + + @abc.abstractmethod + def _get_half_turn_homings(self, positions: dict[NameOrID, Value]) -> dict[NameOrID, Value]: + pass + + def record_ranges_of_motion( + self, motors: NameOrID | list[NameOrID] | None = None, display_values: bool = True + ) -> tuple[dict[NameOrID, Value], dict[NameOrID, Value]]: + """Interactively record the min/max encoder values of each motor. + + Move the joints by hand (with torque disabled) while the method streams live positions. Press + :kbd:`Enter` to finish. + + Args: + motors (NameOrID | list[NameOrID] | None, optional): Motors to record. + Defaults to every motor (`None`). + display_values (bool, optional): When `True` (default) a live table is printed to the console. + + Returns: + tuple[dict[NameOrID, Value], dict[NameOrID, Value]]: Two dictionaries *mins* and *maxes* with the + extreme values observed for each motor. 
+ """ + if motors is None: + motors = list(self.motors) + elif isinstance(motors, (str | int)): + motors = [motors] + elif not isinstance(motors, list): + raise TypeError(motors) + + start_positions = self.sync_read("Present_Position", motors, normalize=False) + mins = start_positions.copy() + maxes = start_positions.copy() + + user_pressed_enter = False + while not user_pressed_enter: + positions = self.sync_read("Present_Position", motors, normalize=False) + mins = {motor: min(positions[motor], min_) for motor, min_ in mins.items()} + maxes = {motor: max(positions[motor], max_) for motor, max_ in maxes.items()} + + if display_values: + print("\n-------------------------------------------") + print(f"{'NAME':<15} | {'MIN':>6} | {'POS':>6} | {'MAX':>6}") + for motor in motors: + print(f"{motor:<15} | {mins[motor]:>6} | {positions[motor]:>6} | {maxes[motor]:>6}") + + if enter_pressed(): + user_pressed_enter = True + + if display_values and not user_pressed_enter: + # Move cursor up to overwrite the previous output + move_cursor_up(len(motors) + 3) + + same_min_max = [motor for motor in motors if mins[motor] == maxes[motor]] + if same_min_max: + raise ValueError(f"Some motors have the same min and max values:\n{pformat(same_min_max)}") + + return mins, maxes + + def _normalize(self, ids_values: dict[int, int]) -> dict[int, float]: + if not self.calibration: + raise RuntimeError(f"{self} has no calibration registered.") + + normalized_values = {} + for id_, val in ids_values.items(): + motor = self._id_to_name(id_) + min_ = self.calibration[motor].range_min + max_ = self.calibration[motor].range_max + drive_mode = self.apply_drive_mode and self.calibration[motor].drive_mode + if max_ == min_: + raise ValueError(f"Invalid calibration for motor '{motor}': min and max are equal.") + + bounded_val = min(max_, max(min_, val)) + if self.motors[motor].norm_mode is MotorNormMode.RANGE_M100_100: + norm = (((bounded_val - min_) / (max_ - min_)) * 200) - 100 + normalized_values[id_] = -norm if drive_mode else norm + elif self.motors[motor].norm_mode is MotorNormMode.RANGE_0_100: + norm = ((bounded_val - min_) / (max_ - min_)) * 100 + normalized_values[id_] = 100 - norm if drive_mode else norm + elif self.motors[motor].norm_mode is MotorNormMode.DEGREES: + mid = (min_ + max_) / 2 + max_res = self.model_resolution_table[self._id_to_model(id_)] - 1 + normalized_values[id_] = (val - mid) * 360 / max_res + else: + raise NotImplementedError + + return normalized_values + + def _unnormalize(self, ids_values: dict[int, float]) -> dict[int, int]: + if not self.calibration: + raise RuntimeError(f"{self} has no calibration registered.") + + unnormalized_values = {} + for id_, val in ids_values.items(): + motor = self._id_to_name(id_) + min_ = self.calibration[motor].range_min + max_ = self.calibration[motor].range_max + drive_mode = self.apply_drive_mode and self.calibration[motor].drive_mode + if max_ == min_: + raise ValueError(f"Invalid calibration for motor '{motor}': min and max are equal.") + + if self.motors[motor].norm_mode is MotorNormMode.RANGE_M100_100: + val = -val if drive_mode else val + bounded_val = min(100.0, max(-100.0, val)) + unnormalized_values[id_] = int(((bounded_val + 100) / 200) * (max_ - min_) + min_) + elif self.motors[motor].norm_mode is MotorNormMode.RANGE_0_100: + val = 100 - val if drive_mode else val + bounded_val = min(100.0, max(0.0, val)) + unnormalized_values[id_] = int((bounded_val / 100) * (max_ - min_) + min_) + elif self.motors[motor].norm_mode is MotorNormMode.DEGREES: + 
mid = (min_ + max_) / 2 + max_res = self.model_resolution_table[self._id_to_model(id_)] - 1 + unnormalized_values[id_] = int((val * max_res / 360) + mid) + else: + raise NotImplementedError + + return unnormalized_values + + @abc.abstractmethod + def _encode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]: + pass + + @abc.abstractmethod + def _decode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]: + pass + + def _serialize_data(self, value: int, length: int) -> list[int]: + """ + Converts an unsigned integer value into a list of byte-sized integers to be sent via a communication + protocol. Depending on the protocol, split values can be in big-endian or little-endian order. + + Supported data length for both Feetech and Dynamixel: + - 1 (for values 0 to 255) + - 2 (for values 0 to 65,535) + - 4 (for values 0 to 4,294,967,295) + """ + if value < 0: + raise ValueError(f"Negative values are not allowed: {value}") + + max_value = {1: 0xFF, 2: 0xFFFF, 4: 0xFFFFFFFF}.get(length) + if max_value is None: + raise NotImplementedError(f"Unsupported byte size: {length}. Expected [1, 2, 4].") + + if value > max_value: + raise ValueError(f"Value {value} exceeds the maximum for {length} bytes ({max_value}).") + + return self._split_into_byte_chunks(value, length) + + @abc.abstractmethod + def _split_into_byte_chunks(self, value: int, length: int) -> list[int]: + """Convert an integer into a list of byte-sized integers.""" + pass + + def ping(self, motor: NameOrID, num_retry: int = 0, raise_on_error: bool = False) -> int | None: + """Ping a single motor and return its model number. + + Args: + motor (NameOrID): Target motor (name or ID). + num_retry (int, optional): Extra attempts before giving up. Defaults to `0`. + raise_on_error (bool, optional): If `True` communication errors raise exceptions instead of + returning `None`. Defaults to `False`. + + Returns: + int | None: Motor model number or `None` on failure. + """ + id_ = self._get_motor_id(motor) + for n_try in range(1 + num_retry): + model_number, comm, error = self.packet_handler.ping(self.port_handler, id_) + if self._is_comm_success(comm): + break + logger.debug(f"ping failed for {id_=}: {n_try=} got {comm=} {error=}") + + if not self._is_comm_success(comm): + if raise_on_error: + raise ConnectionError(self.packet_handler.getTxRxResult(comm)) + else: + return + if self._is_error(error): + if raise_on_error: + raise RuntimeError(self.packet_handler.getRxPacketError(error)) + else: + return + + return model_number + + @abc.abstractmethod + def broadcast_ping(self, num_retry: int = 0, raise_on_error: bool = False) -> dict[int, int] | None: + """Ping every ID on the bus using the broadcast address. + + Args: + num_retry (int, optional): Retry attempts. Defaults to `0`. + raise_on_error (bool, optional): When `True` failures raise an exception instead of returning + `None`. Defaults to `False`. + + Returns: + dict[int, int] | None: Mapping *id → model number* or `None` if the call failed. + """ + pass + + def read( + self, + data_name: str, + motor: str, + *, + normalize: bool = True, + num_retry: int = 0, + ) -> Value: + """Read a register from a motor. + + Args: + data_name (str): Control-table key (e.g. `"Present_Position"`). + motor (str): Motor name. + normalize (bool, optional): When `True` (default) scale the value to a user-friendly range as + defined by the calibration. + num_retry (int, optional): Retry attempts. Defaults to `0`. 
+
+        Returns:
+            Value: Raw or normalised value depending on *normalize*.
+        """
+        if not self.is_connected:
+            raise DeviceNotConnectedError(
+                f"{self.__class__.__name__}('{self.port}') is not connected. You need to run `{self.__class__.__name__}.connect()`."
+            )
+
+        id_ = self.motors[motor].id
+        model = self.motors[motor].model
+        addr, length = get_address(self.model_ctrl_table, model, data_name)
+
+        err_msg = f"Failed to read '{data_name}' on {id_=} after {num_retry + 1} tries."
+        value, _, _ = self._read(addr, length, id_, num_retry=num_retry, raise_on_error=True, err_msg=err_msg)
+
+        id_value = self._decode_sign(data_name, {id_: value})
+
+        if normalize and data_name in self.normalized_data:
+            id_value = self._normalize(id_value)
+
+        return id_value[id_]
+
+    def _read(
+        self,
+        address: int,
+        length: int,
+        motor_id: int,
+        *,
+        num_retry: int = 0,
+        raise_on_error: bool = True,
+        err_msg: str = "",
+    ) -> tuple[int, int, int]:
+        if length == 1:
+            read_fn = self.packet_handler.read1ByteTxRx
+        elif length == 2:
+            read_fn = self.packet_handler.read2ByteTxRx
+        elif length == 4:
+            read_fn = self.packet_handler.read4ByteTxRx
+        else:
+            raise ValueError(length)
+
+        for n_try in range(1 + num_retry):
+            value, comm, error = read_fn(self.port_handler, motor_id, address)
+            if self._is_comm_success(comm):
+                break
+            logger.debug(
+                f"Failed to read @{address=} ({length=}) on {motor_id=} ({n_try=}): "
+                + self.packet_handler.getTxRxResult(comm)
+            )
+
+        if not self._is_comm_success(comm) and raise_on_error:
+            raise ConnectionError(f"{err_msg} {self.packet_handler.getTxRxResult(comm)}")
+        elif self._is_error(error) and raise_on_error:
+            raise RuntimeError(f"{err_msg} {self.packet_handler.getRxPacketError(error)}")
+
+        return value, comm, error
+
+    def write(
+        self, data_name: str, motor: str, value: Value, *, normalize: bool = True, num_retry: int = 0
+    ) -> None:
+        """Write a value to a single motor's register.
+
+        Contrary to :pymeth:`sync_write`, this expects a response status packet emitted by the motor, which
+        provides a guarantee that the value was written to the register successfully. In consequence, it is
+        slower than :pymeth:`sync_write` but it is more reliable. It should typically be used when configuring
+        motors.
+
+        Args:
+            data_name (str): Register name.
+            motor (str): Motor name.
+            value (Value): Value to write. If *normalize* is `True` the value is first converted to raw
+                units.
+            normalize (bool, optional): Enable or disable normalisation. Defaults to `True`.
+            num_retry (int, optional): Retry attempts. Defaults to `0`.
+        """
+        if not self.is_connected:
+            raise DeviceNotConnectedError(
+                f"{self.__class__.__name__}('{self.port}') is not connected. You need to run `{self.__class__.__name__}.connect()`."
+            )
+
+        id_ = self.motors[motor].id
+        model = self.motors[motor].model
+        addr, length = get_address(self.model_ctrl_table, model, data_name)
+
+        if normalize and data_name in self.normalized_data:
+            value = self._unnormalize({id_: value})[id_]
+
+        value = self._encode_sign(data_name, {id_: value})[id_]
+
+        err_msg = f"Failed to write '{data_name}' on {id_=} with '{value}' after {num_retry + 1} tries."
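+        # Low-level single-register write; raises on communication or packet errors.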
+        self._write(addr, length, id_, value, num_retry=num_retry, raise_on_error=True, err_msg=err_msg)
+
+    def _write(
+        self,
+        addr: int,
+        length: int,
+        motor_id: int,
+        value: int,
+        *,
+        num_retry: int = 0,
+        raise_on_error: bool = True,
+        err_msg: str = "",
+    ) -> tuple[int, int]:
+        data = self._serialize_data(value, length)
+        for n_try in range(1 + num_retry):
+            comm, error = self.packet_handler.writeTxRx(self.port_handler, motor_id, addr, length, data)
+            if self._is_comm_success(comm):
+                break
+            logger.debug(
+                f"Failed to write @{addr=} ({length=}) on id={motor_id} with {value=} ({n_try=}): "
+                + self.packet_handler.getTxRxResult(comm)
+            )
+
+        if not self._is_comm_success(comm) and raise_on_error:
+            raise ConnectionError(f"{err_msg} {self.packet_handler.getTxRxResult(comm)}")
+        elif self._is_error(error) and raise_on_error:
+            raise RuntimeError(f"{err_msg} {self.packet_handler.getRxPacketError(error)}")
+
+        return comm, error
+
+    def sync_read(
+        self,
+        data_name: str,
+        motors: str | list[str] | None = None,
+        *,
+        normalize: bool = True,
+        num_retry: int = 0,
+    ) -> dict[str, Value]:
+        """Read the same register from several motors at once.
+
+        Args:
+            data_name (str): Register name.
+            motors (str | list[str] | None, optional): Motors to query. `None` (default) reads every motor.
+            normalize (bool, optional): Normalisation flag. Defaults to `True`.
+            num_retry (int, optional): Retry attempts. Defaults to `0`.
+
+        Returns:
+            dict[str, Value]: Mapping *motor name → value*.
+        """
+        if not self.is_connected:
+            raise DeviceNotConnectedError(
+                f"{self.__class__.__name__}('{self.port}') is not connected. You need to run `{self.__class__.__name__}.connect()`."
+            )
+
+        self._assert_protocol_is_compatible("sync_read")
+
+        names = self._get_motors_list(motors)
+        ids = [self.motors[motor].id for motor in names]
+        models = [self.motors[motor].model for motor in names]
+
+        if self._has_different_ctrl_tables:
+            assert_same_address(self.model_ctrl_table, models, data_name)
+
+        model = next(iter(models))
+        addr, length = get_address(self.model_ctrl_table, model, data_name)
+
+        err_msg = f"Failed to sync read '{data_name}' on {ids=} after {num_retry + 1} tries."
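+        # A single sync-read transaction fetches the register from every selected motor at once.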
+        ids_values, _ = self._sync_read(
+            addr, length, ids, num_retry=num_retry, raise_on_error=True, err_msg=err_msg
+        )
+
+        ids_values = self._decode_sign(data_name, ids_values)
+
+        if normalize and data_name in self.normalized_data:
+            ids_values = self._normalize(ids_values)
+
+        return {self._id_to_name(id_): value for id_, value in ids_values.items()}
+
+    def _sync_read(
+        self,
+        addr: int,
+        length: int,
+        motor_ids: list[int],
+        *,
+        num_retry: int = 0,
+        raise_on_error: bool = True,
+        err_msg: str = "",
+    ) -> tuple[dict[int, int], int]:
+        self._setup_sync_reader(motor_ids, addr, length)
+        for n_try in range(1 + num_retry):
+            comm = self.sync_reader.txRxPacket()
+            if self._is_comm_success(comm):
+                break
+            logger.debug(
+                f"Failed to sync read @{addr=} ({length=}) on {motor_ids=} ({n_try=}): "
+                + self.packet_handler.getTxRxResult(comm)
+            )
+
+        if not self._is_comm_success(comm) and raise_on_error:
+            raise ConnectionError(f"{err_msg} {self.packet_handler.getTxRxResult(comm)}")
+
+        values = {id_: self.sync_reader.getData(id_, addr, length) for id_ in motor_ids}
+        return values, comm
+
+    def _setup_sync_reader(self, motor_ids: list[int], addr: int, length: int) -> None:
+        self.sync_reader.clearParam()
+        self.sync_reader.start_address = addr
+        self.sync_reader.data_length = length
+        for id_ in motor_ids:
+            self.sync_reader.addParam(id_)
+
+    # TODO(aliberts, pkooij): Implementing something like this could yield even faster read times if need be.
+    # We would have to handle the logic of checking whether a packet has already been sent, but that is doable.
+    # This could come at the cost of increased latency between the moment the data is produced by the motors
+    # and the moment it is used by a policy.
+    # def _async_read(self, motor_ids: list[int], address: int, length: int):
+    #     if self.sync_reader.start_address != address or self.sync_reader.data_length != length or ...:
+    #         self._setup_sync_reader(motor_ids, address, length)
+    #     else:
+    #         self.sync_reader.rxPacket()
+    #         self.sync_reader.txPacket()
+
+    #     for id_ in motor_ids:
+    #         value = self.sync_reader.getData(id_, address, length)
+
+    def sync_write(
+        self,
+        data_name: str,
+        values: Value | dict[str, Value],
+        *,
+        normalize: bool = True,
+        num_retry: int = 0,
+    ) -> None:
+        """Write the same register on multiple motors.
+
+        Contrary to :pymeth:`write`, this *does not* expect a response status packet from the motors, which
+        means lost packets can go unnoticed. It is faster than :pymeth:`write` and should typically be used
+        when frequency matters and losing some packets is acceptable (e.g. teleoperation loops).
+
+        Args:
+            data_name (str): Register name.
+            values (Value | dict[str, Value]): Either a single value (applied to every motor) or a mapping
+                *motor name → value*.
+            normalize (bool, optional): If `True` (default) convert values from the user range to raw units.
+            num_retry (int, optional): Retry attempts. Defaults to `0`.
+        """
+        if not self.is_connected:
+            raise DeviceNotConnectedError(
+                f"{self.__class__.__name__}('{self.port}') is not connected. You need to run `{self.__class__.__name__}.connect()`."
+ ) + + ids_values = self._get_ids_values_dict(values) + models = [self._id_to_model(id_) for id_ in ids_values] + if self._has_different_ctrl_tables: + assert_same_address(self.model_ctrl_table, models, data_name) + + model = next(iter(models)) + addr, length = get_address(self.model_ctrl_table, model, data_name) + + if normalize and data_name in self.normalized_data: + ids_values = self._unnormalize(ids_values) + + ids_values = self._encode_sign(data_name, ids_values) + + err_msg = f"Failed to sync write '{data_name}' with {ids_values=} after {num_retry + 1} tries." + self._sync_write(addr, length, ids_values, num_retry=num_retry, raise_on_error=True, err_msg=err_msg) + + def _sync_write( + self, + addr: int, + length: int, + ids_values: dict[int, int], + num_retry: int = 0, + raise_on_error: bool = True, + err_msg: str = "", + ) -> int: + self._setup_sync_writer(ids_values, addr, length) + for n_try in range(1 + num_retry): + comm = self.sync_writer.txPacket() + if self._is_comm_success(comm): + break + logger.debug( + f"Failed to sync write @{addr=} ({length=}) with {ids_values=} ({n_try=}): " + + self.packet_handler.getTxRxResult(comm) + ) + + if not self._is_comm_success(comm) and raise_on_error: + raise ConnectionError(f"{err_msg} {self.packet_handler.getTxRxResult(comm)}") + + return comm + + def _setup_sync_writer(self, ids_values: dict[int, int], addr: int, length: int) -> None: + self.sync_writer.clearParam() + self.sync_writer.start_address = addr + self.sync_writer.data_length = length + for id_, value in ids_values.items(): + data = self._serialize_data(value, length) + self.sync_writer.addParam(id_, data) diff --git a/src/lerobot/optim/__init__.py b/src/lerobot/optim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1e95939c9a00c5f9775524142f6aeb5cb348fb5e --- /dev/null +++ b/src/lerobot/optim/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .optimizers import OptimizerConfig as OptimizerConfig diff --git a/src/lerobot/optim/factory.py b/src/lerobot/optim/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..323f26bdaca9ac91230524d918b917e2771e85c5 --- /dev/null +++ b/src/lerobot/optim/factory.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+
+from torch.optim import Optimizer
+from torch.optim.lr_scheduler import LRScheduler
+
+from lerobot.configs.train import TrainPipelineConfig
+from lerobot.policies.pretrained import PreTrainedPolicy
+
+
+def make_optimizer_and_scheduler(
+    cfg: TrainPipelineConfig, policy: PreTrainedPolicy
+) -> tuple[Optimizer, LRScheduler | None]:
+    """Generates the optimizer and scheduler based on configs.
+
+    Args:
+        cfg (TrainPipelineConfig): The training config that contains the optimizer and scheduler configs.
+        policy (PreTrainedPolicy): The policy from which parameters and training presets are taken.
+
+    Returns:
+        tuple[Optimizer, LRScheduler | None]: The couple (Optimizer, Scheduler). Scheduler can be `None`.
+    """
+    params = policy.get_optim_params() if cfg.use_policy_training_preset else policy.parameters()
+    optimizer = cfg.optimizer.build(params)
+    lr_scheduler = cfg.scheduler.build(optimizer, cfg.steps) if cfg.scheduler is not None else None
+    return optimizer, lr_scheduler
diff --git a/src/lerobot/optim/optimizers.py b/src/lerobot/optim/optimizers.py
new file mode 100644
index 0000000000000000000000000000000000000000..4105689e9e45e6ac96c24b9231933408063abf75
--- /dev/null
+++ b/src/lerobot/optim/optimizers.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import abc
+from dataclasses import asdict, dataclass, field
+from pathlib import Path
+from typing import Any
+
+import draccus
+import torch
+from safetensors.torch import load_file, save_file
+
+from lerobot.datasets.utils import flatten_dict, unflatten_dict, write_json
+from lerobot.utils.constants import (
+    OPTIMIZER_PARAM_GROUPS,
+    OPTIMIZER_STATE,
+)
+from lerobot.utils.io_utils import deserialize_json_into_object
+
+
+@dataclass
+class OptimizerConfig(draccus.ChoiceRegistry, abc.ABC):
+    lr: float
+    weight_decay: float
+    grad_clip_norm: float
+
+    @property
+    def type(self) -> str:
+        return self.get_choice_name(self.__class__)
+
+    @classmethod
+    def default_choice_name(cls) -> str | None:
+        return "adam"
+
+    @abc.abstractmethod
+    def build(self, params) -> torch.optim.Optimizer | dict[str, torch.optim.Optimizer]:
+        """
+        Build the optimizer. It can be a single optimizer or a dictionary of optimizers.
+        NOTE: Multiple optimizers are useful when you have different models to optimize.
+        For example, you can have one optimizer for the policy and another one for the value function
+        in reinforcement learning settings.
+
+        Returns:
+            The optimizer or a dictionary of optimizers.
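+
+        Example (illustrative; assumes an ``nn.Module`` named ``policy``):
+            >>> optimizer = AdamConfig(lr=3e-4).build(policy.parameters())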
+ """ + raise NotImplementedError + + +@OptimizerConfig.register_subclass("adam") +@dataclass +class AdamConfig(OptimizerConfig): + lr: float = 1e-3 + betas: tuple[float, float] = (0.9, 0.999) + eps: float = 1e-8 + weight_decay: float = 0.0 + grad_clip_norm: float = 10.0 + + def build(self, params: dict) -> torch.optim.Optimizer: + kwargs = asdict(self) + kwargs.pop("grad_clip_norm") + return torch.optim.Adam(params, **kwargs) + + +@OptimizerConfig.register_subclass("adamw") +@dataclass +class AdamWConfig(OptimizerConfig): + lr: float = 1e-3 + betas: tuple[float, float] = (0.9, 0.999) + eps: float = 1e-8 + weight_decay: float = 1e-2 + grad_clip_norm: float = 10.0 + + def build(self, params: dict) -> torch.optim.Optimizer: + kwargs = asdict(self) + kwargs.pop("grad_clip_norm") + return torch.optim.AdamW(params, **kwargs) + + +@OptimizerConfig.register_subclass("sgd") +@dataclass +class SGDConfig(OptimizerConfig): + lr: float = 1e-3 + momentum: float = 0.0 + dampening: float = 0.0 + nesterov: bool = False + weight_decay: float = 0.0 + grad_clip_norm: float = 10.0 + + def build(self, params: dict) -> torch.optim.Optimizer: + kwargs = asdict(self) + kwargs.pop("grad_clip_norm") + return torch.optim.SGD(params, **kwargs) + + +@OptimizerConfig.register_subclass("multi_adam") +@dataclass +class MultiAdamConfig(OptimizerConfig): + """Configuration for multiple Adam optimizers with different parameter groups. + + This creates a dictionary of Adam optimizers, each with its own hyperparameters. + + Args: + lr: Default learning rate (used if not specified for a group) + weight_decay: Default weight decay (used if not specified for a group) + optimizer_groups: Dictionary mapping parameter group names to their hyperparameters + grad_clip_norm: Gradient clipping norm + """ + + lr: float = 1e-3 + weight_decay: float = 0.0 + grad_clip_norm: float = 10.0 + optimizer_groups: dict[str, dict[str, Any]] = field(default_factory=dict) + + def build(self, params_dict: dict[str, list]) -> dict[str, torch.optim.Optimizer]: + """Build multiple Adam optimizers. + + Args: + params_dict: Dictionary mapping parameter group names to lists of parameters + The keys should match the keys in optimizer_groups + + Returns: + Dictionary mapping parameter group names to their optimizers + """ + optimizers = {} + + for name, params in params_dict.items(): + # Get group-specific hyperparameters or use defaults + group_config = self.optimizer_groups.get(name, {}) + + # Create optimizer with merged parameters (defaults + group-specific) + optimizer_kwargs = { + "lr": group_config.get("lr", self.lr), + "betas": group_config.get("betas", (0.9, 0.999)), + "eps": group_config.get("eps", 1e-5), + "weight_decay": group_config.get("weight_decay", self.weight_decay), + } + + optimizers[name] = torch.optim.Adam(params, **optimizer_kwargs) + + return optimizers + + +def save_optimizer_state( + optimizer: torch.optim.Optimizer | dict[str, torch.optim.Optimizer], save_dir: Path +) -> None: + """Save optimizer state to disk. + + Args: + optimizer: Either a single optimizer or a dictionary of optimizers. + save_dir: Directory to save the optimizer state. 
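+
+    Example (sketch; the checkpoint path is illustrative):
+        >>> save_optimizer_state(optimizer, Path("outputs/checkpoint/optimizer"))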
+ """ + if isinstance(optimizer, dict): + # Handle dictionary of optimizers + for name, opt in optimizer.items(): + optimizer_dir = save_dir / name + optimizer_dir.mkdir(exist_ok=True, parents=True) + _save_single_optimizer_state(opt, optimizer_dir) + else: + # Handle single optimizer + _save_single_optimizer_state(optimizer, save_dir) + + +def _save_single_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> None: + """Save a single optimizer's state to disk.""" + state = optimizer.state_dict() + param_groups = state.pop("param_groups") + flat_state = flatten_dict(state) + save_file(flat_state, save_dir / OPTIMIZER_STATE) + write_json(param_groups, save_dir / OPTIMIZER_PARAM_GROUPS) + + +def load_optimizer_state( + optimizer: torch.optim.Optimizer | dict[str, torch.optim.Optimizer], save_dir: Path +) -> torch.optim.Optimizer | dict[str, torch.optim.Optimizer]: + """Load optimizer state from disk. + + Args: + optimizer: Either a single optimizer or a dictionary of optimizers. + save_dir: Directory to load the optimizer state from. + + Returns: + The updated optimizer(s) with loaded state. + """ + if isinstance(optimizer, dict): + # Handle dictionary of optimizers + loaded_optimizers = {} + for name, opt in optimizer.items(): + optimizer_dir = save_dir / name + if optimizer_dir.exists(): + loaded_optimizers[name] = _load_single_optimizer_state(opt, optimizer_dir) + else: + loaded_optimizers[name] = opt + return loaded_optimizers + else: + # Handle single optimizer + return _load_single_optimizer_state(optimizer, save_dir) + + +def _load_single_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> torch.optim.Optimizer: + """Load a single optimizer's state from disk.""" + current_state_dict = optimizer.state_dict() + flat_state = load_file(save_dir / OPTIMIZER_STATE) + state = unflatten_dict(flat_state) + + # Handle case where 'state' key might not exist (for newly created optimizers) + if "state" in state: + loaded_state_dict = {"state": {int(k): v for k, v in state["state"].items()}} + else: + loaded_state_dict = {"state": {}} + + if "param_groups" in current_state_dict: + param_groups = deserialize_json_into_object( + save_dir / OPTIMIZER_PARAM_GROUPS, current_state_dict["param_groups"] + ) + loaded_state_dict["param_groups"] = param_groups + + optimizer.load_state_dict(loaded_state_dict) + return optimizer diff --git a/src/lerobot/optim/schedulers.py b/src/lerobot/optim/schedulers.py new file mode 100644 index 0000000000000000000000000000000000000000..54fa027e9c9c0b08c8a0e79055e1e2b9f1b7351f --- /dev/null +++ b/src/lerobot/optim/schedulers.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import abc +import logging +import math +from dataclasses import asdict, dataclass +from pathlib import Path + +import draccus +from torch.optim import Optimizer +from torch.optim.lr_scheduler import LambdaLR, LRScheduler + +from lerobot.datasets.utils import write_json +from lerobot.utils.constants import SCHEDULER_STATE +from lerobot.utils.io_utils import deserialize_json_into_object + + +@dataclass +class LRSchedulerConfig(draccus.ChoiceRegistry, abc.ABC): + num_warmup_steps: int + + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + @abc.abstractmethod + def build(self, optimizer: Optimizer, num_training_steps: int) -> LRScheduler | None: + raise NotImplementedError + + +@LRSchedulerConfig.register_subclass("diffuser") +@dataclass +class DiffuserSchedulerConfig(LRSchedulerConfig): + name: str = "cosine" + num_warmup_steps: int | None = None + + def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR: + from diffusers.optimization import get_scheduler + + kwargs = {**asdict(self), "num_training_steps": num_training_steps, "optimizer": optimizer} + return get_scheduler(**kwargs) + + +@LRSchedulerConfig.register_subclass("vqbet") +@dataclass +class VQBeTSchedulerConfig(LRSchedulerConfig): + num_warmup_steps: int + num_vqvae_training_steps: int + num_cycles: float = 0.5 + + def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR: + def lr_lambda(current_step): + if current_step < self.num_vqvae_training_steps: + return float(1) + else: + adjusted_step = current_step - self.num_vqvae_training_steps + if adjusted_step < self.num_warmup_steps: + return float(adjusted_step) / float(max(1, self.num_warmup_steps)) + progress = float(adjusted_step - self.num_warmup_steps) / float( + max(1, num_training_steps - self.num_warmup_steps) + ) + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(self.num_cycles) * 2.0 * progress))) + + return LambdaLR(optimizer, lr_lambda, -1) + + +@LRSchedulerConfig.register_subclass("cosine_decay_with_warmup") +@dataclass +class CosineDecayWithWarmupSchedulerConfig(LRSchedulerConfig): + """Used by Physical Intelligence to train Pi0. + + Automatically scales warmup and decay steps if num_training_steps < num_decay_steps. + This ensures the learning rate schedule completes properly even with shorter training runs. + """ + + num_warmup_steps: int + num_decay_steps: int + peak_lr: float + decay_lr: float + + def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR: + # Auto-scale scheduler parameters if training steps are shorter than configured decay steps + actual_warmup_steps = self.num_warmup_steps + actual_decay_steps = self.num_decay_steps + + if num_training_steps < self.num_decay_steps: + # Calculate scaling factor to fit the schedule into the available training steps + scale_factor = num_training_steps / self.num_decay_steps + actual_warmup_steps = int(self.num_warmup_steps * scale_factor) + actual_decay_steps = num_training_steps + + logging.info( + f"Auto-scaling LR scheduler: " + f"num_training_steps ({num_training_steps}) < num_decay_steps ({self.num_decay_steps}). 
" + f"Scaling warmup: {self.num_warmup_steps} → {actual_warmup_steps}, " + f"decay: {self.num_decay_steps} → {actual_decay_steps} " + f"(scale factor: {scale_factor:.3f})" + ) + + def lr_lambda(current_step): + def linear_warmup_schedule(current_step): + if current_step <= 0: + return 1 / (actual_warmup_steps + 1) + frac = 1 - current_step / actual_warmup_steps + return (1 / (actual_warmup_steps + 1) - 1) * frac + 1 + + def cosine_decay_schedule(current_step): + step = min(current_step, actual_decay_steps) + cosine_decay = 0.5 * (1 + math.cos(math.pi * step / actual_decay_steps)) + alpha = self.decay_lr / self.peak_lr + decayed = (1 - alpha) * cosine_decay + alpha + return decayed + + if current_step < actual_warmup_steps: + return linear_warmup_schedule(current_step) + + return cosine_decay_schedule(current_step) + + return LambdaLR(optimizer, lr_lambda, -1) + + +def save_scheduler_state(scheduler: LRScheduler, save_dir: Path) -> None: + state_dict = scheduler.state_dict() + write_json(state_dict, save_dir / SCHEDULER_STATE) + + +def load_scheduler_state(scheduler: LRScheduler, save_dir: Path) -> LRScheduler: + state_dict = deserialize_json_into_object(save_dir / SCHEDULER_STATE, scheduler.state_dict()) + scheduler.load_state_dict(state_dict) + return scheduler diff --git a/src/lerobot/policies/__init__.py b/src/lerobot/policies/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d333d3ba39f1ec43916af9c6c3550c63be085593 --- /dev/null +++ b/src/lerobot/policies/__init__.py @@ -0,0 +1,34 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from .act.configuration_act import ACTConfig as ACTConfig
+from .diffusion.configuration_diffusion import DiffusionConfig as DiffusionConfig
+from .groot.configuration_groot import GrootConfig as GrootConfig
+from .pi0.configuration_pi0 import PI0Config as PI0Config
+from .pi05.configuration_pi05 import PI05Config as PI05Config
+from .smolvla.configuration_smolvla import SmolVLAConfig as SmolVLAConfig
+from .smolvla.processor_smolvla import SmolVLANewLineProcessor
+from .tdmpc.configuration_tdmpc import TDMPCConfig as TDMPCConfig
+from .vqbet.configuration_vqbet import VQBeTConfig as VQBeTConfig
+
+__all__ = [
+    "ACTConfig",
+    "DiffusionConfig",
+    "PI0Config",
+    "PI05Config",
+    "SmolVLAConfig",
+    "TDMPCConfig",
+    "VQBeTConfig",
+    "GrootConfig",
+]
diff --git a/src/lerobot/policies/act/README.md b/src/lerobot/policies/act/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..04602009852778a28be44647b6e7ba445dae3a95
--- /dev/null
+++ b/src/lerobot/policies/act/README.md
@@ -0,0 +1 @@
+../../../../docs/source/policy_act_README.md
\ No newline at end of file
diff --git a/src/lerobot/policies/act/configuration_act.py b/src/lerobot/policies/act/configuration_act.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c6fdf4275d6a5d40a7eebe443c8a756d3f0fc21
--- /dev/null
+++ b/src/lerobot/policies/act/configuration_act.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+
+# Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from dataclasses import dataclass, field
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import NormalizationMode
+from lerobot.optim.optimizers import AdamWConfig
+
+
+@PreTrainedConfig.register_subclass("act")
+@dataclass
+class ACTConfig(PreTrainedConfig):
+    """Configuration class for the Action Chunking Transformers policy.
+
+    Defaults are configured for training on bimanual Aloha tasks like "insertion" or "transfer".
+
+    The parameters you will most likely need to change are the ones which depend on the environment / sensors.
+    Those are: `input_shapes` and `output_shapes`.
+
+    Notes on the inputs and outputs:
+        - Either:
+            - At least one key starting with "observation.image" is required as an input.
+              AND/OR
+            - The key "observation.environment_state" is required as input.
+        - If there are multiple keys beginning with "observation.images." they are treated as multiple camera
+          views. Right now we only support all images having the same shape.
+        - May optionally work without an "observation.state" key for the proprioceptive robot state.
+        - "action" is required as an output key.
+
+    Args:
+        n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the
+            current step and additional steps going back).
+        chunk_size: The size of the action prediction "chunks" in units of environment steps.
+        n_action_steps: The number of action steps to run in the environment for one invocation of the policy.
+            This should be no greater than the chunk size. For example, if the chunk size is 100, you may
+            set this to 50. This would mean that the model predicts 100 steps worth of actions, runs 50 in the
+            environment, and throws the other 50 out.
+        input_shapes: A dictionary defining the shapes of the input data for the policy. The key represents
+            the input data name, and the value is a list indicating the dimensions of the corresponding data.
+            For example, "observation.image" refers to an input from a camera with dimensions [3, 96, 96],
+            indicating it has three color channels and 96x96 resolution. Importantly, `input_shapes` doesn't
+            include batch dimension or temporal dimension.
+        output_shapes: A dictionary defining the shapes of the output data for the policy. The key represents
+            the output data name, and the value is a list indicating the dimensions of the corresponding data.
+            For example, "action" refers to an output shape of [14], indicating 14-dimensional actions.
+            Importantly, `output_shapes` doesn't include batch dimension or temporal dimension.
+        input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"),
+            and the value specifies the normalization mode to apply. The two available modes are "mean_std"
+            which subtracts the mean and divides by the standard deviation and "min_max" which rescales to a
+            [-1, 1] range.
+        output_normalization_modes: Similar dictionary as `input_normalization_modes`, but used to unnormalize
+            to the original scale. Note that this is also used for normalizing the training targets.
+        vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
+        pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
+            `None` means no pretrained weights.
+        replace_final_stride_with_dilation: Whether to replace the ResNet's final 2x2 stride with a dilated
+            convolution.
+        pre_norm: Whether to use "pre-norm" in the transformer blocks.
+        dim_model: The transformer blocks' main hidden dimension.
+        n_heads: The number of heads to use in the transformer blocks' multi-head attention.
+        dim_feedforward: The dimension to expand the transformer's hidden dimension to in the feed-forward
+            layers.
+        feedforward_activation: The activation to use in the transformer block's feed-forward layers.
+        n_encoder_layers: The number of transformer layers to use for the transformer encoder.
+        n_decoder_layers: The number of transformer layers to use for the transformer decoder.
+        use_vae: Whether to use a variational objective during training. This introduces another transformer
+            which is used as the VAE's encoder (not to be confused with the transformer encoder - see
+            documentation in the policy class).
+        latent_dim: The VAE's latent dimension.
+        n_vae_encoder_layers: The number of transformer layers to use for the VAE's encoder.
+        temporal_ensemble_coeff: Coefficient for the exponential weighting scheme to apply for temporal
+            ensembling. Defaults to None which means temporal ensembling is not used. `n_action_steps` must be
+            1 when using this feature, as inference needs to happen at every step to form an ensemble. For
+            more information on how ensembling works, please see `ACTTemporalEnsembler`.
+        dropout: Dropout to use in the transformer layers (see code for details).
+        kl_weight: The weight to use for the KL-divergence component of the loss if the variational objective
+            is enabled. Loss is then calculated as: `reconstruction_loss + kl_weight * kld_loss`.
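+
+    Example (illustrative; matches the chunking behaviour described above):
+        >>> config = ACTConfig(chunk_size=100, n_action_steps=50)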
+    """
+
+    # Input / output structure.
+    n_obs_steps: int = 1
+    chunk_size: int = 100
+    n_action_steps: int = 100
+
+    normalization_mapping: dict[str, NormalizationMode] = field(
+        default_factory=lambda: {
+            "VISUAL": NormalizationMode.MEAN_STD,
+            "STATE": NormalizationMode.MEAN_STD,
+            "ACTION": NormalizationMode.MEAN_STD,
+        }
+    )
+
+    # Architecture.
+    # Vision backbone.
+    vision_backbone: str = "resnet18"
+    pretrained_backbone_weights: str | None = "ResNet18_Weights.IMAGENET1K_V1"
+    replace_final_stride_with_dilation: bool = False
+    # Transformer layers.
+    pre_norm: bool = False
+    dim_model: int = 512
+    n_heads: int = 8
+    dim_feedforward: int = 3200
+    feedforward_activation: str = "relu"
+    n_encoder_layers: int = 4
+    # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
+    # that means only the first layer is used. Here we match the original implementation by setting this to 1.
+    # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
+    n_decoder_layers: int = 1
+    # VAE.
+    use_vae: bool = True
+    latent_dim: int = 32
+    n_vae_encoder_layers: int = 4
+
+    # Inference.
+    # Note: the value used in ACT when temporal ensembling is enabled is 0.01.
+    temporal_ensemble_coeff: float | None = None
+
+    # Training and loss computation.
+    dropout: float = 0.1
+    kl_weight: float = 10.0
+
+    # Training preset
+    optimizer_lr: float = 1e-5
+    optimizer_weight_decay: float = 1e-4
+    optimizer_lr_backbone: float = 1e-5
+
+    def __post_init__(self):
+        super().__post_init__()
+
+        # Input validation (not exhaustive).
+        if not self.vision_backbone.startswith("resnet"):
+            raise ValueError(
+                f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}."
+            )
+        if self.temporal_ensemble_coeff is not None and self.n_action_steps > 1:
+            raise NotImplementedError(
+                "`n_action_steps` must be 1 when using temporal ensembling. This is "
+                "because the policy needs to be queried every step to compute the ensembled action."
+            )
+        if self.n_action_steps > self.chunk_size:
+            raise ValueError(
+                f"The chunk size is the upper bound for the number of action steps per model invocation. Got "
+                f"{self.n_action_steps} for `n_action_steps` and {self.chunk_size} for `chunk_size`."
+            )
+        if self.n_obs_steps != 1:
+            raise ValueError(
+                f"Multiple observation steps not handled yet. Got `n_obs_steps={self.n_obs_steps}`"
+            )
+
+    def get_optimizer_preset(self) -> AdamWConfig:
+        return AdamWConfig(
+            lr=self.optimizer_lr,
+            weight_decay=self.optimizer_weight_decay,
+        )
+
+    def get_scheduler_preset(self) -> None:
+        return None
+
+    def validate_features(self) -> None:
+        if not self.image_features and not self.env_state_feature:
+            raise ValueError("You must provide at least one image or the environment state among the inputs.")
+
+    @property
+    def observation_delta_indices(self) -> None:
+        return None
+
+    @property
+    def action_delta_indices(self) -> list:
+        return list(range(self.chunk_size))
+
+    @property
+    def reward_delta_indices(self) -> None:
+        return None
diff --git a/src/lerobot/policies/act/modeling_act.py b/src/lerobot/policies/act/modeling_act.py
new file mode 100644
index 0000000000000000000000000000000000000000..461479a1b6213bb4f13bb68d34ff9944d6537006
--- /dev/null
+++ b/src/lerobot/policies/act/modeling_act.py
@@ -0,0 +1,745 @@
+#!/usr/bin/env python
+
+# Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Action Chunking Transformer Policy

+As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://huggingface.co/papers/2304.13705).
+The majority of changes here involve removing unused code, unifying naming, and adding helpful comments.
+"""
+
+import math
+from collections import deque
+from collections.abc import Callable
+from itertools import chain
+
+import einops
+import numpy as np
+import torch
+import torch.nn.functional as F  # noqa: N812
+import torchvision
+from torch import Tensor, nn
+from torchvision.models._utils import IntermediateLayerGetter
+from torchvision.ops.misc import FrozenBatchNorm2d
+
+from lerobot.policies.act.configuration_act import ACTConfig
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.utils.constants import ACTION, OBS_ENV_STATE, OBS_IMAGES, OBS_STATE
+
+
+class ACTPolicy(PreTrainedPolicy):
+    """
+    Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost
+    Hardware (paper: https://huggingface.co/papers/2304.13705, code: https://github.com/tonyzhaozh/act)
+    """
+
+    config_class = ACTConfig
+    name = "act"
+
+    def __init__(
+        self,
+        config: ACTConfig,
+    ):
+        """
+        Args:
+            config: Policy configuration class instance.
+        """
+        super().__init__(config)
+        config.validate_features()
+        self.config = config
+
+        self.model = ACT(config)
+
+        if config.temporal_ensemble_coeff is not None:
+            self.temporal_ensembler = ACTTemporalEnsembler(config.temporal_ensemble_coeff, config.chunk_size)
+
+        self.reset()
+
+    def get_optim_params(self) -> dict:
+        # TODO(aliberts, rcadene): As of now, lr_backbone == lr
+        # Should we remove this and just `return self.parameters()`?
+        return [
+            {
+                "params": [
+                    p
+                    for n, p in self.named_parameters()
+                    if not n.startswith("model.backbone") and p.requires_grad
+                ]
+            },
+            {
+                "params": [
+                    p
+                    for n, p in self.named_parameters()
+                    if n.startswith("model.backbone") and p.requires_grad
+                ],
+                "lr": self.config.optimizer_lr_backbone,
+            },
+        ]
+
+    def reset(self):
+        """This should be called whenever the environment is reset."""
+        if self.config.temporal_ensemble_coeff is not None:
+            self.temporal_ensembler.reset()
+        else:
+            self._action_queue = deque([], maxlen=self.config.n_action_steps)
+
+    @torch.no_grad()
+    def select_action(self, batch: dict[str, Tensor]) -> Tensor:
+        """Select a single action given environment observations.
+
+        This method wraps `predict_action_chunk` in order to return one action at a time for execution in
+        the environment. It works by managing the actions in a queue and only calling
+        `predict_action_chunk` when the queue is empty.
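+
+        Example (sketch; ``batch`` maps observation keys to tensors for the current step):
+            >>> action = policy.select_action(batch)  # returns one (batch_size, action_dim) action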
+ """ + self.eval() # keeping the policy in eval mode as it could be set to train mode while queue is consumed + + if self.config.temporal_ensemble_coeff is not None: + actions = self.predict_action_chunk(batch) + action = self.temporal_ensembler.update(actions) + return action + + # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by + # querying the policy. + if len(self._action_queue) == 0: + actions = self.predict_action_chunk(batch)[:, : self.config.n_action_steps] + + # `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue + # effectively has shape (n_action_steps, batch_size, *), hence the transpose. + self._action_queue.extend(actions.transpose(0, 1)) + return self._action_queue.popleft() + + @torch.no_grad() + def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: + """Predict a chunk of actions given environment observations.""" + self.eval() + + if self.config.image_features: + batch = dict(batch) # shallow copy so that adding a key doesn't modify the original + batch[OBS_IMAGES] = [batch[key] for key in self.config.image_features] + + actions = self.model(batch)[0] + return actions + + def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: + """Run the batch through the model and compute the loss for training or validation.""" + if self.config.image_features: + batch = dict(batch) # shallow copy so that adding a key doesn't modify the original + batch[OBS_IMAGES] = [batch[key] for key in self.config.image_features] + + actions_hat, (mu_hat, log_sigma_x2_hat) = self.model(batch) + + l1_loss = ( + F.l1_loss(batch[ACTION], actions_hat, reduction="none") * ~batch["action_is_pad"].unsqueeze(-1) + ).mean() + + loss_dict = {"l1_loss": l1_loss.item()} + if self.config.use_vae: + # Calculate Dₖₗ(latent_pdf || standard_normal). Note: After computing the KL-divergence for + # each dimension independently, we sum over the latent dimension to get the total + # KL-divergence per batch element, then take the mean over the batch. + # (See App. B of https://huggingface.co/papers/1312.6114 for more details). + mean_kld = ( + (-0.5 * (1 + log_sigma_x2_hat - mu_hat.pow(2) - (log_sigma_x2_hat).exp())).sum(-1).mean() + ) + loss_dict["kld_loss"] = mean_kld.item() + loss = l1_loss + mean_kld * self.config.kl_weight + else: + loss = l1_loss + + return loss, loss_dict + + +class ACTTemporalEnsembler: + def __init__(self, temporal_ensemble_coeff: float, chunk_size: int) -> None: + """Temporal ensembling as described in Algorithm 2 of https://huggingface.co/papers/2304.13705. + + The weights are calculated as wᵢ = exp(-temporal_ensemble_coeff * i) where w₀ is the oldest action. + They are then normalized to sum to 1 by dividing by Σwᵢ. Here's some intuition around how the + coefficient works: + - Setting it to 0 uniformly weighs all actions. + - Setting it positive gives more weight to older actions. + - Setting it negative gives more weight to newer actions. + NOTE: The default value for `temporal_ensemble_coeff` used by the original ACT work is 0.01. This + results in older actions being weighed more highly than newer actions (the experiments documented in + https://github.com/huggingface/lerobot/pull/319 hint at why highly weighing new actions might be + detrimental: doing so aggressively may diminish the benefits of action chunking). + + Here we use an online method for computing the average rather than caching a history of actions in + order to compute the average offline. 
For a simple 1D sequence it looks something like: + + ``` + import torch + + seq = torch.linspace(8, 8.5, 100) + print(seq) + + m = 0.01 + exp_weights = torch.exp(-m * torch.arange(len(seq))) + print(exp_weights) + + # Calculate offline + avg = (exp_weights * seq).sum() / exp_weights.sum() + print("offline", avg) + + # Calculate online + for i, item in enumerate(seq): + if i == 0: + avg = item + continue + avg *= exp_weights[:i].sum() + avg += item * exp_weights[i] + avg /= exp_weights[: i + 1].sum() + print("online", avg) + ``` + """ + self.chunk_size = chunk_size + self.ensemble_weights = torch.exp(-temporal_ensemble_coeff * torch.arange(chunk_size)) + self.ensemble_weights_cumsum = torch.cumsum(self.ensemble_weights, dim=0) + self.reset() + + def reset(self): + """Resets the online computation variables.""" + self.ensembled_actions = None + # (chunk_size,) count of how many actions are in the ensemble for each time step in the sequence. + self.ensembled_actions_count = None + + def update(self, actions: Tensor) -> Tensor: + """ + Takes a (batch, chunk_size, action_dim) sequence of actions, update the temporal ensemble for all + time steps, and pop/return the next batch of actions in the sequence. + """ + self.ensemble_weights = self.ensemble_weights.to(device=actions.device) + self.ensemble_weights_cumsum = self.ensemble_weights_cumsum.to(device=actions.device) + if self.ensembled_actions is None: + # Initializes `self._ensembled_action` to the sequence of actions predicted during the first + # time step of the episode. + self.ensembled_actions = actions.clone() + # Note: The last dimension is unsqueeze to make sure we can broadcast properly for tensor + # operations later. + self.ensembled_actions_count = torch.ones( + (self.chunk_size, 1), dtype=torch.long, device=self.ensembled_actions.device + ) + else: + # self.ensembled_actions will have shape (batch_size, chunk_size - 1, action_dim). Compute + # the online update for those entries. + self.ensembled_actions *= self.ensemble_weights_cumsum[self.ensembled_actions_count - 1] + self.ensembled_actions += actions[:, :-1] * self.ensemble_weights[self.ensembled_actions_count] + self.ensembled_actions /= self.ensemble_weights_cumsum[self.ensembled_actions_count] + self.ensembled_actions_count = torch.clamp(self.ensembled_actions_count + 1, max=self.chunk_size) + # The last action, which has no prior online average, needs to get concatenated onto the end. + self.ensembled_actions = torch.cat([self.ensembled_actions, actions[:, -1:]], dim=1) + self.ensembled_actions_count = torch.cat( + [self.ensembled_actions_count, torch.ones_like(self.ensembled_actions_count[-1:])] + ) + # "Consume" the first action. + action, self.ensembled_actions, self.ensembled_actions_count = ( + self.ensembled_actions[:, 0], + self.ensembled_actions[:, 1:], + self.ensembled_actions_count[1:], + ) + return action + + +class ACT(nn.Module): + """Action Chunking Transformer: The underlying neural network for ACTPolicy. + + Note: In this code we use the terms `vae_encoder`, 'encoder', `decoder`. The meanings are as follows. + - The `vae_encoder` is, as per the literature around variational auto-encoders (VAE), the part of the + model that encodes the target data (a sequence of actions), and the condition (the robot + joint-space). + - A transformer with an `encoder` (not the VAE encoder) and `decoder` (not the VAE decoder) with + cross-attention is used as the VAE decoder. 
For these terms, we drop the `vae_` prefix because we + have an option to train this model without the variational objective (in which case we drop the + `vae_encoder` altogether, and nothing about this model has anything to do with a VAE). + + Transformer + Used alone for inference + (acts as VAE decoder + during training) + ┌───────────────────────┐ + │ Outputs │ + │ ▲ │ + │ ┌─────►┌───────┐ │ + ┌──────┐ │ │ │Transf.│ │ + │ │ │ ├─────►│decoder│ │ + ┌────┴────┐ │ │ │ │ │ │ + │ │ │ │ ┌───┴───┬─►│ │ │ + │ VAE │ │ │ │ │ └───────┘ │ + │ encoder │ │ │ │Transf.│ │ + │ │ │ │ │encoder│ │ + └───▲─────┘ │ │ │ │ │ + │ │ │ └▲──▲─▲─┘ │ + │ │ │ │ │ │ │ + inputs └─────┼──┘ │ image emb. │ + │ state emb. │ + └───────────────────────┘ + """ + + def __init__(self, config: ACTConfig): + # BERT style VAE encoder with input tokens [cls, robot_state, *action_sequence]. + # The cls token forms parameters of the latent's distribution (like this [*means, *log_variances]). + super().__init__() + self.config = config + + if self.config.use_vae: + self.vae_encoder = ACTEncoder(config, is_vae_encoder=True) + self.vae_encoder_cls_embed = nn.Embedding(1, config.dim_model) + # Projection layer for joint-space configuration to hidden dimension. + if self.config.robot_state_feature: + self.vae_encoder_robot_state_input_proj = nn.Linear( + self.config.robot_state_feature.shape[0], config.dim_model + ) + # Projection layer for action (joint-space target) to hidden dimension. + self.vae_encoder_action_input_proj = nn.Linear( + self.config.action_feature.shape[0], + config.dim_model, + ) + # Projection layer from the VAE encoder's output to the latent distribution's parameter space. + self.vae_encoder_latent_output_proj = nn.Linear(config.dim_model, config.latent_dim * 2) + # Fixed sinusoidal positional embedding for the input to the VAE encoder. Unsqueeze for batch + # dimension. + num_input_token_encoder = 1 + config.chunk_size + if self.config.robot_state_feature: + num_input_token_encoder += 1 + self.register_buffer( + "vae_encoder_pos_enc", + create_sinusoidal_pos_embedding(num_input_token_encoder, config.dim_model).unsqueeze(0), + ) + + # Backbone for image feature extraction. + if self.config.image_features: + backbone_model = getattr(torchvision.models, config.vision_backbone)( + replace_stride_with_dilation=[False, False, config.replace_final_stride_with_dilation], + weights=config.pretrained_backbone_weights, + norm_layer=FrozenBatchNorm2d, + ) + # Note: The assumption here is that we are using a ResNet model (and hence layer4 is the final + # feature map). + # Note: The forward method of this returns a dict: {"feature_map": output}. + self.backbone = IntermediateLayerGetter(backbone_model, return_layers={"layer4": "feature_map"}) + + # Transformer (acts as VAE decoder when training with the variational objective). + self.encoder = ACTEncoder(config) + self.decoder = ACTDecoder(config) + + # Transformer encoder input projections. The tokens will be structured like + # [latent, (robot_state), (env_state), (image_feature_map_pixels)]. 
+ if self.config.robot_state_feature: + self.encoder_robot_state_input_proj = nn.Linear( + self.config.robot_state_feature.shape[0], config.dim_model + ) + if self.config.env_state_feature: + self.encoder_env_state_input_proj = nn.Linear( + self.config.env_state_feature.shape[0], config.dim_model + ) + self.encoder_latent_input_proj = nn.Linear(config.latent_dim, config.dim_model) + if self.config.image_features: + self.encoder_img_feat_input_proj = nn.Conv2d( + backbone_model.fc.in_features, config.dim_model, kernel_size=1 + ) + # Transformer encoder positional embeddings. + n_1d_tokens = 1 # for the latent + if self.config.robot_state_feature: + n_1d_tokens += 1 + if self.config.env_state_feature: + n_1d_tokens += 1 + self.encoder_1d_feature_pos_embed = nn.Embedding(n_1d_tokens, config.dim_model) + if self.config.image_features: + self.encoder_cam_feat_pos_embed = ACTSinusoidalPositionEmbedding2d(config.dim_model // 2) + + # Transformer decoder. + # Learnable positional embedding for the transformer's decoder (in the style of DETR object queries). + self.decoder_pos_embed = nn.Embedding(config.chunk_size, config.dim_model) + + # Final action regression head on the output of the transformer's decoder. + self.action_head = nn.Linear(config.dim_model, self.config.action_feature.shape[0]) + + self._reset_parameters() + + def _reset_parameters(self): + """Xavier-uniform initialization of the transformer parameters as in the original code.""" + for p in chain(self.encoder.parameters(), self.decoder.parameters()): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tensor] | tuple[None, None]]: + """A forward pass through the Action Chunking Transformer (with optional VAE encoder). + + `batch` should have the following structure: + { + [robot_state_feature] (optional): (B, state_dim) batch of robot states. + + [image_features]: (B, n_cameras, C, H, W) batch of images. + AND/OR + [env_state_feature]: (B, env_dim) batch of environment states. + + [action_feature] (optional, only if training with VAE): (B, chunk_size, action dim) batch of actions. + } + + Returns: + (B, chunk_size, action_dim) batch of action sequences + Tuple containing the latent PDF's parameters (mean, log(σ²)) both as (B, L) tensors where L is the + latent dimension. + """ + if self.config.use_vae and self.training: + assert ACTION in batch, ( + "actions must be provided when using the variational objective in training mode." + ) + + batch_size = batch[OBS_IMAGES][0].shape[0] if OBS_IMAGES in batch else batch[OBS_ENV_STATE].shape[0] + + # Prepare the latent for input to the transformer encoder. + if self.config.use_vae and ACTION in batch and self.training: + # Prepare the input to the VAE encoder: [cls, *joint_space_configuration, *action_sequence]. + cls_embed = einops.repeat( + self.vae_encoder_cls_embed.weight, "1 d -> b 1 d", b=batch_size + ) # (B, 1, D) + if self.config.robot_state_feature: + robot_state_embed = self.vae_encoder_robot_state_input_proj(batch[OBS_STATE]) + robot_state_embed = robot_state_embed.unsqueeze(1) # (B, 1, D) + action_embed = self.vae_encoder_action_input_proj(batch[ACTION]) # (B, S, D) + + if self.config.robot_state_feature: + vae_encoder_input = [cls_embed, robot_state_embed, action_embed] # (B, S+2, D) + else: + vae_encoder_input = [cls_embed, action_embed] + vae_encoder_input = torch.cat(vae_encoder_input, axis=1) + + # Prepare fixed positional embedding. 
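+            # The precomputed sinusoidal table covers the [cls, (robot_state), *action_sequence] tokens.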
+            # Note: detach() shouldn't be necessary but leaving it the same as the original code just in case.
+            pos_embed = self.vae_encoder_pos_enc.clone().detach()  # (1, S+2, D)
+
+            # Prepare key padding mask for the VAE encoder. We have 1 or 2 extra tokens at the start of the
+            # sequence depending on whether we use the input states or not (cls and robot state).
+            # False means not a padding token.
+            cls_joint_is_pad = torch.full(
+                (batch_size, 2 if self.config.robot_state_feature else 1),
+                False,
+                device=batch[OBS_STATE].device,
+            )
+            key_padding_mask = torch.cat(
+                [cls_joint_is_pad, batch["action_is_pad"]], axis=1
+            )  # (B, S+1 or S+2)
+
+            # Forward pass through VAE encoder to get the latent PDF parameters.
+            cls_token_out = self.vae_encoder(
+                vae_encoder_input.permute(1, 0, 2),
+                pos_embed=pos_embed.permute(1, 0, 2),
+                key_padding_mask=key_padding_mask,
+            )[0]  # select the class token, with shape (B, D)
+            latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out)
+            mu = latent_pdf_params[:, : self.config.latent_dim]
+            # This is 2log(sigma). Done this way to match the original implementation.
+            log_sigma_x2 = latent_pdf_params[:, self.config.latent_dim :]
+
+            # Sample the latent with the reparameterization trick.
+            latent_sample = mu + log_sigma_x2.div(2).exp() * torch.randn_like(mu)
+        else:
+            # When not using the VAE encoder, we set the latent to be all zeros.
+            mu = log_sigma_x2 = None
+            # TODO(rcadene, alexander-soare): remove the call to `.to` to speed up the forward pass; precompute and use a buffer.
+            latent_sample = torch.zeros([batch_size, self.config.latent_dim], dtype=torch.float32).to(
+                batch[OBS_STATE].device
+            )
+
+        # Prepare transformer encoder inputs.
+        encoder_in_tokens = [self.encoder_latent_input_proj(latent_sample)]
+        encoder_in_pos_embed = list(self.encoder_1d_feature_pos_embed.weight.unsqueeze(1))
+        # Robot state token.
+        if self.config.robot_state_feature:
+            encoder_in_tokens.append(self.encoder_robot_state_input_proj(batch[OBS_STATE]))
+        # Environment state token.
+        if self.config.env_state_feature:
+            encoder_in_tokens.append(self.encoder_env_state_input_proj(batch[OBS_ENV_STATE]))
+
+        if self.config.image_features:
+            # For a list of images, the H and W may vary but H*W is constant.
+            # NOTE: If modifying this section, verify on MPS devices that
+            # gradients remain stable (no explosions or NaNs).
+            for img in batch[OBS_IMAGES]:
+                cam_features = self.backbone(img)["feature_map"]
+                cam_pos_embed = self.encoder_cam_feat_pos_embed(cam_features).to(dtype=cam_features.dtype)
+                cam_features = self.encoder_img_feat_input_proj(cam_features)
+
+                # Rearrange features to (sequence, batch, dim).
+                cam_features = einops.rearrange(cam_features, "b c h w -> (h w) b c")
+                cam_pos_embed = einops.rearrange(cam_pos_embed, "b c h w -> (h w) b c")
+
+                # Extend immediately instead of accumulating and concatenating.
+                # Convert to lists so that `extend` appends per-token tensors.
+                encoder_in_tokens.extend(list(cam_features))
+                encoder_in_pos_embed.extend(list(cam_pos_embed))
+
+        # Stack all tokens along the sequence dimension.
+        encoder_in_tokens = torch.stack(encoder_in_tokens, axis=0)
+        encoder_in_pos_embed = torch.stack(encoder_in_pos_embed, axis=0)
+
+        # Forward pass through the transformer modules.
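+        # (Illustrative shapes at this point: `encoder_in_tokens` is (S, B, D) and
+        # `encoder_in_pos_embed` is (S, 1, D), where S = n_1d_tokens plus h*w summed over
+        # the camera feature maps, and D = config.dim_model.)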
+ encoder_out = self.encoder(encoder_in_tokens, pos_embed=encoder_in_pos_embed) + # TODO(rcadene, alexander-soare): remove call to `device` ; precompute and use buffer + decoder_in = torch.zeros( + (self.config.chunk_size, batch_size, self.config.dim_model), + dtype=encoder_in_pos_embed.dtype, + device=encoder_in_pos_embed.device, + ) + decoder_out = self.decoder( + decoder_in, + encoder_out, + encoder_pos_embed=encoder_in_pos_embed, + decoder_pos_embed=self.decoder_pos_embed.weight.unsqueeze(1), + ) + + # Move back to (B, S, C). + decoder_out = decoder_out.transpose(0, 1) + + actions = self.action_head(decoder_out) + + return actions, (mu, log_sigma_x2) + + +class ACTEncoder(nn.Module): + """Convenience module for running multiple encoder layers, maybe followed by normalization.""" + + def __init__(self, config: ACTConfig, is_vae_encoder: bool = False): + super().__init__() + self.is_vae_encoder = is_vae_encoder + num_layers = config.n_vae_encoder_layers if self.is_vae_encoder else config.n_encoder_layers + self.layers = nn.ModuleList([ACTEncoderLayer(config) for _ in range(num_layers)]) + self.norm = nn.LayerNorm(config.dim_model) if config.pre_norm else nn.Identity() + + def forward( + self, x: Tensor, pos_embed: Tensor | None = None, key_padding_mask: Tensor | None = None + ) -> Tensor: + for layer in self.layers: + x = layer(x, pos_embed=pos_embed, key_padding_mask=key_padding_mask) + x = self.norm(x) + return x + + +class ACTEncoderLayer(nn.Module): + def __init__(self, config: ACTConfig): + super().__init__() + self.self_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout) + + # Feed forward layers. + self.linear1 = nn.Linear(config.dim_model, config.dim_feedforward) + self.dropout = nn.Dropout(config.dropout) + self.linear2 = nn.Linear(config.dim_feedforward, config.dim_model) + + self.norm1 = nn.LayerNorm(config.dim_model) + self.norm2 = nn.LayerNorm(config.dim_model) + self.dropout1 = nn.Dropout(config.dropout) + self.dropout2 = nn.Dropout(config.dropout) + + self.activation = get_activation_fn(config.feedforward_activation) + self.pre_norm = config.pre_norm + + def forward(self, x, pos_embed: Tensor | None = None, key_padding_mask: Tensor | None = None) -> Tensor: + skip = x + if self.pre_norm: + x = self.norm1(x) + q = k = x if pos_embed is None else x + pos_embed + x = self.self_attn(q, k, value=x, key_padding_mask=key_padding_mask) + x = x[0] # note: [0] to select just the output, not the attention weights + x = skip + self.dropout1(x) + if self.pre_norm: + skip = x + x = self.norm2(x) + else: + x = self.norm1(x) + skip = x + x = self.linear2(self.dropout(self.activation(self.linear1(x)))) + x = skip + self.dropout2(x) + if not self.pre_norm: + x = self.norm2(x) + return x + + +class ACTDecoder(nn.Module): + def __init__(self, config: ACTConfig): + """Convenience module for running multiple decoder layers followed by normalization.""" + super().__init__() + self.layers = nn.ModuleList([ACTDecoderLayer(config) for _ in range(config.n_decoder_layers)]) + self.norm = nn.LayerNorm(config.dim_model) + + def forward( + self, + x: Tensor, + encoder_out: Tensor, + decoder_pos_embed: Tensor | None = None, + encoder_pos_embed: Tensor | None = None, + ) -> Tensor: + for layer in self.layers: + x = layer( + x, encoder_out, decoder_pos_embed=decoder_pos_embed, encoder_pos_embed=encoder_pos_embed + ) + if self.norm is not None: + x = self.norm(x) + return x + + +class ACTDecoderLayer(nn.Module): + def __init__(self, config: ACTConfig): + super().__init__() 
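+        # (Standard transformer decoder layer: self-attention over the decoder queries,
+        # cross-attention into the encoder output, then a feed-forward block, with pre- or
+        # post-normalization selected by `config.pre_norm`.)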
+        self.self_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout)
+        self.multihead_attn = nn.MultiheadAttention(config.dim_model, config.n_heads, dropout=config.dropout)
+
+        # Feed forward layers.
+        self.linear1 = nn.Linear(config.dim_model, config.dim_feedforward)
+        self.dropout = nn.Dropout(config.dropout)
+        self.linear2 = nn.Linear(config.dim_feedforward, config.dim_model)
+
+        self.norm1 = nn.LayerNorm(config.dim_model)
+        self.norm2 = nn.LayerNorm(config.dim_model)
+        self.norm3 = nn.LayerNorm(config.dim_model)
+        self.dropout1 = nn.Dropout(config.dropout)
+        self.dropout2 = nn.Dropout(config.dropout)
+        self.dropout3 = nn.Dropout(config.dropout)
+
+        self.activation = get_activation_fn(config.feedforward_activation)
+        self.pre_norm = config.pre_norm
+
+    def maybe_add_pos_embed(self, tensor: Tensor, pos_embed: Tensor | None) -> Tensor:
+        return tensor if pos_embed is None else tensor + pos_embed
+
+    def forward(
+        self,
+        x: Tensor,
+        encoder_out: Tensor,
+        decoder_pos_embed: Tensor | None = None,
+        encoder_pos_embed: Tensor | None = None,
+    ) -> Tensor:
+        """
+        Args:
+            x: (Decoder Sequence, Batch, Channel) tensor of input tokens.
+            encoder_out: (Encoder Sequence, B, C) output features from the last layer of the encoder we are
+                cross-attending with.
+            encoder_pos_embed: (ES, 1, C) positional embedding for keys (from the encoder).
+            decoder_pos_embed: (DS, 1, C) positional embedding for the queries (from the decoder).
+        Returns:
+            (DS, B, C) tensor of decoder output features.
+        """
+        skip = x
+        if self.pre_norm:
+            x = self.norm1(x)
+        q = k = self.maybe_add_pos_embed(x, decoder_pos_embed)
+        x = self.self_attn(q, k, value=x)[0]  # select just the output, not the attention weights
+        x = skip + self.dropout1(x)
+        if self.pre_norm:
+            skip = x
+            x = self.norm2(x)
+        else:
+            x = self.norm1(x)
+            skip = x
+        x = self.multihead_attn(
+            query=self.maybe_add_pos_embed(x, decoder_pos_embed),
+            key=self.maybe_add_pos_embed(encoder_out, encoder_pos_embed),
+            value=encoder_out,
+        )[0]  # select just the output, not the attention weights
+        x = skip + self.dropout2(x)
+        if self.pre_norm:
+            skip = x
+            x = self.norm3(x)
+        else:
+            x = self.norm2(x)
+            skip = x
+        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
+        x = skip + self.dropout3(x)
+        if not self.pre_norm:
+            x = self.norm3(x)
+        return x
+
+
+def create_sinusoidal_pos_embedding(num_positions: int, dimension: int) -> Tensor:
+    """1D sinusoidal positional embeddings as in Attention is All You Need.
+
+    Args:
+        num_positions: Number of token positions required.
+        dimension: The embedding dimension.
+    Returns: (num_positions, dimension) position embeddings (any batch dimension is added by the caller).
+    """
+
+    def get_position_angle_vec(position):
+        return [position / np.power(10000, 2 * (hid_j // 2) / dimension) for hid_j in range(dimension)]
+
+    sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(num_positions)])
+    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
+    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
+    return torch.from_numpy(sinusoid_table).float()
+
+
+class ACTSinusoidalPositionEmbedding2d(nn.Module):
+    """2D sinusoidal positional embeddings similar to what's presented in Attention Is All You Need.
+
+    The variation is that the position indices are normalized in [0, 2π] (not quite: the lower bound is 1/H
+    for the vertical direction, and 1/W for the horizontal direction).
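+    In other words, for a feature map of height H the row positions used are (1/H, 2/H, ..., 1) * 2π,
+    and likewise with W for the columns.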
+ """ + + def __init__(self, dimension: int): + """ + Args: + dimension: The desired dimension of the embeddings. + """ + super().__init__() + self.dimension = dimension + self._two_pi = 2 * math.pi + self._eps = 1e-6 + # Inverse "common ratio" for the geometric progression in sinusoid frequencies. + self._temperature = 10000 + + def forward(self, x: Tensor) -> Tensor: + """ + Args: + x: A (B, C, H, W) batch of 2D feature map to generate the embeddings for. + Returns: + A (1, C, H, W) batch of corresponding sinusoidal positional embeddings. + """ + not_mask = torch.ones_like(x[0, :1]) # (1, H, W) + # Note: These are like range(1, H+1) and range(1, W+1) respectively, but in most implementations + # they would be range(0, H) and range(0, W). Keeping it at as is to match the original code. + y_range = not_mask.cumsum(1, dtype=torch.float32) + x_range = not_mask.cumsum(2, dtype=torch.float32) + + # "Normalize" the position index such that it ranges in [0, 2π]. + # Note: Adding epsilon on the denominator should not be needed as all values of y_embed and x_range + # are non-zero by construction. This is an artifact of the original code. + y_range = y_range / (y_range[:, -1:, :] + self._eps) * self._two_pi + x_range = x_range / (x_range[:, :, -1:] + self._eps) * self._two_pi + + inverse_frequency = self._temperature ** ( + 2 * (torch.arange(self.dimension, dtype=torch.float32, device=x.device) // 2) / self.dimension + ) + + x_range = x_range.unsqueeze(-1) / inverse_frequency # (1, H, W, 1) + y_range = y_range.unsqueeze(-1) / inverse_frequency # (1, H, W, 1) + + # Note: this stack then flatten operation results in interleaved sine and cosine terms. + # pos_embed_x and pos_embed_y are (1, H, W, C // 2). + pos_embed_x = torch.stack((x_range[..., 0::2].sin(), x_range[..., 1::2].cos()), dim=-1).flatten(3) + pos_embed_y = torch.stack((y_range[..., 0::2].sin(), y_range[..., 1::2].cos()), dim=-1).flatten(3) + pos_embed = torch.cat((pos_embed_y, pos_embed_x), dim=3).permute(0, 3, 1, 2) # (1, C, H, W) + + return pos_embed + + +def get_activation_fn(activation: str) -> Callable: + """Return an activation function given a string.""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.") diff --git a/src/lerobot/policies/act/processor_act.py b/src/lerobot/policies/act/processor_act.py new file mode 100644 index 0000000000000000000000000000000000000000..1dedf8a99dc60ed2bafe862646e69f77021bcf9e --- /dev/null +++ b/src/lerobot/policies/act/processor_act.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python + +# Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Any + +import torch + +from lerobot.policies.act.configuration_act import ACTConfig +from lerobot.processor import ( + AddBatchDimensionProcessorStep, + DeviceProcessorStep, + NormalizerProcessorStep, + PolicyAction, + PolicyProcessorPipeline, + RenameObservationsProcessorStep, + UnnormalizerProcessorStep, +) +from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action +from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME + + +def make_act_pre_post_processors( + config: ACTConfig, + dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None, +) -> tuple[ + PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], + PolicyProcessorPipeline[PolicyAction, PolicyAction], +]: + """Creates the pre- and post-processing pipelines for the ACT policy. + + The pre-processing pipeline handles normalization, batching, and device placement for the model inputs. + The post-processing pipeline handles unnormalization and moves the model outputs back to the CPU. + + Args: + config (ACTConfig): The ACT policy configuration object. + dataset_stats (dict[str, dict[str, torch.Tensor]] | None): A dictionary containing dataset + statistics (e.g., mean and std) used for normalization. Defaults to None. + + Returns: + tuple[PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], PolicyProcessorPipeline[PolicyAction, PolicyAction]]: A tuple containing the + pre-processor pipeline and the post-processor pipeline. + """ + + input_steps = [ + RenameObservationsProcessorStep(rename_map={}), + AddBatchDimensionProcessorStep(), + DeviceProcessorStep(device=config.device), + NormalizerProcessorStep( + features={**config.input_features, **config.output_features}, + norm_map=config.normalization_mapping, + stats=dataset_stats, + device=config.device, + ), + ] + output_steps = [ + UnnormalizerProcessorStep( + features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats + ), + DeviceProcessorStep(device="cpu"), + ] + + return ( + PolicyProcessorPipeline[dict[str, Any], dict[str, Any]]( + steps=input_steps, + name=POLICY_PREPROCESSOR_DEFAULT_NAME, + ), + PolicyProcessorPipeline[PolicyAction, PolicyAction]( + steps=output_steps, + name=POLICY_POSTPROCESSOR_DEFAULT_NAME, + to_transition=policy_action_to_transition, + to_output=transition_to_policy_action, + ), + ) diff --git a/src/lerobot/policies/diffusion/README.md b/src/lerobot/policies/diffusion/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d332d79c89ecbb7abca0ad9d2aefb3db409cbb3f --- /dev/null +++ b/src/lerobot/policies/diffusion/README.md @@ -0,0 +1 @@ +../../../../docs/source/policy_diffusion_README.md \ No newline at end of file diff --git a/src/lerobot/policies/diffusion/configuration_diffusion.py b/src/lerobot/policies/diffusion/configuration_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..0aab8040daa399926107bb14e69edddce3f2544c --- /dev/null +++ b/src/lerobot/policies/diffusion/configuration_diffusion.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python + +# Copyright 2024 Columbia Artificial Intelligence, Robotics Lab, +# and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from dataclasses import dataclass, field
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import NormalizationMode
+from lerobot.optim.optimizers import AdamConfig
+from lerobot.optim.schedulers import DiffuserSchedulerConfig
+
+
+@PreTrainedConfig.register_subclass("diffusion")
+@dataclass
+class DiffusionConfig(PreTrainedConfig):
+    """Configuration class for DiffusionPolicy.
+
+    Defaults are configured for training with PushT providing proprioceptive and single camera observations.
+
+    The parameters you will most likely need to change are the ones which depend on the environment / sensors.
+    Those are: `input_shapes` and `output_shapes`.
+
+    Notes on the inputs and outputs:
+        - "observation.state" is required as an input key.
+        - Either:
+            - At least one key starting with "observation.image" is required as an input.
+              AND/OR
+            - The key "observation.environment_state" is required as an input.
+        - If there are multiple keys beginning with "observation.image" they are treated as multiple camera
+          views. Right now we only support all images having the same shape.
+        - "action" is required as an output key.
+
+    Args:
+        n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the
+            current step and additional steps going back).
+        horizon: Diffusion model action prediction size as detailed in `DiffusionPolicy.select_action`.
+        n_action_steps: The number of action steps to run in the environment for one invocation of the policy.
+            See `DiffusionPolicy.select_action` for more details.
+        input_shapes: A dictionary defining the shapes of the input data for the policy. The key represents
+            the input data name, and the value is a list indicating the dimensions of the corresponding data.
+            For example, "observation.image" refers to an input from a camera with dimensions [3, 96, 96],
+            indicating it has three color channels and 96x96 resolution. Importantly, `input_shapes` doesn't
+            include the batch dimension or temporal dimension.
+        output_shapes: A dictionary defining the shapes of the output data for the policy. The key represents
+            the output data name, and the value is a list indicating the dimensions of the corresponding data.
+            For example, "action" refers to an output shape of [14], indicating 14-dimensional actions.
+            Importantly, `output_shapes` doesn't include the batch dimension or temporal dimension.
+        input_normalization_modes: A dictionary where the key represents the modality (e.g. "observation.state"),
+            and the value specifies the normalization mode to apply. The two available modes are "mean_std",
+            which subtracts the mean and divides by the standard deviation, and "min_max", which rescales to a
+            [-1, 1] range.
+        output_normalization_modes: Similar dictionary to `input_normalization_modes`, but used to unnormalize
+            to the original scale. Note that this is also used for normalizing the training targets.
+        vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
+        crop_shape: (H, W) shape to crop images to as a preprocessing step for the vision backbone. Must fit
+            within the image size. If None, no cropping is done.
+        crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval
+            mode).
+        pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
+            `None` means no pretrained weights.
+        use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
+            The group sizes are set to be about 16 (to be precise, feature_dim // 16).
+        spatial_softmax_num_keypoints: Number of keypoints for SpatialSoftmax.
+        use_separate_rgb_encoder_per_camera: Whether to use a separate RGB encoder for each camera view.
+        down_dims: Feature dimension for each stage of temporal downsampling in the diffusion modeling Unet.
+            You may provide a variable number of dimensions, therefore also controlling the degree of
+            downsampling.
+        kernel_size: The convolutional kernel size of the diffusion modeling Unet.
+        n_groups: Number of groups used in the group norm of the Unet's convolutional blocks.
+        diffusion_step_embed_dim: The Unet is conditioned on the diffusion timestep via a small non-linear
+            network. This is the output dimension of that network, i.e., the embedding dimension.
+        use_film_scale_modulation: FiLM (https://huggingface.co/papers/1709.07871) is used for the Unet conditioning.
+            Bias modulation is used by default, while this parameter indicates whether to also use scale
+            modulation.
+        noise_scheduler_type: Name of the noise scheduler to use. Supported options: ["DDPM", "DDIM"].
+        num_train_timesteps: Number of diffusion steps for the forward diffusion schedule.
+        beta_schedule: Name of the diffusion beta schedule as per DDPMScheduler from Hugging Face diffusers.
+        beta_start: Beta value for the first forward-diffusion step.
+        beta_end: Beta value for the last forward-diffusion step.
+        prediction_type: The type of prediction that the diffusion modeling Unet makes. Choose from "epsilon"
+            or "sample". These have equivalent outcomes from a latent variable modeling perspective, but
+            "epsilon" has been shown to work better in many deep neural network settings.
+        clip_sample: Whether to clip the sample to [-`clip_sample_range`, +`clip_sample_range`] for each
+            denoising step at inference time. WARNING: you will need to make sure your action-space is
+            normalized to fit within this range.
+        clip_sample_range: The magnitude of the clipping range as described above.
+        num_inference_steps: Number of reverse diffusion steps to use at inference time (steps are evenly
+            spaced). If not provided, this defaults to be the same as `num_train_timesteps`.
+        do_mask_loss_for_padding: Whether to mask the loss when there are copy-padded actions. See
+            `LeRobotDataset` and `load_previous_and_future_frames` for more information. Note that this
+            defaults to False as the original Diffusion Policy implementation does the same.
+    """
+
+    # Inputs / output structure.
+    n_obs_steps: int = 2
+    horizon: int = 16
+    n_action_steps: int = 8
+
+    normalization_mapping: dict[str, NormalizationMode] = field(
+        default_factory=lambda: {
+            "VISUAL": NormalizationMode.MEAN_STD,
+            "STATE": NormalizationMode.MIN_MAX,
+            "ACTION": NormalizationMode.MIN_MAX,
+        }
+    )
+
+    # The original implementation doesn't sample frames for the last 7 steps,
+    # which avoids excessive padding and leads to improved training results.
+    drop_n_last_frames: int = 7  # horizon - n_action_steps - n_obs_steps + 1
+
+    # Architecture / modeling.
+    # Vision backbone.
+    vision_backbone: str = "resnet18"
+    crop_shape: tuple[int, int] | None = (84, 84)
+    crop_is_random: bool = True
+    pretrained_backbone_weights: str | None = None
+    use_group_norm: bool = True
+    spatial_softmax_num_keypoints: int = 32
+    use_separate_rgb_encoder_per_camera: bool = False
+    # Unet.
+    down_dims: tuple[int, ...] = (512, 1024, 2048)
+    kernel_size: int = 5
+    n_groups: int = 8
+    diffusion_step_embed_dim: int = 128
+    use_film_scale_modulation: bool = True
+    # Noise scheduler.
+    noise_scheduler_type: str = "DDPM"
+    num_train_timesteps: int = 100
+    beta_schedule: str = "squaredcos_cap_v2"
+    beta_start: float = 0.0001
+    beta_end: float = 0.02
+    prediction_type: str = "epsilon"
+    clip_sample: bool = True
+    clip_sample_range: float = 1.0
+
+    # Inference
+    num_inference_steps: int | None = None
+
+    # Loss computation
+    do_mask_loss_for_padding: bool = False
+
+    # Training presets
+    optimizer_lr: float = 1e-4
+    optimizer_betas: tuple = (0.95, 0.999)
+    optimizer_eps: float = 1e-8
+    optimizer_weight_decay: float = 1e-6
+    scheduler_name: str = "cosine"
+    scheduler_warmup_steps: int = 500
+
+    def __post_init__(self):
+        super().__post_init__()
+
+        # Input validation (not exhaustive).
+        if not self.vision_backbone.startswith("resnet"):
+            raise ValueError(
+                f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}."
+            )
+
+        supported_prediction_types = ["epsilon", "sample"]
+        if self.prediction_type not in supported_prediction_types:
+            raise ValueError(
+                f"`prediction_type` must be one of {supported_prediction_types}. Got {self.prediction_type}."
+            )
+        supported_noise_schedulers = ["DDPM", "DDIM"]
+        if self.noise_scheduler_type not in supported_noise_schedulers:
+            raise ValueError(
+                f"`noise_scheduler_type` must be one of {supported_noise_schedulers}. "
+                f"Got {self.noise_scheduler_type}."
+            )
+
+        # Check that the horizon size and the U-Net downsampling are compatible.
+        # The U-Net downsamples by 2 with each stage.
+        downsampling_factor = 2 ** len(self.down_dims)
+        if self.horizon % downsampling_factor != 0:
+            raise ValueError(
+                "The horizon should be an integer multiple of the downsampling factor (which is determined "
+                f"by `len(down_dims)`). Got {self.horizon=} and {self.down_dims=}"
+            )
+
+    def get_optimizer_preset(self) -> AdamConfig:
+        return AdamConfig(
+            lr=self.optimizer_lr,
+            betas=self.optimizer_betas,
+            eps=self.optimizer_eps,
+            weight_decay=self.optimizer_weight_decay,
+        )
+
+    def get_scheduler_preset(self) -> DiffuserSchedulerConfig:
+        return DiffuserSchedulerConfig(
+            name=self.scheduler_name,
+            num_warmup_steps=self.scheduler_warmup_steps,
+        )
+
+    def validate_features(self) -> None:
+        if len(self.image_features) == 0 and self.env_state_feature is None:
+            raise ValueError("You must provide at least one image or the environment state among the inputs.")
+
+        if self.crop_shape is not None:
+            for key, image_ft in self.image_features.items():
+                if self.crop_shape[0] > image_ft.shape[1] or self.crop_shape[1] > image_ft.shape[2]:
+                    raise ValueError(
+                        f"`crop_shape` should fit within the images shapes. Got {self.crop_shape} "
+                        f"for `crop_shape` and {image_ft.shape} for "
+                        f"`{key}`."
+                    )
+
+        # Check that all input images have the same shape.
+        if len(self.image_features) > 0:
+            first_image_key, first_image_ft = next(iter(self.image_features.items()))
+            for key, image_ft in self.image_features.items():
+                if image_ft.shape != first_image_ft.shape:
+                    raise ValueError(
+                        f"`{key}` does not match `{first_image_key}`, but we expect all image shapes to match."
+                    )
+
+    @property
+    def observation_delta_indices(self) -> list:
+        return list(range(1 - self.n_obs_steps, 1))
+
+    @property
+    def action_delta_indices(self) -> list:
+        return list(range(1 - self.n_obs_steps, 1 - self.n_obs_steps + self.horizon))
+
+    @property
+    def reward_delta_indices(self) -> None:
+        return None
diff --git a/src/lerobot/policies/diffusion/modeling_diffusion.py b/src/lerobot/policies/diffusion/modeling_diffusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..7dd8e50487275d2781bd87722bd0530d3e83b66c
--- /dev/null
+++ b/src/lerobot/policies/diffusion/modeling_diffusion.py
@@ -0,0 +1,763 @@
+#!/usr/bin/env python
+
+# Copyright 2024 Columbia Artificial Intelligence, Robotics Lab,
+# and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Diffusion Policy as per "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion"
+
+TODO(alexander-soare):
+    - Remove reliance on diffusers for DDPMScheduler and LR scheduler.
+"""
+
+import math
+from collections import deque
+from collections.abc import Callable
+
+import einops
+import numpy as np
+import torch
+import torch.nn.functional as F  # noqa: N812
+import torchvision
+from diffusers.schedulers.scheduling_ddim import DDIMScheduler
+from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
+from torch import Tensor, nn
+
+from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies.utils import (
+    get_device_from_parameters,
+    get_dtype_from_parameters,
+    get_output_shape,
+    populate_queues,
+)
+from lerobot.utils.constants import ACTION, OBS_ENV_STATE, OBS_IMAGES, OBS_STATE
+
+
+class DiffusionPolicy(PreTrainedPolicy):
+    """
+    Diffusion Policy as per "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion"
+    (paper: https://huggingface.co/papers/2303.04137, code: https://github.com/real-stanford/diffusion_policy).
+    """
+
+    config_class = DiffusionConfig
+    name = "diffusion"
+
+    def __init__(
+        self,
+        config: DiffusionConfig,
+    ):
+        """
+        Args:
+            config: Policy configuration class instance or None, in which case the default instantiation of
+                the configuration class is used.
+        """
+        super().__init__(config)
+        config.validate_features()
+        self.config = config
+
+        # Queues are populated during rollout of the policy. They contain the n latest observations and actions.
+        self._queues = None
+
+        self.diffusion = DiffusionModel(config)
+
+        self.reset()
+
+    def get_optim_params(self) -> dict:
+        return self.diffusion.parameters()
+
+    def reset(self):
+        """Clear observation and action queues. Should be called on `env.reset()`."""
+        self._queues = {
+            OBS_STATE: deque(maxlen=self.config.n_obs_steps),
+            ACTION: deque(maxlen=self.config.n_action_steps),
+        }
+        if self.config.image_features:
+            self._queues[OBS_IMAGES] = deque(maxlen=self.config.n_obs_steps)
+        if self.config.env_state_feature:
+            self._queues[OBS_ENV_STATE] = deque(maxlen=self.config.n_obs_steps)
+
+    @torch.no_grad()
+    def predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
+        """Predict a chunk of actions given environment observations."""
+        # Stack the n latest observations from the queues.
+        batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues}
+        actions = self.diffusion.generate_actions(batch, noise=noise)
+
+        return actions
+
+    @torch.no_grad()
+    def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
+        """Select a single action given environment observations.
+
+        This method handles caching a history of observations and an action trajectory generated by the
+        underlying diffusion model. Here's how it works:
+          - `n_obs_steps` steps worth of observations are cached (for the first steps, the observation is
+            copied `n_obs_steps` times to fill the cache).
+          - The diffusion model generates `horizon` steps worth of actions.
+          - `n_action_steps` worth of actions are actually kept for execution, starting from the current step.
+        Schematically this looks like:
+            ----------------------------------------------------------------------------------------------
+            (legend: o = n_obs_steps, h = horizon, a = n_action_steps)
+            |timestep            | n-o+1 | n-o+2 | ..... | n     | ..... | n+a-1 | n+a   | ..... | n-o+h |
+            |observation is used | YES   | YES   | YES   | YES   | NO    | NO    | NO    | NO    | NO    |
+            |action is generated | YES   | YES   | YES   | YES   | YES   | YES   | YES   | YES   | YES   |
+            |action is used      | NO    | NO    | NO    | YES   | YES   | YES   | NO    | NO    | NO    |
+            ----------------------------------------------------------------------------------------------
+        Note that this means we require: `n_action_steps <= horizon - n_obs_steps + 1`. Also, note that
+        "horizon" may not be the best name to describe what the variable actually means, because this period
+        is actually measured from the first observation which (if `n_obs_steps` > 1) happened in the past.
+        """
+        # NOTE: for offline evaluation, we have action in the batch, so we need to pop it out.
+        if ACTION in batch:
+            batch.pop(ACTION)
+
+        if self.config.image_features:
+            batch = dict(batch)  # shallow copy so that adding a key doesn't modify the original
+            batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4)
+        # NOTE: It's important that this happens after stacking the images into a single key.
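+        # (Per the docstring above, early in an episode the observation queues are filled by
+        # repeating the first observation until `n_obs_steps` entries are available.)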
+ self._queues = populate_queues(self._queues, batch) + + if len(self._queues[ACTION]) == 0: + actions = self.predict_action_chunk(batch, noise=noise) + self._queues[ACTION].extend(actions.transpose(0, 1)) + + action = self._queues[ACTION].popleft() + return action + + def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, None]: + """Run the batch through the model and compute the loss for training or validation.""" + if self.config.image_features: + batch = dict(batch) # shallow copy so that adding a key doesn't modify the original + batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) + loss = self.diffusion.compute_loss(batch) + # no output_dict so returning None + return loss, None + + +def _make_noise_scheduler(name: str, **kwargs: dict) -> DDPMScheduler | DDIMScheduler: + """ + Factory for noise scheduler instances of the requested type. All kwargs are passed + to the scheduler. + """ + if name == "DDPM": + return DDPMScheduler(**kwargs) + elif name == "DDIM": + return DDIMScheduler(**kwargs) + else: + raise ValueError(f"Unsupported noise scheduler type {name}") + + +class DiffusionModel(nn.Module): + def __init__(self, config: DiffusionConfig): + super().__init__() + self.config = config + + # Build observation encoders (depending on which observations are provided). + global_cond_dim = self.config.robot_state_feature.shape[0] + if self.config.image_features: + num_images = len(self.config.image_features) + if self.config.use_separate_rgb_encoder_per_camera: + encoders = [DiffusionRgbEncoder(config) for _ in range(num_images)] + self.rgb_encoder = nn.ModuleList(encoders) + global_cond_dim += encoders[0].feature_dim * num_images + else: + self.rgb_encoder = DiffusionRgbEncoder(config) + global_cond_dim += self.rgb_encoder.feature_dim * num_images + if self.config.env_state_feature: + global_cond_dim += self.config.env_state_feature.shape[0] + + self.unet = DiffusionConditionalUnet1d(config, global_cond_dim=global_cond_dim * config.n_obs_steps) + + self.noise_scheduler = _make_noise_scheduler( + config.noise_scheduler_type, + num_train_timesteps=config.num_train_timesteps, + beta_start=config.beta_start, + beta_end=config.beta_end, + beta_schedule=config.beta_schedule, + clip_sample=config.clip_sample, + clip_sample_range=config.clip_sample_range, + prediction_type=config.prediction_type, + ) + + if config.num_inference_steps is None: + self.num_inference_steps = self.noise_scheduler.config.num_train_timesteps + else: + self.num_inference_steps = config.num_inference_steps + + # ========= inference ============ + def conditional_sample( + self, + batch_size: int, + global_cond: Tensor | None = None, + generator: torch.Generator | None = None, + noise: Tensor | None = None, + ) -> Tensor: + device = get_device_from_parameters(self) + dtype = get_dtype_from_parameters(self) + + # Sample prior. + sample = ( + noise + if noise is not None + else torch.randn( + size=(batch_size, self.config.horizon, self.config.action_feature.shape[0]), + dtype=dtype, + device=device, + generator=generator, + ) + ) + + self.noise_scheduler.set_timesteps(self.num_inference_steps) + + for t in self.noise_scheduler.timesteps: + # Predict model output. 
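+            # (Each iteration denoises `sample` by one scheduler timestep; `set_timesteps`
+            # above selected `num_inference_steps` evenly spaced timesteps.)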
+            model_output = self.unet(
+                sample,
+                torch.full(sample.shape[:1], t, dtype=torch.long, device=sample.device),
+                global_cond=global_cond,
+            )
+            # Compute the previous sample: x_t -> x_{t-1}
+            sample = self.noise_scheduler.step(model_output, t, sample, generator=generator).prev_sample
+
+        return sample
+
+    def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor:
+        """Encode image features and concatenate them all together along with the state vector."""
+        batch_size, n_obs_steps = batch[OBS_STATE].shape[:2]
+        global_cond_feats = [batch[OBS_STATE]]
+        # Extract image features.
+        if self.config.image_features:
+            if self.config.use_separate_rgb_encoder_per_camera:
+                # Combine batch and sequence dims while rearranging to make the camera index dimension first.
+                images_per_camera = einops.rearrange(batch[OBS_IMAGES], "b s n ... -> n (b s) ...")
+                img_features_list = torch.cat(
+                    [
+                        encoder(images)
+                        for encoder, images in zip(self.rgb_encoder, images_per_camera, strict=True)
+                    ]
+                )
+                # Separate batch and sequence dims back out. The camera index dim gets absorbed into the
+                # feature dim (effectively concatenating the camera features).
+                img_features = einops.rearrange(
+                    img_features_list, "(n b s) ... -> b s (n ...)", b=batch_size, s=n_obs_steps
+                )
+            else:
+                # Combine batch, sequence, and "which camera" dims before passing to shared encoder.
+                img_features = self.rgb_encoder(
+                    einops.rearrange(batch[OBS_IMAGES], "b s n ... -> (b s n) ...")
+                )
+                # Separate batch dim and sequence dim back out. The camera index dim gets absorbed into the
+                # feature dim (effectively concatenating the camera features).
+                img_features = einops.rearrange(
+                    img_features, "(b s n) ... -> b s (n ...)", b=batch_size, s=n_obs_steps
+                )
+            global_cond_feats.append(img_features)
+
+        if self.config.env_state_feature:
+            global_cond_feats.append(batch[OBS_ENV_STATE])
+
+        # Concatenate features then flatten to (B, global_cond_dim).
+        return torch.cat(global_cond_feats, dim=-1).flatten(start_dim=1)
+
+    def generate_actions(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
+        """
+        This function expects `batch` to have:
+        {
+            "observation.state": (B, n_obs_steps, state_dim)
+
+            "observation.images": (B, n_obs_steps, num_cameras, C, H, W)
+                AND/OR
+            "observation.environment_state": (B, n_obs_steps, environment_dim)
+        }
+        """
+        batch_size, n_obs_steps = batch[OBS_STATE].shape[:2]
+        assert n_obs_steps == self.config.n_obs_steps
+
+        # Encode image features and concatenate them all together along with the state vector.
+        global_cond = self._prepare_global_conditioning(batch)  # (B, global_cond_dim)
+
+        # Run sampling.
+        actions = self.conditional_sample(batch_size, global_cond=global_cond, noise=noise)
+
+        # Extract `n_action_steps` steps worth of actions (from the current observation).
+        start = n_obs_steps - 1
+        end = start + self.config.n_action_steps
+        actions = actions[:, start:end]
+
+        return actions
+
+    def compute_loss(self, batch: dict[str, Tensor]) -> Tensor:
+        """
+        This function expects `batch` to have (at least):
+        {
+            "observation.state": (B, n_obs_steps, state_dim)
+
+            "observation.images": (B, n_obs_steps, num_cameras, C, H, W)
+                AND/OR
+            "observation.environment_state": (B, n_obs_steps, environment_dim)
+
+            "action": (B, horizon, action_dim)
+            "action_is_pad": (B, horizon)
+        }
+        """
+        # Input validation.
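+        # (Beyond these checks, this method noises the ground-truth trajectory at a random
+        # timestep and regresses either the noise ("epsilon") or the clean trajectory
+        # ("sample"), as selected by `config.prediction_type`.)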
+        assert set(batch).issuperset({OBS_STATE, ACTION, "action_is_pad"})
+        assert OBS_IMAGES in batch or OBS_ENV_STATE in batch
+        n_obs_steps = batch[OBS_STATE].shape[1]
+        horizon = batch[ACTION].shape[1]
+        assert horizon == self.config.horizon
+        assert n_obs_steps == self.config.n_obs_steps
+
+        # Encode image features and concatenate them all together along with the state vector.
+        global_cond = self._prepare_global_conditioning(batch)  # (B, global_cond_dim)
+
+        # Forward diffusion.
+        trajectory = batch[ACTION]
+        # Sample noise to add to the trajectory.
+        eps = torch.randn(trajectory.shape, device=trajectory.device)
+        # Sample a random noising timestep for each item in the batch.
+        timesteps = torch.randint(
+            low=0,
+            high=self.noise_scheduler.config.num_train_timesteps,
+            size=(trajectory.shape[0],),
+            device=trajectory.device,
+        ).long()
+        # Add noise to the clean trajectories according to the noise magnitude at each timestep.
+        noisy_trajectory = self.noise_scheduler.add_noise(trajectory, eps, timesteps)
+
+        # Run the denoising network (that might denoise the trajectory, or attempt to predict the noise).
+        pred = self.unet(noisy_trajectory, timesteps, global_cond=global_cond)
+
+        # Compute the loss.
+        # The target is either the original trajectory, or the noise.
+        if self.config.prediction_type == "epsilon":
+            target = eps
+        elif self.config.prediction_type == "sample":
+            target = batch[ACTION]
+        else:
+            raise ValueError(f"Unsupported prediction type {self.config.prediction_type}")
+
+        loss = F.mse_loss(pred, target, reduction="none")
+
+        # Mask loss wherever the action is padded with copies (edges of the dataset trajectory).
+        if self.config.do_mask_loss_for_padding:
+            if "action_is_pad" not in batch:
+                raise ValueError(
+                    "You need to provide 'action_is_pad' in the batch when "
+                    f"{self.config.do_mask_loss_for_padding=}."
+                )
+            in_episode_bound = ~batch["action_is_pad"]
+            loss = loss * in_episode_bound.unsqueeze(-1)
+
+        return loss.mean()
+
+
+class SpatialSoftmax(nn.Module):
+    """
+    Spatial Soft Argmax operation described in "Deep Spatial Autoencoders for Visuomotor Learning" by Finn et al.
+    (https://huggingface.co/papers/1509.06113). A minimal port of the robomimic implementation.
+
+    At a high level, this takes 2D feature maps (from a convnet/ViT) and returns the "center of mass"
+    of activations of each channel, i.e., keypoints in the image space for the policy to focus on.
+
+    Example: take feature maps of size (512x10x12). We generate a grid of normalized coordinates (10x12x2):
+    -----------------------------------------------------
+    | (-1., -1.)   | (-0.82, -1.)   | ... | (1., -1.)   |
+    | (-1., -0.78) | (-0.82, -0.78) | ... | (1., -0.78) |
+    | ...          | ...            | ... | ...         |
+    | (-1., 1.)    | (-0.82, 1.)    | ... | (1., 1.)    |
+    -----------------------------------------------------
+    This is achieved by applying channel-wise softmax over the activations (512x120) and computing the dot
+    product with the coordinates (120x2) to get expected points of maximal activation (512x2).
+
+    The example above results in 512 keypoints (corresponding to the 512 input channels). We can optionally
+    provide num_kp != None to control the number of keypoints. This is achieved by first applying a learnable
+    linear mapping (in_channels, H, W) -> (num_kp, H, W).
+    """
+
+    def __init__(self, input_shape, num_kp=None):
+        """
+        Args:
+            input_shape (list): (C, H, W) input feature map shape.
+            num_kp (int): number of keypoints in output. If None, output will have the same number of
+                channels as input.
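+
+        Example (illustrative):
+            pool = SpatialSoftmax((512, 10, 12), num_kp=32)
+            feats = torch.randn(2, 512, 10, 12)
+            keypoints = pool(feats)  # (2, 32, 2)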
+ """ + super().__init__() + + assert len(input_shape) == 3 + self._in_c, self._in_h, self._in_w = input_shape + + if num_kp is not None: + self.nets = torch.nn.Conv2d(self._in_c, num_kp, kernel_size=1) + self._out_c = num_kp + else: + self.nets = None + self._out_c = self._in_c + + # we could use torch.linspace directly but that seems to behave slightly differently than numpy + # and causes a small degradation in pc_success of pre-trained models. + pos_x, pos_y = np.meshgrid(np.linspace(-1.0, 1.0, self._in_w), np.linspace(-1.0, 1.0, self._in_h)) + pos_x = torch.from_numpy(pos_x.reshape(self._in_h * self._in_w, 1)).float() + pos_y = torch.from_numpy(pos_y.reshape(self._in_h * self._in_w, 1)).float() + # register as buffer so it's moved to the correct device. + self.register_buffer("pos_grid", torch.cat([pos_x, pos_y], dim=1)) + + def forward(self, features: Tensor) -> Tensor: + """ + Args: + features: (B, C, H, W) input feature maps. + Returns: + (B, K, 2) image-space coordinates of keypoints. + """ + if self.nets is not None: + features = self.nets(features) + + # [B, K, H, W] -> [B * K, H * W] where K is number of keypoints + features = features.reshape(-1, self._in_h * self._in_w) + # 2d softmax normalization + attention = F.softmax(features, dim=-1) + # [B * K, H * W] x [H * W, 2] -> [B * K, 2] for spatial coordinate mean in x and y dimensions + expected_xy = attention @ self.pos_grid + # reshape to [B, K, 2] + feature_keypoints = expected_xy.view(-1, self._out_c, 2) + + return feature_keypoints + + +class DiffusionRgbEncoder(nn.Module): + """Encodes an RGB image into a 1D feature vector. + + Includes the ability to normalize and crop the image first. + """ + + def __init__(self, config: DiffusionConfig): + super().__init__() + # Set up optional preprocessing. + if config.crop_shape is not None: + self.do_crop = True + # Always use center crop for eval + self.center_crop = torchvision.transforms.CenterCrop(config.crop_shape) + if config.crop_is_random: + self.maybe_random_crop = torchvision.transforms.RandomCrop(config.crop_shape) + else: + self.maybe_random_crop = self.center_crop + else: + self.do_crop = False + + # Set up backbone. + backbone_model = getattr(torchvision.models, config.vision_backbone)( + weights=config.pretrained_backbone_weights + ) + # Note: This assumes that the layer4 feature map is children()[-3] + # TODO(alexander-soare): Use a safer alternative. + self.backbone = nn.Sequential(*(list(backbone_model.children())[:-2])) + if config.use_group_norm: + if config.pretrained_backbone_weights: + raise ValueError( + "You can't replace BatchNorm in a pretrained model without ruining the weights!" + ) + self.backbone = _replace_submodules( + root_module=self.backbone, + predicate=lambda x: isinstance(x, nn.BatchNorm2d), + func=lambda x: nn.GroupNorm(num_groups=x.num_features // 16, num_channels=x.num_features), + ) + + # Set up pooling and final layers. + # Use a dry run to get the feature map shape. + # The dummy input should take the number of image channels from `config.image_features` and it should + # use the height and width from `config.crop_shape` if it is provided, otherwise it should use the + # height and width from `config.image_features`. + + # Note: we have a check in the config class to make sure all images have the same shape. 
+        images_shape = next(iter(config.image_features.values())).shape
+        dummy_shape_h_w = config.crop_shape if config.crop_shape is not None else images_shape[1:]
+        dummy_shape = (1, images_shape[0], *dummy_shape_h_w)
+        feature_map_shape = get_output_shape(self.backbone, dummy_shape)[1:]
+
+        self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints)
+        self.feature_dim = config.spatial_softmax_num_keypoints * 2
+        self.out = nn.Linear(config.spatial_softmax_num_keypoints * 2, self.feature_dim)
+        self.relu = nn.ReLU()
+
+    def forward(self, x: Tensor) -> Tensor:
+        """
+        Args:
+            x: (B, C, H, W) image tensor with pixel values in [0, 1].
+        Returns:
+            (B, D) image feature.
+        """
+        # Preprocess: maybe crop (if it was set up in the __init__).
+        if self.do_crop:
+            if self.training:  # noqa: SIM108
+                x = self.maybe_random_crop(x)
+            else:
+                # Always use center crop for eval.
+                x = self.center_crop(x)
+        # Extract backbone feature.
+        x = torch.flatten(self.pool(self.backbone(x)), start_dim=1)
+        # Final linear layer with non-linearity.
+        x = self.relu(self.out(x))
+        return x
+
+
+def _replace_submodules(
+    root_module: nn.Module, predicate: Callable[[nn.Module], bool], func: Callable[[nn.Module], nn.Module]
+) -> nn.Module:
+    """
+    Args:
+        root_module: The module for which the submodules need to be replaced
+        predicate: Takes a module as an argument and must return True if that module is to be replaced.
+        func: Takes a module as an argument and returns a new module to replace it with.
+    Returns:
+        The root module with its submodules replaced.
+    """
+    if predicate(root_module):
+        return func(root_module)
+
+    replace_list = [k.split(".") for k, m in root_module.named_modules(remove_duplicate=True) if predicate(m)]
+    for *parents, k in replace_list:
+        parent_module = root_module
+        if len(parents) > 0:
+            parent_module = root_module.get_submodule(".".join(parents))
+        if isinstance(parent_module, nn.Sequential):
+            src_module = parent_module[int(k)]
+        else:
+            src_module = getattr(parent_module, k)
+        tgt_module = func(src_module)
+        if isinstance(parent_module, nn.Sequential):
+            parent_module[int(k)] = tgt_module
+        else:
+            setattr(parent_module, k, tgt_module)
+    # Verify that all modules matching the predicate have been replaced.
+    assert not any(predicate(m) for _, m in root_module.named_modules(remove_duplicate=True))
+    return root_module
+
+
+class DiffusionSinusoidalPosEmb(nn.Module):
+    """1D sinusoidal positional embeddings as in Attention is All You Need."""
+
+    def __init__(self, dim: int):
+        super().__init__()
+        self.dim = dim
+
+    def forward(self, x: Tensor) -> Tensor:
+        device = x.device
+        half_dim = self.dim // 2
+        emb = math.log(10000) / (half_dim - 1)
+        emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
+        emb = x.unsqueeze(-1) * emb.unsqueeze(0)
+        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+        return emb
+
+
+class DiffusionConv1dBlock(nn.Module):
+    """Conv1d --> GroupNorm --> Mish"""
+
+    def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8):
+        super().__init__()
+
+        self.block = nn.Sequential(
+            nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2),
+            nn.GroupNorm(n_groups, out_channels),
+            nn.Mish(),
+        )
+
+    def forward(self, x):
+        return self.block(x)
+
+
+class DiffusionConditionalUnet1d(nn.Module):
+    """A 1D convolutional UNet with FiLM modulation for conditioning.
+
+    Note: this removes local conditioning as compared to the original diffusion policy code.
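+
+    Overall (illustrative) shapes: the Unet maps a noisy action trajectory (B, horizon, action_dim)
+    to a prediction of the same shape, conditioned on the diffusion timestep and the global
+    conditioning vector via FiLM.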
+ """ + + def __init__(self, config: DiffusionConfig, global_cond_dim: int): + super().__init__() + + self.config = config + + # Encoder for the diffusion timestep. + self.diffusion_step_encoder = nn.Sequential( + DiffusionSinusoidalPosEmb(config.diffusion_step_embed_dim), + nn.Linear(config.diffusion_step_embed_dim, config.diffusion_step_embed_dim * 4), + nn.Mish(), + nn.Linear(config.diffusion_step_embed_dim * 4, config.diffusion_step_embed_dim), + ) + + # The FiLM conditioning dimension. + cond_dim = config.diffusion_step_embed_dim + global_cond_dim + + # In channels / out channels for each downsampling block in the Unet's encoder. For the decoder, we + # just reverse these. + in_out = [(config.action_feature.shape[0], config.down_dims[0])] + list( + zip(config.down_dims[:-1], config.down_dims[1:], strict=True) + ) + + # Unet encoder. + common_res_block_kwargs = { + "cond_dim": cond_dim, + "kernel_size": config.kernel_size, + "n_groups": config.n_groups, + "use_film_scale_modulation": config.use_film_scale_modulation, + } + self.down_modules = nn.ModuleList([]) + for ind, (dim_in, dim_out) in enumerate(in_out): + is_last = ind >= (len(in_out) - 1) + self.down_modules.append( + nn.ModuleList( + [ + DiffusionConditionalResidualBlock1d(dim_in, dim_out, **common_res_block_kwargs), + DiffusionConditionalResidualBlock1d(dim_out, dim_out, **common_res_block_kwargs), + # Downsample as long as it is not the last block. + nn.Conv1d(dim_out, dim_out, 3, 2, 1) if not is_last else nn.Identity(), + ] + ) + ) + + # Processing in the middle of the auto-encoder. + self.mid_modules = nn.ModuleList( + [ + DiffusionConditionalResidualBlock1d( + config.down_dims[-1], config.down_dims[-1], **common_res_block_kwargs + ), + DiffusionConditionalResidualBlock1d( + config.down_dims[-1], config.down_dims[-1], **common_res_block_kwargs + ), + ] + ) + + # Unet decoder. + self.up_modules = nn.ModuleList([]) + for ind, (dim_out, dim_in) in enumerate(reversed(in_out[1:])): + is_last = ind >= (len(in_out) - 1) + self.up_modules.append( + nn.ModuleList( + [ + # dim_in * 2, because it takes the encoder's skip connection as well + DiffusionConditionalResidualBlock1d(dim_in * 2, dim_out, **common_res_block_kwargs), + DiffusionConditionalResidualBlock1d(dim_out, dim_out, **common_res_block_kwargs), + # Upsample as long as it is not the last block. + nn.ConvTranspose1d(dim_out, dim_out, 4, 2, 1) if not is_last else nn.Identity(), + ] + ) + ) + + self.final_conv = nn.Sequential( + DiffusionConv1dBlock(config.down_dims[0], config.down_dims[0], kernel_size=config.kernel_size), + nn.Conv1d(config.down_dims[0], config.action_feature.shape[0], 1), + ) + + def forward(self, x: Tensor, timestep: Tensor | int, global_cond=None) -> Tensor: + """ + Args: + x: (B, T, input_dim) tensor for input to the Unet. + timestep: (B,) tensor of (timestep_we_are_denoising_from - 1). + global_cond: (B, global_cond_dim) + output: (B, T, input_dim) + Returns: + (B, T, input_dim) diffusion model prediction. + """ + # For 1D convolutions we'll need feature dimension first. + x = einops.rearrange(x, "b t d -> b d t") + + timesteps_embed = self.diffusion_step_encoder(timestep) + + # If there is a global conditioning feature, concatenate it to the timestep embedding. + if global_cond is not None: + global_feature = torch.cat([timesteps_embed, global_cond], axis=-1) + else: + global_feature = timesteps_embed + + # Run encoder, keeping track of skip features to pass to the decoder. 
+ encoder_skip_features: list[Tensor] = [] + for resnet, resnet2, downsample in self.down_modules: + x = resnet(x, global_feature) + x = resnet2(x, global_feature) + encoder_skip_features.append(x) + x = downsample(x) + + for mid_module in self.mid_modules: + x = mid_module(x, global_feature) + + # Run decoder, using the skip features from the encoder. + for resnet, resnet2, upsample in self.up_modules: + x = torch.cat((x, encoder_skip_features.pop()), dim=1) + x = resnet(x, global_feature) + x = resnet2(x, global_feature) + x = upsample(x) + + x = self.final_conv(x) + + x = einops.rearrange(x, "b d t -> b t d") + return x + + +class DiffusionConditionalResidualBlock1d(nn.Module): + """ResNet style 1D convolutional block with FiLM modulation for conditioning.""" + + def __init__( + self, + in_channels: int, + out_channels: int, + cond_dim: int, + kernel_size: int = 3, + n_groups: int = 8, + # Set to True to do scale modulation with FiLM as well as bias modulation (defaults to False meaning + # FiLM just modulates bias). + use_film_scale_modulation: bool = False, + ): + super().__init__() + + self.use_film_scale_modulation = use_film_scale_modulation + self.out_channels = out_channels + + self.conv1 = DiffusionConv1dBlock(in_channels, out_channels, kernel_size, n_groups=n_groups) + + # FiLM modulation (https://huggingface.co/papers/1709.07871) outputs per-channel bias and (maybe) scale. + cond_channels = out_channels * 2 if use_film_scale_modulation else out_channels + self.cond_encoder = nn.Sequential(nn.Mish(), nn.Linear(cond_dim, cond_channels)) + + self.conv2 = DiffusionConv1dBlock(out_channels, out_channels, kernel_size, n_groups=n_groups) + + # A final convolution for dimension matching the residual (if needed). + self.residual_conv = ( + nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels else nn.Identity() + ) + + def forward(self, x: Tensor, cond: Tensor) -> Tensor: + """ + Args: + x: (B, in_channels, T) + cond: (B, cond_dim) + Returns: + (B, out_channels, T) + """ + out = self.conv1(x) + + # Get condition embedding. Unsqueeze for broadcasting to `out`, resulting in (B, out_channels, 1). + cond_embed = self.cond_encoder(cond).unsqueeze(-1) + if self.use_film_scale_modulation: + # Treat the embedding as a list of scales and biases. + scale = cond_embed[:, : self.out_channels] + bias = cond_embed[:, self.out_channels :] + out = scale * out + bias + else: + # Treat the embedding as biases. + out = out + cond_embed + + out = self.conv2(out) + out = out + self.residual_conv(x) + return out diff --git a/src/lerobot/policies/diffusion/processor_diffusion.py b/src/lerobot/policies/diffusion/processor_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..4e304b3ff59f4edb18e2d698db0d2eddc7769179 --- /dev/null +++ b/src/lerobot/policies/diffusion/processor_diffusion.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python + +# Copyright 2024 Columbia Artificial Intelligence, Robotics Lab, +# and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any
+
+import torch
+
+from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
+from lerobot.processor import (
+    AddBatchDimensionProcessorStep,
+    DeviceProcessorStep,
+    NormalizerProcessorStep,
+    PolicyAction,
+    PolicyProcessorPipeline,
+    RenameObservationsProcessorStep,
+    UnnormalizerProcessorStep,
+)
+from lerobot.processor.converters import policy_action_to_transition, transition_to_policy_action
+from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME
+
+
+def make_diffusion_pre_post_processors(
+    config: DiffusionConfig,
+    dataset_stats: dict[str, dict[str, torch.Tensor]] | None = None,
+) -> tuple[
+    PolicyProcessorPipeline[dict[str, Any], dict[str, Any]],
+    PolicyProcessorPipeline[PolicyAction, PolicyAction],
+]:
+    """
+    Constructs pre-processor and post-processor pipelines for a diffusion policy.
+
+    The pre-processing pipeline prepares the input data for the model by:
+    1. Renaming features.
+    2. Adding a batch dimension.
+    3. Moving the data to the specified device.
+    4. Normalizing the input and output features based on dataset statistics.
+
+    The post-processing pipeline handles the model's output by:
+    1. Unnormalizing the output features to their original scale.
+    2. Moving the data to the CPU.
+
+    Args:
+        config: The configuration object for the diffusion policy,
+            containing feature definitions, normalization mappings, and device information.
+        dataset_stats: A dictionary of statistics used for normalization.
+            Defaults to None.
+
+    Returns:
+        A tuple containing the configured pre-processor and post-processor pipelines.
+    """
+
+    input_steps = [
+        RenameObservationsProcessorStep(rename_map={}),
+        AddBatchDimensionProcessorStep(),
+        DeviceProcessorStep(device=config.device),
+        NormalizerProcessorStep(
+            features={**config.input_features, **config.output_features},
+            norm_map=config.normalization_mapping,
+            stats=dataset_stats,
+        ),
+    ]
+    output_steps = [
+        UnnormalizerProcessorStep(
+            features=config.output_features, norm_map=config.normalization_mapping, stats=dataset_stats
+        ),
+        DeviceProcessorStep(device="cpu"),
+    ]
+    return (
+        PolicyProcessorPipeline[dict[str, Any], dict[str, Any]](
+            steps=input_steps,
+            name=POLICY_PREPROCESSOR_DEFAULT_NAME,
+        ),
+        PolicyProcessorPipeline[PolicyAction, PolicyAction](
+            steps=output_steps,
+            name=POLICY_POSTPROCESSOR_DEFAULT_NAME,
+            to_transition=policy_action_to_transition,
+            to_output=transition_to_policy_action,
+        ),
+    )
diff --git a/src/lerobot/policies/factory.py b/src/lerobot/policies/factory.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce601cef0a451ea8a61588be64fd78656fe15b10
--- /dev/null
+++ b/src/lerobot/policies/factory.py
@@ -0,0 +1,427 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
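+
+# Factory helpers for instantiating policies, their configurations, and the matching
+# pre/post-processor pipelines.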
+
+from __future__ import annotations
+
+import logging
+from typing import Any, TypedDict
+
+import torch
+from typing_extensions import Unpack
+
+from lerobot.configs.policies import PreTrainedConfig
+from lerobot.configs.types import FeatureType
+from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata
+from lerobot.datasets.utils import dataset_to_policy_features
+from lerobot.envs.configs import EnvConfig
+from lerobot.envs.utils import env_to_policy_features
+from lerobot.policies.act.configuration_act import ACTConfig
+from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig
+from lerobot.policies.groot.configuration_groot import GrootConfig
+from lerobot.policies.pi0.configuration_pi0 import PI0Config
+from lerobot.policies.pi05.configuration_pi05 import PI05Config
+from lerobot.policies.pretrained import PreTrainedPolicy
+from lerobot.policies.sac.configuration_sac import SACConfig
+from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig
+from lerobot.policies.smolvla.configuration_smolvla import SmolVLAConfig
+from lerobot.policies.tdmpc.configuration_tdmpc import TDMPCConfig
+from lerobot.policies.utils import validate_visual_features_consistency
+from lerobot.policies.vqbet.configuration_vqbet import VQBeTConfig
+from lerobot.processor import PolicyAction, PolicyProcessorPipeline
+from lerobot.processor.converters import (
+    batch_to_transition,
+    policy_action_to_transition,
+    transition_to_batch,
+    transition_to_policy_action,
+)
+from lerobot.utils.constants import POLICY_POSTPROCESSOR_DEFAULT_NAME, POLICY_PREPROCESSOR_DEFAULT_NAME
+
+
+def get_policy_class(name: str) -> type[PreTrainedPolicy]:
+    """
+    Retrieves a policy class by its registered name.
+
+    This function uses dynamic imports to avoid loading all policy classes into memory
+    at once, improving startup time and reducing dependencies.
+
+    Args:
+        name: The name of the policy. Supported names are "tdmpc", "diffusion", "act",
+            "vqbet", "pi0", "pi05", "sac", "reward_classifier", "smolvla", "groot".
+
+    Returns:
+        The policy class corresponding to the given name.
+
+    Raises:
+        NotImplementedError: If the policy name is not recognized.
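+
+    Example (illustrative, assuming a `DiffusionConfig` instance is in scope as `config`):
+        >>> policy_cls = get_policy_class("diffusion")
+        >>> policy = policy_cls(config=config)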
+ """ + if name == "tdmpc": + from lerobot.policies.tdmpc.modeling_tdmpc import TDMPCPolicy + + return TDMPCPolicy + elif name == "diffusion": + from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy + + return DiffusionPolicy + elif name == "act": + from lerobot.policies.act.modeling_act import ACTPolicy + + return ACTPolicy + elif name == "vqbet": + from lerobot.policies.vqbet.modeling_vqbet import VQBeTPolicy + + return VQBeTPolicy + elif name == "pi0": + from lerobot.policies.pi0.modeling_pi0 import PI0Policy + + return PI0Policy + elif name == "pi05": + from lerobot.policies.pi05.modeling_pi05 import PI05Policy + + return PI05Policy + elif name == "sac": + from lerobot.policies.sac.modeling_sac import SACPolicy + + return SACPolicy + elif name == "reward_classifier": + from lerobot.policies.sac.reward_model.modeling_classifier import Classifier + + return Classifier + elif name == "smolvla": + from lerobot.policies.smolvla.modeling_smolvla import SmolVLAPolicy + + return SmolVLAPolicy + elif name == "groot": + from lerobot.policies.groot.modeling_groot import GrootPolicy + + return GrootPolicy + else: + raise NotImplementedError(f"Policy with name {name} is not implemented.") + + +def make_policy_config(policy_type: str, **kwargs) -> PreTrainedConfig: + """ + Instantiates a policy configuration object based on the policy type. + + This factory function simplifies the creation of policy configuration objects by + mapping a string identifier to the corresponding config class. + + Args: + policy_type: The type of the policy. Supported types include "tdmpc", + "diffusion", "act", "vqbet", "pi0", "pi05", "sac", "smolvla", + "reward_classifier". + **kwargs: Keyword arguments to be passed to the configuration class constructor. + + Returns: + An instance of a `PreTrainedConfig` subclass. + + Raises: + ValueError: If the `policy_type` is not recognized. + """ + if policy_type == "tdmpc": + return TDMPCConfig(**kwargs) + elif policy_type == "diffusion": + return DiffusionConfig(**kwargs) + elif policy_type == "act": + return ACTConfig(**kwargs) + elif policy_type == "vqbet": + return VQBeTConfig(**kwargs) + elif policy_type == "pi0": + return PI0Config(**kwargs) + elif policy_type == "pi05": + return PI05Config(**kwargs) + elif policy_type == "sac": + return SACConfig(**kwargs) + elif policy_type == "smolvla": + return SmolVLAConfig(**kwargs) + elif policy_type == "reward_classifier": + return RewardClassifierConfig(**kwargs) + elif policy_type == "groot": + return GrootConfig(**kwargs) + else: + raise ValueError(f"Policy type '{policy_type}' is not available.") + + +class ProcessorConfigKwargs(TypedDict, total=False): + """ + A TypedDict defining the keyword arguments for processor configuration. + + This provides type hints for the optional arguments passed to `make_pre_post_processors`, + improving code clarity and enabling static analysis. + + Attributes: + preprocessor_config_filename: The filename for the preprocessor configuration. + postprocessor_config_filename: The filename for the postprocessor configuration. + preprocessor_overrides: A dictionary of overrides for the preprocessor configuration. + postprocessor_overrides: A dictionary of overrides for the postprocessor configuration. + dataset_stats: Dataset statistics for normalization. 
+ """ + + preprocessor_config_filename: str | None + postprocessor_config_filename: str | None + preprocessor_overrides: dict[str, Any] | None + postprocessor_overrides: dict[str, Any] | None + dataset_stats: dict[str, dict[str, torch.Tensor]] | None + + +def make_pre_post_processors( + policy_cfg: PreTrainedConfig, + pretrained_path: str | None = None, + **kwargs: Unpack[ProcessorConfigKwargs], +) -> tuple[ + PolicyProcessorPipeline[dict[str, Any], dict[str, Any]], + PolicyProcessorPipeline[PolicyAction, PolicyAction], +]: + """ + Create or load pre- and post-processor pipelines for a given policy. + + This function acts as a factory. It can either load existing processor pipelines + from a pretrained path or create new ones from scratch based on the policy + configuration. Each policy type has a dedicated factory function for its + processors (e.g., `make_tdmpc_pre_post_processors`). + + Args: + policy_cfg: The configuration of the policy for which to create processors. + pretrained_path: An optional path to load pretrained processor pipelines from. + If provided, pipelines are loaded from this path. + **kwargs: Keyword arguments for processor configuration, as defined in + `ProcessorConfigKwargs`. + + Returns: + A tuple containing the input (pre-processor) and output (post-processor) pipelines. + + Raises: + NotImplementedError: If a processor factory is not implemented for the given + policy configuration type. + """ + if pretrained_path: + # TODO(Steven): Temporary patch, implement correctly the processors for Gr00t + if isinstance(policy_cfg, GrootConfig): + # GROOT handles normalization in groot_pack_inputs_v3 step + # Need to override both stats AND normalize_min_max since saved config might be empty + preprocessor_overrides = {} + postprocessor_overrides = {} + preprocessor_overrides["groot_pack_inputs_v3"] = { + "stats": kwargs.get("dataset_stats"), + "normalize_min_max": True, + } + + # Also ensure postprocessing slices to env action dim and unnormalizes with dataset stats + env_action_dim = policy_cfg.output_features["action"].shape[0] + postprocessor_overrides["groot_action_unpack_unnormalize_v1"] = { + "stats": kwargs.get("dataset_stats"), + "normalize_min_max": True, + "env_action_dim": env_action_dim, + } + kwargs["preprocessor_overrides"] = preprocessor_overrides + kwargs["postprocessor_overrides"] = postprocessor_overrides + + return ( + PolicyProcessorPipeline.from_pretrained( + pretrained_model_name_or_path=pretrained_path, + config_filename=kwargs.get( + "preprocessor_config_filename", f"{POLICY_PREPROCESSOR_DEFAULT_NAME}.json" + ), + overrides=kwargs.get("preprocessor_overrides", {}), + to_transition=batch_to_transition, + to_output=transition_to_batch, + ), + PolicyProcessorPipeline.from_pretrained( + pretrained_model_name_or_path=pretrained_path, + config_filename=kwargs.get( + "postprocessor_config_filename", f"{POLICY_POSTPROCESSOR_DEFAULT_NAME}.json" + ), + overrides=kwargs.get("postprocessor_overrides", {}), + to_transition=policy_action_to_transition, + to_output=transition_to_policy_action, + ), + ) + + # Create a new processor based on policy type + if isinstance(policy_cfg, TDMPCConfig): + from lerobot.policies.tdmpc.processor_tdmpc import make_tdmpc_pre_post_processors + + processors = make_tdmpc_pre_post_processors( + config=policy_cfg, + dataset_stats=kwargs.get("dataset_stats"), + ) + + elif isinstance(policy_cfg, DiffusionConfig): + from lerobot.policies.diffusion.processor_diffusion import make_diffusion_pre_post_processors + + processors = 
make_diffusion_pre_post_processors( + config=policy_cfg, + dataset_stats=kwargs.get("dataset_stats"), + ) + + elif isinstance(policy_cfg, ACTConfig): + from lerobot.policies.act.processor_act import make_act_pre_post_processors + + processors = make_act_pre_post_processors( + config=policy_cfg, + dataset_stats=kwargs.get("dataset_stats"), + ) + + elif isinstance(policy_cfg, VQBeTConfig): + from lerobot.policies.vqbet.processor_vqbet import make_vqbet_pre_post_processors + + processors = make_vqbet_pre_post_processors( + config=policy_cfg, + dataset_stats=kwargs.get("dataset_stats"), + ) + + elif isinstance(policy_cfg, PI0Config): + from lerobot.policies.pi0.processor_pi0 import make_pi0_pre_post_processors + + processors = make_pi0_pre_post_processors( + config=policy_cfg, + dataset_stats=kwargs.get("dataset_stats"), + ) + + elif isinstance(policy_cfg, PI05Config): + from lerobot.policies.pi05.processor_pi05 import make_pi05_pre_post_processors + + processors = make_pi05_pre_post_processors( + config=policy_cfg, + dataset_stats=kwargs.get("dataset_stats"), + ) + + elif isinstance(policy_cfg, SACConfig): + from lerobot.policies.sac.processor_sac import make_sac_pre_post_processors + + processors = make_sac_pre_post_processors( + config=policy_cfg, + dataset_stats=kwargs.get("dataset_stats"), + ) + + elif isinstance(policy_cfg, RewardClassifierConfig): + from lerobot.policies.sac.reward_model.processor_classifier import make_classifier_processor + + processors = make_classifier_processor( + config=policy_cfg, + dataset_stats=kwargs.get("dataset_stats"), + ) + + elif isinstance(policy_cfg, SmolVLAConfig): + from lerobot.policies.smolvla.processor_smolvla import make_smolvla_pre_post_processors + + processors = make_smolvla_pre_post_processors( + config=policy_cfg, + dataset_stats=kwargs.get("dataset_stats"), + ) + + elif isinstance(policy_cfg, GrootConfig): + from lerobot.policies.groot.processor_groot import make_groot_pre_post_processors + + processors = make_groot_pre_post_processors( + config=policy_cfg, + dataset_stats=kwargs.get("dataset_stats"), + ) + + else: + raise NotImplementedError(f"Processor for policy type '{policy_cfg.type}' is not implemented.") + + return processors + + +def make_policy( + cfg: PreTrainedConfig, + ds_meta: LeRobotDatasetMetadata | None = None, + env_cfg: EnvConfig | None = None, + rename_map: dict[str, str] | None = None, +) -> PreTrainedPolicy: + """ + Instantiate a policy model. + + This factory function handles the logic of creating a policy, which requires + determining the input and output feature shapes. These shapes can be derived + either from a `LeRobotDatasetMetadata` object or an `EnvConfig` object. The function + can either initialize a new policy from scratch or load a pretrained one. + + Args: + cfg: The configuration for the policy to be created. If `cfg.pretrained_path` is + set, the policy will be loaded with weights from that path. + ds_meta: Dataset metadata used to infer feature shapes and types. Also provides + statistics for normalization layers. + env_cfg: Environment configuration used to infer feature shapes and types. + One of `ds_meta` or `env_cfg` must be provided. + rename_map: Optional mapping of dataset or environment feature keys to match + expected policy feature names (e.g., `"left"` → `"camera1"`). + + Returns: + An instantiated and device-placed policy model. + + Raises: + ValueError: If both or neither of `ds_meta` and `env_cfg` are provided. 
+ NotImplementedError: If attempting to use an unsupported policy-backend + combination (e.g., VQBeT with 'mps'). + """ + if bool(ds_meta) == bool(env_cfg): + raise ValueError("Either one of a dataset metadata or a sim env must be provided.") + + # NOTE: Currently, if you try to run vqbet with mps backend, you'll get this error. + # TODO(aliberts, rcadene): Implement a check_backend_compatibility in policies? + # NotImplementedError: The operator 'aten::unique_dim' is not currently implemented for the MPS device. If + # you want this op to be added in priority during the prototype phase of this feature, please comment on + # https://github.com/pytorch/pytorch/issues/77764. As a temporary fix, you can set the environment + # variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU as a fallback for this op. WARNING: this will be + # slower than running natively on MPS. + if cfg.type == "vqbet" and cfg.device == "mps": + raise NotImplementedError( + "Current implementation of VQBeT does not support `mps` backend. " + "Please use `cpu` or `cuda` backend." + ) + + policy_cls = get_policy_class(cfg.type) + + kwargs = {} + if ds_meta is not None: + features = dataset_to_policy_features(ds_meta.features) + else: + if not cfg.pretrained_path: + logging.warning( + "You are instantiating a policy from scratch and its features are parsed from an environment " + "rather than a dataset. Normalization modules inside the policy will have infinite values " + "by default without stats from a dataset." + ) + if env_cfg is None: + raise ValueError("env_cfg cannot be None when ds_meta is not provided") + features = env_to_policy_features(env_cfg) + + if not cfg.output_features: + cfg.output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} + if not cfg.input_features: + cfg.input_features = {key: ft for key, ft in features.items() if key not in cfg.output_features} + kwargs["config"] = cfg + + if cfg.pretrained_path: + # Load a pretrained policy and override the config if needed (for example, if there are inference-time + # hyperparameters that we want to vary). + kwargs["pretrained_name_or_path"] = cfg.pretrained_path + policy = policy_cls.from_pretrained(**kwargs) + else: + # Make a fresh policy. + policy = policy_cls(**kwargs) + + policy.to(cfg.device) + assert isinstance(policy, torch.nn.Module) + + # policy = torch.compile(policy, mode="reduce-overhead") + + if not rename_map: + validate_visual_features_consistency(cfg, features) + # TODO: (jadechoghari) - add a check_state(cfg, features) and check_action(cfg, features) + + return policy diff --git a/src/lerobot/policies/groot/README.md b/src/lerobot/policies/groot/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ff4937f5c25888cda9942094ed36e684a72dd564 --- /dev/null +++ b/src/lerobot/policies/groot/README.md @@ -0,0 +1 @@ +../../../../docs/source/policy_groot_README.md \ No newline at end of file diff --git a/src/lerobot/policies/groot/__init__.py b/src/lerobot/policies/groot/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a25fb83d2b7cf6f42fe93c08df1c352e28d14352 --- /dev/null +++ b/src/lerobot/policies/groot/__init__.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python + +# Copyright 2025 Nvidia and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .configuration_groot import GrootConfig +from .modeling_groot import GrootPolicy +from .processor_groot import make_groot_pre_post_processors + +__all__ = ["GrootConfig", "GrootPolicy", "make_groot_pre_post_processors"] diff --git a/src/lerobot/policies/groot/action_head/__init__.py b/src/lerobot/policies/groot/action_head/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dac9a4d7496eb38831f1f3c820a90d50e25e2a7e --- /dev/null +++ b/src/lerobot/policies/groot/action_head/__init__.py @@ -0,0 +1,14 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/lerobot/policies/groot/action_head/action_encoder.py b/src/lerobot/policies/groot/action_head/action_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..57317af019fabf84e0f0c9328d9482a16b5aefe7 --- /dev/null +++ b/src/lerobot/policies/groot/action_head/action_encoder.py @@ -0,0 +1,54 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn + + +def swish(x): + return x * torch.sigmoid(x) + + +class SinusoidalPositionalEncoding(nn.Module): + """ + Produces a sinusoidal encoding of shape (B, T, w) + given timesteps of shape (B, T). 
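+
+    Each timestep tau is encoded as
+    [sin(tau * w_0), ..., sin(tau * w_{k-1}), cos(tau * w_0), ..., cos(tau * w_{k-1})],
+    where k = embedding_dim // 2 and w_i = 10000 ** (-i / k).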
+ """ + + def __init__(self, embedding_dim): + super().__init__() + self.embedding_dim = embedding_dim + + def forward(self, timesteps): + # timesteps: shape (B, T) + # We'll compute sin/cos frequencies across dim T + timesteps = timesteps.float() # ensure float + + b, t = timesteps.shape + device = timesteps.device + + half_dim = self.embedding_dim // 2 + # typical log space frequencies for sinusoidal encoding + exponent = -torch.arange(half_dim, dtype=torch.float, device=device) * ( + torch.log(torch.tensor(10000.0)) / half_dim + ) + # Expand timesteps to (B, T, 1) then multiply + freqs = timesteps.unsqueeze(-1) * exponent.exp() # (B, T, half_dim) + + sin = torch.sin(freqs) + cos = torch.cos(freqs) + enc = torch.cat([sin, cos], dim=-1) # (B, T, w) + + return enc diff --git a/src/lerobot/policies/groot/action_head/cross_attention_dit.py b/src/lerobot/policies/groot/action_head/cross_attention_dit.py new file mode 100644 index 0000000000000000000000000000000000000000..707882fa60797b24028b2c8ab02d8a8b61d088d6 --- /dev/null +++ b/src/lerobot/policies/groot/action_head/cross_attention_dit.py @@ -0,0 +1,370 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import torch +import torch.nn.functional as F # noqa: N812 +from diffusers import ConfigMixin, ModelMixin +from diffusers.configuration_utils import register_to_config +from diffusers.models.attention import Attention, FeedForward +from diffusers.models.embeddings import ( + SinusoidalPositionalEmbedding, + TimestepEmbedding, + Timesteps, +) +from torch import nn + + +class TimestepEncoder(nn.Module): + def __init__(self, embedding_dim, compute_dtype=torch.float32): + super().__init__() + self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=1) + self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) + + def forward(self, timesteps): + dtype = next(self.parameters()).dtype + timesteps_proj = self.time_proj(timesteps).to(dtype) + timesteps_emb = self.timestep_embedder(timesteps_proj) # (N, D) + return timesteps_emb + + +class AdaLayerNorm(nn.Module): + def __init__( + self, + embedding_dim: int, + norm_elementwise_affine: bool = False, + norm_eps: float = 1e-5, + chunk_dim: int = 0, + ): + super().__init__() + self.chunk_dim = chunk_dim + output_dim = embedding_dim * 2 + self.silu = nn.SiLU() + self.linear = nn.Linear(embedding_dim, output_dim) + self.norm = nn.LayerNorm(output_dim // 2, norm_eps, norm_elementwise_affine) + + def forward( + self, + x: torch.Tensor, + temb: torch.Tensor | None = None, + ) -> torch.Tensor: + temb = self.linear(self.silu(temb)) + scale, shift = temb.chunk(2, dim=1) + x = self.norm(x) * (1 + scale[:, None]) + shift[:, None] + return x + + +class BasicTransformerBlock(nn.Module): + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + dropout=0.0, + cross_attention_dim: int | None = None, + activation_fn: str = "geglu", + attention_bias: bool = False, + upcast_attention: bool = False, + norm_elementwise_affine: bool = True, + norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen' + norm_eps: float = 1e-5, + final_dropout: bool = False, + attention_type: str = "default", + positional_embeddings: str | None = None, + num_positional_embeddings: int | None = None, + ff_inner_dim: int | None = None, + ff_bias: bool = True, + attention_out_bias: bool = True, + ): + super().__init__() + self.dim = dim + self.num_attention_heads = num_attention_heads + self.attention_head_dim = attention_head_dim + self.dropout = dropout + self.cross_attention_dim = cross_attention_dim + self.activation_fn = activation_fn + self.attention_bias = attention_bias + self.norm_elementwise_affine = norm_elementwise_affine + self.positional_embeddings = positional_embeddings + self.num_positional_embeddings = num_positional_embeddings + self.norm_type = norm_type + + if positional_embeddings and (num_positional_embeddings is None): + raise ValueError( + "If `positional_embeddings` type is defined, `num_positional_embeddings` must also be defined." + ) + + if positional_embeddings == "sinusoidal": + self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings) + else: + self.pos_embed = None + + # Define 3 blocks. Each block has its own normalization layer. + # 1. 
Self-Attn + if norm_type == "ada_norm": + self.norm1 = AdaLayerNorm(dim) + else: + self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) + + self.attn1 = Attention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + cross_attention_dim=cross_attention_dim, + upcast_attention=upcast_attention, + out_bias=attention_out_bias, + ) + + # 3. Feed-forward + self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) + self.ff = FeedForward( + dim, + dropout=dropout, + activation_fn=activation_fn, + final_dropout=final_dropout, + inner_dim=ff_inner_dim, + bias=ff_bias, + ) + if final_dropout: + self.final_dropout = nn.Dropout(dropout) + else: + self.final_dropout = None + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor | None = None, + encoder_hidden_states: torch.Tensor | None = None, + encoder_attention_mask: torch.Tensor | None = None, + temb: torch.LongTensor | None = None, + ) -> torch.Tensor: + # 0. Self-Attention + if self.norm_type == "ada_norm": + norm_hidden_states = self.norm1(hidden_states, temb) + else: + norm_hidden_states = self.norm1(hidden_states) + + if self.pos_embed is not None: + norm_hidden_states = self.pos_embed(norm_hidden_states) + + attn_output = self.attn1( + norm_hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + # encoder_attention_mask=encoder_attention_mask, + ) + if self.final_dropout: + attn_output = self.final_dropout(attn_output) + + hidden_states = attn_output + hidden_states + if hidden_states.ndim == 4: + hidden_states = hidden_states.squeeze(1) + + # 4. Feed-forward + norm_hidden_states = self.norm3(hidden_states) + ff_output = self.ff(norm_hidden_states) + + hidden_states = ff_output + hidden_states + if hidden_states.ndim == 4: + hidden_states = hidden_states.squeeze(1) + return hidden_states + + +class DiT(ModelMixin, ConfigMixin): + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + num_attention_heads: int = 8, + attention_head_dim: int = 64, + output_dim: int = 26, + num_layers: int = 12, + dropout: float = 0.1, + attention_bias: bool = True, + activation_fn: str = "gelu-approximate", + num_embeds_ada_norm: int | None = 1000, + upcast_attention: bool = False, + norm_type: str = "ada_norm", + norm_elementwise_affine: bool = False, + norm_eps: float = 1e-5, + max_num_positional_embeddings: int = 512, + compute_dtype=torch.float32, + final_dropout: bool = True, + positional_embeddings: str | None = "sinusoidal", + interleave_self_attention=False, + cross_attention_dim: int | None = None, + ): + super().__init__() + + self.attention_head_dim = attention_head_dim + self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim + self.gradient_checkpointing = False + + # Timestep encoder + self.timestep_encoder = TimestepEncoder( + embedding_dim=self.inner_dim, compute_dtype=self.config.compute_dtype + ) + + all_blocks = [] + for idx in range(self.config.num_layers): + use_self_attn = idx % 2 == 1 and interleave_self_attention + curr_cross_attention_dim = cross_attention_dim if not use_self_attn else None + + all_blocks += [ + BasicTransformerBlock( + self.inner_dim, + self.config.num_attention_heads, + self.config.attention_head_dim, + dropout=self.config.dropout, + activation_fn=self.config.activation_fn, + attention_bias=self.config.attention_bias, + upcast_attention=self.config.upcast_attention, + 
norm_type=norm_type, + norm_elementwise_affine=self.config.norm_elementwise_affine, + norm_eps=self.config.norm_eps, + positional_embeddings=positional_embeddings, + num_positional_embeddings=self.config.max_num_positional_embeddings, + final_dropout=final_dropout, + cross_attention_dim=curr_cross_attention_dim, + ) + ] + self.transformer_blocks = nn.ModuleList(all_blocks) + + # Output blocks + self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6) + self.proj_out_1 = nn.Linear(self.inner_dim, 2 * self.inner_dim) + self.proj_out_2 = nn.Linear(self.inner_dim, self.config.output_dim) + print( + "Total number of DiT parameters: ", + sum(p.numel() for p in self.parameters() if p.requires_grad), + ) + + def forward( + self, + hidden_states: torch.Tensor, # Shape: (B, T, D) + encoder_hidden_states: torch.Tensor, # Shape: (B, S, D) + timestep: torch.LongTensor | None = None, + encoder_attention_mask: torch.Tensor | None = None, + return_all_hidden_states: bool = False, + ): + # Encode timesteps + temb = self.timestep_encoder(timestep) + + # Process through transformer blocks - single pass through the blocks + hidden_states = hidden_states.contiguous() + encoder_hidden_states = encoder_hidden_states.contiguous() + + all_hidden_states = [hidden_states] + + # Process through transformer blocks + for idx, block in enumerate(self.transformer_blocks): + if idx % 2 == 1 and self.config.interleave_self_attention: + hidden_states = block( + hidden_states, + attention_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + temb=temb, + ) + else: + hidden_states = block( + hidden_states, + attention_mask=None, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=None, + temb=temb, + ) + all_hidden_states.append(hidden_states) + + # Output processing + conditioning = temb + shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1) + hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None] + if return_all_hidden_states: + return self.proj_out_2(hidden_states), all_hidden_states + else: + return self.proj_out_2(hidden_states) + + +class SelfAttentionTransformer(ModelMixin, ConfigMixin): + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + num_attention_heads: int = 8, + attention_head_dim: int = 64, + output_dim: int = 26, + num_layers: int = 12, + dropout: float = 0.1, + attention_bias: bool = True, + activation_fn: str = "gelu-approximate", + num_embeds_ada_norm: int | None = 1000, + upcast_attention: bool = False, + max_num_positional_embeddings: int = 512, + compute_dtype=torch.float32, + final_dropout: bool = True, + positional_embeddings: str | None = "sinusoidal", + interleave_self_attention=False, + ): + super().__init__() + + self.attention_head_dim = attention_head_dim + self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim + self.gradient_checkpointing = False + + self.transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + self.inner_dim, + self.config.num_attention_heads, + self.config.attention_head_dim, + dropout=self.config.dropout, + activation_fn=self.config.activation_fn, + attention_bias=self.config.attention_bias, + upcast_attention=self.config.upcast_attention, + positional_embeddings=positional_embeddings, + num_positional_embeddings=self.config.max_num_positional_embeddings, + final_dropout=final_dropout, + ) + for _ in range(self.config.num_layers) + ] + ) + print( + "Total number of SelfAttentionTransformer 
parameters: ", + sum(p.numel() for p in self.parameters() if p.requires_grad), + ) + + def forward( + self, + hidden_states: torch.Tensor, # Shape: (B, T, D) + return_all_hidden_states: bool = False, + ): + # Process through transformer blocks - single pass through the blocks + hidden_states = hidden_states.contiguous() + all_hidden_states = [hidden_states] + + # Process through transformer blocks + for _idx, block in enumerate(self.transformer_blocks): + hidden_states = block(hidden_states) + all_hidden_states.append(hidden_states) + + if return_all_hidden_states: + return hidden_states, all_hidden_states + else: + return hidden_states diff --git a/src/lerobot/policies/groot/action_head/flow_matching_action_head.py b/src/lerobot/policies/groot/action_head/flow_matching_action_head.py new file mode 100644 index 0000000000000000000000000000000000000000..274b46b0056a977c3170dc3ded45e4f701ba02cb --- /dev/null +++ b/src/lerobot/policies/groot/action_head/flow_matching_action_head.py @@ -0,0 +1,406 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import TYPE_CHECKING + +import torch +import torch.nn.functional as F # noqa: N812 +from torch import nn +from torch.distributions import Beta + +from lerobot.utils.import_utils import _transformers_available + +# Conditional import for type checking and lazy loading +if TYPE_CHECKING or _transformers_available: + from transformers import PretrainedConfig + from transformers.feature_extraction_utils import BatchFeature +else: + PretrainedConfig = object + BatchFeature = None + +from lerobot.policies.groot.action_head.action_encoder import ( + SinusoidalPositionalEncoding, + swish, +) + +from .cross_attention_dit import DiT, SelfAttentionTransformer + + +class CategorySpecificLinear(nn.Module): + def __init__(self, num_categories, input_dim, hidden_dim): + super().__init__() + self.num_categories = num_categories + # For each category, we have separate weights and biases. 
+ self.W = nn.Parameter(0.02 * torch.randn(num_categories, input_dim, hidden_dim)) + self.b = nn.Parameter(torch.zeros(num_categories, hidden_dim)) + + def forward(self, x, cat_ids): + selected_w = self.W[cat_ids] + selected_b = self.b[cat_ids] + return torch.bmm(x, selected_w) + selected_b.unsqueeze(1) + + +class CategorySpecificMLP(nn.Module): + def __init__(self, num_categories, input_dim, hidden_dim, output_dim): + super().__init__() + self.num_categories = num_categories + self.layer1 = CategorySpecificLinear(num_categories, input_dim, hidden_dim) + self.layer2 = CategorySpecificLinear(num_categories, hidden_dim, output_dim) + + def forward(self, x, cat_ids): + hidden = F.relu(self.layer1(x, cat_ids)) + return self.layer2(hidden, cat_ids) + + +class MultiEmbodimentActionEncoder(nn.Module): + def __init__(self, action_dim, hidden_size, num_embodiments): + super().__init__() + self.hidden_size = hidden_size + self.num_embodiments = num_embodiments + + # W1: R^{w x d}, W2: R^{w x 2w}, W3: R^{w x w} + self.W1 = CategorySpecificLinear(num_embodiments, action_dim, hidden_size) # (d -> w) + self.W2 = CategorySpecificLinear(num_embodiments, 2 * hidden_size, hidden_size) # (2w -> w) + self.W3 = CategorySpecificLinear(num_embodiments, hidden_size, hidden_size) # (w -> w) + self.pos_encoding = SinusoidalPositionalEncoding(hidden_size) + + def forward(self, actions, timesteps, cat_ids): + """ + actions: shape (B, T, action_dim) + timesteps: shape (B,) -- a single scalar per batch item + cat_ids: shape (B,) + returns: shape (B, T, hidden_size) + """ + b, t, _ = actions.shape + + # 1) Expand each batch's single scalar time 'tau' across all T steps + # so that shape => (B, T) + # e.g. if timesteps is (B,), replicate across T + if timesteps.dim() == 1 and timesteps.shape[0] == b: + # shape (B,) => (B,T) + timesteps = timesteps.unsqueeze(1).expand(-1, t) + else: + raise ValueError("Expected `timesteps` to have shape (B,) so we can replicate across T.") + + # 2) Standard action MLP step for shape => (B, T, w) + a_emb = self.W1(actions, cat_ids) + + # 3) Get the sinusoidal encoding (B, T, w) + tau_emb = self.pos_encoding(timesteps).to(dtype=a_emb.dtype) + + # 4) Concat along last dim => (B, T, 2w), then W2 => (B, T, w), swish + x = torch.cat([a_emb, tau_emb], dim=-1) + x = swish(self.W2(x, cat_ids)) + + # 5) Finally W3 => (B, T, w) + x = self.W3(x, cat_ids) + return x + + +@dataclass +class FlowmatchingActionHeadConfig(PretrainedConfig): + """NOTE: N1.5 uses XEmbFlowmatchingPolicyHeadConfig as action head""" + + add_pos_embed: bool = field(default=True, metadata={"help": "Whether to add positional embedding"}) + model_dtype: str = field(default="float32", metadata={"help": "Model data type."}) + diffusion_model_cfg: dict = field(default=None, metadata={"help": "Diffusion model configuration."}) + input_embedding_dim: int = field(default=1536, metadata={"help": "Input embedding channel dimension."}) + backbone_embedding_dim: int = field( + default=1536, metadata={"help": "Backbone embedding channel dimension."} + ) + + hidden_size: int = field(default=1024, metadata={"help": "Input embedding dimension."}) + max_seq_len: int = field(default=1024, metadata={"help": "Maximum Sequence Length"}) + action_dim: int = field(default=None, metadata={"help": "Action dimension."}) + action_horizon: int = field(default=None, metadata={"help": "Action horizon."}) + noise_beta_alpha: float = field(default=1.5, metadata={"help": ""}) + noise_beta_beta: float = field(default=1.0, metadata={"help": ""}) + noise_s: float = 
field(default=0.999, metadata={"help": "Flow matching noise Beta distribution s."}) + num_timestep_buckets: int = field( + default=1000, metadata={"help": "Number of timestep discretization buckets."} + ) + num_inference_timesteps: int = field( + default=None, + metadata={"help": "Number of inference steps for noise diffusion."}, + ) + max_num_embodiments: int = field(default=32, metadata={"help": "Number of embodiments."}) + tune_projector: bool = field(default=True, metadata={"help": "Whether to tune the projector."}) + tune_diffusion_model: bool = field( + default=True, metadata={"help": "Whether to tune the diffusion model."} + ) + load_pretrained_det_decode_layer_path: str = field( + default=None, metadata={"help": "Path to pretrained detection model."} + ) + detection_coeff: float = field(default=1.0, metadata={"help": "Detection coefficient."}) + + freeze_decode_layer: bool = field(default=False) + expand_batch: int = field(default=None) + use_vlln: bool = field(default=True) + + vl_self_attention_cfg: dict = field(default=None) + num_target_vision_tokens: int = field(default=32, metadata={"help": "Number of target vision tokens."}) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + for key, value in kwargs.items(): + setattr(self, key, value) + + +class FlowmatchingActionHead(nn.Module): + config_class = FlowmatchingActionHeadConfig + supports_gradient_checkpointing = True + + def __init__( + self, + config: FlowmatchingActionHeadConfig, + ): + super().__init__() + self.hidden_size = config.hidden_size + self.input_embedding_dim = config.input_embedding_dim + + self.model = DiT(**config.diffusion_model_cfg) + self.action_dim = config.action_dim + self.action_horizon = config.action_horizon + self.num_inference_timesteps = config.num_inference_timesteps + + self.state_encoder = CategorySpecificMLP( + num_categories=config.max_num_embodiments, + input_dim=config.max_state_dim, + hidden_dim=self.hidden_size, + output_dim=self.input_embedding_dim, + ) + self.action_encoder = MultiEmbodimentActionEncoder( + action_dim=config.action_dim, + hidden_size=self.input_embedding_dim, + num_embodiments=config.max_num_embodiments, + ) + self.action_decoder = CategorySpecificMLP( + num_categories=config.max_num_embodiments, + input_dim=self.hidden_size, + hidden_dim=self.hidden_size, + output_dim=self.action_dim, + ) + self.future_tokens = nn.Embedding(config.num_target_vision_tokens, self.input_embedding_dim) + nn.init.normal_(self.future_tokens.weight, mean=0.0, std=0.02) + + self.vlln = nn.LayerNorm(config.backbone_embedding_dim) if config.use_vlln else nn.Identity() + self.vl_self_attention = ( + SelfAttentionTransformer(**config.vl_self_attention_cfg) if config.use_vlln else nn.Identity() + ) + + if config.add_pos_embed: + self.position_embedding = nn.Embedding(config.max_seq_len, self.input_embedding_dim) + nn.init.normal_(self.position_embedding.weight, mean=0.0, std=0.02) + + self.beta_dist = Beta(config.noise_beta_alpha, config.noise_beta_beta) + self.num_timestep_buckets = config.num_timestep_buckets + self.config = config + self.set_trainable_parameters(config.tune_projector, config.tune_diffusion_model) + + def set_trainable_parameters(self, tune_projector: bool, tune_diffusion_model: bool): + self.tune_projector = tune_projector + self.tune_diffusion_model = tune_diffusion_model + for p in self.parameters(): + p.requires_grad = True + if not tune_projector: + self.state_encoder.requires_grad_(False) + self.action_encoder.requires_grad_(False) + 
self.action_decoder.requires_grad_(False) + if self.config.add_pos_embed: + self.position_embedding.requires_grad_(False) + if not tune_diffusion_model: + self.model.requires_grad_(False) + print(f"Tune action head projector: {self.tune_projector}") + print(f"Tune action head diffusion model: {self.tune_diffusion_model}") + # Check if any parameters are still trainable. If not, print a warning. + if not tune_projector and not tune_diffusion_model: + for name, p in self.named_parameters(): + if p.requires_grad: + print(f"Action head trainable parameter: {name}") + if not any(p.requires_grad for p in self.parameters()): + print("Warning: No action head trainable parameters found.") + + def set_frozen_modules_to_eval_mode(self): + """ + Huggingface will call model.train() at each training_step. To ensure + the expected behaviors for modules like dropout, batchnorm, etc., we + need to call model.eval() for the frozen modules. + """ + if self.training: + if not self.tune_projector: + self.state_encoder.eval() + self.action_encoder.eval() + self.action_decoder.eval() + if self.config.add_pos_embed: + self.position_embedding.eval() + if not self.tune_diffusion_model: + self.model.eval() + + def sample_time(self, batch_size, device, dtype): + sample = self.beta_dist.sample([batch_size]).to(device, dtype=dtype) + return (self.config.noise_s - sample) / self.config.noise_s + + def prepare_input(self, batch: dict) -> BatchFeature: + return BatchFeature(data=batch) + + def process_backbone_output(self, backbone_output: BatchFeature) -> BatchFeature: + backbone_features = backbone_output["backbone_features"] + backbone_features = self.vlln(backbone_features) + backbone_features = self.vl_self_attention(backbone_features) + backbone_output["backbone_features"] = backbone_features + return backbone_output + + def forward(self, backbone_output: BatchFeature, action_input: BatchFeature) -> BatchFeature: + # Set frozen modules to eval + self.set_frozen_modules_to_eval_mode() + + backbone_output = self.process_backbone_output(backbone_output) + + if self.config.expand_batch is not None: + for k, v in backbone_output.items(): + ndim = len(v.shape) + factors = [self.config.expand_batch] + while len(factors) < ndim: + factors.append(1) + factors = tuple(factors) + expanded = v.repeat(*factors) + backbone_output[k] = expanded + + for k, v in action_input.items(): + ndim = len(v.shape) + factors = [self.config.expand_batch] + while len(factors) < ndim: + factors.append(1) + factors = tuple(factors) + expanded = v.repeat(*factors) + action_input[k] = expanded + + # Get vision and language embeddings. + vl_embs = backbone_output.backbone_features + device = vl_embs.device + + # Get embodiment ID. + embodiment_id = action_input.embodiment_id + + # Embed state. + state_features = self.state_encoder(action_input.state, embodiment_id) + + # Embed noised action trajectory. + actions = action_input.action + noise = torch.randn(actions.shape, device=actions.device, dtype=actions.dtype) + t = self.sample_time(actions.shape[0], device=actions.device, dtype=actions.dtype) + t = t[:, None, None] # shape (B,1,1) for broadcast + + noisy_trajectory = (1 - t) * noise + t * actions + velocity = actions - noise + + # Convert (continuous) t -> discrete if needed + t_discretized = (t[:, 0, 0] * self.num_timestep_buckets).long() + action_features = self.action_encoder(noisy_trajectory, t_discretized, embodiment_id) + + # Maybe add position embedding. 
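+        # (Learned table over `max_seq_len` positions; only the first T positions are used.)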
+ if self.config.add_pos_embed: + pos_ids = torch.arange(action_features.shape[1], dtype=torch.long, device=device) + pos_embs = self.position_embedding(pos_ids).unsqueeze(0) + action_features = action_features + pos_embs + + # Join vision, language, state and action embedding along sequence dimension. + future_tokens = self.future_tokens.weight.unsqueeze(0).expand(vl_embs.shape[0], -1, -1) + sa_embs = torch.cat((state_features, future_tokens, action_features), dim=1) + + vl_attn_mask = backbone_output.backbone_attention_mask + + model_output = self.model( + hidden_states=sa_embs, + encoder_hidden_states=vl_embs, + encoder_attention_mask=vl_attn_mask, + timestep=t_discretized, + return_all_hidden_states=False, # NOTE (YL): not using flare now + ) + pred = self.action_decoder(model_output, embodiment_id) + pred_actions = pred[:, -actions.shape[1] :] + + # Slice out only the action portion of pred and target. + action_mask = action_input.action_mask + loss = F.mse_loss(pred_actions, velocity, reduction="none") * action_mask + loss = loss.sum() / action_mask.sum() + output_dict = { + "loss": loss, + } + return BatchFeature(data=output_dict) + + @torch.no_grad() + def get_action(self, backbone_output: BatchFeature, action_input: BatchFeature) -> BatchFeature: + backbone_output = self.process_backbone_output(backbone_output) + + # Get vision and language embeddings. + vl_embs = backbone_output.backbone_features + embodiment_id = action_input.embodiment_id + + # Embed state. + state_features = self.state_encoder(action_input.state, embodiment_id) + + # Set initial actions as the sampled noise. + batch_size = vl_embs.shape[0] + device = vl_embs.device + actions = torch.randn( + size=(batch_size, self.config.action_horizon, self.config.action_dim), + dtype=vl_embs.dtype, + device=device, + ) + + num_steps = self.num_inference_timesteps + dt = 1.0 / num_steps + + # Run denoising steps. + for t in range(num_steps): + t_cont = t / float(num_steps) # e.g. goes 0, 1/N, 2/N, ... + t_discretized = int(t_cont * self.num_timestep_buckets) + + # Embed noised action trajectory. + timesteps_tensor = torch.full(size=(batch_size,), fill_value=t_discretized, device=device) + action_features = self.action_encoder(actions, timesteps_tensor, embodiment_id) + # Maybe add position embedding. + if self.config.add_pos_embed: + pos_ids = torch.arange(action_features.shape[1], dtype=torch.long, device=device) + pos_embs = self.position_embedding(pos_ids).unsqueeze(0) + action_features = action_features + pos_embs + + # Join vision, language, state and action embedding along sequence dimension. + future_tokens = self.future_tokens.weight.unsqueeze(0).expand(vl_embs.shape[0], -1, -1) + sa_embs = torch.cat((state_features, future_tokens, action_features), dim=1) + + # Run model forward. + model_output = self.model( + hidden_states=sa_embs, + encoder_hidden_states=vl_embs, + timestep=timesteps_tensor, + ) + pred = self.action_decoder(model_output, embodiment_id) + + pred_velocity = pred[:, -self.action_horizon :] + + # Update actions using euler integration. 
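+            # One explicit Euler step along the predicted flow: x <- x + dt * v_pred.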
+ actions = actions + dt * pred_velocity + return BatchFeature(data={"action_pred": actions}) + + @property + def device(self): + return next(iter(self.parameters())).device + + @property + def dtype(self): + return next(iter(self.parameters())).dtype diff --git a/src/lerobot/policies/groot/configuration_groot.py b/src/lerobot/policies/groot/configuration_groot.py new file mode 100644 index 0000000000000000000000000000000000000000..699952885fa53a50364d46658a1a12522d0e7e0c --- /dev/null +++ b/src/lerobot/policies/groot/configuration_groot.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python + +# Copyright 2024 NVIDIA Corporation and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field + +from lerobot.configs.policies import PreTrainedConfig +from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature +from lerobot.optim.optimizers import AdamWConfig +from lerobot.optim.schedulers import CosineDecayWithWarmupSchedulerConfig + + +@PreTrainedConfig.register_subclass("groot") +@dataclass +class GrootConfig(PreTrainedConfig): + """Configuration for Groot policy wrapper.""" + + # Basic policy settings + n_obs_steps: int = 1 + chunk_size: int = 50 + n_action_steps: int = 50 + + # Dimension settings (must match pretrained GR00T model expectations) + # Maximum state dimension. Shorter states will be zero-padded. + max_state_dim: int = 64 + + # Maximum action dimension. Shorter actions will be zero-padded. + max_action_dim: int = 32 + + # Normalization (start with identity, adjust as needed) + normalization_mapping: dict[str, NormalizationMode] = field( + default_factory=lambda: { + "VISUAL": NormalizationMode.IDENTITY, + "STATE": NormalizationMode.MEAN_STD, + "ACTION": NormalizationMode.MEAN_STD, + } + ) + + # Image preprocessing (adjust to match Groot's expected input) + image_size: tuple[int, int] = (224, 224) + + # Groot-specific model parameters (from groot_finetune_script.py) + + # Path or HuggingFace model ID for the base Groot model + base_model_path: str = "nvidia/GR00T-N1.5-3B" + + # HF repo ID (or local path) that hosts vocab.json and merges.txt for Eagle tokenizer. + tokenizer_assets_repo: str = "lerobot/eagle2hg-processor-groot-n1p5" + + # Embodiment tag to use for training (e.g. 'new_embodiment', 'gr1') + embodiment_tag: str = "new_embodiment" + + # Fine-tuning control arguments + + # Whether to fine-tune the llm backbone + tune_llm: bool = False + + # Whether to fine-tune the vision tower + tune_visual: bool = False + + # Whether to fine-tune the projector + tune_projector: bool = True + + # Whether to fine-tune the diffusion model + tune_diffusion_model: bool = True + + # LoRA parameters (from groot_finetune_script.py) + # Rank for the LORA model. If 0, no LORA will be used. 
+ lora_rank: int = 0 + + # Alpha value for the LORA model + lora_alpha: int = 16 + + # Dropout rate for the LORA model + lora_dropout: float = 0.1 + + # Whether to use the full model for LORA + lora_full_model: bool = False + + # Training parameters (matching groot_finetune_script.py) + optimizer_lr: float = 1e-4 + optimizer_betas: tuple[float, float] = (0.95, 0.999) + optimizer_eps: float = 1e-8 + optimizer_weight_decay: float = 1e-5 + warmup_ratio: float = 0.05 + use_bf16: bool = True + + # Dataset parameters + # Video backend to use for training ('decord' or 'torchvision_av') + video_backend: str = "decord" + + # Whether to balance dataset weights in mixture datasets + balance_dataset_weights: bool = True + + # Whether to sample trajectories weighted by their length + balance_trajectory_weights: bool = True + + # Optional dataset paths for delegating training to Isaac-GR00T runner + dataset_paths: list[str] | None = None + output_dir: str = "./tmp/gr00t" + save_steps: int = 1000 + max_steps: int = 10000 + batch_size: int = 32 + dataloader_num_workers: int = 8 + report_to: str = "wandb" + resume: bool = False + + def __post_init__(self): + super().__post_init__() + + if self.n_action_steps > self.chunk_size: + raise ValueError( + f"n_action_steps ({self.n_action_steps}) cannot exceed chunk_size ({self.chunk_size})" + ) + + # groot_repo_path is now optional since we ported the components + # No validation needed + + def validate_features(self) -> None: + """Validate and set up input/output features for Groot.""" + image_features = [key for key, feat in self.input_features.items() if feat.type == FeatureType.VISUAL] + if not image_features: + raise ValueError( + "Groot policy requires at least one visual input feature. " + "No features of type FeatureType.VISUAL found in input_features." + ) + + if "observation.state" not in self.input_features: + state_feature = PolicyFeature( + type=FeatureType.STATE, + shape=(self.max_state_dim,), + ) + self.input_features["observation.state"] = state_feature + else: + state_shape = self.input_features["observation.state"].shape + state_dim = state_shape[0] if state_shape else 0 + if state_dim > self.max_state_dim: + raise ValueError( + f"State dimension {state_dim} exceeds max_state_dim {self.max_state_dim}. " + f"Either reduce state dimension or increase max_state_dim in config." + ) + + if "action" not in self.output_features: + action_feature = PolicyFeature( + type=FeatureType.ACTION, + shape=(self.max_action_dim,), + ) + self.output_features["action"] = action_feature + else: + action_shape = self.output_features["action"].shape + action_dim = action_shape[0] if action_shape else 0 + if action_dim > self.max_action_dim: + raise ValueError( + f"Action dimension {action_dim} exceeds max_action_dim {self.max_action_dim}. " + f"Either reduce action dimension or increase max_action_dim in config." 
+
+    def validate_features(self) -> None:
+        """Validate and set up input/output features for Groot."""
+        image_features = [key for key, feat in self.input_features.items() if feat.type == FeatureType.VISUAL]
+        if not image_features:
+            raise ValueError(
+                "Groot policy requires at least one visual input feature. "
+                "No features of type FeatureType.VISUAL found in input_features."
+            )
+
+        if "observation.state" not in self.input_features:
+            state_feature = PolicyFeature(
+                type=FeatureType.STATE,
+                shape=(self.max_state_dim,),
+            )
+            self.input_features["observation.state"] = state_feature
+        else:
+            state_shape = self.input_features["observation.state"].shape
+            state_dim = state_shape[0] if state_shape else 0
+            if state_dim > self.max_state_dim:
+                raise ValueError(
+                    f"State dimension {state_dim} exceeds max_state_dim {self.max_state_dim}. "
+                    f"Either reduce state dimension or increase max_state_dim in config."
+                )
+
+        if "action" not in self.output_features:
+            action_feature = PolicyFeature(
+                type=FeatureType.ACTION,
+                shape=(self.max_action_dim,),
+            )
+            self.output_features["action"] = action_feature
+        else:
+            action_shape = self.output_features["action"].shape
+            action_dim = action_shape[0] if action_shape else 0
+            if action_dim > self.max_action_dim:
+                raise ValueError(
+                    f"Action dimension {action_dim} exceeds max_action_dim {self.max_action_dim}. "
+                    f"Either reduce action dimension or increase max_action_dim in config."
+                )
+
+    def get_optimizer_preset(self) -> AdamWConfig:
+        """Return optimizer configuration."""
+        return AdamWConfig(
+            lr=self.optimizer_lr,
+            betas=self.optimizer_betas,
+            eps=self.optimizer_eps,
+            weight_decay=self.optimizer_weight_decay,
+        )
+
+    def get_scheduler_preset(self) -> CosineDecayWithWarmupSchedulerConfig:
+        """Return scheduler configuration."""
+        return CosineDecayWithWarmupSchedulerConfig(
+            num_warmup_steps=int(self.max_steps * self.warmup_ratio),  # 5% of max_steps by default
+            num_decay_steps=self.max_steps,
+            peak_lr=self.optimizer_lr,
+            decay_lr=self.optimizer_lr * 0.1,
+        )
+
+    @property
+    def observation_delta_indices(self) -> None:
+        """Return indices for delta observations (None for Groot)."""
+        return None
+
+    @property
+    def action_delta_indices(self) -> list[int]:
+        """Return indices for delta actions."""
+        return list(range(min(self.chunk_size, 16)))
+
+    @property
+    def reward_delta_indices(self) -> None:
+        """Return indices for delta rewards (None for Groot)."""
+        return None
diff --git a/src/lerobot/policies/groot/eagle2_hg_model/configuration_eagle2_5_vl.py b/src/lerobot/policies/groot/eagle2_hg_model/configuration_eagle2_5_vl.py
new file mode 100644
index 0000000000000000000000000000000000000000..03624fe45a31410ac319c47354fb6ede8ffdb4af
--- /dev/null
+++ b/src/lerobot/policies/groot/eagle2_hg_model/configuration_eagle2_5_vl.py
@@ -0,0 +1,135 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import copy
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.models.llama.configuration_llama import LlamaConfig
+from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
+from transformers.models.qwen3.configuration_qwen3 import Qwen3Config
+from transformers.models.siglip.configuration_siglip import SiglipVisionConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+
+class Eagle25VLConfig(PretrainedConfig):
+    model_type = "eagle_2_5_vl"
+    is_composition = True
+    sub_configs = {"vision_config": SiglipVisionConfig, "text_config": Qwen2Config}
+
+    def __init__(
+        self,
+        vision_config=None,
+        text_config=None,
+        use_backbone_lora=0,
+        use_llm_lora=0,
+        pad2square=False,
+        select_layer=-4,
+        force_image_size=None,
+        downsample_ratio=0.5,
+        template=None,
+        dynamic_image_size=False,
+        use_thumbnail=False,
+        loss_version="v1",
+        min_dynamic_tiles=1,
+        max_dynamic_tiles=6,
+        mlp_checkpoint=False,
+        initializer_range=0.02,
+        _attn_implementation="flash_attention_2",
+        _attn_implementation_autoset=False,
+        llm_config=None,
+        image_token_index=None,
+        use_pixel_shuffle=True,
+        mlp_connector_layers=2,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        if vision_config is None:
+            vision_config = {"model_type": "siglip_vision_model"}
+            logger.info("vision_config is None. Initializing the SiglipVisionConfig with default values.")
+
+        if text_config is None:
+            text_config = {"architectures": ["Qwen2ForCausalLM"]}
+            logger.info(
+                "text_config is None. Initializing the text config with default values (`Qwen2Config`)."
+            )
+
+        if vision_config["model_type"] == "siglip_vision_model":
+            self.vision_config = SiglipVisionConfig(**vision_config)
+        else:
+            raise ValueError("Unsupported model_type: {}".format(vision_config["model_type"]))
+
+        if text_config["architectures"][0] == "LlamaForCausalLM":
+            self.text_config = LlamaConfig(**text_config)
+        elif text_config["architectures"][0] == "Qwen2ForCausalLM":
+            self.text_config = Qwen2Config(**text_config)
+        elif text_config["architectures"][0] == "Qwen3ForCausalLM":
+            self.text_config = Qwen3Config(**text_config)
+        else:
+            raise ValueError("Unsupported architecture: {}".format(text_config["architectures"][0]))
+        self.use_backbone_lora = use_backbone_lora
+        self.use_llm_lora = use_llm_lora
+        self.mlp_checkpoint = mlp_checkpoint
+        self.pad2square = pad2square
+        self.select_layer = select_layer
+        self.force_image_size = force_image_size
+        self.downsample_ratio = downsample_ratio
+        self.template = template
+        self.dynamic_image_size = dynamic_image_size
+        self.use_thumbnail = use_thumbnail
+        self.loss_version = loss_version
+        self.initializer_range = initializer_range
+        self.min_dynamic_tiles = min_dynamic_tiles
+        self.max_dynamic_tiles = max_dynamic_tiles
+        self.tie_word_embeddings = self.text_config.tie_word_embeddings
+        self._attn_implementation = _attn_implementation
+        self._attn_implementation_autoset = _attn_implementation_autoset
+        self.image_token_index = image_token_index
+        self.use_pixel_shuffle = use_pixel_shuffle
+        self.mlp_connector_layers = mlp_connector_layers
+        logger.info(f"min_dynamic_tiles: {self.min_dynamic_tiles}")
+        logger.info(f"max_dynamic_tiles: {self.max_dynamic_tiles}")
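Since `Eagle25VLConfig` is a composition of a SigLIP vision config and a Llama/Qwen text config, both sub-configs are materialized from plain dicts in `__init__` above. A small sketch of the round trip through `to_dict` (defined next):

```python
cfg = Eagle25VLConfig(
    vision_config={"model_type": "siglip_vision_model"},
    text_config={"architectures": ["Qwen2ForCausalLM"]},
)
blob = cfg.to_dict()
assert blob["model_type"] == "eagle_2_5_vl"
assert blob["vision_config"]["model_type"] == "siglip_vision_model"
```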
+
+    def to_dict(self):
+        """
+        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
+
+        Returns:
+            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
+        """
+        output = copy.deepcopy(self.__dict__)
+        output["vision_config"] = self.vision_config.to_dict()
+        output["text_config"] = self.text_config.to_dict()
+        output["model_type"] = self.__class__.model_type
+        output["use_backbone_lora"] = self.use_backbone_lora
+        output["use_llm_lora"] = self.use_llm_lora
+        output["pad2square"] = self.pad2square
+        output["select_layer"] = self.select_layer
+        output["force_image_size"] = self.force_image_size
+        output["downsample_ratio"] = self.downsample_ratio
+        output["template"] = self.template
+        output["dynamic_image_size"] = self.dynamic_image_size
+        output["use_thumbnail"] = self.use_thumbnail
+        output["min_dynamic_tiles"] = self.min_dynamic_tiles
+        output["max_dynamic_tiles"] = self.max_dynamic_tiles
+        output["tie_word_embeddings"] = self.tie_word_embeddings
+        output["_attn_implementation"] = self._attn_implementation
+        output["_attn_implementation_autoset"] = self._attn_implementation_autoset
+        output["use_pixel_shuffle"] = self.use_pixel_shuffle
+        output["mlp_connector_layers"] = self.mlp_connector_layers
+        return output
diff --git a/src/lerobot/policies/groot/eagle2_hg_model/image_processing_eagle2_5_vl_fast.py b/src/lerobot/policies/groot/eagle2_hg_model/image_processing_eagle2_5_vl_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..d42224e4b3e83414a1f032f68fc6861e69d54cb6
--- /dev/null
+++ b/src/lerobot/policies/groot/eagle2_hg_model/image_processing_eagle2_5_vl_fast.py
@@ -0,0 +1,504 @@
+# --------------------------------------------------------
+# NVIDIA
+# Copyright (c) 2025 NVIDIA
+# Licensed under The MIT License [see LICENSE for details]
+# --------------------------------------------------------
+
+
+# copied from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava_onevision/image_processing_llava_onevision_fast.py
+from typing import Optional
+
+from transformers.image_processing_utils import (
+    BatchFeature,
+    get_patch_output_size,
+)
+from transformers.image_processing_utils_fast import (
+    BaseImageProcessorFast,
+    DefaultFastImageProcessorKwargs,
+    group_images_by_shape,
+    reorder_images,
+)
+from transformers.image_utils import (
+    IMAGENET_STANDARD_MEAN,  # 0.5, 0.5, 0.5
+    IMAGENET_STANDARD_STD,  # 0.5, 0.5, 0.5
+    ChannelDimension,
+    ImageInput,
+    PILImageResampling,
+    SizeDict,
+    get_image_size,
+    make_flat_list_of_images,
+    validate_kwargs,
+)
+from transformers.processing_utils import Unpack
+from transformers.utils import (
+    TensorType,
+    add_start_docstrings,
+    is_torch_available,
+    is_torchvision_v2_available,
+)
+from transformers.video_utils import VideoInput
+
+if is_torch_available():
+    import torch
+if is_torchvision_v2_available():
+    from torchvision.transforms.v2 import functional as F  # noqa: N812
+    from transformers.image_utils import pil_torch_interpolation_mapping
+else:
+    from torchvision.transforms import functional as F  # noqa: N812
+
+
+def crop(img: torch.Tensor, left: int, top: int, right: int, bottom: int) -> torch.Tensor:
+    """Crop the given image tensor.
+
+    Args:
+        img (torch.Tensor): Image to be cropped. Format should be (C, H, W).
+        left (int): The left coordinate of the crop box.
+        top (int): The top coordinate of the crop box.
+        right (int): The right coordinate of the crop box.
+        bottom (int): The bottom coordinate of the crop box.
+
+    Returns:
+        torch.Tensor: Cropped image.
+    """
+    if not isinstance(img, torch.Tensor):
+        raise TypeError(f"img should be torch.Tensor. Got {type(img)}")
+
+    if img.ndim not in [2, 3]:
+        raise ValueError(f"Image should have 2 or 3 dimensions. Got {img.ndim}")
+
+    img_height = img.shape[1]
+    img_width = img.shape[2]
+    if top < 0 or left < 0 or bottom > img_height or right > img_width:
+        raise ValueError("Crop coordinates out of bounds")
+
+    if top >= bottom or left >= right:
+        raise ValueError("Invalid crop coordinates")
+
+    return img[:, top:bottom, left:right]
+
+
+class Eagle25VLFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
+    max_dynamic_tiles: int | None
+    min_dynamic_tiles: int | None
+    use_thumbnail: bool | None
+    pad_during_tiling: bool | None
+    do_pad: bool | None
+
+
+@add_start_docstrings(
+    "Constructs a fast Eagle2.5-VL image processor. Based on [`SiglipImageProcessor`] with incorporation of processing each video frame.",
+    # BASE_IMAGE_PROCESSOR_FAST_DOCSTRING was deprecated in transformers; TODO: remove!
+    """
+    image_grid_pinpoints (`List[List[int]]`, *optional*):
+        A list of possible resolutions to use for processing high resolution images. The best resolution is selected
+        based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
+        method. Not used for processing videos.
+    do_pad (`bool`, *optional*):
+        Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
+        number of patches in the batch. Padding will be applied to the bottom and right with zeros.
+    """,
+)
+class Eagle25VLImageProcessorFast(BaseImageProcessorFast):
+    resample = PILImageResampling.BICUBIC
+    image_mean = IMAGENET_STANDARD_MEAN
+    image_std = IMAGENET_STANDARD_STD
+    size = {"height": 448, "width": 448}
+    default_to_square = False
+    crop_size = None
+    do_resize = True
+    do_center_crop = None
+    do_rescale = True
+    do_normalize = True
+    do_convert_rgb = True
+    do_pad = True
+    max_dynamic_tiles = 12
+    min_dynamic_tiles = 1
+    use_thumbnail = True
+    pad_during_tiling = False
+    valid_kwargs = Eagle25VLFastImageProcessorKwargs
+    model_input_names = ["pixel_values_videos"]
+
+    def __init__(self, **kwargs: Unpack[Eagle25VLFastImageProcessorKwargs]):
+        super().__init__(**kwargs)
+
+    # NOTE(YL): the public preprocess method is overloaded further below; the decorator and
+    # stub are kept here, commented out, so the decorator does not accidentally attach to
+    # `_prepare_images_structure`.
+    # @add_start_docstrings(
+    #     # BASE_IMAGE_PROCESSOR_FAST_DOCSTRING_PREPROCESS was deprecated in transformers; TODO: remove!
+    #     """
+    #     max_dynamic_tiles (`int`, *optional*):
+    #         The maximum number of dynamic tiles to use for processing high resolution images.
+    #     min_dynamic_tiles (`int`, *optional*):
+    #         The minimum number of dynamic tiles to use for processing high resolution images.
+    #     use_thumbnail (`bool`, *optional*):
+    #         Whether to use a thumbnail for processing high resolution images.
+    #     pad_during_tiling (`bool`, *optional*):
+    #         Whether to pad the image during tiling.
+    #     do_pad (`bool`, *optional*):
+    #         Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the
+    #         largest number of patches in the batch. Padding will be applied to the bottom and right with zeros.
+    #     """,
+    # )
+    # def preprocess(
+    #     self, images: ImageInput, **kwargs: Unpack[Eagle25VLFastImageProcessorKwargs]
+    # ) -> BatchFeature:
+    #     return super().preprocess(images, **kwargs)
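A hedged usage sketch of this processor (shapes assume the class defaults above: 448x448 tiles, between 1 and 12 tiles plus a thumbnail; the exact tile count depends on the input aspect ratio, and the accepted input types follow `BaseImageProcessorFast`):

```python
import torch

processor = Eagle25VLImageProcessorFast()
image = torch.randint(0, 256, (3, 900, 1200), dtype=torch.uint8)  # (C, H, W)

out = processor.preprocess(image, return_tensors="pt")
# pixel_values concatenates all tiles (plus the thumbnail) along dim 0:
print(out["pixel_values"].shape)  # e.g. torch.Size([num_tiles + 1, 3, 448, 448])
print(out["image_sizes"])         # original (height, width) per image
```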
+
+    def _prepare_images_structure(
+        self,
+        images: ImageInput,
+        expected_ndims: int = 3,
+    ) -> ImageInput:
+        """
+        Prepare the images structure for processing.
+
+        Args:
+            images (`ImageInput`):
+                The input images to process.
+            expected_ndims (`int`, *optional*, defaults to 3):
+                Expected number of dimensions for the images (added for transformers >=4.53.0 compatibility).
+
+        Returns:
+            `ImageInput`: The images with a valid nesting.
+        """
+        return make_flat_list_of_images(images)
+
+    def _resize_for_patching(
+        self,
+        image: "torch.Tensor",
+        target_resolution: tuple,
+        interpolation: "F.InterpolationMode",
+        input_data_format: ChannelDimension,
+    ) -> "torch.Tensor":
+        """
+        Resizes an image to a target resolution while maintaining aspect ratio.
+
+        Args:
+            image ("torch.Tensor"):
+                The input image.
+            target_resolution (tuple):
+                The target resolution (height, width) of the image.
+            interpolation (`InterpolationMode`):
+                Resampling filter to use if resizing the image.
+            input_data_format (`ChannelDimension` or `str`):
+                The channel dimension format of the input image.
+
+        Returns:
+            "torch.Tensor": The resized and padded image.
+        """
+        new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)
+
+        # Resize the image
+        resized_image = F.resize(image, (new_height, new_width), interpolation=interpolation)
+
+        return resized_image
+
+    def find_closest_aspect_ratio(self, aspect_ratio, target_ratios, width, height, image_size):
+        """
+        The previous version considered only the aspect ratio; this version also takes the
+        area ratio into account.
+        """
+        best_factor = float("-inf")
+        best_ratio = (1, 1)
+        area = width * height
+        for ratio in target_ratios:
+            target_aspect_ratio = ratio[0] / ratio[1]
+            # A tiled area covering more than 60% of the original image area is considered
+            # enough, so the area factor is capped at 0.6.
+            factor_based_on_area_n_ratio = min(
+                (ratio[0] * ratio[1] * image_size * image_size) / area, 0.6
+            ) * min(target_aspect_ratio / aspect_ratio, aspect_ratio / target_aspect_ratio)
+
+            if factor_based_on_area_n_ratio > best_factor:
+                best_factor = factor_based_on_area_n_ratio
+                best_ratio = ratio
+
+        return best_ratio
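To make the scoring rule concrete, here is a standalone re-implementation with a worked example (illustrative only; it mirrors the capped area factor and the symmetric aspect-ratio factor used above):

```python
def grid_score(grid: tuple[int, int], aspect_ratio: float, area: int, tile_size: int = 448) -> float:
    cols, rows = grid
    target_ar = cols / rows
    area_factor = min(cols * rows * tile_size * tile_size / area, 0.6)   # capped at 60% coverage
    ar_factor = min(target_ar / aspect_ratio, aspect_ratio / target_ar)  # 1.0 when ratios match
    return area_factor * ar_factor


w, h = 1200, 900  # a 4:3 image
candidates = [(1, 1), (2, 1), (2, 2), (3, 2), (3, 3)]
best = max(candidates, key=lambda g: grid_score(g, w / h, w * h))
print(best)  # (3, 2): six 448x448 tiles in a grid slightly wider than tall
```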
+
+    def _pad_for_patching(
+        self, image: "torch.Tensor", target_resolution: tuple, input_data_format: ChannelDimension
+    ) -> "torch.Tensor":
+        """
+        Pad an image to a target resolution while maintaining aspect ratio.
+        """
+        target_height, target_width = target_resolution
+        new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)
+
+        paste_x = (target_width - new_width) // 2
+        paste_y = (target_height - new_height) // 2
+
+        padded_image = F.pad(image, padding=[paste_x, paste_y, paste_x, paste_y])
+
+        return padded_image
+
+    def _get_image_patches(
+        self,
+        image: "torch.Tensor",
+        min_num: int,
+        max_num: int,
+        size: tuple,
+        tile_size: int,
+        use_thumbnail: bool,
+        interpolation: "F.InterpolationMode",
+        pad_during_tiling: bool,
+    ) -> list["torch.Tensor"]:
+        image_size = get_image_size(image, channel_dim=ChannelDimension.FIRST)
+        orig_height, orig_width = image_size
+        aspect_ratio = orig_width / orig_height
+
+        # enumerate all candidate tile grids (i columns x j rows) within the tile budget
+        target_ratios = {
+            (i, j)
+            for n in range(min_num, max_num + 1)
+            for i in range(1, n + 1)
+            for j in range(1, n + 1)
+            if i * j <= max_num and i * j >= min_num
+        }
+        target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
+
+        # find the closest aspect ratio to the target
+        target_aspect_ratio = self.find_closest_aspect_ratio(
+            aspect_ratio, target_ratios, orig_width, orig_height, tile_size
+        )
+
+        # calculate the target width and height
+        target_width = tile_size * target_aspect_ratio[0]
+        target_height = tile_size * target_aspect_ratio[1]
+        blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
+        if pad_during_tiling:
+            resized_image = self._resize_for_patching(
+                image,
+                (target_height, target_width),
+                interpolation=interpolation,
+                input_data_format=ChannelDimension.FIRST,
+            )
+            padded_image = self._pad_for_patching(
+                resized_image,
+                (target_height, target_width),
+                input_data_format=ChannelDimension.FIRST,
+            )
+            image_used_to_split = padded_image
+        else:
+            image_used_to_split = F.resize(image, (target_height, target_width), interpolation=interpolation)
+
+        processed_tiles = []
+        for i in range(blocks):
+            box = (
+                (i % (target_width // tile_size)) * tile_size,
+                (i // (target_width // tile_size)) * tile_size,
+                ((i % (target_width // tile_size)) + 1) * tile_size,
+                ((i // (target_width // tile_size)) + 1) * tile_size,
+            )
+            # split the image
+            split_img = crop(image_used_to_split, box[0], box[1], box[2], box[3])
+            processed_tiles.append(split_img)
+        assert len(processed_tiles) == blocks
+
+        if use_thumbnail and len(processed_tiles) != 1:
+            thumbnail_img = F.resize(image, (tile_size, tile_size), interpolation=interpolation)
+            processed_tiles.append(thumbnail_img)
+
+        return processed_tiles
+
+    def _pad_for_batching(
+        self,
+        pixel_values: list["torch.Tensor"],
+    ) -> list["torch.Tensor"]:
+        """
+        Pads images on the `num_of_patches` dimension with zeros to form a batch with the same number of patches.
+
+        Args:
+            pixel_values (`List[torch.Tensor]`):
+                An array of pixel values of each image of shape (`batch_size`, `num_patches`, `image_in_3D`)
+
+        Returns:
+            List[`torch.Tensor`]: The padded images.
+ """ + max_patch = max(len(x) for x in pixel_values) + pixel_values = [ + torch.nn.functional.pad(image, pad=[0, 0, 0, 0, 0, 0, 0, max_patch - image.shape[0]]) + for image in pixel_values + ] + + return pixel_values + + def _preprocess( + self, + images: list["torch.Tensor"], + do_resize: bool, + size: SizeDict, + max_dynamic_tiles: int, + min_dynamic_tiles: int, + use_thumbnail: bool, + pad_during_tiling: bool, + interpolation: Optional["F.InterpolationMode"], + do_center_crop: bool, + crop_size: SizeDict, + do_rescale: bool, + rescale_factor: float, + do_normalize: bool, + image_mean: float | list[float] | None, + image_std: float | list[float] | None, + do_pad: bool, + return_tensors: str | TensorType | None, + pad_size: SizeDict | None = None, # Added for transformers >=4.53.0 compatibility + disable_grouping: bool | None = None, # Added for transformers >=4.53.0 compatibility + ) -> BatchFeature: + processed_images = [] + image_sizes = [] + # Determine the size tuple + if size and size.height and size.width: + size_tuple = (size.height, size.width) + else: + size_tuple = (size.shortest_edge, size.shortest_edge) + + # Determine the patch size + if crop_size and crop_size.height: + tile_size = crop_size.height + elif size and size.height: + tile_size = size.height + else: + tile_size = size.shortest_edge + + for image in images: + image_patches = self._get_image_patches( + image, + min_num=min_dynamic_tiles, + max_num=max_dynamic_tiles, + size=size_tuple, + tile_size=tile_size, + use_thumbnail=use_thumbnail, + interpolation=interpolation, + pad_during_tiling=pad_during_tiling, + ) + + # Group images by size for batched processing + processed_image_patches_grouped = {} + # Added for transformers >=4.53.0 compatibility + grouped_image_patches, grouped_image_patches_index = group_images_by_shape( + image_patches, + disable_grouping=disable_grouping, + ) + + for shape, stacked_image_patches in grouped_image_patches.items(): + if do_resize: + stacked_image_patches = self.resize( + image=stacked_image_patches, + size=size, + interpolation=interpolation, + ) + if do_center_crop: + stacked_image_patches = self.center_crop(stacked_image_patches, crop_size) + # Fused rescale and normalize + stacked_image_patches = self.rescale_and_normalize( + stacked_image_patches, + do_rescale, + rescale_factor, + do_normalize, + image_mean, + image_std, + ) + processed_image_patches_grouped[shape] = stacked_image_patches + processed_image_patches = reorder_images( + processed_image_patches_grouped, grouped_image_patches_index + ) + processed_image_patches = ( + torch.stack(processed_image_patches, dim=0) if return_tensors else processed_image_patches + ) + processed_images.append(processed_image_patches) + image_sizes.append(get_image_size(image, ChannelDimension.FIRST)) + + if do_pad: + processed_images = self._pad_for_batching(processed_images) + + # processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images + processed_images = torch.cat(processed_images, dim=0) if return_tensors else processed_images + return BatchFeature( + data={"pixel_values": processed_images, "image_sizes": image_sizes}, + tensor_type=return_tensors, + ) + + def preprocess( + self, + images: ImageInput, + videos: VideoInput = None, + **kwargs: Unpack[Eagle25VLFastImageProcessorKwargs], + ) -> BatchFeature: + validate_kwargs( + captured_kwargs=kwargs.keys(), + valid_processor_keys=self.valid_kwargs.__annotations__.keys(), + ) + # Set default kwargs from self. 
This ensures that if a kwarg is not provided + # by the user, it gets its default value from the instance, or is set to None. + for kwarg_name in self.valid_kwargs.__annotations__: + kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None)) + + # Extract parameters that are only used for preparing the input images + do_convert_rgb = kwargs.pop("do_convert_rgb") + input_data_format = kwargs.pop("input_data_format") + device = kwargs.pop("device") + # Prepare input images + # transformers >= 4.53.0: uses _prepare_image_like_inputs instead of _prepare_input_images + if images is not None: + images = self._prepare_image_like_inputs( + images=images, + do_convert_rgb=do_convert_rgb, + input_data_format=input_data_format, + device=device, + ) + + if videos is not None: + videos = self._prepare_image_like_inputs( + images=videos, + do_convert_rgb=do_convert_rgb, + input_data_format=input_data_format, + device=device, + ) + + # Update kwargs that need further processing before being validated + kwargs = self._further_process_kwargs(**kwargs) + + # Validate kwargs + self._validate_preprocess_kwargs(**kwargs) + + # torch resize uses interpolation instead of resample + # Added for transformers >=4.53.0 compatibility + resample = kwargs.pop("resample", self.resample) + kwargs["interpolation"] = ( + pil_torch_interpolation_mapping[resample] + if isinstance(resample, PILImageResampling | int) + else resample + ) + + # Filter kwargs to only include those accepted by _preprocess + valid_preprocess_kwargs = { + "do_resize", + "size", + "max_dynamic_tiles", + "min_dynamic_tiles", + "use_thumbnail", + "pad_during_tiling", + "interpolation", + "do_center_crop", + "crop_size", + "do_rescale", + "rescale_factor", + "do_normalize", + "image_mean", + "image_std", + "do_pad", + "return_tensors", + "pad_size", + "disable_grouping", + } + filtered_kwargs = {k: v for k, v in kwargs.items() if k in valid_preprocess_kwargs} + if images is not None: + return self._preprocess(images, **filtered_kwargs) + elif videos is not None: + return self._preprocess(videos, **filtered_kwargs) + + +__all__ = ["Eagle25VLImageProcessorFast"] diff --git a/src/lerobot/policies/groot/eagle2_hg_model/modeling_eagle2_5_vl.py b/src/lerobot/policies/groot/eagle2_hg_model/modeling_eagle2_5_vl.py new file mode 100644 index 0000000000000000000000000000000000000000..3111223f68e0bd8c4f5398822468336283e1b4c1 --- /dev/null +++ b/src/lerobot/policies/groot/eagle2_hg_model/modeling_eagle2_5_vl.py @@ -0,0 +1,395 @@ +# -------------------------------------------------------- +# NVIDIA +# Copyright (c) 2025 NVIDIA +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +import inspect + +import torch +import torch.utils.checkpoint as cp +from peft import LoraConfig, get_peft_model +from torch import nn +from torch.nn import CrossEntropyLoss +from transformers import GenerationConfig +from transformers.generation import GenerationMixin +from transformers.modeling_outputs import CausalLMOutputWithPast +from transformers.modeling_utils import PreTrainedModel +from transformers.models.llama.modeling_llama import LlamaForCausalLM +from transformers.models.qwen2.modeling_qwen2 import Qwen2ForCausalLM +from transformers.models.qwen3.modeling_qwen3 import Qwen3ForCausalLM +from transformers.models.siglip.modeling_siglip import SiglipVisionModel +from transformers.utils import add_start_docstrings, logging + +from .configuration_eagle2_5_vl import Eagle25VLConfig + +logger = 
logging.get_logger(__name__) + + +# copy from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava_onevision/modeling_llava_onevision.py#L241C1-L280C1 +EAGLE2_5_VL_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`Eagle25VLConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Eagle2_5_VL Model outputting raw hidden-states without any specific head on top.", + EAGLE2_5_VL_START_DOCSTRING, +) +class Eagle25VLPreTrainedModel(PreTrainedModel): + config_class = Eagle25VLConfig + base_model_prefix = "model" + main_input_name = "input_ids" + supports_gradient_checkpointing = True + _no_split_modules = [ + "Qwen2DecoderLayer", + "LlamaDecoderLayer", + "Siglip2EncoderLayer", + "SiglipEncoderLayer", + ] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + _supports_cache_class = True + _supports_static_cache = True + _supports_quantized_cache = True + _supports_sdpa = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear | nn.Conv2d): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +class Eagle25VLForConditionalGeneration(Eagle25VLPreTrainedModel, GenerationMixin): + config_class = Eagle25VLConfig + + def __init__(self, config: Eagle25VLConfig, vision_model=None, language_model=None): + super().__init__(config) + + image_size = config.force_image_size or config.vision_config.image_size + patch_size = config.vision_config.patch_size + self.patch_size = patch_size + if config.use_pixel_shuffle: + self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio**2)) + else: + self.num_image_token = int((image_size // patch_size) ** 2) + + self.select_layer = config.select_layer + self.downsample_ratio = config.downsample_ratio + self.loss_version = config.loss_version + self.mlp_checkpoint = config.mlp_checkpoint + self.use_pixel_shuffle = config.use_pixel_shuffle + self.mlp_connector_layers = config.mlp_connector_layers + logger.info(f"num_image_token: {self.num_image_token}") + logger.info(f"mlp_checkpoint: {self.mlp_checkpoint}") + if vision_model is not None: + self.vision_model = vision_model + else: + if config.vision_config.model_type == "siglip_vision_model": + config.vision_config._attn_implementation = "flash_attention_2" + self.vision_model = SiglipVisionModel(config.vision_config) + else: + raise NotImplementedError(f"{config.vision_config.model_type} is not implemented.") + + if language_model is not None: + self.language_model = language_model + else: + if 
config.text_config.architectures[0] == "LlamaForCausalLM": + self.language_model = LlamaForCausalLM(config.text_config) + elif config.text_config.architectures[0] == "Phi3ForCausalLM": + raise NotImplementedError("Phi3 is not implemented.") + # self.language_model = Phi3ForCausalLM(config.text_config) + elif config.text_config.architectures[0] == "Qwen2ForCausalLM": + assert config.text_config._attn_implementation == "flash_attention_2", ( + f"Qwen2 must use flash_attention_2 but got {config.text_config._attn_implementation}" + ) + self.language_model = Qwen2ForCausalLM(config.text_config) + elif config.text_config.architectures[0] == "Qwen3ForCausalLM": + self.language_model = Qwen3ForCausalLM(config.text_config) + else: + raise NotImplementedError(f"{config.text_config.architectures[0]} is not implemented.") + + vit_hidden_size = config.vision_config.hidden_size + llm_hidden_size = config.text_config.hidden_size + + if config.mlp_connector_layers == 2: + self.mlp1 = nn.Sequential( + nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2), + nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size), + nn.GELU(), + nn.Linear(llm_hidden_size, llm_hidden_size), + ) + elif config.mlp_connector_layers == 1 and config.use_pixel_shuffle: + self.mlp1 = nn.Sequential( + nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size), + ) + elif config.mlp_connector_layers == 1 and not config.use_pixel_shuffle: + self.mlp1 = nn.Sequential( + nn.Linear(vit_hidden_size, llm_hidden_size), + ) + else: + raise NotImplementedError(f"{config.mlp_connector_layers} is not implemented.") + + self.image_token_index = config.image_token_index + self.neftune_alpha = None + + if config.use_backbone_lora: + self.wrap_backbone_lora(r=config.use_backbone_lora, lora_alpha=2 * config.use_backbone_lora) + + self.use_llm_lora = config.use_llm_lora + if config.use_llm_lora: + self.wrap_llm_lora(r=config.use_llm_lora, lora_alpha=2 * config.use_llm_lora) + + self.check_forward_kwargs() + + def check_forward_kwargs(self): + # We intentionally avoid using **kwargs in forward because Hugging Face Transformers + # has special handling for functions with **kwargs parameters that would affect + # how our model is processed during training and inference. 
+ forward_params = inspect.signature(self.forward).parameters + assert not any(k.kind == inspect.Parameter.VAR_KEYWORD for k in forward_params.values()) + + def wrap_backbone_lora(self, r=128, lora_alpha=256, lora_dropout=0.05): + lora_config = LoraConfig( + r=r, + target_modules=[ + "self_attn.q_proj", + "self_attn.k_proj", + "self_attn.v_proj", + "self_attn.out_proj", + "mlp.fc1", + "mlp.fc2", + ], + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + ) + self.vision_model = get_peft_model(self.vision_model, lora_config) + self.vision_model.print_trainable_parameters() + + def wrap_llm_lora(self, r=128, lora_alpha=256, lora_dropout=0.05): + lora_config = LoraConfig( + r=r, + target_modules=[ + "self_attn.q_proj", + "self_attn.k_proj", + "self_attn.v_proj", + "self_attn.o_proj", + "mlp.gate_proj", + "mlp.down_proj", + "mlp.up_proj", + ], + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + task_type="CAUSAL_LM", + ) + self.language_model = get_peft_model(self.language_model, lora_config) + self.language_model.enable_input_require_grads() + self.language_model.print_trainable_parameters() + self.use_llm_lora = True + + def forward( + self, + pixel_values: torch.FloatTensor, + input_ids: torch.LongTensor = None, + attention_mask: torch.Tensor | None = None, + position_ids: torch.LongTensor | None = None, + image_flags: torch.LongTensor | None = None, + past_key_values: list[torch.FloatTensor] | None = None, + labels: torch.LongTensor | None = None, + use_cache: bool | None = None, + output_attentions: bool | None = None, + output_hidden_states: bool | None = None, + return_dict: bool | None = None, + num_tiles_list: list[torch.Tensor] | None = None, + ) -> tuple | CausalLMOutputWithPast: + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + input_embeds = self.language_model.get_input_embeddings()(input_ids) + + vit_embeds = self.extract_feature(pixel_values) + + if image_flags is not None: + image_flags = image_flags.view(-1) + vit_embeds = vit_embeds[image_flags == 1] + + b, n, c = input_embeds.shape + input_embeds = input_embeds.reshape(b * n, c) + + input_ids = input_ids.reshape(b * n) + selected = input_ids == self.image_token_index + try: + input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, c) + except Exception as e: + vit_embeds = vit_embeds.reshape(-1, c) + print( + f"warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, " + f"vit_embeds.shape={vit_embeds.shape}" + ) + n_token = selected.sum() + input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token] + + input_embeds = input_embeds.reshape(b, n, c) + + outputs = self.language_model( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + logits = outputs.logits + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else 
output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def pixel_shuffle(self, x, scale_factor=0.5): + n, w, h, c = x.size() + # N, W, H, C --> N, W, H * scale, C // scale + x = x.view(n, w, int(h * scale_factor), int(c / scale_factor)) + # N, W, H * scale, C // scale --> N, H * scale, W, C // scale + x = x.permute(0, 2, 1, 3).contiguous() + # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2) + x = x.view(n, int(h * scale_factor), int(w * scale_factor), int(c / (scale_factor * scale_factor))) + + x = x.permute(0, 2, 1, 3).contiguous() + return x + + def extract_feature(self, pixel_values): + if self.select_layer == -1: + vit_embeds = self.vision_model( + pixel_values=pixel_values, output_hidden_states=False, return_dict=True + ) + if hasattr(vit_embeds, "last_hidden_state"): + vit_embeds = vit_embeds.last_hidden_state + + else: + vit_embeds = self.vision_model( + pixel_values=pixel_values, output_hidden_states=True, return_dict=True + ).hidden_states[self.select_layer] + + if self.use_pixel_shuffle: + h = w = int(vit_embeds.shape[1] ** 0.5) + vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1) + vit_embeds = self.pixel_shuffle( + vit_embeds, scale_factor=self.downsample_ratio + ) # torch.Size([B, 1024, 1024]) -> torch.Size([B, 16, 16, 4096]) + vit_embeds = vit_embeds.reshape( + vit_embeds.shape[0], -1, vit_embeds.shape[-1] + ) # torch.Size([B, 16, 16, 4096]) -> torch.Size([B, 256, 4096]) + + if self.mlp_checkpoint and vit_embeds.requires_grad: + vit_embeds = cp.checkpoint(self.mlp1, vit_embeds) + else: + vit_embeds = self.mlp1(vit_embeds) + + return vit_embeds + + @torch.no_grad() + def generate( + self, + pixel_values: torch.FloatTensor | None = None, + input_ids: torch.FloatTensor | None = None, + attention_mask: torch.LongTensor | None = None, + visual_features: torch.FloatTensor | None = None, + generation_config: GenerationConfig | None = None, + output_hidden_states: bool | None = None, + image_sizes: list[tuple[int, int]] | None = None, + **generate_kwargs, + ) -> torch.LongTensor: + if pixel_values is not None: + if visual_features is not None: + vit_embeds = visual_features + else: + vit_embeds = self.extract_feature(pixel_values) + + input_embeds = self.language_model.get_input_embeddings()(input_ids) + b, n, c = input_embeds.shape + input_embeds = input_embeds.reshape(b * n, c) + + input_ids = input_ids.reshape(b * n) + selected = input_ids == self.config.image_token_index + assert selected.sum() != 0 + input_embeds[selected] = vit_embeds.reshape(-1, c).to(input_embeds.device) + + input_embeds = input_embeds.reshape(b, n, c) + else: + input_embeds = self.language_model.get_input_embeddings()(input_ids) + + if "use_cache" not in generate_kwargs: + generate_kwargs["use_cache"] = True + + outputs = self.language_model.generate( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + generation_config=generation_config, + output_hidden_states=output_hidden_states, + **generate_kwargs, + ) + + return outputs + + # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.get_input_embeddings + def get_input_embeddings(self): + return self.language_model.get_input_embeddings() + + # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.set_input_embeddings + def set_input_embeddings(self, value): + 
self.language_model.set_input_embeddings(value) + + # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.get_output_embeddings + def get_output_embeddings(self): + return self.language_model.get_output_embeddings() + + # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.language_model.set_output_embeddings(new_embeddings) + + # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.set_decoder + def set_decoder(self, decoder): + self.language_model.set_decoder(decoder) + + # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.get_decoder + def get_decoder(self): + return self.language_model.get_decoder() diff --git a/src/lerobot/policies/groot/eagle2_hg_model/processing_eagle2_5_vl.py b/src/lerobot/policies/groot/eagle2_hg_model/processing_eagle2_5_vl.py new file mode 100644 index 0000000000000000000000000000000000000000..79a52e691da83db7e8d9a962ded986b998450252 --- /dev/null +++ b/src/lerobot/policies/groot/eagle2_hg_model/processing_eagle2_5_vl.py @@ -0,0 +1,518 @@ +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Processor class for Eagle25VL. 
+
+copied from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava_onevision/processing_llava_onevision.py
+"""
+
+import base64
+import os
+import re
+from io import BytesIO
+
+import requests
+import torch
+from PIL import Image
+from transformers.feature_extraction_utils import BatchFeature
+from transformers.image_utils import ImageInput
+from transformers.processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
+from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
+from transformers.utils import logging
+from transformers.video_utils import VideoInput
+
+logger = logging.get_logger(__name__)
+
+
+FRAME_FACTOR = 2
+FPS = 2.0
+FPS_MIN_FRAMES = 4
+FPS_MAX_FRAMES = 256
+
+
+def to_rgb(pil_image: Image.Image) -> Image.Image:
+    if pil_image.mode == "RGBA":
+        white_background = Image.new("RGB", pil_image.size, (255, 255, 255))
+        white_background.paste(pil_image, mask=pil_image.split()[3])  # Use alpha channel as mask
+        return white_background
+    else:
+        return pil_image.convert("RGB")
+
+
+def fetch_image(ele: dict[str, str | Image.Image]) -> Image.Image:
+    image = ele["image"] if "image" in ele else ele["image_url"]
+    image_obj = None
+    if isinstance(image, Image.Image):
+        image_obj = image
+    elif image.startswith("http://") or image.startswith("https://"):
+        response = requests.get(image, stream=True, timeout=10)
+        image_obj = Image.open(BytesIO(response.content))
+    elif image.startswith("file://"):
+        image_obj = Image.open(image[7:])
+    elif image.startswith("data:image"):
+        if "base64," in image:
+            _, base64_data = image.split("base64,", 1)
+            data = base64.b64decode(base64_data)
+            image_obj = Image.open(BytesIO(data))
+    else:
+        image_obj = Image.open(image)
+    if image_obj is None:
+        raise ValueError(
+            f"Unrecognized image input, support local path, http url, base64 and PIL.Image, got {image}"
+        )
+    image = to_rgb(image_obj)
+    if "scale_factor" in ele:
+        scale_factor = ele["scale_factor"]
+        image = image.resize((image.width * scale_factor, image.height * scale_factor), Image.BILINEAR)
+    return image
+
+
+class Eagle25VLProcessorKwargs(ProcessingKwargs, total=False):
+    # see processing_utils.ProcessingKwargs documentation for usage.
+    _defaults = {
+        "text_kwargs": {
+            "padding": False,
+        },
+        "images_kwargs": {},
+        "videos_kwargs": {"max_dynamic_tiles": 1},
+    }
+
+
+class Eagle25VLProcessor(ProcessorMixin):
+    r"""
+    Constructs an Eagle25VL processor which wraps an Eagle25VL video processor, an Eagle25VL image processor and an
+    Eagle25VL tokenizer into a single processor.
+
+    [`Eagle25VLProcessor`] offers all the functionalities of [`Eagle25VLVideoProcessor`], [`Eagle25VLImageProcessor`] and [`Eagle25VLTokenizer`]. See the
+    [`~Eagle25VLVideoProcessor.__call__`], [`~Eagle25VLProcessor.__call__`] and [`~Eagle25VLProcessor.decode`] for more information.
+
+    Args:
+        image_processor ([`LlavaOnevisionImageProcessor`], *optional*):
+            The image processor is a required input.
+        tokenizer ([`LlamaTokenizerFast`], *optional*):
+            The tokenizer is a required input.
+        num_image_tokens (`int`, *optional*):
+            Number of image tokens for one image that will be returned by the vision tower.
+        vision_feature_select_strategy (`str`, *optional*):
+            The feature selection strategy used to select the vision feature from the vision backbone.
+            Should be the same as in the model's config.
+        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
+            in a chat into a tokenizable string.
+ image_token (`str`, *optional*, defaults to `""`): + Special token used to denote image location. + video_token (`str`, *optional*, defaults to `"