diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..6f6867127cff23636d6d8eeff06bfbd1bccd19f9
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,23 @@
+# ignore .git related folders
+.git/
+.github/
+.gitignore
+# ignore docs
+docs/
+# ignore logs
+**/logs/
+**/runs/
+**/output/*
+**/outputs/*
+**/videos/*
+*.tmp
+# ignore docker
+docker/cluster/exports/
+docker/.container.cfg
+# ignore recordings
+recordings/
+# ignore __pycache__
+**/__pycache__/
+**/*.egg-info/
+# ignore isaac sim symlink
+_isaac_sim*
diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..a4f6e36c555e6960cc9fb3e9386d16777e65e6b5
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,24 @@
+# copied from https://github.com/isaac-sim/IsaacLab/blob/main/.flake8
+
+[flake8]
+show-source=True
+statistics=True
+per-file-ignores=*/__init__.py:F401
+# E402: Module level import not at top of file
+# E501: Line too long
+# W503: Line break before binary operator
+# E203: Whitespace before ':' -> conflicts with black
+# D401: First line should be in imperative mood
+# R504: Unnecessary variable assignment before return statement.
+# R505: Unnecessary elif after return statement
+# SIM102: Use a single if-statement instead of nested if-statements
+# SIM117: Merge with statements for context managers that have same scope.
+ignore=E402,E501,W503,E203,D401,R504,R505,SIM102,SIM117
+max-line-length = 120
+max-complexity = 30
+exclude=_*,.vscode,.git,docs/**
+# docstrings
+docstring-convention=google
+# annotations
+suppress-none-returning=True
+allow-star-arg-any=True
diff --git a/.gitattributes b/.gitattributes
index bed0738c7eeb449bca98b5d2f33c89a1ee56349a..f5fd1b2ab84d9680cb2725f1519a22bf62d1e0a1 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,60 +1,46 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.avro filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mds filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
+# copied and modified from https://github.com/NVIDIA/warp/blob/main/.gitattributes
+* text=auto
+*.sh text eol=lf
+
+# copied from https://github.com/isaac-sim/IsaacLab/blob/main/.gitattributes
+*.usd filter=lfs diff=lfs merge=lfs -text
+*.dae filter=lfs diff=lfs merge=lfs -text
+*.mtl filter=lfs diff=lfs merge=lfs -text
+*.obj filter=lfs diff=lfs merge=lfs -text
*.gif filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
-# Video files - compressed
+*.psd filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
-*.webm filter=lfs diff=lfs merge=lfs -text
+*.usda filter=lfs diff=lfs merge=lfs -text
+*.hdr filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.jit filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000000.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000001.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000002.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000003.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000004.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_annotated.hdf5 filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000000.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000001.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000002.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000003.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000004.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000005.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000006.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000007.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000008.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000009.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000010.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000011.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000012.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000013.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000014.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000015.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000016.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000017.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000018.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000019.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000020.parquet filter=lfs diff=lfs merge=lfs -text
+datasets/apple_pick_place_generated_small.hdf5 filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..a78001bc99ea73c46d9e6a752a24c12bf710ba88
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,38 @@
+# Omniverse
+**/*.dmp
+**/.thumbs
+
+# Python
+.DS_Store
+**/*.egg-info/
+**/__pycache__/
+**/.pytest_cache/
+**/*.pyc
+**/*.pb
+
+# IDE
+**/.idea/
+**/.vscode/
+# Don't ignore the top-level .vscode directory as it is
+# used to configure VS Code settings
+!.vscode
+
+# Outputs
+**/runs/*
+**/logs/*
+**/recordings/*
+**/output/*
+**/outputs/*
+**/videos/*
+**/wandb/*
+**/.neptune/*
+docker/artifacts/
+*.tmp
+
+# Isaac-Sim packman
+_isaac_sim*
+_repo
+_build
+.lastformat
+
+datasets_large/
\ No newline at end of file
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000000000000000000000000000000000000..614fc571c3c3f0b814ca2e3460a71cd8b01abcb5
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,4 @@
+[submodule "submodules/Isaac-GR00T"]
+ path = submodules/Isaac-GR00T
+	url = https://github.com/NVIDIA/Isaac-GR00T.git
+ branch = n1-release
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d355d0273dae74aeb8b53de4d32aea87a53f800
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,49 @@
+repos:
+ - repo: https://github.com/python/black
+ rev: 23.10.1
+ hooks:
+ - id: black
+ args: ["--line-length", "120", "--preview"]
+ - repo: https://github.com/pycqa/flake8
+ rev: 6.1.0
+ hooks:
+ - id: flake8
+ additional_dependencies: [flake8-simplify, flake8-return]
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.5.0
+ hooks:
+ - id: trailing-whitespace
+ - id: check-symlinks
+ - id: destroyed-symlinks
+ - id: check-yaml
+ - id: check-merge-conflict
+ - id: check-case-conflict
+ - id: check-executables-have-shebangs
+ - id: check-toml
+ - id: end-of-file-fixer
+ - id: check-shebang-scripts-are-executable
+ - id: detect-private-key
+ - id: debug-statements
+ - repo: https://github.com/pycqa/isort
+ rev: 5.12.0
+ hooks:
+ - id: isort
+ name: isort (python)
+ args: ["--profile", "black", "--filter-files"]
+ - repo: https://github.com/asottile/pyupgrade
+ rev: v3.15.0
+ hooks:
+ - id: pyupgrade
+ args: ["--py37-plus"]
+ - repo: https://github.com/codespell-project/codespell
+ rev: v2.2.6
+ hooks:
+ - id: codespell
+ additional_dependencies:
+ - tomli
+ - repo: https://github.com/pre-commit/pygrep-hooks
+ rev: v1.10.0
+ hooks:
+ - id: rst-backticks
+ - id: rst-directive-colons
+ - id: rst-inline-touching-normal
diff --git a/.vscode/.gitignore b/.vscode/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..10b0af342ce38ac614e378cac00d334c5106edd0
--- /dev/null
+++ b/.vscode/.gitignore
@@ -0,0 +1,10 @@
+# Note: These files are kept for development purposes only.
+!tools/launch.template.json
+!tools/settings.template.json
+!tools/setup_vscode.py
+!extensions.json
+!tasks.json
+
+# Ignore all other files
+.python.env
+*.json
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
new file mode 100644
index 0000000000000000000000000000000000000000..6306e43497082f89fa3eac81377aed8f0ac6c30e
--- /dev/null
+++ b/.vscode/extensions.json
@@ -0,0 +1,12 @@
+{
+ // See http://go.microsoft.com/fwlink/?LinkId=827846
+ // for the documentation about the extensions.json format
+ "recommendations": [
+ "ms-python.python",
+ "ms-python.vscode-pylance",
+ "ban.spellright",
+ "ms-iot.vscode-ros",
+ "ms-python.black-formatter",
+ "ms-python.flake8",
+ ]
+}
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
new file mode 100644
index 0000000000000000000000000000000000000000..d0df28dcf1f064a5d288362628ebb67139327a23
--- /dev/null
+++ b/.vscode/tasks.json
@@ -0,0 +1,23 @@
+{
+ "version": "2.0.0",
+ "tasks": [
+ {
+ "label": "setup_python_env",
+ "type": "shell",
+ "linux": {
+ "command": "${input:isaac_path}/python.sh ${workspaceFolder}/.vscode/tools/setup_vscode.py --isaac_path ${input:isaac_path}"
+ },
+ "windows": {
+ "command": "${input:isaac_path}/python.bat ${workspaceFolder}/.vscode/tools/setup_vscode.py --isaac_path ${input:isaac_path}"
+ }
+ }
+ ],
+ "inputs": [
+ {
+ "id": "isaac_path",
+ "description": "Absolute path to the current Isaac Sim installation. Can be skipped if Isaac Sim installed from pip.",
+ "default": "${HOME}/.local/share/ov/pkg/isaac_sim-4.2.0",
+ "type": "promptString"
+ },
+ ]
+}
diff --git a/.vscode/tools/launch.template.json b/.vscode/tools/launch.template.json
new file mode 100644
index 0000000000000000000000000000000000000000..f941910168070b536b7c0b413a4b0c48dd4926d4
--- /dev/null
+++ b/.vscode/tools/launch.template.json
@@ -0,0 +1,66 @@
+{
+ // Use IntelliSense to learn about possible attributes.
+ // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "configurations": [
+ // For standalone script execution
+ {
+ "name": "Python: Current File",
+ "type": "debugpy",
+ "request": "launch",
+ "program": "${file}",
+ "console": "integratedTerminal",
+ },
+ {
+ "name": "Python: Train Environment",
+ "type": "debugpy",
+ "request": "launch",
+ "args" : ["--task", "Template-Isaac-Velocity-Flat-Anymal-D-v0", "--num_envs", "4096", "--headless"],
+ "program": "${workspaceFolder}/scripts/rsl_rl/train.py",
+ "console": "integratedTerminal",
+ },
+ {
+ "name": "Python: Play Environment",
+ "type": "debugpy",
+ "request": "launch",
+ "args" : ["--task", "Template-Isaac-Velocity-Flat-Anymal-D-Play-v0", "--num_envs", "32"],
+ "program": "${workspaceFolder}/scripts/rsl_rl/play.py",
+ "console": "integratedTerminal",
+ },
+
+ // For script execution inside a Docker
+ {
+ "name": "Docker: Current File",
+ "type": "debugpy",
+ "request": "launch",
+ "program": "${file}",
+ "console": "integratedTerminal",
+ "env": {
+ "PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
+ }
+ },
+ {
+ "name": "Docker: Train Environment",
+ "type": "debugpy",
+ "request": "launch",
+ "args" : ["--task", "Template-Isaac-Velocity-Flat-Anymal-D-v0", "--num_envs", "4096", "--headless"],
+ "program": "${workspaceFolder}/scripts/rsl_rl/train.py",
+ "console": "integratedTerminal",
+ "env": {
+ "PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
+ }
+ },
+ {
+ "name": "Docker: Play Environment",
+ "type": "debugpy",
+ "request": "launch",
+ "args" : ["--task", "Template-Isaac-Velocity-Flat-Anymal-D-Play-v0", "--num_envs", "32"],
+ "program": "${workspaceFolder}/scripts/rsl_rl/play.py",
+ "console": "integratedTerminal",
+ "env": {
+ "PYTHONPATH": "${env:PYTHONPATH}:${workspaceFolder}"
+ }
+ }
+ ]
+}
diff --git a/.vscode/tools/settings.template.json b/.vscode/tools/settings.template.json
new file mode 100644
index 0000000000000000000000000000000000000000..5b97ac267e51170d6963ddc5d33f52504a43820b
--- /dev/null
+++ b/.vscode/tools/settings.template.json
@@ -0,0 +1,86 @@
+{
+ "files.associations": {
+ "*.tpp": "cpp",
+ "*.kit": "toml",
+ "*.rst": "restructuredtext"
+ },
+ "editor.rulers": [120],
+
+ // files to be ignored by the linter
+ "files.watcherExclude": {
+ "**/.git/objects/**": true,
+ "**/.git/subtree-cache/**": true,
+ "**/node_modules/**": true,
+ "**/_isaac_sim/**": true,
+ "**/_compiler/**": true
+ },
+ // Configuration for spelling checker
+ "spellright.language": [
+ "en-US-10-1."
+ ],
+ "spellright.documentTypes": [
+ "markdown",
+ "latex",
+ "plaintext",
+ "cpp",
+ "asciidoc",
+ "python",
+ "restructuredtext"
+ ],
+ "cSpell.words": [
+ "literalinclude",
+ "linenos",
+ "instanceable",
+ "isaacSim",
+ "jacobians",
+ "pointcloud",
+ "ridgeback",
+ "rllib",
+ "robomimic",
+ "teleoperation",
+ "xform",
+ "numpy",
+ "tensordict",
+ "flatcache",
+ "physx",
+ "dpad",
+ "gamepad",
+ "linspace",
+ "upsampled",
+ "downsampled",
+ "arange",
+ "discretization",
+ "trimesh",
+ "uninstanceable"
+ ],
+ // This enables python language server. Seems to work slightly better than jedi:
+ "python.languageServer": "Pylance",
+ // We use "black" as a formatter:
+ "python.formatting.provider": "black",
+ "python.formatting.blackArgs": ["--line-length", "120"],
+ // Use flake8 for linting
+ "python.linting.pylintEnabled": false,
+ "python.linting.flake8Enabled": true,
+ "python.linting.flake8Args": [
+ "--max-line-length=120"
+ ],
+ // Use docstring generator
+ "autoDocstring.docstringFormat": "google",
+ "autoDocstring.guessTypes": true,
+ // Python environment path
+ // note: the default interpreter is overridden when user selects a workspace interpreter
+ // in the status bar. For example, the virtual environment python interpreter
+ "python.defaultInterpreterPath": "",
+ // ROS distribution
+ "ros.distro": "noetic",
+ // Language specific settings
+ "[python]": {
+ "editor.tabSize": 4
+ },
+ "[restructuredtext]": {
+ "editor.tabSize": 2
+ },
+ // Python extra paths
+ // Note: this is filled up when vscode is set up for the first time
+ "python.analysis.extraPaths": []
+}
diff --git a/.vscode/tools/setup_vscode.py b/.vscode/tools/setup_vscode.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0170f9a8f901127344fffc3ca1a90ba35f8f372
--- /dev/null
+++ b/.vscode/tools/setup_vscode.py
@@ -0,0 +1,220 @@
+# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""This script sets up the vs-code settings for the Isaac Lab project.
+
+This script merges the python.analysis.extraPaths from the "{ISAACSIM_DIR}/.vscode/settings.json" file into
+the ".vscode/settings.json" file.
+
+This is necessary because Isaac Sim 2022.2.1 onwards does not add the necessary python packages to the python path
+when the "setup_python_env.sh" is run as part of the vs-code launch configuration.
+"""
+
+import argparse
+import os
+import pathlib
+import platform
+import re
+import sys
+
+PROJECT_DIR = pathlib.Path(__file__).parents[2]
+"""Path to the project directory."""
+
+try:
+ import isaacsim # noqa: F401
+
+ isaacsim_dir = os.environ.get("ISAAC_PATH", "")
+except ImportError:
+ # Create a parser to get the isaac-sim path
+ parser = argparse.ArgumentParser(description="Setup the VSCode settings for the project.")
+ parser.add_argument("--isaac_path", type=str, help="The absolute path to the Isaac Sim installation.")
+ args = parser.parse_args()
+
+ # parse the isaac-sim directory
+ isaacsim_dir = args.isaac_path
+ # check if the isaac-sim directory is provided
+ if not os.path.exists(isaacsim_dir):
+ raise FileNotFoundError(
+ f"Could not find the isaac-sim directory: {isaacsim_dir}. Please provide the correct path to the Isaac Sim"
+ " installation."
+ )
+except EOFError:
+ print("Unable to trigger EULA acceptance. This is likely due to the script being run in a non-interactive shell.")
+ print("Please run the script in an interactive shell to accept the EULA.")
+ print("Skipping the setup of the VSCode settings...")
+ sys.exit(0)
+
+# check if the isaac-sim directory exists
+if not os.path.exists(isaacsim_dir):
+ raise FileNotFoundError(
+ f"Could not find the isaac-sim directory: {isaacsim_dir}. There are two possible reasons for this:"
+ "\n\t1. The Isaac Sim directory does not exist as provided CLI path."
+        "\n\t2. The script could not import the 'isaacsim' package. This could be due to the 'isaacsim' package not being "
+ "installed in the Python environment.\n"
+ "\nPlease make sure that the Isaac Sim directory exists or that the 'isaacsim' package is installed."
+ )
+
+ISAACSIM_DIR = isaacsim_dir
+"""Path to the isaac-sim directory."""
+
+
+def overwrite_python_analysis_extra_paths(isaaclab_settings: str) -> str:
+ """Overwrite the python.analysis.extraPaths in the Isaac Lab settings file.
+
+ The extraPaths are replaced with the path names from the isaac-sim settings file that exists in the
+ "{ISAACSIM_DIR}/.vscode/settings.json" file.
+
+ If the isaac-sim settings file does not exist, the extraPaths are not overwritten.
+
+ Args:
+ isaaclab_settings: The settings string to use as template.
+
+ Returns:
+ The settings string with overwritten python analysis extra paths.
+ """
+ # isaac-sim settings
+ isaacsim_vscode_filename = os.path.join(ISAACSIM_DIR, ".vscode", "settings.json")
+
+ # we use the isaac-sim settings file to get the python.analysis.extraPaths for kit extensions
+ # if this file does not exist, we will not add any extra paths
+ if os.path.exists(isaacsim_vscode_filename):
+ # read the path names from the isaac-sim settings file
+ with open(isaacsim_vscode_filename) as f:
+ vscode_settings = f.read()
+ # extract the path names
+ # search for the python.analysis.extraPaths section and extract the contents
+ settings = re.search(
+ r"\"python.analysis.extraPaths\": \[.*?\]", vscode_settings, flags=re.MULTILINE | re.DOTALL
+ )
+ settings = settings.group(0)
+ settings = settings.split('"python.analysis.extraPaths": [')[-1]
+ settings = settings.split("]")[0]
+
+ # read the path names from the isaac-sim settings file
+ path_names = settings.split(",")
+ path_names = [path_name.strip().strip('"') for path_name in path_names]
+ path_names = [path_name for path_name in path_names if len(path_name) > 0]
+
+ # change the path names to be relative to the Isaac Lab directory
+ rel_path = os.path.relpath(ISAACSIM_DIR, PROJECT_DIR)
+ path_names = ['"${workspaceFolder}/' + rel_path + "/" + path_name + '"' for path_name in path_names]
+ else:
+ path_names = []
+ print(
+ f"[WARN] Could not find Isaac Sim VSCode settings: {isaacsim_vscode_filename}."
+ "\n\tThis will result in missing 'python.analysis.extraPaths' in the VSCode"
+ "\n\tsettings, which limits the functionality of the Python language server."
+ "\n\tHowever, it does not affect the functionality of the Isaac Lab project."
+ "\n\tWe are working on a fix for this issue with the Isaac Sim team."
+ )
+
+ # add the path names that are in the Isaac Lab extensions directory
+ isaaclab_extensions = os.listdir(os.path.join(PROJECT_DIR, "source"))
+ path_names.extend(['"${workspaceFolder}/source/' + ext + '"' for ext in isaaclab_extensions])
+
+ # combine them into a single string
+ path_names = ",\n\t\t".expandtabs(4).join(path_names)
+ # deal with the path separator being different on Windows and Unix
+ path_names = path_names.replace("\\", "/")
+
+ # replace the path names in the Isaac Lab settings file with the path names parsed
+ isaaclab_settings = re.sub(
+ r"\"python.analysis.extraPaths\": \[.*?\]",
+ '"python.analysis.extraPaths": [\n\t\t'.expandtabs(4) + path_names + "\n\t]".expandtabs(4),
+ isaaclab_settings,
+ flags=re.DOTALL,
+ )
+ # return the Isaac Lab settings string
+ return isaaclab_settings
+
+
+def overwrite_default_python_interpreter(isaaclab_settings: str) -> str:
+ """Overwrite the default python interpreter in the Isaac Lab settings file.
+
+ The default python interpreter is replaced with the path to the python interpreter used by the
+ isaac-sim project. This is necessary because the default python interpreter is the one shipped with
+ isaac-sim.
+
+ Args:
+ isaaclab_settings: The settings string to use as template.
+
+ Returns:
+ The settings string with overwritten default python interpreter.
+ """
+ # read executable name
+ python_exe = os.path.normpath(sys.executable)
+
+ # replace with Isaac Sim's python.sh or python.bat scripts to make sure python with correct
+ # source paths is set as default
+ if f"kit{os.sep}python{os.sep}bin{os.sep}python" in python_exe:
+ # Check if the OS is Windows or Linux to use appropriate shell file
+ if platform.system() == "Windows":
+ python_exe = python_exe.replace(f"kit{os.sep}python{os.sep}bin{os.sep}python3", "python.bat")
+ else:
+ python_exe = python_exe.replace(f"kit{os.sep}python{os.sep}bin{os.sep}python3", "python.sh")
+
+ # replace the default python interpreter in the Isaac Lab settings file with the path to the
+ # python interpreter in the Isaac Lab directory
+ isaaclab_settings = re.sub(
+ r"\"python.defaultInterpreterPath\": \".*?\"",
+ f'"python.defaultInterpreterPath": "{python_exe}"',
+ isaaclab_settings,
+ flags=re.DOTALL,
+ )
+ # return the Isaac Lab settings file
+ return isaaclab_settings
+
+
+def main():
+ # Isaac Lab template settings
+ isaaclab_vscode_template_filename = os.path.join(PROJECT_DIR, ".vscode", "tools", "settings.template.json")
+ # make sure the Isaac Lab template settings file exists
+ if not os.path.exists(isaaclab_vscode_template_filename):
+ raise FileNotFoundError(
+ f"Could not find the Isaac Lab template settings file: {isaaclab_vscode_template_filename}"
+ )
+ # read the Isaac Lab template settings file
+ with open(isaaclab_vscode_template_filename) as f:
+ isaaclab_template_settings = f.read()
+
+ # overwrite the python.analysis.extraPaths in the Isaac Lab settings file with the path names
+ isaaclab_settings = overwrite_python_analysis_extra_paths(isaaclab_template_settings)
+ # overwrite the default python interpreter in the Isaac Lab settings file with the path to the
+ # python interpreter used to call this script
+ isaaclab_settings = overwrite_default_python_interpreter(isaaclab_settings)
+
+ # add template notice to the top of the file
+ header_message = (
+ "// This file is a template and is automatically generated by the setup_vscode.py script.\n"
+ "// Do not edit this file directly.\n"
+ "// \n"
+ f"// Generated from: {isaaclab_vscode_template_filename}\n"
+ )
+ isaaclab_settings = header_message + isaaclab_settings
+
+ # write the Isaac Lab settings file
+ isaaclab_vscode_filename = os.path.join(PROJECT_DIR, ".vscode", "settings.json")
+ with open(isaaclab_vscode_filename, "w") as f:
+ f.write(isaaclab_settings)
+
+ # copy the launch.json file if it doesn't exist
+ isaaclab_vscode_launch_filename = os.path.join(PROJECT_DIR, ".vscode", "launch.json")
+ isaaclab_vscode_template_launch_filename = os.path.join(PROJECT_DIR, ".vscode", "tools", "launch.template.json")
+ if not os.path.exists(isaaclab_vscode_launch_filename):
+ # read template launch settings
+ with open(isaaclab_vscode_template_launch_filename) as f:
+ isaaclab_template_launch_settings = f.read()
+ # add header
+ header_message = header_message.replace(
+ isaaclab_vscode_template_filename, isaaclab_vscode_template_launch_filename
+ )
+ isaaclab_launch_settings = header_message + isaaclab_template_launch_settings
+ # write the Isaac Lab launch settings file
+ with open(isaaclab_vscode_launch_filename, "w") as f:
+ f.write(isaaclab_launch_settings)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..e2a2401c6cd103b4c446314666cf9251d75ad3ed
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+Developer Certificate of Origin Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+
+Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or
+
+(c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it.
+
+(d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved.
diff --git a/LICENCE b/LICENCE
new file mode 100644
index 0000000000000000000000000000000000000000..dd5b3a58aa1849f452abc9b5cd1638dc71a5e482
--- /dev/null
+++ b/LICENCE
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b358286fdbf9f7c245777db6681f0428a96056ca
--- /dev/null
+++ b/README.md
@@ -0,0 +1,88 @@
+# Isaac Lab Evaluation Tasks
+
+[](https://docs.isaacsim.omniverse.nvidia.com/latest/index.html)
+[](https://isaac-sim.github.io/IsaacLab/main/index.html)
+[](https://docs.python.org/3/whatsnew/3.11.html)
+[](https://releases.ubuntu.com/20.04/)
+[](https://github.com/isaac-sim/IsaacLab/actions/workflows/pre-commit.yaml)
+[](https://opensource.org/license/apache-2-0)
+
+## 📝 Overview
+
+This repository introduces two new industrial manipulation tasks designed in [Isaac Lab](https://isaac-sim.github.io/IsaacLab/main/index.html), enabling the simulation and evaluation of manipulation policies (e.g. [Isaac GR00T N1](https://github.com/NVIDIA/Isaac-GR00T/tree/n1-release)) using a humanoid robot. The tasks are designed to simulate realistic industrial scenarios, including Nut Pouring and Exhaust Pipe Sorting.
+It also provides benchmarking scripts for closed-loop evaluation of manipulation policies (e.g. Isaac GR00T N1) with post-trained checkpoints. These scripts enable developers to load prebuilt Isaac Lab environments and industrial tasks—such as nut pouring and pipe sorting—and run standardized benchmarks to quantitatively assess policy performance.
+
+## 📦 Installation
+
+For detailed installation instructions, see [Installation Guide](doc/installation.md).
+
+## 🛠️ Evaluation Tasks
+
+Two industrial tasks have been created in [Isaac Lab](https://isaac-sim.github.io/IsaacLab/main/index.html) to simulate robotic manipulation scenarios. The environments are set up with a humanoid robot (i.e. Fourier GR1-T2) positioned in front of several industrial objects on a table. This can include multi-step bi-manual tasks such as grasping, moving, sorting, or placing the objects into specific locations.
+
+The robot is positioned upright, facing the table with both arms slightly bent and hands open. A first-person-view monocular RGB camera is mounted on its head to cover the workspace.
+
+
+### Nut Pouring
+
+
+

+
The robot picks up a beaker containing metallic nuts, pours one nut into a bowl, and places the bowl on a scale.
+
+
+The task is defined as successful if the following criteria have been met:
+1. The sorting beaker is placed in the sorting bin
+2. The factory nut is in the sorting bowl
+3. The sorting bowl is placed on the sorting scale
+
+
+### Exhaust Pipe Sorting
+
+
+

+
The robot picks up the blue exhaust pipe, transfers it to the other hand, and places the pipe into the blue bin.
+
+
+The task is defined as successful if the following criterion has been met:
+
+1. The blue exhaust pipe is placed in the correct position
+
+
+## 📦 Downloading Datasets (Optional)
+
+For dataset information and download instructions, see [Datasets Guide](doc/datasets.md).
+
+## 🤖 Isaac GR00T N1 Policy Post Training (Optional)
+
+For detailed post-training instructions including data conversion and model fine-tuning, see [Post Training Guide](doc/post-training.md).
+
+
+## 📦 Downloading Checkpoints
+
+For information on available pre-trained checkpoints and download instructions, see [Checkpoints Guide](doc/checkpoints.md).
+
+## 📈 Policy Closed-loop Evaluation
+
+For detailed evaluation instructions including benchmarking features and performance results, see [Evaluation Guide](doc/evaluation.md).
+
+## Code formatting
+
+We have a pre-commit template to automatically format your code.
+To install pre-commit:
+
+```bash
+pip install pre-commit
+```
+
+Then you can run pre-commit with:
+
+```bash
+pre-commit run --all-files
+```
+
+## Troubleshooting
+
+For common issues and solutions, see [Troubleshooting Guide](doc/troubleshooting.md).
+
+## Contributing
+For more details, see [CONTRIBUTING.md](CONTIRBUTING.md)
diff --git a/datasets/apple_pick_place_annotated.hdf5 b/datasets/apple_pick_place_annotated.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..188bbb225b27b559de8c9458d8ca311fe65dbe79
--- /dev/null
+++ b/datasets/apple_pick_place_annotated.hdf5
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cdd75e7702c0cdfd89ddb700f7a499c524c07359de86327ed45f40d401cad146
+size 75382054
diff --git a/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000000.parquet b/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000000.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..2c2b437aaa43e945cad5dfa5e95e5208bda70513
--- /dev/null
+++ b/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000000.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11761bdb3d65d65b6ff05400746a5d6b2039d0a994fe4a0a52858cb12b4a459d
+size 76346
diff --git a/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000001.parquet b/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..1eaf555fa82023a518c84e0ad889168daf9f24ce
--- /dev/null
+++ b/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e63ba696a240161f86106fc0bf87b074c6621fc3dce2f7103301d09fd25761cb
+size 79495
diff --git a/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000002.parquet b/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000002.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..80b7cf8f8cd27277e57a1926ddc55cbe0805c9ad
--- /dev/null
+++ b/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000002.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:722cc4d69d0b42db83938187f9d251e91255016279bd37f49a696fc735c1ed21
+size 83052
diff --git a/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000003.parquet b/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000003.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ae9b5bd314b2154282e2df49acddbbe2f3ff9686
--- /dev/null
+++ b/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000003.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fa0c0a309d30b76e04e308dfc1f982ba7c730843060fac9417e3c74182f05e3
+size 71125
diff --git a/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000004.parquet b/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000004.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ed71cd9275c547b1360893b3c600075bbd3dca5b
--- /dev/null
+++ b/datasets/apple_pick_place_annotated/lerobot/data/chunk-000/episode_000004.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:713bad84326eb62219e3b06562a563e626f2e9c5c3e069c4bc488ae6dd85aced
+size 76048
diff --git a/datasets/apple_pick_place_annotated/lerobot/meta/episodes.jsonl b/datasets/apple_pick_place_annotated/lerobot/meta/episodes.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..b8e46ee062a025a44502849844b58db82e7d5240
--- /dev/null
+++ b/datasets/apple_pick_place_annotated/lerobot/meta/episodes.jsonl
@@ -0,0 +1,5 @@
+{"episode_index": 0, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 194}
+{"episode_index": 1, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 201}
+{"episode_index": 2, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 214}
+{"episode_index": 3, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 181}
+{"episode_index": 4, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 192}
diff --git a/datasets/apple_pick_place_annotated/lerobot/meta/info.json b/datasets/apple_pick_place_annotated/lerobot/meta/info.json
new file mode 100644
index 0000000000000000000000000000000000000000..ab1b146f834c83f58a58b860929fc51b01cf9292
--- /dev/null
+++ b/datasets/apple_pick_place_annotated/lerobot/meta/info.json
@@ -0,0 +1,158 @@
+{
+ "codebase_version": "v2.0",
+ "robot_type": "g1",
+ "total_episodes": 5,
+ "total_frames": 982,
+ "total_tasks": 2,
+ "total_videos": 5,
+ "total_chunks": 0,
+ "chunks_size": 1000,
+ "fps": 20,
+ "splits": {
+ "train": "0:100"
+ },
+ "data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
+ "video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
+ "features": {
+ "observation.images.ego_view": {
+ "dtype": "video",
+ "shape": [
+ 256,
+ 256,
+ 3
+ ],
+ "names": [
+ "height",
+ "width",
+ "channel"
+ ],
+ "video_info": {
+ "video.fps": 20.0,
+ "video.codec": "h264",
+ "video.pix_fmt": null,
+ "video.is_depth_map": false,
+ "has_audio": false
+ }
+ },
+ "observation.state": {
+ "dtype": "float64",
+ "shape": [
+ 28
+ ],
+ "names": [
+ "motor_0",
+ "motor_1",
+ "motor_2",
+ "motor_3",
+ "motor_4",
+ "motor_5",
+ "motor_6",
+ "motor_7",
+ "motor_8",
+ "motor_9",
+ "motor_10",
+ "motor_11",
+ "motor_12",
+ "motor_13",
+ "motor_14",
+ "motor_15",
+ "motor_16",
+ "motor_17",
+ "motor_18",
+ "motor_19",
+ "motor_20",
+ "motor_21",
+ "motor_22",
+ "motor_23",
+ "motor_24",
+ "motor_25",
+ "motor_26",
+ "motor_27"
+ ]
+ },
+ "action": {
+ "dtype": "float64",
+ "shape": [
+ 28
+ ],
+ "names": [
+ "motor_0",
+ "motor_1",
+ "motor_2",
+ "motor_3",
+ "motor_4",
+ "motor_5",
+ "motor_6",
+ "motor_7",
+ "motor_8",
+ "motor_9",
+ "motor_10",
+ "motor_11",
+ "motor_12",
+ "motor_13",
+ "motor_14",
+ "motor_15",
+ "motor_16",
+ "motor_17",
+ "motor_18",
+ "motor_19",
+ "motor_20",
+ "motor_21",
+ "motor_22",
+ "motor_23",
+ "motor_24",
+ "motor_25",
+ "motor_26",
+ "motor_27"
+ ]
+ },
+ "timestamp": {
+ "dtype": "float64",
+ "shape": [
+ 1
+ ]
+ },
+ "annotation.human.action.task_description": {
+ "dtype": "int64",
+ "shape": [
+ 1
+ ]
+ },
+ "annotation.human.action.valid": {
+ "dtype": "int64",
+ "shape": [
+ 1
+ ]
+ },
+ "episode_index": {
+ "dtype": "int64",
+ "shape": [
+ 1
+ ]
+ },
+ "task_index": {
+ "dtype": "int64",
+ "shape": [
+ 1
+ ]
+ },
+ "index": {
+ "dtype": "int64",
+ "shape": [
+ 1
+ ]
+ },
+ "next.reward": {
+ "dtype": "float64",
+ "shape": [
+ 1
+ ]
+ },
+ "next.done": {
+ "dtype": "bool",
+ "shape": [
+ 1
+ ]
+ }
+ }
+}
\ No newline at end of file
diff --git a/datasets/apple_pick_place_annotated/lerobot/meta/modality.json b/datasets/apple_pick_place_annotated/lerobot/meta/modality.json
new file mode 100644
index 0000000000000000000000000000000000000000..ed6e415d0ae74aa65d3283ee1f64b0f1f3651fbb
--- /dev/null
+++ b/datasets/apple_pick_place_annotated/lerobot/meta/modality.json
@@ -0,0 +1,51 @@
+{
+ "state": {
+ "left_arm": {
+ "original_key": "observation.state",
+ "start": 0,
+ "end": 7
+ },
+ "right_arm": {
+ "original_key": "observation.state",
+ "start": 7,
+ "end": 14
+ },
+ "left_hand": {
+ "original_key": "observation.state",
+ "start": 14,
+ "end": 21
+ },
+ "right_hand": {
+ "original_key": "observation.state",
+ "start": 21,
+ "end": 28
+ }
+ },
+ "action": {
+ "left_arm": {
+ "start": 0,
+ "end": 7
+ },
+ "right_arm": {
+ "start": 7,
+ "end": 14
+ },
+ "left_hand": {
+ "start": 14,
+ "end": 21
+ },
+ "right_hand": {
+ "start": 21,
+ "end": 28
+ }
+ },
+ "video": {
+ "ego_view": {
+ "original_key": "observation.images.ego_view"
+ }
+ },
+ "annotation": {
+ "human.action.task_description": {},
+ "human.validity": {}
+ }
+}
\ No newline at end of file
diff --git a/datasets/apple_pick_place_annotated/lerobot/meta/tasks.jsonl b/datasets/apple_pick_place_annotated/lerobot/meta/tasks.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..6272d85a1005bf45f200cd3fbe898311379e2248
--- /dev/null
+++ b/datasets/apple_pick_place_annotated/lerobot/meta/tasks.jsonl
@@ -0,0 +1,2 @@
+{"task_index": 1, "task": "valid"}
+{"task_index": 5, "task": "Pick up the apple and place it on the plate."}
diff --git a/datasets/apple_pick_place_generated_small.hdf5 b/datasets/apple_pick_place_generated_small.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..2d120eb24abd1a9b45468a2ed2d3c84680f19353
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small.hdf5
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:606a51ec98856f200d952da51f61c437c5acc3571f64a7a1bc2192d1c7560197
+size 235878882
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000000.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000000.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..6644a84305715d62be136bd1f50776a4d41a6a24
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000000.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:128026bb0e51ae2819fa4c35f11a726932b621a9a8b0244106dc4385e293e955
+size 77806
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000001.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..e82f82c91e0be05a3c84fa85e53d0d5dfb7a5410
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a0b3a214b458ffb02d07f24826ddbcbf946113d1ffd11e50faafcd6ca628e6d
+size 77758
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000002.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000002.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..67245cf2dae3c3649d908e8c9a246feea6cb56cf
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000002.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0345813f04eabaadec102aefd853da4276ed43093f91f7e9e06069b151e222e0
+size 80867
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000003.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000003.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b860383629369ee064e3041e095e03eb7b82d29e
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000003.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a5165345ea31edaffffb6225dbeafbc8a0e25c50254ca99e5ceefdb40fa1518
+size 80906
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000004.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000004.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ec2c1c4ca48c61692f564a49b2d484c02b283c99
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000004.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:322d874dc1a5e3392c452d547a6117435a8612236abd5dba16a70ccbaa4b62a3
+size 84703
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000005.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000005.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..bece62ecd09175d790388d1ffba59a685f904749
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000005.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:301e1dcca6d872dfd42aafed64835daf25b3243cd018f102bd80616318b0ba42
+size 77764
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000006.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000006.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f449bcd98a3ede9ff10c53d804345ec661debd4f
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000006.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7cac628b4a07db1445547bf72d80c42ce2cf917c06602404153077afcbab62f
+size 80899
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000007.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000007.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..58eb84fb4f352d75a5b89ea0df63da189581b6e7
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000007.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8dc81dbe63cba23b7ceb9378cd9f80829759bd6d2140078b831460e2951cbedc
+size 80847
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000008.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000008.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..9b7065b5e10715b6ab10547fcba3e4d27241521b
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000008.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1685d6486e7f0013dc9fe13cb732092be73f8e1f6153785a47fbe3ad53e74145
+size 77696
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000009.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000009.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..e1f6395cbcc6a7e4b2b95a2b42e4fce6c1841112
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000009.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e1ba4e767b900611f7d12663745707a087806d8eb283d4f9aa97d2a4d1db05a
+size 77732
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000010.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000010.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..345f728ddb329e68bed5dff15da9b48af5f4f4b1
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000010.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11fd792c98e98dbee1cd4ff1f6f01cb2f42e99e5022dcddd9fef3f9432fd8cab
+size 77712
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000011.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000011.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b99d92a103a4023d99202a88ae2f62fe3ca63f4d
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000011.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ed7217a4747a87c705ca202a6314105b0a514e5a5e3e1545b873949e739db4c
+size 77724
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000012.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000012.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..512336a68443c014692ae0b9b3e414c387315d69
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000012.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78cd97fdba9707da9a7dd110d9fb49c2d32bdcbfca1f21f00eeddc9de59cdb6f
+size 77859
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000013.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..c72459ab256c106aeb24b67e3b6f0b0206e8889d
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aac73484946c98a6e6d2ae6da3da673e56d1286a03002880add9dee9093b38ec
+size 77782
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000014.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000014.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..03f199a554f28c673823add9ce5872351d305c72
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000014.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d707b90baf50a03626c6f39c0fc346fba1b202d30e9d702de7d4ee2e59d07490
+size 77796
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000015.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000015.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..5343504215b8d0712298d75186ed76638ff04bf6
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000015.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9508065db0b743b1a53cc1ddfed97a1466576706adedfd08bc36fca27366b89
+size 77742
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000016.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d91c9125c7f48bb1f35aef2ad2fa9446aacfdebe
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bd4c0c1099b579e5cb6e85eba78cd188304a0509ef558b87f8b4367a0c70c6b
+size 80857
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000017.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000017.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..53701771927e56f6a071afe5c9b6e7189df83441
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000017.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07ac24e6c037af51c5c77f2e9f92d3ca4b2b549f060c548a9baf53ab348cd4a7
+size 80919
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000018.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000018.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b78cb71cd248d9b46b61259b4ba8ba4ec949a718
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000018.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98a566a7ecbe3b564b8832642662b0082da65794124d0f30b161e307d920214a
+size 80976
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000019.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000019.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..3e47397104e3f050b267e66360094cfc29197f49
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000019.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbe49234ba52f02d9b28e2f52e8946d9310b57db9e8b334e978f09d068a33153
+size 80839
diff --git a/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000020.parquet b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000020.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..c6f0ad328fac66ffef07e063370d8618fdf986a6
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/data/chunk-000/episode_000020.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ce903c02f41deec03464a1b6c59616cf1d61d88a11cd1d052ce8f99493d4621
+size 80905
diff --git a/datasets/apple_pick_place_generated_small/lerobot/meta/episodes.jsonl b/datasets/apple_pick_place_generated_small/lerobot/meta/episodes.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..dc183010c915c9342ec04e5ccc8b07f9518ac9d7
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/meta/episodes.jsonl
@@ -0,0 +1,21 @@
+{"episode_index": 0, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 198}
+{"episode_index": 1, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 198}
+{"episode_index": 2, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 205}
+{"episode_index": 3, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 205}
+{"episode_index": 4, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 218}
+{"episode_index": 5, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 198}
+{"episode_index": 6, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 205}
+{"episode_index": 7, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 205}
+{"episode_index": 8, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 198}
+{"episode_index": 9, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 198}
+{"episode_index": 10, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 198}
+{"episode_index": 11, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 198}
+{"episode_index": 12, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 198}
+{"episode_index": 13, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 198}
+{"episode_index": 14, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 198}
+{"episode_index": 15, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 198}
+{"episode_index": 16, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 205}
+{"episode_index": 17, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 205}
+{"episode_index": 18, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 205}
+{"episode_index": 19, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 205}
+{"episode_index": 20, "tasks": ["valid", "Pick up the apple and place it on the plate."], "length": 205}
diff --git a/datasets/apple_pick_place_generated_small/lerobot/meta/info.json b/datasets/apple_pick_place_generated_small/lerobot/meta/info.json
new file mode 100644
index 0000000000000000000000000000000000000000..142ca975e4ca87f31eb5bbf3bfc4bf938a502497
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/meta/info.json
@@ -0,0 +1,158 @@
+{
+ "codebase_version": "v2.0",
+ "robot_type": "g1",
+ "total_episodes": 21,
+ "total_frames": 4241,
+ "total_tasks": 2,
+ "total_videos": 21,
+ "total_chunks": 0,
+ "chunks_size": 1000,
+ "fps": 20,
+ "splits": {
+ "train": "0:100"
+ },
+ "data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
+ "video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
+ "features": {
+ "observation.images.ego_view": {
+ "dtype": "video",
+ "shape": [
+ 256,
+ 256,
+ 3
+ ],
+ "names": [
+ "height",
+ "width",
+ "channel"
+ ],
+ "video_info": {
+ "video.fps": 20.0,
+ "video.codec": "h264",
+ "video.pix_fmt": null,
+ "video.is_depth_map": false,
+ "has_audio": false
+ }
+ },
+ "observation.state": {
+ "dtype": "float64",
+ "shape": [
+ 28
+ ],
+ "names": [
+ "motor_0",
+ "motor_1",
+ "motor_2",
+ "motor_3",
+ "motor_4",
+ "motor_5",
+ "motor_6",
+ "motor_7",
+ "motor_8",
+ "motor_9",
+ "motor_10",
+ "motor_11",
+ "motor_12",
+ "motor_13",
+ "motor_14",
+ "motor_15",
+ "motor_16",
+ "motor_17",
+ "motor_18",
+ "motor_19",
+ "motor_20",
+ "motor_21",
+ "motor_22",
+ "motor_23",
+ "motor_24",
+ "motor_25",
+ "motor_26",
+ "motor_27"
+ ]
+ },
+ "action": {
+ "dtype": "float64",
+ "shape": [
+ 28
+ ],
+ "names": [
+ "motor_0",
+ "motor_1",
+ "motor_2",
+ "motor_3",
+ "motor_4",
+ "motor_5",
+ "motor_6",
+ "motor_7",
+ "motor_8",
+ "motor_9",
+ "motor_10",
+ "motor_11",
+ "motor_12",
+ "motor_13",
+ "motor_14",
+ "motor_15",
+ "motor_16",
+ "motor_17",
+ "motor_18",
+ "motor_19",
+ "motor_20",
+ "motor_21",
+ "motor_22",
+ "motor_23",
+ "motor_24",
+ "motor_25",
+ "motor_26",
+ "motor_27"
+ ]
+ },
+ "timestamp": {
+ "dtype": "float64",
+ "shape": [
+ 1
+ ]
+ },
+ "annotation.human.action.task_description": {
+ "dtype": "int64",
+ "shape": [
+ 1
+ ]
+ },
+ "annotation.human.action.valid": {
+ "dtype": "int64",
+ "shape": [
+ 1
+ ]
+ },
+ "episode_index": {
+ "dtype": "int64",
+ "shape": [
+ 1
+ ]
+ },
+ "task_index": {
+ "dtype": "int64",
+ "shape": [
+ 1
+ ]
+ },
+ "index": {
+ "dtype": "int64",
+ "shape": [
+ 1
+ ]
+ },
+ "next.reward": {
+ "dtype": "float64",
+ "shape": [
+ 1
+ ]
+ },
+ "next.done": {
+ "dtype": "bool",
+ "shape": [
+ 1
+ ]
+ }
+ }
+}
\ No newline at end of file
diff --git a/datasets/apple_pick_place_generated_small/lerobot/meta/modality.json b/datasets/apple_pick_place_generated_small/lerobot/meta/modality.json
new file mode 100644
index 0000000000000000000000000000000000000000..ed6e415d0ae74aa65d3283ee1f64b0f1f3651fbb
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/meta/modality.json
@@ -0,0 +1,51 @@
+{
+ "state": {
+ "left_arm": {
+ "original_key": "observation.state",
+ "start": 0,
+ "end": 7
+ },
+ "right_arm": {
+ "original_key": "observation.state",
+ "start": 7,
+ "end": 14
+ },
+ "left_hand": {
+ "original_key": "observation.state",
+ "start": 14,
+ "end": 21
+ },
+ "right_hand": {
+ "original_key": "observation.state",
+ "start": 21,
+ "end": 28
+ }
+ },
+ "action": {
+ "left_arm": {
+ "start": 0,
+ "end": 7
+ },
+ "right_arm": {
+ "start": 7,
+ "end": 14
+ },
+ "left_hand": {
+ "start": 14,
+ "end": 21
+ },
+ "right_hand": {
+ "start": 21,
+ "end": 28
+ }
+ },
+ "video": {
+ "ego_view": {
+ "original_key": "observation.images.ego_view"
+ }
+ },
+ "annotation": {
+ "human.action.task_description": {},
+ "human.validity": {}
+ }
+}
\ No newline at end of file
diff --git a/datasets/apple_pick_place_generated_small/lerobot/meta/tasks.jsonl b/datasets/apple_pick_place_generated_small/lerobot/meta/tasks.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..6272d85a1005bf45f200cd3fbe898311379e2248
--- /dev/null
+++ b/datasets/apple_pick_place_generated_small/lerobot/meta/tasks.jsonl
@@ -0,0 +1,2 @@
+{"task_index": 1, "task": "valid"}
+{"task_index": 5, "task": "Pick up the apple and place it on the plate."}
diff --git a/doc/checkpoints.md b/doc/checkpoints.md
new file mode 100644
index 0000000000000000000000000000000000000000..2f97b0f30a79fdf4a88ffe3bb23591f0113cf071
--- /dev/null
+++ b/doc/checkpoints.md
@@ -0,0 +1,23 @@
+# Downloading Checkpoints
+
+We post-trained the Isaac GR00T N1 policy using the datasets described in [datasets.md](datasets.md), and the finetuned checkpoints are available for download.
+
+## Available Checkpoints
+
+- [GR00T-N1-2B-tuned-Nut-Pouring-task](https://huggingface.co/nvidia/GR00T-N1-2B-tuned-Nut-Pouring-task)
+- [GR00T-N1-2B-tuned-Exhaust-Pipe-Sorting-task](https://huggingface.co/nvidia/GR00T-N1-2B-tuned-Exhaust-Pipe-Sorting-task)
+
+## Download Instructions
+
+Make sure you have registered your Hugging Face account and have read-access token ready.
+
+```bash
+# Provide your access token with read permission
+huggingface-cli login
+
+export CKPT="nvidia/GR00T-N1-2B-tuned-Nut-Pouring-task"
+# Or, to use the other checkpoint, uncomment the next line:
+# export CKPT="nvidia/GR00T-N1-2B-tuned-Exhaust-Pipe-Sorting-task"
+# Define the path to save the checkpoints as CKPT_LOCAL_DIR
+huggingface-cli download --resume-download $CKPT --local-dir $CKPT_LOCAL_DIR
+```
diff --git a/doc/datasets.md b/doc/datasets.md
new file mode 100644
index 0000000000000000000000000000000000000000..3120da72f2c71ad741d8fa189359ff06e7b43d46
--- /dev/null
+++ b/doc/datasets.md
@@ -0,0 +1,49 @@
+# Downloading Datasets
+
+The finetuning datasets are generated with Synthetic Manipulation Motion Generation (SMMG), utilizing tools including
+GR00T-Teleop and Mimic on the Isaac Lab simulation environment. More details on how the datasets are generated can be found in [Isaac Lab Teleop & IL](https://isaac-sim.github.io/IsaacLab/main/source/overview/teleop_imitation.html).
+
+## Available Datasets
+
+Datasets are hosted on Hugging Face as listed below.
+
+[nvidia/PhysicalAI-GR00T-Tuned-Tasks: Nut Pouring](https://huggingface.co/datasets/nvidia/PhysicalAI-GR00T-Tuned-Tasks/tree/main/Nut-Pouring-task)
+
+[nvidia/PhysicalAI-GR00T-Tuned-Tasks: Exhaust-Pipe-Sorting](https://huggingface.co/datasets/nvidia/PhysicalAI-GR00T-Tuned-Tasks/tree/main/Exhaust-Pipe-Sorting-task)
+
+You can download the GR00T-Lerobot format dataset ready for post training, or the original Mimic-generated HDF5 for data conversion.
+
+## Download Instructions
+
+Make sure you have registered your Hugging Face account and have read-access token ready.
+
+```bash
+# Provide your access token with read permission
+huggingface-cli login
+
+export DATASET="nvidia/PhysicalAI-GR00T-Tuned-Tasks"
+# Define the path to save the datasets as DATASET_ROOT_DIR
+huggingface-cli download --repo-type dataset --resume-download $DATASET --local-dir $DATASET_ROOT_DIR
+
+```
+
+## Dataset Structure
+
+`DATASET_ROOT_DIR` is the path to the directory where you want to store those assets as below.
+
+
+
+📂 PhysicalAI-GR00T-Tuned-Tasks
+├── 📂 Exhaust-Pipe-Sorting-task
+│ ├── 📂 data
+│ ├── 📂 meta
+│ └── 📂 videos
+├── exhaust_pipe_sorting_task.hdf5
+├── 📂 Nut-Pouring-task
+│ ├── 📂 data
+│ ├── 📂 meta
+│ └── 📂 videos
+├── nut_pouring_task.hdf5
+└── README.md
+
+
diff --git a/doc/evaluation.md b/doc/evaluation.md
new file mode 100644
index 0000000000000000000000000000000000000000..03f6b15a35c6d19d14069f1e27d29bf61da520b4
--- /dev/null
+++ b/doc/evaluation.md
@@ -0,0 +1,107 @@
+# Policy Closed-loop Evaluation
+
+You can deploy the post-trained GR00T N1 policy for closed-loop control of the GR1 robot within an Isaac Lab environment, and benchmark its success rate in parallel runs.
+
+## Benchmarking Features
+
+### 🚀 Parallelized Evaluation:
+Isaac Lab supports parallelized environment instances for scalable benchmarking. Configure multiple parallel runs (e.g., 10–100 instances) to statistically quantify policy success rates under varying initial conditions.
+
+
+
+
+ 
+ Nut Pouring
+ |
+
+ 
+ Exhaust Pipe Sorting
+ |
+
+
+
+### ✅ Success Metrics:
+- Task Completion: Binary success/failure based on object placement accuracy defined in the [evaluation tasks](#️-evaluation-tasks). Success rates are logged in the terminal per episode as:
+
+```bash
+==================================================
+Successful trials: 9, out of 10 trials
+Success rate: 0.9
+==================================================
+```
+The summary report, saved as a JSON file, looks like:
+
+{
+    "metadata": {
+        "checkpoint_name": "gr00t-n1-2b-tuned",
+        "seed": 10,
+        "date": "2025-05-20 16:42:54"
+    },
+    "summary": {
+        "successful_trials": 91,
+        "total_rollouts": 100,
+        "success_rate": 0.91
+    }
+}
+
+## Running Evaluation
+
+### Nut Pouring Task
+
+To run parallel evaluation on the Nut Pouring task:
+
+```bash
+# Within IsaacLabEvalTasks directory
+# Assume the post-trained policy checkpoints are under CKPTS_PATH
+# Please use full path, instead of relative path for CKPTS_PATH
+# export EVAL_RESULTS_FNAME="./eval_nutpouring.json"
+python scripts/evaluate_gn1.py \
+ --num_feedback_actions 16 \
+ --num_envs 10 \
+ --task_name nutpouring \
+ --eval_file_path $EVAL_RESULTS_FNAME \
+ --model_path $CKPTS_PATH \
+ --rollout_length 30 \
+ --seed 10 \
+ --max_num_rollouts 100
+```
+
+### Exhaust Pipe Sorting Task
+
+To run parallel evaluation on the Exhaust Pipe Sorting task:
+
+```bash
+# Assume the post-trained policy checkpoints are under CKPTS_PATH
+# Please use full path, instead of relative path for CKPTS_PATH
+# export EVAL_RESULTS_FNAME="./eval_pipesorting.json"
+python scripts/evaluate_gn1.py \
+ --num_feedback_actions 16 \
+ --num_envs 10 \
+ --task_name pipesorting \
+ --eval_file_path $EVAL_RESULTS_FNAME \
+ --checkpoint_name gr00t-n1-2b-tuned-pipesorting \
+ --model_path $CKPTS_PATH \
+ --rollout_length 20 \
+ --seed 10 \
+ --max_num_rollouts 100
+```
+
+## Performance Results
+
+We report the success rate of evaluating the tuned GR00T N1 policy over 200 trials, with random seed=15.
+
+| Evaluation Task | SR |
+|----------------------|----------|
+| Nut Pouring | 91% |
+| Exhaust Pipe Sorting | 95% |
+
+## Tips and Best Practices
+
+💡 **Tip:**
+1. Hardware requirement: Please follow the system requirements in [Isaac Sim](https://docs.isaacsim.omniverse.nvidia.com/latest/installation/requirements.html#system-requirements) and [Isaac GR00T](https://github.com/NVIDIA/Isaac-GR00T/tree/n1-release?tab=readme-ov-file#3-fine-tuning) to choose suitable hardware. The above evaluation results were reported on RTX A6000 Ada, Ubuntu 22.04.
+
+2. `num_feedback_actions` determines the number of feedback actions to execute per inference, and it can be less than `action_horizon`. This option will impact the success rate of the evaluation task even with the same checkpoint.
+
+3. `rollout_length` determines how many batched inferences to make before task termination. Normally we set it between 20 and 30 for a faster turnaround.
+
+4. `num_envs` decides the number of environments to run in parallel. Too many parallel environments (e.g. >100 on RTX A6000 Ada) will significantly slow down the UI rendering. We recommend setting it between 10 and 30 for smooth rendering and efficient benchmarking.
diff --git a/doc/installation.md b/doc/installation.md
new file mode 100644
index 0000000000000000000000000000000000000000..68eab5e9cc065f616d3a0c020c3d4b87631324fc
--- /dev/null
+++ b/doc/installation.md
@@ -0,0 +1,39 @@
+# Installation
+
+## Pre-requisites
+- For [Policy Closed-loop Evaluation](#-policy-closed-loop-evaluation), we have tested on Ubuntu 22.04, GPU: L40, RTX 4090 and A6000 Ada, and Python==3.11, CUDA version 12.8. DGX Spark and Blackwell are not yet supported when running [GR00T](https://github.com/NVIDIA/Isaac-GR00T) models.
+- For [Policy Post Training](#post-training), see [GR00T-N1 pre-requisites](https://github.com/NVIDIA/Isaac-GR00T?tab=readme-ov-file#prerequisites)
+- Please make sure you have the following dependencies installed in your system: `ffmpeg`, `libsm6`, `libxext6`
+
+## Setup Development Environment
+- Install Isaac Lab by following the [installation guide](https://isaac-sim.github.io/IsaacLab/main/source/setup/installation/index.html). We recommend using the conda installation as it simplifies calling Python scripts from the terminal.
+
+- Clone this repository separately from the Isaac Lab installation (i.e. outside the `IsaacLab` directory):
+
+```bash
+git clone --recurse-submodules git@github.com:isaac-sim/IsaacLabEvalTasks.git
+```
+
+- Using a python interpreter or conda/virtual env that has Isaac Lab installed, install the library required by [Isaac GR00T N1](https://github.com/NVIDIA/Isaac-GR00T/tree/n1-release)
+
+```bash
+# Within IsaacLabEvalTasks directory
+cd submodules/Isaac-GR00T
+pip install --upgrade setuptools
+pip install -e .[base]
+pip install --no-build-isolation flash-attn==2.7.1.post4
+export PYTHONPATH=$PYTHONPATH:$INSTALL_DIR/IsaacLabEvalTasks/submodules/Isaac-GR00T
+```
+
+- Verify that the GR00T deps are correctly installed by running the following command:
+
+```bash
+python -c "import gr00t; print('gr00t imported successfully')"
+```
+
+- Using a python interpreter or conda/virtual env that has Isaac Lab installed, install the library of Evaluation Tasks
+
+```bash
+# Within IsaacLabEvalTasks directory
+python -m pip install -e source/isaaclab_eval_tasks
+```
diff --git a/doc/post-training.md b/doc/post-training.md
new file mode 100644
index 0000000000000000000000000000000000000000..c3da6ca65b673c9b15f2edab70d2a38b4ad30e6f
--- /dev/null
+++ b/doc/post-training.md
@@ -0,0 +1,92 @@
+# Post Training
+
+[GR00T N1](https://github.com/NVIDIA/Isaac-GR00T/tree/n1-release?tab=readme-ov-file#nvidia-isaac-gr00t-n1) is a foundation model for generalized humanoid robot reasoning and skills, trained on an extensive multimodal dataset that includes real-world, synthetic, and internet-scale data. The model is designed for cross-embodiment generalization and can be efficiently adapted to new robot embodiments, tasks, and environments through post training.
+
+We followed the recommended GR00T N1 post training workflow to adapt the model for the Fourier GR1 robot, targeting two industrial manipulation tasks: nut pouring and exhaust pipe sorting. The process involves multiple steps introduced below. You can also skip to the next section [Downloading Checkpoints](#downloading-checkpoints) to get post-trained checkpoints.
+
+## Data Conversion
+
+The process involved converting demonstration data (Mimic-generated motion trajectories in HDF5) into the LeRobot-compatible schema ([GR00T-Lerobot format guidelines](https://github.com/NVIDIA/Isaac-GR00T/blob/n1-release/getting_started/LeRobot_compatible_data_schema.md)).
+
+
+- Using a python interpreter or conda/virtual env that has Isaac Lab, GR00T and Evaluation Tasks installed, convert Mimic-generated trajectories by
+
+```bash
+# Example: Set `task_name` Based on Task
+# Nut Pouring
+export TASK_NAME="nutpouring"
+# Uncomment the below when Task is Exhaust Pipe Sorting
+# export TASK_NAME="pipesorting"
+
+# Within IsaacLabEvalTasks directory
+# DATASET_ROOT_DIR is directory of where Mimic-generated HDF5 is saved locally
+python scripts/convert_hdf5_to_lerobot.py --task_name $TASK_NAME --data_root $DATASET_ROOT_DIR
+```
+
+The GR00T-LeRobot-compatible datasets will be available in `DATASET_ROOT_DIR`.
+
+
+
+📂 PhysicalAI-GR00T-Tuned-Tasks
+├── exhaust_pipe_sorting_task.hdf5
+├── 📂 nut_pouring_task
+│ └── 📂 lerobot
+│ ├── 📂 data
+│ │ └── chunk-000
+│ ├── 📂 meta
+│ │ ├── episodes.jsonl
+│ │ ├── info.json
+│ │ ├── modality.json
+│ │ └── tasks.jsonl
+│ └── 📂videos
+│ └── chunk-000
+├── nut_pouring_task.hdf5
+└── README.md
+
+
+
+### Adapting to other embodiments & datasets
+
+During data collection, the lower body of the GR1 humanoid is fixed, and the upper body performs tabletop manipulation
+tasks. The ordered sets of joints observed in simulation ([i.e. robot states from Isaac Lab](scripts/config/gr1/state_joint_space.yaml)) and commanded in simulation ([i.e. robot actions from Isaac Lab](scripts/config/gr1/action_joint_space.yaml)) are included. During policy post training and inference, only non-mimic joints in the upper body, i.e. arms and hands, are captured by the policy's observations and predictions. The ordered set of joints observed and commanded in policy ([i.e. robot joints from GR00T N1](scripts/config/gr00t/gr00t_joint_space.yaml)) are specified for data conversion remapping.
+
+GR00T-Lerobot schema also requires [additional metadata](https://github.com/NVIDIA/Isaac-GR00T/blob/n1-release/getting_started/LeRobot_compatible_data_schema.md#meta). We include them ([info.json](scripts/config/gr00t/info.json), [modality.json](scripts/config/gr00t/modality.json)) as templates to facilitate conversion. If you are working with other embodiments and data configurations, please modify them accordingly.
+
+If you are interested in leveraging this tool for other tasks, please change the task metadata in `EvalTaskConfig` defined in the [configuration](scripts/config/args.py). The `TASK_NAME` is associated with the pre-defined task description in [`Gr00tN1DatasetConfig`](scripts/config/args.py) class. The task_index indicates the index associated with language description, and 1 is reserved for data validity check, following GR00T-N1 guidelines. You may want to add other indices for your self-defined task. More manipulation tasks are coming soon!
+
+## Post Training
+
+We finetuned the pre-trained [GR00T-N1-2B policy](https://huggingface.co/nvidia/GR00T-N1-2B) on these two task-specific datasets. We provided the configurations with which we obtained the above checkpoints. With one node of H100s,
+
+```bash
+# Within IsaacLabEvalTasks directory
+cd submodules/Isaac-GR00T
+# Provide the directory where the GR00T-Lerobot data is stored as DATASET_PATH
+# Please use full path, instead of relative path
+# Nut pouring
+# E.g. export DATASET_PATH=/home/data/PhysicalAI-GR00T-Tuned-Tasks/nut_pouring_task/lerobot
+# Exhaust pipe sorting
+# E.g. export DATASET_PATH=/home/data/PhysicalAI-GR00T-Tuned-Tasks/Exhaust-Pipe-Sorting-task/lerobot
+python scripts/gr00t_finetune.py \
+ --dataset_path=${DATASET_PATH} \
+ --output_dir=${OUTPUT_DIR} \
+ --data_config=gr1_arms_only \
+ --batch_size=96 \
+ --max_steps=20000 \
+ --num_gpus=8 \
+ --save_steps=5000 \
+ --base_model_path=nvidia/GR00T-N1-2B \
+ --no_tune_llm \
+ --tune_visual \
+ --tune_projector \
+ --tune_diffusion_model \
+ --no-resume \
+ --dataloader_num_workers=16 \
+ --report_to=wandb \
+ --embodiment_tag=gr1
+```
+💡 **Tip:**
+
+1. Tuning the visual backbone, action projector and diffusion model generally yields smaller trajectory errors (MSE) and higher closed-loop success rates.
+
+2. If you prefer tuning with less powerful GPUs, please follow the [reference guidelines](https://github.com/NVIDIA/Isaac-GR00T/tree/n1-release?tab=readme-ov-file#3-fine-tuning) about other finetuning options.
diff --git a/doc/troubleshooting.md b/doc/troubleshooting.md
new file mode 100644
index 0000000000000000000000000000000000000000..8982f6dcc84972b74eda73e066aa1c8747368f93
--- /dev/null
+++ b/doc/troubleshooting.md
@@ -0,0 +1,32 @@
+# Troubleshooting
+
+## Pip package version mismatch
+
+If you observe any of the following during [installation of GR00T](installation.md), you can ignore those errors.
+The GR00T policy runs on an older version of torch library with flash attention, and all other tools in this repository do not require
+torch>=2.7. Thus we downgrade the torch and related software to support GR00T inference. Mimic-related data generation workflows are not impacted.
+
+ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
+isaaclab 0.37.2 requires trimesh, which is not installed.
+dex-retargeting 0.4.6 requires lxml>=5.2.2, which is not installed.
+dex-retargeting 0.4.6 requires trimesh>=4.4.0, which is not installed.
+isaaclab-tasks 0.10.31 requires torch>=2.7, but you have torch 2.5.1 which is incompatible.
+isaacsim-kernel 5.0.0 requires wrapt==1.16.0, but you have wrapt 1.14.1 which is incompatible.
+isaaclab-rl 0.2.0 requires pillow==11.0.0, but you have pillow 11.2.1 which is incompatible.
+isaaclab-rl 0.2.0 requires torch>=2.7, but you have torch 2.5.1 which is incompatible.
+isaaclab 0.37.2 requires pillow==11.0.0, but you have pillow 11.2.1 which is incompatible.
+isaaclab 0.37.2 requires starlette==0.46.0, but you have starlette 0.45.3 which is incompatible.
+isaaclab 0.37.2 requires torch>=2.7, but you have torch 2.5.1 which is incompatible.
+isaacsim-core 5.0.0 requires torch==2.7.0, but you have torch 2.5.1 which is incompatible.
+
+
+## Running on DGX Spark and Blackwell GPUs
+
+Unfortunately, due to limited support of flash attention module (by May 2025), GR00T policy is not supported on DGX Spark and Blackwell GPUs. However you can run Mimic-related data generation workflows and GR00T-Lerobot data conversion on Blackwell. Blackwell support is coming soon.
+
+## Running evaluation on Multiple GPUs
+
+For rendering, please refer to the [Omniverse Developer Guideline](https://docs.omniverse.nvidia.com/dev-guide/latest/linux-troubleshooting.html#q9-how-to-specify-what-gpus-to-run-omniverse-apps-on) for setting single-gpu mode or multi-gpu mode of Isaac Sim. For physics, we suggest running the evaluation on the CPU,
+set by `simulation_device` in evaluation.
+
+However, GR00T N1 policy only supports single-GPU inference (by May 2025). We have not tested on multi-GPU inference.
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..eff23e40edf73fc06b34f2b1ceff5fa4b1748985
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,85 @@
+[build-system]
+requires = ["setuptools", "toml"]
+build-backend = "setuptools.build_meta"
+
+[tool.isort]
+
+atomic = true
+profile = "black"
+line_length = 120
+py_version = 310
+skip_glob = ["docs/*", "logs/*"]
+group_by_package = true
+
+sections = [
+ "FUTURE",
+ "STDLIB",
+ "THIRDPARTY",
+ "ISAACLABPARTY",
+ "FIRSTPARTY",
+ "LOCALFOLDER",
+]
+extra_standard_library = [
+ "numpy",
+ "h5py",
+ "open3d",
+ "torch",
+ "tensordict",
+ "bpy",
+ "matplotlib",
+ "gymnasium",
+ "gym",
+ "scipy",
+ "hid",
+ "yaml",
+ "prettytable",
+ "toml",
+ "trimesh",
+ "tqdm",
+ "psutil",
+]
+known_thirdparty = [
+ "isaacsim.core.api",
+ "omni.replicator.core",
+ "pxr",
+ "omni.kit.*",
+ "warp",
+ "carb",
+ "Semantics",
+]
+known_isaaclabparty = [
+ "isaaclab",
+ "isaaclab_tasks",
+ "isaaclab_assets",
+ "isaaclab_mimic",
+ "isaaclab_rl"
+]
+
+# Modify the following to include the package names of your first-party code
+known_firstparty = "isaaclab_eval_tasks"
+known_local_folder = "config"
+
+[tool.pyright]
+
+exclude = [
+ "**/__pycache__",
+ "**/docs",
+ "**/logs",
+ ".git",
+ ".vscode",
+]
+
+typeCheckingMode = "basic"
+pythonVersion = "3.10"
+pythonPlatform = "Linux"
+enableTypeIgnoreComments = true
+
+# This is required as the CI pre-commit does not download the module (i.e. numpy, torch, prettytable)
+# Therefore, we have to ignore missing imports
+reportMissingImports = "none"
+# This is required to ignore for type checks of modules with stubs missing.
+reportMissingModuleSource = "none" # -> most common: prettytable in mdp managers
+
+reportGeneralTypeIssues = "none" # -> raises 218 errors (usage of literal MISSING in dataclasses)
+reportOptionalMemberAccess = "warning" # -> raises 8 errors
+reportPrivateUsage = "warning"
diff --git a/scripts/closed_loop_policy.py b/scripts/closed_loop_policy.py
new file mode 100644
index 0000000000000000000000000000000000000000..250e9de37ef60089ebdf5a2d30a8cde4f9f51b1a
--- /dev/null
+++ b/scripts/closed_loop_policy.py
@@ -0,0 +1,46 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import random
+import torch
+
+from isaaclab_tasks.utils.parse_cfg import parse_env_cfg
+
+from config.args import Gr00tN1ClosedLoopArguments
+
+
+def create_sim_environment(args: Gr00tN1ClosedLoopArguments):
+    """
+    Builds the simulation environment configuration for closed-loop evaluation.
+
+    Args:
+        args (Gr00tN1ClosedLoopArguments): The arguments for the simulation environment.
+
+    Returns:
+        The parsed environment configuration (recorders disabled), not an instantiated gym.Env; also seeds torch/numpy/random RNGs as a side effect.
+    """
+    env_name = args.task
+    env_cfg = parse_env_cfg(env_name, device=args.simulation_device, num_envs=args.num_envs)
+
+    # Disable all recorders
+    env_cfg.recorders = {}
+
+    # Seed all RNG sources (torch, numpy, random) for reproducible rollouts
+    torch.manual_seed(args.seed)
+    np.random.seed(args.seed)
+    random.seed(args.seed)
+
+    return env_cfg
diff --git a/scripts/config/__init__.py b/scripts/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3159bfe65645499015bd92609b99d476d69544e9
--- /dev/null
+++ b/scripts/config/__init__.py
@@ -0,0 +1,14 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/scripts/config/args.py b/scripts/config/args.py
new file mode 100644
index 0000000000000000000000000000000000000000..52e50fe0fe02ef2c2469f05f3f0940ed454ff0ac
--- /dev/null
+++ b/scripts/config/args.py
@@ -0,0 +1,374 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import Optional
+
+
+class EvalTaskConfig(Enum):
+    """Per-task evaluation settings.
+
+    Each member bundles, in order: the gym-registered task id, the tuned model
+    checkpoint path, the natural-language instruction given to the policy, the
+    HDF5 dataset filename, and the LeRobot task index for that task.
+    """
+    NUTPOURING = (
+        "Isaac-NutPour-GR1T2-ClosedLoop-v0",
+        "/home/gr00t/GR00T-N1-2B-tuned-Nut-Pouring-task",
+        (
+            "Pick up the beaker and tilt it to pour out 1 metallic nut into the bowl. Pick up the bowl and place it on"
+            " the metallic measuring scale."
+        ),
+        "nut_pouring_task.hdf5",
+        0  # 1 is reserved for data validity check, following GR00T-N1 guidelines.
+    )
+    PIPESORTING = (
+        "Isaac-ExhaustPipe-GR1T2-ClosedLoop-v0",
+        "/home/gr00t/GR00T-N1-2B-tuned-Exhaust-Pipe-Sorting-task",
+        "Pick up the blue pipe and place it into the blue bin.",
+        "exhaust_pipe_sorting_task.hdf5",
+        2  # 1 is reserved for data validity check, following GR00T-N1 guidelines.
+    )
+    PICKPLACE_LARGE = (
+        "Isaac-PickPlace-Camera-G1-v0",
+        "~/IsaacLabEvalTasks/datasets/Isaac-PickPlace-Camera-G1-v0",
+        "Pick up the steering wheel and place it into the basket.",
+        "generated_dataset_pick_place_camera_g1.hdf5",
+        3  # 1 is reserved for data validity check, following GR00T-N1 guidelines.
+    )
+    APPLE_LARGE = (
+        "Isaac-Apple-PickPlace-G1-v0",
+        "~/IsaacLabEvalTasks/datasets/Isaac-Apple-PickPlace-G1-v0",
+        "Pick up the apple and place it on the plate.",
+        "apple_pick_place_generated.hdf5",
+        4
+    )
+    # Pick and place apple 5 teleop demonstrations
+    APPLE_5 = (
+        "Isaac-Apple-PickPlace-G1-v0",
+        "~/isaaclabevaltasks/datasets",
+        "Pick up the apple and place it on the plate.",
+        "apple_pick_place_annotated.hdf5",
+        5
+    )
+    # Pick and place apple 20 generated demonstrations
+    # NOTE(review): shares task_index 5 with APPLE_5 — presumably intentional
+    # (same task description), but confirm against the LeRobot tasks.jsonl.
+    APPLE_20 = (  # Specified by: --task_name apple_20
+        "Isaac-Apple-PickPlace-G1-v0",
+        "~/isaaclabevaltasks/datasets",  # Specified by: --root_dir
+        "Pick up the apple and place it on the plate.",
+        "apple_pick_place_generated_small.hdf5",
+        5
+    )
+    # === Define any new task here ===
+    STEERING_WHEEL = (
+        "Isaac-PickPlace-Camera-G1-Mimic-v0",
+        "~/isaaclab/datasets",
+        "Pick up the steering wheel and place it on the basket.",
+        "steering_wheel_generated.hdf5",
+        6
+    )
+
+    def __init__(self, task: str, model_path: str, language_instruction: str, hdf5_name: str, task_index: int):
+        """Unpack the member's value tuple into named attributes."""
+        self.task = task
+        self.model_path = model_path
+        self.language_instruction = language_instruction
+        self.hdf5_name = hdf5_name
+        # Index 1 is reserved for the data-validity annotation (GR00T-N1 guidelines).
+        assert task_index != 1, "task_index must not be 1. (Use 0 for nutpouring, 2 for exhaustpipe, etc.)"
+        self.task_index = task_index
+
+@dataclass
+class Gr00tN1ClosedLoopArguments:
+ # Simulation specific parameters
+ headless: bool = field(
+ default=False, metadata={"description": "Whether to run the simulator in headless (no GUI) mode."}
+ )
+ num_envs: int = field(default=10, metadata={"description": "Number of environments to run in parallel."})
+ enable_pinocchio: bool = field(
+ default=True,
+ metadata={
+ "description": (
+ "Whether to use Pinocchio for physics simulation. Required for NutPouring and ExhaustPipe tasks."
+ )
+ },
+ )
+ record_camera: bool = field(
+ default=False,
+ metadata={"description": "Whether to record the camera images as videos during evaluation."},
+ )
+ record_video_output_path: str = field(
+ default="videos/",
+ metadata={"description": "Path to save the recorded videos."},
+ )
+
+ # model specific parameters
+ task_name: str = field(
+ default="nutpouring", metadata={"description": "Short name of the task to run (e.g., nutpouring, exhaustpipe)."}
+ )
+ task: str = field(default="", metadata={"description": "Full task name for the gym-registered environment."})
+ language_instruction: str = field(
+ default="", metadata={"description": "Instruction given to the policy in natural language."}
+ )
+ model_path: str = field(default="", metadata={"description": "Full path to the tuned model checkpoint directory."})
+ action_horizon: int = field(
+ default=16, metadata={"description": "Number of actions in the policy's predictionhorizon."}
+ )
+ embodiment_tag: str = field(
+ default="g1",
+ metadata={
+ "description": (
+ "Identifier for the robot embodiment used in the policy inference (e.g., 'g1' or 'new_embodiment')."
+ )
+ },
+ )
+ denoising_steps: int = field(
+ default=4, metadata={"description": "Number of denoising steps used in the policy inference."}
+ )
+ data_config: str = field(
+ default="g1", metadata={"description": "Name of the data configuration to use for the policy."}
+ )
+ original_image_size: tuple[int, int, int] = field(
+ default=(160, 256, 3), metadata={"description": "Original size of input images as (height, width, channels)."}
+ )
+ target_image_size: tuple[int, int, int] = field(
+ default=(256, 256, 3),
+ metadata={"description": "Target size for images after resizing and padding as (height, width, channels)."},
+ )
+ gr00t_joints_config_path: Path = field(
+ default=Path(__file__).parent.resolve() / "gr00t_g1" / "gr00t_joint_space.yaml",
+ metadata={"description": "Path to the YAML file specifying the joint ordering configuration for GR00T policy."},
+ )
+
+ # robot (G1) simulation specific parameters
+ action_joints_config_path: Path = field(
+ default=Path(__file__).parent.resolve() / "g1" / "action_joint_space.yaml",
+ metadata={
+ "description": (
+ "Path to the YAML file specifying the joint ordering configuration for G1 action space in Lab."
+ )
+ },
+ )
+ state_joints_config_path: Path = field(
+ default=Path(__file__).parent.resolve() / "g1" / "state_joint_space.yaml",
+ metadata={
+ "description": (
+ "Path to the YAML file specifying the joint ordering configuration for G1 state space in Lab."
+ )
+ },
+ )
+
+ # Default to GPU policy and CPU physics simulation
+ policy_device: str = field(
+ default="cuda", metadata={"description": "Device to run the policy model on (e.g., 'cuda' or 'cpu')."}
+ )
+ simulation_device: str = field(
+ default="cpu", metadata={"description": "Device to run the physics simulation on (e.g., 'cpu' or 'cuda')."}
+ )
+
+ # Evaluation parameters
+ max_num_rollouts: int = field(
+ default=100, metadata={"description": "Maximum number of rollouts to perform during evaluation."}
+ )
+ checkpoint_name: str = field(
+ default="gr00t-n1-2b-tuned", metadata={"description": "Name of the model checkpoint used for evaluation."}
+ )
+ eval_file_path: Optional[str] = field(
+ default=None, metadata={"description": "Path to the file where evaluation results will be saved."}
+ )
+
+ # Closed loop specific parameters
+ num_feedback_actions: int = field(
+ default=16,
+ metadata={
+ "description": "Number of feedback actions to execute per rollout (can be less than action_horizon)."
+ },
+ )
+ rollout_length: int = field(default=30, metadata={"description": "Number of steps in each rollout episode."})
+ seed: int = field(default=10, metadata={"description": "Random seed for reproducibility."})
+
+ def __post_init__(self):
+ # Populate fields from enum based on task_name
+ if self.task_name.upper() not in EvalTaskConfig.__members__:
+ raise ValueError(f"task_name must be one of: {', '.join(EvalTaskConfig.__members__.keys())}")
+ config = EvalTaskConfig[self.task_name.upper()]
+ if self.task == "":
+ self.task = config.task
+ if self.model_path == "":
+ self.model_path = config.model_path
+ if self.language_instruction == "":
+ self.language_instruction = config.language_instruction
+ # If model path is relative, return error
+ if not os.path.isabs(self.model_path):
+ raise ValueError("model_path must be an absolute path. Do not use relative paths.")
+ assert (
+ self.num_feedback_actions <= self.action_horizon
+ ), "num_feedback_actions must be less than or equal to action_horizon"
+ # assert all paths exist
+ assert Path(self.gr00t_joints_config_path).exists(), "gr00t_joints_config_path does not exist"
+ assert Path(self.action_joints_config_path).exists(), "action_joints_config_path does not exist"
+ assert Path(self.state_joints_config_path).exists(), "state_joints_config_path does not exist"
+ assert Path(self.model_path).exists(), "model_path does not exist."
+ # embodiment_tag
+ assert self.embodiment_tag in [
+ "g1",
+ "new_embodiment",
+ ], "embodiment_tag must be one of the following: " + ", ".join(["g1", "new_embodiment"])
+
+
+@dataclass
+class Gr00tN1DatasetConfig:
+    """Configuration for converting a mimic-generated HDF5 dataset to LeRobot format.
+
+    ``__post_init__`` derives paths from ``task_name`` / ``data_root``,
+    validates that all input files exist, and (destructively) removes any
+    pre-existing LeRobot output directory.
+    """
+    # Datasets & task specific parameters
+    data_root: Path = field(
+        default=Path("/mnt/datab/PhysicalAI-GR00T-Tuned-Tasks"),
+        metadata={"description": "Root directory for all data storage."},
+    )
+    task_name: str = field(
+        default="nutpouring", metadata={"description": "Short name of the task to run (e.g., nutpouring, exhaustpipe)."}
+    )
+    language_instruction: str = field(
+        default="", metadata={"description": "Instruction given to the policy in natural language."}
+    )
+    hdf5_name: str = field(default="", metadata={"description": "Name of the HDF5 file to use for the dataset."})
+
+    # Mimic-generated HDF5 datafield
+    state_name_sim: str = field(
+        default="robot_joint_pos", metadata={"description": "Name of the state in the HDF5 file."}
+    )
+    action_name_sim: str = field(
+        default="processed_actions", metadata={"description": "Name of the action in the HDF5 file."}
+    )
+    pov_cam_name_sim: str = field(
+        default="robot_pov_cam", metadata={"description": "Name of the POV camera in the HDF5 file."}
+    )
+    # Gr00t-LeRobot datafield
+    state_name_lerobot: str = field(
+        default="observation.state", metadata={"description": "Name of the state in the LeRobot file."}
+    )
+    action_name_lerobot: str = field(
+        default="action", metadata={"description": "Name of the action in the LeRobot file."}
+    )
+    video_name_lerobot: str = field(
+        default="observation.images.ego_view", metadata={"description": "Name of the video in the LeRobot file."}
+    )
+    task_description_lerobot: str = field(
+        default="annotation.human.action.task_description",
+        metadata={"description": "Name of the task description in the LeRobot file."},
+    )
+    valid_lerobot: str = field(
+        default="annotation.human.action.valid", metadata={"description": "Name of the validity in the LeRobot file."}
+    )
+
+    # Parquet
+    chunks_size: int = field(default=1000, metadata={"description": "Number of episodes per data chunk."})
+    # mp4 video
+    fps: int = field(default=20, metadata={"description": "Frames per second for video recording."})
+    # Metadata files
+    data_path: str = field(
+        default="data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
+        metadata={"description": "Template path for storing episode data files."},
+    )
+    video_path: str = field(
+        default="videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
+        metadata={"description": "Template path for storing episode video files."},
+    )
+    modality_template_path: Path = field(
+        default=Path(__file__).parent.resolve() / "gr00t_g1" / "modality.json",
+        metadata={"description": "Path to the modality template JSON file."},
+    )
+    modality_fname: str = field(
+        default="modality.json", metadata={"description": "Filename for the modality JSON file."}
+    )
+    episodes_fname: str = field(
+        default="episodes.jsonl", metadata={"description": "Filename for the episodes JSONL file."}
+    )
+    tasks_fname: str = field(default="tasks.jsonl", metadata={"description": "Filename for the tasks JSONL file."})
+    info_template_path: Path = field(
+        default=Path(__file__).parent.resolve() / "gr00t_g1" / "info.json",
+        metadata={"description": "Path to the info template JSON file."},
+    )
+    info_fname: str = field(default="info.json", metadata={"description": "Filename for the info JSON file."})
+    # GR00T policy specific parameters
+    gr00t_joints_config_path: Path = field(
+        default=Path(__file__).parent.resolve() / "gr00t_g1" / "gr00t_joint_space.yaml",
+        metadata={"description": "Path to the YAML file specifying the joint ordering configuration for GR00T policy."},
+    )
+    robot_type: str = field(
+        default="g1", metadata={"description": "Type of robot embodiment used in the policy fine-tuning."}
+    )
+    # robot (G1) simulation specific parameters
+    action_joints_config_path: Path = field(
+        default=Path(__file__).parent.resolve() / "g1" / "action_joint_space.yaml",
+        metadata={
+            "description": (
+                "Path to the YAML file specifying the joint ordering configuration for G1 action space in Lab."
+            )
+        },
+    )
+    state_joints_config_path: Path = field(
+        default=Path(__file__).parent.resolve() / "g1" / "state_joint_space.yaml",
+        metadata={
+            "description": (
+                "Path to the YAML file specifying the joint ordering configuration for G1 state space in Lab."
+            )
+        },
+    )
+    original_image_size: tuple[int, int, int] = field(
+        default=(160, 256, 3), metadata={"description": "Original size of input images as (height, width, channels)."}
+    )
+    target_image_size: tuple[int, int, int] = field(
+        default=(256, 256, 3), metadata={"description": "Target size for images after resizing and padding."}
+    )
+
+    # Derived in __post_init__ from data_root / task_name (not constructor args)
+    hdf5_file_path: Path = field(init=False)
+    lerobot_data_dir: Path = field(init=False)
+    task_index: int = field(init=False)  # task index for the task description in LeRobot file
+
+    def __post_init__(self):
+
+        # Populate fields from enum based on task_name
+        if self.task_name.upper() not in EvalTaskConfig.__members__:
+            raise ValueError(f"task_name must be one of: {', '.join(EvalTaskConfig.__members__.keys())}")
+        config = EvalTaskConfig[self.task_name.upper()]
+        self.language_instruction = config.language_instruction
+        self.hdf5_name = config.hdf5_name
+        self.task_index = config.task_index
+
+        # Derive input HDF5 path and LeRobot output directory from data_root
+        self.hdf5_file_path = self.data_root / self.hdf5_name
+        self.lerobot_data_dir = self.data_root / self.hdf5_name.replace(".hdf5", "") / "lerobot"
+
+        # Assert all paths exist
+        assert self.hdf5_file_path.exists(), "hdf5_file_path does not exist"
+        assert Path(self.gr00t_joints_config_path).exists(), "gr00t_joints_config_path does not exist"
+        assert Path(self.action_joints_config_path).exists(), "action_joints_config_path does not exist"
+        assert Path(self.state_joints_config_path).exists(), "state_joints_config_path does not exist"
+        assert Path(self.info_template_path).exists(), "info_template_path does not exist"
+        assert Path(self.modality_template_path).exists(), "modality_template_path does not exist"
+        # if lerobot_data_dir not empty, throw a warning and remove
+        # NOTE: destructive — any previous conversion output is deleted here
+        if self.lerobot_data_dir.exists():
+            print(f"Warning: lerobot_data_dir {self.lerobot_data_dir} already exists. Removing it.")
+            # remove directory contents and the directory itself using shutil
+            shutil.rmtree(self.lerobot_data_dir)
+        # Prepare data keys for mimic-generated hdf5 file
+        self.hdf5_keys = {
+            "state": self.state_name_sim,
+            "action": self.action_name_sim,
+        }
+        # Prepare data keys for LeRobot file
+        self.lerobot_keys = {
+            "state": self.state_name_lerobot,
+            "action": self.action_name_lerobot,
+            "video": self.video_name_lerobot,
+            "annotation": (
+                self.task_description_lerobot,
+                self.valid_lerobot,
+            ),
+        }
diff --git a/scripts/config/g1/action_joint_space.yaml b/scripts/config/g1/action_joint_space.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ba0865e3b67ff64dc837be0338ecd80eceddae22
--- /dev/null
+++ b/scripts/config/g1/action_joint_space.yaml
@@ -0,0 +1,40 @@
+# G1 Robot Joint Configuration used in the action joint space
+# Maps joint names to joint indices for easy reference
+joints:
+ # torso and arm joints (pink controller)
+ "waist_yaw_joint": 0
+ "waist_roll_joint": 1
+ "waist_pitch_joint": 2
+ "left_shoulder_pitch_joint": 3
+ "right_shoulder_pitch_joint": 4
+ "left_shoulder_roll_joint": 5
+ "right_shoulder_roll_joint": 6
+ "left_shoulder_yaw_joint": 7
+ "right_shoulder_yaw_joint": 8
+ "left_elbow_joint": 9
+ "right_elbow_joint": 10
+ "left_wrist_roll_joint": 11
+ "right_wrist_roll_joint": 12
+ "left_wrist_pitch_joint": 13
+ "right_wrist_pitch_joint": 14
+ "left_wrist_yaw_joint": 15
+ "right_wrist_yaw_joint": 16
+
+ # hand joints
+ "left_hand_index_0_joint": 17
+ "left_hand_middle_0_joint": 18
+ "left_hand_thumb_0_joint": 19
+ "right_hand_index_0_joint": 20
+ "right_hand_middle_0_joint": 21
+ "right_hand_thumb_0_joint": 22
+ "left_hand_index_1_joint": 23
+ "left_hand_middle_1_joint": 24
+ "left_hand_thumb_1_joint": 25
+ "right_hand_index_1_joint": 26
+ "right_hand_middle_1_joint": 27
+ "right_hand_thumb_1_joint": 28
+ "left_hand_thumb_2_joint": 29
+ "right_hand_thumb_2_joint": 30
+
+# Additional metadata
+total_joints: 31
diff --git a/scripts/config/g1/state_joint_space.yaml b/scripts/config/g1/state_joint_space.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1373832a988765426ed522f517517ddd0fb0f042
--- /dev/null
+++ b/scripts/config/g1/state_joint_space.yaml
@@ -0,0 +1,48 @@
+# G1 Robot Joint Configuration used in the state joint space
+# Maps joint names to joint indices for easy reference
+joints:
+ 'left_hip_pitch_joint': 0
+ 'right_hip_pitch_joint': 1
+ 'waist_yaw_joint': 2
+ 'left_hip_roll_joint': 3
+ 'right_hip_roll_joint': 4
+ 'waist_roll_joint': 5
+ 'left_hip_yaw_joint': 6
+ 'right_hip_yaw_joint': 7
+ 'waist_pitch_joint': 8
+ 'left_knee_joint': 9
+ 'right_knee_joint': 10
+ 'left_shoulder_pitch_joint': 11
+ 'right_shoulder_pitch_joint': 12
+ 'left_ankle_pitch_joint': 13
+ 'right_ankle_pitch_joint': 14
+ 'left_shoulder_roll_joint': 15
+ 'right_shoulder_roll_joint': 16
+ 'left_ankle_roll_joint': 17
+ 'right_ankle_roll_joint': 18
+ 'left_shoulder_yaw_joint': 19
+ 'right_shoulder_yaw_joint': 20
+ 'left_elbow_joint': 21
+ 'right_elbow_joint': 22
+ 'left_wrist_roll_joint': 23
+ 'right_wrist_roll_joint': 24
+ 'left_wrist_pitch_joint': 25
+ 'right_wrist_pitch_joint': 26
+ 'left_wrist_yaw_joint': 27
+ 'right_wrist_yaw_joint': 28
+ 'left_hand_index_0_joint': 29
+ 'left_hand_middle_0_joint': 30
+ 'left_hand_thumb_0_joint': 31
+ 'right_hand_index_0_joint': 32
+ 'right_hand_middle_0_joint': 33
+ 'right_hand_thumb_0_joint': 34
+ 'left_hand_index_1_joint': 35
+ 'left_hand_middle_1_joint': 36
+ 'left_hand_thumb_1_joint': 37
+ 'right_hand_index_1_joint': 38
+ 'right_hand_middle_1_joint': 39
+ 'right_hand_thumb_1_joint': 40
+ 'left_hand_thumb_2_joint': 41
+ 'right_hand_thumb_2_joint': 42
+
+total_joints: 43
diff --git a/scripts/config/gr00t/gr00t_joint_space.yaml b/scripts/config/gr00t/gr00t_joint_space.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..14cc3fb61296f562e855721ed59352817cc1168e
--- /dev/null
+++ b/scripts/config/gr00t/gr00t_joint_space.yaml
@@ -0,0 +1,44 @@
+# Expected input & output joint names following listed order for GR00T state/action space
+joints:
+ left_arm:
+ - left_shoulder_pitch_joint
+ - left_shoulder_roll_joint
+ - left_shoulder_yaw_joint
+ - left_elbow_pitch_joint
+ - left_wrist_yaw_joint
+ - left_wrist_roll_joint
+ - left_wrist_pitch_joint
+ right_arm:
+ - right_shoulder_pitch_joint
+ - right_shoulder_roll_joint
+ - right_shoulder_yaw_joint
+ - right_elbow_pitch_joint
+ - right_wrist_yaw_joint
+ - right_wrist_roll_joint
+ - right_wrist_pitch_joint
+ left_hand:
+ - L_pinky_proximal_joint
+ - L_ring_proximal_joint
+ - L_middle_proximal_joint
+ - L_index_proximal_joint
+ - L_thumb_proximal_yaw_joint
+ - L_thumb_proximal_pitch_joint
+ # Mimic joints
+ # - L_index_intermediate_joint
+ # - L_middle_intermediate_joint
+ # - L_ring_intermediate_joint
+ # - L_pinky_intermediate_joint
+ # - L_thumb_distal_joint
+ right_hand:
+ - R_pinky_proximal_joint
+ - R_ring_proximal_joint
+ - R_middle_proximal_joint
+ - R_index_proximal_joint
+ - R_thumb_proximal_yaw_joint
+ - R_thumb_proximal_pitch_joint
+ # Mimic joints
+ # - R_index_intermediate_joint
+ # - R_middle_intermediate_joint
+ # - R_ring_intermediate_joint
+ # - R_pinky_intermediate_joint
+ # - R_thumb_distal_joint
diff --git a/scripts/config/gr00t/info.json b/scripts/config/gr00t/info.json
new file mode 100644
index 0000000000000000000000000000000000000000..9778e5408be0c61c7dc1c9ae5f84f18497e3131b
--- /dev/null
+++ b/scripts/config/gr00t/info.json
@@ -0,0 +1,17 @@
+{
+ "codebase_version": "v2.0",
+ "robot_type": null,
+ "total_episodes": null,
+ "total_frames": null,
+ "total_tasks": null,
+ "total_videos": null,
+ "total_chunks": null,
+ "chunks_size": 1000,
+ "fps": null,
+ "splits": {
+ "train": "0:100"
+ },
+ "data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
+ "video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
+ "features": null
+}
diff --git a/scripts/config/gr00t/modality.json b/scripts/config/gr00t/modality.json
new file mode 100644
index 0000000000000000000000000000000000000000..d01a41bc4f09148354bb5f50331c3b2fd68ad32b
--- /dev/null
+++ b/scripts/config/gr00t/modality.json
@@ -0,0 +1,53 @@
+{
+ "state": {
+ "left_arm": {
+ "original_key": "observation.state",
+ "start": 0,
+ "end": 7
+ },
+ "right_arm": {
+ "original_key": "observation.state",
+ "start": 7,
+ "end": 14
+ },
+ "left_hand": {
+ "original_key": "observation.state",
+ "start": 14,
+ "end": 20
+ },
+ "right_hand": {
+ "original_key": "observation.state",
+ "start": 20,
+ "end": 26
+ }
+ },
+ "action": {
+ "left_arm": {
+ "start": 0,
+ "end": 7
+ },
+ "right_arm": {
+ "start": 7,
+ "end": 14
+ },
+ "left_hand": {
+ "start": 14,
+ "end": 20
+ },
+ "right_hand": {
+ "start": 20,
+ "end": 26
+ }
+ },
+ "video": {
+ "ego_view": {
+ "original_key": "observation.images.ego_view"
+ }
+ },
+ "annotation": {
+ "human.action.task_description": {
+ },
+ "human.validity": {
+ }
+ }
+}
diff --git a/scripts/config/gr00t_g1/gr00t_joint_space.yaml b/scripts/config/gr00t_g1/gr00t_joint_space.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a8be7253fc1fa8ca642fdc34c64ab2ca11bf7047
--- /dev/null
+++ b/scripts/config/gr00t_g1/gr00t_joint_space.yaml
@@ -0,0 +1,34 @@
+# Expected input & output joint names following listed order for GR00T state/action space
+joints:
+ left_arm:
+ - left_shoulder_pitch_joint
+ - left_shoulder_roll_joint
+ - left_shoulder_yaw_joint
+ - left_elbow_joint
+ - left_wrist_roll_joint
+ - left_wrist_pitch_joint
+ - left_wrist_yaw_joint
+ right_arm:
+ - right_shoulder_pitch_joint
+ - right_shoulder_roll_joint
+ - right_shoulder_yaw_joint
+ - right_elbow_joint
+ - right_wrist_roll_joint
+ - right_wrist_pitch_joint
+ - right_wrist_yaw_joint
+ left_hand:
+ - left_hand_index_0_joint
+ - left_hand_middle_0_joint
+ - left_hand_thumb_0_joint
+ - left_hand_index_1_joint
+ - left_hand_middle_1_joint
+ - left_hand_thumb_1_joint
+ - left_hand_thumb_2_joint
+ right_hand:
+ - right_hand_index_0_joint
+ - right_hand_middle_0_joint
+ - right_hand_thumb_0_joint
+ - right_hand_index_1_joint
+ - right_hand_middle_1_joint
+ - right_hand_thumb_1_joint
+ - right_hand_thumb_2_joint
\ No newline at end of file
diff --git a/scripts/config/gr00t_g1/info.json b/scripts/config/gr00t_g1/info.json
new file mode 100644
index 0000000000000000000000000000000000000000..9778e5408be0c61c7dc1c9ae5f84f18497e3131b
--- /dev/null
+++ b/scripts/config/gr00t_g1/info.json
@@ -0,0 +1,17 @@
+{
+ "codebase_version": "v2.0",
+ "robot_type": null,
+ "total_episodes": null,
+ "total_frames": null,
+ "total_tasks": null,
+ "total_videos": null,
+ "total_chunks": null,
+ "chunks_size": 1000,
+ "fps": null,
+ "splits": {
+ "train": "0:100"
+ },
+ "data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
+ "video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
+ "features": null
+}
diff --git a/scripts/config/gr00t_g1/modality.json b/scripts/config/gr00t_g1/modality.json
new file mode 100644
index 0000000000000000000000000000000000000000..ed6e415d0ae74aa65d3283ee1f64b0f1f3651fbb
--- /dev/null
+++ b/scripts/config/gr00t_g1/modality.json
@@ -0,0 +1,51 @@
+{
+ "state": {
+ "left_arm": {
+ "original_key": "observation.state",
+ "start": 0,
+ "end": 7
+ },
+ "right_arm": {
+ "original_key": "observation.state",
+ "start": 7,
+ "end": 14
+ },
+ "left_hand": {
+ "original_key": "observation.state",
+ "start": 14,
+ "end": 21
+ },
+ "right_hand": {
+ "original_key": "observation.state",
+ "start": 21,
+ "end": 28
+ }
+ },
+ "action": {
+ "left_arm": {
+ "start": 0,
+ "end": 7
+ },
+ "right_arm": {
+ "start": 7,
+ "end": 14
+ },
+ "left_hand": {
+ "start": 14,
+ "end": 21
+ },
+ "right_hand": {
+ "start": 21,
+ "end": 28
+ }
+ },
+ "video": {
+ "ego_view": {
+ "original_key": "observation.images.ego_view"
+ }
+ },
+ "annotation": {
+ "human.action.task_description": {},
+ "human.validity": {}
+ }
+}
\ No newline at end of file
diff --git a/scripts/config/gr1/action_joint_space.yaml b/scripts/config/gr1/action_joint_space.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..890707b187c2dd47ceff7b7c68befb0f4116186b
--- /dev/null
+++ b/scripts/config/gr1/action_joint_space.yaml
@@ -0,0 +1,45 @@
+# GR1 Robot Joint Configuration used in the action joint space
+# Maps joint names to joint indices for easy reference
+joints:
+ # arm joints
+ "left_shoulder_pitch_joint": 0
+ "right_shoulder_pitch_joint": 1
+ "left_shoulder_roll_joint": 2
+ "right_shoulder_roll_joint": 3
+ "left_shoulder_yaw_joint": 4
+ "right_shoulder_yaw_joint": 5
+ "left_elbow_pitch_joint": 6
+ "right_elbow_pitch_joint": 7
+ "left_wrist_yaw_joint": 8
+ "right_wrist_yaw_joint": 9
+ "left_wrist_roll_joint": 10
+ "right_wrist_roll_joint": 11
+ "left_wrist_pitch_joint": 12
+ "right_wrist_pitch_joint": 13
+
+ # hand joints
+ "L_index_proximal_joint": 14
+ "L_middle_proximal_joint": 15
+ "L_pinky_proximal_joint": 16
+ "L_ring_proximal_joint": 17
+ "L_thumb_proximal_yaw_joint": 18
+ "R_index_proximal_joint": 19
+ "R_middle_proximal_joint": 20
+ "R_pinky_proximal_joint": 21
+ "R_ring_proximal_joint": 22
+ "R_thumb_proximal_yaw_joint": 23
+ "L_index_intermediate_joint": 24
+ "L_middle_intermediate_joint": 25
+ "L_pinky_intermediate_joint": 26
+ "L_ring_intermediate_joint": 27
+ "L_thumb_proximal_pitch_joint": 28
+ "R_index_intermediate_joint": 29
+ "R_middle_intermediate_joint": 30
+ "R_pinky_intermediate_joint": 31
+ "R_ring_intermediate_joint": 32
+ "R_thumb_proximal_pitch_joint": 33
+ "L_thumb_distal_joint": 34
+ "R_thumb_distal_joint": 35
+
+# Additional metadata
+total_joints: 36
diff --git a/scripts/config/gr1/state_joint_space.yaml b/scripts/config/gr1/state_joint_space.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d40529b5525ce62faf80f3232ec192b2af767aea
--- /dev/null
+++ b/scripts/config/gr1/state_joint_space.yaml
@@ -0,0 +1,59 @@
+# GR1 Robot Joint Configuration used in the state joint space
+# Maps joint names to joint indices for easy reference
+joints:
+ 'left_hip_roll_joint': 0
+ 'right_hip_roll_joint': 1
+ 'waist_yaw_joint': 2
+ 'left_hip_yaw_joint': 3
+ 'right_hip_yaw_joint': 4
+ 'waist_pitch_joint': 5
+ 'left_hip_pitch_joint': 6
+ 'right_hip_pitch_joint': 7
+ 'waist_roll_joint': 8
+ 'left_knee_pitch_joint': 9
+ 'right_knee_pitch_joint': 10
+ 'head_roll_joint': 11
+ 'left_shoulder_pitch_joint': 12
+ 'right_shoulder_pitch_joint': 13
+ 'left_ankle_pitch_joint': 14
+ 'right_ankle_pitch_joint': 15
+ 'head_pitch_joint': 16
+ 'left_shoulder_roll_joint': 17
+ 'right_shoulder_roll_joint': 18
+ 'left_ankle_roll_joint': 19
+ 'right_ankle_roll_joint': 20
+ 'head_yaw_joint': 21
+ 'left_shoulder_yaw_joint': 22
+ 'right_shoulder_yaw_joint': 23
+ 'left_elbow_pitch_joint': 24
+ 'right_elbow_pitch_joint': 25
+ 'left_wrist_yaw_joint': 26
+ 'right_wrist_yaw_joint': 27
+ 'left_wrist_roll_joint': 28
+ 'right_wrist_roll_joint': 29
+ 'left_wrist_pitch_joint': 30
+ 'right_wrist_pitch_joint': 31
+ 'L_index_proximal_joint': 32
+ 'L_middle_proximal_joint': 33
+ 'L_pinky_proximal_joint': 34
+ 'L_ring_proximal_joint': 35
+ 'L_thumb_proximal_yaw_joint': 36
+ 'R_index_proximal_joint': 37
+ 'R_middle_proximal_joint': 38
+ 'R_pinky_proximal_joint': 39
+ 'R_ring_proximal_joint': 40
+ 'R_thumb_proximal_yaw_joint': 41
+ 'L_index_intermediate_joint': 42
+ 'L_middle_intermediate_joint': 43
+ 'L_pinky_intermediate_joint': 44
+ 'L_ring_intermediate_joint': 45
+ 'L_thumb_proximal_pitch_joint': 46
+ 'R_index_intermediate_joint': 47
+ 'R_middle_intermediate_joint': 48
+ 'R_pinky_intermediate_joint': 49
+ 'R_ring_intermediate_joint': 50
+ 'R_thumb_proximal_pitch_joint': 51
+ 'L_thumb_distal_joint': 52
+ 'R_thumb_distal_joint': 53
+
+total_joints: 54
diff --git a/scripts/convert_hdf5_to_lerobot.py b/scripts/convert_hdf5_to_lerobot.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0140387bc8b9ec23309a1347fd03ea4fc2184ed
--- /dev/null
+++ b/scripts/convert_hdf5_to_lerobot.py
@@ -0,0 +1,494 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import h5py
+import json
+import multiprocessing as mp
+import numpy as np
+import shutil
+import subprocess
+import traceback
+from pathlib import Path
+from tqdm import tqdm
+from typing import Any, Dict
+
+import pandas as pd
+import torchvision
+import tyro
+from io_utils import dump_json, dump_jsonl, load_gr1_joints_config, load_json
+from policies.image_conversion import resize_frames_with_padding
+from policies.joints_conversion import remap_sim_joints_to_policy_joints
+from robot_joints import JointsAbsPosition
+
+from config.args import Gr00tN1DatasetConfig
+
+import sys
+
+
def _video_metadata_from_config(config: Gr00tN1DatasetConfig) -> Dict[str, Any]:
    """Build best-effort video feature metadata purely from the dataset config.

    Used as the fallback when ffprobe is unavailable or fails, so conversion
    still works on minimal machines.
    """
    h, w, c = config.target_image_size
    video_info = {
        "video.fps": float(config.fps),
        "video.codec": "h264",
        # torchvision/ffmpeg backend may choose pix_fmt internally; keep best-effort.
        "video.pix_fmt": None,
        "video.is_depth_map": False,
        "has_audio": False,
    }
    return {
        "dtype": "video",
        "shape": [int(h), int(w), int(c)],
        "names": ["height", "width", "channel"],
        "video_info": video_info,
    }
+
+
def get_video_metadata(video_path: str | Path, config: Gr00tN1DatasetConfig) -> Dict[str, Any]:
    """
    Get video metadata in the specified format.

    Args:
        video_path: Path to the video file.
        config: Dataset configuration used to derive fallback metadata.

    Returns:
        Video metadata including shape, names, and video_info.
    """
    # Prefer ffprobe when available, but fall back to config-derived metadata.
    # This makes the script robust on minimal AWS instances where ffprobe isn't installed.
    if shutil.which("ffprobe") is None:
        return _video_metadata_from_config(config)

    probe_cmd = [
        "ffprobe",
        "-v",
        "error",
        "-select_streams",
        "v:0",
        "-show_entries",
        "stream=height,width,codec_name,pix_fmt,r_frame_rate",
        "-of",
        "json",
        video_path,
    ]

    try:
        raw_output = subprocess.check_output(probe_cmd).decode("utf-8")
        stream_info = json.loads(raw_output)["streams"][0]

        # r_frame_rate arrives as a fraction string such as "15/1".
        numerator, denominator = (int(part) for part in stream_info["r_frame_rate"].split("/"))
        frame_rate = numerator / denominator

        return {
            "dtype": "video",
            "shape": [stream_info["height"], stream_info["width"], 3],  # Assuming 3 channels
            "names": ["height", "width", "channel"],
            "video_info": {
                "video.fps": frame_rate,
                "video.codec": stream_info["codec_name"],
                "video.pix_fmt": stream_info["pix_fmt"],
                "video.is_depth_map": False,
                "has_audio": False,
            },
        }

    except subprocess.CalledProcessError as e:
        print(f"Error running ffprobe: {e}")
        return _video_metadata_from_config(config)
    except FileNotFoundError:
        return _video_metadata_from_config(config)
    except json.JSONDecodeError as e:
        print(f"Error parsing ffprobe output: {e}")
        return _video_metadata_from_config(config)
+
+
def get_feature_info(
    step_data: pd.DataFrame, video_paths: Dict[str, str], config: Gr00tN1DatasetConfig
) -> Dict[str, Any]:
    """
    Get feature info from each frame of the video.

    Args:
        step_data: DataFrame containing data of an episode.
        video_paths: Dictionary mapping video keys to their file paths.
        config: Configuration object containing dataset and path information.
    Returns:
        Dictionary containing feature information for each column and video.
    """
    features = {}
    # Video features come from probing the rendered files (with config fallback).
    for video_key, video_path in video_paths.items():
        features[video_key] = get_video_metadata(video_path, config)

    assert isinstance(step_data, pd.DataFrame)
    state_action_columns = (config.lerobot_keys["state"], config.lerobot_keys["action"])
    for column in step_data.columns:
        stacked = np.stack(step_data[column], axis=0)
        # Per-frame shape: scalar columns become (1,), vector columns drop the frame axis.
        per_frame_shape = (1,) if stacked.ndim == 1 else stacked.shape[1:]
        feature = {
            "dtype": stacked.dtype.name,
            "shape": per_frame_shape,
        }
        # State & action vectors additionally get per-motor names.
        if column in state_action_columns:
            feature["names"] = [f"motor_{i}" for i in range(stacked.shape[1])]
        features[column] = feature

    return features
+
+
def generate_info(
    total_episodes: int,
    total_frames: int,
    total_tasks: int,
    total_videos: int,
    total_chunks: int,
    config: Gr00tN1DatasetConfig,
    step_data: pd.DataFrame,
    video_paths: Dict[str, str],
) -> Dict[str, Any]:
    """
    Generate the info.json data field.

    Args:
        total_episodes: Total number of episodes in the dataset.
        total_frames: Total number of frames across all episodes.
        total_tasks: Total number of tasks in the dataset.
        total_videos: Total number of videos in the dataset.
        total_chunks: Total number of data chunks.
        config: Configuration object containing dataset and path information.
        step_data: DataFrame containing step data for an example episode.
        video_paths: Dictionary mapping video keys to their file paths.

    Returns:
        Dictionary containing the info.json data field.
    """
    # Start from the on-disk template and overwrite the dataset-specific fields.
    # Assignment order is kept stable so key insertion order in the output JSON
    # stays deterministic even when the template is missing a key.
    info = load_json(config.info_template_path)

    info["robot_type"] = config.robot_type
    info["total_episodes"] = total_episodes
    info["total_frames"] = total_frames
    info["total_tasks"] = total_tasks
    info["total_videos"] = total_videos
    info["total_chunks"] = total_chunks
    info["chunks_size"] = config.chunks_size
    info["fps"] = config.fps

    info["data_path"] = config.data_path
    info["video_path"] = config.video_path

    info["features"] = get_feature_info(step_data, video_paths, config)
    return info
+
+
def write_video_job(queue: mp.Queue, error_queue: mp.Queue, config: Gr00tN1DatasetConfig) -> None:
    """
    Worker loop that writes frames to videos in mp4 format.

    Pulls jobs from `queue` until a `None` sentinel arrives. Failures are
    reported through `error_queue` instead of crashing the worker.

    Args:
        queue: Multiprocessing queue containing video frame data to be written.
        error_queue: Multiprocessing queue for reporting errors from worker processes.
        config: Configuration object containing dataset and path information.

    Returns:
        None
    """
    while True:
        job = queue.get()
        if job is None:
            # Shutdown sentinel from the main process.
            break
        try:
            video_path, frames, fps, video_type = job
            if video_type == "image":
                # Create parent directory if it doesn't exist
                video_path.parent.mkdir(parents=True, exist_ok=True)
                assert frames.shape[1:] == config.original_image_size, f"Frames shape is {frames.shape}"
                resized = resize_frames_with_padding(
                    frames, target_image_size=config.target_image_size, bgr_conversion=False, pad_img=True
                )
                # h264 codec encoding
                torchvision.io.write_video(video_path, resized, fps, video_codec="h264")
        except Exception as e:
            # Ship the full traceback back to the main process for reporting.
            error_queue.put(f"Error in process: {e}\n{traceback.format_exc()}")
+
+
def convert_trajectory_to_df(
    trajectory: h5py.Dataset,
    episode_index: int,
    index_start: int,
    config: Gr00tN1DatasetConfig,
) -> Dict[str, Any]:
    """
    Convert a single trajectory from HDF5 to a pandas DataFrame.

    Args:
        trajectory: HDF5 dataset containing trajectory data.
        episode_index: Index of the current episode.
        index_start: Starting global frame index for this episode (cumulative
            frame count of all preceding episodes).
        config: Configuration object containing dataset and path information.

    Returns:
        Dictionary with keys:
            "data": pandas DataFrame of per-frame state/action/bookkeeping columns.
            "length": number of frames in the episode.
            "annotation": set of annotation indices appearing in the episode.
    """

    return_dict = {}
    data = {}

    gr00t_modality_config = load_json(config.modality_template_path)

    gr00t_joints_config = load_gr1_joints_config(config.gr00t_joints_config_path)
    action_joints_config = load_gr1_joints_config(config.action_joints_config_path)
    state_joints_config = load_gr1_joints_config(config.state_joints_config_path)

    # 1. Get state, action, and timestamp
    length = None
    for key, hdf5_key_name in config.hdf5_keys.items():
        assert key in ["state", "action"]
        lerobot_key_name = config.lerobot_keys[key]
        # States live under the "obs" group; actions are stored at the trajectory root.
        if key == "state":
            joints = trajectory["obs"][hdf5_key_name]
        else:
            joints = trajectory[hdf5_key_name]
        joints = joints.astype(np.float64)

        # Drop the trailing entry from both streams so state/action stay aligned.
        if key == "state":
            # remove the last obs
            joints = joints[:-1]
            input_joints_config = state_joints_config
        elif key == "action":
            # remove the last idle action
            joints = joints[:-1]
            input_joints_config = action_joints_config
        else:
            raise ValueError(f"Unknown key: {key}")
        # Expect (num_frames, num_joints), one column per configured joint.
        assert joints.ndim == 2
        assert joints.shape[1] == len(input_joints_config)

        # 1.1. Remap the joints to the LeRobot joint orders
        joints = JointsAbsPosition.from_array(joints, input_joints_config, device="cpu")
        remapped_joints = remap_sim_joints_to_policy_joints(joints, gr00t_joints_config)
        # 1.2. Fill in the missing joints with zeros
        ordered_joints = []
        for joint_group in gr00t_modality_config[key].keys():
            # Width of this group in the flattened vector, per the modality template.
            num_joints = (
                gr00t_modality_config[key][joint_group]["end"] - gr00t_modality_config[key][joint_group]["start"]
            )
            if joint_group not in remapped_joints.keys():
                # Group absent from the sim output: zero-pad so the layout matches the template.
                remapped_joints[joint_group] = np.zeros(
                    (joints.get_joints_pos().shape[0], num_joints), dtype=np.float64
                )
            else:
                assert remapped_joints[joint_group].shape[1] == num_joints
            ordered_joints.append(remapped_joints[joint_group])

        # 1.3. Concatenate the arrays for parquets
        concatenated = np.concatenate(ordered_joints, axis=1)
        # Store row-wise so the DataFrame column holds one vector per frame.
        data[lerobot_key_name] = [row for row in concatenated]

    assert len(data[config.lerobot_keys["action"]]) == len(data[config.lerobot_keys["state"]])
    length = len(data[config.lerobot_keys["action"]])
    # NOTE(review): timestamps assume frames are uniformly sampled at config.fps — confirm.
    data["timestamp"] = np.arange(length).astype(np.float64) * (1.0 / config.fps)
    # 2. Get the annotation
    annotation_keys = config.lerobot_keys["annotation"]
    # task selection
    data[annotation_keys[0]] = np.ones(length, dtype=int) * config.task_index
    # valid is 1
    data[annotation_keys[1]] = np.ones(length, dtype=int) * 1

    # 3. Other data
    data["episode_index"] = np.ones(length, dtype=int) * episode_index
    # NOTE(review): task_index column is all zeros while the task annotation above uses
    # config.task_index — looks intentional for the GR00T-LeRobot layout, but confirm.
    data["task_index"] = np.zeros(length, dtype=int)
    data["index"] = np.arange(length, dtype=int) + index_start
    # last frame is successful
    reward = np.zeros(length, dtype=np.float64)
    reward[-1] = 1
    done = np.zeros(length, dtype=bool)
    done[-1] = True
    data["next.reward"] = reward
    data["next.done"] = done

    dataframe = pd.DataFrame(data)

    return_dict["data"] = dataframe
    return_dict["length"] = length
    return_dict["annotation"] = set(data[annotation_keys[0]]) | set(data[annotation_keys[1]])
    return return_dict
+
+
def _shutdown_workers(queue: mp.Queue, workers: list) -> None:
    """Send one shutdown sentinel per worker process and block until all exit."""
    for _ in workers:
        queue.put(None)
    for worker in workers:
        worker.join()


def convert_hdf5_to_lerobot(config: Gr00tN1DatasetConfig):
    """
    Convert the MimicGen HDF5 dataset to Gr00t-LeRobot format.

    Spawns worker processes that encode videos while the main process converts
    trajectories to parquet files and writes the dataset metadata.

    Args:
        config: Configuration object containing dataset and path information.

    Returns:
        None
    """
    # Create a queue to communicate with the worker processes
    max_queue_size = 10
    num_workers = 4
    queue = mp.Queue(maxsize=max_queue_size)
    error_queue = mp.Queue()  # for error handling
    # Start the worker processes
    workers = []
    for _ in range(num_workers):
        worker = mp.Process(target=write_video_job, args=(queue, error_queue, config))
        worker.start()
        workers.append(worker)

    assert Path(config.hdf5_file_path).exists()
    hdf5_handler = h5py.File(config.hdf5_file_path, "r")
    hdf5_data = hdf5_handler["data"]

    # 1. Generate meta/ folder
    config.lerobot_data_dir.mkdir(parents=True, exist_ok=True)
    lerobot_meta_dir = config.lerobot_data_dir / "meta"
    lerobot_meta_dir.mkdir(parents=True, exist_ok=True)

    # Task index 1 is reserved for the "valid" annotation.
    # NOTE(review): if config.task_index == 1 the instruction overwrites "valid" — confirm task_index != 1.
    tasks = {1: "valid"}
    tasks.update({config.task_index: f"{config.language_instruction}"})

    # 2. Generate data/
    total_length = 0
    example_data = None
    video_paths = {}

    trajectory_ids = list(hdf5_data.keys())
    if not trajectory_ids:
        # An empty dataset would previously crash much later with a confusing
        # TypeError (example_data stays None); fail fast with a clear message.
        _shutdown_workers(queue, workers)
        hdf5_handler.close()
        raise ValueError(f"No trajectories found in {config.hdf5_file_path}")

    episodes_info = []
    for episode_index, trajectory_id in enumerate(tqdm(trajectory_ids)):

        try:
            trajectory = hdf5_data[trajectory_id]
            df_ret_dict = convert_trajectory_to_df(
                trajectory=trajectory, episode_index=episode_index, index_start=total_length, config=config
            )
        except Exception as e:
            print(
                f"Error loading trajectory {trajectory_id}: {type(e).__name__}: {e!r}"
            )
            print(traceback.format_exc())
            # Fail fast on a bad trajectory, but shut down cleanly first: without
            # the sentinels the non-daemon workers block on queue.get() forever
            # and the interpreter hangs on exit. (A dead `continue` that followed
            # sys.exit(1) was removed — it was unreachable.)
            _shutdown_workers(queue, workers)
            hdf5_handler.close()
            sys.exit(1)
        # 2.1. Save the episode data
        dataframe = df_ret_dict["data"]
        episode_chunk = episode_index // config.chunks_size
        save_relpath = config.data_path.format(episode_chunk=episode_chunk, episode_index=episode_index)
        save_path = config.lerobot_data_dir / save_relpath
        save_path.parent.mkdir(parents=True, exist_ok=True)
        dataframe.to_parquet(save_path)

        # 2.2. Update total length, episodes_info
        length = df_ret_dict["length"]
        total_length += length
        episodes_info.append(
            {
                "episode_index": episode_index,
                "tasks": [tasks[task_index] for task_index in df_ret_dict["annotation"]],
                "length": length,
            }
        )
        # 2.3. Generate videos/ (encoded asynchronously by the worker processes)
        new_video_relpath = config.video_path.format(
            episode_chunk=episode_chunk, video_key=config.lerobot_keys["video"], episode_index=episode_index
        )
        new_video_path = config.lerobot_data_dir / new_video_relpath
        if config.video_name_lerobot not in video_paths.keys():
            video_paths[config.video_name_lerobot] = new_video_path
        assert config.pov_cam_name_sim in trajectory["obs"]
        frames = np.array(trajectory["obs"][config.pov_cam_name_sim])
        # remove last frame due to how Lab reports observations
        frames = frames[:-1]
        assert len(frames) == length
        queue.put((new_video_path, frames, config.fps, "image"))

        # Keep the first successfully converted episode as the feature-schema example.
        if example_data is None:
            example_data = df_ret_dict

    # 3. Generate the rest of meta/
    # 3.1. Generate tasks.json
    tasks_path = lerobot_meta_dir / config.tasks_fname
    task_jsonlines = [{"task_index": task_index, "task": task} for task_index, task in tasks.items()]
    dump_jsonl(task_jsonlines, tasks_path)

    # 3.2. Generate episodes.jsonl
    episodes_path = lerobot_meta_dir / config.episodes_fname
    dump_jsonl(episodes_info, episodes_path)

    # 3.3. Generate modality.json
    modality_path = lerobot_meta_dir / config.modality_fname
    shutil.copy(config.modality_template_path, modality_path)

    # 3.4. Generate info.json
    info_json = generate_info(
        total_episodes=len(trajectory_ids),
        total_frames=total_length,
        total_tasks=len(tasks),
        total_videos=len(trajectory_ids),
        # Ceil division: episodes are assigned chunks via episode_index // chunks_size,
        # so a partial trailing chunk still counts (plain floor undercounted it).
        total_chunks=(len(trajectory_ids) + config.chunks_size - 1) // config.chunks_size,
        step_data=example_data["data"],
        video_paths=video_paths,
        config=config,
    )
    dump_json(info_json, lerobot_meta_dir / "info.json", indent=4)

    try:
        # Check for errors in the error queue
        while not error_queue.empty():
            error_message = error_queue.get()
            print(f"Error in worker process: {error_message}")

        # Stop the worker processes, then close the HDF5 file handler
        _shutdown_workers(queue, workers)
        hdf5_handler.close()

    except Exception as e:
        print(f"Error in main process: {e}")
        # Make sure to clean up even if there's an error
        for worker in workers:
            if worker.is_alive():
                worker.terminate()
                worker.join()
        # h5py.File has no `.closed` attribute — the original check raised
        # AttributeError here and masked the real error. close() is safe to
        # attempt unconditionally.
        try:
            hdf5_handler.close()
        except Exception:
            pass
        raise  # Re-raise the exception after cleanup
+
+
if __name__ == "__main__":
    # Parse the dataset-conversion configuration from the command line.
    dataset_config = tyro.cli(Gr00tN1DatasetConfig)

    # Echo the resolved configuration before starting the conversion.
    banner = "=" * 50
    print("\n" + banner)
    print("GR00T LEROBOT DATASET CONFIGURATION:")
    print(banner)
    for field_name, field_value in vars(dataset_config).items():
        print(f"{field_name}: {field_value}")
    print(banner + "\n")
    convert_hdf5_to_lerobot(dataset_config)
diff --git a/scripts/evaluate_gn1.py b/scripts/evaluate_gn1.py
new file mode 100644
index 0000000000000000000000000000000000000000..d900a5148c825d06f127ce154d09b20eb673ab3c
--- /dev/null
+++ b/scripts/evaluate_gn1.py
@@ -0,0 +1,159 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from isaacsim import SimulationApp # noqa: F401 # isort: skip
+from isaaclab.app import AppLauncher # noqa: F401 # isort: skip
+
+import contextlib
+import os
+import torch
+import tqdm
+from typing import Optional
+
+import tyro
+from io_utils import VideoWriter
+
+from config.args import Gr00tN1ClosedLoopArguments
+
# Parse CLI arguments first so launcher-related flags (headless, device, num_envs)
# are available before the simulator starts.
args = tyro.cli(Gr00tN1ClosedLoopArguments)

if args.enable_pinocchio:
    # Import pinocchio before AppLauncher to force the use of the version installed by IsaacLab and
    # not the one installed by Isaac Sim pinocchio is required by the Pink IK controllers and the
    # GR1T2 retargeter
    import pinocchio  # noqa: F401

# Launch the simulator
# Cameras are always enabled because the policy consumes the robot POV camera.
app_launcher = AppLauncher(
    headless=args.headless, enable_cameras=True, num_envs=args.num_envs, device=args.simulation_device
)
simulation_app = app_launcher.app
+
+import gymnasium as gym
+
+from closed_loop_policy import create_sim_environment
+from evaluators.gr00t_n1_evaluator import Gr00tN1Evaluator
+from policies.gr00t_n1_policy import Gr00tN1Policy
+from robot_joints import JointsAbsPosition
+
+from isaaclab.envs import ManagerBasedRLEnvCfg
+
+import isaaclab_eval_tasks.tasks # noqa: F401
+
+
def run_closed_loop_policy(
    args: Gr00tN1ClosedLoopArguments,
    simulation_app: SimulationApp,
    env_cfg: ManagerBasedRLEnvCfg,
    policy: Gr00tN1Policy,
    evaluator: Optional[Gr00tN1Evaluator] = None,
):
    """
    Roll out the GR00T-N1 policy in closed loop inside the simulator.

    Args:
        args: Parsed CLI arguments (task, seed, rollout length, recording, etc.).
        simulation_app: Running Isaac Sim application handle.
        env_cfg: Environment configuration; its success termination term is
            extracted for evaluation, then terminations are disabled so the env
            never auto-resets mid-rollout.
        policy: Policy queried for new joint-position goals each step.
        evaluator: Optional evaluator that tallies success across rollouts.
    """
    # Extract success checking function
    success_term = env_cfg.terminations.success
    # Disable terminations to avoid reset env
    env_cfg.terminations = {}

    # Create environment from loaded config
    env = gym.make(args.task, cfg=env_cfg).unwrapped
    # Set seed
    env.seed(args.seed)

    # Create camera video recorder
    video_writer = None
    # Only record the first environment if multiple envs are running in one episode
    if args.record_camera:
        video_count = 0
        video_fpath = os.path.join(
            args.record_video_output_path, f"{args.task_name}_{args.checkpoint_name}_{video_count}.mp4"
        )
        video_writer = VideoWriter(
            video_fpath,
            # Height, width to be in the order of (width, height) for cv2
            args.original_image_size[:2][::-1],
            fps=20,
        )

    # BUG FIX: the original `suppress(KeyboardInterrupt) and inference_mode()`
    # evaluated the `and` expression and entered ONLY inference_mode(), so
    # Ctrl-C was never suppressed. Enter both context managers instead.
    with contextlib.suppress(KeyboardInterrupt), torch.inference_mode():
        while simulation_app.is_running() and not simulation_app.is_exiting():

            # Terminate the simulation_app if having enough rollouts counted by the evaluator
            # Otherwise, continue the rollout endlessly
            if evaluator is not None and evaluator.num_rollouts >= args.max_num_rollouts:
                break

            # Reset environment
            env.sim.reset()
            env.reset(seed=args.seed)

            robot = env.scene["robot"]
            robot_state_sim = JointsAbsPosition(
                robot.data.joint_pos, policy.gr1_state_joints_config, args.simulation_device
            )

            ego_camera = env.scene["robot_pov_cam"]

            if args.record_camera and video_writer is not None and os.path.exists(video_fpath):
                # Replace the last part of the video file name with the video count
                video_fpath = "_".join(video_fpath.split("_")[:-1]) + f"_{video_count}.mp4"
                video_writer.change_file_path(video_fpath)

            for _ in tqdm.tqdm(range(args.rollout_length)):
                robot_state_sim.set_joints_pos(robot.data.joint_pos)

                robot_action_sim = policy.get_new_goal(robot_state_sim, ego_camera, args.language_instruction)
                rollout_action = robot_action_sim.get_joints_pos(args.simulation_device)

                # Number of joints from policy shall match the env action reqs
                assert rollout_action.shape[-1] == env.action_space.shape[1]

                # take only the first num_feedback_actions, the rest are ignored, preventing over memorization
                for i in range(args.num_feedback_actions):
                    assert rollout_action[:, i, :].shape[0] == args.num_envs
                    env.step(rollout_action[:, i, :])

                    if args.record_camera and video_writer is not None:
                        # Only record the first environment if multiple envs are running
                        video_writer.add_image(ego_camera.data.output["rgb"][0])

            if args.record_camera and video_writer is not None:
                video_count += 1

            # Check if rollout was successful
            if evaluator is not None:
                evaluator.evaluate_step(env, success_term)
                evaluator.summarize_demos()

    # Log evaluation results to a file
    if evaluator is not None:
        evaluator.maybe_write_eval_file()
    if video_writer is not None:
        video_writer.close()
    env.close()
+
+
if __name__ == "__main__":
    # `args` was parsed at module import time (before AppLauncher); echo it here.
    print("args", args)

    # model and environment related params
    gr00t_n1_policy = Gr00tN1Policy(args)
    env_cfg = create_sim_environment(args)
    evaluator = Gr00tN1Evaluator(args.checkpoint_name, args.eval_file_path, args.seed)

    # Run the closed loop policy.
    run_closed_loop_policy(
        args=args, simulation_app=simulation_app, env_cfg=env_cfg, policy=gr00t_n1_policy, evaluator=evaluator
    )

    # Close simulation app after rollout is complete
    simulation_app.close()
diff --git a/scripts/evaluators/__init__.py b/scripts/evaluators/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3159bfe65645499015bd92609b99d476d69544e9
--- /dev/null
+++ b/scripts/evaluators/__init__.py
@@ -0,0 +1,14 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/scripts/evaluators/evaluator_base.py b/scripts/evaluators/evaluator_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..d149ef59d388a6f0bb75448418c04ec91f8b0d2f
--- /dev/null
+++ b/scripts/evaluators/evaluator_base.py
@@ -0,0 +1,71 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gymnasium as gym
+import json
+import os
+from abc import ABC, abstractmethod
+from datetime import datetime
+from typing import Dict, Optional
+
+JSON_INDENT = 4
+
+
class EvaluatorBase(ABC):
    """
    Base class for all evaluators. An evaluator tracks the performance of a task over a series of demos.
    """

    def __init__(self, checkpoint_name: str, eval_file_path: Optional[str] = None, seed: int = 10) -> None:
        """
        Initializes the EvaluatorBase object.

        Args:
            checkpoint_name (str): Name of the checkpoint used for evaluation.
            eval_file_path (str, optional): Path where the evaluation file should be stored.
                Defaults to None (no evaluation file will be written).
            seed (int, optional): Random seed recorded in the evaluation metadata. Defaults to 10.
        """
        if eval_file_path is not None:
            # Fail early if the target directory does not exist.
            # NOTE(review): a bare filename has dirname "" which fails this assert — pass a full path.
            assert os.path.exists(os.path.dirname(eval_file_path))
        self.eval_file_path = eval_file_path
        # Results dict serialized by maybe_write_eval_file(); subclasses add their own entries.
        self.eval_dict = {}
        self.eval_dict["metadata"] = {
            "checkpoint_name": checkpoint_name,
            "seed": seed,
            "date": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        }

    @abstractmethod
    def evaluate_step(self, env: gym.Env) -> None:
        """
        Evaluates the current state of the task.

        Args:
            env (gym.Env): The environment in which the task is being evaluated.
                Subclasses may accept additional arguments (e.g. a termination term).
        """
        pass

    def maybe_write_eval_file(self):
        """
        If the evaluation file path is set, write the eval dict to it as JSON.
        """
        if self.eval_file_path is not None:
            with open(self.eval_file_path, "w") as json_file:
                json.dump(self.eval_dict, json_file, indent=JSON_INDENT)

    @abstractmethod
    def summarize_demos(self) -> Dict:
        """Summarize results across all demos; implementations also populate eval_dict."""
        pass
diff --git a/scripts/evaluators/gr00t_n1_evaluator.py b/scripts/evaluators/gr00t_n1_evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..14a36ce8a0f5d2e6dc527b7d0f1befd2d11aaf33
--- /dev/null
+++ b/scripts/evaluators/gr00t_n1_evaluator.py
@@ -0,0 +1,54 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gymnasium as gym
+import torch
+from typing import Optional
+
+from evaluators.evaluator_base import EvaluatorBase
+
+from isaaclab.managers import TerminationTermCfg as DoneTerm
+
+
class Gr00tN1Evaluator(EvaluatorBase):
    """
    The purpose of this class is to evaluate the performance of gr00t-N1 policy on the nut pouring
    and pipe sorting tasks, by tracking the success rate of the policy over a series of demos.
    Success is defined as termination term in the environment configuration script
    """

    def __init__(self, checkpoint_name: str, eval_file_path: Optional[str] = None, seed: int = 10) -> None:
        super().__init__(checkpoint_name, eval_file_path, seed)
        self.num_success = 0  # count of successful rollouts across all envs
        self.num_rollouts = 0  # total rollouts evaluated across all envs

    def evaluate_step(self, env: gym.Env, succeess_term: DoneTerm) -> None:
        """Tally successes for every parallel environment of a finished rollout.

        Args:
            env: The environment being evaluated.
            succeess_term: Success termination term whose `func` returns a per-env
                boolean tensor. (Misspelled name kept for caller compatibility.)
        """
        success_term_val = succeess_term.func(env, **succeess_term.params)

        self.num_success += torch.sum(success_term_val).item()
        self.num_rollouts += len(success_term_val)

    def summarize_demos(self):
        """Print a summary table and store it under eval_dict["summary"]."""
        # Guard against division by zero when called before any rollout finished.
        success_rate = self.num_success / self.num_rollouts if self.num_rollouts else 0.0
        # print in terminal with a table layout
        print(f"\n{'='*50}")
        print(f"\nSuccessful trials: {self.num_success}, out of {self.num_rollouts} trials")
        print(f"Success rate: {success_rate}")
        print(f"{'='*50}\n")

        self.eval_dict["summary"] = {
            "successful_trials": self.num_success,
            "total_rollouts": self.num_rollouts,
            "success_rate": success_rate,
        }
diff --git a/scripts/io_utils.py b/scripts/io_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..79618788c2fc5ec019495858342e4047c718ddd5
--- /dev/null
+++ b/scripts/io_utils.py
@@ -0,0 +1,109 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import json
+import numpy as np
+import yaml
+from pathlib import Path
+from typing import Any, Dict, Tuple
+
+import cv2
+
+
def dump_jsonl(data, file_path):
    """
    Write a sequence of data to a file in JSON Lines format.

    Args:
        data: Sequence of items (or a NumPy array) to write, one JSON document per line.
        file_path: Path to the output file.

    Returns:
        None
    """
    # Convert NumPy containers BEFORE the sequence check: np.ndarray is not a
    # collections.abc.Sequence, so with the original order arrays failed the
    # assert before they could ever be converted.
    if isinstance(data, (np.ndarray, np.number)):
        data = data.tolist()
    assert isinstance(data, collections.abc.Sequence) and not isinstance(data, str)
    with open(file_path, "w") as fp:
        for line in data:
            print(json.dumps(line), file=fp, flush=True)
+
+
def dump_json(data, file_path, **kwargs):
    """
    Write data to a file in standard JSON format.

    Args:
        data: Data to serialize; NumPy arrays/scalars are converted via tolist() first.
        file_path: Path to the output file.
        **kwargs: Additional keyword arguments forwarded to json.dump.

    Returns:
        None
    """
    payload = data.tolist() if isinstance(data, (np.ndarray, np.number)) else data
    with open(file_path, "w") as fp:
        json.dump(payload, fp, **kwargs)
+
+
+def load_json(file_path: str | Path, **kwargs) -> Dict[str, Any]:
+ """
+ Load a JSON file.
+
+ Args:
+ file_path: Path to the JSON file to load.
+ **kwargs: Additional keyword arguments for the JSON loader.
+
+ Returns:
+ Dictionary loaded from the JSON file.
+ """
+ with open(file_path) as fp:
+ return json.load(fp, **kwargs)
+
+
def load_gr1_joints_config(yaml_path: str | Path) -> Dict[str, Any]:
    """Load the GR1 joint name -> index mapping from a YAML config file.

    Args:
        yaml_path: Path to a YAML file with a top-level ``joints`` mapping.

    Returns:
        The ``joints`` mapping, or an empty dict when the key (or content) is missing.
    """
    with open(yaml_path, encoding="utf-8") as f:
        config = yaml.safe_load(f)
    # safe_load returns None for an empty document; guard so .get() doesn't raise.
    if config is None:
        return {}
    return config.get("joints", {})
+
+
class VideoWriter:
    """
    A class for writing videos from images.

    Wraps cv2.VideoWriter with an mp4v codec; frames are delivered as RGB
    tensors and converted to BGR for OpenCV.
    """

    def __init__(self, out_video_path: str, video_size: Tuple, fps: int = 20):
        """
        Args:
            out_video_path: Destination mp4 file path.
            video_size: (width, height) tuple, as expected by cv2.VideoWriter.
            fps: Frames per second of the output video.
        """
        self.fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        self.fps = fps
        self.video_size = video_size
        self.writer = cv2.VideoWriter(out_video_path, self.fourcc, fps, video_size)
        print(f"Writing video to: {out_video_path}")

    def add_image(self, img):
        """Append one frame to the video.

        NOTE(review): assumes `img` is a torch tensor in RGB channel order with an
        optional leading singleton batch dim (squeeze + .cpu().numpy()) — confirm.
        """
        # Permute to (BGR) and resize
        img_bgr = img.squeeze()[:, :, [2, 1, 0]]
        resized_img = cv2.resize(img_bgr.cpu().numpy(), self.video_size)
        self.writer.write(resized_img)

    def change_file_path(self, out_video_path: str):
        """Finish the current file and start a new one with the same codec/fps/size."""
        self.writer.release()
        self.writer = cv2.VideoWriter(out_video_path, self.fourcc, self.fps, self.video_size)
        print(f"Writing video to: {out_video_path}")

    def close(self):
        """Release the underlying cv2 writer, flushing the file to disk."""
        self.writer.release()
diff --git a/scripts/policies/__init__.py b/scripts/policies/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3159bfe65645499015bd92609b99d476d69544e9
--- /dev/null
+++ b/scripts/policies/__init__.py
@@ -0,0 +1,14 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/scripts/policies/gr00t_n1_policy.py b/scripts/policies/gr00t_n1_policy.py
new file mode 100644
index 0000000000000000000000000000000000000000..2767b202400a1783a9524e9efb98486aac5a0897
--- /dev/null
+++ b/scripts/policies/gr00t_n1_policy.py
@@ -0,0 +1,112 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from gr00t.experiment.data_config import DATA_CONFIG_MAP
+from gr00t.model.policy import Gr00tPolicy
+from io_utils import load_gr1_joints_config
+from policies.image_conversion import resize_frames_with_padding
+from policies.joints_conversion import remap_policy_joints_to_sim_joints, remap_sim_joints_to_policy_joints
+from policies.policy_base import PolicyBase
+from robot_joints import JointsAbsPosition
+
+from isaaclab.sensors import Camera
+
+from config.args import Gr00tN1ClosedLoopArguments
+
+
+class Gr00tN1Policy(PolicyBase):
+ def __init__(self, args: Gr00tN1ClosedLoopArguments):
+ self.args = args
+ self.policy = self._load_policy()
+ self._load_policy_joints_config()
+ self._load_sim_joints_config()
+
+ def _load_policy_joints_config(self):
+ """Load the policy joint config from the data config."""
+ self.gr00t_joints_config = load_gr1_joints_config(self.args.gr00t_joints_config_path)
+
+ def _load_sim_joints_config(self):
+ """Load the simulation joint config from the data config."""
+ self.gr1_state_joints_config = load_gr1_joints_config(self.args.state_joints_config_path)
+ self.gr1_action_joints_config = load_gr1_joints_config(self.args.action_joints_config_path)
+
+ def _load_policy(self):
+ """Load the policy from the model path."""
+ assert os.path.exists(self.args.model_path), f"Model path {self.args.model_path} does not exist"
+
+ # Use the same data preprocessor as the loaded fine-tuned ckpts
+ self.data_config = DATA_CONFIG_MAP[self.args.data_config]
+
+ modality_config = self.data_config.modality_config()
+ modality_transform = self.data_config.transform()
+ # load the policy
+ return Gr00tPolicy(
+ model_path=self.args.model_path,
+ modality_config=modality_config,
+ modality_transform=modality_transform,
+ embodiment_tag=self.args.embodiment_tag,
+ denoising_steps=self.args.denoising_steps,
+ device=self.args.policy_device,
+ )
+
+ def step(self, current_state: JointsAbsPosition, camera: Camera) -> JointsAbsPosition:
+ """Call every simulation step to update policy's internal state."""
+ pass
+
+ def get_new_goal(
+ self, current_state: JointsAbsPosition, ego_camera: Camera, language_instruction: str
+ ) -> JointsAbsPosition:
+ """
+ Run policy prediction on the given observations. Produce a new action goal for the robot.
+
+ Args:
+ current_state: robot proprioceptive state observation
+ ego_camera: camera sensor observation
+ language_instruction: language instruction for the task
+
+ Returns:
+            A JointsAbsPosition containing the inferred action for robot joints, remapped to simulation joint order.
+ """
+ rgb = ego_camera.data.output["rgb"]
+ # Apply preprocessing to rgb
+ rgb = resize_frames_with_padding(
+ rgb, target_image_size=self.args.target_image_size, bgr_conversion=False, pad_img=True
+ )
+ # Retrieve joint positions as proprioceptive states and remap to policy joint orders
+ robot_state_policy = remap_sim_joints_to_policy_joints(current_state, self.gr00t_joints_config)
+
+ # Pack inputs to dictionary and run the inference
+ observations = {
+ "annotation.human.action.task_description": [language_instruction], # list of strings
+ "video.ego_view": rgb.reshape(-1, 1, 256, 256, 3), # numpy array of shape (N, 1, 256, 256, 3)
+ "state.left_arm": robot_state_policy["left_arm"].reshape(-1, 1, 7), # numpy array of shape (N, 1, 7)
+ "state.right_arm": robot_state_policy["right_arm"].reshape(-1, 1, 7), # numpy array of shape (N, 1, 7)
+ "state.left_hand": robot_state_policy["left_hand"].reshape(-1, 1, 6), # numpy array of shape (N, 1, 6)
+ "state.right_hand": robot_state_policy["right_hand"].reshape(-1, 1, 6), # numpy array of shape (N, 1, 6)
+ }
+ robot_action_policy = self.policy.get_action(observations)
+
+ robot_action_sim = remap_policy_joints_to_sim_joints(
+ robot_action_policy, self.gr00t_joints_config, self.gr1_action_joints_config, self.args.simulation_device
+ )
+
+ return robot_action_sim
+
+ def reset(self):
+ """Resets the policy's internal state."""
+ # As GN1 is a single-shot policy, we don't need to reset its internal state
+ pass
diff --git a/scripts/policies/image_conversion.py b/scripts/policies/image_conversion.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ebce677e0616c87622f3ee86a073f74ec97402b
--- /dev/null
+++ b/scripts/policies/image_conversion.py
@@ -0,0 +1,56 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import torch
+
+import cv2
+
+
+def resize_frames_with_padding(
+ frames: torch.Tensor | np.ndarray, target_image_size: tuple, bgr_conversion: bool = False, pad_img: bool = True
+) -> np.ndarray:
+ """Process batch of frames with padding and resizing vectorized
+ Args:
+        frames: torch.Tensor or np.ndarray of shape [N, 160, 256, 3] (batch, height, width, channels)
+ target_image_size: target size (height, width)
+ bgr_conversion: whether to convert BGR to RGB
+        pad_img: whether to pad images to square (along the height axis) before resizing
+ """
+ if isinstance(frames, torch.Tensor):
+ frames = frames.cpu().numpy()
+ elif not isinstance(frames, np.ndarray):
+ raise ValueError(f"Invalid frame type: {type(frames)}")
+
+ if bgr_conversion:
+        frames = frames[..., ::-1]  # channel reversal works on a batched [N, H, W, 3] array; cv2.cvtColor only accepts single images
+
+ if pad_img:
+ top_padding = (frames.shape[2] - frames.shape[1]) // 2
+ bottom_padding = top_padding
+
+ # Add padding to all frames at once
+ frames = np.pad(
+ frames,
+ pad_width=((0, 0), (top_padding, bottom_padding), (0, 0), (0, 0)),
+ mode="constant",
+ constant_values=0,
+ )
+
+ # Resize all frames at once
+    if frames.shape[1:3] != tuple(target_image_size):
+ frames = np.stack([cv2.resize(f, target_image_size) for f in frames])
+
+ return frames
diff --git a/scripts/policies/joints_conversion.py b/scripts/policies/joints_conversion.py
new file mode 100644
index 0000000000000000000000000000000000000000..c83fc11fe977c99d598db684ea8c10314f2e0651
--- /dev/null
+++ b/scripts/policies/joints_conversion.py
@@ -0,0 +1,81 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import torch
+from typing import Dict, List
+
+from robot_joints import JointsAbsPosition
+
+
+def remap_sim_joints_to_policy_joints(
+ sim_joints_state: JointsAbsPosition, policy_joints_config: Dict[str, List[str]]
+) -> Dict[str, np.ndarray]:
+ """
+ Remap the state or actions joints from simulation joint orders to policy joint orders
+ """
+ data = {}
+ assert isinstance(sim_joints_state, JointsAbsPosition)
+ for group, joints_list in policy_joints_config.items():
+ data[group] = []
+ for joint_name in joints_list:
+ if joint_name in sim_joints_state.joints_order_config:
+ joint_index = sim_joints_state.joints_order_config[joint_name]
+ data[group].append(sim_joints_state.joints_pos[:, joint_index])
+ else:
+ raise ValueError(f"Joint {joint_name} not found in {sim_joints_state.joints_order_config}")
+
+ data[group] = np.stack(data[group], axis=1)
+ return data
+
+
+def remap_policy_joints_to_sim_joints(
+    policy_joints: Dict[str, np.ndarray],
+ policy_joints_config: Dict[str, List[str]],
+ sim_joints_config: Dict[str, int],
+ device: torch.device,
+) -> JointsAbsPosition:
+ """
+ Remap the actions joints from policy joint orders to simulation joint orders
+ """
+ # assert all values in policy_joint keys are the same shape and save the shape to init data
+ policy_joint_shape = None
+ for _, joint_pos in policy_joints.items():
+ if policy_joint_shape is None:
+ policy_joint_shape = joint_pos.shape
+ else:
+ assert joint_pos.ndim == 3
+ assert joint_pos.shape[:2] == policy_joint_shape[:2]
+
+ assert policy_joint_shape is not None
+ data = torch.zeros([policy_joint_shape[0], policy_joint_shape[1], len(sim_joints_config)], device=device)
+ for joint_name, gr1_index in sim_joints_config.items():
+ match joint_name.split("_")[0]:
+ case "left":
+ joint_group = "left_arm"
+ case "right":
+ joint_group = "right_arm"
+ case "L":
+ joint_group = "left_hand"
+ case "R":
+ joint_group = "right_hand"
+ case _:
+ continue
+ if joint_name in policy_joints_config[joint_group]:
+ gr00t_index = policy_joints_config[joint_group].index(joint_name)
+ data[..., gr1_index] = torch.from_numpy(policy_joints[f"action.{joint_group}"][..., gr00t_index]).to(device)
+
+ sim_joints = JointsAbsPosition(joints_pos=data, joints_order_config=sim_joints_config, device=device)
+ return sim_joints
diff --git a/scripts/policies/policy_base.py b/scripts/policies/policy_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..c63ade6056ce0d532d78e225023fa13d2d51a1b8
--- /dev/null
+++ b/scripts/policies/policy_base.py
@@ -0,0 +1,40 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from abc import ABC, abstractmethod
+
+from robot_joints import JointsAbsPosition
+
+from isaaclab.sensors import Camera
+
+
+class PolicyBase(ABC):
+ """A base class for all policies."""
+
+ @abstractmethod
+ def step(self, current_state: JointsAbsPosition, camera: Camera) -> JointsAbsPosition:
+ """Called every simulation step to update policy's internal state."""
+ pass
+
+ @abstractmethod
+ def get_new_goal(self, current_state: JointsAbsPosition, camera: Camera) -> JointsAbsPosition:
+ """Generates a goal given the current state and camera observations."""
+ pass
+
+ @abstractmethod
+ def reset(self):
+ """Resets the policy's internal state."""
+ pass
diff --git a/scripts/robot_joints.py b/scripts/robot_joints.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a34c2841dd763e115c0fb3375147064fc40d025
--- /dev/null
+++ b/scripts/robot_joints.py
@@ -0,0 +1,57 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import torch
+from dataclasses import dataclass
+from typing import Dict
+
+
+@dataclass
+class JointsAbsPosition:
+ joints_pos: torch.Tensor
+ """Joint positions in radians"""
+
+ joints_order_config: Dict[str, int]
+ """Joints order configuration"""
+
+ device: torch.device
+ """Device to store the tensor on"""
+
+ @staticmethod
+ def zero(joint_order_config: Dict[str, int], device: torch.device):
+ return JointsAbsPosition(
+ joints_pos=torch.zeros((len(joint_order_config)), device=device),
+ joints_order_config=joint_order_config,
+ device=device,
+ )
+
+ def to_array(self) -> torch.Tensor:
+ return self.joints_pos.cpu().numpy()
+
+ @staticmethod
+ def from_array(array: np.ndarray, joint_order_config: Dict[str, int], device: torch.device) -> "JointsAbsPosition":
+ return JointsAbsPosition(
+ joints_pos=torch.from_numpy(array).to(device), joints_order_config=joint_order_config, device=device
+ )
+
+ def set_joints_pos(self, joints_pos: torch.Tensor):
+ self.joints_pos = joints_pos.to(self.device)
+
+ def get_joints_pos(self, device: torch.device = None) -> torch.Tensor:
+ if device is None:
+ return self.joints_pos
+ else:
+ return self.joints_pos.to(device)
diff --git a/source/isaaclab_eval_tasks/config/extension.toml b/source/isaaclab_eval_tasks/config/extension.toml
new file mode 100644
index 0000000000000000000000000000000000000000..5569a5b8ba53c02cc7bfdcd61a3b729ffb57afa1
--- /dev/null
+++ b/source/isaaclab_eval_tasks/config/extension.toml
@@ -0,0 +1,35 @@
+[package]
+
+# Semantic Versioning is used: https://semver.org/
+version = "0.1.0"
+
+# Description
+category = "isaaclab"
+readme = "README.md"
+
+title = "Extension Template"
+author = "Isaac Lab Project Developers"
+maintainer = "Isaac Lab Project Developers"
+description="Extension Template for Isaac Lab"
+repository = "https://github.com/isaac-sim/IsaacLabExtensionTemplate.git"
+keywords = ["extension", "template", "isaaclab"]
+
+[dependencies]
+"isaaclab" = {}
+"isaaclab_assets" = {}
+"isaaclab_mimic" = {}
+"isaaclab_rl" = {}
+"isaaclab_tasks" = {}
+# NOTE: Add additional dependencies here
+
+[[python.module]]
+name = "isaaclab_eval_tasks"
+
+[isaaclab_settings]
+# TODO: Uncomment and list any apt dependencies here.
+# If none, leave it commented out.
+# apt_deps = ["example_package"]
+# TODO: Uncomment and provide path to a ros_ws
+# with rosdeps to be installed. If none,
+# leave it commented out.
+# ros_ws = "path/from/extension_root/to/ros_ws"
\ No newline at end of file
diff --git a/source/isaaclab_eval_tasks/isaaclab_eval_tasks/__init__.py b/source/isaaclab_eval_tasks/isaaclab_eval_tasks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..10030fe381182159e9e5ff1488e5748204a191ab
--- /dev/null
+++ b/source/isaaclab_eval_tasks/isaaclab_eval_tasks/__init__.py
@@ -0,0 +1,21 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Python module serving as a project/extension template.
+"""
+
+# # Register Gym environments.
+# from .tasks import *
diff --git a/source/isaaclab_eval_tasks/isaaclab_eval_tasks/tasks/__init__.py b/source/isaaclab_eval_tasks/isaaclab_eval_tasks/tasks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..50c7e2ffba2f9849edd851c112bf630c0983cde1
--- /dev/null
+++ b/source/isaaclab_eval_tasks/isaaclab_eval_tasks/tasks/__init__.py
@@ -0,0 +1,44 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Package containing task implementations for various robotic environments."""
+
+import gymnasium as gym
+
+from isaaclab_tasks.utils import import_packages
+
+from .manipulation.pick_place import exhaustpipe_gr1t2_closedloop_env_cfg, nutpour_gr1t2_closedloop_env_cfg
+
+##
+# Register Gym environments.
+##
+gym.register(
+ id="Isaac-ExhaustPipe-GR1T2-ClosedLoop-v0",
+ entry_point="isaaclab.envs:ManagerBasedRLEnv",
+ kwargs={"env_cfg_entry_point": exhaustpipe_gr1t2_closedloop_env_cfg.ExhaustPipeGR1T2ClosedLoopEnvCfg},
+)
+
+gym.register(
+ id="Isaac-NutPour-GR1T2-ClosedLoop-v0",
+ entry_point="isaaclab.envs:ManagerBasedRLEnv",
+ kwargs={
+ "env_cfg_entry_point": nutpour_gr1t2_closedloop_env_cfg.NutPourGR1T2ClosedLoopEnvCfg,
+ },
+)
+
+# The blacklist is used to prevent importing configs from sub-packages
+_BLACKLIST_PKGS = ["utils"]
+# Import all configs in this package
+import_packages(__name__, _BLACKLIST_PKGS)
diff --git a/source/isaaclab_eval_tasks/isaaclab_eval_tasks/tasks/manipulation/pick_place/exhaustpipe_gr1t2_closedloop_env_cfg.py b/source/isaaclab_eval_tasks/isaaclab_eval_tasks/tasks/manipulation/pick_place/exhaustpipe_gr1t2_closedloop_env_cfg.py
new file mode 100644
index 0000000000000000000000000000000000000000..4530a1a9c522664bc2a7233b0c3e85b63d52edee
--- /dev/null
+++ b/source/isaaclab_eval_tasks/isaaclab_eval_tasks/tasks/manipulation/pick_place/exhaustpipe_gr1t2_closedloop_env_cfg.py
@@ -0,0 +1,104 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import isaaclab.envs.mdp as mdp
+import isaaclab.sim as sim_utils
+from isaaclab.sensors.camera import TiledCameraCfg
+from isaaclab.utils import configclass
+from isaaclab_tasks.manager_based.manipulation.pick_place.exhaustpipe_gr1t2_base_env_cfg import (
+ ExhaustPipeGR1T2BaseEnvCfg,
+)
+
+joint_names_dict = {
+ # arm joint
+ "left_shoulder_pitch_joint": 0,
+ "right_shoulder_pitch_joint": 1,
+ "left_shoulder_roll_joint": 2,
+ "right_shoulder_roll_joint": 3,
+ "left_shoulder_yaw_joint": 4,
+ "right_shoulder_yaw_joint": 5,
+ "left_elbow_pitch_joint": 6,
+ "right_elbow_pitch_joint": 7,
+ "left_wrist_yaw_joint": 8,
+ "right_wrist_yaw_joint": 9,
+ "left_wrist_roll_joint": 10,
+ "right_wrist_roll_joint": 11,
+ "left_wrist_pitch_joint": 12,
+ "right_wrist_pitch_joint": 13,
+ # hand joints
+ "L_index_proximal_joint": 14,
+ "L_middle_proximal_joint": 15,
+ "L_pinky_proximal_joint": 16,
+ "L_ring_proximal_joint": 17,
+ "L_thumb_proximal_yaw_joint": 18,
+ "R_index_proximal_joint": 19,
+ "R_middle_proximal_joint": 20,
+ "R_pinky_proximal_joint": 21,
+ "R_ring_proximal_joint": 22,
+ "R_thumb_proximal_yaw_joint": 23,
+ "L_index_intermediate_joint": 24,
+ "L_middle_intermediate_joint": 25,
+ "L_pinky_intermediate_joint": 26,
+ "L_ring_intermediate_joint": 27,
+ "L_thumb_proximal_pitch_joint": 28,
+ "R_index_intermediate_joint": 29,
+ "R_middle_intermediate_joint": 30,
+ "R_pinky_intermediate_joint": 31,
+ "R_ring_intermediate_joint": 32,
+ "R_thumb_proximal_pitch_joint": 33,
+ "L_thumb_distal_joint": 34,
+ "R_thumb_distal_joint": 35,
+}
+joint_names = list(joint_names_dict.keys())
+tuned_joint_names = ["left-arm", "right-arm"]
+
+
+@configclass
+class ExhaustPipeGR1T2ClosedLoopEnvCfg(ExhaustPipeGR1T2BaseEnvCfg):
+ def __post_init__(self):
+ # post init of parent
+ super().__post_init__()
+ # replace the stiffness and dynamics in arm joints in the robot
+ for joint_name in tuned_joint_names:
+ self.scene.robot.actuators[joint_name].stiffness = 3000
+ self.scene.robot.actuators[joint_name].damping = 100
+
+ self.scene.robot_pov_cam = TiledCameraCfg(
+ height=160,
+ width=256,
+ offset=TiledCameraCfg.OffsetCfg(
+ pos=(0.0, 0.12, 1.85418), rot=(-0.17246, 0.98502, 0.0, 0.0), convention="ros"
+ ),
+ prim_path="{ENV_REGEX_NS}/RobotPOVCam",
+ update_period=0,
+ data_types=["rgb"],
+ spawn=sim_utils.PinholeCameraCfg(focal_length=18.15, clipping_range=(0.1, 2)),
+ )
+
+ self.actions.gr1_action = mdp.JointPositionActionCfg(
+ asset_name="robot", joint_names=joint_names, scale=1.0, use_default_offset=False
+ )
+ self.viewer.eye = (0.0, 1.8, 1.5)
+ self.viewer.lookat = (0.0, 0.0, 1.0)
+
+ self.episode_length_s = 20.0
+ # simulation settings
+ self.sim.dt = 1 / 100
+ self.decimation = 5
+
+ self.sim.render_interval = 5
+ # WAR to skip while loop bug after calling env.reset() followed by env.sim.reset()
+ # https://github.com/isaac-sim/IsaacLab-Internal/blob/devel/source/isaaclab/isaaclab/envs/manager_based_env.py#L311C13-L311C53
+ self.wait_for_textures = False
diff --git a/source/isaaclab_eval_tasks/isaaclab_eval_tasks/tasks/manipulation/pick_place/nutpour_gr1t2_closedloop_env_cfg.py b/source/isaaclab_eval_tasks/isaaclab_eval_tasks/tasks/manipulation/pick_place/nutpour_gr1t2_closedloop_env_cfg.py
new file mode 100644
index 0000000000000000000000000000000000000000..79ed8eebc6f588dd747f5bbe276979fa0079a148
--- /dev/null
+++ b/source/isaaclab_eval_tasks/isaaclab_eval_tasks/tasks/manipulation/pick_place/nutpour_gr1t2_closedloop_env_cfg.py
@@ -0,0 +1,101 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import isaaclab.envs.mdp as mdp
+import isaaclab.sim as sim_utils
+from isaaclab.sensors.camera import TiledCameraCfg
+from isaaclab.utils import configclass
+from isaaclab_tasks.manager_based.manipulation.pick_place.nutpour_gr1t2_base_env_cfg import NutPourGR1T2BaseEnvCfg
+
+joint_names_dict = {
+ # arm joint
+ "left_shoulder_pitch_joint": 0,
+ "right_shoulder_pitch_joint": 1,
+ "left_shoulder_roll_joint": 2,
+ "right_shoulder_roll_joint": 3,
+ "left_shoulder_yaw_joint": 4,
+ "right_shoulder_yaw_joint": 5,
+ "left_elbow_pitch_joint": 6,
+ "right_elbow_pitch_joint": 7,
+ "left_wrist_yaw_joint": 8,
+ "right_wrist_yaw_joint": 9,
+ "left_wrist_roll_joint": 10,
+ "right_wrist_roll_joint": 11,
+ "left_wrist_pitch_joint": 12,
+ "right_wrist_pitch_joint": 13,
+ # hand joints
+ "L_index_proximal_joint": 14,
+ "L_middle_proximal_joint": 15,
+ "L_pinky_proximal_joint": 16,
+ "L_ring_proximal_joint": 17,
+ "L_thumb_proximal_yaw_joint": 18,
+ "R_index_proximal_joint": 19,
+ "R_middle_proximal_joint": 20,
+ "R_pinky_proximal_joint": 21,
+ "R_ring_proximal_joint": 22,
+ "R_thumb_proximal_yaw_joint": 23,
+ "L_index_intermediate_joint": 24,
+ "L_middle_intermediate_joint": 25,
+ "L_pinky_intermediate_joint": 26,
+ "L_ring_intermediate_joint": 27,
+ "L_thumb_proximal_pitch_joint": 28,
+ "R_index_intermediate_joint": 29,
+ "R_middle_intermediate_joint": 30,
+ "R_pinky_intermediate_joint": 31,
+ "R_ring_intermediate_joint": 32,
+ "R_thumb_proximal_pitch_joint": 33,
+ "L_thumb_distal_joint": 34,
+ "R_thumb_distal_joint": 35,
+}
+joint_names = list(joint_names_dict.keys())
+tuned_joint_names = ["left-arm", "right-arm"]
+
+
+@configclass
+class NutPourGR1T2ClosedLoopEnvCfg(NutPourGR1T2BaseEnvCfg):
+ def __post_init__(self):
+ # post init of parent
+ super().__post_init__()
+ # replace the stiffness and dynamics in arm joints in the robot
+ for joint_name in tuned_joint_names:
+ self.scene.robot.actuators[joint_name].stiffness = 3000
+ self.scene.robot.actuators[joint_name].damping = 100
+
+ self.scene.robot_pov_cam = TiledCameraCfg(
+ height=160,
+ width=256,
+ offset=TiledCameraCfg.OffsetCfg(
+ pos=(0.0, 0.12, 1.67675), rot=(-0.19848, 0.9801, 0.0, 0.0), convention="ros"
+ ),
+ prim_path="{ENV_REGEX_NS}/RobotPOVCam",
+ update_period=0,
+ data_types=["rgb"],
+ spawn=sim_utils.PinholeCameraCfg(focal_length=18.15, clipping_range=(0.1, 2)),
+ )
+ self.actions.gr1_action = mdp.JointPositionActionCfg(
+ asset_name="robot", joint_names=joint_names, scale=1.0, use_default_offset=False
+ )
+ self.viewer.eye = (0.0, 1.8, 1.5)
+ self.viewer.lookat = (0.0, 0.0, 1.0)
+
+ self.episode_length_s = 20.0
+ # simulation settings
+ self.sim.dt = 1 / 100
+ self.decimation = 5
+ self.sim.render_interval = 5
+
+ # WAR to skip while loop bug after calling env.reset() followed by env.sim.reset()
+ # https://github.com/isaac-sim/IsaacLab-Internal/blob/devel/source/isaaclab/isaaclab/envs/manager_based_env.py#L311C13-L311C53
+ self.wait_for_textures = False
diff --git a/source/isaaclab_eval_tasks/isaaclab_eval_tasks/tests/test_environments.py b/source/isaaclab_eval_tasks/isaaclab_eval_tasks/tests/test_environments.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cad060683d3913fd4e09fc5b617abb0237e48e7
--- /dev/null
+++ b/source/isaaclab_eval_tasks/isaaclab_eval_tasks/tests/test_environments.py
@@ -0,0 +1,191 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Launch Isaac Sim Simulator first."""
+
+import sys
+
+# Import pinocchio in the main script to force the use of the dependencies installed by IsaacLab and not the one installed by Isaac Sim
+# pinocchio is required by the Pink IK controller
+if sys.platform != "win32":
+ import pinocchio # noqa: F401
+
+from isaaclab.app import AppLauncher, run_tests
+
+# launch the simulator
+app_launcher = AppLauncher(headless=True, enable_cameras=True)
+simulation_app = app_launcher.app
+
+
+"""Rest everything follows."""
+
+import gymnasium as gym
+import torch
+import unittest
+
+import carb
+
+# Omniverse logger
+import omni.log
+import omni.usd
+
+from isaaclab.envs import ManagerBasedRLEnvCfg
+from isaaclab.envs.utils.spaces import sample_space
+from isaaclab_tasks.utils.parse_cfg import parse_env_cfg
+
+# Import extensions to set up environment tasks
+import isaaclab_eval_tasks.tasks # noqa: F401
+
+
+class TestEnvironments(unittest.TestCase):
+ """Test cases for all registered environments."""
+
+ @classmethod
+ def setUpClass(cls):
+ # acquire all Isaac environments names
+ cls.registered_tasks = list()
+ for task_spec in gym.registry.values():
+ # Only test closed loop environments added in the isaaclab_eval_tasks extension
+ if "ClosedLoop-" in task_spec.id:
+ cls.registered_tasks.append(task_spec.id)
+ # sort environments by name
+ cls.registered_tasks.sort()
+ cls.single_env_tasks = []
+
+ # this flag is necessary to prevent a bug where the simulation gets stuck randomly when running the
+ # test on many environments.
+ carb_settings_iface = carb.settings.get_settings()
+ carb_settings_iface.set_bool("/physics/cooking/ujitsoCollisionCooking", False)
+
+ """
+ Test fixtures.
+ """
+
+ def test_multiple_num_envs_on_gpu(self):
+ """Run all environments with multiple instances and check environments return valid signals."""
+ # common parameters
+ num_envs = 32
+ device = "cuda"
+ # iterate over all registered environments
+ for task_name in self.registered_tasks:
+ # skip these environments as they cannot be run with 32 environments within reasonable VRAM
+ if task_name in self.single_env_tasks:
+ continue
+ with self.subTest(task_name=task_name):
+ print(f">>> Running test for environment: {task_name}")
+ # check environment
+ self._check_random_actions(task_name, device, num_envs, num_steps=100)
+ # close the environment
+ print(f">>> Closing environment: {task_name}")
+ print("-" * 80)
+
+ def test_single_env_on_gpu(self):
+ """Run all environments with single instance and check environments return valid signals."""
+ # common parameters
+ num_envs = 1
+ device = "cuda"
+ # iterate over all registered environments
+ for task_name in self.registered_tasks:
+ with self.subTest(task_name=task_name):
+ print(f">>> Running test for environment: {task_name}")
+ # check environment
+ self._check_random_actions(task_name, device, num_envs, num_steps=100)
+ # close the environment
+ print(f">>> Closing environment: {task_name}")
+ print("-" * 80)
+
+ """
+ Helper functions.
+ """
+
+ def _check_random_actions(self, task_name: str, device: str, num_envs: int, num_steps: int = 1000):
+ """Run random actions and check environments returned signals are valid."""
+ # create a new stage
+ omni.usd.get_context().new_stage()
+ # reset the rtx sensors carb setting to False
+ carb.settings.get_settings().set_bool("/isaaclab/render/rtx_sensors", False)
+ try:
+ # parse configuration
+ env_cfg: ManagerBasedRLEnvCfg = parse_env_cfg(task_name, device=device, num_envs=num_envs)
+
+ # skip test if the environment is a multi-agent task
+ if hasattr(env_cfg, "possible_agents"):
+ print(f"[INFO]: Skipping {task_name} as it is a multi-agent task")
+ return
+
+ # create environment
+ env = gym.make(task_name, cfg=env_cfg)
+ except Exception as e:
+ if "env" in locals() and hasattr(env, "_is_closed"):
+ env.close()
+ else:
+ if hasattr(e, "obj") and hasattr(e.obj, "_is_closed"):
+ e.obj.close()
+ self.fail(f"Failed to set-up the environment for task {task_name}. Error: {e}")
+
+ # disable control on stop
+ env.unwrapped.sim._app_control_on_stop_handle = None # type: ignore
+
+ # override action space if set to inf for `Isaac-Lift-Teddy-Bear-Franka-IK-Abs-v0`
+ if task_name == "Isaac-Lift-Teddy-Bear-Franka-IK-Abs-v0":
+ for i in range(env.unwrapped.single_action_space.shape[0]):
+ if env.unwrapped.single_action_space.low[i] == float("-inf"):
+ env.unwrapped.single_action_space.low[i] = -1.0
+ if env.unwrapped.single_action_space.high[i] == float("inf"):
+                        env.unwrapped.single_action_space.high[i] = 1.0
+
+ # reset environment
+ obs, _ = env.reset()
+ # check signal
+ self.assertTrue(self._check_valid_tensor(obs))
+ # simulate environment for num_steps steps
+ with torch.inference_mode():
+ for _ in range(num_steps):
+ # sample actions according to the defined space
+ actions = sample_space(
+ env.unwrapped.single_action_space, device=env.unwrapped.device, batch_size=num_envs
+ )
+ # apply actions
+ transition = env.step(actions)
+ # check signals
+ for data in transition[:-1]: # exclude info
+ self.assertTrue(self._check_valid_tensor(data), msg=f"Invalid data: {data}")
+
+ # close the environment
+ env.close()
+
+ @staticmethod
+ def _check_valid_tensor(data: torch.Tensor | dict) -> bool:
+ """Checks if given data does not have corrupted values.
+
+ Args:
+ data: Data buffer.
+
+ Returns:
+ True if the data is valid.
+ """
+ if isinstance(data, torch.Tensor):
+ return not torch.any(torch.isnan(data))
+ elif isinstance(data, (tuple, list)):
+ return all(TestEnvironments._check_valid_tensor(value) for value in data)
+ elif isinstance(data, dict):
+ return all(TestEnvironments._check_valid_tensor(value) for value in data.values())
+ else:
+ raise ValueError(f"Input data of invalid type: {type(data)}.")
+
+
+if __name__ == "__main__":
+ run_tests()
diff --git a/source/isaaclab_eval_tasks/pyproject.toml b/source/isaaclab_eval_tasks/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..d90ac3536f168228bdb8bb40c178ffa22f08bed2
--- /dev/null
+++ b/source/isaaclab_eval_tasks/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["setuptools", "wheel", "toml"]
+build-backend = "setuptools.build_meta"
diff --git a/source/isaaclab_eval_tasks/setup.py b/source/isaaclab_eval_tasks/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3f5c10b34d858592b8ef792249fdbfd03dc0fae
--- /dev/null
+++ b/source/isaaclab_eval_tasks/setup.py
@@ -0,0 +1,55 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Installation script for the 'isaaclab_eval_tasks' python package."""
+
+import os
+import toml
+
+from setuptools import setup
+
+# Locate this extension on disk so paths resolve regardless of the cwd.
+EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__))
+# Package metadata (author, version, description, ...) is sourced from the
+# extension's config/extension.toml rather than duplicated here.
+EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml"))
+
+# Minimum dependencies required prior to installation
+INSTALL_REQUIRES = [
+    # NOTE: add further runtime dependencies here as the package grows
+    "psutil",
+]
+
+# Installation operation
+setup(
+    name="isaaclab_eval_tasks",
+    packages=["isaaclab_eval_tasks"],
+    author=EXTENSION_TOML_DATA["package"]["author"],
+    maintainer=EXTENSION_TOML_DATA["package"]["maintainer"],
+    url=EXTENSION_TOML_DATA["package"]["repository"],
+    version=EXTENSION_TOML_DATA["package"]["version"],
+    description=EXTENSION_TOML_DATA["package"]["description"],
+    keywords=EXTENSION_TOML_DATA["package"]["keywords"],
+    install_requires=INSTALL_REQUIRES,
+    license="Apache-2.0",
+    include_package_data=True,
+    python_requires=">=3.11",
+    classifiers=[
+        "Natural Language :: English",
+        "Programming Language :: Python :: 3.11",
+        # NOTE(review): listing both Isaac Sim 2023.1.1 and 5.0.0 looks
+        # inconsistent -- verify which Isaac Sim versions are actually supported.
+        "Isaac Sim :: 2023.1.1",
+        "Isaac Sim :: 5.0.0",
+    ],
+    zip_safe=False,
+)