Add files using upload-large-folder tool
Browse files- project/ManiSkill3/src/maniskill3_environment/.github/workflows/publish-to-pypi.yml +35 -0
- project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/getting_started/quickstart.md +193 -0
- project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/learning_from_demos/baselines.md +28 -0
- project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/learning_from_demos/index.md +10 -0
- project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/learning_from_demos/setup.md +108 -0
- project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/reference/mani_skill.envs.sapien_env.md +10 -0
- project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/reference/mani_skill.utils.common.rst +10 -0
- project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/reference/mani_skill.utils.structs.rst +20 -0
- project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/tutorials/custom_tasks/loading_objects.md +170 -0
- project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/vision_language_action_models/index.md +11 -0
- project/ManiSkill3/src/maniskill3_environment/lerobot/.dockerignore +160 -0
- project/ManiSkill3/src/maniskill3_environment/lerobot/.gitattributes +20 -0
- project/ManiSkill3/src/maniskill3_environment/lerobot/.gitignore +173 -0
- project/ManiSkill3/src/maniskill3_environment/lerobot/.pre-commit-config.yaml +74 -0
- project/ManiSkill3/src/maniskill3_environment/lerobot/CODE_OF_CONDUCT.md +133 -0
- project/ManiSkill3/src/maniskill3_environment/lerobot/CONTRIBUTING.md +308 -0
- project/ManiSkill3/src/maniskill3_environment/lerobot/LICENSE +507 -0
- project/ManiSkill3/src/maniskill3_environment/lerobot/Makefile +142 -0
- project/ManiSkill3/src/maniskill3_environment/lerobot/README.md +393 -0
- project/ManiSkill3/src/maniskill3_environment/lerobot/pyproject.toml +137 -0
project/ManiSkill3/src/maniskill3_environment/.github/workflows/publish-to-pypi.yml
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Publish Python 🐍 distributions 📦 to PyPI and TestPyPI
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
release:
|
| 5 |
+
types: [published]
|
| 6 |
+
|
| 7 |
+
jobs:
|
| 8 |
+
build-n-publish:
|
| 9 |
+
name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI
|
| 10 |
+
runs-on: ubuntu-latest
|
| 11 |
+
steps:
|
| 12 |
+
- uses: actions/checkout@v3
|
| 13 |
+
- name: Set up Python
|
| 14 |
+
uses: actions/setup-python@v4
|
| 15 |
+
with:
|
| 16 |
+
python-version: "3.9"
|
| 17 |
+
- name: Install pypa/build
|
| 18 |
+
run: >-
|
| 19 |
+
python3 -m
|
| 20 |
+
pip install
|
| 21 |
+
build
|
| 22 |
+
--user
|
| 23 |
+
- name: Build a binary wheel and a source tarball
|
| 24 |
+
run: >-
|
| 25 |
+
python3 -m
|
| 26 |
+
build
|
| 27 |
+
--sdist
|
| 28 |
+
--wheel
|
| 29 |
+
--outdir dist/
|
| 30 |
+
.
|
| 31 |
+
- name: Publish distribution 📦 to PyPI
|
| 32 |
+
if: startsWith(github.ref, 'refs/tags/v')
|
| 33 |
+
uses: pypa/gh-action-pypi-publish@release/v1
|
| 34 |
+
with:
|
| 35 |
+
password: ${{ secrets.PYPI_API_TOKEN }}
|
project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/getting_started/quickstart.md
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# {octicon}`rocket` Quickstart
|
| 2 |
+
|
| 3 |
+
<!-- TODO: add link to new sapien website eventually -->
|
| 4 |
+
ManiSkill is a robotics simulator built on top of SAPIEN. It provides a standard Gym/Gymnasium interface for easy use with existing learning workflows like RL and imitation learning. Moreover, ManiSkill supports simulation on both the GPU and CPU, as well as fast parallelized rendering.
|
| 5 |
+
|
| 6 |
+
## Interface
|
| 7 |
+
|
| 8 |
+
Here is a basic example of how to run a ManiSkill task following the interface of [Gymnasium](https://gymnasium.farama.org/) and executing a random policy with a few basic options
|
| 9 |
+
|
| 10 |
+
```python
|
| 11 |
+
import gymnasium as gym
|
| 12 |
+
import mani_skill.envs
|
| 13 |
+
|
| 14 |
+
env = gym.make(
|
| 15 |
+
"PickCube-v1", # there are more tasks e.g. "PushCube-v1", "PegInsertionSide-v1", ...
|
| 16 |
+
num_envs=1,
|
| 17 |
+
obs_mode="state", # there is also "state_dict", "rgbd", ...
|
| 18 |
+
control_mode="pd_ee_delta_pose", # there is also "pd_joint_delta_pos", ...
|
| 19 |
+
render_mode="human"
|
| 20 |
+
)
|
| 21 |
+
print("Observation space", env.observation_space)
|
| 22 |
+
print("Action space", env.action_space)
|
| 23 |
+
|
| 24 |
+
obs, _ = env.reset(seed=0) # reset with a seed for determinism
|
| 25 |
+
done = False
|
| 26 |
+
while not done:
|
| 27 |
+
action = env.action_space.sample()
|
| 28 |
+
obs, reward, terminated, truncated, info = env.step(action)
|
| 29 |
+
done = terminated or truncated
|
| 30 |
+
env.render() # a display is required to render
|
| 31 |
+
env.close()
|
| 32 |
+
```
|
| 33 |
+
|
| 34 |
+
Changing `num_envs` to a value greater than 1 will automatically turn on the GPU simulation mode. More quick details [covered below](#gpu-parallelizedvectorized-tasks).
|
| 35 |
+
|
| 36 |
+
You can also run the same code from the command line to demo random actions and play with rendering options
|
| 37 |
+
|
| 38 |
+
```bash
|
| 39 |
+
# run headless / without a display
|
| 40 |
+
python -m mani_skill.examples.demo_random_action -e PickCube-v1
|
| 41 |
+
# run with a GUI and ray tracing
|
| 42 |
+
python -m mani_skill.examples.demo_random_action -e PickCube-v1 --render-mode="human" --shader="rt-fast"
|
| 43 |
+
```
|
| 44 |
+
|
| 45 |
+
Running with `render_mode="human"` will open up a GUI, shown below, that you can use to interactively explore the scene, pause/play the script, teleport objects around, and more.
|
| 46 |
+
|
| 47 |
+
```{figure} images/demo_random_action_gui.png
|
| 48 |
+
---
|
| 49 |
+
alt: SAPIEN GUI showing the PickCube task
|
| 50 |
+
---
|
| 51 |
+
```
|
| 52 |
+
<!--
|
| 53 |
+
We also have demos for simulations of more interesting scenes like ReplicaCAD, which can be run by doing
|
| 54 |
+
|
| 55 |
+
```bash
|
| 56 |
+
python -m mani_skill.utils.download_asset "ReplicaCAD"
|
| 57 |
+
python -m mani_skill.examples.demo_random_action -e "ReplicaCAD_SceneManipulation-v1" --render-mode="rgb_array" --record-dir="videos" # run headless and save video
|
| 58 |
+
python -m mani_skill.examples.demo_random_action -e "ReplicaCAD_SceneManipulation-v1" --render-mode="human" # run with GUI (recommended!)
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
To try out ray-tracing for more photorealistic rendering (which can also be turned on in the render tab of the GUI) as shown below you can do
|
| 62 |
+
|
| 63 |
+
```bash
|
| 64 |
+
python -m mani_skill.utils.download_asset "ReplicaCAD"
|
| 65 |
+
python -m mani_skill.examples.demo_random_action -e "ReplicaCAD_SceneManipulation-v1" --render-mode="human" --shader="rt-fast"
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
<video preload="auto" controls="True" width="100%">
|
| 69 |
+
<source src="https://github.com/haosulab/ManiSkill/raw/main/docs/source/_static/videos/fetch_random_action_replica_cad_rt.mp4" type="video/mp4">
|
| 70 |
+
</video> -->
|
| 71 |
+
|
| 72 |
+
You will also notice that all data returned is a batched torch tensor. To reduce extra code handling numpy vs torch, cpu vs gpu sim, everything in ManiSkill defaults to serving/using batched torch tensors of all data. To change the environment to serve numpy, unbatched data simply do the following
|
| 73 |
+
|
| 74 |
+
```python
|
| 75 |
+
from mani_skill.utils.wrappers.gymnasium import CPUGymWrapper
|
| 76 |
+
env = gym.make(env_id, num_envs=1)
|
| 77 |
+
env = CPUGymWrapper(env)
|
| 78 |
+
obs, _ = env.reset() # obs is numpy and unbatched
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
To have the exact same API defined by [gym/gymnasium](https://gymnasium.farama.org/) for single/vectorized environments, see the section on [reinforcement learning setups](../reinforcement_learning/setup.md).
|
| 82 |
+
|
| 83 |
+
For a compilation of demos you can run without having to write any extra code check out the [demos page](../demos/index)
|
| 84 |
+
|
| 85 |
+
See {py:class}`mani_skill.envs.sapien_env` for the full list of environment instantiation options.
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
## GPU Parallelized/Vectorized Tasks
|
| 90 |
+
|
| 91 |
+
ManiSkill is powered by SAPIEN, which supports GPU parallelized physics simulation and GPU parallelized rendering. This enables achieving 200,000+ state-based simulation FPS and 30,000+ FPS with rendering on a single 4090 GPU on e.g. manipulation tasks. The FPS can be higher or lower depending on what is simulated. For full benchmarking results see [this page](../additional_resources/performance_benchmarking).
|
| 92 |
+
|
| 93 |
+
In order to run massively parallelized tasks on a GPU, it is as simple as adding the `num_envs` argument to `gym.make` as follows:
|
| 94 |
+
|
| 95 |
+
```python
|
| 96 |
+
import gymnasium as gym
|
| 97 |
+
import mani_skill.envs
|
| 98 |
+
|
| 99 |
+
env = gym.make(
|
| 100 |
+
"PickCube-v1",
|
| 101 |
+
obs_mode="state",
|
| 102 |
+
control_mode="pd_joint_delta_pos",
|
| 103 |
+
num_envs=16,
|
| 104 |
+
)
|
| 105 |
+
print(env.observation_space) # will now have shape (16, ...)
|
| 106 |
+
print(env.action_space) # will now have shape (16, ...)
|
| 107 |
+
# env.single_observation_space and env.single_action_space provide non batched spaces
|
| 108 |
+
|
| 109 |
+
obs, _ = env.reset(seed=0) # reset with a seed for determinism
|
| 110 |
+
for i in range(200):
|
| 111 |
+
action = env.action_space.sample() # this is batched now
|
| 112 |
+
obs, reward, terminated, truncated, info = env.step(action)
|
| 113 |
+
done = terminated | truncated
|
| 114 |
+
print(f"Obs shape: {obs.shape}, Reward shape {reward.shape}, Done shape {done.shape}")
|
| 115 |
+
env.close()
|
| 116 |
+
```
|
| 117 |
+
|
| 118 |
+
Note that all values returned by `env.step` and `env.reset` are batched and are torch tensors. Whether GPU or CPU simulation is used then determines what device the tensor is on (CUDA or CPU).
|
| 119 |
+
|
| 120 |
+
To benchmark the parallelized simulation, you can run
|
| 121 |
+
|
| 122 |
+
```bash
|
| 123 |
+
python -m mani_skill.examples.benchmarking.gpu_sim --num-envs=1024
|
| 124 |
+
```
|
| 125 |
+
|
| 126 |
+
To try out the parallelized rendering, you can run
|
| 127 |
+
|
| 128 |
+
```bash
|
| 129 |
+
# rendering RGB + Depth data from all cameras
|
| 130 |
+
python -m mani_skill.examples.benchmarking.gpu_sim --num-envs=64 --obs-mode="rgbd"
|
| 131 |
+
# directly save 64 videos of the visual observations put into one video
|
| 132 |
+
python -m mani_skill.examples.benchmarking.gpu_sim --num-envs=64 --save-video
|
| 133 |
+
```
|
| 134 |
+
which will look something like this
|
| 135 |
+
|
| 136 |
+
<video preload="auto" controls="True" width="100%">
|
| 137 |
+
<source src="https://github.com/haosulab/ManiSkill/raw/main/docs/source/_static/videos/mani_skill_gpu_sim-PickCube-v1-num_envs=16-obs_mode=state-render_mode=sensors.mp4" type="video/mp4">
|
| 138 |
+
</video>
|
| 139 |
+
|
| 140 |
+
### Parallel Rendering in one Scene
|
| 141 |
+
|
| 142 |
+
We further support viewing all parallel environments at once, via recording or the GUI, and you can also turn on ray-tracing for more photo-realism. Note that this feature is not useful for any practical purposes (e.g. machine learning) apart from generating cool demonstration videos.
|
| 143 |
+
|
| 144 |
+
Turning the parallel GUI render on simply requires adding the argument `parallel_in_single_scene` to `gym.make` as so
|
| 145 |
+
|
| 146 |
+
```python
|
| 147 |
+
import gymnasium as gym
|
| 148 |
+
import mani_skill.envs
|
| 149 |
+
|
| 150 |
+
env = gym.make(
|
| 151 |
+
"PickCube-v1",
|
| 152 |
+
obs_mode="state",
|
| 153 |
+
control_mode="pd_joint_delta_pos",
|
| 154 |
+
num_envs=16,
|
| 155 |
+
parallel_in_single_scene=True,
|
| 156 |
+
viewer_camera_configs=dict(shader_pack="rt-fast"),
|
| 157 |
+
)
|
| 158 |
+
env.reset()
|
| 159 |
+
while True:
|
| 160 |
+
env.step(env.action_space.sample())
|
| 161 |
+
env.render_human()
|
| 162 |
+
```
|
| 163 |
+
|
| 164 |
+
This will then open up a GUI that looks like so:
|
| 165 |
+
```{figure} images/parallel_gui_render.png
|
| 166 |
+
```
|
| 167 |
+
|
| 168 |
+
### Additional GPU simulation/rendering customization
|
| 169 |
+
|
| 170 |
+
Finally on servers with multiple GPUs you can directly pick which devices/backends to use for simulation and rendering by setting the `CUDA_VISIBLE_DEVICES` environment variable. You can do this by e.g. running `export CUDA_VISIBLE_DEVICES=1` and then run the same code. While everything is labeled as device "cuda:0" it is actually using GPU device 1 now, which you can verify by running `nvidia-smi`.
|
| 171 |
+
|
| 172 |
+
We currently do not properly support exposing multiple visible CUDA devices to a single process as it has some rendering bugs at the moment.
|
| 173 |
+
|
| 174 |
+
## Task Instantiation Options
|
| 175 |
+
|
| 176 |
+
For the full list of environment instantiation options see {py:class}`mani_skill.envs.sapien_env`. Here we list some common options:
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
Each ManiSkill task supports different **observation modes** and **control modes**, which determine its **observation space** and **action space**. They can be specified by `gym.make(env_id, obs_mode=..., control_mode=...)`.
|
| 180 |
+
|
| 181 |
+
The common observation modes are `state`, `rgbd`, `pointcloud`. We also support `state_dict` (states organized as a hierarchical dictionary) and `sensor_data` (raw visual observations without postprocessing). Please refer to [Observation](../concepts/observation.md) for more details. Furthermore, visual data generated by the simulator can be modified in many ways via shaders. Please refer to [the sensors/cameras tutorial](../tutorials/sensors/index.md) for more details.
|
| 182 |
+
|
| 183 |
+
We support a wide range of controllers. Different controllers can have different effects on your algorithms. Thus, it is recommended to understand the action space you are going to use. Please refer to [Controllers](../concepts/controllers.md) for more details.
|
| 184 |
+
|
| 185 |
+
Some tasks require **downloading assets** that are not stored in the python package itself. You can download task-specific assets by `python -m mani_skill.utils.download_asset ${ENV_ID}`. The assets will be downloaded to `~/maniskill/data` by default, but you can also use the environment variable `MS_ASSET_DIR` to change this destination. If you don't download assets ahead of the time you will be prompted to do so if they are missing when running an environment.
|
| 186 |
+
|
| 187 |
+
Some ManiSkill tasks also support swapping robot embodiments such as the `PickCube-v1` task. You can try using the fetch robot instead by running
|
| 188 |
+
|
| 189 |
+
```
|
| 190 |
+
gym.make("PickCube-v1", robot_uids="fetch")
|
| 191 |
+
```
|
| 192 |
+
|
| 193 |
+
You may also notice that the argument is `robot_uids` (plural); this is because we also support tasks with multiple robots, which can be done by passing in a tuple like `robot_uids=("fetch", "fetch", "panda")`. Note that not all tasks support loading any robot or multiple robots, as they may not have been designed for those settings.
|
project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/learning_from_demos/baselines.md
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Baselines
|
| 2 |
+
|
| 3 |
+
We provide a number of different baselines spanning different categories of learning from demonstrations research: Behavior Cloning / Supervised Learning, Offline Reinforcement Learning, and Online Learning from Demonstrations. This page is still a WIP as we finish running experiments and establish clear baselines and benchmarking setups.
|
| 4 |
+
|
| 5 |
+
<!-- As part of these baselines we establish a few standard learning from demonstration benchmarks that cover a wide range of difficulty (easy to solve for verification but not saturated) and diversity in types of demonstrations (human collected, motion planning collected, neural net policy generated) -->
|
| 6 |
+
|
| 7 |
+
**Behavior Cloning (BC) Baselines**
|
| 8 |
+
|
| 9 |
+
BC Baselines are characterized by supervised learning focused algorithms for learning from demonstrations, without any online interaction with the environment.
|
| 10 |
+
|
| 11 |
+
| Baseline | Code | Results | Paper |
|
| 12 |
+
| ------------------------------ | ------------------------------------------------------------------------------------------- | ------- | ------------------------------------------ |
|
| 13 |
+
| Standard Behavior Cloning (BC) | WIP | WIP | N/A |
|
| 14 |
+
| Diffusion Policy (DP) | [Link](https://github.com/haosulab/ManiSkill/blob/main/examples/baselines/diffusion_policy) | WIP | [Link](https://arxiv.org/abs/2303.04137v4) |
|
| 15 |
+
| Action Chunking Transformer (ACT) | [Link](https://github.com/haosulab/ManiSkill/blob/main/examples/baselines/act) | WIP | [Link](https://arxiv.org/abs/2304.13705) |
|
| 16 |
+
|
| 17 |
+
**Online Learning from Demonstrations Baselines**
|
| 18 |
+
|
| 19 |
+
Online learning from demonstrations baselines are characterized by learning from demonstrations while also leveraging online environment interactions.
|
| 20 |
+
|
| 21 |
+
| Baseline | Code | Results | Paper |
|
| 22 |
+
| --------------------------------------------- | ------------------------------------------------------------------------------- | ------- | ---------------------------------------- |
|
| 23 |
+
| Reverse Forward Curriculum Learning (RFCL)* | [Link](https://github.com/haosulab/ManiSkill/blob/main/examples/baselines/rfcl) | WIP | [Link](https://arxiv.org/abs/2405.03379) |
|
| 24 |
+
| Reinforcement Learning from Prior Data (RLPD) | [Link](https://github.com/haosulab/ManiSkill/blob/main/examples/baselines/rlpd) | WIP | [Link](https://arxiv.org/abs/2302.02948) |
|
| 25 |
+
| SAC + Demos (SAC+Demos) | WIP | N/A | |
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
\* - This indicates the baseline uses environment state reset which is typically a simulation only feature
|
project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/learning_from_demos/index.md
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Learning from Demonstrations
|
| 2 |
+
|
| 3 |
+
ManiSkill supports all kinds of learning from demonstration / imitation learning methods via a unified API and provides multiple ready, already tested, baselines for use/comparison. The pages below show how to [setup environments for learning from demonstrations](./setup.md) and how to use the [baselines](./baselines.md). All baseline results are published to our [public wandb page](https://wandb.ai/stonet2000/ManiSkill). On that page you can filter by algorithm used, environment type, etc. We are still in the progress of running all experiments so not all results are uploaded yet.
|
| 4 |
+
|
| 5 |
+
```{toctree}
|
| 6 |
+
:titlesonly:
|
| 7 |
+
|
| 8 |
+
setup
|
| 9 |
+
baselines
|
| 10 |
+
```
|
project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/learning_from_demos/setup.md
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Setup
|
| 2 |
+
|
| 3 |
+
This page documents key things to know when setting up ManiSkill environments for learning from demonstrations, including:
|
| 4 |
+
|
| 5 |
+
- How to [download and replay trajectories to standard datasets](#downloading-and-replaying-trajectories--standard-datasets) used for benchmarking state-based and vision-based imitation learning
|
| 6 |
+
- How to [evaluate trained models fairly and correctly](#evaluation)
|
| 7 |
+
- Some common [pitfalls to avoid](#common-pitfalls-to-avoid)
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
## Downloading and Replaying Trajectories / Standard Datasets
|
| 11 |
+
|
| 12 |
+
By default for fast downloads and smaller file sizes, ManiSkill demonstrations are stored in a highly reduced/compressed format which includes not keeping any observation data. Run the command to download the raw minimized demonstration data.
|
| 13 |
+
|
| 14 |
+
```bash
|
| 15 |
+
python -m mani_skill.utils.download_demo "PickCube-v1"
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
To ensure everyone has the same preprocessed/replayed dataset, make sure to run this script: [https://github.com/haosulab/ManiSkill/blob/main/scripts/data_generation/replay_for_il_baselines.sh](https://github.com/haosulab/ManiSkill/blob/main/scripts/data_generation/replay_for_il_baselines.sh). Note that some scripts use the GPU simulation (marked by `-b physx_cuda`) for replay, which may potentially require more GPU memory than you have available. You can always lower the number of parallel environments used to be replayed by setting `--num-procs` to a lower number.
|
| 19 |
+
|
| 20 |
+
It has fixed settings for the trajectory replay to generate observation data and set the desired action space/controller for all benchmarked tasks. All benchmarked results in the [Wandb project detailing all benchmarked training runs](https://wandb.ai/stonet2000/ManiSkill) use the data replayed by the script above.
|
| 21 |
+
|
| 22 |
+
If you need more advanced use-cases for trajectory replay (e.g. generating pointclouds, changing controller modes), see the [trajectory replay documentation](../datasets/replay.md).
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
## Evaluation
|
| 26 |
+
|
| 27 |
+
With the number of different types of environments, algorithms, and approaches to evaluation, we describe below a consistent and standardized way to evaluate all kinds of learning from demonstrations policies in ManiSkill fairly. In summary, the following setup is necessary to ensure fair evaluation:
|
| 28 |
+
|
| 29 |
+
- Partial resets are turned off and environments do not reset upon success/fail/termination (`ignore_terminations=True`). Instead we record multiple types of success/fail metrics.
|
| 30 |
+
- All parallel environments reconfigure on reset (`reconfiguration_freq=1`), which randomizes object geometries if the task has object randomization.
|
| 31 |
+
|
| 32 |
+
The code to fairly evaluate policies and record standard metrics in ManiSkill are shown below. We provide CPU and GPU vectorized options particularly because depending on what simulation backend your demonstration data is collected on you will want to evaluate your policy on the same backend.
|
| 33 |
+
|
| 34 |
+
For GPU vectorized environments, the following code is recommended for evaluating policies by environment ID:
|
| 35 |
+
|
| 36 |
+
```python
|
| 37 |
+
import gymnasium as gym
|
| 38 |
+
from mani_skill.vector.wrappers.gymnasium import ManiSkillVectorEnv
|
| 39 |
+
env_id = "PushCube-v1"
|
| 40 |
+
num_eval_envs = 64
|
| 41 |
+
env_kwargs = dict(obs_mode="state") # modify your env_kwargs here
|
| 42 |
+
eval_envs = gym.make(env_id, num_envs=num_eval_envs, reconfiguration_freq=1, **env_kwargs)
|
| 43 |
+
# add any other wrappers here
|
| 44 |
+
eval_envs = ManiSkillVectorEnv(eval_envs, ignore_terminations=True, record_metrics=True)
|
| 45 |
+
|
| 46 |
+
# evaluation loop, which will record metrics for complete episodes only
|
| 47 |
+
obs, _ = eval_envs.reset(seed=0)
|
| 48 |
+
eval_metrics = defaultdict(list)
|
| 49 |
+
for _ in range(400):
|
| 50 |
+
action = eval_envs.action_space.sample() # replace with your policy action
|
| 51 |
+
obs, rew, terminated, truncated, info = eval_envs.step(action)
|
| 52 |
+
# note as there are no partial resets, truncated is True for all environments at the same time
|
| 53 |
+
if truncated.any():
|
| 54 |
+
for k, v in info["final_info"]["episode"].items():
|
| 55 |
+
eval_metrics[k].append(v.float())
|
| 56 |
+
for k in eval_metrics.keys():
|
| 57 |
+
print(f"{k}_mean: {torch.mean(torch.stack(eval_metrics[k])).item()}")
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
And for CPU vectorized environments the following is recommended for evaluation:
|
| 61 |
+
|
| 62 |
+
```python
|
| 63 |
+
import gymnasium as gym
|
| 64 |
+
from mani_skill.utils.wrappers import CPUGymWrapper
|
| 65 |
+
env_id = "PickCube-v1"
|
| 66 |
+
num_eval_envs = 8
|
| 67 |
+
env_kwargs = dict(obs_mode="state") # modify your env_kwargs here
|
| 68 |
+
def cpu_make_env(env_id, env_kwargs = dict()):
|
| 69 |
+
def thunk():
|
| 70 |
+
env = gym.make(env_id, reconfiguration_freq=1, **env_kwargs)
|
| 71 |
+
env = CPUGymWrapper(env, ignore_terminations=True, record_metrics=True)
|
| 72 |
+
# add any other wrappers here
|
| 73 |
+
return env
|
| 74 |
+
return thunk
|
| 75 |
+
vector_cls = gym.vector.SyncVectorEnv if num_eval_envs == 1 else lambda x : gym.vector.AsyncVectorEnv(x, context="forkserver")
|
| 76 |
+
eval_envs = vector_cls([cpu_make_env(env_id, env_kwargs) for _ in range(num_eval_envs)])
|
| 77 |
+
|
| 78 |
+
# evaluation loop, which will record metrics for complete episodes only
|
| 79 |
+
obs, _ = eval_envs.reset(seed=0)
|
| 80 |
+
eval_metrics = defaultdict(list)
|
| 81 |
+
for _ in range(400):
|
| 82 |
+
action = eval_envs.action_space.sample() # replace with your policy action
|
| 83 |
+
obs, rew, terminated, truncated, info = eval_envs.step(action)
|
| 84 |
+
# note as there are no partial resets, truncated is True for all environments at the same time
|
| 85 |
+
if truncated.any():
|
| 86 |
+
for final_info in info["final_info"]:
|
| 87 |
+
for k, v in final_info["episode"].items():
|
| 88 |
+
eval_metrics[k].append(v)
|
| 89 |
+
for k in eval_metrics.keys():
|
| 90 |
+
print(f"{k}_mean: {np.mean(eval_metrics[k])}")
|
| 91 |
+
```
|
| 92 |
+
|
| 93 |
+
The following metrics are recorded and explained below:
|
| 94 |
+
- `success_once`: Whether the task was successful at any point in the episode.
|
| 95 |
+
- `success_at_end`: Whether the task was successful at the final step of the episode.
|
| 96 |
+
- `fail_once/fail_at_end`: Same as the above two but for failures. Note not all tasks have success/fail criteria.
|
| 97 |
+
- `return`: The total reward accumulated over the course of the episode.
|
| 98 |
+
|
| 99 |
+
<!-- NOTE (stao): the content for evaluation is the same as in the RL setup.md document, however I don't really want users have to click to a separate page to learn about evaluation... -->
|
| 100 |
+
|
| 101 |
+
Generally for learning from demonstrations the only metric that matters is "success_once" and is what is typically reported in research/work using ManiSkill.
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
## Common Pitfalls to Avoid
|
| 105 |
+
|
| 106 |
+
In general if demonstrations are collected in e.g. the PhysX CPU simulation, you want to ensure you evaluate any policy trained on that data in the same simulation backend. For highly precise tasks (e.g. PushT) where even a 1e-3 error can lead to different results, this is especially important. This is why all demonstrations replayed by our trajectory replay tool will annotate the simulation backend used on the trajectory file name
|
| 107 |
+
|
| 108 |
+
Your source of demonstration data can largely affect the training performance. Classic behavior cloning can do decently well to imitate demonstrations generated by a neural network / RL trained policy, but will struggle to imitate more multi-modal demonstrations (e.g. human teleoperated or motion planning generated). Methods like Diffusion Policy (DP) are designed to address this problem. If you are unsure, all official datasets from ManiSkill will detail clearly in the trajectory metadata JSON file how the data was collected and type of data it is.
|
project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/reference/mani_skill.envs.sapien_env.md
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# `mani_skill.envs.sapien_env`
|
| 2 |
+
|
| 3 |
+
The BaseEnv class is the class you should inherit from if you want to create a new environment/task. The arguments here also describe all the possible run-time arguments you can pass when creating environments via `gym.make`.
|
| 4 |
+
|
| 5 |
+
```{eval-rst}
|
| 6 |
+
.. automodule:: mani_skill.envs.sapien_env
|
| 7 |
+
:members:
|
| 8 |
+
:show-inheritance:
|
| 9 |
+
:undoc-members:
|
| 10 |
+
```
|
project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/reference/mani_skill.utils.common.rst
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
``mani_skill.utils.common`` module
|
| 2 |
+
===================================
|
| 3 |
+
|
| 4 |
+
.. currentmodule:: mani_skill.utils.common
|
| 5 |
+
|
| 6 |
+
.. automodule:: mani_skill.utils.common
|
| 7 |
+
:members:
|
| 8 |
+
:undoc-members:
|
| 9 |
+
|
| 10 |
+
.. rubric:: Functions
|
project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/reference/mani_skill.utils.structs.rst
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
``mani_skill.utils.structs`` module
|
| 2 |
+
===================================
|
| 3 |
+
|
| 4 |
+
Structs in ManiSkill are objects that efficiently wrap around raw SAPIEN objects and manage their data on the GPU in a user-friendly way so you don't have to deal with complex memory management code. The APIs exposed here are the most commonly used structs that you will encounter when using ManiSkill.
|
| 5 |
+
|
| 6 |
+
.. currentmodule:: mani_skill.utils.structs
|
| 7 |
+
|
| 8 |
+
.. automodule:: mani_skill.utils.structs
|
| 9 |
+
|
| 10 |
+
.. autosummary::
|
| 11 |
+
|
| 12 |
+
:template: class_no_parameters.rst
|
| 13 |
+
|
| 14 |
+
:toctree: _autosummary
|
| 15 |
+
Pose
|
| 16 |
+
Actor
|
| 17 |
+
Articulation
|
| 18 |
+
Link
|
| 19 |
+
ArticulationJoint
|
| 20 |
+
SimConfig
|
project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/tutorials/custom_tasks/loading_objects.md
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Loading Actors and Articulations
|
| 2 |
+
|
| 3 |
+
The [introductory tutorial](./intro.md) covered the overall process of building a custom task. This tutorial covers how to load a wider variety of objects, whether they are objects from asset datasets like [YCB](https://www.ycbbenchmarks.com/), or articulated datasets like [Partnet Mobility](https://sapien.ucsd.edu/browse). Loading these more complicated geometries enables you to build more complex and interesting robotics tasks.
|
| 4 |
+
|
| 5 |
+
## Loading Actors
|
| 6 |
+
|
| 7 |
+
ManiSkill provides two ways to load actors, loading directly from existing simulation-ready asset datasets, or via the lower-level ActorBuilder API.
|
| 8 |
+
|
| 9 |
+
### Loading from Existing Datasets
|
| 10 |
+
|
| 11 |
+
ManiSkill supports easily loading assets from existing datasets such as the YCB dataset. In the beta release this is the only asset database available, more will be provided once we finish integrating a 3D asset database system.
|
| 12 |
+
|
| 13 |
+
```python
|
| 14 |
+
import sapien
|
| 15 |
+
from mani_skill.utils.building import actors
|
| 16 |
+
def _load_scene(self, options):
|
| 17 |
+
builder = actors.get_actor_builder(
|
| 18 |
+
self.scene,
|
| 19 |
+
id=f"ycb:{model_id}",
|
| 20 |
+
)
|
| 21 |
+
# choose a reasonable initial pose that doesn't intersect other objects
|
| 22 |
+
builder.initial_pose = sapien.Pose(p=[0, 0, 0.5])
|
| 23 |
+
builder.build(name="object")
|
| 24 |
+
```
|
| 25 |
+
|
| 26 |
+
### Using the ActorBuilder API
|
| 27 |
+
|
| 28 |
+
To build custom actors in python code, you first create the ActorBuilder as so in your task:
|
| 29 |
+
|
| 30 |
+
```python
|
| 31 |
+
def _load_scene(self, options):
|
| 32 |
+
builder = self.scene.create_actor_builder()
|
| 33 |
+
```
|
| 34 |
+
|
| 35 |
+
Then you can use the standard SAPIEN API for creating actors, a tutorial of which can be found on the [SAPIEN actors tutorial documentation](https://sapien-sim.github.io/docs/user_guide/getting_started/create_actors.html#create-an-actor-with-actorbuilder)
|
| 36 |
+
|
| 37 |
+
## Loading Articulations
|
| 38 |
+
|
| 39 |
+
There are several ways to load articulations as detailed below as well as some limitations to be aware of
|
| 40 |
+
|
| 41 |
+
### Loading from Existing Datasets
|
| 42 |
+
|
| 43 |
+
Like actors, ManiSkill supports easily loading articulated assets from existing datasets such as the Partnet Mobility dataset. In the beta release this is the only asset database available, more will be provided once we finish integrating a 3D asset database system.
|
| 44 |
+
|
| 45 |
+
```python
|
| 46 |
+
from mani_skill.utils.building import articulations
|
| 47 |
+
def _load_scene(self, options):
|
| 48 |
+
builder = articulations.get_articulation_builder(
|
| 49 |
+
self.scene, f"partnet-mobility:{model_id}"
|
| 50 |
+
)
|
| 51 |
+
# choose a reasonable initial pose that doesn't intersect other objects
|
| 52 |
+
# this matters a lot for articulations in GPU sim or else simulation bugs can occur
|
| 53 |
+
builder.initial_pose = sapien.Pose(p=[0, 0, 0.5])
|
| 54 |
+
builder.build(name="object")
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
### Using the ArticulationBuilder API
|
| 59 |
+
|
| 60 |
+
To build custom articulations in python code, you first create the ArticulationBuilder as so in your task:
|
| 61 |
+
|
| 62 |
+
```python
|
| 63 |
+
def _load_scene(self, options):
|
| 64 |
+
builder = self.scene.create_articulation_builder()
|
| 65 |
+
```
|
| 66 |
+
|
| 67 |
+
Then you can use the standard SAPIEN API for creating articulations, a tutorial of which can be found on the [SAPIEN articulation tutorial documentation](https://sapien-sim.github.io/docs/user_guide/getting_started/create_articulations.html). You essentially just need to define what the links and joints are and how they connect. Links are created like Actors and can have visual and collision shapes added via the python API.
|
| 68 |
+
|
| 69 |
+
### Using the URDF Loader
|
| 70 |
+
|
| 71 |
+
If your articulation is defined with a URDF file, you can use a URDF loader to load that articulation and make modifications as needed.
|
| 72 |
+
|
| 73 |
+
```python
|
| 74 |
+
def _load_scene(self, options):
|
| 75 |
+
loader = self.scene.create_urdf_loader()
|
| 76 |
+
# the .parse function can also parse multiple articulations
|
| 77 |
+
# actors and cameras but we only use the articulations
|
| 78 |
+
articulation_builders = loader.parse(str(urdf_path))["articulation_builders"]
|
| 79 |
+
builder = articulation_builders[0]
|
| 80 |
+
# choose a reasonable initial pose that doesn't intersect other objects
|
| 81 |
+
# this matters a lot for articulations in GPU sim or else simulation bugs can occur
|
| 82 |
+
builder.initial_pose = sapien.Pose(p=[0, 0, 0.5])
|
| 83 |
+
builder.build(name="my_articulation")
|
| 84 |
+
```
|
| 85 |
+
|
| 86 |
+
You can also programmatically change various properties of articulations and their links prior to building it, see below for examples which range from fixing root links, collision mesh loading logic, and modifying physical properties. These can be useful for e.g. domain randomization
|
| 87 |
+
|
| 88 |
+
```python
|
| 89 |
+
def _load_scene(self, options):
|
| 90 |
+
loader = self.scene.create_urdf_loader()
|
| 91 |
+
|
| 92 |
+
# change friction values of all links
|
| 93 |
+
loader.set_material(static_friction, dynamic_friction, restitution)
|
| 94 |
+
# change friction values of specific links
|
| 95 |
+
loader.set_link_material(link_name, static_friction, dynamic_friction, restitution)
|
| 96 |
+
# change patch radius values of specific links
|
| 97 |
+
loader.set_link_min_patch_radius(link_name, min_patch_radius)
|
| 98 |
+
loader.set_link_patch_radius(link_name, patch_radius)
|
| 99 |
+
# set density of all links
|
| 100 |
+
loader.set_density(density)
|
| 101 |
+
# set density of specific links
|
| 102 |
+
loader.set_link_density(link_name, density)
|
| 103 |
+
# fix/unfix root link in place
|
| 104 |
+
loader.fix_root_link = True # or False
|
| 105 |
+
# change the scale of the loaded articulation geometries (visual+collision)
|
| 106 |
+
loader.scale = 1.0 # default is 1.0
|
| 107 |
+
# if collision meshes contain multiple convex meshes
|
| 108 |
+
# you can set this to True to try and load them
|
| 109 |
+
loader.load_multiple_collisions_from_file = True
|
| 110 |
+
|
| 111 |
+
articulation_builders = loader.parse(str(urdf_path))["articulation_builders"]
|
| 112 |
+
builder = articulation_builders[0]
|
| 113 |
+
builder.build(name="my_articulation")
|
| 114 |
+
```
|
| 115 |
+
|
| 116 |
+
### Articulation Limitations
|
| 117 |
+
|
| 118 |
+
For the physx simulation backend, any single articulation can have a maximum of 64 links. More complex articulated objects will need to be simplified by merging links together. Most of the time this is readily possible by inspecting the URDF and fusing together links held together by fixed joints. The fewer fixed joints and links there are, the better the simulation will run in terms of accuracy and speed.
|
| 119 |
+
|
| 120 |
+
## Using the MJCF Loader
|
| 121 |
+
|
| 122 |
+
If your actor/articulation is defined with a MJCF file, you can use a MJCF loader to load that articulation and make modifications as needed. It works the exact same as the [URDF loader](./loading_objects.md#using-the-urdf-loader). Note that however not all properties in MJCF/Mujoco are supported in SAPIEN/ManiSkill at this moment, so you should always verify your articulation/actors are loaded correctly from the MJCF.
|
| 123 |
+
|
| 124 |
+
```python
|
| 125 |
+
def _load_scene(self, options):
|
| 126 |
+
loader = self.scene.create_mjcf_loader()
|
| 127 |
+
builders = loader.parse(str(mjcf_path))
|
| 128 |
+
articulation_builders = builders["articulation_builders"]
|
| 129 |
+
actor_builders = builders["actor_builders"]
|
| 130 |
+
```
|
| 131 |
+
|
| 132 |
+
## Reconfiguring and Optimization
|
| 133 |
+
|
| 134 |
+
In general loading is always quite slow, especially on the GPU so by default, ManiSkill reconfigures just once. Any call to `env.reset()` will not trigger a reconfiguration unless you call `env.reset(seed=seed, options=dict(reconfigure=True))` (seed is not needed but recommended if you are reconfiguring for reproducibility).
|
| 135 |
+
|
| 136 |
+
However, during CPU simulation with just a single environment (or GPU simulation with very few environments) the loaded object geometries never get to change as reconfiguration doesn't happen more than once. This behavior can be changed by setting the `reconfiguration_freq` value of your task.
|
| 137 |
+
|
| 138 |
+
The recommended way to do this is as follows (taken from the PickSingleYCB task):
|
| 139 |
+
|
| 140 |
+
```python
|
| 141 |
+
class PickSingleYCBEnv(BaseEnv):
|
| 142 |
+
|
| 143 |
+
SUPPORTED_ROBOTS = ["panda", "fetch"]
|
| 144 |
+
agent: Union[Panda, Fetch]
|
| 145 |
+
goal_thresh = 0.025
|
| 146 |
+
|
| 147 |
+
def __init__(
|
| 148 |
+
self, *args, robot_uids="panda", robot_init_qpos_noise=0.02,
|
| 149 |
+
num_envs=1,
|
| 150 |
+
reconfiguration_freq=None,
|
| 151 |
+
**kwargs,
|
| 152 |
+
):
|
| 153 |
+
# ...
|
| 154 |
+
if reconfiguration_freq is None:
|
| 155 |
+
if num_envs == 1:
|
| 156 |
+
reconfiguration_freq = 1
|
| 157 |
+
else:
|
| 158 |
+
reconfiguration_freq = 0
|
| 159 |
+
super().__init__(
|
| 160 |
+
*args,
|
| 161 |
+
robot_uids=robot_uids,
|
| 162 |
+
reconfiguration_freq=reconfiguration_freq,
|
| 163 |
+
num_envs=num_envs,
|
| 164 |
+
**kwargs,
|
| 165 |
+
)
|
| 166 |
+
```
|
| 167 |
+
|
| 168 |
+
A `reconfiguration_freq` value of 1 means during every reset we reconfigure. A `reconfiguration_freq` of `k` means every `k` resets we reconfigure. A `reconfiguration_freq` of 0 (the default) means we never reconfigure again.
|
| 169 |
+
|
| 170 |
+
In general one use case of setting a positive `reconfiguration_freq` value is for when you want to simulate a task in parallel where each parallel environment is working with a different object/articulation and there are way more object variants than number of parallel environments. For machine learning / RL workflows, setting `reconfiguration_freq` to e.g. 10 ensures every 10 resets the objects being simulated on are randomized which can diversify the data collected for online training while keeping simulation fast by reconfiguring infrequently.
|
project/ManiSkill3/src/maniskill3_environment/docs/source/user_guide/vision_language_action_models/index.md
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Vision Language Action Models
|
| 2 |
+
|
| 3 |
+
ManiSkill supports evaluating and pretraining vision language action models. Currently the following VLAs have been tested via the ManiSkill framework:
|
| 4 |
+
|
| 5 |
+
- [Octo](https://github.com/octo-models/octo)
|
| 6 |
+
- [RDT-1B](https://github.com/thu-ml/RoboticsDiffusionTransformer)
|
| 7 |
+
- [RT-x](https://robotics-transformer-x.github.io/)
|
| 8 |
+
|
| 9 |
+
RDT-1B uses some of the ManiSkill demonstrations for pretraining data and evaluates by fine-tuning on some demonstrations on various ManiSkill tasks, see their [README](https://github.com/thu-ml/RoboticsDiffusionTransformer?tab=readme-ov-file#simulation-benchmark) for more details.
|
| 10 |
+
|
| 11 |
+
Octo and RT series of models are evaluated through various real2sim environments as part of the SIMPLER project, see their [README](https://github.com/simpler-env/SimplerEnv/tree/maniskill3) for details on how to run the evaluation setup.
|
project/ManiSkill3/src/maniskill3_environment/lerobot/.dockerignore
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# Misc
|
| 16 |
+
.git
|
| 17 |
+
tmp
|
| 18 |
+
wandb
|
| 19 |
+
data
|
| 20 |
+
outputs
|
| 21 |
+
.vscode
|
| 22 |
+
rl
|
| 23 |
+
media
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Logging
|
| 27 |
+
logs
|
| 28 |
+
|
| 29 |
+
# HPC
|
| 30 |
+
nautilus/*.yaml
|
| 31 |
+
*.key
|
| 32 |
+
|
| 33 |
+
# Slurm
|
| 34 |
+
sbatch*.sh
|
| 35 |
+
|
| 36 |
+
# Byte-compiled / optimized / DLL files
|
| 37 |
+
__pycache__/
|
| 38 |
+
*.py[cod]
|
| 39 |
+
*$py.class
|
| 40 |
+
|
| 41 |
+
# C extensions
|
| 42 |
+
*.so
|
| 43 |
+
|
| 44 |
+
# Distribution / packaging
|
| 45 |
+
.Python
|
| 46 |
+
build/
|
| 47 |
+
develop-eggs/
|
| 48 |
+
dist/
|
| 49 |
+
downloads/
|
| 50 |
+
eggs/
|
| 51 |
+
.eggs/
|
| 52 |
+
lib/
|
| 53 |
+
lib64/
|
| 54 |
+
parts/
|
| 55 |
+
sdist/
|
| 56 |
+
var/
|
| 57 |
+
wheels/
|
| 58 |
+
pip-wheel-metadata/
|
| 59 |
+
share/python-wheels/
|
| 60 |
+
*.egg-info/
|
| 61 |
+
.installed.cfg
|
| 62 |
+
*.egg
|
| 63 |
+
MANIFEST
|
| 64 |
+
|
| 65 |
+
# PyInstaller
|
| 66 |
+
# Usually these files are written by a python script from a template
|
| 67 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 68 |
+
*.manifest
|
| 69 |
+
*.spec
|
| 70 |
+
|
| 71 |
+
# Installer logs
|
| 72 |
+
pip-log.txt
|
| 73 |
+
pip-delete-this-directory.txt
|
| 74 |
+
|
| 75 |
+
# Unit test / coverage reports
|
| 76 |
+
!tests/artifacts
|
| 77 |
+
htmlcov/
|
| 78 |
+
.tox/
|
| 79 |
+
.nox/
|
| 80 |
+
.coverage
|
| 81 |
+
.coverage.*
|
| 82 |
+
nosetests.xml
|
| 83 |
+
coverage.xml
|
| 84 |
+
*.cover
|
| 85 |
+
*.py,cover
|
| 86 |
+
.hypothesis/
|
| 87 |
+
.pytest_cache/
|
| 88 |
+
|
| 89 |
+
# Ignore .cache except calibration
|
| 90 |
+
.cache/*
|
| 91 |
+
!.cache/calibration/
|
| 92 |
+
!.cache/calibration/**
|
| 93 |
+
|
| 94 |
+
# Translations
|
| 95 |
+
*.mo
|
| 96 |
+
*.pot
|
| 97 |
+
|
| 98 |
+
# Django stuff:
|
| 99 |
+
*.log
|
| 100 |
+
local_settings.py
|
| 101 |
+
db.sqlite3
|
| 102 |
+
db.sqlite3-journal
|
| 103 |
+
|
| 104 |
+
# Flask stuff:
|
| 105 |
+
instance/
|
| 106 |
+
.webassets-cache
|
| 107 |
+
|
| 108 |
+
# Scrapy stuff:
|
| 109 |
+
.scrapy
|
| 110 |
+
|
| 111 |
+
# Sphinx documentation
|
| 112 |
+
docs/_build/
|
| 113 |
+
|
| 114 |
+
# PyBuilder
|
| 115 |
+
target/
|
| 116 |
+
|
| 117 |
+
# Jupyter Notebook
|
| 118 |
+
.ipynb_checkpoints
|
| 119 |
+
|
| 120 |
+
# IPython
|
| 121 |
+
profile_default/
|
| 122 |
+
ipython_config.py
|
| 123 |
+
|
| 124 |
+
# pyenv
|
| 125 |
+
.python-version
|
| 126 |
+
|
| 127 |
+
# pipenv
|
| 128 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 129 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 130 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 131 |
+
# install all needed dependencies.
|
| 132 |
+
#Pipfile.lock
|
| 133 |
+
|
| 134 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
| 135 |
+
__pypackages__/
|
| 136 |
+
|
| 137 |
+
# Celery stuff
|
| 138 |
+
celerybeat-schedule
|
| 139 |
+
celerybeat.pid
|
| 140 |
+
|
| 141 |
+
# SageMath parsed files
|
| 142 |
+
*.sage.py
|
| 143 |
+
|
| 144 |
+
# Spyder project settings
|
| 145 |
+
.spyderproject
|
| 146 |
+
.spyproject
|
| 147 |
+
|
| 148 |
+
# Rope project settings
|
| 149 |
+
.ropeproject
|
| 150 |
+
|
| 151 |
+
# mkdocs documentation
|
| 152 |
+
/site
|
| 153 |
+
|
| 154 |
+
# mypy
|
| 155 |
+
.mypy_cache/
|
| 156 |
+
.dmypy.json
|
| 157 |
+
dmypy.json
|
| 158 |
+
|
| 159 |
+
# Pyre type checker
|
| 160 |
+
.pyre/
|
project/ManiSkill3/src/maniskill3_environment/lerobot/.gitattributes
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
*.memmap filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.stl filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.json !text !filter !merge !diff
|
project/ManiSkill3/src/maniskill3_environment/lerobot/.gitignore
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# Logging
|
| 16 |
+
logs
|
| 17 |
+
tmp
|
| 18 |
+
wandb
|
| 19 |
+
|
| 20 |
+
# Data
|
| 21 |
+
data
|
| 22 |
+
outputs
|
| 23 |
+
|
| 24 |
+
# Apple
|
| 25 |
+
.DS_Store
|
| 26 |
+
|
| 27 |
+
# VS Code
|
| 28 |
+
.vscode
|
| 29 |
+
|
| 30 |
+
# HPC
|
| 31 |
+
nautilus/*.yaml
|
| 32 |
+
*.key
|
| 33 |
+
|
| 34 |
+
# Slurm
|
| 35 |
+
sbatch*.sh
|
| 36 |
+
|
| 37 |
+
# Byte-compiled / optimized / DLL files
|
| 38 |
+
__pycache__/
|
| 39 |
+
*.py[cod]
|
| 40 |
+
*$py.class
|
| 41 |
+
|
| 42 |
+
# C extensions
|
| 43 |
+
*.so
|
| 44 |
+
|
| 45 |
+
# Distribution / packaging
|
| 46 |
+
.Python
|
| 47 |
+
build/
|
| 48 |
+
develop-eggs/
|
| 49 |
+
dist/
|
| 50 |
+
downloads/
|
| 51 |
+
eggs/
|
| 52 |
+
.eggs/
|
| 53 |
+
lib/
|
| 54 |
+
lib64/
|
| 55 |
+
parts/
|
| 56 |
+
sdist/
|
| 57 |
+
var/
|
| 58 |
+
wheels/
|
| 59 |
+
pip-wheel-metadata/
|
| 60 |
+
share/python-wheels/
|
| 61 |
+
*.egg-info/
|
| 62 |
+
.installed.cfg
|
| 63 |
+
*.egg
|
| 64 |
+
MANIFEST
|
| 65 |
+
|
| 66 |
+
# uv/poetry lock files
|
| 67 |
+
poetry.lock
|
| 68 |
+
uv.lock
|
| 69 |
+
|
| 70 |
+
# PyInstaller
|
| 71 |
+
# Usually these files are written by a python script from a template
|
| 72 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 73 |
+
*.manifest
|
| 74 |
+
*.spec
|
| 75 |
+
|
| 76 |
+
# Installer logs
|
| 77 |
+
pip-log.txt
|
| 78 |
+
pip-delete-this-directory.txt
|
| 79 |
+
|
| 80 |
+
# Unit test / coverage reports
|
| 81 |
+
!tests/artifacts
|
| 82 |
+
htmlcov/
|
| 83 |
+
.tox/
|
| 84 |
+
.nox/
|
| 85 |
+
.coverage
|
| 86 |
+
.coverage.*
|
| 87 |
+
nosetests.xml
|
| 88 |
+
coverage.xml
|
| 89 |
+
*.cover
|
| 90 |
+
*.py,cover
|
| 91 |
+
.hypothesis/
|
| 92 |
+
.pytest_cache/
|
| 93 |
+
|
| 94 |
+
# Ignore .cache except calibration
|
| 95 |
+
.cache/*
|
| 96 |
+
!.cache/calibration/
|
| 97 |
+
!.cache/calibration/**
|
| 98 |
+
|
| 99 |
+
# Translations
|
| 100 |
+
*.mo
|
| 101 |
+
*.pot
|
| 102 |
+
|
| 103 |
+
# Django stuff:
|
| 104 |
+
*.log
|
| 105 |
+
local_settings.py
|
| 106 |
+
db.sqlite3
|
| 107 |
+
db.sqlite3-journal
|
| 108 |
+
|
| 109 |
+
# Flask stuff:
|
| 110 |
+
instance/
|
| 111 |
+
.webassets-cache
|
| 112 |
+
|
| 113 |
+
# Scrapy stuff:
|
| 114 |
+
.scrapy
|
| 115 |
+
|
| 116 |
+
# Sphinx documentation
|
| 117 |
+
docs/_build/
|
| 118 |
+
|
| 119 |
+
# PyBuilder
|
| 120 |
+
.pybuilder/
|
| 121 |
+
target/
|
| 122 |
+
|
| 123 |
+
# Jupyter Notebook
|
| 124 |
+
.ipynb_checkpoints
|
| 125 |
+
|
| 126 |
+
# IPython
|
| 127 |
+
profile_default/
|
| 128 |
+
ipython_config.py
|
| 129 |
+
|
| 130 |
+
# pyenv
|
| 131 |
+
.python-version
|
| 132 |
+
|
| 133 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
| 134 |
+
__pypackages__/
|
| 135 |
+
|
| 136 |
+
# Celery stuff
|
| 137 |
+
celerybeat-schedule
|
| 138 |
+
celerybeat.pid
|
| 139 |
+
|
| 140 |
+
# SageMath parsed files
|
| 141 |
+
*.sage.py
|
| 142 |
+
|
| 143 |
+
# Environments
|
| 144 |
+
.env
|
| 145 |
+
.venv
|
| 146 |
+
env/
|
| 147 |
+
venv/
|
| 148 |
+
env.bak/
|
| 149 |
+
venv.bak/
|
| 150 |
+
|
| 151 |
+
# Spyder project settings
|
| 152 |
+
.spyderproject
|
| 153 |
+
.spyproject
|
| 154 |
+
|
| 155 |
+
# Rope project settings
|
| 156 |
+
.ropeproject
|
| 157 |
+
|
| 158 |
+
# mkdocs documentation
|
| 159 |
+
/site
|
| 160 |
+
|
| 161 |
+
# mypy
|
| 162 |
+
.mypy_cache/
|
| 163 |
+
.dmypy.json
|
| 164 |
+
dmypy.json
|
| 165 |
+
|
| 166 |
+
# Pyre type checker
|
| 167 |
+
.pyre/
|
| 168 |
+
|
| 169 |
+
# pytype static type analyzer
|
| 170 |
+
.pytype/
|
| 171 |
+
|
| 172 |
+
# Cython debug symbols
|
| 173 |
+
cython_debug/
|
project/ManiSkill3/src/maniskill3_environment/lerobot/.pre-commit-config.yaml
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
exclude: "tests/artifacts/.*\\.safetensors$"
|
| 16 |
+
default_language_version:
|
| 17 |
+
python: python3.10
|
| 18 |
+
repos:
|
| 19 |
+
##### Meta #####
|
| 20 |
+
- repo: meta
|
| 21 |
+
hooks:
|
| 22 |
+
- id: check-useless-excludes
|
| 23 |
+
- id: check-hooks-apply
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
##### Style / Misc. #####
|
| 27 |
+
- repo: https://github.com/pre-commit/pre-commit-hooks
|
| 28 |
+
rev: v5.0.0
|
| 29 |
+
hooks:
|
| 30 |
+
- id: check-added-large-files
|
| 31 |
+
- id: debug-statements
|
| 32 |
+
- id: check-merge-conflict
|
| 33 |
+
- id: check-case-conflict
|
| 34 |
+
- id: check-yaml
|
| 35 |
+
- id: check-toml
|
| 36 |
+
- id: end-of-file-fixer
|
| 37 |
+
- id: trailing-whitespace
|
| 38 |
+
|
| 39 |
+
- repo: https://github.com/adhtruong/mirrors-typos
|
| 40 |
+
rev: v1.31.1
|
| 41 |
+
hooks:
|
| 42 |
+
- id: typos
|
| 43 |
+
args: [--force-exclude]
|
| 44 |
+
|
| 45 |
+
- repo: https://github.com/asottile/pyupgrade
|
| 46 |
+
rev: v3.19.1
|
| 47 |
+
hooks:
|
| 48 |
+
- id: pyupgrade
|
| 49 |
+
|
| 50 |
+
- repo: https://github.com/astral-sh/ruff-pre-commit
|
| 51 |
+
rev: v0.11.4
|
| 52 |
+
hooks:
|
| 53 |
+
- id: ruff
|
| 54 |
+
args: [--fix]
|
| 55 |
+
- id: ruff-format
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
##### Security #####
|
| 59 |
+
- repo: https://github.com/gitleaks/gitleaks
|
| 60 |
+
rev: v8.24.2
|
| 61 |
+
hooks:
|
| 62 |
+
- id: gitleaks
|
| 63 |
+
|
| 64 |
+
- repo: https://github.com/woodruffw/zizmor-pre-commit
|
| 65 |
+
rev: v1.5.2
|
| 66 |
+
hooks:
|
| 67 |
+
- id: zizmor
|
| 68 |
+
|
| 69 |
+
- repo: https://github.com/PyCQA/bandit
|
| 70 |
+
rev: 1.8.3
|
| 71 |
+
hooks:
|
| 72 |
+
- id: bandit
|
| 73 |
+
args: ["-c", "pyproject.toml"]
|
| 74 |
+
additional_dependencies: ["bandit[toml]"]
|
project/ManiSkill3/src/maniskill3_environment/lerobot/CODE_OF_CONDUCT.md
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Contributor Covenant Code of Conduct
|
| 3 |
+
|
| 4 |
+
## Our Pledge
|
| 5 |
+
|
| 6 |
+
We as members, contributors, and leaders pledge to make participation in our
|
| 7 |
+
community a harassment-free experience for everyone, regardless of age, body
|
| 8 |
+
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
| 9 |
+
identity and expression, level of experience, education, socio-economic status,
|
| 10 |
+
nationality, personal appearance, race, caste, color, religion, or sexual
|
| 11 |
+
identity and orientation.
|
| 12 |
+
|
| 13 |
+
We pledge to act and interact in ways that contribute to an open, welcoming,
|
| 14 |
+
diverse, inclusive, and healthy community.
|
| 15 |
+
|
| 16 |
+
## Our Standards
|
| 17 |
+
|
| 18 |
+
Examples of behavior that contributes to a positive environment for our
|
| 19 |
+
community include:
|
| 20 |
+
|
| 21 |
+
* Demonstrating empathy and kindness toward other people
|
| 22 |
+
* Being respectful of differing opinions, viewpoints, and experiences
|
| 23 |
+
* Giving and gracefully accepting constructive feedback
|
| 24 |
+
* Accepting responsibility and apologizing to those affected by our mistakes,
|
| 25 |
+
and learning from the experience
|
| 26 |
+
* Focusing on what is best not just for us as individuals, but for the overall
|
| 27 |
+
community
|
| 28 |
+
|
| 29 |
+
Examples of unacceptable behavior include:
|
| 30 |
+
|
| 31 |
+
* The use of sexualized language or imagery, and sexual attention or advances of
|
| 32 |
+
any kind
|
| 33 |
+
* Trolling, insulting or derogatory comments, and personal or political attacks
|
| 34 |
+
* Public or private harassment
|
| 35 |
+
* Publishing others' private information, such as a physical or email address,
|
| 36 |
+
without their explicit permission
|
| 37 |
+
* Other conduct which could reasonably be considered inappropriate in a
|
| 38 |
+
professional setting
|
| 39 |
+
|
| 40 |
+
## Enforcement Responsibilities
|
| 41 |
+
|
| 42 |
+
Community leaders are responsible for clarifying and enforcing our standards of
|
| 43 |
+
acceptable behavior and will take appropriate and fair corrective action in
|
| 44 |
+
response to any behavior that they deem inappropriate, threatening, offensive,
|
| 45 |
+
or harmful.
|
| 46 |
+
|
| 47 |
+
Community leaders have the right and responsibility to remove, edit, or reject
|
| 48 |
+
comments, commits, code, wiki edits, issues, and other contributions that are
|
| 49 |
+
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
| 50 |
+
decisions when appropriate.
|
| 51 |
+
|
| 52 |
+
## Scope
|
| 53 |
+
|
| 54 |
+
This Code of Conduct applies within all community spaces, and also applies when
|
| 55 |
+
an individual is officially representing the community in public spaces.
|
| 56 |
+
Examples of representing our community include using an official email address,
|
| 57 |
+
posting via an official social media account, or acting as an appointed
|
| 58 |
+
representative at an online or offline event.
|
| 59 |
+
|
| 60 |
+
## Enforcement
|
| 61 |
+
|
| 62 |
+
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
| 63 |
+
reported to the community leaders responsible for enforcement at
|
| 64 |
+
[feedback@huggingface.co](mailto:feedback@huggingface.co).
|
| 65 |
+
All complaints will be reviewed and investigated promptly and fairly.
|
| 66 |
+
|
| 67 |
+
All community leaders are obligated to respect the privacy and security of the
|
| 68 |
+
reporter of any incident.
|
| 69 |
+
|
| 70 |
+
## Enforcement Guidelines
|
| 71 |
+
|
| 72 |
+
Community leaders will follow these Community Impact Guidelines in determining
|
| 73 |
+
the consequences for any action they deem in violation of this Code of Conduct:
|
| 74 |
+
|
| 75 |
+
### 1. Correction
|
| 76 |
+
|
| 77 |
+
**Community Impact**: Use of inappropriate language or other behavior deemed
|
| 78 |
+
unprofessional or unwelcome in the community.
|
| 79 |
+
|
| 80 |
+
**Consequence**: A private, written warning from community leaders, providing
|
| 81 |
+
clarity around the nature of the violation and an explanation of why the
|
| 82 |
+
behavior was inappropriate. A public apology may be requested.
|
| 83 |
+
|
| 84 |
+
### 2. Warning
|
| 85 |
+
|
| 86 |
+
**Community Impact**: A violation through a single incident or series of
|
| 87 |
+
actions.
|
| 88 |
+
|
| 89 |
+
**Consequence**: A warning with consequences for continued behavior. No
|
| 90 |
+
interaction with the people involved, including unsolicited interaction with
|
| 91 |
+
those enforcing the Code of Conduct, for a specified period of time. This
|
| 92 |
+
includes avoiding interactions in community spaces as well as external channels
|
| 93 |
+
like social media. Violating these terms may lead to a temporary or permanent
|
| 94 |
+
ban.
|
| 95 |
+
|
| 96 |
+
### 3. Temporary Ban
|
| 97 |
+
|
| 98 |
+
**Community Impact**: A serious violation of community standards, including
|
| 99 |
+
sustained inappropriate behavior.
|
| 100 |
+
|
| 101 |
+
**Consequence**: A temporary ban from any sort of interaction or public
|
| 102 |
+
communication with the community for a specified period of time. No public or
|
| 103 |
+
private interaction with the people involved, including unsolicited interaction
|
| 104 |
+
with those enforcing the Code of Conduct, is allowed during this period.
|
| 105 |
+
Violating these terms may lead to a permanent ban.
|
| 106 |
+
|
| 107 |
+
### 4. Permanent Ban
|
| 108 |
+
|
| 109 |
+
**Community Impact**: Demonstrating a pattern of violation of community
|
| 110 |
+
standards, including sustained inappropriate behavior, harassment of an
|
| 111 |
+
individual, or aggression toward or disparagement of classes of individuals.
|
| 112 |
+
|
| 113 |
+
**Consequence**: A permanent ban from any sort of public interaction within the
|
| 114 |
+
community.
|
| 115 |
+
|
| 116 |
+
## Attribution
|
| 117 |
+
|
| 118 |
+
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
| 119 |
+
version 2.1, available at
|
| 120 |
+
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
| 121 |
+
|
| 122 |
+
Community Impact Guidelines were inspired by
|
| 123 |
+
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
| 124 |
+
|
| 125 |
+
For answers to common questions about this code of conduct, see the FAQ at
|
| 126 |
+
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
| 127 |
+
[https://www.contributor-covenant.org/translations][translations].
|
| 128 |
+
|
| 129 |
+
[homepage]: https://www.contributor-covenant.org
|
| 130 |
+
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
|
| 131 |
+
[Mozilla CoC]: https://github.com/mozilla/diversity
|
| 132 |
+
[FAQ]: https://www.contributor-covenant.org/faq
|
| 133 |
+
[translations]: https://www.contributor-covenant.org/translations
|
project/ManiSkill3/src/maniskill3_environment/lerobot/CONTRIBUTING.md
ADDED
|
@@ -0,0 +1,308 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# How to contribute to 🤗 LeRobot?
|
| 2 |
+
|
| 3 |
+
Everyone is welcome to contribute, and we value everybody's contribution. Code
|
| 4 |
+
is thus not the only way to help the community. Answering questions, helping
|
| 5 |
+
others, reaching out and improving the documentation are immensely valuable to
|
| 6 |
+
the community.
|
| 7 |
+
|
| 8 |
+
It also helps us if you spread the word: reference the library from blog posts
|
| 9 |
+
on the awesome projects it made possible, shout out on Twitter when it has
|
| 10 |
+
helped you, or simply ⭐️ the repo to say "thank you".
|
| 11 |
+
|
| 12 |
+
Whichever way you choose to contribute, please be mindful to respect our
|
| 13 |
+
[code of conduct](https://github.com/huggingface/lerobot/blob/main/CODE_OF_CONDUCT.md).
|
| 14 |
+
|
| 15 |
+
## You can contribute in so many ways!
|
| 16 |
+
|
| 17 |
+
Some of the ways you can contribute to 🤗 LeRobot:
|
| 18 |
+
* Fixing outstanding issues with the existing code.
|
| 19 |
+
* Implementing new models, datasets or simulation environments.
|
| 20 |
+
* Contributing to the examples or to the documentation.
|
| 21 |
+
* Submitting issues related to bugs or desired new features.
|
| 22 |
+
|
| 23 |
+
Following the guides below, feel free to open issues and PRs and to coordinate your efforts with the community on our [Discord Channel](https://discord.gg/VjFz58wn3R). For specific inquiries, reach out to [Remi Cadene](mailto:remi.cadene@huggingface.co).
|
| 24 |
+
|
| 25 |
+
If you are not sure how to contribute or want to know the next features we are working on, look at this project page: [LeRobot TODO](https://github.com/orgs/huggingface/projects/46)
|
| 26 |
+
|
| 27 |
+
## Submitting a new issue or feature request
|
| 28 |
+
|
| 29 |
+
Do your best to follow these guidelines when submitting an issue or a feature
|
| 30 |
+
request. It will make it easier for us to come back to you quickly and with good
|
| 31 |
+
feedback.
|
| 32 |
+
|
| 33 |
+
### Did you find a bug?
|
| 34 |
+
|
| 35 |
+
The 🤗 LeRobot library is robust and reliable thanks to the users who notify us of
|
| 36 |
+
the problems they encounter. So thank you for reporting an issue.
|
| 37 |
+
|
| 38 |
+
First, we would really appreciate it if you could **make sure the bug was not
|
| 39 |
+
already reported** (use the search bar on Github under Issues).
|
| 40 |
+
|
| 41 |
+
Did not find it? :( So we can act quickly on it, please follow these steps:
|
| 42 |
+
|
| 43 |
+
* Include your **OS type and version**, the versions of **Python** and **PyTorch**.
|
| 44 |
+
* A short, self-contained, code snippet that allows us to reproduce the bug in
|
| 45 |
+
less than 30s.
|
| 46 |
+
* The full traceback if an exception is raised.
|
| 47 |
+
* Attach any other additional information, like screenshots, you think may help.
|
| 48 |
+
|
| 49 |
+
### Do you want a new feature?
|
| 50 |
+
|
| 51 |
+
A good feature request addresses the following points:
|
| 52 |
+
|
| 53 |
+
1. Motivation first:
|
| 54 |
+
* Is it related to a problem/frustration with the library? If so, please explain
|
| 55 |
+
why. Providing a code snippet that demonstrates the problem is best.
|
| 56 |
+
* Is it related to something you would need for a project? We'd love to hear
|
| 57 |
+
about it!
|
| 58 |
+
* Is it something you worked on and think could benefit the community?
|
| 59 |
+
Awesome! Tell us what problem it solved for you.
|
| 60 |
+
2. Write a *paragraph* describing the feature.
|
| 61 |
+
3. Provide a **code snippet** that demonstrates its future use.
|
| 62 |
+
4. In case this is related to a paper, please attach a link.
|
| 63 |
+
5. Attach any additional information (drawings, screenshots, etc.) you think may help.
|
| 64 |
+
|
| 65 |
+
If your issue is well written we're already 80% of the way there by the time you
|
| 66 |
+
post it.
|
| 67 |
+
|
| 68 |
+
## Adding new policies, datasets or environments
|
| 69 |
+
|
| 70 |
+
Look at our implementations for [datasets](./lerobot/common/datasets/), [policies](./lerobot/common/policies/),
|
| 71 |
+
environments ([aloha](https://github.com/huggingface/gym-aloha),
|
| 72 |
+
[xarm](https://github.com/huggingface/gym-xarm),
|
| 73 |
+
[pusht](https://github.com/huggingface/gym-pusht))
|
| 74 |
+
and follow the same api design.
|
| 75 |
+
|
| 76 |
+
When implementing a new dataset loadable with LeRobotDataset follow these steps:
|
| 77 |
+
- Update `available_datasets_per_env` in `lerobot/__init__.py`
|
| 78 |
+
|
| 79 |
+
When implementing a new environment (e.g. `gym_aloha`), follow these steps:
|
| 80 |
+
- Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py`
|
| 81 |
+
|
| 82 |
+
When implementing a new policy class (e.g. `DiffusionPolicy`) follow these steps:
|
| 83 |
+
- Update `available_policies` and `available_policies_per_env`, in `lerobot/__init__.py`
|
| 84 |
+
- Set the required `name` class attribute.
|
| 85 |
+
- Update variables in `tests/test_available.py` by importing your new Policy class
|
| 86 |
+
|
| 87 |
+
## Submitting a pull request (PR)
|
| 88 |
+
|
| 89 |
+
Before writing code, we strongly advise you to search through the existing PRs or
|
| 90 |
+
issues to make sure that nobody is already working on the same thing. If you are
|
| 91 |
+
unsure, it is always a good idea to open an issue to get some feedback.
|
| 92 |
+
|
| 93 |
+
You will need basic `git` proficiency to be able to contribute to
|
| 94 |
+
🤗 LeRobot. `git` is not the easiest tool to use but it has the greatest
|
| 95 |
+
manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro
|
| 96 |
+
Git](https://git-scm.com/book/en/v2) is a very good reference.
|
| 97 |
+
|
| 98 |
+
Follow these steps to start contributing:
|
| 99 |
+
|
| 100 |
+
1. Fork the [repository](https://github.com/huggingface/lerobot) by
|
| 101 |
+
clicking on the 'Fork' button on the repository's page. This creates a copy of the code
|
| 102 |
+
under your GitHub user account.
|
| 103 |
+
|
| 104 |
+
2. Clone your fork to your local disk, and add the base repository as a remote. The following command
|
| 105 |
+
assumes you have your public SSH key uploaded to GitHub. See the following guide for more
|
| 106 |
+
[information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository).
|
| 107 |
+
|
| 108 |
+
```bash
|
| 109 |
+
git clone git@github.com:<your Github handle>/lerobot.git
|
| 110 |
+
cd lerobot
|
| 111 |
+
git remote add upstream https://github.com/huggingface/lerobot.git
|
| 112 |
+
```
|
| 113 |
+
|
| 114 |
+
3. Create a new branch to hold your development changes, and do this for every new PR you work on.
|
| 115 |
+
|
| 116 |
+
Start by synchronizing your `main` branch with the `upstream/main` branch (more details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)):
|
| 117 |
+
|
| 118 |
+
```bash
|
| 119 |
+
git checkout main
|
| 120 |
+
git fetch upstream
|
| 121 |
+
git rebase upstream/main
|
| 122 |
+
```
|
| 123 |
+
|
| 124 |
+
Once your `main` branch is synchronized, create a new branch from it:
|
| 125 |
+
|
| 126 |
+
```bash
|
| 127 |
+
git checkout -b a-descriptive-name-for-my-changes
|
| 128 |
+
```
|
| 129 |
+
|
| 130 |
+
🚨 **Do not** work on the `main` branch.
|
| 131 |
+
|
| 132 |
+
4. For development, we advise using a tool like `poetry` or `uv` instead of just `pip` to easily track our dependencies.
|
| 133 |
+
Follow the instructions to [install poetry](https://python-poetry.org/docs/#installation) (use a version >=2.1.0) or to [install uv](https://docs.astral.sh/uv/getting-started/installation/#installation-methods) if you don't have one of them already.
|
| 134 |
+
|
| 135 |
+
Set up a development environment with conda or miniconda:
|
| 136 |
+
```bash
|
| 137 |
+
conda create -y -n lerobot-dev python=3.10 && conda activate lerobot-dev
|
| 138 |
+
```
|
| 139 |
+
|
| 140 |
+
If you're using `uv`, it can manage python versions so you can instead do:
|
| 141 |
+
```bash
|
| 142 |
+
uv venv --python 3.10 && source .venv/bin/activate
|
| 143 |
+
```
|
| 144 |
+
|
| 145 |
+
To develop on 🤗 LeRobot, you will at least need to install the `dev` and `test` extras dependencies along with the core library:
|
| 146 |
+
|
| 147 |
+
using `poetry`
|
| 148 |
+
```bash
|
| 149 |
+
poetry sync --extras "dev test"
|
| 150 |
+
```
|
| 151 |
+
|
| 152 |
+
using `uv`
|
| 153 |
+
```bash
|
| 154 |
+
uv sync --extra dev --extra test
|
| 155 |
+
```
|
| 156 |
+
|
| 157 |
+
You can also install the project with all its dependencies (including environments):
|
| 158 |
+
|
| 159 |
+
using `poetry`
|
| 160 |
+
```bash
|
| 161 |
+
poetry sync --all-extras
|
| 162 |
+
```
|
| 163 |
+
|
| 164 |
+
using `uv`
|
| 165 |
+
```bash
|
| 166 |
+
uv sync --all-extras
|
| 167 |
+
```
|
| 168 |
+
|
| 169 |
+
> **Note:** If you don't install simulation environments with `--all-extras`, the tests that require them will be skipped when running the pytest suite locally. However, they *will* be tested in the CI. In general, we advise you to install everything and test locally before pushing.
|
| 170 |
+
|
| 171 |
+
Whichever command you chose to install the project (e.g. `poetry sync --all-extras`), you should run it again when pulling code with an updated version of `pyproject.toml` and `poetry.lock` in order to synchronize your virtual environment with the new dependencies.
|
| 172 |
+
|
| 173 |
+
The equivalent of `pip install some-package`, would just be:
|
| 174 |
+
|
| 175 |
+
using `poetry`
|
| 176 |
+
```bash
|
| 177 |
+
poetry add some-package
|
| 178 |
+
```
|
| 179 |
+
|
| 180 |
+
using `uv`
|
| 181 |
+
```bash
|
| 182 |
+
uv add some-package
|
| 183 |
+
```
|
| 184 |
+
|
| 185 |
+
When making changes to the poetry sections of the `pyproject.toml`, you should run the following command to lock dependencies.
|
| 186 |
+
using `poetry`
|
| 187 |
+
```bash
|
| 188 |
+
poetry lock
|
| 189 |
+
```
|
| 190 |
+
|
| 191 |
+
using `uv`
|
| 192 |
+
```bash
|
| 193 |
+
uv lock
|
| 194 |
+
```
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
5. Develop the features on your branch.
|
| 198 |
+
|
| 199 |
+
As you work on the features, you should make sure that the test suite
|
| 200 |
+
passes. You should run the tests impacted by your changes like this (see
|
| 201 |
+
below an explanation regarding the environment variable):
|
| 202 |
+
|
| 203 |
+
```bash
|
| 204 |
+
pytest tests/<TEST_TO_RUN>.py
|
| 205 |
+
```
|
| 206 |
+
|
| 207 |
+
6. Follow our style.
|
| 208 |
+
|
| 209 |
+
`lerobot` relies on `ruff` to format its source code
|
| 210 |
+
consistently. Set up [`pre-commit`](https://pre-commit.com/) to run these checks
|
| 211 |
+
automatically as Git commit hooks.
|
| 212 |
+
|
| 213 |
+
Install `pre-commit` hooks:
|
| 214 |
+
```bash
|
| 215 |
+
pre-commit install
|
| 216 |
+
```
|
| 217 |
+
|
| 218 |
+
You can run these hooks whenever you need on staged files with:
|
| 219 |
+
```bash
|
| 220 |
+
pre-commit
|
| 221 |
+
```
|
| 222 |
+
|
| 223 |
+
Once you're happy with your changes, add changed files using `git add` and
|
| 224 |
+
make a commit with `git commit` to record your changes locally:
|
| 225 |
+
|
| 226 |
+
```bash
|
| 227 |
+
git add modified_file.py
|
| 228 |
+
git commit
|
| 229 |
+
```
|
| 230 |
+
|
| 231 |
+
Note, if you already committed some changes that have wrong formatting, you can use:
|
| 232 |
+
```bash
|
| 233 |
+
pre-commit run --all-files
|
| 234 |
+
```
|
| 235 |
+
|
| 236 |
+
Please write [good commit messages](https://chris.beams.io/posts/git-commit/).
|
| 237 |
+
|
| 238 |
+
It is a good idea to sync your copy of the code with the original
|
| 239 |
+
repository regularly. This way you can quickly account for changes:
|
| 240 |
+
|
| 241 |
+
```bash
|
| 242 |
+
git fetch upstream
|
| 243 |
+
git rebase upstream/main
|
| 244 |
+
```
|
| 245 |
+
|
| 246 |
+
Push the changes to your account using:
|
| 247 |
+
|
| 248 |
+
```bash
|
| 249 |
+
git push -u origin a-descriptive-name-for-my-changes
|
| 250 |
+
```
|
| 251 |
+
|
| 252 |
+
7. Once you are satisfied (**and the checklist below is happy too**), go to the
|
| 253 |
+
webpage of your fork on GitHub. Click on 'Pull request' to send your changes
|
| 254 |
+
to the project maintainers for review.
|
| 255 |
+
|
| 256 |
+
8. It's ok if maintainers ask you for changes. It happens to core contributors
|
| 257 |
+
too! So everyone can see the changes in the Pull request, work in your local
|
| 258 |
+
branch and push the changes to your fork. They will automatically appear in
|
| 259 |
+
the pull request.
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
### Checklist
|
| 263 |
+
|
| 264 |
+
1. The title of your pull request should be a summary of its contribution;
|
| 265 |
+
2. If your pull request addresses an issue, please mention the issue number in
|
| 266 |
+
the pull request description to make sure they are linked (and people
|
| 267 |
+
consulting the issue know you are working on it);
|
| 268 |
+
3. To indicate a work in progress please prefix the title with `[WIP]`, or preferably mark
|
| 269 |
+
the PR as a draft PR. These are useful to avoid duplicated work, and to differentiate
|
| 270 |
+
it from PRs ready to be merged;
|
| 271 |
+
4. Make sure existing tests pass;
|
| 272 |
+
<!-- 5. Add high-coverage tests. No quality testing = no merge.
|
| 273 |
+
|
| 274 |
+
See an example of a good PR here: https://github.com/huggingface/lerobot/pull/ -->
|
| 275 |
+
|
| 276 |
+
### Tests
|
| 277 |
+
|
| 278 |
+
An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/lerobot/tree/main/tests).
|
| 279 |
+
|
| 280 |
+
Install [git lfs](https://git-lfs.com/) to retrieve test artifacts (if you don't have it already).
|
| 281 |
+
|
| 282 |
+
On Mac:
|
| 283 |
+
```bash
|
| 284 |
+
brew install git-lfs
|
| 285 |
+
git lfs install
|
| 286 |
+
```
|
| 287 |
+
|
| 288 |
+
On Ubuntu:
|
| 289 |
+
```bash
|
| 290 |
+
sudo apt-get install git-lfs
|
| 291 |
+
git lfs install
|
| 292 |
+
```
|
| 293 |
+
|
| 294 |
+
Pull artifacts if they're not in [tests/artifacts](tests/artifacts)
|
| 295 |
+
```bash
|
| 296 |
+
git lfs pull
|
| 297 |
+
```
|
| 298 |
+
|
| 299 |
+
We use `pytest` in order to run the tests. From the root of the
|
| 300 |
+
repository, here's how to run tests with `pytest` for the library:
|
| 301 |
+
|
| 302 |
+
```bash
|
| 303 |
+
python -m pytest -sv ./tests
|
| 304 |
+
```
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
You can specify a smaller set of tests in order to test only the feature
|
| 308 |
+
you're working on.
|
project/ManiSkill3/src/maniskill3_environment/lerobot/LICENSE
ADDED
|
@@ -0,0 +1,507 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Copyright 2024 The Hugging Face team. All rights reserved.
|
| 2 |
+
|
| 3 |
+
Apache License
|
| 4 |
+
Version 2.0, January 2004
|
| 5 |
+
http://www.apache.org/licenses/
|
| 6 |
+
|
| 7 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 8 |
+
|
| 9 |
+
1. Definitions.
|
| 10 |
+
|
| 11 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 12 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 13 |
+
|
| 14 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 15 |
+
the copyright owner that is granting the License.
|
| 16 |
+
|
| 17 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 18 |
+
other entities that control, are controlled by, or are under common
|
| 19 |
+
control with that entity. For the purposes of this definition,
|
| 20 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 21 |
+
direction or management of such entity, whether by contract or
|
| 22 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 23 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 24 |
+
|
| 25 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 26 |
+
exercising permissions granted by this License.
|
| 27 |
+
|
| 28 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 29 |
+
including but not limited to software source code, documentation
|
| 30 |
+
source, and configuration files.
|
| 31 |
+
|
| 32 |
+
"Object" form shall mean any form resulting from mechanical
|
| 33 |
+
transformation or translation of a Source form, including but
|
| 34 |
+
not limited to compiled object code, generated documentation,
|
| 35 |
+
and conversions to other media types.
|
| 36 |
+
|
| 37 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 38 |
+
Object form, made available under the License, as indicated by a
|
| 39 |
+
copyright notice that is included in or attached to the work
|
| 40 |
+
(an example is provided in the Appendix below).
|
| 41 |
+
|
| 42 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 43 |
+
form, that is based on (or derived from) the Work and for which the
|
| 44 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 45 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 46 |
+
of this License, Derivative Works shall not include works that remain
|
| 47 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 48 |
+
the Work and Derivative Works thereof.
|
| 49 |
+
|
| 50 |
+
"Contribution" shall mean any work of authorship, including
|
| 51 |
+
the original version of the Work and any modifications or additions
|
| 52 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 53 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 54 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 55 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 56 |
+
means any form of electronic, verbal, or written communication sent
|
| 57 |
+
to the Licensor or its representatives, including but not limited to
|
| 58 |
+
communication on electronic mailing lists, source code control systems,
|
| 59 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 60 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 61 |
+
excluding communication that is conspicuously marked or otherwise
|
| 62 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 63 |
+
|
| 64 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 65 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 66 |
+
subsequently incorporated within the Work.
|
| 67 |
+
|
| 68 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 69 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 70 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 71 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 72 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 73 |
+
Work and such Derivative Works in Source or Object form.
|
| 74 |
+
|
| 75 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 76 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 77 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 78 |
+
(except as stated in this section) patent license to make, have made,
|
| 79 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 80 |
+
where such license applies only to those patent claims licensable
|
| 81 |
+
by such Contributor that are necessarily infringed by their
|
| 82 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 83 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 84 |
+
institute patent litigation against any entity (including a
|
| 85 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 86 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 87 |
+
or contributory patent infringement, then any patent licenses
|
| 88 |
+
granted to You under this License for that Work shall terminate
|
| 89 |
+
as of the date such litigation is filed.
|
| 90 |
+
|
| 91 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 92 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 93 |
+
modifications, and in Source or Object form, provided that You
|
| 94 |
+
meet the following conditions:
|
| 95 |
+
|
| 96 |
+
(a) You must give any other recipients of the Work or
|
| 97 |
+
Derivative Works a copy of this License; and
|
| 98 |
+
|
| 99 |
+
(b) You must cause any modified files to carry prominent notices
|
| 100 |
+
stating that You changed the files; and
|
| 101 |
+
|
| 102 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 103 |
+
that You distribute, all copyright, patent, trademark, and
|
| 104 |
+
attribution notices from the Source form of the Work,
|
| 105 |
+
excluding those notices that do not pertain to any part of
|
| 106 |
+
the Derivative Works; and
|
| 107 |
+
|
| 108 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 109 |
+
distribution, then any Derivative Works that You distribute must
|
| 110 |
+
include a readable copy of the attribution notices contained
|
| 111 |
+
within such NOTICE file, excluding those notices that do not
|
| 112 |
+
pertain to any part of the Derivative Works, in at least one
|
| 113 |
+
of the following places: within a NOTICE text file distributed
|
| 114 |
+
as part of the Derivative Works; within the Source form or
|
| 115 |
+
documentation, if provided along with the Derivative Works; or,
|
| 116 |
+
within a display generated by the Derivative Works, if and
|
| 117 |
+
wherever such third-party notices normally appear. The contents
|
| 118 |
+
of the NOTICE file are for informational purposes only and
|
| 119 |
+
do not modify the License. You may add Your own attribution
|
| 120 |
+
notices within Derivative Works that You distribute, alongside
|
| 121 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 122 |
+
that such additional attribution notices cannot be construed
|
| 123 |
+
as modifying the License.
|
| 124 |
+
|
| 125 |
+
You may add Your own copyright statement to Your modifications and
|
| 126 |
+
may provide additional or different license terms and conditions
|
| 127 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 128 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 129 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 130 |
+
the conditions stated in this License.
|
| 131 |
+
|
| 132 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 133 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 134 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 135 |
+
this License, without any additional terms or conditions.
|
| 136 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 137 |
+
the terms of any separate license agreement you may have executed
|
| 138 |
+
with Licensor regarding such Contributions.
|
| 139 |
+
|
| 140 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 141 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 142 |
+
except as required for reasonable and customary use in describing the
|
| 143 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 144 |
+
|
| 145 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 146 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 147 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 148 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 149 |
+
implied, including, without limitation, any warranties or conditions
|
| 150 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 151 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 152 |
+
appropriateness of using or redistributing the Work and assume any
|
| 153 |
+
risks associated with Your exercise of permissions under this License.
|
| 154 |
+
|
| 155 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 156 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 157 |
+
unless required by applicable law (such as deliberate and grossly
|
| 158 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 159 |
+
liable to You for damages, including any direct, indirect, special,
|
| 160 |
+
incidental, or consequential damages of any character arising as a
|
| 161 |
+
result of this License or out of the use or inability to use the
|
| 162 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 163 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 164 |
+
other commercial damages or losses), even if such Contributor
|
| 165 |
+
has been advised of the possibility of such damages.
|
| 166 |
+
|
| 167 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 168 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 169 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 170 |
+
or other liability obligations and/or rights consistent with this
|
| 171 |
+
License. However, in accepting such obligations, You may act only
|
| 172 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 173 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 174 |
+
defend, and hold each Contributor harmless for any liability
|
| 175 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 176 |
+
of your accepting any such warranty or additional liability.
|
| 177 |
+
|
| 178 |
+
END OF TERMS AND CONDITIONS
|
| 179 |
+
|
| 180 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 181 |
+
|
| 182 |
+
To apply the Apache License to your work, attach the following
|
| 183 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 184 |
+
replaced with your own identifying information. (Don't include
|
| 185 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 186 |
+
comment syntax for the file format. We also recommend that a
|
| 187 |
+
file or class name and description of purpose be included on the
|
| 188 |
+
same "printed page" as the copyright notice for easier
|
| 189 |
+
identification within third-party archives.
|
| 190 |
+
|
| 191 |
+
Copyright [yyyy] [name of copyright owner]
|
| 192 |
+
|
| 193 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 194 |
+
you may not use this file except in compliance with the License.
|
| 195 |
+
You may obtain a copy of the License at
|
| 196 |
+
|
| 197 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 198 |
+
|
| 199 |
+
Unless required by applicable law or agreed to in writing, software
|
| 200 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 201 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 202 |
+
See the License for the specific language governing permissions and
|
| 203 |
+
limitations under the License.
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
## Some of lerobot's code is derived from Diffusion Policy, which is subject to the following copyright notice:
|
| 207 |
+
|
| 208 |
+
MIT License
|
| 209 |
+
|
| 210 |
+
Copyright (c) 2023 Columbia Artificial Intelligence and Robotics Lab
|
| 211 |
+
|
| 212 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 213 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 214 |
+
in the Software without restriction, including without limitation the rights
|
| 215 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 216 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 217 |
+
furnished to do so, subject to the following conditions:
|
| 218 |
+
|
| 219 |
+
The above copyright notice and this permission notice shall be included in all
|
| 220 |
+
copies or substantial portions of the Software.
|
| 221 |
+
|
| 222 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 223 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 224 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 225 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 226 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 227 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 228 |
+
SOFTWARE.
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
## Some of lerobot's code is derived from FOWM, which is subject to the following copyright notice:
|
| 232 |
+
|
| 233 |
+
MIT License
|
| 234 |
+
|
| 235 |
+
Copyright (c) 2023 Yunhai Feng
|
| 236 |
+
|
| 237 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 238 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 239 |
+
in the Software without restriction, including without limitation the rights
|
| 240 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 241 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 242 |
+
furnished to do so, subject to the following conditions:
|
| 243 |
+
|
| 244 |
+
The above copyright notice and this permission notice shall be included in all
|
| 245 |
+
copies or substantial portions of the Software.
|
| 246 |
+
|
| 247 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 248 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 249 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 250 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 251 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 252 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 253 |
+
SOFTWARE.
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
## Some of lerobot's code is derived from simxarm, which is subject to the following copyright notice:
|
| 257 |
+
|
| 258 |
+
MIT License
|
| 259 |
+
|
| 260 |
+
Copyright (c) 2023 Nicklas Hansen & Yanjie Ze
|
| 261 |
+
|
| 262 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 263 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 264 |
+
in the Software without restriction, including without limitation the rights
|
| 265 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 266 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 267 |
+
furnished to do so, subject to the following conditions:
|
| 268 |
+
|
| 269 |
+
The above copyright notice and this permission notice shall be included in all
|
| 270 |
+
copies or substantial portions of the Software.
|
| 271 |
+
|
| 272 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 273 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 274 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 275 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 276 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 277 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 278 |
+
SOFTWARE.
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
## Some of lerobot's code is derived from ALOHA, which is subject to the following copyright notice:
|
| 282 |
+
|
| 283 |
+
MIT License
|
| 284 |
+
|
| 285 |
+
Copyright (c) 2023 Tony Z. Zhao
|
| 286 |
+
|
| 287 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 288 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 289 |
+
in the Software without restriction, including without limitation the rights
|
| 290 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 291 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 292 |
+
furnished to do so, subject to the following conditions:
|
| 293 |
+
|
| 294 |
+
The above copyright notice and this permission notice shall be included in all
|
| 295 |
+
copies or substantial portions of the Software.
|
| 296 |
+
|
| 297 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 298 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 299 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 300 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 301 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 302 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 303 |
+
SOFTWARE.
|
| 304 |
+
|
| 305 |
+
## Some of lerobot's code is derived from DETR, which is subject to the following copyright notice:
|
| 306 |
+
|
| 307 |
+
Apache License
|
| 308 |
+
Version 2.0, January 2004
|
| 309 |
+
http://www.apache.org/licenses/
|
| 310 |
+
|
| 311 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 312 |
+
|
| 313 |
+
1. Definitions.
|
| 314 |
+
|
| 315 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 316 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 317 |
+
|
| 318 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 319 |
+
the copyright owner that is granting the License.
|
| 320 |
+
|
| 321 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 322 |
+
other entities that control, are controlled by, or are under common
|
| 323 |
+
control with that entity. For the purposes of this definition,
|
| 324 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 325 |
+
direction or management of such entity, whether by contract or
|
| 326 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 327 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 328 |
+
|
| 329 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 330 |
+
exercising permissions granted by this License.
|
| 331 |
+
|
| 332 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 333 |
+
including but not limited to software source code, documentation
|
| 334 |
+
source, and configuration files.
|
| 335 |
+
|
| 336 |
+
"Object" form shall mean any form resulting from mechanical
|
| 337 |
+
transformation or translation of a Source form, including but
|
| 338 |
+
not limited to compiled object code, generated documentation,
|
| 339 |
+
and conversions to other media types.
|
| 340 |
+
|
| 341 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 342 |
+
Object form, made available under the License, as indicated by a
|
| 343 |
+
copyright notice that is included in or attached to the work
|
| 344 |
+
(an example is provided in the Appendix below).
|
| 345 |
+
|
| 346 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 347 |
+
form, that is based on (or derived from) the Work and for which the
|
| 348 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 349 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 350 |
+
of this License, Derivative Works shall not include works that remain
|
| 351 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 352 |
+
the Work and Derivative Works thereof.
|
| 353 |
+
|
| 354 |
+
"Contribution" shall mean any work of authorship, including
|
| 355 |
+
the original version of the Work and any modifications or additions
|
| 356 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 357 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 358 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 359 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 360 |
+
means any form of electronic, verbal, or written communication sent
|
| 361 |
+
to the Licensor or its representatives, including but not limited to
|
| 362 |
+
communication on electronic mailing lists, source code control systems,
|
| 363 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 364 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 365 |
+
excluding communication that is conspicuously marked or otherwise
|
| 366 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 367 |
+
|
| 368 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 369 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 370 |
+
subsequently incorporated within the Work.
|
| 371 |
+
|
| 372 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 373 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 374 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 375 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 376 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 377 |
+
Work and such Derivative Works in Source or Object form.
|
| 378 |
+
|
| 379 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 380 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 381 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 382 |
+
(except as stated in this section) patent license to make, have made,
|
| 383 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 384 |
+
where such license applies only to those patent claims licensable
|
| 385 |
+
by such Contributor that are necessarily infringed by their
|
| 386 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 387 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 388 |
+
institute patent litigation against any entity (including a
|
| 389 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 390 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 391 |
+
or contributory patent infringement, then any patent licenses
|
| 392 |
+
granted to You under this License for that Work shall terminate
|
| 393 |
+
as of the date such litigation is filed.
|
| 394 |
+
|
| 395 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 396 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 397 |
+
modifications, and in Source or Object form, provided that You
|
| 398 |
+
meet the following conditions:
|
| 399 |
+
|
| 400 |
+
(a) You must give any other recipients of the Work or
|
| 401 |
+
Derivative Works a copy of this License; and
|
| 402 |
+
|
| 403 |
+
(b) You must cause any modified files to carry prominent notices
|
| 404 |
+
stating that You changed the files; and
|
| 405 |
+
|
| 406 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 407 |
+
that You distribute, all copyright, patent, trademark, and
|
| 408 |
+
attribution notices from the Source form of the Work,
|
| 409 |
+
excluding those notices that do not pertain to any part of
|
| 410 |
+
the Derivative Works; and
|
| 411 |
+
|
| 412 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 413 |
+
distribution, then any Derivative Works that You distribute must
|
| 414 |
+
include a readable copy of the attribution notices contained
|
| 415 |
+
within such NOTICE file, excluding those notices that do not
|
| 416 |
+
pertain to any part of the Derivative Works, in at least one
|
| 417 |
+
of the following places: within a NOTICE text file distributed
|
| 418 |
+
as part of the Derivative Works; within the Source form or
|
| 419 |
+
documentation, if provided along with the Derivative Works; or,
|
| 420 |
+
within a display generated by the Derivative Works, if and
|
| 421 |
+
wherever such third-party notices normally appear. The contents
|
| 422 |
+
of the NOTICE file are for informational purposes only and
|
| 423 |
+
do not modify the License. You may add Your own attribution
|
| 424 |
+
notices within Derivative Works that You distribute, alongside
|
| 425 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 426 |
+
that such additional attribution notices cannot be construed
|
| 427 |
+
as modifying the License.
|
| 428 |
+
|
| 429 |
+
You may add Your own copyright statement to Your modifications and
|
| 430 |
+
may provide additional or different license terms and conditions
|
| 431 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 432 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 433 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 434 |
+
the conditions stated in this License.
|
| 435 |
+
|
| 436 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 437 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 438 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 439 |
+
this License, without any additional terms or conditions.
|
| 440 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 441 |
+
the terms of any separate license agreement you may have executed
|
| 442 |
+
with Licensor regarding such Contributions.
|
| 443 |
+
|
| 444 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 445 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 446 |
+
except as required for reasonable and customary use in describing the
|
| 447 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 448 |
+
|
| 449 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 450 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 451 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 452 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 453 |
+
implied, including, without limitation, any warranties or conditions
|
| 454 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 455 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 456 |
+
appropriateness of using or redistributing the Work and assume any
|
| 457 |
+
risks associated with Your exercise of permissions under this License.
|
| 458 |
+
|
| 459 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 460 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 461 |
+
unless required by applicable law (such as deliberate and grossly
|
| 462 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 463 |
+
liable to You for damages, including any direct, indirect, special,
|
| 464 |
+
incidental, or consequential damages of any character arising as a
|
| 465 |
+
result of this License or out of the use or inability to use the
|
| 466 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 467 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 468 |
+
other commercial damages or losses), even if such Contributor
|
| 469 |
+
has been advised of the possibility of such damages.
|
| 470 |
+
|
| 471 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 472 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 473 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 474 |
+
or other liability obligations and/or rights consistent with this
|
| 475 |
+
License. However, in accepting such obligations, You may act only
|
| 476 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 477 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 478 |
+
defend, and hold each Contributor harmless for any liability
|
| 479 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 480 |
+
of your accepting any such warranty or additional liability.
|
| 481 |
+
|
| 482 |
+
END OF TERMS AND CONDITIONS
|
| 483 |
+
|
| 484 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 485 |
+
|
| 486 |
+
To apply the Apache License to your work, attach the following
|
| 487 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 488 |
+
replaced with your own identifying information. (Don't include
|
| 489 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 490 |
+
comment syntax for the file format. We also recommend that a
|
| 491 |
+
file or class name and description of purpose be included on the
|
| 492 |
+
same "printed page" as the copyright notice for easier
|
| 493 |
+
identification within third-party archives.
|
| 494 |
+
|
| 495 |
+
Copyright 2020 - present, Facebook, Inc
|
| 496 |
+
|
| 497 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 498 |
+
you may not use this file except in compliance with the License.
|
| 499 |
+
You may obtain a copy of the License at
|
| 500 |
+
|
| 501 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 502 |
+
|
| 503 |
+
Unless required by applicable law or agreed to in writing, software
|
| 504 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 505 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 506 |
+
See the License for the specific language governing permissions and
|
| 507 |
+
limitations under the License.
|
project/ManiSkill3/src/maniskill3_environment/lerobot/Makefile
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
.PHONY: tests
|
| 16 |
+
|
| 17 |
+
PYTHON_PATH := $(shell which python)
|
| 18 |
+
|
| 19 |
+
# If uv is installed and a virtual environment exists, use it
|
| 20 |
+
UV_CHECK := $(shell command -v uv)
|
| 21 |
+
ifneq ($(UV_CHECK),)
|
| 22 |
+
PYTHON_PATH := $(shell .venv/bin/python)
|
| 23 |
+
endif
|
| 24 |
+
|
| 25 |
+
export PATH := $(dir $(PYTHON_PATH)):$(PATH)
|
| 26 |
+
|
| 27 |
+
DEVICE ?= cpu
|
| 28 |
+
|
| 29 |
+
build-cpu:
|
| 30 |
+
docker build -t lerobot:latest -f docker/lerobot-cpu/Dockerfile .
|
| 31 |
+
|
| 32 |
+
build-gpu:
|
| 33 |
+
docker build -t lerobot:latest -f docker/lerobot-gpu/Dockerfile .
|
| 34 |
+
|
| 35 |
+
test-end-to-end:
|
| 36 |
+
${MAKE} DEVICE=$(DEVICE) test-act-ete-train
|
| 37 |
+
${MAKE} DEVICE=$(DEVICE) test-act-ete-train-resume
|
| 38 |
+
${MAKE} DEVICE=$(DEVICE) test-act-ete-eval
|
| 39 |
+
${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-train
|
| 40 |
+
${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-eval
|
| 41 |
+
${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train
|
| 42 |
+
${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-eval
|
| 43 |
+
|
| 44 |
+
test-act-ete-train:
|
| 45 |
+
python lerobot/scripts/train.py \
|
| 46 |
+
--policy.type=act \
|
| 47 |
+
--policy.dim_model=64 \
|
| 48 |
+
--policy.n_action_steps=20 \
|
| 49 |
+
--policy.chunk_size=20 \
|
| 50 |
+
--policy.device=$(DEVICE) \
|
| 51 |
+
--env.type=aloha \
|
| 52 |
+
--env.episode_length=5 \
|
| 53 |
+
--dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
|
| 54 |
+
--dataset.image_transforms.enable=true \
|
| 55 |
+
--dataset.episodes="[0]" \
|
| 56 |
+
--batch_size=2 \
|
| 57 |
+
--steps=4 \
|
| 58 |
+
--eval_freq=2 \
|
| 59 |
+
--eval.n_episodes=1 \
|
| 60 |
+
--eval.batch_size=1 \
|
| 61 |
+
--save_freq=2 \
|
| 62 |
+
--save_checkpoint=true \
|
| 63 |
+
--log_freq=1 \
|
| 64 |
+
--wandb.enable=false \
|
| 65 |
+
--output_dir=tests/outputs/act/
|
| 66 |
+
|
| 67 |
+
test-act-ete-train-resume:
|
| 68 |
+
python lerobot/scripts/train.py \
|
| 69 |
+
--config_path=tests/outputs/act/checkpoints/000002/pretrained_model/train_config.json \
|
| 70 |
+
--resume=true
|
| 71 |
+
|
| 72 |
+
test-act-ete-eval:
|
| 73 |
+
python lerobot/scripts/eval.py \
|
| 74 |
+
--policy.path=tests/outputs/act/checkpoints/000004/pretrained_model \
|
| 75 |
+
--policy.device=$(DEVICE) \
|
| 76 |
+
--env.type=aloha \
|
| 77 |
+
--env.episode_length=5 \
|
| 78 |
+
--eval.n_episodes=1 \
|
| 79 |
+
--eval.batch_size=1
|
| 80 |
+
|
| 81 |
+
test-diffusion-ete-train:
|
| 82 |
+
python lerobot/scripts/train.py \
|
| 83 |
+
--policy.type=diffusion \
|
| 84 |
+
--policy.down_dims='[64,128,256]' \
|
| 85 |
+
--policy.diffusion_step_embed_dim=32 \
|
| 86 |
+
--policy.num_inference_steps=10 \
|
| 87 |
+
--policy.device=$(DEVICE) \
|
| 88 |
+
--env.type=pusht \
|
| 89 |
+
--env.episode_length=5 \
|
| 90 |
+
--dataset.repo_id=lerobot/pusht \
|
| 91 |
+
--dataset.image_transforms.enable=true \
|
| 92 |
+
--dataset.episodes="[0]" \
|
| 93 |
+
--batch_size=2 \
|
| 94 |
+
--steps=2 \
|
| 95 |
+
--eval_freq=2 \
|
| 96 |
+
--eval.n_episodes=1 \
|
| 97 |
+
--eval.batch_size=1 \
|
| 98 |
+
--save_checkpoint=true \
|
| 99 |
+
--save_freq=2 \
|
| 100 |
+
--log_freq=1 \
|
| 101 |
+
--wandb.enable=false \
|
| 102 |
+
--output_dir=tests/outputs/diffusion/
|
| 103 |
+
|
| 104 |
+
test-diffusion-ete-eval:
|
| 105 |
+
python lerobot/scripts/eval.py \
|
| 106 |
+
--policy.path=tests/outputs/diffusion/checkpoints/000002/pretrained_model \
|
| 107 |
+
--policy.device=$(DEVICE) \
|
| 108 |
+
--env.type=pusht \
|
| 109 |
+
--env.episode_length=5 \
|
| 110 |
+
--eval.n_episodes=1 \
|
| 111 |
+
--eval.batch_size=1
|
| 112 |
+
|
| 113 |
+
test-tdmpc-ete-train:
|
| 114 |
+
python lerobot/scripts/train.py \
|
| 115 |
+
--policy.type=tdmpc \
|
| 116 |
+
--policy.device=$(DEVICE) \
|
| 117 |
+
--env.type=xarm \
|
| 118 |
+
--env.task=XarmLift-v0 \
|
| 119 |
+
--env.episode_length=5 \
|
| 120 |
+
--dataset.repo_id=lerobot/xarm_lift_medium \
|
| 121 |
+
--dataset.image_transforms.enable=true \
|
| 122 |
+
--dataset.episodes="[0]" \
|
| 123 |
+
--batch_size=2 \
|
| 124 |
+
--steps=2 \
|
| 125 |
+
--eval_freq=2 \
|
| 126 |
+
--eval.n_episodes=1 \
|
| 127 |
+
--eval.batch_size=1 \
|
| 128 |
+
--save_checkpoint=true \
|
| 129 |
+
--save_freq=2 \
|
| 130 |
+
--log_freq=1 \
|
| 131 |
+
--wandb.enable=false \
|
| 132 |
+
--output_dir=tests/outputs/tdmpc/
|
| 133 |
+
|
| 134 |
+
test-tdmpc-ete-eval:
|
| 135 |
+
python lerobot/scripts/eval.py \
|
| 136 |
+
--policy.path=tests/outputs/tdmpc/checkpoints/000002/pretrained_model \
|
| 137 |
+
--policy.device=$(DEVICE) \
|
| 138 |
+
--env.type=xarm \
|
| 139 |
+
--env.episode_length=5 \
|
| 140 |
+
--env.task=XarmLift-v0 \
|
| 141 |
+
--eval.n_episodes=1 \
|
| 142 |
+
--eval.batch_size=1
|
project/ManiSkill3/src/maniskill3_environment/lerobot/README.md
ADDED
|
@@ -0,0 +1,393 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<p align="center">
|
| 2 |
+
<picture>
|
| 3 |
+
<source media="(prefers-color-scheme: dark)" srcset="media/lerobot-logo-thumbnail.png">
|
| 4 |
+
<source media="(prefers-color-scheme: light)" srcset="media/lerobot-logo-thumbnail.png">
|
| 5 |
+
<img alt="LeRobot, Hugging Face Robotics Library" src="media/lerobot-logo-thumbnail.png" style="max-width: 100%;">
|
| 6 |
+
</picture>
|
| 7 |
+
<br/>
|
| 8 |
+
<br/>
|
| 9 |
+
</p>
|
| 10 |
+
|
| 11 |
+
<div align="center">
|
| 12 |
+
|
| 13 |
+
[](https://github.com/huggingface/lerobot/actions/workflows/nightly-tests.yml?query=branch%3Amain)
|
| 14 |
+
[](https://codecov.io/gh/huggingface/lerobot)
|
| 15 |
+
[](https://www.python.org/downloads/)
|
| 16 |
+
[](https://github.com/huggingface/lerobot/blob/main/LICENSE)
|
| 17 |
+
[](https://pypi.org/project/lerobot/)
|
| 18 |
+
[](https://pypi.org/project/lerobot/)
|
| 19 |
+
[](https://github.com/huggingface/lerobot/tree/main/examples)
|
| 20 |
+
[](https://github.com/huggingface/lerobot/blob/main/CODE_OF_CONDUCT.md)
|
| 21 |
+
[](https://discord.gg/s3KuuzsPFb)
|
| 22 |
+
|
| 23 |
+
</div>
|
| 24 |
+
|
| 25 |
+
<h2 align="center">
|
| 26 |
+
<p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">
|
| 27 |
+
Build Your Own SO-100 Robot!</a></p>
|
| 28 |
+
</h2>
|
| 29 |
+
|
| 30 |
+
<div align="center">
|
| 31 |
+
<img src="media/so100/leader_follower.webp?raw=true" alt="SO-100 leader and follower arms" title="SO-100 leader and follower arms" width="50%">
|
| 32 |
+
|
| 33 |
+
<p><strong>Meet the SO-100 – Just $110 per arm!</strong></p>
|
| 34 |
+
<p>Train it in minutes with a few simple moves on your laptop.</p>
|
| 35 |
+
<p>Then sit back and watch your creation act autonomously! 🤯</p>
|
| 36 |
+
|
| 37 |
+
<p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">
|
| 38 |
+
Get the full SO-100 tutorial here.</a></p>
|
| 39 |
+
|
| 40 |
+
<p>Want to take it to the next level? Make your SO-100 mobile by building LeKiwi!</p>
|
| 41 |
+
<p>Check out the <a href="https://github.com/huggingface/lerobot/blob/main/examples/11_use_lekiwi.md">LeKiwi tutorial</a> and bring your robot to life on wheels.</p>
|
| 42 |
+
|
| 43 |
+
<img src="media/lekiwi/kiwi.webp?raw=true" alt="LeKiwi mobile robot" title="LeKiwi mobile robot" width="50%">
|
| 44 |
+
</div>
|
| 45 |
+
|
| 46 |
+
<br/>
|
| 47 |
+
|
| 48 |
+
<h3 align="center">
|
| 49 |
+
<p>LeRobot: State-of-the-art AI for real-world robotics</p>
|
| 50 |
+
</h3>
|
| 51 |
+
|
| 52 |
+
---
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier to entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models.
|
| 56 |
+
|
| 57 |
+
🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning.
|
| 58 |
+
|
| 59 |
+
🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulation environments to get started without assembling a robot. In the coming weeks, the plan is to add more and more support for real-world robotics on the most affordable and capable robots out there.
|
| 60 |
+
|
| 61 |
+
🤗 LeRobot hosts pretrained models and datasets on this Hugging Face community page: [huggingface.co/lerobot](https://huggingface.co/lerobot)
|
| 62 |
+
|
| 63 |
+
#### Examples of pretrained models on simulation environments
|
| 64 |
+
|
| 65 |
+
<table>
|
| 66 |
+
<tr>
|
| 67 |
+
<td><img src="media/gym/aloha_act.gif" width="100%" alt="ACT policy on ALOHA env"/></td>
|
| 68 |
+
<td><img src="media/gym/simxarm_tdmpc.gif" width="100%" alt="TDMPC policy on SimXArm env"/></td>
|
| 69 |
+
<td><img src="media/gym/pusht_diffusion.gif" width="100%" alt="Diffusion policy on PushT env"/></td>
|
| 70 |
+
</tr>
|
| 71 |
+
<tr>
|
| 72 |
+
<td align="center">ACT policy on ALOHA env</td>
|
| 73 |
+
<td align="center">TDMPC policy on SimXArm env</td>
|
| 74 |
+
<td align="center">Diffusion policy on PushT env</td>
|
| 75 |
+
</tr>
|
| 76 |
+
</table>
|
| 77 |
+
|
| 78 |
+
### Acknowledgment
|
| 79 |
+
|
| 80 |
+
- Thanks to Tony Zhao, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io).
|
| 81 |
+
- Thanks to Cheng Chi, Zhenjia Xu and colleagues for open sourcing Diffusion policy, Pusht environment and datasets, as well as UMI datasets. Ours are adapted from [Diffusion Policy](https://diffusion-policy.cs.columbia.edu) and [UMI Gripper](https://umi-gripper.github.io).
|
| 82 |
+
- Thanks to Nicklas Hansen, Yunhai Feng and colleagues for open sourcing TDMPC policy, Simxarm environments and datasets. Ours are adapted from [TDMPC](https://github.com/nicklashansen/tdmpc) and [FOWM](https://www.yunhaifeng.com/FOWM).
|
| 83 |
+
- Thanks to Antonio Loquercio and Ashish Kumar for their early support.
|
| 84 |
+
- Thanks to [Seungjae (Jay) Lee](https://sjlee.cc/), [Mahi Shafiullah](https://mahis.life/) and colleagues for open sourcing [VQ-BeT](https://sjlee.cc/vq-bet/) policy and helping us adapt the codebase to our repository. The policy is adapted from [VQ-BeT repo](https://github.com/jayLEE0301/vq_bet_official).
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
## Installation
|
| 88 |
+
|
| 89 |
+
Download our source code:
|
| 90 |
+
```bash
|
| 91 |
+
git clone https://github.com/huggingface/lerobot.git
|
| 92 |
+
cd lerobot
|
| 93 |
+
```
|
| 94 |
+
|
| 95 |
+
Create a virtual environment with Python 3.10 and activate it, e.g. with [`miniconda`](https://docs.anaconda.com/free/miniconda/index.html):
|
| 96 |
+
```bash
|
| 97 |
+
conda create -y -n lerobot python=3.10
|
| 98 |
+
conda activate lerobot
|
| 99 |
+
```
|
| 100 |
+
|
| 101 |
+
When using `miniconda`, install `ffmpeg` in your environment:
|
| 102 |
+
```bash
|
| 103 |
+
conda install ffmpeg -c conda-forge
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
Install 🤗 LeRobot:
|
| 107 |
+
```bash
|
| 108 |
+
pip install -e .
|
| 109 |
+
```
|
| 110 |
+
|
| 111 |
+
> **NOTE:** If you encounter build errors, you may need to install additional dependencies (`cmake`, `build-essential`, and `ffmpeg libs`). On Linux, run:
|
| 112 |
+
`sudo apt-get install cmake build-essential python-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev`. For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)
|
| 113 |
+
|
| 114 |
+
For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras:
|
| 115 |
+
- [aloha](https://github.com/huggingface/gym-aloha)
|
| 116 |
+
- [xarm](https://github.com/huggingface/gym-xarm)
|
| 117 |
+
- [pusht](https://github.com/huggingface/gym-pusht)
|
| 118 |
+
|
| 119 |
+
For instance, to install 🤗 LeRobot with aloha and pusht, use:
|
| 120 |
+
```bash
|
| 121 |
+
pip install -e ".[aloha, pusht]"
|
| 122 |
+
```
|
| 123 |
+
|
| 124 |
+
To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with
|
| 125 |
+
```bash
|
| 126 |
+
wandb login
|
| 127 |
+
```
|
| 128 |
+
|
| 129 |
+
(note: you will also need to enable WandB in the configuration. See below.)
|
| 130 |
+
|
| 131 |
+
## Walkthrough
|
| 132 |
+
|
| 133 |
+
```
|
| 134 |
+
.
|
| 135 |
+
├── examples # contains demonstration examples, start here to learn about LeRobot
|
| 136 |
+
| └── advanced # contains even more examples for those who have mastered the basics
|
| 137 |
+
├── lerobot
|
| 138 |
+
| ├── configs # contains config classes with all options that you can override in the command line
|
| 139 |
+
| ├── common # contains classes and utilities
|
| 140 |
+
| | ├── datasets # various datasets of human demonstrations: aloha, pusht, xarm
|
| 141 |
+
| | ├── envs # various sim environments: aloha, pusht, xarm
|
| 142 |
+
| | ├── policies # various policies: act, diffusion, tdmpc
|
| 143 |
+
| | ├── robot_devices # various real devices: dynamixel motors, opencv cameras, koch robots
|
| 144 |
+
| | └── utils # various utilities
|
| 145 |
+
| └── scripts # contains functions to execute via command line
|
| 146 |
+
| ├── eval.py # load policy and evaluate it on an environment
|
| 147 |
+
| ├── train.py # train a policy via imitation learning and/or reinforcement learning
|
| 148 |
+
| ├── control_robot.py # teleoperate a real robot, record data, run a policy
|
| 149 |
+
| ├── push_dataset_to_hub.py # convert your dataset into LeRobot dataset format and upload it to the Hugging Face hub
|
| 150 |
+
| └── visualize_dataset.py # load a dataset and render its demonstrations
|
| 151 |
+
├── outputs # contains results of scripts execution: logs, videos, model checkpoints
|
| 152 |
+
└── tests # contains pytest utilities for continuous integration
|
| 153 |
+
```
|
| 154 |
+
|
| 155 |
+
### Visualize datasets
|
| 156 |
+
|
| 157 |
+
Check out [example 1](./examples/1_load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically downloads data from the Hugging Face hub.
|
| 158 |
+
|
| 159 |
+
You can also locally visualize episodes from a dataset on the hub by executing our script from the command line:
|
| 160 |
+
```bash
|
| 161 |
+
python lerobot/scripts/visualize_dataset.py \
|
| 162 |
+
--repo-id lerobot/pusht \
|
| 163 |
+
--episode-index 0
|
| 164 |
+
```
|
| 165 |
+
|
| 166 |
+
or from a dataset in a local folder with the `root` option and the `--local-files-only` (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`)
|
| 167 |
+
```bash
|
| 168 |
+
python lerobot/scripts/visualize_dataset.py \
|
| 169 |
+
--repo-id lerobot/pusht \
|
| 170 |
+
--root ./my_local_data_dir \
|
| 171 |
+
--local-files-only 1 \
|
| 172 |
+
--episode-index 0
|
| 173 |
+
```
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
It will open `rerun.io` and display the camera streams, robot states and actions, like this:
|
| 177 |
+
|
| 178 |
+
https://github-production-user-asset-6210df.s3.amazonaws.com/4681518/328035972-fd46b787-b532-47e2-bb6f-fd536a55a7ed.mov?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVCODYLSA53PQK4ZA%2F20240505%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240505T172924Z&X-Amz-Expires=300&X-Amz-Signature=d680b26c532eeaf80740f08af3320d22ad0b8a4e4da1bcc4f33142c15b509eda&X-Amz-SignedHeaders=host&actor_id=24889239&key_id=0&repo_id=748713144
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
Our script can also visualize datasets stored on a distant server. See `python lerobot/scripts/visualize_dataset.py --help` for more instructions.
|
| 182 |
+
|
| 183 |
+
### The `LeRobotDataset` format
|
| 184 |
+
|
| 185 |
+
A dataset in `LeRobotDataset` format is very simple to use. It can be loaded from a repository on the Hugging Face hub or a local folder simply with e.g. `dataset = LeRobotDataset("lerobot/aloha_static_coffee")` and can be indexed into like any Hugging Face and PyTorch dataset. For instance `dataset[0]` will retrieve a single temporal frame from the dataset containing observation(s) and an action as PyTorch tensors ready to be fed to a model.
|
| 186 |
+
|
| 187 |
+
A specificity of `LeRobotDataset` is that, rather than retrieving a single frame by its index, we can retrieve several frames based on their temporal relationship with the indexed frame, by setting `delta_timestamps` to a list of relative times with respect to the indexed frame. For example, with `delta_timestamps = {"observation.image": [-1, -0.5, -0.2, 0]}` one can retrieve, for a given index, 4 frames: 3 "previous" frames 1 second, 0.5 seconds, and 0.2 seconds before the indexed frame, and the indexed frame itself (corresponding to the 0 entry). See example [1_load_lerobot_dataset.py](examples/1_load_lerobot_dataset.py) for more details on `delta_timestamps`.
|
| 188 |
+
|
| 189 |
+
Under the hood, the `LeRobotDataset` format makes use of several ways to serialize data which can be useful to understand if you plan to work more closely with this format. We tried to make a flexible yet simple dataset format that would cover most types of features and specificities present in reinforcement learning and robotics, in simulation and in the real world, with a focus on cameras and robot states but easily extended to other types of sensory inputs as long as they can be represented by a tensor.
|
| 190 |
+
|
| 191 |
+
Here are the important details and internal structure organization of a typical `LeRobotDataset` instantiated with `dataset = LeRobotDataset("lerobot/aloha_static_coffee")`. The exact features will change from dataset to dataset but not the main aspects:
|
| 192 |
+
|
| 193 |
+
```
|
| 194 |
+
dataset attributes:
|
| 195 |
+
├ hf_dataset: a Hugging Face dataset (backed by Arrow/parquet). Typical features example:
|
| 196 |
+
│ ├ observation.images.cam_high (VideoFrame):
|
| 197 |
+
│ │ VideoFrame = {'path': path to a mp4 video, 'timestamp' (float32): timestamp in the video}
|
| 198 |
+
│  ├ observation.state (list of float32): positions of the arm joints (for instance)
|
| 199 |
+
│ ... (more observations)
|
| 200 |
+
│  ├ action (list of float32): goal positions of the arm joints (for instance)
|
| 201 |
+
│ ├ episode_index (int64): index of the episode for this sample
|
| 202 |
+
│ ├ frame_index (int64): index of the frame for this sample in the episode ; starts at 0 for each episode
|
| 203 |
+
│ ├ timestamp (float32): timestamp in the episode
|
| 204 |
+
│  ├ next.done (bool): indicates the end of an episode; True for the last frame in each episode
|
| 205 |
+
│ └ index (int64): general index in the whole dataset
|
| 206 |
+
├ episode_data_index: contains 2 tensors with the start and end indices of each episode
|
| 207 |
+
│ ├ from (1D int64 tensor): first frame index for each episode — shape (num episodes,) starts with 0
|
| 208 |
+
│ └ to: (1D int64 tensor): last frame index for each episode — shape (num episodes,)
|
| 209 |
+
├ stats: a dictionary of statistics (max, mean, min, std) for each feature in the dataset, for instance
|
| 210 |
+
│ ├ observation.images.cam_high: {'max': tensor with same number of dimensions (e.g. `(c, 1, 1)` for images, `(c,)` for states), etc.}
|
| 211 |
+
│ ...
|
| 212 |
+
├ info: a dictionary of metadata on the dataset
|
| 213 |
+
│ ├ codebase_version (str): this is to keep track of the codebase version the dataset was created with
|
| 214 |
+
│ ├ fps (float): frame per second the dataset is recorded/synchronized to
|
| 215 |
+
│ ├ video (bool): indicates if frames are encoded in mp4 video files to save space or stored as png files
|
| 216 |
+
│ └ encoding (dict): if video, this documents the main options that were used with ffmpeg to encode the videos
|
| 217 |
+
├ videos_dir (Path): where the mp4 videos or png images are stored/accessed
|
| 218 |
+
└ camera_keys (list of string): the keys to access camera features in the item returned by the dataset (e.g. `["observation.images.cam_high", ...]`)
|
| 219 |
+
```
|
| 220 |
+
|
| 221 |
+
A `LeRobotDataset` is serialised using several widespread file formats for each of its parts, namely:
|
| 222 |
+
- hf_dataset stored using Hugging Face datasets library serialization to parquet
|
| 223 |
+
- videos are stored in mp4 format to save space
|
| 224 |
+
- metadata are stored in plain json/jsonl files
|
| 225 |
+
|
| 226 |
+
Dataset can be uploaded/downloaded from the HuggingFace hub seamlessly. To work on a local dataset, you can specify its location with the `root` argument if it's not in the default `~/.cache/huggingface/lerobot` location.
|
| 227 |
+
|
| 228 |
+
### Evaluate a pretrained policy
|
| 229 |
+
|
| 230 |
+
Check out [example 2](./examples/2_evaluate_pretrained_policy.py) that illustrates how to download a pretrained policy from Hugging Face hub, and run an evaluation on its corresponding environment.
|
| 231 |
+
|
| 232 |
+
We also provide a more capable script to parallelize the evaluation over multiple environments during the same rollout. Here is an example with a pretrained model hosted on [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht):
|
| 233 |
+
```bash
|
| 234 |
+
python lerobot/scripts/eval.py \
|
| 235 |
+
--policy.path=lerobot/diffusion_pusht \
|
| 236 |
+
--env.type=pusht \
|
| 237 |
+
--eval.batch_size=10 \
|
| 238 |
+
--eval.n_episodes=10 \
|
| 239 |
+
--policy.use_amp=false \
|
| 240 |
+
--policy.device=cuda
|
| 241 |
+
```
|
| 242 |
+
|
| 243 |
+
Note: After training your own policy, you can re-evaluate the checkpoints with:
|
| 244 |
+
|
| 245 |
+
```bash
|
| 246 |
+
python lerobot/scripts/eval.py --policy.path={OUTPUT_DIR}/checkpoints/last/pretrained_model
|
| 247 |
+
```
|
| 248 |
+
|
| 249 |
+
See `python lerobot/scripts/eval.py --help` for more instructions.
|
| 250 |
+
|
| 251 |
+
### Train your own policy
|
| 252 |
+
|
| 253 |
+
Check out [example 3](./examples/3_train_policy.py) that illustrates how to train a model using our core library in Python, and [example 4](./examples/4_train_policy_with_script.md) that shows how to use our training script from the command line.
|
| 254 |
+
|
| 255 |
+
To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding `--wandb.enable=true`.
|
| 256 |
+
|
| 257 |
+
A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](./examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explanation of some commonly used metrics in logs.
|
| 258 |
+
|
| 259 |
+

|
| 260 |
+
|
| 261 |
+
Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `--eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `python lerobot/scripts/eval.py --help` for more instructions.
|
| 262 |
+
|
| 263 |
+
#### Reproduce state-of-the-art (SOTA)
|
| 264 |
+
|
| 265 |
+
We provide some pretrained policies on our [hub page](https://huggingface.co/lerobot) that can achieve state-of-the-art performances.
|
| 266 |
+
You can reproduce their training by loading the config from their run. Simply running:
|
| 267 |
+
```bash
|
| 268 |
+
python lerobot/scripts/train.py --config_path=lerobot/diffusion_pusht
|
| 269 |
+
```
|
| 270 |
+
reproduces SOTA results for Diffusion Policy on the PushT task.
|
| 271 |
+
|
| 272 |
+
## Contribute
|
| 273 |
+
|
| 274 |
+
If you would like to contribute to 🤗 LeRobot, please check out our [contribution guide](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md).
|
| 275 |
+
|
| 276 |
+
<!-- ### Add a new dataset
|
| 277 |
+
|
| 278 |
+
To add a dataset to the hub, you need to login using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
|
| 279 |
+
```bash
|
| 280 |
+
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
|
| 281 |
+
```
|
| 282 |
+
|
| 283 |
+
Then point to your raw dataset folder (e.g. `data/aloha_static_pingpong_test_raw`), and push your dataset to the hub with:
|
| 284 |
+
```bash
|
| 285 |
+
python lerobot/scripts/push_dataset_to_hub.py \
|
| 286 |
+
--raw-dir data/aloha_static_pingpong_test_raw \
|
| 287 |
+
--out-dir data \
|
| 288 |
+
--repo-id lerobot/aloha_static_pingpong_test \
|
| 289 |
+
--raw-format aloha_hdf5
|
| 290 |
+
```
|
| 291 |
+
|
| 292 |
+
See `python lerobot/scripts/push_dataset_to_hub.py --help` for more instructions.
|
| 293 |
+
|
| 294 |
+
If your dataset format is not supported, implement your own in `lerobot/common/datasets/push_dataset_to_hub/${raw_format}_format.py` by copying examples like [pusht_zarr](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py), [umi_zarr](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py), [aloha_hdf5](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py), or [xarm_pkl](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py). -->
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
### Add a pretrained policy
|
| 298 |
+
|
| 299 |
+
Once you have trained a policy you may upload it to the Hugging Face hub using a hub id that looks like `${hf_user}/${repo_name}` (e.g. [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht)).
|
| 300 |
+
|
| 301 |
+
You first need to find the checkpoint folder located inside your experiment directory (e.g. `outputs/train/2024-05-05/20-21-12_aloha_act_default/checkpoints/002500`). Within that there is a `pretrained_model` directory which should contain:
|
| 302 |
+
- `config.json`: A serialized version of the policy configuration (following the policy's dataclass config).
|
| 303 |
+
- `model.safetensors`: A set of `torch.nn.Module` parameters, saved in [Hugging Face Safetensors](https://huggingface.co/docs/safetensors/index) format.
|
| 304 |
+
- `train_config.json`: A consolidated configuration containing all parameters used for training. The policy configuration should match `config.json` exactly. This is useful for anyone who wants to evaluate your policy or for reproducibility.
|
| 305 |
+
|
| 306 |
+
To upload these to the hub, run the following:
|
| 307 |
+
```bash
|
| 308 |
+
huggingface-cli upload ${hf_user}/${repo_name} path/to/pretrained_model
|
| 309 |
+
```
|
| 310 |
+
|
| 311 |
+
See [eval.py](https://github.com/huggingface/lerobot/blob/main/lerobot/scripts/eval.py) for an example of how other people may use your policy.
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
### Improve your code with profiling
|
| 315 |
+
|
| 316 |
+
An example of a code snippet to profile the evaluation of a policy:
|
| 317 |
+
```python
|
| 318 |
+
from torch.profiler import profile, record_function, ProfilerActivity
|
| 319 |
+
|
| 320 |
+
def trace_handler(prof):
|
| 321 |
+
prof.export_chrome_trace(f"tmp/trace_schedule_{prof.step_num}.json")
|
| 322 |
+
|
| 323 |
+
with profile(
|
| 324 |
+
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
|
| 325 |
+
schedule=torch.profiler.schedule(
|
| 326 |
+
wait=2,
|
| 327 |
+
warmup=2,
|
| 328 |
+
active=3,
|
| 329 |
+
),
|
| 330 |
+
on_trace_ready=trace_handler
|
| 331 |
+
) as prof:
|
| 332 |
+
with record_function("eval_policy"):
|
| 333 |
+
for i in range(num_episodes):
|
| 334 |
+
prof.step()
|
| 335 |
+
# insert code to profile, potentially whole body of eval_policy function
|
| 336 |
+
```
|
| 337 |
+
|
| 338 |
+
## Citation
|
| 339 |
+
|
| 340 |
+
If you want, you can cite this work with:
|
| 341 |
+
```bibtex
|
| 342 |
+
@misc{cadene2024lerobot,
|
| 343 |
+
author = {Cadene, Remi and Alibert, Simon and Soare, Alexander and Gallouedec, Quentin and Zouitine, Adil and Wolf, Thomas},
|
| 344 |
+
title = {LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch},
|
| 345 |
+
howpublished = "\url{https://github.com/huggingface/lerobot}",
|
| 346 |
+
year = {2024}
|
| 347 |
+
}
|
| 348 |
+
```
|
| 349 |
+
|
| 350 |
+
Additionally, if you are using any of the particular policy architecture, pretrained models, or datasets, it is recommended to cite the original authors of the work as they appear below:
|
| 351 |
+
|
| 352 |
+
- [Diffusion Policy](https://diffusion-policy.cs.columbia.edu)
|
| 353 |
+
```bibtex
|
| 354 |
+
@article{chi2024diffusionpolicy,
|
| 355 |
+
author = {Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song},
|
| 356 |
+
title ={Diffusion Policy: Visuomotor Policy Learning via Action Diffusion},
|
| 357 |
+
journal = {The International Journal of Robotics Research},
|
| 358 |
+
year = {2024},
|
| 359 |
+
}
|
| 360 |
+
```
|
| 361 |
+
- [ACT or ALOHA](https://tonyzhaozh.github.io/aloha)
|
| 362 |
+
```bibtex
|
| 363 |
+
@article{zhao2023learning,
|
| 364 |
+
title={Learning fine-grained bimanual manipulation with low-cost hardware},
|
| 365 |
+
author={Zhao, Tony Z and Kumar, Vikash and Levine, Sergey and Finn, Chelsea},
|
| 366 |
+
journal={arXiv preprint arXiv:2304.13705},
|
| 367 |
+
year={2023}
|
| 368 |
+
}
|
| 369 |
+
```
|
| 370 |
+
|
| 371 |
+
- [TDMPC](https://www.nicklashansen.com/td-mpc/)
|
| 372 |
+
|
| 373 |
+
```bibtex
|
| 374 |
+
@inproceedings{Hansen2022tdmpc,
|
| 375 |
+
title={Temporal Difference Learning for Model Predictive Control},
|
| 376 |
+
author={Nicklas Hansen and Xiaolong Wang and Hao Su},
|
| 377 |
+
booktitle={ICML},
|
| 378 |
+
year={2022}
|
| 379 |
+
}
|
| 380 |
+
```
|
| 381 |
+
|
| 382 |
+
- [VQ-BeT](https://sjlee.cc/vq-bet/)
|
| 383 |
+
```bibtex
|
| 384 |
+
@article{lee2024behavior,
|
| 385 |
+
title={Behavior generation with latent actions},
|
| 386 |
+
author={Lee, Seungjae and Wang, Yibin and Etukuru, Haritheja and Kim, H Jin and Shafiullah, Nur Muhammad Mahi and Pinto, Lerrel},
|
| 387 |
+
journal={arXiv preprint arXiv:2403.03181},
|
| 388 |
+
year={2024}
|
| 389 |
+
}
|
| 390 |
+
```
|
| 391 |
+
## Star History
|
| 392 |
+
|
| 393 |
+
[](https://star-history.com/#huggingface/lerobot&Timeline)
|
project/ManiSkill3/src/maniskill3_environment/lerobot/pyproject.toml
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
[project.urls]
|
| 16 |
+
homepage = "https://github.com/huggingface/lerobot"
|
| 17 |
+
issues = "https://github.com/huggingface/lerobot/issues"
|
| 18 |
+
discord = "https://discord.gg/s3KuuzsPFb"
|
| 19 |
+
|
| 20 |
+
[project]
|
| 21 |
+
name = "lerobot"
|
| 22 |
+
version = "0.1.0"
|
| 23 |
+
description = "🤗 LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch"
|
| 24 |
+
authors = [
|
| 25 |
+
{ name = "Rémi Cadène", email = "re.cadene@gmail.com" },
|
| 26 |
+
{ name = "Simon Alibert", email = "alibert.sim@gmail.com" },
|
| 27 |
+
{ name = "Alexander Soare", email = "alexander.soare159@gmail.com" },
|
| 28 |
+
{ name = "Quentin Gallouédec", email = "quentin.gallouedec@ec-lyon.fr" },
|
| 29 |
+
{ name = "Adil Zouitine", email = "adilzouitinegm@gmail.com" },
|
| 30 |
+
{ name = "Thomas Wolf", email = "thomaswolfcontact@gmail.com" },
|
| 31 |
+
{ name = "Steven Palma", email = "imstevenpmwork@ieee.org" },
|
| 32 |
+
]
|
| 33 |
+
readme = "README.md"
|
| 34 |
+
license = { text = "Apache-2.0" }
|
| 35 |
+
requires-python = ">=3.10"
|
| 36 |
+
keywords = ["robotics", "deep learning", "pytorch"]
|
| 37 |
+
classifiers = [
|
| 38 |
+
"Development Status :: 3 - Alpha",
|
| 39 |
+
"Intended Audience :: Developers",
|
| 40 |
+
"Intended Audience :: Education",
|
| 41 |
+
"Intended Audience :: Science/Research",
|
| 42 |
+
"Topic :: Software Development :: Build Tools",
|
| 43 |
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
| 44 |
+
"License :: OSI Approved :: Apache Software License",
|
| 45 |
+
"Programming Language :: Python :: 3.10",
|
| 46 |
+
]
|
| 47 |
+
dependencies = [
|
| 48 |
+
"cmake>=3.29.0.1",
|
| 49 |
+
"datasets>=2.19.0",
|
| 50 |
+
"deepdiff>=7.0.1",
|
| 51 |
+
"diffusers>=0.27.2",
|
| 52 |
+
"draccus>=0.10.0",
|
| 53 |
+
"einops>=0.8.0",
|
| 54 |
+
"flask>=3.0.3",
|
| 55 |
+
"gdown>=5.1.0",
|
| 56 |
+
"gymnasium==0.29.1", # TODO(rcadene, aliberts): Make gym 1.0.0 work
|
| 57 |
+
"h5py>=3.10.0",
|
| 58 |
+
"huggingface-hub[hf-transfer,cli]>=0.27.1 ; python_version < '4.0'",
|
| 59 |
+
"imageio[ffmpeg]>=2.34.0",
|
| 60 |
+
"jsonlines>=4.0.0",
|
| 61 |
+
"numba>=0.59.0",
|
| 62 |
+
"omegaconf>=2.3.0",
|
| 63 |
+
"opencv-python-headless>=4.9.0",
|
| 64 |
+
"packaging>=24.2",
|
| 65 |
+
"av>=12.0.5",
|
| 66 |
+
"pymunk>=6.6.0",
|
| 67 |
+
"pynput>=1.7.7",
|
| 68 |
+
"pyzmq>=26.2.1",
|
| 69 |
+
"rerun-sdk>=0.21.0",
|
| 70 |
+
"termcolor>=2.4.0",
|
| 71 |
+
"torch>=2.2.1",
|
| 72 |
+
"torchcodec>=0.2.1; sys_platform != 'win32' and (sys_platform != 'linux' or (platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l')) and (sys_platform != 'darwin' or platform_machine != 'x86_64')",
|
| 73 |
+
"torchvision>=0.21.0",
|
| 74 |
+
"wandb>=0.16.3",
|
| 75 |
+
"zarr>=2.17.0",
|
| 76 |
+
]
|
| 77 |
+
|
| 78 |
+
[project.optional-dependencies]
|
| 79 |
+
aloha = ["gym-aloha>=0.1.1 ; python_version < '4.0'"]
|
| 80 |
+
dev = ["pre-commit>=3.7.0", "debugpy>=1.8.1"]
|
| 81 |
+
dora = [
|
| 82 |
+
"gym-dora @ git+https://github.com/dora-rs/dora-lerobot.git#subdirectory=gym_dora ; python_version < '4.0'",
|
| 83 |
+
]
|
| 84 |
+
dynamixel = ["dynamixel-sdk>=3.7.31", "pynput>=1.7.7"]
|
| 85 |
+
feetech = ["feetech-servo-sdk>=1.0.0", "pynput>=1.7.7"]
|
| 86 |
+
intelrealsense = ["pyrealsense2>=2.55.1.6486 ; sys_platform != 'darwin'"]
|
| 87 |
+
pi0 = ["transformers>=4.48.0"]
|
| 88 |
+
pusht = ["gym-pusht>=0.1.5 ; python_version < '4.0'"]
|
| 89 |
+
stretch = [
|
| 90 |
+
"hello-robot-stretch-body>=0.7.27 ; python_version < '4.0' and sys_platform == 'linux'",
|
| 91 |
+
"pyrender @ git+https://github.com/mmatl/pyrender.git ; sys_platform == 'linux'",
|
| 92 |
+
"pyrealsense2>=2.55.1.6486 ; sys_platform != 'darwin'",
|
| 93 |
+
"pynput>=1.7.7",
|
| 94 |
+
]
|
| 95 |
+
test = ["pytest>=8.1.0", "pytest-cov>=5.0.0", "pyserial>=3.5"]
|
| 96 |
+
umi = ["imagecodecs>=2024.1.1"]
|
| 97 |
+
video_benchmark = ["scikit-image>=0.23.2", "pandas>=2.2.2"]
|
| 98 |
+
xarm = ["gym-xarm>=0.1.1 ; python_version < '4.0'"]
|
| 99 |
+
|
| 100 |
+
[tool.poetry]
|
| 101 |
+
requires-poetry = ">=2.1"
|
| 102 |
+
|
| 103 |
+
[tool.ruff]
|
| 104 |
+
line-length = 110
|
| 105 |
+
target-version = "py310"
|
| 106 |
+
exclude = ["tests/artifacts/**/*.safetensors"]
|
| 107 |
+
|
| 108 |
+
[tool.ruff.lint]
|
| 109 |
+
select = ["E4", "E7", "E9", "F", "I", "N", "B", "C4", "SIM"]
|
| 110 |
+
|
| 111 |
+
[tool.bandit]
|
| 112 |
+
exclude_dirs = [
|
| 113 |
+
"tests",
|
| 114 |
+
"benchmarks",
|
| 115 |
+
"lerobot/common/datasets/push_dataset_to_hub",
|
| 116 |
+
"lerobot/common/datasets/v2/convert_dataset_v1_to_v2",
|
| 117 |
+
"lerobot/common/policies/pi0/conversion_scripts",
|
| 118 |
+
"lerobot/scripts/push_dataset_to_hub.py",
|
| 119 |
+
]
|
| 120 |
+
skips = ["B101", "B311", "B404", "B603"]
|
| 121 |
+
|
| 122 |
+
[tool.typos]
|
| 123 |
+
default.extend-ignore-re = [
|
| 124 |
+
"(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # spellchecker:disable-line
|
| 125 |
+
"(?s)(#|//)\\s*spellchecker:off.*?\\n\\s*(#|//)\\s*spellchecker:on", # spellchecker:<on|off>
|
| 126 |
+
]
|
| 127 |
+
default.extend-ignore-identifiers-re = [
|
| 128 |
+
# Add individual words here to ignore them
|
| 129 |
+
"2nd",
|
| 130 |
+
"pn",
|
| 131 |
+
"ser",
|
| 132 |
+
"ein",
|
| 133 |
+
]
|
| 134 |
+
|
| 135 |
+
[build-system]
|
| 136 |
+
requires = ["poetry-core"]
|
| 137 |
+
build-backend = "poetry.core.masonry.api"
|