Seohong Park committed
Commit · df1b471
Parent(s): none
Initial commit
This view is limited to 50 files because the commit contains too many changes.
- .gitattributes +3 -0
- .gitignore +8 -0
- LICENSE +21 -0
- README.md +339 -0
- assets/env_teaser.png +3 -0
- assets/ogbench.svg +17 -0
- data_gen_scripts/commands.sh +194 -0
- data_gen_scripts/generate_antsoccer.py +225 -0
- data_gen_scripts/generate_locomaze.py +212 -0
- data_gen_scripts/generate_manipspace.py +204 -0
- data_gen_scripts/generate_powderworld.py +111 -0
- data_gen_scripts/main_sac.py +201 -0
- data_gen_scripts/online_env_utils.py +46 -0
- data_gen_scripts/viz_utils.py +52 -0
- impls/agents/__init__.py +17 -0
- impls/agents/crl.py +338 -0
- impls/agents/gcbc.py +170 -0
- impls/agents/gciql.py +309 -0
- impls/agents/gcivl.py +255 -0
- impls/agents/hiql.py +355 -0
- impls/agents/qrl.py +328 -0
- impls/agents/sac.py +217 -0
- impls/hyperparameters.sh +0 -0
- impls/main.py +163 -0
- impls/requirements.txt +8 -0
- impls/utils/__init__.py +0 -0
- impls/utils/datasets.py +397 -0
- impls/utils/encoders.py +144 -0
- impls/utils/env_utils.py +117 -0
- impls/utils/evaluation.py +117 -0
- impls/utils/flax_utils.py +202 -0
- impls/utils/log_utils.py +146 -0
- impls/utils/networks.py +517 -0
- ogbench/__init__.py +15 -0
- ogbench/locomaze/__init__.py +241 -0
- ogbench/locomaze/ant.py +119 -0
- ogbench/locomaze/assets/ant.xml +96 -0
- ogbench/locomaze/assets/humanoid.xml +212 -0
- ogbench/locomaze/assets/point.xml +41 -0
- ogbench/locomaze/humanoid.py +174 -0
- ogbench/locomaze/maze.py +650 -0
- ogbench/locomaze/point.py +112 -0
- ogbench/manipspace/__init__.py +164 -0
- ogbench/manipspace/controllers/__init__.py +3 -0
- ogbench/manipspace/controllers/diff_ik.py +115 -0
- ogbench/manipspace/descriptions/button_inner.xml +26 -0
- ogbench/manipspace/descriptions/button_outer.xml +39 -0
- ogbench/manipspace/descriptions/buttons.xml +84 -0
- ogbench/manipspace/descriptions/cube.xml +19 -0
- ogbench/manipspace/descriptions/cube_inner.xml +12 -0
.gitattributes
ADDED
@@ -0,0 +1,3 @@
*.npz filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.stl filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,8 @@
__pycache__/
dist/
*.py[cod]
*$py.class
*.egg-info/
.DS_Store
.idea/
.ruff_cache/
LICENSE
ADDED
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2024 OGBench Authors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
README.md
ADDED
@@ -0,0 +1,339 @@
<div align="center">
<img src="assets/ogbench.svg" width="300px"/>

<div id="user-content-toc">
<ul align="center" style="list-style: none;">
<summary>
<h1>OGBench: Benchmarking Offline Goal-Conditioned RL</h1>
</summary>
</ul>
</div>

<a href="https://www.python.org/"><img src="https://img.shields.io/badge/Python-3.8%2B-598BE7?style=for-the-badge&logo=python&logoColor=598BE7&labelColor=F0F0F0"/></a>
<a href="https://pypi.org/project/ogbench/"><img src="https://img.shields.io/pypi/v/ogbench?style=for-the-badge&labelColor=F0F0F0&color=598BE7"/></a>
<a href="https://docs.astral.sh/ruff/"><img src="https://img.shields.io/badge/Code style-ruff-598BE7?style=for-the-badge&labelColor=F0F0F0"/></a>
<a href="https://github.com/seohongpark/ogbench/blob/master/LICENSE"><img src="https://img.shields.io/badge/License-MIT-598BE7?style=for-the-badge&labelColor=F0F0F0"/></a>


![image](assets/env_teaser.png)

<div id="toc">
<ul align="center" style="list-style: none;">
<summary>
<h2><a href="https://seohong.me/projects/ogbench/">Paper</a>   <a href="https://seohong.me/projects/ogbench/">Project page</a></h2>
</summary>
</ul>
</div>


</div>

# Overview

OGBench is a benchmark designed to facilitate algorithms research in offline goal-conditioned reinforcement learning (RL),
offline unsupervised RL, and offline RL.
See the [project page](https://seohong.me/projects/ogbench/) for videos and more details about the environments, tasks, and datasets.

### Features

- **8 types** of cool, realistic, diverse environments ([videos](https://seohong.me/projects/ogbench/)):
  - **Locomotion**: PointMaze, AntMaze, HumanoidMaze, and AntSoccer.
  - **Manipulation**: Cube, Scene, and Puzzle.
  - **Drawing**: Powderworld.
- **85 datasets** covering various challenges in offline goal-conditioned RL.
- Support for both **pixel-based** and **state-based** observations.
- **Clean, well-tuned reference implementations** of 6 offline goal-conditioned RL algorithms
  (GCBC, GCIVL, GCIQL, QRL, CRL, and HIQL) based on Jax.
- **Fully reproducible** scripts for [the entire benchmark table](impls/hyperparameters.sh)
  and [datasets](data_gen_scripts/commands.sh).
- `pip`-installable, easy-to-use APIs based on Gymnasium.
- No major dependencies other than MuJoCo.


# How to use the OGBench environments

### Installation

OGBench can be easily installed via PyPI:

```shell
pip install ogbench
```

It requires Python 3.8+ and has only three dependencies: `mujoco >= 3.1.6`, `dm_control >= 1.0.20`,
and `gymnasium`.

+
### Quick start
|
| 68 |
+
|
| 69 |
+
After installing OGBench, you can create an environment and datasets using `ogbench.make_env_and_datasets`.
|
| 70 |
+
The environment follows the [Gymnasium](https://gymnasium.farama.org/) interface.
|
| 71 |
+
The datasets will be automatically downloaded during the first run.
|
| 72 |
+
|
| 73 |
+
Here is an example of how to use OGBench:
|
| 74 |
+
|
| 75 |
+
```python
|
| 76 |
+
import ogbench
|
| 77 |
+
|
| 78 |
+
# Make an environment and datasets (they will be automatically downloaded).
|
| 79 |
+
dataset_name = 'antmaze-large-navigate-v0'
|
| 80 |
+
env, train_dataset, val_dataset = ogbench.make_env_and_datasets(dataset_name)
|
| 81 |
+
|
| 82 |
+
# Train your offline goal-conditioned RL agent on the dataset.
|
| 83 |
+
# ...
|
| 84 |
+
|
| 85 |
+
# Evaluate the agent.
|
| 86 |
+
for task_id in [1, 2, 3, 4, 5]:
|
| 87 |
+
# Reset the environment and set the evaluation task.
|
| 88 |
+
ob, info = env.reset(
|
| 89 |
+
options=dict(
|
| 90 |
+
task_id=task_id, # Set the evaluation task. Each environment provides five
|
| 91 |
+
# evaluation goals, and `task_id` must be in [1, 5].
|
| 92 |
+
render_goal=True, # Set to `True` to get a rendered goal image (optional).
|
| 93 |
+
)
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
goal = info['goal'] # Get the goal observation to pass to the agent.
|
| 97 |
+
goal_rendered = info['goal_rendered'] # Get the rendered goal image (optional).
|
| 98 |
+
|
| 99 |
+
done = False
|
| 100 |
+
while not done:
|
| 101 |
+
action = env.action_space.sample() # Replace this with your agent's action.
|
| 102 |
+
ob, reward, terminated, truncated, info = env.step(action) # Gymnasium-style step.
|
| 103 |
+
# If the agent reaches the goal, `terminated` will be `True`. If the episode length
|
| 104 |
+
# exceeds the maximum length without reaching the goal, `truncated` will be `True`.
|
| 105 |
+
done = terminated or truncated
|
| 106 |
+
frame = env.render() # Render the current frame (optional).
|
| 107 |
+
|
| 108 |
+
success = info['success'] # Whether the agent reached the goal (0 or 1).
|
| 109 |
+
# `terminated` also indicates this.
|
| 110 |
+
```
|
| 111 |
+
|
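For benchmarking, the per-task `success` flags are typically aggregated into a single success rate. Below is a minimal sketch of this aggregation built on the loop above; `evaluate_episode` is a hypothetical helper that stands in for your agent's rollout logic:

```python
import numpy as np

def evaluate_episode(env, task_id):
    """Roll out one episode on a given evaluation task and return its success flag.

    Hypothetical helper for illustration; replace the random policy with your agent.
    """
    ob, info = env.reset(options=dict(task_id=task_id))
    done = False
    while not done:
        action = env.action_space.sample()  # Replace with your agent's action.
        ob, reward, terminated, truncated, info = env.step(action)
        done = terminated or truncated
    return info['success']

# Average over all five evaluation tasks (and, ideally, multiple episodes per task).
successes = [evaluate_episode(env, task_id) for task_id in [1, 2, 3, 4, 5]]
print(f'Success rate: {np.mean(successes):.2f}')
```
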
You can find a complete example of a training script for offline goal-conditioned RL in the `impls` directory.
See the next section for more details on the reference implementations.

### Dataset APIs

OGBench provides several APIs to download and load datasets.
The simplest way is to use `ogbench.make_env_and_datasets` as shown above,
which creates an environment and loads training and validation datasets.
The datasets will automatically be downloaded to the directory specified by `dataset_dir` during the first run
(default: `~/.ogbench/data`).
`ogbench.make_env_and_datasets` also provides the `compact_dataset` option,
which returns a dataset without the `next_observations` field (see below).
For example:
```python
import ogbench

# Make an environment and load datasets.
dataset_name = 'antmaze-large-navigate-v0'
env, train_dataset, val_dataset = ogbench.make_env_and_datasets(
    dataset_name,  # Dataset name.
    dataset_dir='~/.ogbench/data',  # Directory to save datasets (optional).
    compact_dataset=False,  # Whether to use a compact dataset (optional; see below).
)

# Assume each dataset trajectory has a length of 4, and (s0, a0, s1), (s1, a1, s2),
# (s2, a2, s3), (s3, a3, s4) are the transition tuples.
# If `compact_dataset` is `False`, the dataset will have the following structure:
#                       |<- traj 1 ->|  |<- traj 2 ->|  ...
# ----------------------------------------------------------
# 'observations'     : [s0, s1, s2, s3, s0, s1, s2, s3, ...]
# 'actions'          : [a0, a1, a2, a3, a0, a1, a2, a3, ...]
# 'next_observations': [s1, s2, s3, s4, s1, s2, s3, s4, ...]
# 'terminals'        : [ 0,  0,  0,  1,  0,  0,  0,  1, ...]

# If `compact_dataset` is `True`, the dataset will have the following structure, where the
# `next_observations` field is omitted. Instead, it includes a `valids` field indicating
# whether the next observation is valid:
#                 |<--- traj 1 --->|  |<--- traj 2 --->|  ...
# ------------------------------------------------------------------
# 'observations': [s0, s1, s2, s3, s4, s0, s1, s2, s3, s4, ...]
# 'actions'     : [a0, a1, a2, a3, a4, a0, a1, a2, a3, a4, ...]
# 'terminals'   : [ 0,  0,  0,  1,  1,  0,  0,  0,  1,  1, ...]
# 'valids'      : [ 1,  1,  1,  1,  0,  1,  1,  1,  1,  0, ...]
```

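If your training code expects a `next_observations` field but you loaded a compact dataset, it can be reconstructed from `observations` and `valids`: the next observation of step `i` is `observations[i + 1]`, and steps with `valids[i] == 0` are trajectory boundaries that must be dropped. A minimal sketch, assuming the compact layout above:

```python
import numpy as np

def uncompact(dataset):
    """Rebuild `next_observations` from a compact dataset (sketch; assumes the
    compact layout described above)."""
    valids = dataset['valids'].astype(bool)
    # For each step, the candidate next observation is simply the following row.
    next_observations = np.roll(dataset['observations'], -1, axis=0)
    # Keep only steps whose next observation belongs to the same trajectory.
    return dict(
        observations=dataset['observations'][valids],
        actions=dataset['actions'][valids],
        next_observations=next_observations[valids],
        terminals=dataset['terminals'][valids],
    )
```
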
To download multiple datasets at once, you can use `ogbench.download_datasets`:
```python
import ogbench

dataset_names = [
    'humanoidmaze-medium-navigate-v0',
    'visual-puzzle-3x3-play-v0',
    'powderworld-easy-play-v0',
]
ogbench.download_datasets(
    dataset_names,  # List of dataset names.
    dataset_dir='~/.ogbench/data',  # Directory to save datasets (optional).
)
```


# How to use the reference implementations

OGBench also provides Jax-based reference implementations of six offline goal-conditioned RL algorithms
(GCBC, GCIVL, GCIQL, QRL, CRL, and HIQL).
They are provided in the `impls` directory as a **standalone** codebase.
You can safely remove the other parts of the repository if you only need the reference implementations
and do not want to modify the environments.

### Installation

Our reference implementations require Python 3.9+ and additional dependencies, including `jax >= 0.4.26`.
To install these dependencies, run:

```shell
cd impls
pip install -r requirements.txt
```

By default, this uses the PyPI version of OGBench.
If you want to use a local version of OGBench (e.g., for training methods on modified environments),
instead run `pip install -e ".[train]"` in the root directory.

### Running the reference implementations

Each algorithm is implemented in a separate file in the `agents` directory.
We provide implementations of the following offline goal-conditioned RL algorithms:

- `gcbc.py`: Goal-Conditioned Behavioral Cloning (GCBC)
- `gcivl.py`: Goal-Conditioned Implicit V-Learning (GCIVL)
- `gciql.py`: Goal-Conditioned Implicit Q-Learning (GCIQL)
- `qrl.py`: Quasimetric Reinforcement Learning (QRL)
- `crl.py`: Contrastive Reinforcement Learning (CRL)
- `hiql.py`: Hierarchical Implicit Q-Learning (HIQL)

To train an agent, you can run the `main.py` script.
Training metrics, evaluation metrics, and videos are logged via `wandb` by default.
Here are some example commands (see [hyperparameters.sh](impls/hyperparameters.sh) for the full list of commands):

```shell
# antmaze-large-navigate-v0 (GCBC)
python main.py --env_name=antmaze-large-navigate-v0 --agent=agents/gcbc.py
# antmaze-large-navigate-v0 (GCIVL)
python main.py --env_name=antmaze-large-navigate-v0 --agent=agents/gcivl.py --agent.alpha=10.0
# antmaze-large-navigate-v0 (GCIQL)
python main.py --env_name=antmaze-large-navigate-v0 --agent=agents/gciql.py --agent.alpha=0.3
# antmaze-large-navigate-v0 (QRL)
python main.py --env_name=antmaze-large-navigate-v0 --agent=agents/qrl.py --agent.alpha=0.003
# antmaze-large-navigate-v0 (CRL)
python main.py --env_name=antmaze-large-navigate-v0 --agent=agents/crl.py --agent.alpha=0.1
# antmaze-large-navigate-v0 (HIQL)
python main.py --env_name=antmaze-large-navigate-v0 --agent=agents/hiql.py --agent.high_alpha=3.0 --agent.low_alpha=3.0
```

Each run typically takes 2-5 hours (on state-based tasks)
or 5-12 hours (on pixel-based tasks) on a single A5000 GPU.
For large pixel-based datasets (e.g., `visual-puzzle-4x6-play-v0` with 5M transitions),
up to 120GB of RAM may be required.

### Notes on hyperparameters and flags

To reproduce the results in the paper, you need to use the hyperparameters provided.
We provide a complete list of the exact command-line flags used to produce the main benchmark table
in the paper in [hyperparameters.sh](impls/hyperparameters.sh).
Below, we highlight some important hyperparameters and common pitfalls:

- Regardless of the algorithm, one of the most important hyperparameters is `agent.alpha` (i.e., the temperature (AWR) or the BC coefficient (DDPG+BC))
  for the actor loss. It is crucial to tune this hyperparameter when running an algorithm on a new environment.
  In the paper, we provide a separate table of the policy extraction hyperparameters,
  which are individually tuned for each environment and dataset category.
- By default, actor goals are uniformly sampled from the future states in the same trajectory.
  We found this works best in most cases, but you can adjust this to allow random actor goals
  (e.g., by setting `--agent.actor_p_trajgoal=0.5 --agent.actor_p_randomgoal=0.5`); see the sketch after this list.
  This is especially important for datasets that require stitching.
  See the hyperparameter table in the paper for the values used in benchmarking.
- For GCIQL, CRL, and QRL, we provide two policy extraction methods: AWR and DDPG+BC.
  In general, DDPG+BC works better than AWR (see [this paper](https://arxiv.org/abs/2406.09329) for the reasons),
  but DDPG+BC is usually more sensitive to the `alpha` hyperparameter than AWR.
  As such, in a new environment, we recommend starting with AWR to get a sense of the performance
  and then switching to DDPG+BC to further improve it.
- Our QRL implementation provides two quasimetric parameterizations: MRN and IQE.
  We found that IQE (the default) works better in general, but it is almost twice as slow as MRN.
- In CRL, we found that using `--agent.actor_log_q=True` (which is set by default) is important for strong performance, especially in locomotion environments.
  We found this doesn't help much with other algorithms.
- In HIQL, setting `--agent.low_actor_rep_grad=True` (which is `False` by default) is crucial in pixel-based environments.
  This allows gradients to flow from the low-level actor loss to the subgoal representation, which helps maintain better representations.
- In pixel-based environments, don't forget to set `agent.encoder`. We used `--agent.encoder=impala_small` across all pixel-based environments.
- In discrete-action environments (e.g., Powderworld), don't forget to set `--agent.discrete=True`.
- In Powderworld, use `--eval_temperature=0.3`, which helps prevent the agent from getting stuck in certain states.

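To make the actor-goal sampling options above concrete, here is a minimal sketch of how a mixture of future (same-trajectory) goals and random goals can be sampled from a non-compact dataset. The helper and its arguments are illustrative and do not mirror the exact code in `impls/utils/datasets.py`:

```python
import numpy as np

rng = np.random.default_rng(0)

def sample_actor_goals(dataset, idxs, traj_end_idxs, p_trajgoal=0.5, p_randomgoal=0.5):
    """Sample actor goals for the transitions at `idxs` (illustrative sketch).

    `traj_end_idxs[i]` is assumed to hold the index of the last state of the
    trajectory that contains transition `idxs[i]`.
    """
    assert np.isclose(p_trajgoal + p_randomgoal, 1.0)
    n = len(idxs)
    # Future goals: a uniformly random state between the current step and the
    # end of the same trajectory (hindsight relabeling).
    future_idxs = rng.integers(idxs, traj_end_idxs + 1)
    # Random goals: any state in the dataset.
    random_idxs = rng.integers(0, len(dataset['observations']), size=n)
    # Mix the two sources according to the configured probabilities.
    use_traj = rng.random(n) < p_trajgoal
    goal_idxs = np.where(use_traj, future_idxs, random_idxs)
    return dataset['observations'][goal_idxs]
```
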
# How to reproduce the datasets

We provide the full scripts and exact command-line flags used to produce all the datasets in OGBench.
The scripts are provided in the `data_gen_scripts` directory.

### Installation

Data-generation scripts for locomotion environments require Python 3.9+ and additional dependencies,
including `jax >= 0.4.26`, to train and load expert agents.
For manipulation and drawing environments, no additional dependencies are required.
To install the necessary dependencies for locomotion environments, run the following command in the root directory:
```shell
pip install -e ".[train]"
```

This installs the same dependencies as the reference implementations, but in editable mode (`-e`).

### Reproducing the datasets

To reproduce the datasets, you can run the scripts in the `data_gen_scripts` directory.
For locomotion environments, you need to first download the expert policies.
We provide the exact command-line flags used to produce the datasets in [commands.sh](data_gen_scripts/commands.sh).
Here is an example of how to reproduce a dataset for the `antmaze-large-navigate-v0` task:

```shell
cd data_gen_scripts
# Download the expert policies for locomotion environments (not required for other environments).
wget https://rail.eecs.berkeley.edu/datasets/ogbench/experts.tar.gz
tar xf experts.tar.gz && rm experts.tar.gz
# Create a directory to save datasets.
mkdir -p data
# Add the `impls` directory to PYTHONPATH.
# Alternatively, you can move the contents of `data_gen_scripts` to `impls` instead of setting PYTHONPATH.
export PYTHONPATH="../impls:${PYTHONPATH}"
# Generate a dataset for `antmaze-large-navigate-v0`.
python generate_locomaze.py --env_name=antmaze-large-v0 --save_path=data/antmaze-large-navigate-v0.npz
```

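After generation, it is worth sanity-checking the saved file. A quick sketch, assuming the dataset was written to the path above:

```python
import numpy as np

# Load the generated dataset and print the shape of each stored array.
data = np.load('data/antmaze-large-navigate-v0.npz')
print({key: data[key].shape for key in data.files})
```
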
### Reproducing the expert policies

If you want to train your own expert policies from scratch, you can run the corresponding commands in [commands.sh](data_gen_scripts/commands.sh).
For example, to train an Ant expert policy, you can run the following command in the `data_gen_scripts` directory after setting `PYTHONPATH` as above:
```shell
python main_sac.py --env_name=online-ant-xy-v0
```

# Questions?

If you have any questions or issues, feel free to open an issue on this repository.
You can also reach out via email to [Seohong Park](https://seohong.me) at [seohong@berkeley.edu](mailto:seohong@berkeley.edu).

# Acknowledgments

This codebase is inspired by or partly uses code from the following repositories:
- [D4RL](https://github.com/Farama-Foundation/D4RL) for the dataset structure and the AntMaze environment.
- [Gymnasium](https://github.com/Farama-Foundation/Gymnasium) and [dm_control](https://github.com/google-deepmind/dm_control) for the agents (Ant and Humanoid) in the locomotion environments.
- [MuJoCo Menagerie](https://github.com/google-deepmind/mujoco_menagerie) for the robot descriptions (Universal Robots UR5e and Robotiq 2F-85) in the manipulation environments.
- [jaxlie](https://github.com/brentyi/jaxlie) for Lie group operations in the manipulation environments.
- [Meta-World](https://github.com/Farama-Foundation/Metaworld) for the objects (drawer, window, and button) in the manipulation environments.
- [Powderworld](https://github.com/kvfrans/powderworld) for the Powderworld environment.
- [NumPyConv2D](https://github.com/99991/NumPyConv2D) for the NumPy Conv2D implementation in the Powderworld environment.
- [jaxrl_m](https://github.com/dibyaghosh/jaxrl_m), [rlbase](https://github.com/kvfrans/rlbase_stable),
  [HIQL](https://github.com/seohongpark/HIQL), and [cmd-notebook](https://github.com/vivekmyers/cmd-notebook)
  for Jax-based implementations of RL algorithms.

Special thanks to [Kevin Zakka](https://kzakka.com/) for providing the initial codebase for the manipulation environments.

# Citation

```bibtex
@article{ogbench_park2024,
  title={OGBench: Benchmarking Offline Goal-Conditioned RL},
  author={Seohong Park and Kevin Frans and Benjamin Eysenbach and Sergey Levine},
  journal={ArXiv},
  year={2024}
}
```
assets/env_teaser.png
ADDED
Git LFS Details

assets/ogbench.svg
ADDED
data_gen_scripts/commands.sh
ADDED
@@ -0,0 +1,194 @@
# Commands to train expert policies.

# ant (online-ant-xy-v0)
python main_sac.py --env_name=online-ant-xy-v0 --train_steps=400000 --eval_interval=100000 --save_interval=400000 --log_interval=5000
# antball (online-antball-v0)
python main_sac.py --env_name=online-antball-v0 --train_steps=12000000 --train_interval=4 --eval_interval=500000 --save_interval=12000000 --log_interval=20000 --agent.layer_norm=True --terminate_at_end=1
# humanoid (online-humanoid-xy-v0)
python main_sac.py --env_name=online-humanoid-xy-v0 --train_steps=40000000 --train_interval=4 --eval_interval=500000 --save_interval=40000000 --log_interval=20000 --agent.value_hidden_dims="(1024, 1024, 1024)" --agent.layer_norm=True --agent.min_q=False


# Commands to reproduce datasets.

# pointmaze-medium-navigate-v0
python generate_locomaze.py --env_name=pointmaze-medium-v0 --save_path=data/pointmaze-medium-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=1001 --noise=0.5
# pointmaze-large-navigate-v0
python generate_locomaze.py --env_name=pointmaze-large-v0 --save_path=data/pointmaze-large-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=1001 --noise=0.5
# pointmaze-giant-navigate-v0
python generate_locomaze.py --env_name=pointmaze-giant-v0 --save_path=data/pointmaze-giant-navigate-v0.npz --dataset_type=navigate --num_episodes=500 --max_episode_steps=2001 --noise=0.5
# pointmaze-teleport-navigate-v0
python generate_locomaze.py --env_name=pointmaze-teleport-v0 --save_path=data/pointmaze-teleport-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=1001 --noise=0.5
# pointmaze-medium-stitch-v0
python generate_locomaze.py --env_name=pointmaze-medium-v0 --save_path=data/pointmaze-medium-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --noise=0.5
# pointmaze-large-stitch-v0
python generate_locomaze.py --env_name=pointmaze-large-v0 --save_path=data/pointmaze-large-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --noise=0.5
# pointmaze-giant-stitch-v0
python generate_locomaze.py --env_name=pointmaze-giant-v0 --save_path=data/pointmaze-giant-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --noise=0.5
# pointmaze-teleport-stitch-v0
python generate_locomaze.py --env_name=pointmaze-teleport-v0 --save_path=data/pointmaze-teleport-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --noise=0.5

# antmaze-medium-navigate-v0
python generate_locomaze.py --env_name=antmaze-medium-v0 --save_path=data/antmaze-medium-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=1001 --restore_path=experts/ant --restore_epoch=400000
# antmaze-large-navigate-v0
python generate_locomaze.py --env_name=antmaze-large-v0 --save_path=data/antmaze-large-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=1001 --restore_path=experts/ant --restore_epoch=400000
# antmaze-giant-navigate-v0
python generate_locomaze.py --env_name=antmaze-giant-v0 --save_path=data/antmaze-giant-navigate-v0.npz --dataset_type=navigate --num_episodes=500 --max_episode_steps=2001 --restore_path=experts/ant --restore_epoch=400000
# antmaze-teleport-navigate-v0
python generate_locomaze.py --env_name=antmaze-teleport-v0 --save_path=data/antmaze-teleport-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=1001 --restore_path=experts/ant --restore_epoch=400000
# antmaze-medium-stitch-v0
python generate_locomaze.py --env_name=antmaze-medium-v0 --save_path=data/antmaze-medium-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --restore_path=experts/ant --restore_epoch=400000
# antmaze-large-stitch-v0
python generate_locomaze.py --env_name=antmaze-large-v0 --save_path=data/antmaze-large-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --restore_path=experts/ant --restore_epoch=400000
# antmaze-giant-stitch-v0
python generate_locomaze.py --env_name=antmaze-giant-v0 --save_path=data/antmaze-giant-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --restore_path=experts/ant --restore_epoch=400000
# antmaze-teleport-stitch-v0
python generate_locomaze.py --env_name=antmaze-teleport-v0 --save_path=data/antmaze-teleport-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --restore_path=experts/ant --restore_epoch=400000
# antmaze-medium-explore-v0
python generate_locomaze.py --env_name=antmaze-medium-v0 --save_path=data/antmaze-medium-explore-v0.npz --dataset_type=explore --num_episodes=10000 --max_episode_steps=501 --noise=1.0 --restore_path=experts/ant --restore_epoch=400000
# antmaze-large-explore-v0
python generate_locomaze.py --env_name=antmaze-large-v0 --save_path=data/antmaze-large-explore-v0.npz --dataset_type=explore --num_episodes=10000 --max_episode_steps=501 --noise=1.0 --restore_path=experts/ant --restore_epoch=400000
# antmaze-teleport-explore-v0
python generate_locomaze.py --env_name=antmaze-teleport-v0 --save_path=data/antmaze-teleport-explore-v0.npz --dataset_type=explore --num_episodes=10000 --max_episode_steps=501 --noise=1.0 --restore_path=experts/ant --restore_epoch=400000

# humanoidmaze-medium-navigate-v0
python generate_locomaze.py --env_name=humanoidmaze-medium-v0 --save_path=data/humanoidmaze-medium-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=2001 --restore_path=experts/humanoid --restore_epoch=40000000
# humanoidmaze-large-navigate-v0
python generate_locomaze.py --env_name=humanoidmaze-large-v0 --save_path=data/humanoidmaze-large-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=2001 --restore_path=experts/humanoid --restore_epoch=40000000
# humanoidmaze-giant-navigate-v0
python generate_locomaze.py --env_name=humanoidmaze-giant-v0 --save_path=data/humanoidmaze-giant-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=4001 --restore_path=experts/humanoid --restore_epoch=40000000
# humanoidmaze-medium-stitch-v0
python generate_locomaze.py --env_name=humanoidmaze-medium-v0 --save_path=data/humanoidmaze-medium-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=401 --restore_path=experts/humanoid --restore_epoch=40000000
# humanoidmaze-large-stitch-v0
python generate_locomaze.py --env_name=humanoidmaze-large-v0 --save_path=data/humanoidmaze-large-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=401 --restore_path=experts/humanoid --restore_epoch=40000000
# humanoidmaze-giant-stitch-v0
python generate_locomaze.py --env_name=humanoidmaze-giant-v0 --save_path=data/humanoidmaze-giant-stitch-v0.npz --dataset_type=stitch --num_episodes=10000 --max_episode_steps=401 --restore_path=experts/humanoid --restore_epoch=40000000

# antsoccer-arena-navigate-v0
python generate_antsoccer.py --env_name=antsoccer-arena-v0 --save_path=data/antsoccer-arena-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=1001 --loco_restore_path=experts/ant --loco_restore_epoch=400000 --ball_restore_path=experts/antball --ball_restore_epoch=12000000
# antsoccer-medium-navigate-v0
python generate_antsoccer.py --env_name=antsoccer-medium-v0 --save_path=data/antsoccer-medium-navigate-v0.npz --dataset_type=navigate --num_episodes=4000 --max_episode_steps=1001 --loco_restore_path=experts/ant --loco_restore_epoch=400000 --ball_restore_path=experts/antball --ball_restore_epoch=12000000
# antsoccer-arena-stitch-v0
python generate_antsoccer.py --env_name=antsoccer-arena-v0 --save_path=data/antsoccer-arena-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --loco_restore_path=experts/ant --loco_restore_epoch=400000 --ball_restore_path=experts/antball --ball_restore_epoch=12000000
# antsoccer-medium-stitch-v0
python generate_antsoccer.py --env_name=antsoccer-medium-v0 --save_path=data/antsoccer-medium-stitch-v0.npz --dataset_type=stitch --num_episodes=8000 --max_episode_steps=501 --loco_restore_path=experts/ant --loco_restore_epoch=400000 --ball_restore_path=experts/antball --ball_restore_epoch=12000000

# visual-antmaze-medium-navigate-v0
python generate_locomaze.py --env_name=visual-antmaze-medium-v0 --save_path=data/visual-antmaze-medium-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=1001 --restore_path=experts/ant --restore_epoch=400000
# visual-antmaze-large-navigate-v0
python generate_locomaze.py --env_name=visual-antmaze-large-v0 --save_path=data/visual-antmaze-large-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=1001 --restore_path=experts/ant --restore_epoch=400000
# visual-antmaze-giant-navigate-v0
python generate_locomaze.py --env_name=visual-antmaze-giant-v0 --save_path=data/visual-antmaze-giant-navigate-v0.npz --dataset_type=navigate --num_episodes=500 --max_episode_steps=2001 --restore_path=experts/ant --restore_epoch=400000
# visual-antmaze-teleport-navigate-v0
python generate_locomaze.py --env_name=visual-antmaze-teleport-v0 --save_path=data/visual-antmaze-teleport-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=1001 --restore_path=experts/ant --restore_epoch=400000
# visual-antmaze-medium-stitch-v0
python generate_locomaze.py --env_name=visual-antmaze-medium-v0 --save_path=data/visual-antmaze-medium-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --restore_path=experts/ant --restore_epoch=400000
# visual-antmaze-large-stitch-v0
python generate_locomaze.py --env_name=visual-antmaze-large-v0 --save_path=data/visual-antmaze-large-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --restore_path=experts/ant --restore_epoch=400000
# visual-antmaze-giant-stitch-v0
python generate_locomaze.py --env_name=visual-antmaze-giant-v0 --save_path=data/visual-antmaze-giant-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --restore_path=experts/ant --restore_epoch=400000
# visual-antmaze-teleport-stitch-v0
python generate_locomaze.py --env_name=visual-antmaze-teleport-v0 --save_path=data/visual-antmaze-teleport-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=201 --restore_path=experts/ant --restore_epoch=400000
# visual-antmaze-medium-explore-v0
python generate_locomaze.py --env_name=visual-antmaze-medium-v0 --save_path=data/visual-antmaze-medium-explore-v0.npz --dataset_type=explore --num_episodes=10000 --max_episode_steps=501 --noise=1.0 --restore_path=experts/ant --restore_epoch=400000
# visual-antmaze-large-explore-v0
python generate_locomaze.py --env_name=visual-antmaze-large-v0 --save_path=data/visual-antmaze-large-explore-v0.npz --dataset_type=explore --num_episodes=10000 --max_episode_steps=501 --noise=1.0 --restore_path=experts/ant --restore_epoch=400000
# visual-antmaze-teleport-explore-v0
python generate_locomaze.py --env_name=visual-antmaze-teleport-v0 --save_path=data/visual-antmaze-teleport-explore-v0.npz --dataset_type=explore --num_episodes=10000 --max_episode_steps=501 --noise=1.0 --restore_path=experts/ant --restore_epoch=400000

# visual-humanoidmaze-medium-navigate-v0
python generate_locomaze.py --env_name=visual-humanoidmaze-medium-v0 --save_path=data/visual-humanoidmaze-medium-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=2001 --restore_path=experts/humanoid --restore_epoch=40000000
# visual-humanoidmaze-large-navigate-v0
python generate_locomaze.py --env_name=visual-humanoidmaze-large-v0 --save_path=data/visual-humanoidmaze-large-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=2001 --restore_path=experts/humanoid --restore_epoch=40000000
# visual-humanoidmaze-giant-navigate-v0
python generate_locomaze.py --env_name=visual-humanoidmaze-giant-v0 --save_path=data/visual-humanoidmaze-giant-navigate-v0.npz --dataset_type=navigate --num_episodes=1000 --max_episode_steps=4001 --restore_path=experts/humanoid --restore_epoch=40000000
# visual-humanoidmaze-medium-stitch-v0
python generate_locomaze.py --env_name=visual-humanoidmaze-medium-v0 --save_path=data/visual-humanoidmaze-medium-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=401 --restore_path=experts/humanoid --restore_epoch=40000000
# visual-humanoidmaze-large-stitch-v0
python generate_locomaze.py --env_name=visual-humanoidmaze-large-v0 --save_path=data/visual-humanoidmaze-large-stitch-v0.npz --dataset_type=stitch --num_episodes=5000 --max_episode_steps=401 --restore_path=experts/humanoid --restore_epoch=40000000
# visual-humanoidmaze-giant-stitch-v0
python generate_locomaze.py --env_name=visual-humanoidmaze-giant-v0 --save_path=data/visual-humanoidmaze-giant-stitch-v0.npz --dataset_type=stitch --num_episodes=10000 --max_episode_steps=401 --restore_path=experts/humanoid --restore_epoch=40000000

# cube-single-play-v0
python generate_manipspace.py --env_name=cube-single-v0 --save_path=data/cube-single-play-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=play
# cube-double-play-v0
python generate_manipspace.py --env_name=cube-double-v0 --save_path=data/cube-double-play-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=play
# cube-triple-play-v0
python generate_manipspace.py --env_name=cube-triple-v0 --save_path=data/cube-triple-play-v0.npz --num_episodes=3000 --max_episode_steps=1001 --dataset_type=play
# cube-quadruple-play-v0
python generate_manipspace.py --env_name=cube-quadruple-v0 --save_path=data/cube-quadruple-play-v0.npz --num_episodes=5000 --max_episode_steps=1001 --dataset_type=play
# cube-single-noisy-v0
python generate_manipspace.py --env_name=cube-single-v0 --save_path=data/cube-single-noisy-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.1
# cube-double-noisy-v0
python generate_manipspace.py --env_name=cube-double-v0 --save_path=data/cube-double-noisy-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.1
# cube-triple-noisy-v0
python generate_manipspace.py --env_name=cube-triple-v0 --save_path=data/cube-triple-noisy-v0.npz --num_episodes=3000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.1
# cube-quadruple-noisy-v0
python generate_manipspace.py --env_name=cube-quadruple-v0 --save_path=data/cube-quadruple-noisy-v0.npz --num_episodes=5000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.1

# scene-play-v0
python generate_manipspace.py --env_name=scene-v0 --save_path=data/scene-play-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=play
# scene-noisy-v0
python generate_manipspace.py --env_name=scene-v0 --save_path=data/scene-noisy-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.1

# puzzle-3x3-play-v0
python generate_manipspace.py --env_name=puzzle-3x3-v0 --save_path=data/puzzle-3x3-play-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=play
# puzzle-4x4-play-v0
python generate_manipspace.py --env_name=puzzle-4x4-v0 --save_path=data/puzzle-4x4-play-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=play
# puzzle-4x5-play-v0
python generate_manipspace.py --env_name=puzzle-4x5-v0 --save_path=data/puzzle-4x5-play-v0.npz --num_episodes=3000 --max_episode_steps=1001 --dataset_type=play
# puzzle-4x6-play-v0
python generate_manipspace.py --env_name=puzzle-4x6-v0 --save_path=data/puzzle-4x6-play-v0.npz --num_episodes=5000 --max_episode_steps=1001 --dataset_type=play
# puzzle-3x3-noisy-v0
python generate_manipspace.py --env_name=puzzle-3x3-v0 --save_path=data/puzzle-3x3-noisy-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.2
# puzzle-4x4-noisy-v0
python generate_manipspace.py --env_name=puzzle-4x4-v0 --save_path=data/puzzle-4x4-noisy-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.2
# puzzle-4x5-noisy-v0
python generate_manipspace.py --env_name=puzzle-4x5-v0 --save_path=data/puzzle-4x5-noisy-v0.npz --num_episodes=3000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.2
# puzzle-4x6-noisy-v0
python generate_manipspace.py --env_name=puzzle-4x6-v0 --save_path=data/puzzle-4x6-noisy-v0.npz --num_episodes=5000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.2

# visual-cube-single-play-v0
python generate_manipspace.py --env_name=visual-cube-single-v0 --save_path=data/visual-cube-single-play-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=play
# visual-cube-double-play-v0
python generate_manipspace.py --env_name=visual-cube-double-v0 --save_path=data/visual-cube-double-play-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=play
# visual-cube-triple-play-v0
python generate_manipspace.py --env_name=visual-cube-triple-v0 --save_path=data/visual-cube-triple-play-v0.npz --num_episodes=3000 --max_episode_steps=1001 --dataset_type=play
# visual-cube-quadruple-play-v0
python generate_manipspace.py --env_name=visual-cube-quadruple-v0 --save_path=data/visual-cube-quadruple-play-v0.npz --num_episodes=5000 --max_episode_steps=1001 --dataset_type=play
# visual-cube-single-noisy-v0
python generate_manipspace.py --env_name=visual-cube-single-v0 --save_path=data/visual-cube-single-noisy-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.1
# visual-cube-double-noisy-v0
python generate_manipspace.py --env_name=visual-cube-double-v0 --save_path=data/visual-cube-double-noisy-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.1
# visual-cube-triple-noisy-v0
python generate_manipspace.py --env_name=visual-cube-triple-v0 --save_path=data/visual-cube-triple-noisy-v0.npz --num_episodes=3000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.1
# visual-cube-quadruple-noisy-v0
python generate_manipspace.py --env_name=visual-cube-quadruple-v0 --save_path=data/visual-cube-quadruple-noisy-v0.npz --num_episodes=5000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.1

# visual-scene-play-v0
python generate_manipspace.py --env_name=visual-scene-v0 --save_path=data/visual-scene-play-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=play
# visual-scene-noisy-v0
python generate_manipspace.py --env_name=visual-scene-v0 --save_path=data/visual-scene-noisy-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.1

# visual-puzzle-3x3-play-v0
python generate_manipspace.py --env_name=visual-puzzle-3x3-v0 --save_path=data/visual-puzzle-3x3-play-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=play
# visual-puzzle-4x4-play-v0
python generate_manipspace.py --env_name=visual-puzzle-4x4-v0 --save_path=data/visual-puzzle-4x4-play-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=play
# visual-puzzle-4x5-play-v0
python generate_manipspace.py --env_name=visual-puzzle-4x5-v0 --save_path=data/visual-puzzle-4x5-play-v0.npz --num_episodes=3000 --max_episode_steps=1001 --dataset_type=play
# visual-puzzle-4x6-play-v0
python generate_manipspace.py --env_name=visual-puzzle-4x6-v0 --save_path=data/visual-puzzle-4x6-play-v0.npz --num_episodes=5000 --max_episode_steps=1001 --dataset_type=play
# visual-puzzle-3x3-noisy-v0
python generate_manipspace.py --env_name=visual-puzzle-3x3-v0 --save_path=data/visual-puzzle-3x3-noisy-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.2
# visual-puzzle-4x4-noisy-v0
python generate_manipspace.py --env_name=visual-puzzle-4x4-v0 --save_path=data/visual-puzzle-4x4-noisy-v0.npz --num_episodes=1000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.2
# visual-puzzle-4x5-noisy-v0
python generate_manipspace.py --env_name=visual-puzzle-4x5-v0 --save_path=data/visual-puzzle-4x5-noisy-v0.npz --num_episodes=3000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.2
# visual-puzzle-4x6-noisy-v0
python generate_manipspace.py --env_name=visual-puzzle-4x6-v0 --save_path=data/visual-puzzle-4x6-noisy-v0.npz --num_episodes=5000 --max_episode_steps=1001 --dataset_type=noisy --p_random_action=0.2

# powderworld-easy-play-v0
python generate_powderworld.py --env_name=powderworld-easy-v0 --save_path=data/powderworld-easy-play-v0.npz --dataset_type=play --num_episodes=1000 --max_episode_steps=1001
# powderworld-medium-play-v0
python generate_powderworld.py --env_name=powderworld-medium-v0 --save_path=data/powderworld-medium-play-v0.npz --dataset_type=play --num_episodes=3000 --max_episode_steps=1001
# powderworld-hard-play-v0
python generate_powderworld.py --env_name=powderworld-hard-v0 --save_path=data/powderworld-hard-play-v0.npz --dataset_type=play --num_episodes=5000 --max_episode_steps=1001
data_gen_scripts/generate_antsoccer.py
ADDED
@@ -0,0 +1,225 @@
import glob
import json
from collections import defaultdict

import gymnasium
import numpy as np
from absl import app, flags
from agents import SACAgent
from tqdm import trange
from utils.evaluation import supply_rng
from utils.flax_utils import restore_agent

import ogbench.locomaze  # noqa

FLAGS = flags.FLAGS

flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_string('env_name', 'antsoccer-arena-v0', 'Environment name.')
flags.DEFINE_string('dataset_type', 'navigate', 'Dataset type.')
flags.DEFINE_string('loco_restore_path', 'experts/ant', 'Locomotion agent restore path.')
flags.DEFINE_integer('loco_restore_epoch', 400000, 'Locomotion agent restore epoch.')
flags.DEFINE_string('ball_restore_path', 'experts/antball', 'Ball agent restore path.')
flags.DEFINE_integer('ball_restore_epoch', 12000000, 'Ball agent restore epoch.')
flags.DEFINE_string('save_path', None, 'Save path.')
flags.DEFINE_float('noise', 0.2, 'Gaussian action noise level.')
flags.DEFINE_integer('num_episodes', 1000, 'Number of episodes.')
flags.DEFINE_integer('max_episode_steps', 1001, 'Maximum number of steps in an episode.')


def load_agent(restore_path, restore_epoch, ob_dim, action_dim):
    """Initialize and load a SAC agent from a given path."""
    # Load agent config.
    candidates = glob.glob(restore_path)
    assert len(candidates) == 1, f'Found {len(candidates)} candidates: {candidates}'

    with open(candidates[0] + '/flags.json', 'r') as f:
        agent_config = json.load(f)['agent']

    # Load agent.
    agent = SACAgent.create(
        FLAGS.seed,
        np.zeros(ob_dim),
        np.zeros(action_dim),
        agent_config,
    )
    agent = restore_agent(agent, restore_path, restore_epoch)

    return agent


def main(_):
    assert FLAGS.dataset_type in ['navigate', 'stitch']
    # 'navigate': Repeatedly navigate to the ball and then to a goal in a single episode.
    # 'stitch': Either only navigate or only dribble the ball to a goal in a single episode.

    # Initialize environment.
    env = gymnasium.make(
        FLAGS.env_name,
        terminate_at_goal=False,
        max_episode_steps=FLAGS.max_episode_steps,
    )
    ob_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]

    # Initialize oracle agents.
    loco_agent = load_agent(FLAGS.loco_restore_path, FLAGS.loco_restore_epoch, ob_dim, action_dim)
    ball_agent = load_agent(FLAGS.ball_restore_path, FLAGS.ball_restore_epoch, ob_dim, action_dim)
    loco_actor_fn = supply_rng(loco_agent.sample_actions, rng=loco_agent.rng)
    ball_actor_fn = supply_rng(ball_agent.sample_actions, rng=ball_agent.rng)

    def get_agent_action(ob, goal_xy):
        """Get an action for the agent to navigate to the goal."""
        if 'arena' not in FLAGS.env_name:
            # In the actual maze environment, replace the goal with the oracle subgoal.
            goal_xy, _ = env.unwrapped.get_oracle_subgoal(ob[:2], goal_xy)
        goal_dir = goal_xy - ob[:2]
        goal_dir = goal_dir / (np.linalg.norm(goal_dir) + 1e-6)
        # Concatenate the agent's joint positions (excluding the x-y position), joint velocities, and goal direction.
        agent_ob = np.concatenate([ob[2:15], ob[22:36], goal_dir])
        action = loco_actor_fn(agent_ob, temperature=0)
        return action

    def get_ball_action(ob, ball_xy, goal_xy):
        """Get an action for the agent to dribble the ball to the goal."""
        if 'arena' in FLAGS.env_name:
            if np.linalg.norm(goal_xy - ball_xy) > 10:
                # If the ball is too far from the goal, set a virtual goal 10 units away from the ball. This is because
                # the ball agent is not trained to dribble the ball to a goal that is too far away.
                goal_xy = ball_xy + 10 * (goal_xy - ball_xy) / np.linalg.norm(goal_xy - ball_xy)
        else:
            # In the actual maze environment, replace the goal with the oracle subgoal.
            goal_xy, _ = env.unwrapped.get_oracle_subgoal(ball_xy, goal_xy)
        # Concatenate the agent and ball's joint positions (excluding their x-y positions), their joint velocities, and
        # the relative positions of the ball and the goal.
        agent_ob = np.concatenate([ob[2:15], ob[17:], ball_xy - agent_xy, goal_xy - ball_xy])
        action = ball_actor_fn(agent_ob, temperature=0)
        return action

    # Store all empty cells.
    all_cells = []
    maze_map = env.unwrapped.maze_map
    for i in range(maze_map.shape[0]):
        for j in range(maze_map.shape[1]):
            if maze_map[i, j] == 0:
                all_cells.append((i, j))

    # Collect data.
    dataset = defaultdict(list)
    total_steps = 0
    total_train_steps = 0
    num_train_episodes = FLAGS.num_episodes
    num_val_episodes = FLAGS.num_episodes // 10
    for ep_idx in trange(num_train_episodes + num_val_episodes):
        if FLAGS.dataset_type == 'navigate':
            # Sample random initial positions for the agent, the ball, and the goal.
            agent_init_idx, ball_init_idx, goal_idx = np.random.choice(len(all_cells), 3, replace=False)
            agent_init_ij = all_cells[agent_init_idx]
            ball_init_ij = all_cells[ball_init_idx]
            goal_ij = all_cells[goal_idx]
        elif FLAGS.dataset_type == 'stitch':
            # Randomly choose between the 'navigate' and 'dribble' modes.
            cur_mode = 'navigate' if np.random.randint(2) == 0 else 'dribble'

            # Sample random initial positions for the agent, the ball, and the goal. In the 'dribble' mode, the ball
            # always starts at the agent's position.
            agent_init_idx, ball_init_idx, goal_idx = np.random.choice(len(all_cells), 3, replace=False)
            agent_init_ij = all_cells[agent_init_idx]
            ball_init_ij = all_cells[ball_init_idx] if cur_mode == 'navigate' else agent_init_ij
            goal_ij = all_cells[goal_idx]
        else:
            raise ValueError(f'Unsupported dataset_type: {FLAGS.dataset_type}')

        ob, _ = env.reset(
            options=dict(task_info=dict(agent_init_ij=agent_init_ij, ball_init_ij=ball_init_ij, goal_ij=goal_ij))
        )

        done = False
        step = 0

        virtual_agent_goal_xy = None  # Virtual goal for the agent to move to when stuck.

        while not done:
            agent_xy, ball_xy = env.unwrapped.get_agent_ball_xy()
            agent_xy, ball_xy = np.array(agent_xy), np.array(ball_xy)
            goal_xy = np.array(env.unwrapped.cur_goal_xy)

            if FLAGS.dataset_type == 'navigate':
                if virtual_agent_goal_xy is None:
                    if np.linalg.norm(agent_xy - ball_xy) > 2:
                        # If the agent is far from the ball, move to the ball.
                        action = get_agent_action(ob, ball_xy)
                    else:
                        # If the agent is close to the ball, dribble the ball to the goal.
                        action = get_ball_action(ob, ball_xy, goal_xy)
                else:
                    # When virtual_agent_goal_xy is set, move to the virtual goal.
                    action = get_agent_action(ob, virtual_agent_goal_xy)
            elif FLAGS.dataset_type == 'stitch':
                if cur_mode == 'navigate':
                    # Navigate to the goal.
                    action = get_agent_action(ob, goal_xy)
                else:
                    # Dribble the ball to the goal.
                    action = get_ball_action(ob, ball_xy, goal_xy)

            # Add Gaussian noise to the action.
            action = action + np.random.normal(0, FLAGS.noise, action.shape)
            action = np.clip(action, -1, 1)

            next_ob, reward, terminated, truncated, info = env.step(action)
            done = terminated or truncated
|
| 172 |
+
success = info['success']
|
| 173 |
+
|
| 174 |
+
if virtual_agent_goal_xy is not None and np.linalg.norm(virtual_agent_goal_xy - next_ob[:2]) <= 0.5:
|
| 175 |
+
# If the agent reaches the virtual goal, clear it.
|
| 176 |
+
virtual_agent_goal_xy = None
|
| 177 |
+
|
| 178 |
+
if FLAGS.dataset_type == 'navigate':
|
| 179 |
+
if success:
|
| 180 |
+
# Sample a new goal state when the current goal is reached.
|
| 181 |
+
goal_ij = all_cells[np.random.randint(len(all_cells))]
|
| 182 |
+
env.unwrapped.set_goal(goal_ij)
|
| 183 |
+
|
| 184 |
+
# Determine whether the agent is stuck.
|
| 185 |
+
if (
|
| 186 |
+
step > 150
|
| 187 |
+
and virtual_agent_goal_xy is None
|
| 188 |
+
and np.linalg.norm(np.array(dataset['observations'][-150:])[:, :2] - next_ob[:2], axis=1).max() <= 2
|
| 189 |
+
):
|
| 190 |
+
# When the agent is stuck for 150 steps, set a virtual goal to move to a random cell.
|
| 191 |
+
virtual_agent_goal_ij = all_cells[np.random.randint(len(all_cells))]
|
| 192 |
+
virtual_agent_goal_xy = np.array(env.unwrapped.ij_to_xy(virtual_agent_goal_ij))
|
| 193 |
+
|
| 194 |
+
dataset['observations'].append(ob)
|
| 195 |
+
dataset['actions'].append(action)
|
| 196 |
+
dataset['terminals'].append(done)
|
| 197 |
+
dataset['qpos'].append(info['prev_qpos'])
|
| 198 |
+
dataset['qvel'].append(info['prev_qvel'])
|
| 199 |
+
|
| 200 |
+
ob = next_ob
|
| 201 |
+
step += 1
|
| 202 |
+
|
| 203 |
+
total_steps += step
|
| 204 |
+
if ep_idx < num_train_episodes:
|
| 205 |
+
total_train_steps += step
|
| 206 |
+
|
| 207 |
+
print('Total steps:', total_steps)
|
| 208 |
+
|
| 209 |
+
train_path = FLAGS.save_path
|
| 210 |
+
val_path = FLAGS.save_path.replace('.npz', '-val.npz')
|
| 211 |
+
|
| 212 |
+
# Split the dataset into training and validation sets.
|
| 213 |
+
train_dataset = {
|
| 214 |
+
k: np.array(v[:total_train_steps], dtype=np.float32 if k != 'terminals' else bool) for k, v in dataset.items()
|
| 215 |
+
}
|
| 216 |
+
val_dataset = {
|
| 217 |
+
k: np.array(v[total_train_steps:], dtype=np.float32 if k != 'terminals' else bool) for k, v in dataset.items()
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
for path, dataset in [(train_path, train_dataset), (val_path, val_dataset)]:
|
| 221 |
+
np.savez_compressed(path, **dataset)
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
if __name__ == '__main__':
|
| 225 |
+
app.run(main)
|
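A note on the output format: each generator in this directory writes a training split to --save_path and a validation split to its '-val.npz' twin. A minimal sketch for inspecting a generated file (the file name here is a hypothetical placeholder):

import numpy as np

# Inspect a generated dataset; 'antsoccer.npz' is a placeholder name.
data = np.load('antsoccer.npz')
for key in ['observations', 'actions', 'terminals', 'qpos', 'qvel']:
    print(key, data[key].shape, data[key].dtype)
# 'terminals' is True exactly once per episode (at termination or truncation),
# so the episode count equals the number of True entries.
print('Episodes:', int(data['terminals'].sum()))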
data_gen_scripts/generate_locomaze.py
ADDED
@@ -0,0 +1,212 @@
import glob
import json
from collections import defaultdict

import gymnasium
import numpy as np
from absl import app, flags
from agents import SACAgent
from tqdm import trange
from utils.evaluation import supply_rng
from utils.flax_utils import restore_agent

import ogbench.locomaze  # noqa

FLAGS = flags.FLAGS

flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_string('env_name', 'antmaze-large-v0', 'Environment name.')
flags.DEFINE_string('dataset_type', 'navigate', 'Dataset type.')
flags.DEFINE_string('restore_path', 'experts/ant', 'Expert agent restore path.')
flags.DEFINE_integer('restore_epoch', 400000, 'Expert agent restore epoch.')
flags.DEFINE_string('save_path', None, 'Save path.')
flags.DEFINE_float('noise', 0.2, 'Gaussian action noise level.')
flags.DEFINE_integer('num_episodes', 1000, 'Number of episodes.')
flags.DEFINE_integer('max_episode_steps', 1001, 'Maximum number of steps in an episode.')


def main(_):
    assert FLAGS.dataset_type in ['path', 'navigate', 'stitch', 'explore']
    # 'path': Reach a single goal and stay there.
    # 'navigate': Repeatedly reach randomly sampled goals in a single episode.
    # 'stitch': Reach a nearby goal that is 4 cells away and stay there.
    # 'explore': Repeatedly follow random directions sampled every 10 steps.

    # Initialize environment.
    env = gymnasium.make(
        FLAGS.env_name,
        terminate_at_goal=False,
        max_episode_steps=FLAGS.max_episode_steps,
    )
    ob_dim = env.observation_space.shape[0]

    # Initialize oracle agent.
    if 'point' in FLAGS.env_name:

        def actor_fn(ob, temperature):
            return ob[-2:]
    else:
        # Load agent config.
        restore_path = FLAGS.restore_path
        candidates = glob.glob(restore_path)
        assert len(candidates) == 1, f'Found {len(candidates)} candidates: {candidates}'

        with open(candidates[0] + '/flags.json', 'r') as f:
            agent_config = json.load(f)['agent']

        # Load agent.
        agent = SACAgent.create(
            FLAGS.seed,
            np.zeros(ob_dim),
            env.action_space.sample(),
            agent_config,
        )
        agent = restore_agent(agent, FLAGS.restore_path, FLAGS.restore_epoch)
        actor_fn = supply_rng(agent.sample_actions, rng=agent.rng)

    # Store all empty cells and vertex cells.
    all_cells = []
    vertex_cells = []
    maze_map = env.unwrapped.maze_map
    for i in range(maze_map.shape[0]):
        for j in range(maze_map.shape[1]):
            if maze_map[i, j] == 0:
                all_cells.append((i, j))

                # Exclude hallway cells.
                if (
                    maze_map[i - 1, j] == 0
                    and maze_map[i + 1, j] == 0
                    and maze_map[i, j - 1] == 1
                    and maze_map[i, j + 1] == 1
                ):
                    continue
                if (
                    maze_map[i, j - 1] == 0
                    and maze_map[i, j + 1] == 0
                    and maze_map[i - 1, j] == 1
                    and maze_map[i + 1, j] == 1
                ):
                    continue

                vertex_cells.append((i, j))

    # Collect data.
    dataset = defaultdict(list)
    total_steps = 0
    total_train_steps = 0
    num_train_episodes = FLAGS.num_episodes
    num_val_episodes = FLAGS.num_episodes // 10
    for ep_idx in trange(num_train_episodes + num_val_episodes):
        if FLAGS.dataset_type in ['path', 'navigate', 'explore']:
            # Sample an initial state from all cells.
            init_ij = all_cells[np.random.randint(len(all_cells))]
            # Sample a goal state from vertex cells.
            goal_ij = vertex_cells[np.random.randint(len(vertex_cells))]
        elif FLAGS.dataset_type == 'stitch':
            # Sample an initial state from all cells.
            init_ij = all_cells[np.random.randint(len(all_cells))]

            # Perform BFS to find adjacent cells.
            adj_cells = []
            adj_steps = 4  # Target distance from the initial cell.
            bfs_map = maze_map.copy()
            for i in range(bfs_map.shape[0]):
                for j in range(bfs_map.shape[1]):
                    bfs_map[i][j] = -1
            bfs_map[init_ij[0], init_ij[1]] = 0
            queue = [init_ij]
            while len(queue) > 0:
                i, j = queue.pop(0)
                for di, dj in [(-1, 0), (0, -1), (1, 0), (0, 1)]:
                    ni, nj = i + di, j + dj
                    if (
                        0 <= ni < bfs_map.shape[0]
                        and 0 <= nj < bfs_map.shape[1]
                        and maze_map[ni, nj] == 0
                        and bfs_map[ni, nj] == -1
                    ):
                        bfs_map[ni][nj] = bfs_map[i][j] + 1
                        queue.append((ni, nj))
                        if bfs_map[ni][nj] == adj_steps:
                            adj_cells.append((ni, nj))

            # Sample a goal state from adjacent cells.
            goal_ij = adj_cells[np.random.randint(len(adj_cells))] if len(adj_cells) > 0 else init_ij
        else:
            raise ValueError(f'Unsupported dataset_type: {FLAGS.dataset_type}')

        ob, _ = env.reset(options=dict(task_info=dict(init_ij=init_ij, goal_ij=goal_ij)))

        done = False
        step = 0

        cur_subgoal_dir = None  # Current subgoal direction (only for 'explore').

        while not done:
            if FLAGS.dataset_type == 'explore':
                # Sample a random direction every 10 steps.
                if step % 10 == 0:
                    cur_subgoal_dir = np.random.randn(2)
                    cur_subgoal_dir = cur_subgoal_dir / (np.linalg.norm(cur_subgoal_dir) + 1e-6)
                subgoal_dir = cur_subgoal_dir
            else:
                # Get the oracle subgoal and compute the direction.
                subgoal_xy, _ = env.unwrapped.get_oracle_subgoal(env.unwrapped.get_xy(), env.unwrapped.cur_goal_xy)
                subgoal_dir = subgoal_xy - env.unwrapped.get_xy()
                subgoal_dir = subgoal_dir / (np.linalg.norm(subgoal_dir) + 1e-6)

            agent_ob = env.unwrapped.get_ob(ob_type='states')
            # Exclude the agent's position and add the subgoal direction.
            agent_ob = np.concatenate([agent_ob[2:], subgoal_dir])
            action = actor_fn(agent_ob, temperature=0)
            # Add Gaussian noise to the action.
            action = action + np.random.normal(0, FLAGS.noise, action.shape)
            action = np.clip(action, -1, 1)

            next_ob, reward, terminated, truncated, info = env.step(action)
            done = terminated or truncated
            success = info['success']

            # Sample a new goal state when the current goal is reached.
            if success and FLAGS.dataset_type == 'navigate':
                goal_ij = vertex_cells[np.random.randint(len(vertex_cells))]
                env.unwrapped.set_goal(goal_ij)

            dataset['observations'].append(ob)
            dataset['actions'].append(action)
            dataset['terminals'].append(done)
            dataset['qpos'].append(info['prev_qpos'])
            dataset['qvel'].append(info['prev_qvel'])

            ob = next_ob
            step += 1

        total_steps += step
        if ep_idx < num_train_episodes:
            total_train_steps += step

    print('Total steps:', total_steps)

    train_path = FLAGS.save_path
    val_path = FLAGS.save_path.replace('.npz', '-val.npz')

    # Split the dataset into training and validation sets.
    train_dataset = {}
    val_dataset = {}
    for k, v in dataset.items():
        if 'observations' in k and v[0].dtype == np.uint8:
            dtype = np.uint8
        elif k == 'terminals':
            dtype = bool
        else:
            dtype = np.float32
        train_dataset[k] = np.array(v[:total_train_steps], dtype=dtype)
        val_dataset[k] = np.array(v[total_train_steps:], dtype=dtype)

    for path, dataset in [(train_path, train_dataset), (val_path, val_dataset)]:
        np.savez_compressed(path, **dataset)


if __name__ == '__main__':
    app.run(main)
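The 'stitch' branch above labels every free cell with its BFS distance from init_ij and collects cells at distance adj_steps = 4 as candidate goals. A standalone toy sketch of the same labeling (the map below is made up; the outer wall ring makes explicit bounds checks unnecessary):

import numpy as np

maze_map = np.array([
    [1, 1, 1, 1, 1, 1],
    [1, 0, 0, 0, 0, 1],
    [1, 0, 1, 1, 0, 1],
    [1, 0, 0, 0, 0, 1],
    [1, 1, 1, 1, 1, 1],
])
init_ij = (1, 1)

bfs_map = np.full_like(maze_map, -1)
bfs_map[init_ij] = 0
queue = [init_ij]
while queue:
    i, j = queue.pop(0)
    for di, dj in [(-1, 0), (0, -1), (1, 0), (0, 1)]:
        ni, nj = i + di, j + dj
        if maze_map[ni, nj] == 0 and bfs_map[ni, nj] == -1:
            bfs_map[ni, nj] = bfs_map[i, j] + 1
            queue.append((ni, nj))

# Free cells now hold their BFS distance; walls stay at -1. Cells whose
# value equals 4 would be the candidate 'stitch' goals.
print(bfs_map)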
data_gen_scripts/generate_manipspace.py
ADDED
@@ -0,0 +1,204 @@
from collections import defaultdict

import gymnasium
import numpy as np
from absl import app, flags
from tqdm import trange

import ogbench.manipspace  # noqa
from ogbench.manipspace.oracles.markov.button_markov import ButtonMarkovOracle
from ogbench.manipspace.oracles.markov.cube_markov import CubeMarkovOracle
from ogbench.manipspace.oracles.markov.drawer_markov import DrawerMarkovOracle
from ogbench.manipspace.oracles.markov.window_markov import WindowMarkovOracle
from ogbench.manipspace.oracles.plan.button_plan import ButtonPlanOracle
from ogbench.manipspace.oracles.plan.cube_plan import CubePlanOracle
from ogbench.manipspace.oracles.plan.drawer_plan import DrawerPlanOracle
from ogbench.manipspace.oracles.plan.window_plan import WindowPlanOracle

FLAGS = flags.FLAGS

flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_string('env_name', 'cube-single-v0', 'Environment name.')
flags.DEFINE_string('dataset_type', 'play', 'Dataset type.')
flags.DEFINE_string('save_path', None, 'Save path.')
flags.DEFINE_float('noise', 0.1, 'Action noise level.')
flags.DEFINE_float('noise_smoothing', 0.5, 'Action noise smoothing level for PlanOracle.')
flags.DEFINE_float('min_norm', 0.4, 'Minimum action norm for MarkovOracle.')
flags.DEFINE_float('p_random_action', 0, 'Probability of selecting a random action.')
flags.DEFINE_integer('num_episodes', 1000, 'Number of episodes.')
flags.DEFINE_integer('max_episode_steps', 1001, 'Maximum number of steps in an episode.')


def main(_):
    assert FLAGS.dataset_type in ['play', 'noisy']
    # 'play': Use a non-Markovian oracle (PlanOracle) that follows a pre-computed plan.
    # 'noisy': Use a Markovian, closed-loop oracle (MarkovOracle) with Gaussian action noise.

    # Initialize environment.
    env = gymnasium.make(
        FLAGS.env_name,
        terminate_at_goal=False,
        mode='data_collection',
        max_episode_steps=FLAGS.max_episode_steps,
    )

    # Initialize oracles.
    oracle_type = 'plan' if FLAGS.dataset_type == 'play' else 'markov'
    has_button_states = hasattr(env.unwrapped, '_cur_button_states')
    if 'cube' in FLAGS.env_name:
        if oracle_type == 'markov':
            agents = {
                'cube': CubeMarkovOracle(env=env, min_norm=FLAGS.min_norm),
            }
        else:
            agents = {
                'cube': CubePlanOracle(env=env, noise=FLAGS.noise, noise_smoothing=FLAGS.noise_smoothing),
            }
    elif 'scene' in FLAGS.env_name:
        if oracle_type == 'markov':
            agents = {
                'cube': CubeMarkovOracle(env=env, min_norm=FLAGS.min_norm, max_step=100),
                'button': ButtonMarkovOracle(env=env, min_norm=FLAGS.min_norm),
                'drawer': DrawerMarkovOracle(env=env, min_norm=FLAGS.min_norm),
                'window': WindowMarkovOracle(env=env, min_norm=FLAGS.min_norm),
            }
        else:
            agents = {
                'cube': CubePlanOracle(env=env, noise=FLAGS.noise, noise_smoothing=FLAGS.noise_smoothing),
                'button': ButtonPlanOracle(env=env, noise=FLAGS.noise, noise_smoothing=FLAGS.noise_smoothing),
                'drawer': DrawerPlanOracle(env=env, noise=FLAGS.noise, noise_smoothing=FLAGS.noise_smoothing),
                'window': WindowPlanOracle(env=env, noise=FLAGS.noise, noise_smoothing=FLAGS.noise_smoothing),
            }
    elif 'puzzle' in FLAGS.env_name:
        if oracle_type == 'markov':
            agents = {
                'button': ButtonMarkovOracle(env=env, min_norm=FLAGS.min_norm, gripper_always_closed=True),
            }
        else:
            agents = {
                'button': ButtonPlanOracle(
                    env=env,
                    noise=FLAGS.noise,
                    noise_smoothing=FLAGS.noise_smoothing,
                    gripper_always_closed=True,
                ),
            }

    # Collect data.
    dataset = defaultdict(list)
    total_steps = 0
    total_train_steps = 0
    num_train_episodes = FLAGS.num_episodes
    num_val_episodes = FLAGS.num_episodes // 10
    for ep_idx in trange(num_train_episodes + num_val_episodes):
        # Use an additional while loop to handle rare cases with undesirable states (for the Scene environment).
        while True:
            ob, info = env.reset()

            # Set the cube stacking probability for this episode.
            if 'single' in FLAGS.env_name:
                p_stack = 0.0
            elif 'double' in FLAGS.env_name:
                p_stack = np.random.uniform(0.0, 0.25)
            elif 'triple' in FLAGS.env_name:
                p_stack = np.random.uniform(0.05, 0.35)
            elif 'quadruple' in FLAGS.env_name:
                p_stack = np.random.uniform(0.1, 0.5)
            else:
                p_stack = 0.5

            if oracle_type == 'markov':
                # Set the action noise level for this episode.
                xi = np.random.uniform(0, FLAGS.noise)

            agent = agents[info['privileged/target_task']]
            agent.reset(ob, info)

            done = False
            step = 0
            ep_qpos = []

            while not done:
                if np.random.rand() < FLAGS.p_random_action:
                    # Sample a random action.
                    action = env.action_space.sample()
                else:
                    # Get an action from the oracle.
                    action = agent.select_action(ob, info)
                    action = np.array(action)
                    if oracle_type == 'markov':
                        # Add Gaussian noise to the action.
                        action = action + np.random.normal(0, [xi, xi, xi, xi * 3, xi * 10], action.shape)
                action = np.clip(action, -1, 1)
                next_ob, reward, terminated, truncated, info = env.step(action)
                done = terminated or truncated

                if agent.done:
                    # Set a new task when the current task is done.
                    agent_ob, agent_info = env.unwrapped.set_new_target(p_stack=p_stack)
                    agent = agents[agent_info['privileged/target_task']]
                    agent.reset(agent_ob, agent_info)

                dataset['observations'].append(ob)
                dataset['actions'].append(action)
                dataset['terminals'].append(done)
                dataset['qpos'].append(info['prev_qpos'])
                dataset['qvel'].append(info['prev_qvel'])
                if has_button_states:
                    dataset['button_states'].append(info['prev_button_states'])
                ep_qpos.append(info['prev_qpos'])

                ob = next_ob
                step += 1

            if 'scene' in FLAGS.env_name:
                # Perform a health check. We want to ensure that the cube is always visible unless it's in the
                # drawer. Otherwise, the test-time goal images may become ambiguous.
                is_healthy = True
                ep_qpos = np.array(ep_qpos)
                block_xyzs = ep_qpos[:, 14:17]
                if (block_xyzs[:, 1] >= 0.29).any():
                    is_healthy = False  # Block goes too far right.
                if ((block_xyzs[:, 1] <= -0.3) & ((block_xyzs[:, 2] < 0.06) | (block_xyzs[:, 2] > 0.08))).any():
                    is_healthy = False  # Block goes too far left without being in the drawer.

                if is_healthy:
                    break
                else:
                    # Remove the last episode and retry.
                    print('Unhealthy episode, retrying...', flush=True)
                    for k in dataset.keys():
                        dataset[k] = dataset[k][:-step]
            else:
                break

        total_steps += step
        if ep_idx < num_train_episodes:
            total_train_steps += step

    print('Total steps:', total_steps)

    train_path = FLAGS.save_path
    val_path = FLAGS.save_path.replace('.npz', '-val.npz')

    # Split the dataset into training and validation sets.
    train_dataset = {}
    val_dataset = {}
    for k, v in dataset.items():
        if 'observations' in k and v[0].dtype == np.uint8:
            dtype = np.uint8
        elif k == 'terminals':
            dtype = bool
        elif k == 'button_states':
            dtype = np.int64
        else:
            dtype = np.float32
        train_dataset[k] = np.array(v[:total_train_steps], dtype=dtype)
        val_dataset[k] = np.array(v[total_train_steps:], dtype=dtype)

    for path, dataset in [(train_path, train_dataset), (val_path, val_dataset)]:
        np.savez_compressed(path, **dataset)


if __name__ == '__main__':
    app.run(main)
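In the 'noisy' branch above, the per-episode noise scale xi is expanded to the per-dimension vector [xi, xi, xi, xi * 3, xi * 10] before being passed to np.random.normal, so later action dimensions receive proportionally larger perturbations. A minimal sketch of that idiom in isolation (values are illustrative):

import numpy as np

xi = 0.05
action = np.zeros(5)  # Same shape as the 5-D manipulation action above.
# Heteroscedastic Gaussian noise: one standard deviation per dimension.
noise = np.random.normal(0, [xi, xi, xi, xi * 3, xi * 10], action.shape)
noisy_action = np.clip(action + noise, -1, 1)
print(noisy_action)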
data_gen_scripts/generate_powderworld.py
ADDED
@@ -0,0 +1,111 @@
from collections import defaultdict

import gymnasium
import numpy as np
from absl import app, flags
from tqdm import trange

import ogbench.powderworld  # noqa
from ogbench.powderworld.behaviors import FillBehavior, LineBehavior, SquareBehavior

FLAGS = flags.FLAGS

flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_string('env_name', 'powderworld-v0', 'Environment name.')
flags.DEFINE_string('dataset_type', 'play', 'Dataset type.')
flags.DEFINE_string('save_path', None, 'Save path.')
flags.DEFINE_integer('num_episodes', 1000, 'Number of episodes.')
flags.DEFINE_integer('max_episode_steps', 1001, 'Maximum number of steps in an episode.')
flags.DEFINE_float('p_random_action', 0.5, 'Probability of selecting a random action.')


def main(_):
    assert FLAGS.dataset_type in ['play']

    # Initialize environment.
    env = gymnasium.make(
        FLAGS.env_name,
        mode='data_collection',
        max_episode_steps=FLAGS.max_episode_steps,
    )
    env.reset()

    # Initialize agents.
    agents = [
        FillBehavior(env=env),
        LineBehavior(env=env),
        SquareBehavior(env=env),
    ]
    probs = np.array([1, 3, 3])  # Agent selection probabilities.
    probs = probs / probs.sum()

    # Collect data.
    dataset = defaultdict(list)
    total_steps = 0
    total_train_steps = 0
    num_train_episodes = FLAGS.num_episodes
    num_val_episodes = FLAGS.num_episodes // 10
    for ep_idx in trange(num_train_episodes + num_val_episodes):
        ob, info = env.reset()
        agent = np.random.choice(agents, p=probs)
        agent.reset(ob, info)

        done = False
        step = 0

        action_step = 0  # Action cycle counter (0, 1, 2).
        while not done:
            if action_step == 0:
                # Select a new semantic action every 3 steps.
                if np.random.rand() < FLAGS.p_random_action:
                    # Sample a random action.
                    semantic_action = env.unwrapped.sample_semantic_action()
                else:
                    # Get an action from the agent.
                    semantic_action = agent.select_action(ob, info)
            action = env.unwrapped.semantic_action_to_action(*semantic_action)
            next_ob, reward, terminated, truncated, info = env.step(action)
            done = terminated or truncated

            if agent.done and FLAGS.dataset_type == 'play':
                agent = np.random.choice(agents, p=probs)
                agent.reset(ob, info)

            dataset['observations'].append(ob)
            dataset['actions'].append(action)
            dataset['terminals'].append(done)

            ob = next_ob
            step += 1
            action_step = (action_step + 1) % 3

        total_steps += step
        if ep_idx < num_train_episodes:
            total_train_steps += step

    print('Total steps:', total_steps)

    train_path = FLAGS.save_path
    val_path = FLAGS.save_path.replace('.npz', '-val.npz')

    # Split the dataset into training and validation sets.
    train_dataset = {}
    val_dataset = {}
    for k, v in dataset.items():
        if 'observations' in k and v[0].dtype == np.uint8:
            dtype = np.uint8
        elif k == 'actions':
            dtype = np.int32
        elif k == 'terminals':
            dtype = bool
        else:
            dtype = np.float32
        train_dataset[k] = np.array(v[:total_train_steps], dtype=dtype)
        val_dataset[k] = np.array(v[total_train_steps:], dtype=dtype)

    for path, dataset in [(train_path, train_dataset), (val_path, val_dataset)]:
        np.savez_compressed(path, **dataset)


if __name__ == '__main__':
    app.run(main)
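All four generators split train from validation by step count rather than by episode index: transitions are appended in episode order, the first num_train_episodes episodes contribute exactly total_train_steps of them, and slicing at that offset separates the two splits. A toy illustration (the numbers are made up):

import numpy as np

steps_per_episode = [3, 2, 4]  # Two training episodes, one validation episode.
total_train_steps = sum(steps_per_episode[:2])  # 5

transitions = list(range(sum(steps_per_episode)))  # 9 flattened transitions.
train = np.array(transitions[:total_train_steps])
val = np.array(transitions[total_train_steps:])
print(train, val)  # [0 1 2 3 4] [5 6 7 8]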
data_gen_scripts/main_sac.py
ADDED
@@ -0,0 +1,201 @@
import json
import os
import random
import time

import jax
import numpy as np
import tqdm
import wandb
from absl import app, flags
from agents import agents
from ml_collections import config_flags
from online_env_utils import make_online_env
from utils.datasets import ReplayBuffer
from utils.evaluation import evaluate, flatten
from utils.flax_utils import restore_agent, save_agent
from utils.log_utils import CsvLogger, get_exp_name, get_flag_dict, get_wandb_video, setup_wandb
from viz_utils import visualize_trajs

FLAGS = flags.FLAGS

flags.DEFINE_string('run_group', 'Debug', 'Run group.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_string('env_name', 'online-ant-xy-v0', 'Environment name.')
flags.DEFINE_string('save_dir', 'exp/', 'Save directory.')
flags.DEFINE_string('restore_path', None, 'Restore path.')
flags.DEFINE_integer('restore_epoch', None, 'Restore epoch.')

flags.DEFINE_integer('seed_steps', 10000, 'Number of seed steps.')
flags.DEFINE_integer('train_steps', 1000000, 'Number of training steps.')
flags.DEFINE_integer('train_interval', 1, 'Train interval.')
flags.DEFINE_integer('num_epochs', 1, 'Number of updates per train interval.')
flags.DEFINE_integer('log_interval', 5000, 'Logging interval.')
flags.DEFINE_integer('eval_interval', 100000, 'Evaluation interval.')
flags.DEFINE_integer('save_interval', 1000000, 'Saving interval.')
flags.DEFINE_integer('reset_interval', 0, 'Full parameter reset interval.')
flags.DEFINE_integer('terminate_at_end', 0, 'Whether to set terminated=True when truncated=True.')

flags.DEFINE_integer('eval_episodes', 50, 'Number of episodes for each task.')
flags.DEFINE_float('eval_temperature', 0, 'Actor temperature for evaluation.')
flags.DEFINE_float('eval_gaussian', None, 'Action Gaussian noise for evaluation.')
flags.DEFINE_integer('video_episodes', 1, 'Number of video episodes for each task.')
flags.DEFINE_integer('video_frame_skip', 3, 'Frame skip for videos.')
flags.DEFINE_integer('eval_on_cpu', 1, 'Whether to evaluate on CPU.')

config_flags.DEFINE_config_file('agent', '../impls/agents/sac.py', lock_config=False)


def main(_):
    # Set up logger.
    exp_name = get_exp_name(FLAGS.seed)
    setup_wandb(project='OGBench', group=FLAGS.run_group, name=exp_name)

    FLAGS.save_dir = os.path.join(FLAGS.save_dir, wandb.run.project, FLAGS.run_group, exp_name)
    os.makedirs(FLAGS.save_dir, exist_ok=True)
    flag_dict = get_flag_dict()
    with open(os.path.join(FLAGS.save_dir, 'flags.json'), 'w') as f:
        json.dump(flag_dict, f)

    config = FLAGS.agent

    # Set up environments and replay buffer.
    env = make_online_env(FLAGS.env_name)
    eval_env = make_online_env(FLAGS.env_name)

    example_transition = dict(
        observations=env.observation_space.sample(),
        actions=env.action_space.sample(),
        rewards=0.0,
        masks=1.0,
        next_observations=env.observation_space.sample(),
    )

    replay_buffer = ReplayBuffer.create(example_transition, size=int(1e6))

    # Initialize agent.
    random.seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)

    agent_class = agents[config['agent_name']]
    agent = agent_class.create(
        FLAGS.seed,
        example_transition['observations'],
        example_transition['actions'],
        config,
    )

    # Restore agent.
    if FLAGS.restore_path is not None:
        agent = restore_agent(agent, FLAGS.restore_path, FLAGS.restore_epoch)

    # Train agent.
    expl_metrics = dict()
    expl_rng = jax.random.PRNGKey(FLAGS.seed)
    ob, _ = env.reset()

    train_logger = CsvLogger(os.path.join(FLAGS.save_dir, 'train.csv'))
    eval_logger = CsvLogger(os.path.join(FLAGS.save_dir, 'eval.csv'))
    first_time = time.time()
    last_time = time.time()
    update_info = None
    for i in tqdm.tqdm(range(1, FLAGS.train_steps + 1), smoothing=0.1, dynamic_ncols=True):
        # Sample transition.
        if i < FLAGS.seed_steps:
            action = env.action_space.sample()
        else:
            expl_rng, key = jax.random.split(expl_rng)
            action = agent.sample_actions(observations=ob, seed=key)

        action = np.array(action)
        next_ob, reward, terminated, truncated, info = env.step(action)
        if FLAGS.terminate_at_end and truncated:
            terminated = True

        replay_buffer.add_transition(
            dict(
                observations=ob,
                actions=action,
                rewards=reward,
                masks=float(not terminated),
                next_observations=next_ob,
            )
        )
        ob = next_ob

        if terminated or truncated:
            expl_metrics = {f'exploration/{k}': np.mean(v) for k, v in flatten(info).items()}
            ob, _ = env.reset()

        if replay_buffer.size < FLAGS.seed_steps:
            continue

        # Update agent.
        if i % FLAGS.train_interval == 0:
            for _ in range(FLAGS.num_epochs):
                batch = replay_buffer.sample(config['batch_size'])
                agent, update_info = agent.update(batch)

        # Log metrics.
        if i % FLAGS.log_interval == 0 and update_info is not None:
            train_metrics = {f'training/{k}': v for k, v in update_info.items()}
            train_metrics['time/epoch_time'] = (time.time() - last_time) / FLAGS.log_interval
            train_metrics['time/total_time'] = time.time() - first_time
            train_metrics.update(expl_metrics)
            last_time = time.time()
            wandb.log(train_metrics, step=i)
            train_logger.log(train_metrics, step=i)

        # Evaluate agent.
        if i % FLAGS.eval_interval == 0:
            if FLAGS.eval_on_cpu:
                eval_agent = jax.device_put(agent, device=jax.devices('cpu')[0])
            else:
                eval_agent = agent
            eval_metrics = {}
            eval_info, trajs, renders = evaluate(
                agent=eval_agent,
                env=eval_env,
                task_id=None,
                config=config,
                num_eval_episodes=FLAGS.eval_episodes,
                num_video_episodes=FLAGS.video_episodes,
                video_frame_skip=FLAGS.video_frame_skip,
                eval_temperature=FLAGS.eval_temperature,
                eval_gaussian=FLAGS.eval_gaussian,
            )
            eval_metrics.update({f'evaluation/{k}': v for k, v in eval_info.items()})

            if FLAGS.video_episodes > 0:
                video = get_wandb_video(renders=renders)
                eval_metrics['video'] = video

            traj_image = visualize_trajs(FLAGS.env_name, trajs)
            if traj_image is not None:
                eval_metrics['traj'] = wandb.Image(traj_image)

            wandb.log(eval_metrics, step=i)
            eval_logger.log(eval_metrics, step=i)

        # Save agent.
        if i % FLAGS.save_interval == 0:
            save_agent(agent, FLAGS.save_dir, i)

        # Reset agent.
        if FLAGS.reset_interval > 0 and i % FLAGS.reset_interval == 0:
            new_agent = agent_class.create(
                FLAGS.seed + i,
                example_transition['observations'],
                example_transition['actions'],
                config,
            )
            agent = agent.replace(
                network=agent.network.replace(params=new_agent.network.params, opt_state=new_agent.network.opt_state)
            )
            del new_agent
    train_logger.close()
    eval_logger.close()


if __name__ == '__main__':
    app.run(main)
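The replay buffer above stores masks = float(not terminated), a common convention for distinguishing true terminations (no bootstrapping) from time-limit truncations (keep bootstrapping). A toy TD(0) backup showing the effect (gamma and the values are illustrative):

gamma = 0.99
next_v = 5.0

# Terminal transition: mask = 0.0 cuts the bootstrap term.
print(1.0 + gamma * 0.0 * next_v)  # 1.0

# Truncated (or ordinary) transition: mask = 1.0 keeps it.
print(1.0 + gamma * 1.0 * next_v)  # 5.95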
data_gen_scripts/online_env_utils.py
ADDED
@@ -0,0 +1,46 @@
import gymnasium
from utils.env_utils import EpisodeMonitor, setup_egl


def make_online_env(env_name):
    """Make online environment.

    If the environment name contains the '-xy' suffix, the environment will be wrapped with a directional locomotion
    wrapper. For example, 'online-ant-xy-v0' will return an 'online-ant-v0' environment wrapped with GymXYWrapper.

    Args:
        env_name: Name of the environment.
    """
    import ogbench.online_locomotion  # noqa

    setup_egl()

    # Manually recognize the '-xy' suffix, which indicates that the environment should be wrapped with a directional
    # locomotion wrapper.
    if '-xy' in env_name:
        env_name = env_name.replace('-xy', '')
        apply_xy_wrapper = True
    else:
        apply_xy_wrapper = False

    # Set camera.
    if 'humanoid' in env_name:
        extra_kwargs = dict(camera_id=0)
    else:
        extra_kwargs = dict()

    # Make environment.
    env = gymnasium.make(env_name, render_mode='rgb_array', height=200, width=200, **extra_kwargs)

    if apply_xy_wrapper:
        # Apply the directional locomotion wrapper.
        from ogbench.online_locomotion.wrappers import DMCHumanoidXYWrapper, GymXYWrapper

        if 'humanoid' in env_name:
            env = DMCHumanoidXYWrapper(env, resample_interval=200)
        else:
            env = GymXYWrapper(env, resample_interval=100)

    env = EpisodeMonitor(env)

    return env
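A usage sketch for the helper above, assuming ogbench.online_locomotion registers the 'online-ant-v0' id as the docstring implies: the '-xy' name routes through GymXYWrapper while the plain name does not.

from online_env_utils import make_online_env

# Directional variant: 'online-ant-v0' wrapped with GymXYWrapper (and EpisodeMonitor).
env = make_online_env('online-ant-xy-v0')
ob, _ = env.reset()

# Plain variant: no directional wrapper, still wrapped with EpisodeMonitor.
plain_env = make_online_env('online-ant-v0')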
data_gen_scripts/viz_utils.py
ADDED
@@ -0,0 +1,52 @@
import matplotlib
import numpy as np
from matplotlib import figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas


def get_2d_colors(points, min_point, max_point):
    """Get colors corresponding to 2-D points."""
    points = np.array(points)
    min_point = np.array(min_point)
    max_point = np.array(max_point)

    colors = (points - min_point) / (max_point - min_point)
    colors = np.hstack((colors, (2 - np.sum(colors, axis=1, keepdims=True)) / 2))
    colors = np.clip(colors, 0, 1)
    colors = np.c_[colors, np.full(len(colors), 0.8)]

    return colors


def visualize_trajs(env_name, trajs):
    """Visualize x-y trajectories in locomotion environments.

    It reads 'xy' and 'direction' from the 'info' field of the trajectories.
    """
    matplotlib.use('Agg')

    fig = figure.Figure(tight_layout=True)
    canvas = FigureCanvas(fig)
    if 'xy' in trajs[0]['info'][0]:
        ax = fig.add_subplot()

        max_xy = 0.0
        for traj in trajs:
            xy = np.array([info['xy'] for info in traj['info']])
            direction = np.array([info['direction'] for info in traj['info']])
            color = get_2d_colors(direction, [-1, -1], [1, 1])
            for i in range(len(xy) - 1):
                ax.plot(xy[i : i + 2, 0], xy[i : i + 2, 1], color=color[i], linewidth=0.7)
            max_xy = max(max_xy, np.abs(xy).max() * 1.2)

        plot_axis = [-max_xy, max_xy, -max_xy, max_xy]
        ax.axis(plot_axis)
        ax.set_aspect('equal')
    else:
        return None

    fig.tight_layout()
    canvas.draw()
    out_image = np.frombuffer(canvas.tostring_rgb(), dtype='uint8')
    out_image = out_image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    return out_image
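A quick usage sketch for get_2d_colors, assuming viz_utils is importable: each 2-D point is normalized into the unit square and mapped to an RGBA row with a fixed alpha of 0.8.

import numpy as np
from viz_utils import get_2d_colors

directions = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, -1.0]])
colors = get_2d_colors(directions, [-1, -1], [1, 1])
print(colors.shape)  # (3, 4): R, G, B plus the fixed alpha channel.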
impls/agents/__init__.py
ADDED
@@ -0,0 +1,17 @@
from agents.crl import CRLAgent
from agents.gcbc import GCBCAgent
from agents.gciql import GCIQLAgent
from agents.gcivl import GCIVLAgent
from agents.hiql import HIQLAgent
from agents.qrl import QRLAgent
from agents.sac import SACAgent

agents = dict(
    crl=CRLAgent,
    gcbc=GCBCAgent,
    gciql=GCIQLAgent,
    gcivl=GCIVLAgent,
    hiql=HIQLAgent,
    qrl=QRLAgent,
    sac=SACAgent,
)
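The module above doubles as a name-to-class registry, which is how main_sac.py resolves config['agent_name']. A minimal usage sketch:

from agents import agents

agent_class = agents['sac']  # Look up an agent class by name, e.g. SACAgent.
# agent = agent_class.create(seed, example_observations, example_actions, config)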
impls/agents/crl.py
ADDED
@@ -0,0 +1,338 @@
from typing import Any

import flax
import jax
import jax.numpy as jnp
import ml_collections
import optax
from utils.encoders import GCEncoder, encoder_modules
from utils.flax_utils import ModuleDict, TrainState, nonpytree_field
from utils.networks import GCActor, GCBilinearValue, GCDiscreteActor, GCDiscreteBilinearCritic


class CRLAgent(flax.struct.PyTreeNode):
    """Contrastive RL (CRL) agent.

    This implementation supports both AWR (actor_loss='awr') and DDPG+BC (actor_loss='ddpgbc') for the actor loss.
    CRL with DDPG+BC only fits a Q function, while CRL with AWR fits both Q and V functions to compute advantages.
    """

    rng: Any
    network: Any
    config: Any = nonpytree_field()

    def contrastive_loss(self, batch, grad_params, module_name='critic'):
        """Compute the contrastive value loss for the Q or V function."""
        batch_size = batch['observations'].shape[0]

        if module_name == 'critic':
            actions = batch['actions']
        else:
            actions = None
        v, phi, psi = self.network.select(module_name)(
            batch['observations'],
            batch['value_goals'],
            actions=actions,
            info=True,
            params=grad_params,
        )
        if len(phi.shape) == 2:  # Non-ensemble.
            phi = phi[None, ...]
            psi = psi[None, ...]
        logits = jnp.einsum('eik,ejk->ije', phi, psi) / jnp.sqrt(phi.shape[-1])
        # logits.shape is (B, B, e) with one term for the positive pair and (B - 1) terms for the negative pairs
        # in each row.
        I = jnp.eye(batch_size)
        contrastive_loss = jax.vmap(
            lambda _logits: optax.sigmoid_binary_cross_entropy(logits=_logits, labels=I),
            in_axes=-1,
            out_axes=-1,
        )(logits)
        contrastive_loss = jnp.mean(contrastive_loss)

        # Compute additional statistics.
        logits = jnp.mean(logits, axis=-1)
        correct = jnp.argmax(logits, axis=1) == jnp.argmax(I, axis=1)
        logits_pos = jnp.sum(logits * I) / jnp.sum(I)
        logits_neg = jnp.sum(logits * (1 - I)) / jnp.sum(1 - I)

        return contrastive_loss, {
            'contrastive_loss': contrastive_loss,
            'v_mean': v.mean(),
            'v_max': v.max(),
            'v_min': v.min(),
            'binary_accuracy': jnp.mean((logits > 0) == I),
            'categorical_accuracy': jnp.mean(correct),
            'logits_pos': logits_pos,
            'logits_neg': logits_neg,
            'logits': logits.mean(),
        }

    def actor_loss(self, batch, grad_params, rng=None):
        """Compute the actor loss (AWR or DDPG+BC)."""
        # Maximize log Q if actor_log_q is True (which is the default).
        if self.config['actor_log_q']:

            def value_transform(x):
                return jnp.log(jnp.maximum(x, 1e-6))
        else:

            def value_transform(x):
                return x

        if self.config['actor_loss'] == 'awr':
            # AWR loss.
            v = value_transform(self.network.select('value')(batch['observations'], batch['actor_goals']))
            q1, q2 = value_transform(
                self.network.select('critic')(batch['observations'], batch['actor_goals'], batch['actions'])
            )
            q = jnp.minimum(q1, q2)
            adv = q - v

            exp_a = jnp.exp(adv * self.config['alpha'])
            exp_a = jnp.minimum(exp_a, 100.0)

            dist = self.network.select('actor')(batch['observations'], batch['actor_goals'], params=grad_params)
            log_prob = dist.log_prob(batch['actions'])

            actor_loss = -(exp_a * log_prob).mean()

            actor_info = {
                'actor_loss': actor_loss,
                'adv': adv.mean(),
                'bc_log_prob': log_prob.mean(),
            }
            if not self.config['discrete']:
                actor_info.update(
                    {
                        'mse': jnp.mean((dist.mode() - batch['actions']) ** 2),
                        'std': jnp.mean(dist.scale_diag),
                    }
                )

            return actor_loss, actor_info
        elif self.config['actor_loss'] == 'ddpgbc':
            # DDPG+BC loss.
            assert not self.config['discrete']

            dist = self.network.select('actor')(batch['observations'], batch['actor_goals'], params=grad_params)
            if self.config['const_std']:
                q_actions = jnp.clip(dist.mode(), -1, 1)
            else:
                q_actions = jnp.clip(dist.sample(seed=rng), -1, 1)
            q1, q2 = value_transform(
                self.network.select('critic')(batch['observations'], batch['actor_goals'], q_actions)
            )
            q = jnp.minimum(q1, q2)

            # Normalize Q values by the absolute mean to make the loss scale invariant.
            q_loss = -q.mean() / jax.lax.stop_gradient(jnp.abs(q).mean() + 1e-6)
            log_prob = dist.log_prob(batch['actions'])

            bc_loss = -(self.config['alpha'] * log_prob).mean()

            actor_loss = q_loss + bc_loss

            return actor_loss, {
                'actor_loss': actor_loss,
                'q_loss': q_loss,
                'bc_loss': bc_loss,
                'q_mean': q.mean(),
                'q_abs_mean': jnp.abs(q).mean(),
                'bc_log_prob': log_prob.mean(),
                'mse': jnp.mean((dist.mode() - batch['actions']) ** 2),
                'std': jnp.mean(dist.scale_diag),
            }
        else:
            raise ValueError(f'Unsupported actor loss: {self.config["actor_loss"]}')

    @jax.jit
    def total_loss(self, batch, grad_params, rng=None):
        """Compute the total loss."""
        info = {}
        rng = rng if rng is not None else self.rng

        critic_loss, critic_info = self.contrastive_loss(batch, grad_params, 'critic')
        for k, v in critic_info.items():
            info[f'critic/{k}'] = v

        if self.config['actor_loss'] == 'awr':
            value_loss, value_info = self.contrastive_loss(batch, grad_params, 'value')
            for k, v in value_info.items():
                info[f'value/{k}'] = v
        else:
            value_loss = 0.0

        rng, actor_rng = jax.random.split(rng)
        actor_loss, actor_info = self.actor_loss(batch, grad_params, actor_rng)
        for k, v in actor_info.items():
            info[f'actor/{k}'] = v

        loss = critic_loss + value_loss + actor_loss
        return loss, info

    @jax.jit
    def update(self, batch):
        """Update the agent and return a new agent with an information dictionary."""
        new_rng, rng = jax.random.split(self.rng)

        def loss_fn(grad_params):
            return self.total_loss(batch, grad_params, rng=rng)

        new_network, info = self.network.apply_loss_fn(loss_fn=loss_fn)

        return self.replace(network=new_network, rng=new_rng), info

    @jax.jit
    def sample_actions(
        self,
        observations,
        goals=None,
        seed=None,
        temperature=1.0,
    ):
        """Sample actions from the actor."""
        dist = self.network.select('actor')(observations, goals, temperature=temperature)
        actions = dist.sample(seed=seed)
        if not self.config['discrete']:
            actions = jnp.clip(actions, -1, 1)
        return actions

    @classmethod
    def create(
        cls,
        seed,
        ex_observations,
        ex_actions,
        config,
    ):
        """Create a new agent.

        Args:
            seed: Random seed.
            ex_observations: Example observations.
            ex_actions: Example batch of actions. In discrete-action MDPs, this should contain the maximum action
                value.
            config: Configuration dictionary.
        """
        rng = jax.random.PRNGKey(seed)
        rng, init_rng = jax.random.split(rng, 2)

        ex_goals = ex_observations
        if config['discrete']:
            action_dim = ex_actions.max() + 1
        else:
            action_dim = ex_actions.shape[-1]

        # Define encoders.
        encoders = dict()
        if config['encoder'] is not None:
            encoder_module = encoder_modules[config['encoder']]
            encoders['critic_state'] = encoder_module()
            encoders['critic_goal'] = encoder_module()
            encoders['actor'] = GCEncoder(concat_encoder=encoder_module())
            if config['actor_loss'] == 'awr':
                encoders['value_state'] = encoder_module()
                encoders['value_goal'] = encoder_module()

        # Define value and actor networks.
        if config['discrete']:
            critic_def = GCDiscreteBilinearCritic(
                hidden_dims=config['value_hidden_dims'],
                latent_dim=config['latent_dim'],
                layer_norm=config['layer_norm'],
                ensemble=True,
                value_exp=True,
                state_encoder=encoders.get('critic_state'),
                goal_encoder=encoders.get('critic_goal'),
                action_dim=action_dim,
            )
        else:
            critic_def = GCBilinearValue(
                hidden_dims=config['value_hidden_dims'],
                latent_dim=config['latent_dim'],
                layer_norm=config['layer_norm'],
                ensemble=True,
                value_exp=True,
                state_encoder=encoders.get('critic_state'),
                goal_encoder=encoders.get('critic_goal'),
            )

        if config['actor_loss'] == 'awr':
            # AWR requires a separate V network to compute advantages (Q - V).
            value_def = GCBilinearValue(
                hidden_dims=config['value_hidden_dims'],
                latent_dim=config['latent_dim'],
                layer_norm=config['layer_norm'],
                ensemble=False,
                value_exp=True,
                state_encoder=encoders.get('value_state'),
                goal_encoder=encoders.get('value_goal'),
            )

        if config['discrete']:
            actor_def = GCDiscreteActor(
                hidden_dims=config['actor_hidden_dims'],
                action_dim=action_dim,
                gc_encoder=encoders.get('actor'),
            )
        else:
            actor_def = GCActor(
                hidden_dims=config['actor_hidden_dims'],
                action_dim=action_dim,
                state_dependent_std=False,
                const_std=config['const_std'],
                gc_encoder=encoders.get('actor'),
            )

        network_info = dict(
            critic=(critic_def, (ex_observations, ex_goals, ex_actions)),
            actor=(actor_def, (ex_observations, ex_goals)),
        )
        if config['actor_loss'] == 'awr':
            network_info.update(
                value=(value_def, (ex_observations, ex_goals)),
            )
        networks = {k: v[0] for k, v in network_info.items()}
        network_args = {k: v[1] for k, v in network_info.items()}

        network_def = ModuleDict(networks)
        network_tx = optax.adam(learning_rate=config['lr'])
        network_params = network_def.init(init_rng, **network_args)['params']
        network = TrainState.create(network_def, network_params, tx=network_tx)
+
|
| 302 |
+
return cls(rng, network=network, config=flax.core.FrozenDict(**config))
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def get_config():
|
| 306 |
+
config = ml_collections.ConfigDict(
|
| 307 |
+
dict(
|
| 308 |
+
# Agent hyperparameters.
|
| 309 |
+
agent_name='crl', # Agent name.
|
| 310 |
+
lr=3e-4, # Learning rate.
|
| 311 |
+
batch_size=1024, # Batch size.
|
| 312 |
+
actor_hidden_dims=(512, 512, 512), # Actor network hidden dimensions.
|
| 313 |
+
value_hidden_dims=(512, 512, 512), # Value network hidden dimensions.
|
| 314 |
+
latent_dim=512, # Latent dimension for phi and psi.
|
| 315 |
+
layer_norm=True, # Whether to use layer normalization.
|
| 316 |
+
discount=0.99, # Discount factor.
|
| 317 |
+
actor_loss='ddpgbc', # Actor loss type ('awr' or 'ddpgbc').
|
| 318 |
+
alpha=0.1, # Temperature in AWR or BC coefficient in DDPG+BC.
|
| 319 |
+
actor_log_q=True, # Whether to maximize log Q (True) or Q itself (False) in the actor loss.
|
| 320 |
+
const_std=True, # Whether to use constant standard deviation for the actor.
|
| 321 |
+
discrete=False, # Whether the action space is discrete.
|
| 322 |
+
encoder=ml_collections.config_dict.placeholder(str), # Visual encoder name (None, 'impala_small', etc.).
|
| 323 |
+
# Dataset hyperparameters.
|
| 324 |
+
dataset_class='GCDataset', # Dataset class name.
|
| 325 |
+
value_p_curgoal=0.0, # Probability of using the current state as the value goal.
|
| 326 |
+
value_p_trajgoal=1.0, # Probability of using a future state in the same trajectory as the value goal.
|
| 327 |
+
value_p_randomgoal=0.0, # Probability of using a random state as the value goal.
|
| 328 |
+
value_geom_sample=True, # Whether to use geometric sampling for future value goals.
|
| 329 |
+
actor_p_curgoal=0.0, # Probability of using the current state as the actor goal.
|
| 330 |
+
actor_p_trajgoal=1.0, # Probability of using a future state in the same trajectory as the actor goal.
|
| 331 |
+
actor_p_randomgoal=0.0, # Probability of using a random state as the actor goal.
|
| 332 |
+
actor_geom_sample=False, # Whether to use geometric sampling for future actor goals.
|
| 333 |
+
gc_negative=False, # Unused (defined for compatibility with GCDataset).
|
| 334 |
+
p_aug=0.0, # Probability of applying image augmentation.
|
| 335 |
+
frame_stack=ml_collections.config_dict.placeholder(int), # Number of frames to stack.
|
| 336 |
+
)
|
| 337 |
+
)
|
| 338 |
+
return config
|
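A minimal standalone sketch (not part of this commit) of the scale-invariant DDPG+BC objective used in `actor_loss` above; the arrays below are arbitrary stand-ins for critic outputs and actor log-probabilities.

import jax
import jax.numpy as jnp

def ddpgbc_objective(q, log_prob, alpha=0.1):
    # Dividing by the stop-gradient mean |Q| keeps the gradient scale of the Q term
    # comparable across tasks, so a single `alpha` balances Q maximization and BC.
    q_loss = -q.mean() / jax.lax.stop_gradient(jnp.abs(q).mean() + 1e-6)
    bc_loss = -(alpha * log_prob).mean()
    return q_loss + bc_loss

q = jnp.array([1.0, 2.0, 3.0])
log_prob = jnp.array([-0.5, -0.7, -0.9])
# Rescaling Q by 1000x leaves the objective unchanged, up to numerical precision:
print(jnp.allclose(ddpgbc_objective(q, log_prob), ddpgbc_objective(1000.0 * q, log_prob)))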
impls/agents/gcbc.py
ADDED
@@ -0,0 +1,170 @@
from typing import Any

import flax
import jax
import jax.numpy as jnp
import ml_collections
import optax
from utils.encoders import GCEncoder, encoder_modules
from utils.flax_utils import ModuleDict, TrainState, nonpytree_field
from utils.networks import GCActor, GCDiscreteActor


class GCBCAgent(flax.struct.PyTreeNode):
    """Goal-conditioned behavioral cloning (GCBC) agent."""

    rng: Any
    network: Any
    config: Any = nonpytree_field()

    def actor_loss(self, batch, grad_params, rng=None):
        """Compute the BC actor loss."""
        dist = self.network.select('actor')(batch['observations'], batch['actor_goals'], params=grad_params)
        log_prob = dist.log_prob(batch['actions'])

        actor_loss = -log_prob.mean()

        actor_info = {
            'actor_loss': actor_loss,
            'bc_log_prob': log_prob.mean(),
        }
        if not self.config['discrete']:
            actor_info.update(
                {
                    'mse': jnp.mean((dist.mode() - batch['actions']) ** 2),
                    'std': jnp.mean(dist.scale_diag),
                }
            )

        return actor_loss, actor_info

    @jax.jit
    def total_loss(self, batch, grad_params, rng=None):
        """Compute the total loss."""
        info = {}
        rng = rng if rng is not None else self.rng

        rng, actor_rng = jax.random.split(rng)
        actor_loss, actor_info = self.actor_loss(batch, grad_params, actor_rng)
        for k, v in actor_info.items():
            info[f'actor/{k}'] = v

        loss = actor_loss
        return loss, info

    @jax.jit
    def update(self, batch):
        """Update the agent and return a new agent with information dictionary."""
        new_rng, rng = jax.random.split(self.rng)

        def loss_fn(grad_params):
            return self.total_loss(batch, grad_params, rng=rng)

        new_network, info = self.network.apply_loss_fn(loss_fn=loss_fn)

        return self.replace(network=new_network, rng=new_rng), info

    @jax.jit
    def sample_actions(
        self,
        observations,
        goals=None,
        seed=None,
        temperature=1.0,
    ):
        """Sample actions from the actor."""
        dist = self.network.select('actor')(observations, goals, temperature=temperature)
        actions = dist.sample(seed=seed)
        if not self.config['discrete']:
            actions = jnp.clip(actions, -1, 1)
        return actions

    @classmethod
    def create(
        cls,
        seed,
        ex_observations,
        ex_actions,
        config,
    ):
        """Create a new agent.

        Args:
            seed: Random seed.
            ex_observations: Example batch of observations.
            ex_actions: Example batch of actions. In discrete-action MDPs, this should contain the maximum action value.
            config: Configuration dictionary.
        """
        rng = jax.random.PRNGKey(seed)
        rng, init_rng = jax.random.split(rng, 2)

        ex_goals = ex_observations
        if config['discrete']:
            action_dim = ex_actions.max() + 1
        else:
            action_dim = ex_actions.shape[-1]

        # Define encoder.
        encoders = dict()
        if config['encoder'] is not None:
            encoder_module = encoder_modules[config['encoder']]
            encoders['actor'] = GCEncoder(concat_encoder=encoder_module())

        # Define actor network.
        if config['discrete']:
            actor_def = GCDiscreteActor(
                hidden_dims=config['actor_hidden_dims'],
                action_dim=action_dim,
                gc_encoder=encoders.get('actor'),
            )
        else:
            actor_def = GCActor(
                hidden_dims=config['actor_hidden_dims'],
                action_dim=action_dim,
                state_dependent_std=False,
                const_std=config['const_std'],
                gc_encoder=encoders.get('actor'),
            )

        network_info = dict(
            actor=(actor_def, (ex_observations, ex_goals)),
        )
        networks = {k: v[0] for k, v in network_info.items()}
        network_args = {k: v[1] for k, v in network_info.items()}

        network_def = ModuleDict(networks)
        network_tx = optax.adam(learning_rate=config['lr'])
        network_params = network_def.init(init_rng, **network_args)['params']
        network = TrainState.create(network_def, network_params, tx=network_tx)

        return cls(rng, network=network, config=flax.core.FrozenDict(**config))


def get_config():
    config = ml_collections.ConfigDict(
        dict(
            # Agent hyperparameters.
            agent_name='gcbc',  # Agent name.
            lr=3e-4,  # Learning rate.
            batch_size=1024,  # Batch size.
            actor_hidden_dims=(512, 512, 512),  # Actor network hidden dimensions.
            discount=0.99,  # Discount factor (unused by default; can be used for geometric goal sampling in GCDataset).
            const_std=True,  # Whether to use constant standard deviation for the actor.
            discrete=False,  # Whether the action space is discrete.
            encoder=ml_collections.config_dict.placeholder(str),  # Visual encoder name (None, 'impala_small', etc.).
            # Dataset hyperparameters.
            dataset_class='GCDataset',  # Dataset class name.
            value_p_curgoal=0.0,  # Unused (defined for compatibility with GCDataset).
            value_p_trajgoal=1.0,  # Unused (defined for compatibility with GCDataset).
            value_p_randomgoal=0.0,  # Unused (defined for compatibility with GCDataset).
            value_geom_sample=False,  # Unused (defined for compatibility with GCDataset).
            actor_p_curgoal=0.0,  # Probability of using the current state as the actor goal.
            actor_p_trajgoal=1.0,  # Probability of using a future state in the same trajectory as the actor goal.
            actor_p_randomgoal=0.0,  # Probability of using a random state as the actor goal.
            actor_geom_sample=False,  # Whether to use geometric sampling for future actor goals.
            gc_negative=True,  # Unused (defined for compatibility with GCDataset).
            p_aug=0.0,  # Probability of applying image augmentation.
            frame_stack=ml_collections.config_dict.placeholder(int),  # Number of frames to stack.
        )
    )
    return config
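A hedged usage sketch (not part of the commit): the shapes below are arbitrary, and the random batch stands in for a sample from GCDataset containing the keys this agent's loss actually reads.

import numpy as np

config = get_config()
ex_observations = np.zeros((1, 29), dtype=np.float32)  # Arbitrary state dimension.
ex_actions = np.zeros((1, 8), dtype=np.float32)        # Arbitrary action dimension.
agent = GCBCAgent.create(seed=0, ex_observations=ex_observations, ex_actions=ex_actions, config=config)

batch = {
    'observations': np.random.randn(256, 29).astype(np.float32),
    'actor_goals': np.random.randn(256, 29).astype(np.float32),
    'actions': np.random.uniform(-1, 1, size=(256, 8)).astype(np.float32),
}
agent, info = agent.update(batch)  # One gradient step; `info` holds 'actor/actor_loss' etc.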
impls/agents/gciql.py
ADDED
@@ -0,0 +1,309 @@
import copy
from typing import Any

import flax
import jax
import jax.numpy as jnp
import ml_collections
import optax
from utils.encoders import GCEncoder, encoder_modules
from utils.flax_utils import ModuleDict, TrainState, nonpytree_field
from utils.networks import GCActor, GCDiscreteActor, GCDiscreteCritic, GCValue


class GCIQLAgent(flax.struct.PyTreeNode):
    """Goal-conditioned implicit Q-learning (GCIQL) agent.

    This implementation supports both AWR (actor_loss='awr') and DDPG+BC (actor_loss='ddpgbc') for the actor loss.
    """

    rng: Any
    network: Any
    config: Any = nonpytree_field()

    @staticmethod
    def expectile_loss(adv, diff, expectile):
        """Compute the expectile loss."""
        weight = jnp.where(adv >= 0, expectile, (1 - expectile))
        return weight * (diff**2)

    def value_loss(self, batch, grad_params):
        """Compute the IQL value loss."""
        q1, q2 = self.network.select('target_critic')(batch['observations'], batch['value_goals'], batch['actions'])
        q = jnp.minimum(q1, q2)
        v = self.network.select('value')(batch['observations'], batch['value_goals'], params=grad_params)
        value_loss = self.expectile_loss(q - v, q - v, self.config['expectile']).mean()

        return value_loss, {
            'value_loss': value_loss,
            'v_mean': v.mean(),
            'v_max': v.max(),
            'v_min': v.min(),
        }

    def critic_loss(self, batch, grad_params):
        """Compute the IQL critic loss."""
        next_v = self.network.select('value')(batch['next_observations'], batch['value_goals'])
        q = batch['rewards'] + self.config['discount'] * batch['masks'] * next_v

        q1, q2 = self.network.select('critic')(
            batch['observations'], batch['value_goals'], batch['actions'], params=grad_params
        )
        critic_loss = ((q1 - q) ** 2 + (q2 - q) ** 2).mean()

        return critic_loss, {
            'critic_loss': critic_loss,
            'q_mean': q.mean(),
            'q_max': q.max(),
            'q_min': q.min(),
        }

    def actor_loss(self, batch, grad_params, rng=None):
        """Compute the actor loss (AWR or DDPG+BC)."""
        if self.config['actor_loss'] == 'awr':
            # AWR loss.
            v = self.network.select('value')(batch['observations'], batch['actor_goals'])
            q1, q2 = self.network.select('critic')(batch['observations'], batch['actor_goals'], batch['actions'])
            q = jnp.minimum(q1, q2)
            adv = q - v

            exp_a = jnp.exp(adv * self.config['alpha'])
            exp_a = jnp.minimum(exp_a, 100.0)

            dist = self.network.select('actor')(batch['observations'], batch['actor_goals'], params=grad_params)
            log_prob = dist.log_prob(batch['actions'])

            actor_loss = -(exp_a * log_prob).mean()

            actor_info = {
                'actor_loss': actor_loss,
                'adv': adv.mean(),
                'bc_log_prob': log_prob.mean(),
            }
            if not self.config['discrete']:
                actor_info.update(
                    {
                        'mse': jnp.mean((dist.mode() - batch['actions']) ** 2),
                        'std': jnp.mean(dist.scale_diag),
                    }
                )

            return actor_loss, actor_info
        elif self.config['actor_loss'] == 'ddpgbc':
            # DDPG+BC loss.
            assert not self.config['discrete']

            dist = self.network.select('actor')(batch['observations'], batch['actor_goals'], params=grad_params)
            if self.config['const_std']:
                q_actions = jnp.clip(dist.mode(), -1, 1)
            else:
                q_actions = jnp.clip(dist.sample(seed=rng), -1, 1)
            q1, q2 = self.network.select('critic')(batch['observations'], batch['actor_goals'], q_actions)
            q = jnp.minimum(q1, q2)

            # Normalize Q values by the absolute mean to make the loss scale invariant.
            q_loss = -q.mean() / jax.lax.stop_gradient(jnp.abs(q).mean() + 1e-6)
            log_prob = dist.log_prob(batch['actions'])

            bc_loss = -(self.config['alpha'] * log_prob).mean()

            actor_loss = q_loss + bc_loss

            return actor_loss, {
                'actor_loss': actor_loss,
                'q_loss': q_loss,
                'bc_loss': bc_loss,
                'q_mean': q.mean(),
                'q_abs_mean': jnp.abs(q).mean(),
                'bc_log_prob': log_prob.mean(),
                'mse': jnp.mean((dist.mode() - batch['actions']) ** 2),
                'std': jnp.mean(dist.scale_diag),
            }
        else:
            raise ValueError(f'Unsupported actor loss: {self.config["actor_loss"]}')

    @jax.jit
    def total_loss(self, batch, grad_params, rng=None):
        """Compute the total loss."""
        info = {}
        rng = rng if rng is not None else self.rng

        value_loss, value_info = self.value_loss(batch, grad_params)
        for k, v in value_info.items():
            info[f'value/{k}'] = v

        critic_loss, critic_info = self.critic_loss(batch, grad_params)
        for k, v in critic_info.items():
            info[f'critic/{k}'] = v

        rng, actor_rng = jax.random.split(rng)
        actor_loss, actor_info = self.actor_loss(batch, grad_params, actor_rng)
        for k, v in actor_info.items():
            info[f'actor/{k}'] = v

        loss = value_loss + critic_loss + actor_loss
        return loss, info

    def target_update(self, network, module_name):
        """Update the target network."""
        new_target_params = jax.tree_util.tree_map(
            lambda p, tp: p * self.config['tau'] + tp * (1 - self.config['tau']),
            self.network.params[f'modules_{module_name}'],
            self.network.params[f'modules_target_{module_name}'],
        )
        network.params[f'modules_target_{module_name}'] = new_target_params
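In isolation, the soft target update above is plain Polyak averaging; a small sketch (not in the commit):

import jax

def soft_update(online_params, target_params, tau=0.005):
    # With tau=0.005 the target tracks the online network slowly, which keeps the
    # TD targets in critic_loss from chasing a fast-moving value estimate.
    return jax.tree_util.tree_map(lambda p, tp: tau * p + (1 - tau) * tp, online_params, target_params)

print(soft_update({'w': 1.0}, {'w': 0.0}))  # {'w': 0.005}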
    @jax.jit
    def update(self, batch):
        """Update the agent and return a new agent with information dictionary."""
        new_rng, rng = jax.random.split(self.rng)

        def loss_fn(grad_params):
            return self.total_loss(batch, grad_params, rng=rng)

        new_network, info = self.network.apply_loss_fn(loss_fn=loss_fn)
        self.target_update(new_network, 'critic')

        return self.replace(network=new_network, rng=new_rng), info

    @jax.jit
    def sample_actions(
        self,
        observations,
        goals=None,
        seed=None,
        temperature=1.0,
    ):
        """Sample actions from the actor."""
        dist = self.network.select('actor')(observations, goals, temperature=temperature)
        actions = dist.sample(seed=seed)
        if not self.config['discrete']:
            actions = jnp.clip(actions, -1, 1)
        return actions

    @classmethod
    def create(
        cls,
        seed,
        ex_observations,
        ex_actions,
        config,
    ):
        """Create a new agent.

        Args:
            seed: Random seed.
            ex_observations: Example observations.
            ex_actions: Example batch of actions. In discrete-action MDPs, this should contain the maximum action value.
            config: Configuration dictionary.
        """
        rng = jax.random.PRNGKey(seed)
        rng, init_rng = jax.random.split(rng, 2)

        ex_goals = ex_observations
        if config['discrete']:
            action_dim = ex_actions.max() + 1
        else:
            action_dim = ex_actions.shape[-1]

        # Define encoders.
        encoders = dict()
        if config['encoder'] is not None:
            encoder_module = encoder_modules[config['encoder']]
            encoders['value'] = GCEncoder(concat_encoder=encoder_module())
            encoders['critic'] = GCEncoder(concat_encoder=encoder_module())
            encoders['actor'] = GCEncoder(concat_encoder=encoder_module())

        # Define value and actor networks.
        value_def = GCValue(
            hidden_dims=config['value_hidden_dims'],
            layer_norm=config['layer_norm'],
            ensemble=False,
            gc_encoder=encoders.get('value'),
        )

        if config['discrete']:
            critic_def = GCDiscreteCritic(
                hidden_dims=config['value_hidden_dims'],
                layer_norm=config['layer_norm'],
                ensemble=True,
                gc_encoder=encoders.get('critic'),
                action_dim=action_dim,
            )
        else:
            critic_def = GCValue(
                hidden_dims=config['value_hidden_dims'],
                layer_norm=config['layer_norm'],
                ensemble=True,
                gc_encoder=encoders.get('critic'),
            )

        if config['discrete']:
            actor_def = GCDiscreteActor(
                hidden_dims=config['actor_hidden_dims'],
                action_dim=action_dim,
                gc_encoder=encoders.get('actor'),
            )
        else:
            actor_def = GCActor(
                hidden_dims=config['actor_hidden_dims'],
                action_dim=action_dim,
                state_dependent_std=False,
                const_std=config['const_std'],
                gc_encoder=encoders.get('actor'),
            )

        network_info = dict(
            value=(value_def, (ex_observations, ex_goals)),
            critic=(critic_def, (ex_observations, ex_goals, ex_actions)),
            target_critic=(copy.deepcopy(critic_def), (ex_observations, ex_goals, ex_actions)),
            actor=(actor_def, (ex_observations, ex_goals)),
        )
        networks = {k: v[0] for k, v in network_info.items()}
        network_args = {k: v[1] for k, v in network_info.items()}

        network_def = ModuleDict(networks)
        network_tx = optax.adam(learning_rate=config['lr'])
        network_params = network_def.init(init_rng, **network_args)['params']
        network = TrainState.create(network_def, network_params, tx=network_tx)

        params = network_params
        params['modules_target_critic'] = params['modules_critic']

        return cls(rng, network=network, config=flax.core.FrozenDict(**config))


def get_config():
    config = ml_collections.ConfigDict(
        dict(
            # Agent hyperparameters.
            agent_name='gciql',  # Agent name.
            lr=3e-4,  # Learning rate.
            batch_size=1024,  # Batch size.
            actor_hidden_dims=(512, 512, 512),  # Actor network hidden dimensions.
            value_hidden_dims=(512, 512, 512),  # Value network hidden dimensions.
            layer_norm=True,  # Whether to use layer normalization.
            discount=0.99,  # Discount factor.
            tau=0.005,  # Target network update rate.
            expectile=0.9,  # IQL expectile.
            actor_loss='ddpgbc',  # Actor loss type ('awr' or 'ddpgbc').
            alpha=0.3,  # Temperature in AWR or BC coefficient in DDPG+BC.
            const_std=True,  # Whether to use constant standard deviation for the actor.
            discrete=False,  # Whether the action space is discrete.
            encoder=ml_collections.config_dict.placeholder(str),  # Visual encoder name (None, 'impala_small', etc.).
            # Dataset hyperparameters.
            dataset_class='GCDataset',  # Dataset class name.
            value_p_curgoal=0.2,  # Probability of using the current state as the value goal.
            value_p_trajgoal=0.5,  # Probability of using a future state in the same trajectory as the value goal.
            value_p_randomgoal=0.3,  # Probability of using a random state as the value goal.
            value_geom_sample=True,  # Whether to use geometric sampling for future value goals.
            actor_p_curgoal=0.0,  # Probability of using the current state as the actor goal.
            actor_p_trajgoal=1.0,  # Probability of using a future state in the same trajectory as the actor goal.
            actor_p_randomgoal=0.0,  # Probability of using a random state as the actor goal.
            actor_geom_sample=False,  # Whether to use geometric sampling for future actor goals.
            gc_negative=True,  # Whether to use '0 if s == g else -1' (True) or '1 if s == g else 0' (False) as reward.
            p_aug=0.0,  # Probability of applying image augmentation.
            frame_stack=ml_collections.config_dict.placeholder(int),  # Number of frames to stack.
        )
    )
    return config
impls/agents/gcivl.py
ADDED
@@ -0,0 +1,255 @@
import copy
from typing import Any

import flax
import jax
import jax.numpy as jnp
import ml_collections
import optax
from utils.encoders import GCEncoder, encoder_modules
from utils.flax_utils import ModuleDict, TrainState, nonpytree_field
from utils.networks import GCActor, GCDiscreteActor, GCValue


class GCIVLAgent(flax.struct.PyTreeNode):
    """Goal-conditioned implicit V-learning (GCIVL) agent.

    This is a variant of GCIQL that only uses a V function, without Q functions.
    """

    rng: Any
    network: Any
    config: Any = nonpytree_field()

    @staticmethod
    def expectile_loss(adv, diff, expectile):
        """Compute the expectile loss."""
        weight = jnp.where(adv >= 0, expectile, (1 - expectile))
        return weight * (diff**2)

    def value_loss(self, batch, grad_params):
        """Compute the IVL value loss.

        This value loss is similar to the original IQL value loss, but involves additional tricks to stabilize training.
        For example, when computing the expectile loss, we separate the advantage part (which is used to compute the
        weight) and the difference part (which is used to compute the loss), where we use the target value function to
        compute the former and the current value function to compute the latter. This is similar to how double DQN
        mitigates overestimation bias.
        """
        (next_v1_t, next_v2_t) = self.network.select('target_value')(batch['next_observations'], batch['value_goals'])
        next_v_t = jnp.minimum(next_v1_t, next_v2_t)
        q = batch['rewards'] + self.config['discount'] * batch['masks'] * next_v_t

        (v1_t, v2_t) = self.network.select('target_value')(batch['observations'], batch['value_goals'])
        v_t = (v1_t + v2_t) / 2
        adv = q - v_t

        q1 = batch['rewards'] + self.config['discount'] * batch['masks'] * next_v1_t
        q2 = batch['rewards'] + self.config['discount'] * batch['masks'] * next_v2_t
        (v1, v2) = self.network.select('value')(batch['observations'], batch['value_goals'], params=grad_params)
        v = (v1 + v2) / 2

        value_loss1 = self.expectile_loss(adv, q1 - v1, self.config['expectile']).mean()
        value_loss2 = self.expectile_loss(adv, q2 - v2, self.config['expectile']).mean()
        value_loss = value_loss1 + value_loss2

        return value_loss, {
            'value_loss': value_loss,
            'v_mean': v.mean(),
            'v_max': v.max(),
            'v_min': v.min(),
        }
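A sketch (not in the commit) of the targets above, isolating the double-DQN-style decoupling the docstring describes: the expectile weight comes from target-network values only, while the regressed differences use the online ensemble members.

import jax.numpy as jnp

def ivl_targets(r, mask, gamma, next_v1_t, next_v2_t, v1_t, v2_t):
    # Advantage from target values alone (sets the expectile weight).
    adv = r + gamma * mask * jnp.minimum(next_v1_t, next_v2_t) - (v1_t + v2_t) / 2
    # Per-member TD targets regressed by the online ensemble (sets the loss).
    q1 = r + gamma * mask * next_v1_t
    q2 = r + gamma * mask * next_v2_t
    return adv, q1, q2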
    def actor_loss(self, batch, grad_params, rng=None):
        """Compute the AWR actor loss."""
        v1, v2 = self.network.select('value')(batch['observations'], batch['actor_goals'])
        nv1, nv2 = self.network.select('value')(batch['next_observations'], batch['actor_goals'])
        v = (v1 + v2) / 2
        nv = (nv1 + nv2) / 2
        adv = nv - v

        exp_a = jnp.exp(adv * self.config['alpha'])
        exp_a = jnp.minimum(exp_a, 100.0)

        dist = self.network.select('actor')(batch['observations'], batch['actor_goals'], params=grad_params)
        log_prob = dist.log_prob(batch['actions'])

        actor_loss = -(exp_a * log_prob).mean()

        actor_info = {
            'actor_loss': actor_loss,
            'adv': adv.mean(),
            'bc_log_prob': log_prob.mean(),
        }
        if not self.config['discrete']:
            actor_info.update(
                {
                    'mse': jnp.mean((dist.mode() - batch['actions']) ** 2),
                    'std': jnp.mean(dist.scale_diag),
                }
            )

        return actor_loss, actor_info

    @jax.jit
    def total_loss(self, batch, grad_params, rng=None):
        """Compute the total loss."""
        info = {}
        rng = rng if rng is not None else self.rng

        value_loss, value_info = self.value_loss(batch, grad_params)
        for k, v in value_info.items():
            info[f'value/{k}'] = v

        rng, actor_rng = jax.random.split(rng)
        actor_loss, actor_info = self.actor_loss(batch, grad_params, actor_rng)
        for k, v in actor_info.items():
            info[f'actor/{k}'] = v

        loss = value_loss + actor_loss
        return loss, info

    def target_update(self, network, module_name):
        """Update the target network."""
        new_target_params = jax.tree_util.tree_map(
            lambda p, tp: p * self.config['tau'] + tp * (1 - self.config['tau']),
            self.network.params[f'modules_{module_name}'],
            self.network.params[f'modules_target_{module_name}'],
        )
        network.params[f'modules_target_{module_name}'] = new_target_params

    @jax.jit
    def update(self, batch):
        """Update the agent and return a new agent with information dictionary."""
        new_rng, rng = jax.random.split(self.rng)

        def loss_fn(grad_params):
            return self.total_loss(batch, grad_params, rng=rng)

        new_network, info = self.network.apply_loss_fn(loss_fn=loss_fn)
        self.target_update(new_network, 'value')

        return self.replace(network=new_network, rng=new_rng), info

    @jax.jit
    def sample_actions(
        self,
        observations,
        goals=None,
        seed=None,
        temperature=1.0,
    ):
        """Sample actions from the actor."""
        dist = self.network.select('actor')(observations, goals, temperature=temperature)
        actions = dist.sample(seed=seed)
        if not self.config['discrete']:
            actions = jnp.clip(actions, -1, 1)
        return actions

    @classmethod
    def create(
        cls,
        seed,
        ex_observations,
        ex_actions,
        config,
    ):
        """Create a new agent.

        Args:
            seed: Random seed.
            ex_observations: Example observations.
            ex_actions: Example batch of actions. In discrete-action MDPs, this should contain the maximum action value.
            config: Configuration dictionary.
        """
        rng = jax.random.PRNGKey(seed)
        rng, init_rng = jax.random.split(rng, 2)

        ex_goals = ex_observations
        if config['discrete']:
            action_dim = ex_actions.max() + 1
        else:
            action_dim = ex_actions.shape[-1]

        # Define encoders.
        encoders = dict()
        if config['encoder'] is not None:
            encoder_module = encoder_modules[config['encoder']]
            encoders['value'] = GCEncoder(concat_encoder=encoder_module())
            encoders['actor'] = GCEncoder(concat_encoder=encoder_module())

        # Define value and actor networks.
        value_def = GCValue(
            hidden_dims=config['value_hidden_dims'],
            layer_norm=config['layer_norm'],
            ensemble=True,
            gc_encoder=encoders.get('value'),
        )

        if config['discrete']:
            actor_def = GCDiscreteActor(
                hidden_dims=config['actor_hidden_dims'],
                action_dim=action_dim,
                gc_encoder=encoders.get('actor'),
            )
        else:
            actor_def = GCActor(
                hidden_dims=config['actor_hidden_dims'],
                action_dim=action_dim,
                state_dependent_std=False,
                const_std=config['const_std'],
                gc_encoder=encoders.get('actor'),
            )

        network_info = dict(
            value=(value_def, (ex_observations, ex_goals)),
            target_value=(copy.deepcopy(value_def), (ex_observations, ex_goals)),
            actor=(actor_def, (ex_observations, ex_goals)),
        )
        networks = {k: v[0] for k, v in network_info.items()}
        network_args = {k: v[1] for k, v in network_info.items()}

        network_def = ModuleDict(networks)
        network_tx = optax.adam(learning_rate=config['lr'])
        network_params = network_def.init(init_rng, **network_args)['params']
        network = TrainState.create(network_def, network_params, tx=network_tx)

        params = network_params
        params['modules_target_value'] = params['modules_value']

        return cls(rng, network=network, config=flax.core.FrozenDict(**config))


def get_config():
    config = ml_collections.ConfigDict(
        dict(
            # Agent hyperparameters.
            agent_name='gcivl',  # Agent name.
            lr=3e-4,  # Learning rate.
            batch_size=1024,  # Batch size.
            actor_hidden_dims=(512, 512, 512),  # Actor network hidden dimensions.
            value_hidden_dims=(512, 512, 512),  # Value network hidden dimensions.
            layer_norm=True,  # Whether to use layer normalization.
            discount=0.99,  # Discount factor.
            tau=0.005,  # Target network update rate.
            expectile=0.9,  # IQL expectile.
            alpha=10.0,  # AWR temperature.
            const_std=True,  # Whether to use constant standard deviation for the actor.
            discrete=False,  # Whether the action space is discrete.
            encoder=ml_collections.config_dict.placeholder(str),  # Visual encoder name (None, 'impala_small', etc.).
            # Dataset hyperparameters.
            dataset_class='GCDataset',  # Dataset class name.
            value_p_curgoal=0.2,  # Probability of using the current state as the value goal.
            value_p_trajgoal=0.5,  # Probability of using a future state in the same trajectory as the value goal.
            value_p_randomgoal=0.3,  # Probability of using a random state as the value goal.
            value_geom_sample=True,  # Whether to use geometric sampling for future value goals.
            actor_p_curgoal=0.0,  # Probability of using the current state as the actor goal.
            actor_p_trajgoal=1.0,  # Probability of using a future state in the same trajectory as the actor goal.
            actor_p_randomgoal=0.0,  # Probability of using a random state as the actor goal.
            actor_geom_sample=False,  # Whether to use geometric sampling for future actor goals.
            gc_negative=True,  # Whether to use '0 if s == g else -1' (True) or '1 if s == g else 0' (False) as reward.
            p_aug=0.0,  # Probability of applying image augmentation.
            frame_stack=ml_collections.config_dict.placeholder(int),  # Number of frames to stack.
        )
    )
    return config
impls/agents/hiql.py
ADDED
@@ -0,0 +1,355 @@
from typing import Any

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
import ml_collections
import optax
from utils.encoders import GCEncoder, encoder_modules
from utils.flax_utils import ModuleDict, TrainState, nonpytree_field
from utils.networks import MLP, GCActor, GCDiscreteActor, GCValue, Identity, LengthNormalize


class HIQLAgent(flax.struct.PyTreeNode):
    """Hierarchical implicit Q-learning (HIQL) agent."""

    rng: Any
    network: Any
    config: Any = nonpytree_field()

    @staticmethod
    def expectile_loss(adv, diff, expectile):
        """Compute the expectile loss."""
        weight = jnp.where(adv >= 0, expectile, (1 - expectile))
        return weight * (diff**2)

    def value_loss(self, batch, grad_params):
        """Compute the IVL value loss.

        This value loss is similar to the original IQL value loss, but involves additional tricks to stabilize training.
        For example, when computing the expectile loss, we separate the advantage part (which is used to compute the
        weight) and the difference part (which is used to compute the loss), where we use the target value function to
        compute the former and the current value function to compute the latter. This is similar to how double DQN
        mitigates overestimation bias.
        """
        (next_v1_t, next_v2_t) = self.network.select('target_value')(batch['next_observations'], batch['value_goals'])
        next_v_t = jnp.minimum(next_v1_t, next_v2_t)
        q = batch['rewards'] + self.config['discount'] * batch['masks'] * next_v_t

        (v1_t, v2_t) = self.network.select('target_value')(batch['observations'], batch['value_goals'])
        v_t = (v1_t + v2_t) / 2
        adv = q - v_t

        q1 = batch['rewards'] + self.config['discount'] * batch['masks'] * next_v1_t
        q2 = batch['rewards'] + self.config['discount'] * batch['masks'] * next_v2_t
        (v1, v2) = self.network.select('value')(batch['observations'], batch['value_goals'], params=grad_params)
        v = (v1 + v2) / 2

        value_loss1 = self.expectile_loss(adv, q1 - v1, self.config['expectile']).mean()
        value_loss2 = self.expectile_loss(adv, q2 - v2, self.config['expectile']).mean()
        value_loss = value_loss1 + value_loss2

        return value_loss, {
            'value_loss': value_loss,
            'v_mean': v.mean(),
            'v_max': v.max(),
            'v_min': v.min(),
        }

    def low_actor_loss(self, batch, grad_params):
        """Compute the low-level actor loss."""
        v1, v2 = self.network.select('value')(batch['observations'], batch['low_actor_goals'])
        nv1, nv2 = self.network.select('value')(batch['next_observations'], batch['low_actor_goals'])
        v = (v1 + v2) / 2
        nv = (nv1 + nv2) / 2
        adv = nv - v

        exp_a = jnp.exp(adv * self.config['low_alpha'])
        exp_a = jnp.minimum(exp_a, 100.0)

        # Compute the goal representations of the subgoals.
        goal_reps = self.network.select('goal_rep')(
            jnp.concatenate([batch['observations'], batch['low_actor_goals']], axis=-1),
            params=grad_params,
        )
        if not self.config['low_actor_rep_grad']:
            # Stop gradients through the goal representations.
            goal_reps = jax.lax.stop_gradient(goal_reps)
        dist = self.network.select('low_actor')(batch['observations'], goal_reps, goal_encoded=True, params=grad_params)
        log_prob = dist.log_prob(batch['actions'])

        actor_loss = -(exp_a * log_prob).mean()

        actor_info = {
            'actor_loss': actor_loss,
            'adv': adv.mean(),
            'bc_log_prob': log_prob.mean(),
        }
        if not self.config['discrete']:
            actor_info.update(
                {
                    'mse': jnp.mean((dist.mode() - batch['actions']) ** 2),
                    'std': jnp.mean(dist.scale_diag),
                }
            )

        return actor_loss, actor_info

    def high_actor_loss(self, batch, grad_params):
        """Compute the high-level actor loss."""
        v1, v2 = self.network.select('value')(batch['observations'], batch['high_actor_goals'])
        nv1, nv2 = self.network.select('value')(batch['high_actor_targets'], batch['high_actor_goals'])
        v = (v1 + v2) / 2
        nv = (nv1 + nv2) / 2
        adv = nv - v

        exp_a = jnp.exp(adv * self.config['high_alpha'])
        exp_a = jnp.minimum(exp_a, 100.0)

        dist = self.network.select('high_actor')(batch['observations'], batch['high_actor_goals'], params=grad_params)
        target = self.network.select('goal_rep')(
            jnp.concatenate([batch['observations'], batch['high_actor_targets']], axis=-1)
        )
        log_prob = dist.log_prob(target)

        actor_loss = -(exp_a * log_prob).mean()

        return actor_loss, {
            'actor_loss': actor_loss,
            'adv': adv.mean(),
            'bc_log_prob': log_prob.mean(),
            'mse': jnp.mean((dist.mode() - target) ** 2),
            'std': jnp.mean(dist.scale_diag),
        }

    @jax.jit
    def total_loss(self, batch, grad_params, rng=None):
        """Compute the total loss."""
        info = {}

        value_loss, value_info = self.value_loss(batch, grad_params)
        for k, v in value_info.items():
            info[f'value/{k}'] = v

        low_actor_loss, low_actor_info = self.low_actor_loss(batch, grad_params)
        for k, v in low_actor_info.items():
            info[f'low_actor/{k}'] = v

        high_actor_loss, high_actor_info = self.high_actor_loss(batch, grad_params)
        for k, v in high_actor_info.items():
            info[f'high_actor/{k}'] = v

        loss = value_loss + low_actor_loss + high_actor_loss
        return loss, info

    def target_update(self, network, module_name):
        """Update the target network."""
        new_target_params = jax.tree_util.tree_map(
            lambda p, tp: p * self.config['tau'] + tp * (1 - self.config['tau']),
            self.network.params[f'modules_{module_name}'],
            self.network.params[f'modules_target_{module_name}'],
        )
        network.params[f'modules_target_{module_name}'] = new_target_params

    @jax.jit
    def update(self, batch):
        """Update the agent and return a new agent with information dictionary."""
        new_rng, rng = jax.random.split(self.rng)

        def loss_fn(grad_params):
            return self.total_loss(batch, grad_params, rng=rng)

        new_network, info = self.network.apply_loss_fn(loss_fn=loss_fn)
        self.target_update(new_network, 'value')

        return self.replace(network=new_network, rng=new_rng), info

    @jax.jit
    def sample_actions(
        self,
        observations,
        goals=None,
        seed=None,
        temperature=1.0,
    ):
        """Sample actions from the actor.

        It first queries the high-level actor to obtain subgoal representations, and then queries the low-level actor
        to obtain raw actions.
        """
        high_seed, low_seed = jax.random.split(seed)

        high_dist = self.network.select('high_actor')(observations, goals, temperature=temperature)
        goal_reps = high_dist.sample(seed=high_seed)
        goal_reps = goal_reps / jnp.linalg.norm(goal_reps, axis=-1, keepdims=True) * jnp.sqrt(goal_reps.shape[-1])

        low_dist = self.network.select('low_actor')(observations, goal_reps, goal_encoded=True, temperature=temperature)
        actions = low_dist.sample(seed=low_seed)

        if not self.config['discrete']:
            actions = jnp.clip(actions, -1, 1)
        return actions
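The renormalization of sampled goal representations above mirrors the LengthNormalize layer applied to phi([s; g]) in create() below; a sketch (not in the commit), assuming LengthNormalize scales vectors to norm sqrt(dim):

import jax.numpy as jnp

def length_normalize(x):
    # Scale each vector to norm sqrt(dim), matching the representation space the
    # low-level actor was trained on.
    return x / jnp.linalg.norm(x, axis=-1, keepdims=True) * jnp.sqrt(x.shape[-1])

z = jnp.array([[3.0, 4.0]])  # norm 5
print(jnp.linalg.norm(length_normalize(z), axis=-1))  # [1.4142135] == sqrt(2)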
    @classmethod
    def create(
        cls,
        seed,
        ex_observations,
        ex_actions,
        config,
    ):
        """Create a new agent.

        Args:
            seed: Random seed.
            ex_observations: Example observations.
            ex_actions: Example batch of actions. In discrete-action MDPs, this should contain the maximum action value.
            config: Configuration dictionary.
        """
        rng = jax.random.PRNGKey(seed)
        rng, init_rng = jax.random.split(rng, 2)

        ex_goals = ex_observations
        if config['discrete']:
            action_dim = ex_actions.max() + 1
        else:
            action_dim = ex_actions.shape[-1]

        # Define (state-dependent) subgoal representation phi([s; g]) that outputs a length-normalized vector.
        if config['encoder'] is not None:
            encoder_module = encoder_modules[config['encoder']]
            goal_rep_seq = [encoder_module()]
        else:
            goal_rep_seq = []
        goal_rep_seq.append(
            MLP(
                hidden_dims=(*config['value_hidden_dims'], config['rep_dim']),
                activate_final=False,
                layer_norm=config['layer_norm'],
            )
        )
        goal_rep_seq.append(LengthNormalize())
        goal_rep_def = nn.Sequential(goal_rep_seq)

        # Define the encoders that handle the inputs to the value and actor networks.
        # The subgoal representation phi([s; g]) is trained by the parameterized value function V(s, phi([s; g])).
        # The high-level actor predicts the subgoal representation phi([s; w]) for subgoal w given s and g.
        # The low-level actor predicts actions given the current state s and the subgoal representation phi([s; w]).
        if config['encoder'] is not None:
            # Pixel-based environments require visual encoders for state inputs, in addition to the pre-defined shared
            # encoder for subgoal representations.

            # Value: V(encoder^V(s), phi([s; g]))
            value_encoder_def = GCEncoder(state_encoder=encoder_module(), concat_encoder=goal_rep_def)
            target_value_encoder_def = GCEncoder(state_encoder=encoder_module(), concat_encoder=goal_rep_def)
            # Low-level actor: pi^l(. | encoder^l(s), phi([s; w]))
            low_actor_encoder_def = GCEncoder(state_encoder=encoder_module(), concat_encoder=goal_rep_def)
            # High-level actor: pi^h(. | encoder^h([s; g]))
            high_actor_encoder_def = GCEncoder(concat_encoder=encoder_module())
        else:
            # State-based environments only use the pre-defined shared encoder for subgoal representations.

            # Value: V(s, phi([s; g]))
            value_encoder_def = GCEncoder(state_encoder=Identity(), concat_encoder=goal_rep_def)
            target_value_encoder_def = GCEncoder(state_encoder=Identity(), concat_encoder=goal_rep_def)
            # Low-level actor: pi^l(. | s, phi([s; w]))
            low_actor_encoder_def = GCEncoder(state_encoder=Identity(), concat_encoder=goal_rep_def)
            # High-level actor: pi^h(. | s, g) (i.e., no encoder)
            high_actor_encoder_def = None

        # Define value and actor networks.
        value_def = GCValue(
            hidden_dims=config['value_hidden_dims'],
            layer_norm=config['layer_norm'],
            ensemble=True,
            gc_encoder=value_encoder_def,
        )
        target_value_def = GCValue(
            hidden_dims=config['value_hidden_dims'],
            layer_norm=config['layer_norm'],
            ensemble=True,
            gc_encoder=target_value_encoder_def,
        )

        if config['discrete']:
            low_actor_def = GCDiscreteActor(
                hidden_dims=config['actor_hidden_dims'],
                action_dim=action_dim,
                gc_encoder=low_actor_encoder_def,
            )
        else:
            low_actor_def = GCActor(
                hidden_dims=config['actor_hidden_dims'],
                action_dim=action_dim,
                state_dependent_std=False,
                const_std=config['const_std'],
                gc_encoder=low_actor_encoder_def,
            )

        high_actor_def = GCActor(
            hidden_dims=config['actor_hidden_dims'],
            action_dim=config['rep_dim'],
            state_dependent_std=False,
            const_std=config['const_std'],
            gc_encoder=high_actor_encoder_def,
        )

        network_info = dict(
            goal_rep=(goal_rep_def, (jnp.concatenate([ex_observations, ex_goals], axis=-1))),
            value=(value_def, (ex_observations, ex_goals)),
            target_value=(target_value_def, (ex_observations, ex_goals)),
            low_actor=(low_actor_def, (ex_observations, ex_goals)),
            high_actor=(high_actor_def, (ex_observations, ex_goals)),
        )
        networks = {k: v[0] for k, v in network_info.items()}
        network_args = {k: v[1] for k, v in network_info.items()}

        network_def = ModuleDict(networks)
        network_tx = optax.adam(learning_rate=config['lr'])
        network_params = network_def.init(init_rng, **network_args)['params']
        network = TrainState.create(network_def, network_params, tx=network_tx)

        params = network.params
        params['modules_target_value'] = params['modules_value']

        return cls(rng, network=network, config=flax.core.FrozenDict(**config))


def get_config():
    config = ml_collections.ConfigDict(
        dict(
            # Agent hyperparameters.
            agent_name='hiql',  # Agent name.
            lr=3e-4,  # Learning rate.
            batch_size=1024,  # Batch size.
            actor_hidden_dims=(512, 512, 512),  # Actor network hidden dimensions.
            value_hidden_dims=(512, 512, 512),  # Value network hidden dimensions.
            layer_norm=True,  # Whether to use layer normalization.
            discount=0.99,  # Discount factor.
            tau=0.005,  # Target network update rate.
            expectile=0.7,  # IQL expectile.
            low_alpha=3.0,  # Low-level AWR temperature.
            high_alpha=3.0,  # High-level AWR temperature.
            subgoal_steps=25,  # Subgoal steps.
            rep_dim=10,  # Goal representation dimension.
            low_actor_rep_grad=False,  # Whether low-actor gradients flow to goal representation (use True for pixels).
            const_std=True,  # Whether to use constant standard deviation for the actors.
            discrete=False,  # Whether the action space is discrete.
            encoder=ml_collections.config_dict.placeholder(str),  # Visual encoder name (None, 'impala_small', etc.).
            # Dataset hyperparameters.
            dataset_class='HGCDataset',  # Dataset class name.
            value_p_curgoal=0.2,  # Probability of using the current state as the value goal.
            value_p_trajgoal=0.5,  # Probability of using a future state in the same trajectory as the value goal.
            value_p_randomgoal=0.3,  # Probability of using a random state as the value goal.
            value_geom_sample=True,  # Whether to use geometric sampling for future value goals.
|
| 346 |
+
actor_p_curgoal=0.0, # Probability of using the current state as the actor goal.
|
| 347 |
+
actor_p_trajgoal=1.0, # Probability of using a future state in the same trajectory as the actor goal.
|
| 348 |
+
actor_p_randomgoal=0.0, # Probability of using a random state as the actor goal.
|
| 349 |
+
actor_geom_sample=False, # Whether to use geometric sampling for future actor goals.
|
| 350 |
+
gc_negative=True, # Whether to use '0 if s == g else -1' (True) or '1 if s == g else 0' (False) as reward.
|
| 351 |
+
p_aug=0.0, # Probability of applying image augmentation.
|
| 352 |
+
frame_stack=ml_collections.config_dict.placeholder(int), # Number of frames to stack.
|
| 353 |
+
)
|
| 354 |
+
)
|
| 355 |
+
return config
|
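A minimal usage sketch tying `get_config()` and `create` together. It mirrors how `impls/main.py` (later in this commit) instantiates agents through the `agents` registry; the observation and action shapes are hypothetical placeholders:

    import numpy as np
    from agents import agents  # registry mapping agent_name -> agent class

    config = get_config()  # HIQL config defined above
    ex_observations = np.zeros((1, 29), dtype=np.float32)  # hypothetical state shape
    ex_actions = np.zeros((1, 8), dtype=np.float32)  # hypothetical action shape
    agent = agents[config['agent_name']].create(0, ex_observations, ex_actions, config)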
impls/agents/qrl.py
ADDED
@@ -0,0 +1,328 @@
from typing import Any

import flax
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import optax
from utils.encoders import GCEncoder, encoder_modules
from utils.flax_utils import ModuleDict, TrainState, nonpytree_field
from utils.networks import MLP, GCActor, GCDiscreteActor, GCIQEValue, GCMRNValue, LogParam


class QRLAgent(flax.struct.PyTreeNode):
    """Quasimetric RL (QRL) agent.

    This implementation supports the following variants:
    (1) Value parameterizations: IQE (quasimetric_type='iqe') and MRN (quasimetric_type='mrn').
    (2) Actor losses: AWR (actor_loss='awr') and latent dynamics-based DDPG+BC (actor_loss='ddpgbc').

    QRL with AWR only fits a quasimetric value function and an actor network. QRL with DDPG+BC fits a quasimetric
    value function, an actor network, and a latent dynamics model. The latent dynamics model is used to compute
    reparameterized gradients for the actor loss. The original implementation of QRL uses IQE and DDPG+BC.
    """

    rng: Any
    network: Any
    config: Any = nonpytree_field()

    def value_loss(self, batch, grad_params):
        """Compute the QRL value loss."""
        d_neg = self.network.select('value')(batch['observations'], batch['value_goals'], params=grad_params)
        d_pos = self.network.select('value')(batch['observations'], batch['next_observations'], params=grad_params)
        lam = self.network.select('lam')(params=grad_params)

        # Apply loss shaping following the original implementation.
        d_neg_loss = (100 * jax.nn.softplus(5 - d_neg / 100)).mean()
        d_pos_loss = (jax.nn.relu(d_pos - 1) ** 2).mean()

        value_loss = d_neg_loss + d_pos_loss * jax.lax.stop_gradient(lam)
        lam_loss = lam * (self.config['eps'] - jax.lax.stop_gradient(d_pos_loss))

        total_loss = value_loss + lam_loss

        return total_loss, {
            'total_loss': total_loss,
            'value_loss': value_loss,
            'lam_loss': lam_loss,
            'd_neg_loss': d_neg_loss,
            'd_neg_mean': d_neg.mean(),
            'd_neg_max': d_neg.max(),
            'd_neg_min': d_neg.min(),
            'd_pos_loss': d_pos_loss,
            'd_pos_mean': d_pos.mean(),
            'd_pos_max': d_pos.max(),
            'd_pos_min': d_pos.min(),
            'lam': lam,
        }
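    # An informal reading of the loss above (a sketch of what this code computes, not a quote of the QRL paper):
    # maximize the quasimetric distance d(s, g) to random goals, subject to one-step transitions staying short,
    # E[relu(d(s, s') - 1)^2] <= eps. `lam` is the learned Lagrange multiplier: `lam_loss` pushes lam up while the
    # constraint is violated (d_pos_loss > eps) and down otherwise, and `value_loss` weighs the constraint term by
    # stop_gradient(lam). The softplus shaping of d_neg is a smooth, saturating surrogate for -d_neg that keeps
    # gradients bounded at large distances.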

    def dynamics_loss(self, batch, grad_params):
        """Compute the dynamics loss."""
        _, ob_reps, next_ob_reps = self.network.select('value')(
            batch['observations'], batch['next_observations'], info=True, params=grad_params
        )
        # Dynamics model predicts the delta of the next observation.
        pred_next_ob_reps = ob_reps + self.network.select('dynamics')(
            jnp.concatenate([ob_reps, batch['actions']], axis=-1), params=grad_params
        )

        dist1 = self.network.select('value')(next_ob_reps, pred_next_ob_reps, is_phi=True, params=grad_params)
        dist2 = self.network.select('value')(pred_next_ob_reps, next_ob_reps, is_phi=True, params=grad_params)
        dynamics_loss = (dist1 + dist2).mean() / 2

        return dynamics_loss, {
            'dynamics_loss': dynamics_loss,
        }
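    # Note on the symmetrized distance above: a quasimetric is asymmetric in general (d(x, y) != d(y, x)), so the
    # dynamics loss averages both directions, penalizing the predicted next latent for being far from the true
    # next latent under either ordering.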

    def actor_loss(self, batch, grad_params, rng=None):
        """Compute the actor loss (AWR or DDPG+BC)."""
        if self.config['actor_loss'] == 'awr':
            # Compute AWR loss based on V(s', g) - V(s, g).
            v = -self.network.select('value')(batch['observations'], batch['actor_goals'])
            nv = -self.network.select('value')(batch['next_observations'], batch['actor_goals'])
            adv = nv - v

            exp_a = jnp.exp(adv * self.config['alpha'])
            exp_a = jnp.minimum(exp_a, 100.0)

            dist = self.network.select('actor')(batch['observations'], batch['actor_goals'], params=grad_params)
            log_prob = dist.log_prob(batch['actions'])

            actor_loss = -(exp_a * log_prob).mean()

            actor_info = {
                'actor_loss': actor_loss,
                'adv': adv.mean(),
                'bc_log_prob': log_prob.mean(),
            }
            if not self.config['discrete']:
                actor_info.update(
                    {
                        'mse': jnp.mean((dist.mode() - batch['actions']) ** 2),
                        'std': jnp.mean(dist.scale_diag),
                    }
                )

            return actor_loss, actor_info
        elif self.config['actor_loss'] == 'ddpgbc':
            # Compute DDPG+BC loss based on latent dynamics model.
            assert not self.config['discrete']

            dist = self.network.select('actor')(batch['observations'], batch['actor_goals'], params=grad_params)
            if self.config['const_std']:
                q_actions = jnp.clip(dist.mode(), -1, 1)
            else:
                q_actions = jnp.clip(dist.sample(seed=rng), -1, 1)

            _, ob_reps, goal_reps = self.network.select('value')(batch['observations'], batch['actor_goals'], info=True)
            pred_next_ob_reps = ob_reps + self.network.select('dynamics')(
                jnp.concatenate([ob_reps, q_actions], axis=-1)
            )
            q = -self.network.select('value')(pred_next_ob_reps, goal_reps, is_phi=True)

            # Normalize Q values by the absolute mean to make the loss scale invariant.
            q_loss = -q.mean() / jax.lax.stop_gradient(jnp.abs(q).mean() + 1e-6)
            log_prob = dist.log_prob(batch['actions'])

            bc_loss = -(self.config['alpha'] * log_prob).mean()

            actor_loss = q_loss + bc_loss

            return actor_loss, {
                'actor_loss': actor_loss,
                'q_loss': q_loss,
                'bc_loss': bc_loss,
                'q_mean': q.mean(),
                'q_abs_mean': jnp.abs(q).mean(),
                'bc_log_prob': log_prob.mean(),
                'mse': jnp.mean((dist.mode() - batch['actions']) ** 2),
                'std': jnp.mean(dist.scale_diag),
            }
        else:
            raise ValueError(f'Unsupported actor loss: {self.config["actor_loss"]}')
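    # Worked example of the AWR weighting above: with advantage adv = nv - v = 1.0 and a hypothetical alpha = 3.0,
    # the weight is exp(3.0 * 1.0) ~= 20.1 (clipped at 100), so actions whose outcomes move closer to the goal get
    # exponentially larger weights in the weighted behavioral cloning objective. In the DDPG+BC branch, the Q value
    # is instead estimated as the negative predicted distance -d(phi(s) + dynamics([phi(s); a]), phi(g)), which is
    # differentiable in the action and thus supports reparameterized gradients.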

    @jax.jit
    def total_loss(self, batch, grad_params, rng=None):
        """Compute the total loss."""
        info = {}
        rng = rng if rng is not None else self.rng

        value_loss, value_info = self.value_loss(batch, grad_params)
        for k, v in value_info.items():
            info[f'value/{k}'] = v

        if self.config['actor_loss'] == 'ddpgbc':
            dynamics_loss, dynamics_info = self.dynamics_loss(batch, grad_params)
            for k, v in dynamics_info.items():
                info[f'dynamics/{k}'] = v
        else:
            dynamics_loss = 0.0

        rng, actor_rng = jax.random.split(rng)
        actor_loss, actor_info = self.actor_loss(batch, grad_params, actor_rng)
        for k, v in actor_info.items():
            info[f'actor/{k}'] = v

        loss = value_loss + dynamics_loss + actor_loss
        return loss, info

    @jax.jit
    def update(self, batch):
        """Update the agent and return a new agent with information dictionary."""
        new_rng, rng = jax.random.split(self.rng)

        def loss_fn(grad_params):
            return self.total_loss(batch, grad_params, rng=rng)

        new_network, info = self.network.apply_loss_fn(loss_fn=loss_fn)

        return self.replace(network=new_network, rng=new_rng), info

    @jax.jit
    def sample_actions(
        self,
        observations,
        goals=None,
        seed=None,
        temperature=1.0,
    ):
        """Sample actions from the actor."""
        dist = self.network.select('actor')(observations, goals, temperature=temperature)
        actions = dist.sample(seed=seed)
        if not self.config['discrete']:
            actions = jnp.clip(actions, -1, 1)
        return actions

    @classmethod
    def create(
        cls,
        seed,
        ex_observations,
        ex_actions,
        config,
    ):
        """Create a new agent.

        Args:
            seed: Random seed.
            ex_observations: Example observations.
            ex_actions: Example batch of actions. In discrete-action MDPs, this should contain the maximum action value.
            config: Configuration dictionary.
        """
        rng = jax.random.PRNGKey(seed)
        rng, init_rng = jax.random.split(rng, 2)

        ex_goals = ex_observations
        ex_latents = np.zeros((ex_observations.shape[0], config['latent_dim']), dtype=np.float32)
        if config['discrete']:
            action_dim = ex_actions.max() + 1
        else:
            action_dim = ex_actions.shape[-1]

        # Define encoders.
        encoders = dict()
        if config['encoder'] is not None:
            encoder_module = encoder_modules[config['encoder']]
            encoders['value'] = encoder_module()
            encoders['actor'] = GCEncoder(concat_encoder=encoder_module())

        # Define value and actor networks.
        if config['quasimetric_type'] == 'mrn':
            value_def = GCMRNValue(
                hidden_dims=config['value_hidden_dims'],
                latent_dim=config['latent_dim'],
                layer_norm=config['layer_norm'],
                encoder=encoders.get('value'),
            )
        elif config['quasimetric_type'] == 'iqe':
            value_def = GCIQEValue(
                hidden_dims=config['value_hidden_dims'],
                latent_dim=config['latent_dim'],
                dim_per_component=8,
                layer_norm=config['layer_norm'],
                encoder=encoders.get('value'),
            )
        else:
            raise ValueError(f'Unsupported quasimetric type: {config["quasimetric_type"]}')

        if config['actor_loss'] == 'ddpgbc':
            # DDPG+BC requires a latent dynamics model.
            dynamics_def = MLP(
                hidden_dims=(*config['value_hidden_dims'], config['latent_dim']),
                layer_norm=config['layer_norm'],
            )

        if config['discrete']:
            actor_def = GCDiscreteActor(
                hidden_dims=config['actor_hidden_dims'],
                action_dim=action_dim,
                gc_encoder=encoders.get('actor'),
            )
        else:
            actor_def = GCActor(
                hidden_dims=config['actor_hidden_dims'],
                action_dim=action_dim,
                state_dependent_std=False,
                const_std=config['const_std'],
                gc_encoder=encoders.get('actor'),
            )

        # Define the dual lambda variable.
        lam_def = LogParam()

        network_info = dict(
            value=(value_def, (ex_observations, ex_goals)),
            actor=(actor_def, (ex_observations, ex_goals)),
            lam=(lam_def, ()),
        )
        if config['actor_loss'] == 'ddpgbc':
            network_info.update(
                dynamics=(dynamics_def, np.concatenate([ex_latents, ex_actions], axis=-1)),
            )
        networks = {k: v[0] for k, v in network_info.items()}
        network_args = {k: v[1] for k, v in network_info.items()}

        network_def = ModuleDict(networks)
        network_tx = optax.adam(learning_rate=config['lr'])
        network_params = network_def.init(init_rng, **network_args)['params']
        network = TrainState.create(network_def, network_params, tx=network_tx)

        return cls(rng, network=network, config=flax.core.FrozenDict(**config))


def get_config():
    config = ml_collections.ConfigDict(
        dict(
            # Agent hyperparameters.
            agent_name='qrl',  # Agent name.
            lr=3e-4,  # Learning rate.
            batch_size=1024,  # Batch size.
            actor_hidden_dims=(512, 512, 512),  # Actor network hidden dimensions.
            value_hidden_dims=(512, 512, 512),  # Value network hidden dimensions.
            quasimetric_type='iqe',  # Quasimetric parameterization type ('iqe' or 'mrn').
            latent_dim=512,  # Latent dimension for the quasimetric value function.
            layer_norm=True,  # Whether to use layer normalization.
            discount=0.99,  # Discount factor (unused by default; can be used for geometric goal sampling in GCDataset).
            eps=0.05,  # Margin for the dual lambda loss.
            actor_loss='ddpgbc',  # Actor loss type ('awr' or 'ddpgbc').
            alpha=0.003,  # Temperature in AWR or BC coefficient in DDPG+BC.
            const_std=True,  # Whether to use constant standard deviation for the actor.
            discrete=False,  # Whether the action space is discrete.
            encoder=ml_collections.config_dict.placeholder(str),  # Visual encoder name (None, 'impala_small', etc.).
            # Dataset hyperparameters.
            dataset_class='GCDataset',  # Dataset class name.
            value_p_curgoal=0.0,  # Probability of using the current state as the value goal.
            value_p_trajgoal=0.0,  # Probability of using a future state in the same trajectory as the value goal.
            value_p_randomgoal=1.0,  # Probability of using a random state as the value goal.
            value_geom_sample=True,  # Whether to use geometric sampling for future value goals.
            actor_p_curgoal=0.0,  # Probability of using the current state as the actor goal.
            actor_p_trajgoal=1.0,  # Probability of using a future state in the same trajectory as the actor goal.
            actor_p_randomgoal=0.0,  # Probability of using a random state as the actor goal.
            actor_geom_sample=False,  # Whether to use geometric sampling for future actor goals.
            gc_negative=False,  # Unused (defined for compatibility with GCDataset).
            p_aug=0.0,  # Probability of applying image augmentation.
            frame_stack=ml_collections.config_dict.placeholder(int),  # Number of frames to stack.
        )
    )
    return config
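A hypothetical override sketch (not part of the repo): since `get_config()` returns an `ml_collections.ConfigDict`, the QRL variants described in the class docstring can be selected by attribute assignment before agent creation, e.g. the MRN parameterization with the AWR actor loss:

    config = get_config()
    config.quasimetric_type = 'mrn'
    config.actor_loss = 'awr'
    config.alpha = 3.0  # AWR temperature; the default 0.003 is a BC coefficient tuned for the DDPG+BC branch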
impls/agents/sac.py
ADDED
@@ -0,0 +1,217 @@
import copy
from typing import Any

import flax
import jax
import jax.numpy as jnp
import ml_collections
import optax
from utils.flax_utils import ModuleDict, TrainState, nonpytree_field
from utils.networks import GCActor, GCValue, LogParam


class SACAgent(flax.struct.PyTreeNode):
    """Soft actor-critic (SAC) agent."""

    rng: Any
    network: Any
    config: Any = nonpytree_field()

    def critic_loss(self, batch, grad_params, rng):
        """Compute the SAC critic loss."""
        next_dist = self.network.select('actor')(batch['next_observations'])
        next_actions, next_log_probs = next_dist.sample_and_log_prob(seed=rng)

        next_qs = self.network.select('target_critic')(batch['next_observations'], next_actions)
        if self.config['min_q']:
            next_q = jnp.min(next_qs, axis=0)
        else:
            next_q = jnp.mean(next_qs, axis=0)

        target_q = batch['rewards'] + self.config['discount'] * batch['masks'] * next_q
        target_q = target_q - self.config['discount'] * batch['masks'] * next_log_probs * self.network.select('alpha')()

        q = self.network.select('critic')(batch['observations'], batch['actions'], params=grad_params)
        critic_loss = jnp.square(q - target_q).mean()

        return critic_loss, {
            'critic_loss': critic_loss,
            'q_mean': q.mean(),
            'q_max': q.max(),
            'q_min': q.min(),
        }
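    # The two `target_q` lines above implement the soft Bellman target
    #     y = r + discount * mask * (min_i Q_target_i(s', a') - alpha * log pi(a' | s')),
    # split across two statements; `mask` is 0 on terminal transitions, which switches off bootstrapping there.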

    def actor_loss(self, batch, grad_params, rng):
        """Compute the SAC actor loss."""
        # Actor loss.
        dist = self.network.select('actor')(batch['observations'], params=grad_params)
        actions, log_probs = dist.sample_and_log_prob(seed=rng)

        qs = self.network.select('critic')(batch['observations'], actions)
        if self.config['min_q']:
            q = jnp.min(qs, axis=0)
        else:
            q = jnp.mean(qs, axis=0)

        actor_loss = (log_probs * self.network.select('alpha')() - q).mean()

        # Entropy loss.
        alpha = self.network.select('alpha')(params=grad_params)
        entropy = -jax.lax.stop_gradient(log_probs).mean()
        alpha_loss = (alpha * (entropy - self.config['target_entropy'])).mean()

        total_loss = actor_loss + alpha_loss

        if self.config['tanh_squash']:
            action_std = dist._distribution.stddev()
        else:
            action_std = dist.stddev().mean()

        return total_loss, {
            'total_loss': total_loss,
            'actor_loss': actor_loss,
            'alpha_loss': alpha_loss,
            'alpha': alpha,
            'entropy': -log_probs.mean(),
            'std': action_std.mean(),
        }

    @jax.jit
    def total_loss(self, batch, grad_params, rng=None):
        """Compute the total loss."""
        info = {}
        rng = rng if rng is not None else self.rng

        rng, actor_rng, critic_rng = jax.random.split(rng, 3)

        critic_loss, critic_info = self.critic_loss(batch, grad_params, critic_rng)
        for k, v in critic_info.items():
            info[f'critic/{k}'] = v

        actor_loss, actor_info = self.actor_loss(batch, grad_params, actor_rng)
        for k, v in actor_info.items():
            info[f'actor/{k}'] = v

        loss = critic_loss + actor_loss
        return loss, info

    def target_update(self, network, module_name):
        """Update the target network."""
        new_target_params = jax.tree_util.tree_map(
            lambda p, tp: p * self.config['tau'] + tp * (1 - self.config['tau']),
            self.network.params[f'modules_{module_name}'],
            self.network.params[f'modules_target_{module_name}'],
        )
        network.params[f'modules_target_{module_name}'] = new_target_params
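    # `target_update` above is standard Polyak averaging, theta_target <- tau * theta + (1 - tau) * theta_target.
    # With the default tau = 0.005, the target critic tracks the online critic with an effective time constant of
    # roughly 1 / tau = 200 gradient steps.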

    @jax.jit
    def update(self, batch):
        """Update the agent and return a new agent with information dictionary."""
        new_rng, rng = jax.random.split(self.rng)

        def loss_fn(grad_params):
            return self.total_loss(batch, grad_params, rng=rng)

        new_network, info = self.network.apply_loss_fn(loss_fn=loss_fn)
        self.target_update(new_network, 'critic')

        return self.replace(network=new_network, rng=new_rng), info

    @jax.jit
    def sample_actions(
        self,
        observations,
        goals=None,
        seed=None,
        temperature=1.0,
    ):
        """Sample actions from the actor."""
        dist = self.network.select('actor')(observations, goals, temperature=temperature)
        actions = dist.sample(seed=seed)
        actions = jnp.clip(actions, -1, 1)
        return actions

    @classmethod
    def create(
        cls,
        seed,
        ex_observations,
        ex_actions,
        config,
    ):
        """Create a new agent.

        Args:
            seed: Random seed.
            ex_observations: Example observations.
            ex_actions: Example batch of actions.
            config: Configuration dictionary.
        """
        rng = jax.random.PRNGKey(seed)
        rng, init_rng = jax.random.split(rng, 2)

        action_dim = ex_actions.shape[-1]

        if config['target_entropy'] is None:
            config['target_entropy'] = -config['target_entropy_multiplier'] * action_dim

        # Define critic and actor networks.
        critic_def = GCValue(
            hidden_dims=config['value_hidden_dims'],
            layer_norm=config['layer_norm'],
            ensemble=True,
        )

        actor_def = GCActor(
            hidden_dims=config['actor_hidden_dims'],
            action_dim=action_dim,
            log_std_min=-5,
            tanh_squash=config['tanh_squash'],
            state_dependent_std=config['state_dependent_std'],
            const_std=False,
            final_fc_init_scale=config['actor_fc_scale'],
        )

        # Define the dual alpha variable.
        alpha_def = LogParam()

        network_info = dict(
            critic=(critic_def, (ex_observations, None, ex_actions)),
            target_critic=(copy.deepcopy(critic_def), (ex_observations, None, ex_actions)),
            actor=(actor_def, (ex_observations, None)),
            alpha=(alpha_def, ()),
        )
        networks = {k: v[0] for k, v in network_info.items()}
        network_args = {k: v[1] for k, v in network_info.items()}

        network_def = ModuleDict(networks)
        network_tx = optax.adam(learning_rate=config['lr'])
        network_params = network_def.init(init_rng, **network_args)['params']
        network = TrainState.create(network_def, network_params, tx=network_tx)

        params = network.params
        params['modules_target_critic'] = params['modules_critic']

        return cls(rng, network=network, config=flax.core.FrozenDict(**config))


def get_config():
    config = ml_collections.ConfigDict(
        dict(
            agent_name='sac',  # Agent name.
            lr=1e-4,  # Learning rate.
            batch_size=256,  # Batch size.
            actor_hidden_dims=(256, 256),  # Actor network hidden dimensions.
            value_hidden_dims=(256, 256),  # Value network hidden dimensions.
            layer_norm=False,  # Whether to use layer normalization.
            discount=0.99,  # Discount factor.
            tau=0.005,  # Target network update rate.
            target_entropy=ml_collections.config_dict.placeholder(float),  # Target entropy (None for automatic tuning).
            target_entropy_multiplier=0.5,  # Multiplier to dim(A) for target entropy.
            tanh_squash=True,  # Whether to squash actions with tanh.
            state_dependent_std=True,  # Whether to use state-dependent standard deviations for actor.
            actor_fc_scale=0.01,  # Final layer initialization scale for actor.
            min_q=True,  # Whether to use min Q (True) or mean Q (False).
        )
    )
    return config
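A worked instance of the automatic target-entropy rule in `create` above: for an 8-dimensional action space with the default `target_entropy_multiplier=0.5`, the target entropy resolves to `-0.5 * 8 = -4.0`, and `alpha_loss` in `actor_loss` then tunes the entropy coefficient to keep the policy's entropy near that value.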
impls/hyperparameters.sh
ADDED
The diff for this file is too large to render. See raw diff.
impls/main.py
ADDED
@@ -0,0 +1,163 @@
import json
import os
import random
import time
from collections import defaultdict

import jax
import numpy as np
import tqdm
import wandb
from absl import app, flags
from agents import agents
from ml_collections import config_flags
from utils.datasets import Dataset, GCDataset, HGCDataset
from utils.env_utils import make_env_and_datasets
from utils.evaluation import evaluate
from utils.flax_utils import restore_agent, save_agent
from utils.log_utils import CsvLogger, get_exp_name, get_flag_dict, get_wandb_video, setup_wandb

FLAGS = flags.FLAGS

flags.DEFINE_string('run_group', 'Debug', 'Run group.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_string('env_name', 'antmaze-large-navigate-v0', 'Environment (dataset) name.')
flags.DEFINE_string('save_dir', 'exp/', 'Save directory.')
flags.DEFINE_string('restore_path', None, 'Restore path.')
flags.DEFINE_integer('restore_epoch', None, 'Restore epoch.')

flags.DEFINE_integer('train_steps', 1000000, 'Number of training steps.')
flags.DEFINE_integer('log_interval', 5000, 'Logging interval.')
flags.DEFINE_integer('eval_interval', 100000, 'Evaluation interval.')
flags.DEFINE_integer('save_interval', 1000000, 'Saving interval.')

flags.DEFINE_integer('eval_tasks', None, 'Number of tasks to evaluate (None for all).')
flags.DEFINE_integer('eval_episodes', 20, 'Number of episodes for each task.')
flags.DEFINE_float('eval_temperature', 0, 'Actor temperature for evaluation.')
flags.DEFINE_float('eval_gaussian', None, 'Action Gaussian noise for evaluation.')
flags.DEFINE_integer('video_episodes', 1, 'Number of video episodes for each task.')
flags.DEFINE_integer('video_frame_skip', 3, 'Frame skip for videos.')
flags.DEFINE_integer('eval_on_cpu', 1, 'Whether to evaluate on CPU.')

config_flags.DEFINE_config_file('agent', 'agents/gciql.py', lock_config=False)


def main(_):
    # Set up logger.
    exp_name = get_exp_name(FLAGS.seed)
    setup_wandb(project='OGBench', group=FLAGS.run_group, name=exp_name)

    FLAGS.save_dir = os.path.join(FLAGS.save_dir, wandb.run.project, FLAGS.run_group, exp_name)
    os.makedirs(FLAGS.save_dir, exist_ok=True)
    flag_dict = get_flag_dict()
    with open(os.path.join(FLAGS.save_dir, 'flags.json'), 'w') as f:
        json.dump(flag_dict, f)

    # Set up environment and dataset.
    config = FLAGS.agent
    env, train_dataset, val_dataset = make_env_and_datasets(FLAGS.env_name, frame_stack=config['frame_stack'])

    dataset_class = {
        'GCDataset': GCDataset,
        'HGCDataset': HGCDataset,
    }[config['dataset_class']]
    train_dataset = dataset_class(Dataset.create(**train_dataset), config)
    if val_dataset is not None:
        val_dataset = dataset_class(Dataset.create(**val_dataset), config)

    # Initialize agent.
    random.seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)

    example_batch = train_dataset.sample(1)
    if config['discrete']:
        # Fill with the maximum action to let the agent know the action space size.
        example_batch['actions'] = np.full_like(example_batch['actions'], env.action_space.n - 1)

    agent_class = agents[config['agent_name']]
    agent = agent_class.create(
        FLAGS.seed,
        example_batch['observations'],
        example_batch['actions'],
        config,
    )

    # Restore agent.
    if FLAGS.restore_path is not None:
        agent = restore_agent(agent, FLAGS.restore_path, FLAGS.restore_epoch)

    # Train agent.
    train_logger = CsvLogger(os.path.join(FLAGS.save_dir, 'train.csv'))
    eval_logger = CsvLogger(os.path.join(FLAGS.save_dir, 'eval.csv'))
    first_time = time.time()
    last_time = time.time()
    for i in tqdm.tqdm(range(1, FLAGS.train_steps + 1), smoothing=0.1, dynamic_ncols=True):
        # Update agent.
        batch = train_dataset.sample(config['batch_size'])
        agent, update_info = agent.update(batch)

        # Log metrics.
        if i % FLAGS.log_interval == 0:
            train_metrics = {f'training/{k}': v for k, v in update_info.items()}
            if val_dataset is not None:
                val_batch = val_dataset.sample(config['batch_size'])
                _, val_info = agent.total_loss(val_batch, grad_params=None)
                train_metrics.update({f'validation/{k}': v for k, v in val_info.items()})
            train_metrics['time/epoch_time'] = (time.time() - last_time) / FLAGS.log_interval
            train_metrics['time/total_time'] = time.time() - first_time
            last_time = time.time()
            wandb.log(train_metrics, step=i)
            train_logger.log(train_metrics, step=i)

        # Evaluate agent.
        if i == 1 or i % FLAGS.eval_interval == 0:
            if FLAGS.eval_on_cpu:
                eval_agent = jax.device_put(agent, device=jax.devices('cpu')[0])
            else:
                eval_agent = agent
            renders = []
            eval_metrics = {}
            overall_metrics = defaultdict(list)
            task_infos = env.unwrapped.task_infos if hasattr(env.unwrapped, 'task_infos') else env.task_infos
            num_tasks = FLAGS.eval_tasks if FLAGS.eval_tasks is not None else len(task_infos)
            for task_id in tqdm.trange(1, num_tasks + 1):
                task_name = task_infos[task_id - 1]['task_name']
                eval_info, trajs, cur_renders = evaluate(
                    agent=eval_agent,
                    env=env,
                    task_id=task_id,
                    config=config,
                    num_eval_episodes=FLAGS.eval_episodes,
                    num_video_episodes=FLAGS.video_episodes,
                    video_frame_skip=FLAGS.video_frame_skip,
                    eval_temperature=FLAGS.eval_temperature,
                    eval_gaussian=FLAGS.eval_gaussian,
                )
                renders.extend(cur_renders)
                metric_names = ['success']
                eval_metrics.update(
                    {f'evaluation/{task_name}_{k}': v for k, v in eval_info.items() if k in metric_names}
                )
                for k, v in eval_info.items():
                    if k in metric_names:
                        overall_metrics[k].append(v)
            for k, v in overall_metrics.items():
                eval_metrics[f'evaluation/overall_{k}'] = np.mean(v)

            if FLAGS.video_episodes > 0:
                video = get_wandb_video(renders=renders, n_cols=num_tasks)
                eval_metrics['video'] = video

            wandb.log(eval_metrics, step=i)
            eval_logger.log(eval_metrics, step=i)

        # Save agent.
        if i % FLAGS.save_interval == 0:
            save_agent(agent, FLAGS.save_dir, i)

    train_logger.close()
    eval_logger.close()


if __name__ == '__main__':
    app.run(main)
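An example invocation (a sketch; flags as defined above, with agent hyperparameters overridable via dotted `ml_collections` config flags):

    python main.py --env_name=antmaze-large-navigate-v0 --agent=agents/hiql.py --agent.discount=0.995 --run_group=Experiments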
impls/requirements.txt
ADDED
@@ -0,0 +1,8 @@
ogbench  # Use the PyPI version of OGBench. Replace this with `pip install -e .` if you want to use the local version.
jax[cuda12] >= 0.4.26
flax >= 0.8.4
distrax >= 0.1.5
ml_collections
matplotlib
moviepy
wandb
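Setup note (an assumption, not stated in this file): something like `pip install -r impls/requirements.txt` installs these dependencies; the `jax[cuda12]` pin targets CUDA 12 machines, so CPU-only setups would swap in plain `jax`.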
impls/utils/__init__.py
ADDED
File without changes
impls/utils/datasets.py
ADDED
@@ -0,0 +1,397 @@
import dataclasses
from functools import partial
from typing import Any

import jax
import jax.numpy as jnp
import numpy as np
from flax.core.frozen_dict import FrozenDict


def get_size(data):
    """Return the size of the dataset."""
    sizes = jax.tree_util.tree_map(lambda arr: len(arr), data)
    return max(jax.tree_util.tree_leaves(sizes))


@partial(jax.jit, static_argnames=('padding',))
def random_crop(img, crop_from, padding):
    """Randomly crop an image.

    Args:
        img: Image to crop.
        crop_from: Coordinates to crop from.
        padding: Padding size.
    """
    padded_img = jnp.pad(img, ((padding, padding), (padding, padding), (0, 0)), mode='edge')
    return jax.lax.dynamic_slice(padded_img, crop_from, img.shape)


@partial(jax.jit, static_argnames=('padding',))
def batched_random_crop(imgs, crop_froms, padding):
    """Batched version of random_crop."""
    return jax.vmap(random_crop, (0, 0, None))(imgs, crop_froms, padding)
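# Usage sketch (mirroring GCDataset.augment below): for a batch of H x W x C images, sample per-image offsets in
# [0, 2 * padding] for the two spatial axes and 0 for the channel axis, then crop back to the original shape:
#     crop_froms = np.random.randint(0, 2 * padding + 1, (batch_size, 2))
#     crop_froms = np.concatenate([crop_froms, np.zeros((batch_size, 1), dtype=np.int64)], axis=1)
#     imgs = batched_random_crop(imgs, crop_froms, padding)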


class Dataset(FrozenDict):
    """Dataset class.

    This class supports both regular datasets (i.e., storing both observations and next_observations) and
    compact datasets (i.e., storing only observations). It assumes 'observations' is always present in the keys. If
    'next_observations' is not present, it will be inferred from 'observations' by shifting the indices by 1. In this
    case, set 'valids' appropriately to mask out the last state of each trajectory.
    """

    @classmethod
    def create(cls, freeze=True, **fields):
        """Create a dataset from the fields.

        Args:
            freeze: Whether to freeze the arrays.
            **fields: Keys and values of the dataset.
        """
        data = fields
        assert 'observations' in data
        if freeze:
            jax.tree_util.tree_map(lambda arr: arr.setflags(write=False), data)
        return cls(data)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.size = get_size(self._dict)
        if 'valids' in self._dict:
            (self.valid_idxs,) = np.nonzero(self['valids'] > 0)

    def get_random_idxs(self, num_idxs):
        """Return `num_idxs` random indices."""
        if 'valids' in self._dict:
            return self.valid_idxs[np.random.randint(len(self.valid_idxs), size=num_idxs)]
        else:
            return np.random.randint(self.size, size=num_idxs)

    def sample(self, batch_size: int, idxs=None):
        """Sample a batch of transitions."""
        if idxs is None:
            idxs = self.get_random_idxs(batch_size)
        return self.get_subset(idxs)

    def get_subset(self, idxs):
        """Return a subset of the dataset given the indices."""
        result = jax.tree_util.tree_map(lambda arr: arr[idxs], self._dict)
        if 'next_observations' not in result:
            result['next_observations'] = self._dict['observations'][np.minimum(idxs + 1, self.size - 1)]
        return result
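# Minimal compact-dataset sketch (hypothetical arrays): store observations only and let `get_subset` infer
# next_observations from idx + 1; `valids` masks the final state of each trajectory so sampled indices never
# cross a trajectory boundary:
#     ds = Dataset.create(
#         observations=obs,    # (N, obs_dim)
#         actions=acts,        # (N, act_dim)
#         terminals=terms,     # (N,), 1.0 at the last state of each trajectory
#         valids=1.0 - terms,  # 0.0 at the last state of each trajectory
#     )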


class ReplayBuffer(Dataset):
    """Replay buffer class.

    This class extends Dataset to support adding transitions.
    """

    @classmethod
    def create(cls, transition, size):
        """Create a replay buffer from the example transition.

        Args:
            transition: Example transition (dict).
            size: Size of the replay buffer.
        """

        def create_buffer(example):
            example = np.array(example)
            return np.zeros((size, *example.shape), dtype=example.dtype)

        buffer_dict = jax.tree_util.tree_map(create_buffer, transition)
        return cls(buffer_dict)

    @classmethod
    def create_from_initial_dataset(cls, init_dataset, size):
        """Create a replay buffer from the initial dataset.

        Args:
            init_dataset: Initial dataset.
            size: Size of the replay buffer.
        """

        def create_buffer(init_buffer):
            buffer = np.zeros((size, *init_buffer.shape[1:]), dtype=init_buffer.dtype)
            buffer[: len(init_buffer)] = init_buffer
            return buffer

        buffer_dict = jax.tree_util.tree_map(create_buffer, init_dataset)
        dataset = cls(buffer_dict)
        dataset.size = dataset.pointer = get_size(init_dataset)
        return dataset

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.max_size = get_size(self._dict)
        self.size = 0
        self.pointer = 0

    def add_transition(self, transition):
        """Add a transition to the replay buffer."""

        def set_idx(buffer, new_element):
            buffer[self.pointer] = new_element

        jax.tree_util.tree_map(set_idx, self._dict, transition)
        self.pointer = (self.pointer + 1) % self.max_size
        self.size = max(self.pointer, self.size)

    def clear(self):
        """Clear the replay buffer."""
        self.size = self.pointer = 0


@dataclasses.dataclass
class GCDataset:
    """Dataset class for goal-conditioned RL.

    This class provides a method to sample a batch of transitions with goals (value_goals and actor_goals) from the
    dataset. The goals are sampled from the current state, future states in the same trajectory, and random states.
    It also supports frame stacking and random-cropping image augmentation.

    It reads the following keys from the config:
    - discount: Discount factor for geometric sampling.
    - value_p_curgoal: Probability of using the current state as the value goal.
    - value_p_trajgoal: Probability of using a future state in the same trajectory as the value goal.
    - value_p_randomgoal: Probability of using a random state as the value goal.
    - value_geom_sample: Whether to use geometric sampling for future value goals.
    - actor_p_curgoal: Probability of using the current state as the actor goal.
    - actor_p_trajgoal: Probability of using a future state in the same trajectory as the actor goal.
    - actor_p_randomgoal: Probability of using a random state as the actor goal.
    - actor_geom_sample: Whether to use geometric sampling for future actor goals.
    - gc_negative: Whether to use '0 if s == g else -1' (True) or '1 if s == g else 0' (False) as the reward.
    - p_aug: Probability of applying image augmentation.
    - frame_stack: Number of frames to stack.

    Attributes:
        dataset: Dataset object.
        config: Configuration dictionary.
        preprocess_frame_stack: Whether to preprocess frame stacks. If False, frame stacks are computed on-the-fly.
            This saves memory but may slow down training.
    """

    dataset: Dataset
    config: Any
    preprocess_frame_stack: bool = True

    def __post_init__(self):
        self.size = self.dataset.size

        # Pre-compute trajectory boundaries.
        (self.terminal_locs,) = np.nonzero(self.dataset['terminals'] > 0)
        self.initial_locs = np.concatenate([[0], self.terminal_locs[:-1] + 1])
        assert self.terminal_locs[-1] == self.size - 1

        # Assert probabilities sum to 1.
        assert np.isclose(
            self.config['value_p_curgoal'] + self.config['value_p_trajgoal'] + self.config['value_p_randomgoal'], 1.0
        )
        assert np.isclose(
            self.config['actor_p_curgoal'] + self.config['actor_p_trajgoal'] + self.config['actor_p_randomgoal'], 1.0
        )

        if self.config['frame_stack'] is not None:
            # Only support compact (observation-only) datasets.
            assert 'next_observations' not in self.dataset
            if self.preprocess_frame_stack:
                stacked_observations = self.get_stacked_observations(np.arange(self.size))
                self.dataset = Dataset(self.dataset.copy(dict(observations=stacked_observations)))

    def sample(self, batch_size: int, idxs=None, evaluation=False):
        """Sample a batch of transitions with goals.

        This method samples a batch of transitions with goals (value_goals and actor_goals) from the dataset. They
        are stored in the keys 'value_goals' and 'actor_goals', respectively. It also computes the 'rewards' and
        'masks' based on the indices of the goals.

        Args:
            batch_size: Batch size.
            idxs: Indices of the transitions to sample. If None, random indices are sampled.
            evaluation: Whether to sample for evaluation. If True, image augmentation is not applied.
        """
        if idxs is None:
            idxs = self.dataset.get_random_idxs(batch_size)

        batch = self.dataset.sample(batch_size, idxs)
        if self.config['frame_stack'] is not None:
            batch['observations'] = self.get_observations(idxs)
            batch['next_observations'] = self.get_observations(idxs + 1)

        value_goal_idxs = self.sample_goals(
            idxs,
            self.config['value_p_curgoal'],
            self.config['value_p_trajgoal'],
            self.config['value_p_randomgoal'],
            self.config['value_geom_sample'],
        )
        actor_goal_idxs = self.sample_goals(
            idxs,
            self.config['actor_p_curgoal'],
            self.config['actor_p_trajgoal'],
            self.config['actor_p_randomgoal'],
            self.config['actor_geom_sample'],
        )

        batch['value_goals'] = self.get_observations(value_goal_idxs)
        batch['actor_goals'] = self.get_observations(actor_goal_idxs)
        successes = (idxs == value_goal_idxs).astype(float)
        batch['masks'] = 1.0 - successes
        batch['rewards'] = successes - (1.0 if self.config['gc_negative'] else 0.0)

        if self.config['p_aug'] is not None and not evaluation:
            if np.random.rand() < self.config['p_aug']:
                self.augment(batch, ['observations', 'next_observations', 'value_goals', 'actor_goals'])

        return batch

    def sample_goals(self, idxs, p_curgoal, p_trajgoal, p_randomgoal, geom_sample):
        """Sample goals for the given indices."""
        batch_size = len(idxs)

        # Random goals.
        random_goal_idxs = self.dataset.get_random_idxs(batch_size)

        # Goals from the same trajectory (excluding the current state, unless it is the final state).
        final_state_idxs = self.terminal_locs[np.searchsorted(self.terminal_locs, idxs)]
        if geom_sample:
            # Geometric sampling.
            offsets = np.random.geometric(p=1 - self.config['discount'], size=batch_size)  # in [1, inf)
            middle_goal_idxs = np.minimum(idxs + offsets, final_state_idxs)
        else:
            # Uniform sampling.
            distances = np.random.rand(batch_size)  # in [0, 1)
            middle_goal_idxs = np.round(
                (np.minimum(idxs + 1, final_state_idxs) * distances + final_state_idxs * (1 - distances))
            ).astype(int)
        goal_idxs = np.where(
            np.random.rand(batch_size) < p_trajgoal / (1.0 - p_curgoal + 1e-6), middle_goal_idxs, random_goal_idxs
        )

        # Goals at the current state.
        goal_idxs = np.where(np.random.rand(batch_size) < p_curgoal, idxs, goal_idxs)

        return goal_idxs
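    # Why p_trajgoal / (1 - p_curgoal) above: the current-state override fires with probability p_curgoal, so
    # within the remaining (1 - p_curgoal) mass the trajectory goal must be chosen with conditional probability
    # p_trajgoal / (1 - p_curgoal) for the three goal types to end up with marginal probabilities
    # (p_curgoal, p_trajgoal, p_randomgoal). The 1e-6 guards against division by zero when p_curgoal = 1.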

    def augment(self, batch, keys):
        """Apply image augmentation to the given keys."""
        padding = 3
        batch_size = len(batch[keys[0]])
        crop_froms = np.random.randint(0, 2 * padding + 1, (batch_size, 2))
        crop_froms = np.concatenate([crop_froms, np.zeros((batch_size, 1), dtype=np.int64)], axis=1)
        for key in keys:
            batch[key] = jax.tree_util.tree_map(
                lambda arr: np.array(batched_random_crop(arr, crop_froms, padding)) if len(arr.shape) == 4 else arr,
                batch[key],
            )

    def get_observations(self, idxs):
        """Return the observations for the given indices."""
        if self.config['frame_stack'] is None or self.preprocess_frame_stack:
            return jax.tree_util.tree_map(lambda arr: arr[idxs], self.dataset['observations'])
        else:
            return self.get_stacked_observations(idxs)

    def get_stacked_observations(self, idxs):
        """Return the frame-stacked observations for the given indices."""
        initial_state_idxs = self.initial_locs[np.searchsorted(self.initial_locs, idxs, side='right') - 1]
        rets = []
        for i in reversed(range(self.config['frame_stack'])):
            cur_idxs = np.maximum(idxs - i, initial_state_idxs)
            rets.append(jax.tree_util.tree_map(lambda arr: arr[cur_idxs], self.dataset['observations']))
        return jax.tree_util.tree_map(lambda *args: np.concatenate(args, axis=-1), *rets)
| 307 |
+
|
| 308 |
+
|
| 309 |
+
@dataclasses.dataclass
|
| 310 |
+
class HGCDataset(GCDataset):
|
| 311 |
+
"""Dataset class for hierarchical goal-conditioned RL.
|
| 312 |
+
|
| 313 |
+
This class extends GCDataset to support high-level actor goals and prediction targets. It reads the following
|
| 314 |
+
additional key from the config:
|
| 315 |
+
- subgoal_steps: Subgoal steps (i.e., the number of steps to reach the low-level goal).
|
| 316 |
+
"""
|
| 317 |
+
|
| 318 |
+
def sample(self, batch_size: int, idxs=None, evaluation=False):
|
| 319 |
+
"""Sample a batch of transitions with goals.
|
| 320 |
+
|
| 321 |
+
This method samples a batch of transitions with goals from the dataset. The goals are stored in the keys
|
| 322 |
+
'value_goals', 'low_actor_goals', 'high_actor_goals', and 'high_actor_targets'. It also computes the 'rewards'
|
| 323 |
+
and 'masks' based on the indices of the goals.
|
| 324 |
+
|
| 325 |
+
Args:
|
| 326 |
+
batch_size: Batch size.
|
| 327 |
+
idxs: Indices of the transitions to sample. If None, random indices are sampled.
|
| 328 |
+
evaluation: Whether to sample for evaluation. If True, image augmentation is not applied.
|
| 329 |
+
"""
|
| 330 |
+
if idxs is None:
|
| 331 |
+
idxs = self.dataset.get_random_idxs(batch_size)
|
| 332 |
+
|
| 333 |
+
batch = self.dataset.sample(batch_size, idxs)
|
| 334 |
+
if self.config['frame_stack'] is not None:
|
| 335 |
+
batch['observations'] = self.get_observations(idxs)
|
| 336 |
+
batch['next_observations'] = self.get_observations(idxs + 1)
|
| 337 |
+
|
| 338 |
+
# Sample value goals.
|
| 339 |
+
value_goal_idxs = self.sample_goals(
|
| 340 |
+
idxs,
|
| 341 |
+
self.config['value_p_curgoal'],
|
| 342 |
+
self.config['value_p_trajgoal'],
|
| 343 |
+
self.config['value_p_randomgoal'],
|
| 344 |
+
self.config['value_geom_sample'],
|
| 345 |
+
)
|
| 346 |
+
batch['value_goals'] = self.get_observations(value_goal_idxs)
|
| 347 |
+
|
| 348 |
+
successes = (idxs == value_goal_idxs).astype(float)
|
| 349 |
+
batch['masks'] = 1.0 - successes
|
| 350 |
+
batch['rewards'] = successes - (1.0 if self.config['gc_negative'] else 0.0)
|
| 351 |
+
|
| 352 |
+
# Set low-level actor goals.
|
| 353 |
+
final_state_idxs = self.terminal_locs[np.searchsorted(self.terminal_locs, idxs)]
|
| 354 |
+
low_goal_idxs = np.minimum(idxs + self.config['subgoal_steps'], final_state_idxs)
|
| 355 |
+
batch['low_actor_goals'] = self.get_observations(low_goal_idxs)
|
| 356 |
+
|
| 357 |
+
# Sample high-level actor goals and set prediction targets.
|
| 358 |
+
# High-level future goals.
|
| 359 |
+
if self.config['actor_geom_sample']:
|
| 360 |
+
# Geometric sampling.
|
| 361 |
+
offsets = np.random.geometric(p=1 - self.config['discount'], size=batch_size) # in [1, inf)
|
| 362 |
+
high_traj_goal_idxs = np.minimum(idxs + offsets, final_state_idxs)
|
| 363 |
+
else:
|
| 364 |
+
# Uniform sampling.
|
| 365 |
+
distances = np.random.rand(batch_size) # in [0, 1)
|
| 366 |
+
high_traj_goal_idxs = np.round(
|
| 367 |
+
(np.minimum(idxs + 1, final_state_idxs) * distances + final_state_idxs * (1 - distances))
|
| 368 |
+
).astype(int)
|
| 369 |
+
high_traj_target_idxs = np.minimum(idxs + self.config['subgoal_steps'], high_traj_goal_idxs)
|
| 370 |
+
|
| 371 |
+
# High-level random goals.
|
| 372 |
+
high_random_goal_idxs = self.dataset.get_random_idxs(batch_size)
|
| 373 |
+
high_random_target_idxs = np.minimum(idxs + self.config['subgoal_steps'], final_state_idxs)
|
| 374 |
+
|
| 375 |
+
# Pick between high-level future goals and random goals.
|
| 376 |
+
pick_random = np.random.rand(batch_size) < self.config['actor_p_randomgoal']
|
| 377 |
+
high_goal_idxs = np.where(pick_random, high_random_goal_idxs, high_traj_goal_idxs)
|
| 378 |
+
high_target_idxs = np.where(pick_random, high_random_target_idxs, high_traj_target_idxs)
|
| 379 |
+
|
| 380 |
+
batch['high_actor_goals'] = self.get_observations(high_goal_idxs)
|
| 381 |
+
batch['high_actor_targets'] = self.get_observations(high_target_idxs)
|
| 382 |
+
|
| 383 |
+
if self.config['p_aug'] is not None and not evaluation:
|
| 384 |
+
if np.random.rand() < self.config['p_aug']:
|
| 385 |
+
self.augment(
|
| 386 |
+
batch,
|
| 387 |
+
[
|
| 388 |
+
'observations',
|
| 389 |
+
'next_observations',
|
| 390 |
+
'value_goals',
|
| 391 |
+
'low_actor_goals',
|
| 392 |
+
'high_actor_goals',
|
| 393 |
+
'high_actor_targets',
|
| 394 |
+
],
|
| 395 |
+
)
|
| 396 |
+
|
| 397 |
+
return batch
|
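For orientation, here is a hedged sketch of how this class might be driven from a training loop. It assumes (as suggested by the dataclass definition earlier in this file) that the constructor takes the underlying Dataset plus a config object supplying the keys read in sample(); the batch size is illustrative:

    # Hypothetical usage sketch, not a verbatim excerpt from this commit.
    hgc_dataset = HGCDataset(train_dataset, config)  # Dataset + config with the keys read above
    batch = hgc_dataset.sample(batch_size=256)
    # batch now carries 'value_goals', 'low_actor_goals', 'high_actor_goals',
    # 'high_actor_targets', and the derived 'rewards' and 'masks'.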
impls/utils/encoders.py
ADDED
@@ -0,0 +1,144 @@
+import functools
+from typing import Sequence
+
+import flax.linen as nn
+import jax.numpy as jnp
+
+from utils.networks import MLP
+
+
+class ResnetStack(nn.Module):
+    """ResNet stack module."""
+
+    num_features: int
+    num_blocks: int
+    max_pooling: bool = True
+
+    @nn.compact
+    def __call__(self, x):
+        initializer = nn.initializers.xavier_uniform()
+        conv_out = nn.Conv(
+            features=self.num_features,
+            kernel_size=(3, 3),
+            strides=1,
+            kernel_init=initializer,
+            padding='SAME',
+        )(x)
+
+        if self.max_pooling:
+            conv_out = nn.max_pool(
+                conv_out,
+                window_shape=(3, 3),
+                padding='SAME',
+                strides=(2, 2),
+            )
+
+        for _ in range(self.num_blocks):
+            block_input = conv_out
+            conv_out = nn.relu(conv_out)
+            conv_out = nn.Conv(
+                features=self.num_features,
+                kernel_size=(3, 3),
+                strides=1,
+                padding='SAME',
+                kernel_init=initializer,
+            )(conv_out)
+
+            conv_out = nn.relu(conv_out)
+            conv_out = nn.Conv(
+                features=self.num_features,
+                kernel_size=(3, 3),
+                strides=1,
+                padding='SAME',
+                kernel_init=initializer,
+            )(conv_out)
+            conv_out += block_input
+
+        return conv_out
+
+
+class ImpalaEncoder(nn.Module):
+    """IMPALA encoder."""
+
+    width: int = 1
+    stack_sizes: tuple = (16, 32, 32)
+    num_blocks: int = 2
+    dropout_rate: float = None
+    mlp_hidden_dims: Sequence[int] = (512,)
+    layer_norm: bool = False
+
+    def setup(self):
+        stack_sizes = self.stack_sizes
+        self.stack_blocks = [
+            ResnetStack(
+                num_features=stack_sizes[i] * self.width,
+                num_blocks=self.num_blocks,
+            )
+            for i in range(len(stack_sizes))
+        ]
+        if self.dropout_rate is not None:
+            self.dropout = nn.Dropout(rate=self.dropout_rate)
+
+    @nn.compact
+    def __call__(self, x, train=True, cond_var=None):
+        x = x.astype(jnp.float32) / 255.0
+
+        conv_out = x
+
+        for idx in range(len(self.stack_blocks)):
+            conv_out = self.stack_blocks[idx](conv_out)
+            if self.dropout_rate is not None:
+                conv_out = self.dropout(conv_out, deterministic=not train)
+
+        conv_out = nn.relu(conv_out)
+        if self.layer_norm:
+            conv_out = nn.LayerNorm()(conv_out)
+        out = conv_out.reshape((*x.shape[:-3], -1))
+
+        out = MLP(self.mlp_hidden_dims, activate_final=True, layer_norm=self.layer_norm)(out)
+
+        return out
+
+
+class GCEncoder(nn.Module):
+    """Helper module to handle inputs to goal-conditioned networks.
+
+    It takes in observations (s) and goals (g) and returns the concatenation of `state_encoder(s)`, `goal_encoder(g)`,
+    and `concat_encoder([s, g])`. It ignores the encoders that are not provided. This way, the module can handle both
+    early and late fusion (or their variants) of state and goal information.
+    """
+
+    state_encoder: nn.Module = None
+    goal_encoder: nn.Module = None
+    concat_encoder: nn.Module = None
+
+    @nn.compact
+    def __call__(self, observations, goals=None, goal_encoded=False):
+        """Returns the representations of observations and goals.
+
+        If `goal_encoded` is True, `goals` is assumed to be already encoded representations. In this case, either
+        `goal_encoder` or `concat_encoder` must be None.
+        """
+        reps = []
+        if self.state_encoder is not None:
+            reps.append(self.state_encoder(observations))
+        if goals is not None:
+            if goal_encoded:
+                # Can't have both goal_encoder and concat_encoder in this case.
+                assert self.goal_encoder is None or self.concat_encoder is None
+                reps.append(goals)
+            else:
+                if self.goal_encoder is not None:
+                    reps.append(self.goal_encoder(goals))
+                if self.concat_encoder is not None:
+                    reps.append(self.concat_encoder(jnp.concatenate([observations, goals], axis=-1)))
+        reps = jnp.concatenate(reps, axis=-1)
+        return reps
+
+
+encoder_modules = {
+    'impala': ImpalaEncoder,
+    'impala_debug': functools.partial(ImpalaEncoder, num_blocks=1, stack_sizes=(4, 4)),
+    'impala_small': functools.partial(ImpalaEncoder, num_blocks=1),
+    'impala_large': functools.partial(ImpalaEncoder, stack_sizes=(64, 128, 128), mlp_hidden_dims=(1024,)),
+}
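To make GCEncoder's fusion behavior concrete, here is a hedged late-fusion sketch. The image shape is illustrative; with ImpalaEncoder's default (512,) MLP head, each encoder emits 512 features, so the concatenated representation below has 1024:

    import jax
    import jax.numpy as jnp

    # Late fusion: encode observations and goals separately, then concatenate.
    encoder = GCEncoder(
        state_encoder=encoder_modules['impala_debug'](),
        goal_encoder=encoder_modules['impala_debug'](),
    )
    obs = jnp.zeros((2, 64, 64, 3), dtype=jnp.uint8)
    goals = jnp.zeros((2, 64, 64, 3), dtype=jnp.uint8)
    params = encoder.init(jax.random.PRNGKey(0), obs, goals)
    reps = encoder.apply(params, obs, goals)  # shape (2, 1024)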
impls/utils/env_utils.py
ADDED
@@ -0,0 +1,117 @@
+import collections
+import os
+import platform
+import time
+
+import gymnasium
+import numpy as np
+from gymnasium.spaces import Box
+
+import ogbench
+from utils.datasets import Dataset
+
+
+class EpisodeMonitor(gymnasium.Wrapper):
+    """Environment wrapper to monitor episode statistics."""
+
+    def __init__(self, env):
+        super().__init__(env)
+        self._reset_stats()
+        self.total_timesteps = 0
+
+    def _reset_stats(self):
+        self.reward_sum = 0.0
+        self.episode_length = 0
+        self.start_time = time.time()
+
+    def step(self, action):
+        observation, reward, terminated, truncated, info = self.env.step(action)
+
+        self.reward_sum += reward
+        self.episode_length += 1
+        self.total_timesteps += 1
+        info['total'] = {'timesteps': self.total_timesteps}
+
+        if terminated or truncated:
+            info['episode'] = {}
+            info['episode']['return'] = self.reward_sum
+            info['episode']['length'] = self.episode_length
+            info['episode']['duration'] = time.time() - self.start_time
+
+            if hasattr(self.unwrapped, 'get_normalized_score'):
+                info['episode']['normalized_return'] = (
+                    self.unwrapped.get_normalized_score(info['episode']['return']) * 100.0
+                )
+
+        return observation, reward, terminated, truncated, info
+
+    def reset(self, *args, **kwargs):
+        self._reset_stats()
+        return self.env.reset(*args, **kwargs)
+
+
+class FrameStackWrapper(gymnasium.Wrapper):
+    """Environment wrapper to stack observations."""
+
+    def __init__(self, env, num_stack):
+        super().__init__(env)
+
+        self.num_stack = num_stack
+        self.frames = collections.deque(maxlen=num_stack)
+
+        low = np.concatenate([self.observation_space.low] * num_stack, axis=-1)
+        high = np.concatenate([self.observation_space.high] * num_stack, axis=-1)
+        self.observation_space = Box(low=low, high=high, dtype=self.observation_space.dtype)
+
+    def get_observation(self):
+        assert len(self.frames) == self.num_stack
+        return np.concatenate(list(self.frames), axis=-1)
+
+    def reset(self, **kwargs):
+        ob, info = self.env.reset(**kwargs)
+        for _ in range(self.num_stack):
+            self.frames.append(ob)
+        if 'goal' in info:
+            info['goal'] = np.concatenate([info['goal']] * self.num_stack, axis=-1)
+        return self.get_observation(), info
+
+    def step(self, action):
+        observation, reward, terminated, truncated, info = self.env.step(action)
+        self.frames.append(observation)
+        return self.get_observation(), reward, terminated, truncated, info
+
+
+def setup_egl():
+    """Set up EGL for rendering."""
+    if 'mac' in platform.platform():
+        # macOS doesn't support EGL.
+        pass
+    else:
+        os.environ['MUJOCO_GL'] = 'egl'
+        if 'SLURM_STEP_GPUS' in os.environ:
+            os.environ['EGL_DEVICE_ID'] = os.environ['SLURM_STEP_GPUS']
+
+
+def make_env_and_datasets(dataset_name, frame_stack=None):
+    """Make OGBench environment and datasets.
+
+    Args:
+        dataset_name: Name of the dataset.
+        frame_stack: Number of frames to stack.
+
+    Returns:
+        A tuple of the environment, training dataset, and validation dataset.
+    """
+    setup_egl()
+
+    # Load the full (non-compact) dataset format; passing compact_dataset=True would save memory instead.
+    env, train_dataset, val_dataset = ogbench.make_env_and_datasets(dataset_name, compact_dataset=False)
+    train_dataset = Dataset.create(**train_dataset)
+    val_dataset = Dataset.create(**val_dataset)
+
+    if frame_stack is not None:
+        env = FrameStackWrapper(env, frame_stack)
+
+    env.reset()
+
+    return env, train_dataset, val_dataset
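A hedged usage sketch of this wrapper stack (the dataset name follows OGBench's naming scheme but is illustrative here):

    env, train_dataset, val_dataset = make_env_and_datasets('antmaze-large-navigate-v0')
    env = EpisodeMonitor(env)  # adds info['episode'] statistics when an episode ends
    ob, info = env.reset()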
impls/utils/evaluation.py
ADDED
@@ -0,0 +1,117 @@
+from collections import defaultdict
+
+import jax
+import numpy as np
+from tqdm import trange
+
+
+def supply_rng(f, rng=jax.random.PRNGKey(0)):
+    """Helper function to split the random number generator key before each call to the function."""
+
+    def wrapped(*args, **kwargs):
+        nonlocal rng
+        rng, key = jax.random.split(rng)
+        return f(*args, seed=key, **kwargs)
+
+    return wrapped
+
+
+def flatten(d, parent_key='', sep='.'):
+    """Flatten a dictionary."""
+    items = []
+    for k, v in d.items():
+        new_key = parent_key + sep + k if parent_key else k
+        if hasattr(v, 'items'):
+            items.extend(flatten(v, new_key, sep=sep).items())
+        else:
+            items.append((new_key, v))
+    return dict(items)
+
+
+def add_to(dict_of_lists, single_dict):
+    """Append values to the corresponding lists in the dictionary."""
+    for k, v in single_dict.items():
+        dict_of_lists[k].append(v)
+
+
+def evaluate(
+    agent,
+    env,
+    task_id=None,
+    config=None,
+    num_eval_episodes=50,
+    num_video_episodes=0,
+    video_frame_skip=3,
+    eval_temperature=0,
+    eval_gaussian=None,
+):
+    """Evaluate the agent in the environment.
+
+    Args:
+        agent: Agent.
+        env: Environment.
+        task_id: Task ID to be passed to the environment.
+        config: Configuration dictionary.
+        num_eval_episodes: Number of episodes to evaluate the agent.
+        num_video_episodes: Number of episodes to render. These episodes are not included in the statistics.
+        video_frame_skip: Number of frames to skip between renders.
+        eval_temperature: Action sampling temperature.
+        eval_gaussian: Standard deviation of the Gaussian noise to add to the actions.
+
+    Returns:
+        A tuple containing the statistics, trajectories, and rendered videos.
+    """
+    actor_fn = supply_rng(agent.sample_actions, rng=jax.random.PRNGKey(np.random.randint(0, 2**32)))
+    trajs = []
+    stats = defaultdict(list)
+
+    renders = []
+    for i in trange(num_eval_episodes + num_video_episodes):
+        traj = defaultdict(list)
+        should_render = i >= num_eval_episodes
+
+        observation, info = env.reset(options=dict(task_id=task_id, render_goal=should_render))
+        goal = info.get('goal')
+        goal_frame = info.get('goal_rendered')
+        done = False
+        step = 0
+        render = []
+        while not done:
+            action = actor_fn(observations=observation, goals=goal, temperature=eval_temperature)
+            action = np.array(action)
+            if not config.get('discrete'):
+                if eval_gaussian is not None:
+                    action = np.random.normal(action, eval_gaussian)
+                action = np.clip(action, -1, 1)
+
+            next_observation, reward, terminated, truncated, info = env.step(action)
+            done = terminated or truncated
+            step += 1
+
+            if should_render and (step % video_frame_skip == 0 or done):
+                frame = env.render().copy()
+                if goal_frame is not None:
+                    render.append(np.concatenate([goal_frame, frame], axis=0))
+                else:
+                    render.append(frame)
+
+            transition = dict(
+                observation=observation,
+                next_observation=next_observation,
+                action=action,
+                reward=reward,
+                done=done,
+                info=info,
+            )
+            add_to(traj, transition)
+            observation = next_observation
+        if i < num_eval_episodes:
+            add_to(stats, flatten(info))
+            trajs.append(traj)
+        else:
+            renders.append(np.array(render))
+
+    for k, v in stats.items():
+        stats[k] = np.mean(v)
+
+    return stats, trajs, renders
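The convention that makes evaluate() work is that supply_rng injects a fresh `seed` keyword on every call, so the wrapped actor can stay purely functional. A self-contained sketch (the dummy policy is hypothetical; only the wrapper comes from the code above):

    import jax
    import jax.numpy as jnp

    def dummy_policy(observations, seed=None, temperature=1.0):
        # Stand-in for agent.sample_actions; it consumes the injected seed.
        return jax.random.uniform(seed, (2,), minval=-1.0, maxval=1.0)

    policy = supply_rng(dummy_policy, rng=jax.random.PRNGKey(42))
    a1 = policy(observations=jnp.zeros(4))  # internally splits off a fresh key
    a2 = policy(observations=jnp.zeros(4))  # different key, so a different sample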
impls/utils/flax_utils.py
ADDED
@@ -0,0 +1,202 @@
+import functools
+import glob
+import os
+import pickle
+from typing import Any, Dict, Mapping, Sequence
+
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import optax
+
+nonpytree_field = functools.partial(flax.struct.field, pytree_node=False)
+
+
+class ModuleDict(nn.Module):
+    """A dictionary of modules.
+
+    This allows sharing parameters between modules and provides a convenient way to access them.
+
+    Attributes:
+        modules: Dictionary of modules.
+    """
+
+    modules: Dict[str, nn.Module]
+
+    @nn.compact
+    def __call__(self, *args, name=None, **kwargs):
+        """Forward pass.
+
+        For initialization, call with `name=None` and provide the arguments for each module in `kwargs`.
+        Otherwise, call with `name=<module_name>` and provide the arguments for that module.
+        """
+        if name is None:
+            if kwargs.keys() != self.modules.keys():
+                raise ValueError(
+                    f'When `name` is not specified, kwargs must contain the arguments for each module. '
+                    f'Got kwargs keys {kwargs.keys()} but module keys {self.modules.keys()}'
+                )
+            out = {}
+            for key, value in kwargs.items():
+                if isinstance(value, Mapping):
+                    out[key] = self.modules[key](**value)
+                elif isinstance(value, Sequence):
+                    out[key] = self.modules[key](*value)
+                else:
+                    out[key] = self.modules[key](value)
+            return out
+
+        return self.modules[name](*args, **kwargs)
+
+
+class TrainState(flax.struct.PyTreeNode):
+    """Custom train state for models.
+
+    Attributes:
+        step: Counter to keep track of the training steps. It is incremented by 1 after each `apply_gradients` call.
+        apply_fn: Apply function of the model.
+        model_def: Model definition.
+        params: Parameters of the model.
+        tx: optax optimizer.
+        opt_state: Optimizer state.
+    """
+
+    step: int
+    apply_fn: Any = nonpytree_field()
+    model_def: Any = nonpytree_field()
+    params: Any
+    tx: Any = nonpytree_field()
+    opt_state: Any
+
+    @classmethod
+    def create(cls, model_def, params, tx=None, **kwargs):
+        """Create a new train state."""
+        if tx is not None:
+            opt_state = tx.init(params)
+        else:
+            opt_state = None
+
+        return cls(
+            step=1,
+            apply_fn=model_def.apply,
+            model_def=model_def,
+            params=params,
+            tx=tx,
+            opt_state=opt_state,
+            **kwargs,
+        )
+
+    def __call__(self, *args, params=None, method=None, **kwargs):
+        """Forward pass.
+
+        When `params` is not provided, it uses the stored parameters.
+
+        The typical use case is to set `params` to `None` when you want to *stop* the gradients, and to pass the
+        current traced parameters when you want to flow the gradients. In other words, the default behavior is to
+        stop the gradients, and you need to explicitly provide the parameters to flow the gradients.
+
+        Args:
+            *args: Arguments to pass to the model.
+            params: Parameters to use for the forward pass. If `None`, it uses the stored parameters, without
+                flowing the gradients.
+            method: Method to call in the model. If `None`, it uses the default `apply` method.
+            **kwargs: Keyword arguments to pass to the model.
+        """
+        if params is None:
+            params = self.params
+        variables = {'params': params}
+        if method is not None:
+            method_name = getattr(self.model_def, method)
+        else:
+            method_name = None
+
+        return self.apply_fn(variables, *args, method=method_name, **kwargs)
+
+    def select(self, name):
+        """Helper function to select a module from a `ModuleDict`."""
+        return functools.partial(self, name=name)
+
+    def apply_gradients(self, grads, **kwargs):
+        """Apply the gradients and return the updated state."""
+        updates, new_opt_state = self.tx.update(grads, self.opt_state, self.params)
+        new_params = optax.apply_updates(self.params, updates)
+
+        return self.replace(
+            step=self.step + 1,
+            params=new_params,
+            opt_state=new_opt_state,
+            **kwargs,
+        )
+
+    def apply_loss_fn(self, loss_fn):
+        """Apply the loss function and return the updated state and info.
+
+        It additionally computes the gradient statistics and adds them to the dictionary.
+        """
+        grads, info = jax.grad(loss_fn, has_aux=True)(self.params)
+
+        grad_max = jax.tree_util.tree_map(jnp.max, grads)
+        grad_min = jax.tree_util.tree_map(jnp.min, grads)
+        grad_norm = jax.tree_util.tree_map(jnp.linalg.norm, grads)
+
+        grad_max_flat = jnp.concatenate([jnp.reshape(x, -1) for x in jax.tree_util.tree_leaves(grad_max)], axis=0)
+        grad_min_flat = jnp.concatenate([jnp.reshape(x, -1) for x in jax.tree_util.tree_leaves(grad_min)], axis=0)
+        grad_norm_flat = jnp.concatenate([jnp.reshape(x, -1) for x in jax.tree_util.tree_leaves(grad_norm)], axis=0)
+
+        final_grad_max = jnp.max(grad_max_flat)
+        final_grad_min = jnp.min(grad_min_flat)
+        final_grad_norm = jnp.linalg.norm(grad_norm_flat, ord=1)
+
+        info.update(
+            {
+                'grad/max': final_grad_max,
+                'grad/min': final_grad_min,
+                'grad/norm': final_grad_norm,
+            }
+        )
+
+        return self.apply_gradients(grads=grads), info
+
+
+def save_agent(agent, save_dir, epoch):
+    """Save the agent to a file.
+
+    Args:
+        agent: Agent.
+        save_dir: Directory to save the agent.
+        epoch: Epoch number.
+    """
+
+    save_dict = dict(
+        agent=flax.serialization.to_state_dict(agent),
+    )
+    save_path = os.path.join(save_dir, f'params_{epoch}.pkl')
+    with open(save_path, 'wb') as f:
+        pickle.dump(save_dict, f)
+
+    print(f'Saved to {save_path}')
+
+
+def restore_agent(agent, restore_path, restore_epoch):
+    """Restore the agent from a file.
+
+    Args:
+        agent: Agent.
+        restore_path: Path to the directory containing the saved agent.
+        restore_epoch: Epoch number.
+    """
+    candidates = glob.glob(restore_path)
+
+    assert len(candidates) == 1, f'Found {len(candidates)} candidates: {candidates}'
+
+    restore_path = candidates[0] + f'/params_{restore_epoch}.pkl'
+
+    with open(restore_path, 'rb') as f:
+        load_dict = pickle.load(f)
+
+    agent = flax.serialization.from_state_dict(agent, load_dict['agent'])
+
+    print(f'Restored from {restore_path}')
+
+    return agent
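The stop-versus-flow gradient convention documented in TrainState.__call__ is easiest to see inside a loss function. A hedged sketch (the model, data, and loss are hypothetical; the TrainState calls are the ones defined above):

    import flax.linen as nn
    import jax
    import jax.numpy as jnp
    import optax

    model_def = nn.Dense(1)
    params = model_def.init(jax.random.PRNGKey(0), jnp.zeros((1, 3)))['params']
    state = TrainState.create(model_def, params, tx=optax.adam(3e-4))

    x, y = jnp.ones((8, 3)), jnp.zeros((8, 1))

    def loss_fn(grad_params):
        pred = state(x, params=grad_params)  # gradients flow through grad_params
        target = state(x)                    # stored params; gradients are stopped (shown for contrast)
        loss = jnp.mean((pred - y) ** 2)
        return loss, {'loss': loss}

    state, info = state.apply_loss_fn(loss_fn)  # one update step plus grad/* statistics in info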
impls/utils/log_utils.py
ADDED
@@ -0,0 +1,146 @@
+import os
+import tempfile
+from datetime import datetime
+
+import absl.flags as flags
+import ml_collections
+import numpy as np
+import wandb
+from PIL import Image, ImageEnhance
+
+
+class CsvLogger:
+    """CSV logger for logging metrics to a CSV file."""
+
+    def __init__(self, path):
+        self.path = path
+        self.header = None
+        self.file = None
+        self.disallowed_types = (wandb.Image, wandb.Video, wandb.Histogram)
+
+    def log(self, row, step):
+        row['step'] = step
+        if self.file is None:
+            self.file = open(self.path, 'w')
+            if self.header is None:
+                self.header = [k for k, v in row.items() if not isinstance(v, self.disallowed_types)]
+                self.file.write(','.join(self.header) + '\n')
+            filtered_row = {k: v for k, v in row.items() if not isinstance(v, self.disallowed_types)}
+            self.file.write(','.join([str(filtered_row.get(k, '')) for k in self.header]) + '\n')
+        else:
+            filtered_row = {k: v for k, v in row.items() if not isinstance(v, self.disallowed_types)}
+            self.file.write(','.join([str(filtered_row.get(k, '')) for k in self.header]) + '\n')
+        self.file.flush()
+
+    def close(self):
+        if self.file is not None:
+            self.file.close()
+
+
+def get_exp_name(seed):
+    """Return the experiment name."""
+    exp_name = ''
+    exp_name += f'sd{seed:03d}_'
+    if 'SLURM_JOB_ID' in os.environ:
+        exp_name += f's_{os.environ["SLURM_JOB_ID"]}.'
+    if 'SLURM_PROCID' in os.environ:
+        exp_name += f'{os.environ["SLURM_PROCID"]}.'
+    exp_name += f'{datetime.now().strftime("%Y%m%d_%H%M%S")}'
+
+    return exp_name
+
+
+def get_flag_dict():
+    """Return the dictionary of flags."""
+    flag_dict = {k: getattr(flags.FLAGS, k) for k in flags.FLAGS if '.' not in k}
+    for k in flag_dict:
+        if isinstance(flag_dict[k], ml_collections.ConfigDict):
+            flag_dict[k] = flag_dict[k].to_dict()
+    return flag_dict
+
+
+def setup_wandb(
+    entity=None,
+    project='project',
+    group=None,
+    name=None,
+    mode='online',
+):
+    """Set up Weights & Biases for logging."""
+    wandb_output_dir = tempfile.mkdtemp()
+    tags = [group] if group is not None else None
+
+    init_kwargs = dict(
+        config=get_flag_dict(),
+        project=project,
+        entity=entity,
+        tags=tags,
+        group=group,
+        dir=wandb_output_dir,
+        name=name,
+        settings=wandb.Settings(
+            start_method='thread',
+            _disable_stats=False,
+        ),
+        mode=mode,
+        save_code=True,
+    )
+
+    run = wandb.init(**init_kwargs)
+
+    return run
+
+
+def reshape_video(v, n_cols=None):
+    """Helper function to reshape videos."""
+    if v.ndim == 4:
+        v = v[None,]
+
+    _, t, h, w, c = v.shape
+
+    if n_cols is None:
+        # Set n_cols to the square root of the number of videos.
+        n_cols = np.ceil(np.sqrt(v.shape[0])).astype(int)
+    if v.shape[0] % n_cols != 0:
+        len_addition = n_cols - v.shape[0] % n_cols
+        v = np.concatenate((v, np.zeros(shape=(len_addition, t, h, w, c))), axis=0)
+    n_rows = v.shape[0] // n_cols
+
+    v = np.reshape(v, newshape=(n_rows, n_cols, t, h, w, c))
+    v = np.transpose(v, axes=(2, 5, 0, 3, 1, 4))
+    v = np.reshape(v, newshape=(t, c, n_rows * h, n_cols * w))
+
+    return v
+
+
+def get_wandb_video(renders=None, n_cols=None, fps=15):
+    """Return a Weights & Biases video.
+
+    It takes a list of videos and reshapes them into a single video with the specified number of columns.
+
+    Args:
+        renders: List of videos. Each video should be a numpy array of shape (t, h, w, c).
+        n_cols: Number of columns for the reshaped video. If None, it is set to the square root of the number of
+            videos.
+    """
+    # Pad videos to the same length.
+    max_length = max([len(render) for render in renders])
+    for i, render in enumerate(renders):
+        assert render.dtype == np.uint8
+
+        # Decrease brightness of the padded frames.
+        final_frame = render[-1]
+        final_image = Image.fromarray(final_frame)
+        enhancer = ImageEnhance.Brightness(final_image)
+        final_image = enhancer.enhance(0.5)
+        final_frame = np.array(final_image)
+
+        pad = np.repeat(final_frame[np.newaxis, ...], max_length - len(render), axis=0)
+        renders[i] = np.concatenate([render, pad], axis=0)
+
+        # Add borders.
+        renders[i] = np.pad(renders[i], ((0, 0), (1, 1), (1, 1), (0, 0)), mode='constant', constant_values=0)
+    renders = np.array(renders)  # (n, t, h, w, c)
+
+    renders = reshape_video(renders, n_cols)  # (t, c, nr * h, nc * w)
+
+    return wandb.Video(renders, fps=fps, format='mp4')
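A brief sketch of the CsvLogger contract (metric names illustrative): the header is fixed by the first logged row, and later rows are aligned to it, with missing keys written as empty fields.

    logger = CsvLogger('train.csv')
    logger.log({'loss': 0.42, 'grad/norm': 1.3}, step=1000)
    logger.log({'loss': 0.40}, step=2000)  # the 'grad/norm' column is left empty
    logger.close()  # train.csv: header 'loss,grad/norm,step' plus two data rows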
impls/utils/networks.py
ADDED
@@ -0,0 +1,517 @@
+from typing import Any, Optional, Sequence
+
+import distrax
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+
+
+def default_init(scale=1.0):
+    """Default kernel initializer."""
+    return nn.initializers.variance_scaling(scale, 'fan_avg', 'uniform')
+
+
+def ensemblize(cls, num_qs, out_axes=0, **kwargs):
+    """Ensemblize a module."""
+    return nn.vmap(
+        cls,
+        variable_axes={'params': 0},
+        split_rngs={'params': True},
+        in_axes=None,
+        out_axes=out_axes,
+        axis_size=num_qs,
+        **kwargs,
+    )
+
+
+class Identity(nn.Module):
+    """Identity layer."""
+
+    def __call__(self, x):
+        return x
+
+
+class MLP(nn.Module):
+    """Multi-layer perceptron.
+
+    Attributes:
+        hidden_dims: Hidden layer dimensions.
+        activations: Activation function.
+        activate_final: Whether to apply activation to the final layer.
+        kernel_init: Kernel initializer.
+        layer_norm: Whether to apply layer normalization.
+    """
+
+    hidden_dims: Sequence[int]
+    activations: Any = nn.gelu
+    activate_final: bool = False
+    kernel_init: Any = default_init()
+    layer_norm: bool = False
+
+    @nn.compact
+    def __call__(self, x):
+        for i, size in enumerate(self.hidden_dims):
+            x = nn.Dense(size, kernel_init=self.kernel_init)(x)
+            if i + 1 < len(self.hidden_dims) or self.activate_final:
+                x = self.activations(x)
+                if self.layer_norm:
+                    x = nn.LayerNorm()(x)
+        return x
+
+
+class LengthNormalize(nn.Module):
+    """Length normalization layer.
+
+    It normalizes the input along the last dimension to have a length of sqrt(dim).
+    """
+
+    @nn.compact
+    def __call__(self, x):
+        return x / jnp.linalg.norm(x, axis=-1, keepdims=True) * jnp.sqrt(x.shape[-1])
+
+
+class Param(nn.Module):
+    """Scalar parameter module."""
+
+    init_value: float = 0.0
+
+    @nn.compact
+    def __call__(self):
+        return self.param('value', init_fn=lambda key: jnp.full((), self.init_value))
+
+
+class LogParam(nn.Module):
+    """Scalar parameter module with log scale."""
+
+    init_value: float = 1.0
+
+    @nn.compact
+    def __call__(self):
+        log_value = self.param('log_value', init_fn=lambda key: jnp.full((), jnp.log(self.init_value)))
+        return jnp.exp(log_value)
+
+
+class TransformedWithMode(distrax.Transformed):
+    """Transformed distribution with mode calculation."""
+
+    def mode(self):
+        return self.bijector.forward(self.distribution.mode())
+
+
+class RunningMeanStd(flax.struct.PyTreeNode):
+    """Running mean and standard deviation.
+
+    Attributes:
+        eps: Epsilon value to avoid division by zero.
+        mean: Running mean.
+        var: Running variance.
+        clip_max: Clip value after normalization.
+        count: Number of samples.
+    """
+
+    eps: Any = 1e-6
+    mean: Any = 1.0
+    var: Any = 1.0
+    clip_max: Any = 10.0
+    count: int = 0
+
+    def normalize(self, batch):
+        batch = (batch - self.mean) / jnp.sqrt(self.var + self.eps)
+        batch = jnp.clip(batch, -self.clip_max, self.clip_max)
+        return batch
+
+    def unnormalize(self, batch):
+        return batch * jnp.sqrt(self.var + self.eps) + self.mean
+
+    def update(self, batch):
+        batch_mean, batch_var = jnp.mean(batch, axis=0), jnp.var(batch, axis=0)
+        batch_count = len(batch)
+
+        delta = batch_mean - self.mean
+        total_count = self.count + batch_count
+
+        new_mean = self.mean + delta * batch_count / total_count
+        m_a = self.var * self.count
+        m_b = batch_var * batch_count
+        m_2 = m_a + m_b + delta**2 * self.count * batch_count / total_count
+        new_var = m_2 / total_count
+
+        return self.replace(mean=new_mean, var=new_var, count=total_count)
+
+
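Since ensemblize is reused by the actor and value classes below, here is a hedged sketch of what it produces. The dimensions are illustrative; the behavior (independent parameters per ensemble member, one shared input) follows directly from the nn.vmap arguments above:

    import jax
    import jax.numpy as jnp

    EnsembleMLP = ensemblize(MLP, num_qs=2)   # two MLPs with independent parameters
    net = EnsembleMLP((64, 64, 1))
    params = net.init(jax.random.PRNGKey(0), jnp.zeros((8, 10)))
    qs = net.apply(params, jnp.zeros((8, 10)))  # shape (2, 8, 1): ensemble axis first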
+class GCActor(nn.Module):
+    """Goal-conditioned actor.
+
+    Attributes:
+        hidden_dims: Hidden layer dimensions.
+        action_dim: Action dimension.
+        log_std_min: Minimum value of log standard deviation.
+        log_std_max: Maximum value of log standard deviation.
+        tanh_squash: Whether to squash the action with tanh.
+        state_dependent_std: Whether to use state-dependent standard deviation.
+        const_std: Whether to use constant standard deviation.
+        final_fc_init_scale: Initial scale of the final fully-connected layer.
+        gc_encoder: Optional GCEncoder module to encode the inputs.
+    """
+
+    hidden_dims: Sequence[int]
+    action_dim: int
+    log_std_min: Optional[float] = -5
+    log_std_max: Optional[float] = 2
+    tanh_squash: bool = False
+    state_dependent_std: bool = False
+    const_std: bool = True
+    final_fc_init_scale: float = 1e-2
+    gc_encoder: nn.Module = None
+
+    def setup(self):
+        self.actor_net = MLP(self.hidden_dims, activate_final=True)
+        self.mean_net = nn.Dense(self.action_dim, kernel_init=default_init(self.final_fc_init_scale))
+        if self.state_dependent_std:
+            self.log_std_net = nn.Dense(self.action_dim, kernel_init=default_init(self.final_fc_init_scale))
+        else:
+            if not self.const_std:
+                self.log_stds = self.param('log_stds', nn.initializers.zeros, (self.action_dim,))
+
+    def __call__(
+        self,
+        observations,
+        goals=None,
+        goal_encoded=False,
+        temperature=1.0,
+    ):
+        """Return the action distribution.
+
+        Args:
+            observations: Observations.
+            goals: Goals (optional).
+            goal_encoded: Whether the goals are already encoded.
+            temperature: Scaling factor for the standard deviation.
+        """
+        if self.gc_encoder is not None:
+            inputs = self.gc_encoder(observations, goals, goal_encoded=goal_encoded)
+        else:
+            inputs = [observations]
+            if goals is not None:
+                inputs.append(goals)
+            inputs = jnp.concatenate(inputs, axis=-1)
+        outputs = self.actor_net(inputs)
+
+        means = self.mean_net(outputs)
+        if self.state_dependent_std:
+            log_stds = self.log_std_net(outputs)
+        else:
+            if self.const_std:
+                log_stds = jnp.zeros_like(means)
+            else:
+                log_stds = self.log_stds
+
+        log_stds = jnp.clip(log_stds, self.log_std_min, self.log_std_max)
+
+        distribution = distrax.MultivariateNormalDiag(loc=means, scale_diag=jnp.exp(log_stds) * temperature)
+        if self.tanh_squash:
+            distribution = TransformedWithMode(distribution, distrax.Block(distrax.Tanh(), ndims=1))
+
+        return distribution
+
+
+class GCDiscreteActor(nn.Module):
+    """Goal-conditioned actor for discrete actions.
+
+    Attributes:
+        hidden_dims: Hidden layer dimensions.
+        action_dim: Action dimension.
+        final_fc_init_scale: Initial scale of the final fully-connected layer.
+        gc_encoder: Optional GCEncoder module to encode the inputs.
+    """
+
+    hidden_dims: Sequence[int]
+    action_dim: int
+    final_fc_init_scale: float = 1e-2
+    gc_encoder: nn.Module = None
+
+    def setup(self):
+        self.actor_net = MLP(self.hidden_dims, activate_final=True)
+        self.logit_net = nn.Dense(self.action_dim, kernel_init=default_init(self.final_fc_init_scale))
+
+    def __call__(
+        self,
+        observations,
+        goals=None,
+        goal_encoded=False,
+        temperature=1.0,
+    ):
+        """Return the action distribution.
+
+        Args:
+            observations: Observations.
+            goals: Goals (optional).
+            goal_encoded: Whether the goals are already encoded.
+            temperature: Inverse scaling factor for the logits (set to 0 to get the argmax).
+        """
+        if self.gc_encoder is not None:
+            inputs = self.gc_encoder(observations, goals, goal_encoded=goal_encoded)
+        else:
+            inputs = [observations]
+            if goals is not None:
+                inputs.append(goals)
+            inputs = jnp.concatenate(inputs, axis=-1)
+        outputs = self.actor_net(inputs)
+
+        logits = self.logit_net(outputs)
+
+        distribution = distrax.Categorical(logits=logits / jnp.maximum(1e-6, temperature))
+
+        return distribution
+
+
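A hedged sketch of querying GCActor (dimensions illustrative; sample and mode are standard distrax methods on the returned distribution):

    import jax
    import jax.numpy as jnp

    actor = GCActor(hidden_dims=(256, 256), action_dim=4)
    obs, goal = jnp.zeros((1, 17)), jnp.zeros((1, 17))
    params = actor.init(jax.random.PRNGKey(0), obs, goal)
    dist = actor.apply(params, obs, goal, temperature=1.0)
    action = dist.sample(seed=jax.random.PRNGKey(1))  # stochastic action, shape (1, 4)
    greedy = dist.mode()                              # deterministic action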
| 269 |
+
class GCValue(nn.Module):
|
| 270 |
+
"""Goal-conditioned value/critic function.
|
| 271 |
+
|
| 272 |
+
This module can be used for both value V(s, g) and critic Q(s, a, g) functions.
|
| 273 |
+
|
| 274 |
+
Attributes:
|
| 275 |
+
hidden_dims: Hidden layer dimensions.
|
| 276 |
+
layer_norm: Whether to apply layer normalization.
|
| 277 |
+
ensemble: Whether to ensemble the value function.
|
| 278 |
+
gc_encoder: Optional GCEncoder module to encode the inputs.
|
| 279 |
+
"""
|
| 280 |
+
|
| 281 |
+
hidden_dims: Sequence[int]
|
| 282 |
+
layer_norm: bool = True
|
| 283 |
+
ensemble: bool = True
|
| 284 |
+
gc_encoder: nn.Module = None
|
| 285 |
+
|
| 286 |
+
def setup(self):
|
| 287 |
+
mlp_module = MLP
|
| 288 |
+
if self.ensemble:
|
| 289 |
+
mlp_module = ensemblize(mlp_module, 2)
|
| 290 |
+
value_net = mlp_module((*self.hidden_dims, 1), activate_final=False, layer_norm=self.layer_norm)
|
| 291 |
+
|
| 292 |
+
self.value_net = value_net
|
| 293 |
+
|
| 294 |
+
def __call__(self, observations, goals=None, actions=None):
|
| 295 |
+
"""Return the value/critic function.
|
| 296 |
+
|
| 297 |
+
Args:
|
| 298 |
+
observations: Observations.
|
| 299 |
+
goals: Goals (optional).
|
| 300 |
+
actions: Actions (optional).
|
| 301 |
+
"""
|
| 302 |
+
if self.gc_encoder is not None:
|
| 303 |
+
inputs = [self.gc_encoder(observations, goals)]
|
| 304 |
+
else:
|
| 305 |
+
inputs = [observations]
|
| 306 |
+
if goals is not None:
|
| 307 |
+
inputs.append(goals)
|
| 308 |
+
if actions is not None:
|
| 309 |
+
inputs.append(actions)
|
| 310 |
+
inputs = jnp.concatenate(inputs, axis=-1)
|
| 311 |
+
|
| 312 |
+
v = self.value_net(inputs).squeeze(-1)
|
| 313 |
+
|
| 314 |
+
return v
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
class GCDiscreteCritic(GCValue):
|
| 318 |
+
"""Goal-conditioned critic for discrete actions."""
|
| 319 |
+
|
| 320 |
+
action_dim: int = None
|
| 321 |
+
|
| 322 |
+
def __call__(self, observations, goals=None, actions=None):
|
| 323 |
+
actions = jnp.eye(self.action_dim)[actions]
|
| 324 |
+
return super().__call__(observations, goals, actions)
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
class GCBilinearValue(nn.Module):
|
| 328 |
+
"""Goal-conditioned bilinear value/critic function.
|
| 329 |
+
|
| 330 |
+
This module computes the value function as V(s, g) = phi(s)^T psi(g) / sqrt(d) or the critic function as
|
| 331 |
+
Q(s, a, g) = phi(s, a)^T psi(g) / sqrt(d), where phi and psi output d-dimensional vectors.
|
| 332 |
+
|
| 333 |
+
Attributes:
|
| 334 |
+
hidden_dims: Hidden layer dimensions.
|
| 335 |
+
latent_dim: Latent dimension.
|
| 336 |
+
layer_norm: Whether to apply layer normalization.
|
| 337 |
+
ensemble: Whether to ensemble the value function.
|
| 338 |
+
value_exp: Whether to exponentiate the value. Useful for contrastive learning.
|
| 339 |
+
state_encoder: Optional state encoder.
|
| 340 |
+
goal_encoder: Optional goal encoder.
|
| 341 |
+
"""
|
| 342 |
+
|
| 343 |
+
hidden_dims: Sequence[int]
|
| 344 |
+
latent_dim: int
|
| 345 |
+
layer_norm: bool = True
|
| 346 |
+
ensemble: bool = True
|
| 347 |
+
value_exp: bool = False
|
| 348 |
+
state_encoder: nn.Module = None
|
| 349 |
+
goal_encoder: nn.Module = None
|
| 350 |
+
|
| 351 |
+
def setup(self) -> None:
|
| 352 |
+
mlp_module = MLP
|
| 353 |
+
if self.ensemble:
|
| 354 |
+
mlp_module = ensemblize(mlp_module, 2)
|
| 355 |
+
|
| 356 |
+
self.phi = mlp_module((*self.hidden_dims, self.latent_dim), activate_final=False, layer_norm=self.layer_norm)
|
| 357 |
+
self.psi = mlp_module((*self.hidden_dims, self.latent_dim), activate_final=False, layer_norm=self.layer_norm)
|
| 358 |
+
|
| 359 |
+
def __call__(self, observations, goals, actions=None, info=False):
|
| 360 |
+
"""Return the value/critic function.
|
| 361 |
+
|
| 362 |
+
Args:
|
| 363 |
+
observations: Observations.
|
| 364 |
+
goals: Goals.
|
| 365 |
+
actions: Actions (optional).
|
| 366 |
+
info: Whether to additionally return the representations phi and psi.
|
| 367 |
+
"""
|
| 368 |
+
if self.state_encoder is not None:
|
| 369 |
+
observations = self.state_encoder(observations)
|
| 370 |
+
if self.goal_encoder is not None:
|
| 371 |
+
goals = self.goal_encoder(goals)
|
| 372 |
+
|
| 373 |
+
if actions is None:
|
| 374 |
+
phi_inputs = observations
|
| 375 |
+
else:
|
| 376 |
+
phi_inputs = jnp.concatenate([observations, actions], axis=-1)
|
| 377 |
+
|
| 378 |
+
phi = self.phi(phi_inputs)
|
| 379 |
+
psi = self.psi(goals)
|
| 380 |
+
|
| 381 |
+
v = (phi * psi / jnp.sqrt(self.latent_dim)).sum(axis=-1)
|
| 382 |
+
|
| 383 |
+
if self.value_exp:
|
| 384 |
+
v = jnp.exp(v)
|
| 385 |
+
|
| 386 |
+
if info:
|
| 387 |
+
return v, phi, psi
|
| 388 |
+
else:
|
| 389 |
+
return v
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
class GCDiscreteBilinearCritic(GCBilinearValue):
|
| 393 |
+
"""Goal-conditioned bilinear critic for discrete actions."""
|
| 394 |
+
|
| 395 |
+
action_dim: int = None
|
| 396 |
+
|
| 397 |
+
def __call__(self, observations, goals=None, actions=None, info=False):
|
| 398 |
+
actions = jnp.eye(self.action_dim)[actions]
|
| 399 |
+
return super().__call__(observations, goals, actions, info)
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
class GCMRNValue(nn.Module):
|
| 403 |
+
"""Metric residual network (MRN) value function.
|
| 404 |
+
|
| 405 |
+
This module computes the value function as the sum of a symmetric Euclidean distance and an asymmetric
|
| 406 |
+
L^infinity-based quasimetric.
|
| 407 |
+
|
| 408 |
+
Attributes:
|
| 409 |
+
hidden_dims: Hidden layer dimensions.
|
| 410 |
+
latent_dim: Latent dimension.
|
| 411 |
+
layer_norm: Whether to apply layer normalization.
|
| 412 |
+
encoder: Optional state/goal encoder.
|
| 413 |
+
"""
|
| 414 |
+
|
| 415 |
+
hidden_dims: Sequence[int]
|
| 416 |
+
latent_dim: int
|
| 417 |
+
layer_norm: bool = True
|
| 418 |
+
encoder: nn.Module = None
|
| 419 |
+
|
| 420 |
+
def setup(self) -> None:
|
| 421 |
+
self.phi = MLP((*self.hidden_dims, self.latent_dim), activate_final=False, layer_norm=self.layer_norm)
|
| 422 |
+
|
| 423 |
+
def __call__(self, observations, goals, is_phi=False, info=False):
|
| 424 |
+
"""Return the MRN value function.
|
| 425 |
+
|
| 426 |
+
Args:
|
| 427 |
+
observations: Observations.
|
| 428 |
+
goals: Goals.
|
| 429 |
+
is_phi: Whether the inputs are already encoded by phi.
|
| 430 |
+
info: Whether to additionally return the representations phi_s and phi_g.
|
| 431 |
+
"""
|
| 432 |
+
if is_phi:
|
| 433 |
+
phi_s = observations
|
| 434 |
+
phi_g = goals
|
| 435 |
+
else:
|
| 436 |
+
if self.encoder is not None:
|
| 437 |
+
observations = self.encoder(observations)
|
| 438 |
+
goals = self.encoder(goals)
|
| 439 |
+
phi_s = self.phi(observations)
|
| 440 |
+
phi_g = self.phi(goals)
|
| 441 |
+
|
| 442 |
+
sym_s = phi_s[..., : self.latent_dim // 2]
|
| 443 |
+
sym_g = phi_g[..., : self.latent_dim // 2]
|
| 444 |
+
asym_s = phi_s[..., self.latent_dim // 2 :]
|
| 445 |
+
asym_g = phi_g[..., self.latent_dim // 2 :]
|
| 446 |
+
squared_dist = ((sym_s - sym_g) ** 2).sum(axis=-1)
|
| 447 |
+
quasi = jax.nn.relu((asym_s - asym_g).max(axis=-1))
|
| 448 |
+
v = jnp.sqrt(jnp.maximum(squared_dist, 1e-12)) + quasi
|
| 449 |
+
|
| 450 |
+
if info:
|
| 451 |
+
return v, phi_s, phi_g
|
| 452 |
+
else:
|
| 453 |
+
return v
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
class GCIQEValue(nn.Module):
    """Interval quasimetric embedding (IQE) value function.

    This module computes the value function as an IQE-based quasimetric.

    Attributes:
        hidden_dims: Hidden layer dimensions.
        latent_dim: Latent dimension.
        dim_per_component: Dimension of each component in IQE (i.e., number of intervals in each group).
        layer_norm: Whether to apply layer normalization.
        encoder: Optional state/goal encoder.
    """

    hidden_dims: Sequence[int]
    latent_dim: int
    dim_per_component: int
    layer_norm: bool = True
    encoder: nn.Module = None

    def setup(self) -> None:
        self.phi = MLP((*self.hidden_dims, self.latent_dim), activate_final=False, layer_norm=self.layer_norm)
        self.alpha = Param()

    def __call__(self, observations, goals, is_phi=False, info=False):
        """Return the IQE value function.

        Args:
            observations: Observations.
            goals: Goals.
            is_phi: Whether the inputs are already encoded by phi.
            info: Whether to additionally return the representations phi_s and phi_g.
        """
        alpha = jax.nn.sigmoid(self.alpha())
        if is_phi:
            phi_s = observations
            phi_g = goals
        else:
            if self.encoder is not None:
                observations = self.encoder(observations)
                goals = self.encoder(goals)
            phi_s = self.phi(observations)
            phi_g = self.phi(goals)

        x = jnp.reshape(phi_s, (*phi_s.shape[:-1], -1, self.dim_per_component))
        y = jnp.reshape(phi_g, (*phi_g.shape[:-1], -1, self.dim_per_component))
        valid = x < y
        xy = jnp.concatenate(jnp.broadcast_arrays(x, y), axis=-1)
        ixy = xy.argsort(axis=-1)
        sxy = jnp.take_along_axis(xy, ixy, axis=-1)
        neg_inc_copies = jnp.take_along_axis(valid, ixy % self.dim_per_component, axis=-1) * jnp.where(
            ixy < self.dim_per_component, -1, 1
        )
        neg_inp_copies = jnp.cumsum(neg_inc_copies, axis=-1)
        neg_f = -1.0 * (neg_inp_copies < 0)
        neg_incf = jnp.concatenate([neg_f[..., :1], neg_f[..., 1:] - neg_f[..., :-1]], axis=-1)
        components = (sxy * neg_incf).sum(axis=-1)
        v = alpha * components.mean(axis=-1) + (1 - alpha) * components.max(axis=-1)

        if info:
            return v, phi_s, phi_g
        else:
            return v
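The vectorized IQE block is dense. Under my reading (consistent with the interval quasimetric embedding construction), each of the latent_dim / dim_per_component components equals the total length of the union of the intervals [x_i, y_i] over the dims where x_i < y_i, computed via the sort-and-cumsum trick so it stays batched and differentiable; the final value blends the components' mean and max with the learned sigmoid(alpha). A plain-Python reference for one component group (an interpretation offered for illustration, not code from the diff):

# --- Illustrative example, not part of the diff ---
import numpy as np

def iqe_component(x, y):
    """Length of the union of intervals [x_i, y_i], counting only dims with x_i < y_i."""
    intervals = sorted((a, b) for a, b in zip(x, y) if a < b)
    total, cur_end = 0.0, -np.inf
    for lo, hi in intervals:
        if hi > cur_end:
            total += hi - max(lo, cur_end)  # Add only the part not already covered.
            cur_end = hi
    return total

x = np.array([0.0, 2.0, 5.0])
y = np.array([3.0, 4.0, 1.0])  # Third interval is invalid (5 > 1) and is ignored.
print(iqe_component(x, y))     # 4.0: union of [0, 3] and [2, 4].
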
ogbench/__init__.py
ADDED
@@ -0,0 +1,15 @@
"""OGBench: Benchmarking Offline Goal-Conditioned RL"""

import ogbench.locomaze
import ogbench.manipspace
import ogbench.powderworld
from ogbench.utils import download_datasets, load_dataset, make_env_and_datasets

__all__ = (
    'locomaze',
    'manipspace',
    'powderworld',
    'download_datasets',
    'load_dataset',
    'make_env_and_datasets',
)
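Importing `ogbench` pulls in the subpackages, which register their environments with Gymnasium as a side effect, so the IDs defined below become available through the standard `gymnasium.make` API. A minimal usage sketch; the dataset-qualified name `'antmaze-large-navigate-v0'` and the three-tuple return of `make_env_and_datasets` are assumptions based on the exports above, not confirmed by this diff:

# --- Illustrative example, not part of the diff ---
import gymnasium
import ogbench  # Side effect: registers all OGBench environments.

# Instantiate a registered environment directly.
env = gymnasium.make('antmaze-large-v0')
ob, info = env.reset(seed=0)
ob, reward, terminated, truncated, info = env.step(env.action_space.sample())

# Presumed helper usage: fetch the environment together with its offline datasets.
env, train_dataset, val_dataset = ogbench.make_env_and_datasets('antmaze-large-navigate-v0')
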
ogbench/locomaze/__init__.py
ADDED
@@ -0,0 +1,241 @@
from gymnasium.envs.registration import register

visual_dict = dict(
    ob_type='pixels',
    render_mode='rgb_array',
    width=64,
    height=64,
    camera_name='back',
)

register(
    id='pointmaze-medium-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='point',
        maze_env_type='maze',
        maze_type='medium',
    ),
)
register(
    id='pointmaze-large-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='point',
        maze_env_type='maze',
        maze_type='large',
    ),
)
register(
    id='pointmaze-giant-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='point',
        maze_env_type='maze',
        maze_type='giant',
    ),
)
register(
    id='pointmaze-teleport-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='point',
        maze_env_type='maze',
        maze_type='teleport',
    ),
)

register(
    id='antmaze-medium-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='ant',
        maze_env_type='maze',
        maze_type='medium',
    ),
)
register(
    id='visual-antmaze-medium-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='ant',
        maze_env_type='maze',
        maze_type='medium',
        **visual_dict,
    ),
)
register(
    id='antmaze-large-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='ant',
        maze_env_type='maze',
        maze_type='large',
    ),
)
register(
    id='visual-antmaze-large-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='ant',
        maze_env_type='maze',
        maze_type='large',
        **visual_dict,
    ),
)
register(
    id='antmaze-giant-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='ant',
        maze_env_type='maze',
        maze_type='giant',
    ),
)
register(
    id='visual-antmaze-giant-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='ant',
        maze_env_type='maze',
        maze_type='giant',
        **visual_dict,
    ),
)
register(
    id='antmaze-teleport-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='ant',
        maze_env_type='maze',
        maze_type='teleport',
    ),
)
register(
    id='visual-antmaze-teleport-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='ant',
        maze_env_type='maze',
        maze_type='teleport',
        **visual_dict,
    ),
)

register(
    id='antsoccer-arena-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='ant',
        maze_env_type='ball',
        maze_type='arena',
    ),
)
register(
    id='antsoccer-medium-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=1000,
    kwargs=dict(
        loco_env_type='ant',
        maze_env_type='ball',
        maze_type='medium',
    ),
)

register(
    id='humanoidmaze-medium-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=2000,
    kwargs=dict(
        loco_env_type='humanoid',
        maze_env_type='maze',
        maze_type='medium',
    ),
)
register(
    id='visual-humanoidmaze-medium-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=2000,
    kwargs=dict(
        loco_env_type='humanoid',
        maze_env_type='maze',
        maze_type='medium',
        **visual_dict,
    ),
)
register(
    id='humanoidmaze-large-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=2000,
    kwargs=dict(
        loco_env_type='humanoid',
        maze_env_type='maze',
        maze_type='large',
    ),
)
register(
    id='visual-humanoidmaze-large-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=2000,
    kwargs=dict(
        loco_env_type='humanoid',
        maze_env_type='maze',
        maze_type='large',
        **visual_dict,
    ),
)
register(
    id='humanoidmaze-giant-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=4000,
    kwargs=dict(
        loco_env_type='humanoid',
        maze_env_type='maze',
        maze_type='giant',
    ),
)
register(
    id='visual-humanoidmaze-giant-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=4000,
    kwargs=dict(
        loco_env_type='humanoid',
        maze_env_type='maze',
        maze_type='giant',
        **visual_dict,
    ),
)
register(
    id='humanoidmaze-teleport-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=2000,
    kwargs=dict(
        loco_env_type='humanoid',
        maze_env_type='maze',
        maze_type='teleport',
    ),
)
register(
    id='visual-humanoidmaze-teleport-v0',
    entry_point='ogbench.locomaze.maze:make_maze_env',
    max_episode_steps=2000,
    kwargs=dict(
        loco_env_type='humanoid',
        maze_env_type='maze',
        maze_type='teleport',
        **visual_dict,
    ),
)
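Each `visual-*` ID is the same environment with `visual_dict` spliced into its registered kwargs. Because Gymnasium merges caller-supplied kwargs over the registered ones, the pixel variants can also be reproduced from a state-based ID; a sketch relying on that standard `gymnasium.make` behavior:

# --- Illustrative example, not part of the diff ---
import gymnasium
import ogbench.locomaze  # noqa: F401  # Importing runs the register() calls above.

# These two should be equivalent 64x64 pixel-observation environments.
env_a = gymnasium.make('visual-antmaze-medium-v0')
env_b = gymnasium.make(
    'antmaze-medium-v0',
    ob_type='pixels',
    render_mode='rgb_array',
    width=64,
    height=64,
    camera_name='back',
)
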
ogbench/locomaze/ant.py
ADDED
@@ -0,0 +1,119 @@
import os

import numpy as np
from gymnasium import utils
from gymnasium.envs.mujoco import MujocoEnv
from gymnasium.spaces import Box


class AntEnv(MujocoEnv, utils.EzPickle):
    """Gymnasium Ant environment.

    Unlike the original Ant environment, this environment uses a restricted joint range for the actuators, as typically
    done in previous works in hierarchical reinforcement learning. It also uses a control frequency of 10Hz instead of
    20Hz, which is the default in the original environment.
    """

    xml_file = os.path.join(os.path.dirname(__file__), 'assets', 'ant.xml')
    metadata = {
        'render_modes': ['human', 'rgb_array', 'depth_array'],
        'render_fps': 10,
    }

    def __init__(
        self,
        xml_file=None,
        reset_noise_scale=0.1,
        render_mode='rgb_array',
        width=200,
        height=200,
        **kwargs,
    ):
        """Initialize the Ant environment.

        Args:
            xml_file: Path to the XML description (optional).
            reset_noise_scale: Scale of the noise added to the initial state during reset.
            render_mode: Rendering mode.
            width: Width of the rendered image.
            height: Height of the rendered image.
            **kwargs: Additional keyword arguments.
        """
        if xml_file is None:
            xml_file = self.xml_file
        utils.EzPickle.__init__(
            self,
            xml_file,
            reset_noise_scale,
            **kwargs,
        )

        self._reset_noise_scale = reset_noise_scale

        observation_space = Box(low=-np.inf, high=np.inf, shape=(29,), dtype=np.float64)

        MujocoEnv.__init__(
            self,
            xml_file,
            frame_skip=5,
            observation_space=observation_space,
            render_mode=render_mode,
            width=width,
            height=height,
            **kwargs,
        )

    def step(self, action):
        prev_qpos = self.data.qpos.copy()
        prev_qvel = self.data.qvel.copy()

        self.do_simulation(action, self.frame_skip)

        qpos = self.data.qpos.copy()
        qvel = self.data.qvel.copy()

        observation = self.get_ob()

        if self.render_mode == 'human':
            self.render()

        return (
            observation,
            0.0,
            False,
            False,
            {
                'xy': self.get_xy(),
                'prev_qpos': prev_qpos,
                'prev_qvel': prev_qvel,
                'qpos': qpos,
                'qvel': qvel,
            },
        )

    def get_ob(self):
        position = self.data.qpos.flat.copy()
        velocity = self.data.qvel.flat.copy()

        return np.concatenate([position, velocity])

    def reset_model(self):
        noise_low = -self._reset_noise_scale
        noise_high = self._reset_noise_scale

        qpos = self.init_qpos + self.np_random.uniform(low=noise_low, high=noise_high, size=self.model.nq)
        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.standard_normal(self.model.nv)
        self.set_state(qpos, qvel)

        observation = self.get_ob()

        return observation

    def get_xy(self):
        return self.data.qpos[:2].copy()

    def set_xy(self, xy):
        qpos = self.data.qpos.copy()
        qvel = self.data.qvel.copy()
        qpos[:2] = xy
        self.set_state(qpos, qvel)
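Note that `step` always returns reward 0.0 and `terminated=False`; goal rewards and termination are layered on by the maze wrapper in `maze.py`, which drives the agent through `get_xy`/`set_xy`. A direct-instantiation sketch (assumes MuJoCo and Gymnasium are installed):

# --- Illustrative example, not part of the diff ---
import numpy as np
from ogbench.locomaze.ant import AntEnv

env = AntEnv()
ob, info = env.reset(seed=0)
assert ob.shape == (29,)  # 15 qpos + 14 qvel dims, concatenated by get_ob().

ob, reward, terminated, truncated, info = env.step(np.zeros(8))  # 8 actuators.
print(reward, terminated, info['xy'])  # 0.0 False [x y]; position also via env.get_xy().
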
ogbench/locomaze/assets/ant.xml
ADDED
@@ -0,0 +1,96 @@
<mujoco model="ant">
  <compiler inertiafromgeom="true" angle="degree" coordinate="local"/>

  <option timestep="0.02" integrator="RK4"/>

  <custom>
    <numeric name="init_qpos" data="0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0"/>
  </custom>

  <default>
    <joint limited="true" armature="1" damping="1"/>
    <geom condim="3" conaffinity="0" margin="0.01" friction="1 0.5 0.5" solref=".02 1" solimp=".8 .8 .01" density="5.0" material="self"/>
  </default>

  <asset>
    <texture type="skybox" builtin="gradient" width="100" height="100" rgb1="1 1 1" rgb2="0 0 0"/>
    <texture name="grid" type="2d" builtin="checker" rgb1=".08 .11 .16" rgb2=".15 .18 .25" width="300" height="300"/>
    <texture name="ball" builtin="checker" mark="cross" width="151" height="151" rgb1="0.1 0.1 0.1" rgb2="0.9 0.9 0.9" markrgb="1 1 1"/>
    <texture name="teleport_in" type="2d" builtin="gradient" rgb1=".1 .1 .1" rgb2="0.35 0.55 0.91" width="300" height="300"/>
    <texture name="teleport_out" type="2d" builtin="gradient" rgb1=".9 .9 .9" rgb2="0.35 0.55 0.91" width="300" height="300"/>
    <material name="grid" texture="grid" texrepeat="1 1" texuniform="true"/>
    <material name="self" rgba=".7 .5 .3 1"/>
    <material name="self_white" rgba=".8 .8 .8 1"/>
    <material name="wall" rgba="1 1 1 1"/>
    <material name="ball" texture="ball"/>
    <material name="target" rgba="0.96 0.26 0.33 1"/>
    <material name="teleport_in" texture="teleport_in"/>
    <material name="teleport_out" texture="teleport_out"/>
  </asset>

  <worldbody>
    <light name="global" directional="true" cutoff="100" ambient=".2 .2 .2" exponent="1" diffuse="1 1 1" specular=".1 .1 .1" pos="0 0 1.3" dir="-0 0 -1.3"/>
    <geom name="floor" type="plane" conaffinity="1" size="100 100 .2" material="grid"/>
    <body name="torso" pos="0 0 0.75">
      <camera name="back" pos="0 -2.5 5" xyaxes="1 0 0 0 2 1" mode="trackcom"/>
      <geom name="torso_geom" type="sphere" size="0.25" pos="0 0 0"/>
      <joint name="root" type="free" limited="false" pos="0 0 0" axis="0 0 1" margin="0.01" armature="0" damping="0"/>
      <light name="torso_light" pos="0 0 8" mode="trackcom"/>
      <body name="front_left_leg" pos="0 0 0">
        <geom name="aux_1_geom" type="capsule" size="0.08" fromto="0.0 0.0 0.0 0.2 0.2 0.0"/>
        <body name="aux_1" pos="0.2 0.2 0">
          <joint name="hip_1" type="hinge" pos="0.0 0.0 0.0" axis="0 0 1" range="-30 30"/>
          <geom name="left_leg_geom" type="capsule" size="0.08" fromto="0.0 0.0 0.0 0.2 0.2 0.0"/>
          <body pos="0.2 0.2 0">
            <joint name="ankle_1" type="hinge" pos="0.0 0.0 0.0" axis="-1 1 0" range="30 70"/>
            <geom name="left_ankle_geom" type="capsule" size="0.08" fromto="0.0 0.0 0.0 0.4 0.4 0.0"/>
          </body>
        </body>
      </body>
      <body name="front_right_leg" pos="0 0 0">
        <geom name="aux_2_geom" type="capsule" size="0.08" fromto="0.0 0.0 0.0 -0.2 0.2 0.0"/>
        <body name="aux_2" pos="-0.2 0.2 0">
          <joint name="hip_2" type="hinge" pos="0.0 0.0 0.0" axis="0 0 1" range="-30 30"/>
          <geom name="right_leg_geom" type="capsule" size="0.08" fromto="0.0 0.0 0.0 -0.2 0.2 0.0"/>
          <body pos="-0.2 0.2 0">
            <joint name="ankle_2" type="hinge" pos="0.0 0.0 0.0" axis="1 1 0" range="-70 -30"/>
            <geom name="right_ankle_geom" type="capsule" size="0.08" fromto="0.0 0.0 0.0 -0.4 0.4 0.0"/>
          </body>
        </body>
      </body>
      <body name="back_leg" pos="0 0 0">
        <geom name="aux_3_geom" type="capsule" size="0.08" fromto="0.0 0.0 0.0 -0.2 -0.2 0.0"/>
        <body name="aux_3" pos="-0.2 -0.2 0">
          <joint name="hip_3" type="hinge" pos="0.0 0.0 0.0" axis="0 0 1" range="-30 30"/>
          <geom name="back_leg_geom" type="capsule" size="0.08" fromto="0.0 0.0 0.0 -0.2 -0.2 0.0"/>
          <body pos="-0.2 -0.2 0">
            <joint name="ankle_3" type="hinge" pos="0.0 0.0 0.0" axis="-1 1 0" range="-70 -30"/>
            <geom name="third_ankle_geom" type="capsule" size="0.08" fromto="0.0 0.0 0.0 -0.4 -0.4 0.0"/>
          </body>
        </body>
      </body>
      <body name="right_back_leg" pos="0 0 0">
        <geom name="aux_4_geom" type="capsule" size="0.08" fromto="0.0 0.0 0.0 0.2 -0.2 0.0"/>
        <body name="aux_4" pos="0.2 -0.2 0">
          <joint name="hip_4" type="hinge" pos="0.0 0.0 0.0" axis="0 0 1" range="-30 30"/>
          <geom name="rightback_leg_geom" type="capsule" size="0.08" fromto="0.0 0.0 0.0 0.2 -0.2 0.0"/>
          <body pos="0.2 -0.2 0">
            <joint name="ankle_4" type="hinge" pos="0.0 0.0 0.0" axis="1 1 0" range="30 70"/>
            <geom name="fourth_ankle_geom" type="capsule" size="0.08" fromto="0.0 0.0 0.0 0.4 -0.4 0.0"/>
          </body>
        </body>
      </body>
    </body>
  </worldbody>

  <actuator>
    <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_4" gear="30"/>
    <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_4" gear="30"/>
    <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_1" gear="30"/>
    <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_1" gear="30"/>
    <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_2" gear="30"/>
    <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_2" gear="30"/>
    <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_3" gear="30"/>
    <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_3" gear="30"/>
  </actuator>
</mujoco>
ogbench/locomaze/assets/humanoid.xml
ADDED
@@ -0,0 +1,212 @@
<mujoco model="humanoid">
  <asset>
    <texture name="grid" type="2d" builtin="checker" rgb1=".08 .11 .16" rgb2=".15 .18 .25" width="300" height="300"/>
    <material name="grid" texture="grid" texrepeat="1 1" texuniform="true"/>
    <material name="self" rgba=".7 .5 .3 1"/>
    <material name="self_default" rgba=".7 .5 .3 1"/>
    <material name="self_highlight" rgba="0 .5 .3 1"/>
    <material name="self_white" rgba=".8 .8 .8 1"/>
    <material name="effector" rgba=".7 .4 .2 1"/>
    <material name="effector_default" rgba=".7 .4 .2 1"/>
    <material name="effector_highlight" rgba="0 .5 .3 1"/>
    <material name="decoration" rgba=".3 .5 .7 1"/>
    <material name="eye" rgba="0 .2 1 1"/>
    <material name="target" rgba="0.96 0.26 0.33 1"/>
    <material name="site" rgba=".5 .5 .5 .3"/>
    <material name="wall" rgba="1 1 1 1"/>
  </asset>

  <option timestep=".005"/>

  <default>
    <motor ctrlrange="-1 1" ctrllimited="true"/>
    <default class="body">
      <geom type="capsule" condim="1" friction=".7" solimp=".9 .99 .003" solref=".015 1" material="self"/>
      <joint type="hinge" damping=".2" stiffness="1" armature=".01" limited="true" solimplimit="0 .99 .01"/>
      <default class="big_joint">
        <joint damping="5" stiffness="10"/>
        <default class="big_stiff_joint">
          <joint stiffness="20"/>
        </default>
      </default>
      <site size=".04" group="3"/>
      <default class="force-torque">
        <site type="box" size=".01 .01 .02" rgba="1 0 0 1"/>
      </default>
      <default class="touch">
        <site type="capsule" rgba="0 0 1 .3"/>
      </default>
    </default>
  </default>

  <worldbody>
    <light name="global" directional="true" cutoff="100" exponent="1" ambient=".2 .2 .2" diffuse="1 1 1" specular=".1 .1 .1" pos="0 0 1.3" dir="-0 0 -1.3"/>
    <geom name="floor" type="plane" conaffinity="1" size="100 100 .2" material="grid"/>
    <body name="torso" pos="0 0 1.5" childclass="body">
      <camera name="back" pos="0 -0.6 1.25" xyaxes="1 0 0 0 2.5 1" mode="trackcom"/>
      <freejoint name="root"/>
      <site name="root" class="force-torque"/>
      <geom name="torso" fromto="0 -.07 0 0 .07 0" size=".07"/>
      <light name="torso_light" pos="0 0 8" mode="trackcom"/>
      <geom name="upper_waist" fromto="-.01 -.06 -.12 -.01 .06 -.12" size=".06"/>
      <site name="torso" class="touch" type="box" pos="0 0 -.05" size=".075 .14 .13"/>
      <body name="head" pos="0 0 .19">
        <geom name="head" type="sphere" size=".09"/>
        <site name="head" class="touch" type="sphere" size=".091"/>
        <camera name="egocentric" pos=".09 0 0" xyaxes="0 -1 0 .1 0 1" fovy="80"/>
      </body>
      <body name="lower_waist" pos="-.01 0 -.260" quat="1.000 0 -.002 0">
        <geom name="lower_waist" fromto="0 -.06 0 0 .06 0" size=".06"/>
        <site name="lower_waist" class="touch" size=".061 .06" zaxis="0 1 0"/>
        <joint name="abdomen_z" pos="0 0 .065" axis="0 0 1" range="-45 45" class="big_stiff_joint"/>
        <joint name="abdomen_y" pos="0 0 .065" axis="0 1 0" range="-75 30" class="big_joint"/>
        <body name="pelvis" pos="0 0 -.165" quat="1.000 0 -.002 0">
          <joint name="abdomen_x" pos="0 0 .1" axis="1 0 0" range="-35 35" class="big_joint"/>
          <geom name="butt" fromto="-.02 -.07 0 -.02 .07 0" size=".09"/>
          <site name="butt" class="touch" size=".091 .07" pos="-.02 0 0" zaxis="0 1 0"/>
          <body name="right_thigh" pos="0 -.1 -.04">
            <site name="right_hip" class="force-torque"/>
            <joint name="right_hip_x" axis="1 0 0" range="-25 5" class="big_joint"/>
            <joint name="right_hip_z" axis="0 0 1" range="-60 35" class="big_joint"/>
            <joint name="right_hip_y" axis="0 1 0" range="-110 20" class="big_stiff_joint"/>
            <geom name="right_thigh" fromto="0 0 0 0 .01 -.34" size=".06"/>
            <site name="right_thigh" class="touch" pos="0 .005 -.17" size=".061 .17" zaxis="0 -1 34"/>
            <body name="right_shin" pos="0 .01 -.403">
              <site name="right_knee" class="force-torque" pos="0 0 .02"/>
              <joint name="right_knee" pos="0 0 .02" axis="0 -1 0" range="-160 2"/>
              <geom name="right_shin" fromto="0 0 0 0 0 -.3" size=".049"/>
              <site name="right_shin" class="touch" pos="0 0 -.15" size=".05 .15"/>
              <body name="right_foot" pos="0 0 -.39">
                <site name="right_ankle" class="force-torque"/>
                <joint name="right_ankle_y" pos="0 0 .08" axis="0 1 0" range="-50 50" stiffness="6"/>
                <joint name="right_ankle_x" pos="0 0 .04" axis="1 0 .5" range="-50 50" stiffness="3"/>
                <geom name="right_right_foot" fromto="-.07 -.02 0 .14 -.04 0" size=".027"/>
                <geom name="left_right_foot" fromto="-.07 0 0 .14 .02 0" size=".027"/>
                <site name="right_right_foot" class="touch" pos=".035 -.03 0" size=".03 .11" zaxis="21 -2 0"/>
                <site name="left_right_foot" class="touch" pos=".035 .01 0" size=".03 .11" zaxis="21 2 0"/>
              </body>
            </body>
          </body>
          <body name="left_thigh" pos="0 .1 -.04">
            <site name="left_hip" class="force-torque"/>
            <joint name="left_hip_x" axis="-1 0 0" range="-25 5" class="big_joint"/>
            <joint name="left_hip_z" axis="0 0 -1" range="-60 35" class="big_joint"/>
            <joint name="left_hip_y" axis="0 1 0" range="-120 20" class="big_stiff_joint"/>
            <geom name="left_thigh" fromto="0 0 0 0 -.01 -.34" size=".06"/>
            <site name="left_thigh" class="touch" pos="0 -.005 -.17" size=".061 .17" zaxis="0 1 34"/>
            <body name="left_shin" pos="0 -.01 -.403">
              <site name="left_knee" class="force-torque" pos="0 0 .02"/>
              <joint name="left_knee" pos="0 0 .02" axis="0 -1 0" range="-160 2"/>
              <geom name="left_shin" fromto="0 0 0 0 0 -.3" size=".049"/>
              <site name="left_shin" class="touch" pos="0 0 -.15" size=".05 .15"/>
              <body name="left_foot" pos="0 0 -.39">
                <site name="left_ankle" class="force-torque"/>
                <joint name="left_ankle_y" pos="0 0 .08" axis="0 1 0" range="-50 50" stiffness="6"/>
                <joint name="left_ankle_x" pos="0 0 .04" axis="1 0 .5" range="-50 50" stiffness="3"/>
                <geom name="left_left_foot" fromto="-.07 .02 0 .14 .04 0" size=".027"/>
                <geom name="right_left_foot" fromto="-.07 0 0 .14 -.02 0" size=".027"/>
                <site name="right_left_foot" class="touch" pos=".035 -.01 0" size=".03 .11" zaxis="21 -2 0"/>
                <site name="left_left_foot" class="touch" pos=".035 .03 0" size=".03 .11" zaxis="21 2 0"/>
              </body>
            </body>
          </body>
        </body>
      </body>
      <body name="right_upper_arm" pos="0 -.17 .06">
        <joint name="right_shoulder1" axis="2 1 1" range="-85 60"/>
        <joint name="right_shoulder2" axis="0 -1 1" range="-85 60"/>
        <geom name="right_upper_arm" fromto="0 0 0 .16 -.16 -.16" size=".04 .16"/>
        <site name="right_upper_arm" class="touch" pos=".08 -.08 -.08" size=".041 .14" zaxis="1 -1 -1"/>
        <body name="right_lower_arm" pos=".18 -.18 -.18">
          <joint name="right_elbow" axis="0 -1 1" range="-90 50" stiffness="0"/>
          <geom name="right_lower_arm" fromto=".01 .01 .01 .17 .17 .17" size=".031"/>
          <site name="right_lower_arm" class="touch" pos=".09 .09 .09" size=".032 .14" zaxis="1 1 1"/>
          <body name="right_hand" pos=".18 .18 .18">
            <geom name="right_hand" type="sphere" size=".04"/>
            <site name="right_hand" class="touch" type="sphere" size=".041"/>
          </body>
        </body>
      </body>
      <body name="left_upper_arm" pos="0 .17 .06">
        <joint name="left_shoulder1" axis="2 -1 1" range="-60 85"/>
        <joint name="left_shoulder2" axis="0 1 1" range="-60 85"/>
        <geom name="left_upper_arm" fromto="0 0 0 .16 .16 -.16" size=".04 .16"/>
        <site name="left_upper_arm" class="touch" pos=".08 .08 -.08" size=".041 .14" zaxis="1 1 -1"/>
        <body name="left_lower_arm" pos=".18 .18 -.18">
          <joint name="left_elbow" axis="0 -1 -1" range="-90 50" stiffness="0"/>
          <geom name="left_lower_arm" fromto=".01 -.01 .01 .17 -.17 .17" size=".031"/>
          <site name="left_lower_arm" class="touch" pos=".09 -.09 .09" size=".032 .14" zaxis="1 -1 1"/>
          <body name="left_hand" pos=".18 -.18 .18">
            <geom name="left_hand" type="sphere" size=".04"/>
            <site name="left_hand" class="touch" type="sphere" size=".041"/>
          </body>
        </body>
      </body>
    </body>
  </worldbody>

  <actuator>
    <motor name="abdomen_y" gear="40" joint="abdomen_y"/>
    <motor name="abdomen_z" gear="40" joint="abdomen_z"/>
    <motor name="abdomen_x" gear="40" joint="abdomen_x"/>
    <motor name="right_hip_x" gear="40" joint="right_hip_x"/>
    <motor name="right_hip_z" gear="40" joint="right_hip_z"/>
    <motor name="right_hip_y" gear="120" joint="right_hip_y"/>
    <motor name="right_knee" gear="80" joint="right_knee"/>
    <motor name="right_ankle_x" gear="20" joint="right_ankle_x"/>
    <motor name="right_ankle_y" gear="20" joint="right_ankle_y"/>
    <motor name="left_hip_x" gear="40" joint="left_hip_x"/>
    <motor name="left_hip_z" gear="40" joint="left_hip_z"/>
    <motor name="left_hip_y" gear="120" joint="left_hip_y"/>
    <motor name="left_knee" gear="80" joint="left_knee"/>
    <motor name="left_ankle_x" gear="20" joint="left_ankle_x"/>
    <motor name="left_ankle_y" gear="20" joint="left_ankle_y"/>
    <motor name="right_shoulder1" gear="20" joint="right_shoulder1"/>
    <motor name="right_shoulder2" gear="20" joint="right_shoulder2"/>
    <motor name="right_elbow" gear="40" joint="right_elbow"/>
    <motor name="left_shoulder1" gear="20" joint="left_shoulder1"/>
    <motor name="left_shoulder2" gear="20" joint="left_shoulder2"/>
    <motor name="left_elbow" gear="40" joint="left_elbow"/>
  </actuator>

  <sensor>
    <subtreelinvel name="torso_subtreelinvel" body="torso"/>
    <accelerometer name="torso_accel" site="root"/>
    <velocimeter name="torso_vel" site="root"/>
    <gyro name="torso_gyro" site="root"/>

    <force name="left_ankle_force" site="left_ankle"/>
    <force name="right_ankle_force" site="right_ankle"/>
    <force name="left_knee_force" site="left_knee"/>
    <force name="right_knee_force" site="right_knee"/>
    <force name="left_hip_force" site="left_hip"/>
    <force name="right_hip_force" site="right_hip"/>

    <torque name="left_ankle_torque" site="left_ankle"/>
    <torque name="right_ankle_torque" site="right_ankle"/>
    <torque name="left_knee_torque" site="left_knee"/>
    <torque name="right_knee_torque" site="right_knee"/>
    <torque name="left_hip_torque" site="left_hip"/>
    <torque name="right_hip_torque" site="right_hip"/>

    <touch name="torso_touch" site="torso"/>
    <touch name="head_touch" site="head"/>
    <touch name="lower_waist_touch" site="lower_waist"/>
    <touch name="butt_touch" site="butt"/>
    <touch name="right_thigh_touch" site="right_thigh"/>
    <touch name="right_shin_touch" site="right_shin"/>
    <touch name="right_right_foot_touch" site="right_right_foot"/>
    <touch name="left_right_foot_touch" site="left_right_foot"/>
    <touch name="left_thigh_touch" site="left_thigh"/>
    <touch name="left_shin_touch" site="left_shin"/>
    <touch name="right_left_foot_touch" site="right_left_foot"/>
    <touch name="left_left_foot_touch" site="left_left_foot"/>
    <touch name="right_upper_arm_touch" site="right_upper_arm"/>
    <touch name="right_lower_arm_touch" site="right_lower_arm"/>
    <touch name="right_hand_touch" site="right_hand"/>
    <touch name="left_upper_arm_touch" site="left_upper_arm"/>
    <touch name="left_lower_arm_touch" site="left_lower_arm"/>
    <touch name="left_hand_touch" site="left_hand"/>
  </sensor>
</mujoco>
ogbench/locomaze/assets/point.xml
ADDED
@@ -0,0 +1,41 @@
<mujoco>
  <compiler inertiafromgeom="true" angle="degree" coordinate="local"/>

  <option timestep="0.02" integrator="RK4"/>

  <default>
    <joint limited="false" armature="0" damping="0"/>
    <geom condim="3" conaffinity="0" margin="0" friction="1 0.5 0.5" density="100" material="self"/>
  </default>

  <asset>
    <texture type="skybox" builtin="gradient" width="100" height="100" rgb1="1 1 1" rgb2="0 0 0"/>
    <texture name="grid" type="2d" builtin="checker" rgb1=".08 .11 .16" rgb2=".15 .18 .25" width="300" height="300"/>
    <texture name="texgeom" type="cube" builtin="flat" mark="cross" width="127" height="1278" rgb1="0.8 0.6 0.4" rgb2="0.8 0.6 0.4" markrgb="1 1 1" random="0.01"/>
    <texture name="teleport_in" type="2d" builtin="gradient" rgb1=".1 .1 .1" rgb2="0.35 0.55 0.91" width="300" height="300"/>
    <texture name="teleport_out" type="2d" builtin="gradient" rgb1=".9 .9 .9" rgb2="0.35 0.55 0.91" width="300" height="300"/>
    <material name="grid" texture="grid" texrepeat="1 1" texuniform="true"/>
    <material name="self" rgba=".7 .5 .3 1"/>
    <material name="geom" texture="texgeom" texuniform="true"/>
    <material name="wall" rgba="1 1 1 1"/>
    <material name="target" rgba="0.96 0.26 0.33 1"/>
    <material name="teleport_in" texture="teleport_in"/>
    <material name="teleport_out" texture="teleport_out"/>
  </asset>

  <worldbody>
    <light name="global" directional="true" cutoff="100" exponent="1" ambient=".2 .2 .2" diffuse="1 1 1" specular=".1 .1 .1" pos="0 0 1.3" dir="-0 0 -1.3"/>
    <geom name="floor" pos="0 0 0" size="100 100 .2" type="plane" conaffinity="1" condim="3" material="grid"/>
    <body name="torso" pos="0 0 0">
      <geom name="pointbody" type="sphere" size="0.7" pos="0 0 0.7"/>
      <joint name="ballx" type="slide" axis="1 0 0" pos="0 0 0"/>
      <joint name="bally" type="slide" axis="0 1 0" pos="0 0 0"/>
      <light name="torso_light" pos="0 0 8" mode="trackcom"/>
    </body>
  </worldbody>

  <actuator>
    <motor joint="ballx" ctrlrange="-1 1" ctrllimited="true"/>
    <motor joint="bally" ctrlrange="-1 1" ctrllimited="true"/>
  </actuator>
</mujoco>
ogbench/locomaze/humanoid.py
ADDED
@@ -0,0 +1,174 @@
import contextlib
import os

import mujoco
import numpy as np
from gymnasium import utils
from gymnasium.envs.mujoco import MujocoEnv
from gymnasium.spaces import Box


class HumanoidEnv(MujocoEnv, utils.EzPickle):
    """DMC Humanoid environment.

    Several methods are reimplemented to remove the dependency on the `dm_control` package. It is supposed to work
    identically to the original Humanoid environment.
    """

    xml_file = os.path.join(os.path.dirname(__file__), 'assets', 'humanoid.xml')
    metadata = {
        'render_modes': ['human', 'rgb_array', 'depth_array'],
        'render_fps': 40,
    }

    def __init__(
        self,
        xml_file=None,
        render_mode='rgb_array',
        width=200,
        height=200,
        **kwargs,
    ):
        """Initialize the Humanoid environment.

        Args:
            xml_file: Path to the XML description (optional).
            render_mode: Rendering mode.
            width: Width of the rendered image.
            height: Height of the rendered image.
            **kwargs: Additional keyword arguments.
        """
        if xml_file is None:
            xml_file = self.xml_file
        utils.EzPickle.__init__(
            self,
            xml_file,
            **kwargs,
        )

        observation_space = Box(low=-np.inf, high=np.inf, shape=(69,), dtype=np.float64)

        MujocoEnv.__init__(
            self,
            xml_file,
            frame_skip=5,
            observation_space=observation_space,
            render_mode=render_mode,
            width=width,
            height=height,
            **kwargs,
        )

    def step(self, action):
        prev_qpos = self.data.qpos.copy()
        prev_qvel = self.data.qvel.copy()

        self.do_simulation(action, self.frame_skip)

        qpos = self.data.qpos.copy()
        qvel = self.data.qvel.copy()

        observation = self.get_ob()

        if self.render_mode == 'human':
            self.render()

        return (
            observation,
            0.0,
            False,
            False,
            {
                'xy': self.get_xy(),
                'prev_qpos': prev_qpos,
                'prev_qvel': prev_qvel,
                'qpos': qpos,
                'qvel': qvel,
            },
        )

    def _step_mujoco_simulation(self, ctrl, n_frames):
        self.data.ctrl[:] = ctrl

        # DMC-style stepping (see Page 6 of https://arxiv.org/abs/2006.12983).
        if self.model.opt.integrator != mujoco.mjtIntegrator.mjINT_RK4.value:
            mujoco.mj_step2(self.model, self.data)
            if n_frames > 1:
                mujoco.mj_step(self.model, self.data, n_frames - 1)
        else:
            mujoco.mj_step(self.model, self.data, n_frames)

        mujoco.mj_step1(self.model, self.data)

    def get_ob(self):
        xy = self.data.qpos[:2]
        joint_angles = self.data.qpos[7:]  # Skip the 7 DoFs of the free root joint.
        head_height = self.data.xpos[2, 2]  # ['head', 'z']
        torso_frame = self.data.xmat[1].reshape(3, 3)  # ['torso']
        torso_pos = self.data.xpos[1]  # ['torso']
        positions = []
        for idx in [16, 10, 13, 7]:  # ['left_hand', 'left_foot', 'right_hand', 'right_foot']
            torso_to_limb = self.data.xpos[idx] - torso_pos
            positions.append(torso_to_limb.dot(torso_frame))
        extremities = np.hstack(positions)
        torso_vertical_orientation = self.data.xmat[1, [6, 7, 8]]  # ['torso', ['zx', 'zy', 'zz']]
        center_of_mass_velocity = self.data.sensordata[0:3]  # ['torso_subtreelinvel']
        velocity = self.data.qvel

        return np.concatenate(
            [
                xy,
                joint_angles,
                [head_height],
                extremities,
                torso_vertical_orientation,
                center_of_mass_velocity,
                velocity,
            ]
        )

    @contextlib.contextmanager
    def disable(self, *flags):
        old_bitmask = self.model.opt.disableflags
        new_bitmask = old_bitmask
        for flag in flags:
            if isinstance(flag, str):
                field_name = 'mjDSBL_' + flag.upper()
                flag = getattr(mujoco.mjtDisableBit, field_name)
            elif isinstance(flag, int):
                flag = mujoco.mjtDisableBit(flag)
            new_bitmask |= flag.value
        self.model.opt.disableflags = new_bitmask
        try:
            yield
        finally:
            self.model.opt.disableflags = old_bitmask

    def reset_model(self):
        penetrating = True
        while penetrating:
            quat = self.np_random.uniform(size=4)
            quat /= np.linalg.norm(quat)
            self.data.qpos[3:7] = quat
            self.data.qvel = 0.1 * self.np_random.standard_normal(self.model.nv)

            for joint_id in range(1, self.model.njnt):
                range_min, range_max = self.model.jnt_range[joint_id]
                self.data.qpos[6 + joint_id] = self.np_random.uniform(range_min, range_max)

            with self.disable('actuation'):
                mujoco.mj_forward(self.model, self.data)
            penetrating = self.data.ncon > 0

        observation = self.get_ob()

        return observation

    def get_xy(self):
        return self.data.qpos[:2].copy()

    def set_xy(self, xy):
        qpos = self.data.qpos.copy()
        qvel = self.data.qvel.copy()
        qpos[:2] = xy
        self.set_state(qpos, qvel)
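`reset_model` rejection-samples random orientations and joint angles until a forward pass reports no contacts, running that check inside `disable('actuation')` so control torques cannot mask penetration. The context manager is general-purpose: string flags are mapped to `mujoco.mjtDisableBit` entries and the original bitmask is restored on exit, even on exceptions. A sketch of standalone use (flag names other than 'actuation' are my choice for illustration):

# --- Illustrative example, not part of the diff ---
from ogbench.locomaze.humanoid import HumanoidEnv

env = HumanoidEnv()
env.reset(seed=0)

# Temporarily run physics with gravity and contacts switched off.
with env.disable('gravity', 'contact'):
    print(bin(env.model.opt.disableflags))  # mjDSBL_GRAVITY | mjDSBL_CONTACT bits set.
print(bin(env.model.opt.disableflags))      # Original bitmask restored.
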
ogbench/locomaze/maze.py
ADDED
|
@@ -0,0 +1,650 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import tempfile
|
| 2 |
+
import xml.etree.ElementTree as ET
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from gymnasium.spaces import Box
|
| 6 |
+
|
| 7 |
+
from ogbench.locomaze.ant import AntEnv
|
| 8 |
+
from ogbench.locomaze.humanoid import HumanoidEnv
|
| 9 |
+
from ogbench.locomaze.point import PointEnv
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def make_maze_env(loco_env_type, maze_env_type, *args, **kwargs):
|
| 13 |
+
"""Factory function for creating a maze environment.
|
| 14 |
+
|
| 15 |
+
Args:
|
| 16 |
+
loco_env_type: Locomotion environment type. One of 'point', 'ant', or 'humanoid'.
|
| 17 |
+
maze_env_type: Maze environment type. Either 'maze' or 'ball'.
|
| 18 |
+
*args: Additional arguments to pass to the target class.
|
| 19 |
+
**kwargs: Additional keyword arguments to pass to the target class.
|
| 20 |
+
"""
|
| 21 |
+
if loco_env_type == 'point':
|
| 22 |
+
loco_env_class = PointEnv
|
| 23 |
+
elif loco_env_type == 'ant':
|
| 24 |
+
loco_env_class = AntEnv
|
| 25 |
+
elif loco_env_type == 'humanoid':
|
| 26 |
+
loco_env_class = HumanoidEnv
|
| 27 |
+
else:
|
| 28 |
+
raise ValueError(f'Unknown locomotion environment type: {loco_env_type}')
|
| 29 |
+
|
| 30 |
+
class MazeEnv(loco_env_class):
|
| 31 |
+
"""Maze environment.
|
| 32 |
+
|
| 33 |
+
It inherits from the locomotion environment and adds a maze to it.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
def __init__(
|
| 37 |
+
self,
|
| 38 |
+
maze_type='large',
|
| 39 |
+
maze_unit=4.0,
|
| 40 |
+
maze_height=0.5,
|
| 41 |
+
terminate_at_goal=True,
|
| 42 |
+
ob_type='states',
|
| 43 |
+
add_noise_to_goal=True,
|
| 44 |
+
*args,
|
| 45 |
+
**kwargs,
|
| 46 |
+
):
|
| 47 |
+
"""Initialize the maze environment.
|
| 48 |
+
|
| 49 |
+
Args:
|
| 50 |
+
                maze_type: Maze type. One of 'arena', 'medium', 'large', 'giant', or 'teleport'.
                maze_unit: Size of a maze unit block.
                maze_height: Height of the maze walls.
                terminate_at_goal: Whether to terminate the episode when the goal is reached.
                ob_type: Observation type. Either 'states' or 'pixels'.
                add_noise_to_goal: Whether to add noise to the goal position.
                *args: Additional arguments to pass to the parent locomotion environment.
                **kwargs: Additional keyword arguments to pass to the parent locomotion environment.
            """
            self._maze_type = maze_type
            self._maze_unit = maze_unit
            self._maze_height = maze_height
            self._terminate_at_goal = terminate_at_goal
            self._ob_type = ob_type
            self._add_noise_to_goal = add_noise_to_goal
            assert ob_type in ['states', 'pixels']

            # Define constants.
            self._offset_x = 4
            self._offset_y = 4
            self._noise = 1
            self._goal_tol = 1.0 if loco_env_type == 'point' else 0.5

            # Define maze map.
            self._teleport_info = None
            if self._maze_type == 'arena':
                maze_map = [
                    [1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 0, 0, 0, 0, 0, 0, 1],
                    [1, 0, 0, 0, 0, 0, 0, 1],
                    [1, 0, 0, 0, 0, 0, 0, 1],
                    [1, 0, 0, 0, 0, 0, 0, 1],
                    [1, 0, 0, 0, 0, 0, 0, 1],
                    [1, 0, 0, 0, 0, 0, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1],
                ]
            elif self._maze_type == 'medium':
                maze_map = [
                    [1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 0, 0, 1, 1, 0, 0, 1],
                    [1, 0, 0, 1, 0, 0, 0, 1],
                    [1, 1, 0, 0, 0, 1, 1, 1],
                    [1, 0, 0, 1, 0, 0, 0, 1],
                    [1, 0, 1, 0, 0, 1, 0, 1],
                    [1, 0, 0, 0, 1, 0, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1],
                ]
            elif self._maze_type == 'large':
                maze_map = [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
                    [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1],
                    [1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
                    [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1],
                    [1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1],
                    [1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1],
                    [1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                ]
            elif self._maze_type == 'giant':
                maze_map = [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1],
                    [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1],
                    [1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1],
                    [1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1],
                    [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1],
                    [1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1],
                    [1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1],
                    [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1],
                    [1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1],
                    [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                ]
            elif self._maze_type == 'teleport':
                maze_map = [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1],
                    [1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1],
                    [1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1],
                    [1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1],
                    [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1],
                    [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1],
                    [1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                ]
                self._teleport_info = dict(
                    teleport_in_ijs=[(4, 6), (5, 1)],
                    teleport_out_ijs=[(1, 7), (6, 1), (6, 10)],
                    teleport_radius=1,
                )
                self._teleport_info['teleport_in_xys'] = [
                    self.ij_to_xy(ij) for ij in self._teleport_info['teleport_in_ijs']
                ]
                self._teleport_info['teleport_out_xys'] = [
                    self.ij_to_xy(ij) for ij in self._teleport_info['teleport_out_ijs']
                ]
            else:
                raise ValueError(f'Unknown maze type: {self._maze_type}')

            self.maze_map = np.array(maze_map)

            # Update XML file.
            xml_file = self.xml_file
            tree = ET.parse(xml_file)
            self.update_tree(tree)
            _, maze_xml_file = tempfile.mkstemp(text=True, suffix='.xml')
            tree.write(maze_xml_file)

            super().__init__(xml_file=maze_xml_file, *args, **kwargs)

            # Set task goals.
            self.task_infos = []
            self.cur_task_id = None
            self.cur_task_info = None
            self.set_tasks()
            self.num_tasks = len(self.task_infos)
            self.cur_goal_xy = np.zeros(2)

            if self._ob_type == 'pixels':
                self.observation_space = Box(low=0, high=255, shape=(64, 64, 3), dtype=np.uint8)

                # Manually color the floor to enable the agent to infer its position from the observation.
                tex_grid = self.model.tex('grid')
                tex_height = tex_grid.height[0]
                tex_width = tex_grid.width[0]
                # MuJoCo 3.2.1 changed the attribute name from 'tex_rgb' to 'tex_data'.
                attr_name = 'tex_rgb' if hasattr(self.model, 'tex_rgb') else 'tex_data'
                tex_rgb = getattr(self.model, attr_name)[tex_grid.adr[0] : tex_grid.adr[0] + 3 * tex_height * tex_width]
                tex_rgb = tex_rgb.reshape(tex_height, tex_width, 3)
                for x in range(tex_height):
                    for y in range(tex_width):
                        min_value = 0
                        max_value = 192
                        r = int(x / tex_height * (max_value - min_value) + min_value)
                        g = int(y / tex_width * (max_value - min_value) + min_value)
                        tex_rgb[x, y, :] = [r, g, 128]
            else:
                ex_ob = self.get_ob()
                self.observation_space = Box(low=-np.inf, high=np.inf, shape=ex_ob.shape, dtype=ex_ob.dtype)

            # Set camera.
            self.reset()
            self.render()
            self.mujoco_renderer.viewer.cam.lookat[0] = 2 * (self.maze_map.shape[1] - 3)
            self.mujoco_renderer.viewer.cam.lookat[1] = 2 * (self.maze_map.shape[0] - 3)
            self.mujoco_renderer.viewer.cam.distance = 5 * (self.maze_map.shape[1] - 2)
            self.mujoco_renderer.viewer.cam.elevation = -90

        def update_tree(self, tree):
            """Update the XML tree to include the maze."""
            worldbody = tree.find('.//worldbody')

            # Add walls.
            for i in range(self.maze_map.shape[0]):
                for j in range(self.maze_map.shape[1]):
                    struct = self.maze_map[i, j]
                    if struct == 1:
                        ET.SubElement(
                            worldbody,
                            'geom',
                            name=f'block_{i}_{j}',
                            pos=f'{j * self._maze_unit - self._offset_x} {i * self._maze_unit - self._offset_y} {self._maze_height / 2 * self._maze_unit}',
                            size=f'{self._maze_unit / 2} {self._maze_unit / 2} {self._maze_height / 2 * self._maze_unit}',
                            type='box',
                            contype='1',
                            conaffinity='1',
                            material='wall',
                        )

            # Adjust floor size.
            center_x, center_y = 2 * (self.maze_map.shape[1] - 3), 2 * (self.maze_map.shape[0] - 3)
            size_x, size_y = 2 * self.maze_map.shape[1], 2 * self.maze_map.shape[0]
            floor = tree.find('.//geom[@name="floor"]')
            floor.set('pos', f'{center_x} {center_y} 0')
            floor.set('size', f'{size_x} {size_y} 0.2')

            if self._teleport_info is not None:
                # Add teleports.
                for i, (x, y) in enumerate(self._teleport_info['teleport_in_xys']):
                    ET.SubElement(
                        worldbody,
                        'geom',
                        name=f'teleport_in_{i}',
                        type='cylinder',
                        size=f'{self._teleport_info["teleport_radius"]} .05',
                        pos=f'{x} {y} .05',
                        material='teleport_in',
                        contype='0',
                        conaffinity='0',
                    )
                for i, (x, y) in enumerate(self._teleport_info['teleport_out_xys']):
                    ET.SubElement(
                        worldbody,
                        'geom',
                        name=f'teleport_out_{i}',
                        type='cylinder',
                        size=f'{self._teleport_info["teleport_radius"]} .05',
                        pos=f'{x} {y} .05',
                        material='teleport_out',
                        contype='0',
                        conaffinity='0',
                    )

            if self._ob_type == 'pixels':
                # Color wall.
                wall = tree.find('.//material[@name="wall"]')
                wall.set('rgba', '.6 .6 .6 1')
                # Remove ambient light.
                light = tree.find('.//light[@name="global"]')
                light.attrib.pop('ambient')
                # Remove torso light.
                torso_light = tree.find('.//light[@name="torso_light"]')
                torso_light_parent = tree.find('.//light[@name="torso_light"]/..')
                torso_light_parent.remove(torso_light)
                # Remove texture repeat.
                grid = tree.find('.//material[@name="grid"]')
                grid.set('texuniform', 'false')
                if loco_env_type == 'ant':
                    # Color one leg white to break symmetry.
                    tree.find('.//geom[@name="aux_1_geom"]').set('material', 'self_white')
                    tree.find('.//geom[@name="left_leg_geom"]').set('material', 'self_white')
                    tree.find('.//geom[@name="left_ankle_geom"]').set('material', 'self_white')
            else:
                # Only show the target for states-based observations.
                ET.SubElement(
                    worldbody,
                    'geom',
                    name='target',
                    type='cylinder',
                    size='.5 .05',
                    pos='0 0 .05',
                    material='target',
                    contype='0',
                    conaffinity='0',
                )

        def set_tasks(self):
            # `tasks` is a list of tasks, where each task is a list of two tuples: (init_ij, goal_ij).
            if self._maze_type == 'arena':
                tasks = [
                    [(1, 1), (6, 6)],
                ]
            elif self._maze_type == 'medium':
                tasks = [
                    [(1, 1), (6, 6)],
                    [(6, 1), (1, 6)],
                    [(5, 3), (4, 2)],
                    [(6, 5), (6, 1)],
                    [(2, 6), (1, 1)],
                ]
            elif self._maze_type == 'large':
                tasks = [
                    [(1, 1), (7, 10)],
                    [(5, 4), (7, 1)],
                    [(7, 4), (1, 10)],
                    [(3, 8), (5, 4)],
                    [(1, 1), (5, 4)],
                ]
            elif self._maze_type == 'giant':
                tasks = [
                    [(1, 1), (10, 14)],
                    [(1, 14), (10, 1)],
                    [(8, 14), (1, 1)],
                    [(8, 3), (5, 12)],
                    [(5, 9), (3, 8)],
                ]
            elif self._maze_type == 'teleport':
                tasks = [
                    [(1, 10), (7, 1)],
                    [(1, 1), (7, 10)],
                    [(5, 6), (7, 10)],
                    [(7, 1), (7, 10)],
                    [(5, 6), (7, 1)],
                ]
            else:
                raise ValueError(f'Unknown maze type: {self._maze_type}')

            self.task_infos = []
            for i, task in enumerate(tasks):
                self.task_infos.append(
                    dict(
                        task_name=f'task{i + 1}',
                        init_ij=task[0],
                        init_xy=self.ij_to_xy(task[0]),
                        goal_ij=task[1],
                        goal_xy=self.ij_to_xy(task[1]),
                    )
                )

        def reset(self, options=None, *args, **kwargs):
            if options is None:
                options = {}
            # Set the task goal.
            if 'task_id' in options:
                # Use the pre-defined task.
                assert 1 <= options['task_id'] <= self.num_tasks, f'Task ID must be in [1, {self.num_tasks}].'
                self.cur_task_id = options['task_id']
                self.cur_task_info = self.task_infos[self.cur_task_id - 1]
            elif 'task_info' in options:
                # Use the provided task information.
                self.cur_task_id = None
                self.cur_task_info = options['task_info']
            else:
                # Randomly sample a task.
                self.cur_task_id = np.random.randint(1, self.num_tasks + 1)
                self.cur_task_info = self.task_infos[self.cur_task_id - 1]

            # Whether to provide a rendering of the goal.
            render_goal = False
            if 'render_goal' in options:
                render_goal = options['render_goal']

            # Get initial and goal positions with noise.
            init_xy = self.add_noise(self.ij_to_xy(self.cur_task_info['init_ij']))
            goal_xy = self.ij_to_xy(self.cur_task_info['goal_ij'])
            if self._add_noise_to_goal:
                goal_xy = self.add_noise(goal_xy)

            # First, force set the position to the goal position to obtain the goal observation.
            super().reset(*args, **kwargs)

            # Do a few random steps to stabilize the environment.
            num_random_actions = 40 if loco_env_type == 'humanoid' else 5
            for _ in range(num_random_actions):
                super().step(self.action_space.sample())

            # Save the goal observation.
            self.set_goal(goal_xy=goal_xy)
            self.set_xy(goal_xy)
            goal_ob = self.get_ob()
            if render_goal:
                goal_rendered = self.render()

            # Now, do the actual reset.
            ob, info = super().reset(*args, **kwargs)
            self.set_goal(goal_xy=goal_xy)
            self.set_xy(init_xy)
            ob = self.get_ob()
            info['goal'] = goal_ob
            if render_goal:
                info['goal_rendered'] = goal_rendered

            return ob, info

        def step(self, action):
            ob, reward, terminated, truncated, info = super().step(action)

            if self._teleport_info is not None:
                # Check if the agent is close to an inbound teleport.
                for x, y in self._teleport_info['teleport_in_xys']:
                    if np.linalg.norm(self.get_xy() - np.array([x, y])) <= self._teleport_info['teleport_radius'] * 1.5:
                        # Teleport the agent to a random outbound teleport.
                        teleport_out_xy = self._teleport_info['teleport_out_xys'][
                            np.random.randint(len(self._teleport_info['teleport_out_xys']))
                        ]
                        self.set_xy(np.array(teleport_out_xy))
                        break

            # Check if the agent has reached the goal.
            if np.linalg.norm(self.get_xy() - self.cur_goal_xy) <= self._goal_tol:
                if self._terminate_at_goal:
                    terminated = True
                info['success'] = 1.0
                reward = 1.0
            else:
                info['success'] = 0.0
                reward = 0.0

            return ob, reward, terminated, truncated, info

        def get_ob(self, ob_type=None):
            ob_type = self._ob_type if ob_type is None else ob_type
            if ob_type == 'states':
                return super().get_ob()
            else:
                frame = self.render()
                return frame

        def set_goal(self, goal_ij=None, goal_xy=None):
            """Set the goal position and update the target object."""
            if goal_xy is None:
                self.cur_goal_xy = self.ij_to_xy(goal_ij)
                if self._add_noise_to_goal:
                    self.cur_goal_xy = self.add_noise(self.cur_goal_xy)
            else:
                self.cur_goal_xy = goal_xy
            if self._ob_type == 'states':
                # Use `cur_goal_xy` here (the original used `goal_xy`, which is None when only
                # `goal_ij` is given).
                self.model.geom('target').pos[:2] = self.cur_goal_xy

        def get_oracle_subgoal(self, start_xy, goal_xy):
            """Get the oracle subgoal for the agent.

            If the goal is unreachable, it returns the current position as the subgoal.

            Args:
                start_xy: Starting position of the agent.
                goal_xy: Goal position of the agent.
            Returns:
                A tuple of the oracle subgoal and the BFS map.
            """
            start_ij = self.xy_to_ij(start_xy)
            goal_ij = self.xy_to_ij(goal_xy)

            # Run BFS to find the next subgoal.
            bfs_map = self.maze_map.copy()
            for i in range(self.maze_map.shape[0]):
                for j in range(self.maze_map.shape[1]):
                    bfs_map[i][j] = -1

            bfs_map[goal_ij[0], goal_ij[1]] = 0
            queue = [goal_ij]
            while len(queue) > 0:
                i, j = queue.pop(0)
                for di, dj in [(-1, 0), (0, -1), (1, 0), (0, 1)]:
                    ni, nj = i + di, j + dj
                    if (
                        0 <= ni < self.maze_map.shape[0]
                        and 0 <= nj < self.maze_map.shape[1]
                        and self.maze_map[ni, nj] == 0
                        and bfs_map[ni, nj] == -1
                    ):
                        bfs_map[ni][nj] = bfs_map[i][j] + 1
                        queue.append((ni, nj))

            # Find the subgoal that attains the minimum BFS value.
            subgoal_ij = start_ij
            for di, dj in [(-1, 0), (0, -1), (1, 0), (0, 1)]:
                ni, nj = start_ij[0] + di, start_ij[1] + dj
                if (
                    0 <= ni < self.maze_map.shape[0]
                    and 0 <= nj < self.maze_map.shape[1]
                    and self.maze_map[ni, nj] == 0
                    and bfs_map[ni, nj] < bfs_map[subgoal_ij[0], subgoal_ij[1]]
                ):
                    subgoal_ij = (ni, nj)
            subgoal_xy = self.ij_to_xy(subgoal_ij)
            return np.array(subgoal_xy), bfs_map

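        # Note (added for clarity, not in the original source): after the BFS above,
        # `bfs_map[i, j]` holds the grid distance from cell (i, j) to the goal cell
        # (-1 for walls and unreachable cells), so the final loop greedily picks the
        # neighbor of `start_ij` with the smallest distance to the goal.
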
        def xy_to_ij(self, xy):
            maze_unit = self._maze_unit
            i = int((xy[1] + self._offset_y + 0.5 * maze_unit) / maze_unit)
            j = int((xy[0] + self._offset_x + 0.5 * maze_unit) / maze_unit)
            return i, j

        def ij_to_xy(self, ij):
            i, j = ij
            x = j * self._maze_unit - self._offset_x
            y = i * self._maze_unit - self._offset_y
            return x, y

        def add_noise(self, xy):
            random_x = np.random.uniform(low=-self._noise, high=self._noise) * self._maze_unit / 4
            random_y = np.random.uniform(low=-self._noise, high=self._noise) * self._maze_unit / 4
            return xy[0] + random_x, xy[1] + random_y

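    # Worked example of the grid <-> world mapping above (added for clarity; assumes a
    # maze_unit of 4 together with the offsets of 4 defined in __init__): ij_to_xy((1, 2))
    # gives (2 * 4 - 4, 1 * 4 - 4) = (4, 0), and xy_to_ij((4, 0)) recovers (1, 2), since
    # int((0 + 4 + 2) / 4) = 1 and int((4 + 4 + 2) / 4) = 2.
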
    class BallEnv(MazeEnv):
        def update_tree(self, tree):
            super().update_tree(tree)

            # Add ball.
            worldbody = tree.find('.//worldbody')
            ball = ET.SubElement(worldbody, 'body', name='ball', pos='0 0 0.5')
            ET.SubElement(ball, 'freejoint', name='ball_root')
            ET.SubElement(
                ball,
                'geom',
                name='ball',
                size='.25',
                material='ball',
                priority='1',
                conaffinity='1',
                condim='6',
            )
            ET.SubElement(ball, 'light', name='ball_light', pos='0 0 4', mode='trackcom')

        def set_tasks(self):
            # `tasks` is a list of tasks, where each task is a list of three tuples:
            # (agent_init_ij, ball_init_ij, goal_ij).
            if self._maze_type == 'arena':
                tasks = [
                    [(1, 6), (2, 3), (5, 2)],
                    [(2, 2), (5, 5), (2, 2)],
                    [(6, 1), (2, 3), (6, 6)],
                    [(6, 6), (1, 1), (6, 1)],
                    [(4, 6), (6, 2), (1, 6)],
                ]
            elif self._maze_type == 'medium':
                tasks = [
                    [(1, 1), (3, 4), (6, 6)],
                    [(6, 1), (6, 5), (1, 1)],
                    [(5, 3), (4, 2), (6, 5)],
                    [(6, 5), (1, 1), (5, 3)],
                    [(1, 6), (6, 1), (1, 6)],
                ]
            else:
                raise ValueError(f'Unknown maze type: {self._maze_type}')

            self.task_infos = []
            for i, task in enumerate(tasks):
                self.task_infos.append(
                    dict(
                        task_name=f'task{i + 1}',
                        agent_init_ij=task[0],
                        agent_init_xy=self.ij_to_xy(task[0]),
                        ball_init_ij=task[1],
                        ball_init_xy=self.ij_to_xy(task[1]),
                        goal_ij=task[2],
                        goal_xy=self.ij_to_xy(task[2]),
                    )
                )

        def reset(self, options=None, *args, **kwargs):
            if options is None:
                options = {}
            # Set the task goal.
            if 'task_id' in options:
                # Use the pre-defined task.
                assert 1 <= options['task_id'] <= self.num_tasks, f'Task ID must be in [1, {self.num_tasks}].'
                self.cur_task_id = options['task_id']
                self.cur_task_info = self.task_infos[self.cur_task_id - 1]
            elif 'task_info' in options:
                # Use the provided task information.
                self.cur_task_id = None
                self.cur_task_info = options['task_info']
            else:
                # Randomly sample a task.
                self.cur_task_id = np.random.randint(1, self.num_tasks + 1)
                self.cur_task_info = self.task_infos[self.cur_task_id - 1]

            # Whether to provide a rendering of the goal.
            render_goal = False
            if 'render_goal' in options:
                render_goal = options['render_goal']

            # Get initial and goal positions with noise.
            agent_init_xy = self.add_noise(self.ij_to_xy(self.cur_task_info['agent_init_ij']))
            ball_init_xy = self.add_noise(self.ij_to_xy(self.cur_task_info['ball_init_ij']))
            goal_xy = self.ij_to_xy(self.cur_task_info['goal_ij'])
            if self._add_noise_to_goal:
                goal_xy = self.add_noise(goal_xy)

            # First, force set the position to the goal position to obtain the goal observation.
            super(MazeEnv, self).reset(*args, **kwargs)

            # Do a few random steps to stabilize the environment.
            for _ in range(10):
                super(MazeEnv, self).step(self.action_space.sample())

            # Save the goal observation.
            self.set_goal(goal_xy=goal_xy)
            self.set_agent_ball_xy(goal_xy, goal_xy)
            goal_ob = self.get_ob()
            if render_goal:
                goal_rendered = self.render()

            # Now, do the actual reset.
            ob, info = super(MazeEnv, self).reset(*args, **kwargs)
            self.set_goal(goal_xy=goal_xy)
            self.set_agent_ball_xy(agent_init_xy, ball_init_xy)
            ob = self.get_ob()
            info['goal'] = goal_ob
            if render_goal:
                info['goal_rendered'] = goal_rendered

            return ob, info

        def step(self, action):
            ob, reward, terminated, truncated, info = super(MazeEnv, self).step(action)

            # Check if the ball has reached the goal.
            if np.linalg.norm(self.get_agent_ball_xy()[1] - self.cur_goal_xy) <= self._goal_tol:
                if self._terminate_at_goal:
                    terminated = True
                info['success'] = 1.0
                reward = 1.0
            else:
                info['success'] = 0.0
                reward = 0.0

            return ob, reward, terminated, truncated, info

        def get_agent_ball_xy(self):
            agent_xy = self.data.qpos[:2].copy()
            ball_xy = self.data.qpos[-7:-5].copy()

            return agent_xy, ball_xy

        def set_agent_ball_xy(self, agent_xy, ball_xy):
            qpos = self.data.qpos.copy()
            qvel = self.data.qvel.copy()
            qpos[:2] = agent_xy
            qpos[-7:-5] = ball_xy
            self.set_state(qpos, qvel)

    if maze_env_type == 'maze':
        return MazeEnv(*args, **kwargs)
    elif maze_env_type == 'ball':
        return BallEnv(*args, **kwargs)
    else:
        raise ValueError(f'Unknown maze environment type: {maze_env_type}')
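For reference, a minimal usage sketch of the factory above. It assumes the function is the module's `make_maze_env(loco_env_type, maze_env_type, *args, **kwargs)` entry point (the name and signature are inferred from the closure variables in this hunk, not confirmed here):

# Hedged usage sketch; the factory's name and signature are assumptions.
from ogbench.locomaze.maze import make_maze_env

env = make_maze_env('point', 'maze', maze_type='medium', ob_type='states')
ob, info = env.reset(options={'task_id': 1, 'render_goal': False})
for _ in range(10):
    ob, reward, terminated, truncated, info = env.step(env.action_space.sample())
subgoal_xy, bfs_map = env.get_oracle_subgoal(env.get_xy(), env.cur_goal_xy)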
ogbench/locomaze/point.py
ADDED
@@ -0,0 +1,112 @@
import os

import mujoco
import numpy as np
from gymnasium import utils
from gymnasium.envs.mujoco import MujocoEnv
from gymnasium.spaces import Box


class PointEnv(MujocoEnv, utils.EzPickle):
    """PointMass environment.

    This is a simple 2-D point mass environment, where the agent is controlled by an x-y action vector that is added to
    the current position of the point mass.
    """

    xml_file = os.path.join(os.path.dirname(__file__), 'assets', 'point.xml')
    metadata = {
        'render_modes': ['human', 'rgb_array', 'depth_array'],
        'render_fps': 10,
    }

    def __init__(
        self,
        xml_file=None,
        render_mode='rgb_array',
        width=200,
        height=200,
        **kwargs,
    ):
        """Initialize the PointMass environment.

        Args:
            xml_file: Path to the XML description (optional).
            render_mode: Rendering mode.
            width: Width of the rendered image.
            height: Height of the rendered image.
            **kwargs: Additional keyword arguments.
        """
        if xml_file is None:
            xml_file = self.xml_file
        utils.EzPickle.__init__(
            self,
            xml_file,
            **kwargs,
        )

        observation_space = Box(low=-np.inf, high=np.inf, shape=(6,), dtype=np.float64)

        MujocoEnv.__init__(
            self,
            xml_file,
            frame_skip=5,
            observation_space=observation_space,
            render_mode=render_mode,
            width=width,
            height=height,
            **kwargs,
        )

    def step(self, action):
        prev_qpos = self.data.qpos.copy()
        prev_qvel = self.data.qvel.copy()

        action = 0.2 * action

        self.data.qpos[:] = self.data.qpos + action
        self.data.qvel[:] = np.array([0.0, 0.0])

        mujoco.mj_step(self.model, self.data, nstep=self.frame_skip)

        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()

        observation = self.get_ob()

        if self.render_mode == 'human':
            self.render()

        return (
            observation,
            0.0,
            False,
            False,
            {
                'xy': self.get_xy(),
                'prev_qpos': prev_qpos,
                'prev_qvel': prev_qvel,
                'qpos': qpos,
                'qvel': qvel,
            },
        )

    def get_ob(self):
        return self.data.qpos.flat.copy()

    def reset_model(self):
        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-0.1, high=0.1)
        qvel = self.init_qvel + self.np_random.standard_normal(self.model.nv) * 0.1

        self.set_state(qpos, qvel)

        return self.get_ob()

    def get_xy(self):
        return self.data.qpos.copy()

    def set_xy(self, xy):
        qpos = self.data.qpos.copy()
        qvel = self.data.qvel.copy()
        qpos[:] = xy
        self.set_state(qpos, qvel)
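Note on the dynamics above: actions are purely kinematic. Each step scales the action by 0.2, adds it to `qpos`, and zeroes `qvel` before calling `mj_step`, so the physics pass mainly resolves wall collisions. A hedged illustration of the update:

# Illustration only (not part of the module): the kinematic part of PointEnv.step.
import numpy as np

qpos = np.array([0.0, 0.0])
action = np.array([1.0, -0.5])
qpos = qpos + 0.2 * action  # -> [0.2, -0.1]; mj_step then enforces collisions.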
ogbench/manipspace/__init__.py
ADDED
@@ -0,0 +1,164 @@
from gymnasium.envs.registration import register

visual_dict = dict(
    ob_type='pixels',
    width=64,
    height=64,
    visualize_info=False,
)

register(
    id='cube-single-v0',
    entry_point='ogbench.manipspace.envs.cube_env:CubeEnv',
    max_episode_steps=200,
    kwargs=dict(
        env_type='single',
    ),
)
register(
    id='visual-cube-single-v0',
    entry_point='ogbench.manipspace.envs.cube_env:CubeEnv',
    max_episode_steps=200,
    kwargs=dict(
        env_type='single',
        **visual_dict,
    ),
)
register(
    id='cube-double-v0',
    entry_point='ogbench.manipspace.envs.cube_env:CubeEnv',
    max_episode_steps=500,
    kwargs=dict(
        env_type='double',
    ),
)
register(
    id='visual-cube-double-v0',
    entry_point='ogbench.manipspace.envs.cube_env:CubeEnv',
    max_episode_steps=500,
    kwargs=dict(
        env_type='double',
        **visual_dict,
    ),
)
register(
    id='cube-triple-v0',
    entry_point='ogbench.manipspace.envs.cube_env:CubeEnv',
    max_episode_steps=1000,
    kwargs=dict(
        env_type='triple',
    ),
)
register(
    id='visual-cube-triple-v0',
    entry_point='ogbench.manipspace.envs.cube_env:CubeEnv',
    max_episode_steps=1000,
    kwargs=dict(
        env_type='triple',
        **visual_dict,
    ),
)
register(
    id='cube-quadruple-v0',
    entry_point='ogbench.manipspace.envs.cube_env:CubeEnv',
    max_episode_steps=1000,
    kwargs=dict(
        env_type='quadruple',
    ),
)
register(
    id='visual-cube-quadruple-v0',
    entry_point='ogbench.manipspace.envs.cube_env:CubeEnv',
    max_episode_steps=1000,
    kwargs=dict(
        env_type='quadruple',
        **visual_dict,
    ),
)

register(
    id='scene-v0',
    entry_point='ogbench.manipspace.envs.scene_env:SceneEnv',
    max_episode_steps=750,
    kwargs=dict(
        env_type='scene',
    ),
)
register(
    id='visual-scene-v0',
    entry_point='ogbench.manipspace.envs.scene_env:SceneEnv',
    max_episode_steps=750,
    kwargs=dict(
        env_type='scene',
        **visual_dict,
    ),
)

register(
    id='puzzle-3x3-v0',
    entry_point='ogbench.manipspace.envs.puzzle_env:PuzzleEnv',
    max_episode_steps=500,
    kwargs=dict(
        env_type='3x3',
    ),
)
register(
    id='visual-puzzle-3x3-v0',
    entry_point='ogbench.manipspace.envs.puzzle_env:PuzzleEnv',
    max_episode_steps=500,
    kwargs=dict(
        env_type='3x3',
        **visual_dict,
    ),
)
register(
    id='puzzle-4x4-v0',
    entry_point='ogbench.manipspace.envs.puzzle_env:PuzzleEnv',
    max_episode_steps=500,
    kwargs=dict(
        env_type='4x4',
    ),
)
register(
    id='visual-puzzle-4x4-v0',
    entry_point='ogbench.manipspace.envs.puzzle_env:PuzzleEnv',
    max_episode_steps=500,
    kwargs=dict(
        env_type='4x4',
        **visual_dict,
    ),
)
register(
    id='puzzle-4x5-v0',
    entry_point='ogbench.manipspace.envs.puzzle_env:PuzzleEnv',
    max_episode_steps=1000,
    kwargs=dict(
        env_type='4x5',
    ),
)
register(
    id='visual-puzzle-4x5-v0',
    entry_point='ogbench.manipspace.envs.puzzle_env:PuzzleEnv',
    max_episode_steps=1000,
    kwargs=dict(
        env_type='4x5',
        **visual_dict,
    ),
)
register(
    id='puzzle-4x6-v0',
    entry_point='ogbench.manipspace.envs.puzzle_env:PuzzleEnv',
    max_episode_steps=1000,
    kwargs=dict(
        env_type='4x6',
    ),
)
register(
    id='visual-puzzle-4x6-v0',
    entry_point='ogbench.manipspace.envs.puzzle_env:PuzzleEnv',
    max_episode_steps=1000,
    kwargs=dict(
        env_type='4x6',
        **visual_dict,
    ),
)
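Once these `register` calls have run (i.e., after this package is imported), the IDs above can be instantiated through Gymnasium's registry. A minimal sketch using only names registered above:

import gymnasium as gym

import ogbench.manipspace  # noqa: F401  # Triggers the register() calls above.

env = gym.make('cube-single-v0')  # 'visual-*' variants return 64x64 pixel observations.
ob, info = env.reset()
ob, reward, terminated, truncated, info = env.step(env.action_space.sample())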
ogbench/manipspace/controllers/__init__.py
ADDED
@@ -0,0 +1,3 @@
from ogbench.manipspace.controllers.diff_ik import DiffIKController

__all__ = ('DiffIKController',)
ogbench/manipspace/controllers/diff_ik.py
ADDED
@@ -0,0 +1,115 @@
import mujoco
import numpy as np

PI = np.pi
PI_2 = 2 * np.pi  # Two pi (despite the name, not pi/2); used for angle wrapping.


def angle_diff(q1: np.ndarray, q2: np.ndarray) -> np.ndarray:
    return np.mod(q1 - q2 + PI, PI_2) - PI


class DiffIKController:
    """Differential inverse kinematics controller."""

    def __init__(
        self,
        model: mujoco.MjModel,
        sites: list,
        qpos0: np.ndarray = None,
        damping_coeff: float = 1e-12,
        max_angle_change: float = np.radians(45),
    ):
        self._model = model
        self._data = mujoco.MjData(self._model)
        self._qp0 = qpos0
        self._max_angle_change = max_angle_change

        # Cache references.
        self._ns = len(sites)  # Number of sites.
        self._site_ids = np.asarray([self._model.site(s).id for s in sites])

        # Preallocate arrays.
        self._err = np.empty((self._ns, 6))
        self._site_quat = np.empty((self._ns, 4))
        self._site_quat_inv = np.empty((self._ns, 4))
        self._err_quat = np.empty((self._ns, 4))
        self._jac = np.empty((6 * self._ns, self._model.nv))
        self._damping = damping_coeff * np.eye(6 * self._ns)
        self._eye = np.eye(self._model.nv)

    def _forward_kinematics(self) -> None:
        """Minimal computation required for forward kinematics."""
        mujoco.mj_kinematics(self._model, self._data)
        mujoco.mj_comPos(self._model, self._data)  # Required for mj_jacSite.

    def _integrate(self, update: np.ndarray) -> None:
        """Integrate the joint velocities in-place."""
        mujoco.mj_integratePos(self._model, self._data.qpos, update, 1.0)

    def _compute_translational_error(self, pos: np.ndarray) -> None:
        """Compute the error between the desired and current site positions."""
        self._err[:, :3] = pos - self._data.site_xpos[self._site_ids]

    def _compute_rotational_error(self, quat: np.ndarray) -> None:
        """Compute the error between the desired and current site orientations."""
        for i, site_id in enumerate(self._site_ids):
            mujoco.mju_mat2Quat(self._site_quat[i], self._data.site_xmat[site_id])
            mujoco.mju_negQuat(self._site_quat_inv[i], self._site_quat[i])
            mujoco.mju_mulQuat(self._err_quat[i], quat[i], self._site_quat_inv[i])
            mujoco.mju_quat2Vel(self._err[i, 3:], self._err_quat[i], 1.0)

    def _compute_jacobian(self) -> None:
        """Update site end-effector Jacobians."""
        for i, site_id in enumerate(self._site_ids):
            jacp = self._jac[6 * i : 6 * i + 3]
            jacr = self._jac[6 * i + 3 : 6 * i + 6]
            mujoco.mj_jacSite(self._model, self._data, jacp, jacr, site_id)

    def _error_threshold_reached(self, pos_thresh: float, ori_thresh: float) -> bool:
        """Return True if position and rotation errors are below the thresholds."""
        pos_achieved = np.linalg.norm(self._err[:, :3]) <= pos_thresh
        ori_achieved = np.linalg.norm(self._err[:, 3:]) <= ori_thresh
        return pos_achieved and ori_achieved

    def _solve(self) -> np.ndarray:
        """Solve for joint velocities using damped least squares."""
        H = self._jac @ self._jac.T + self._damping
        x = self._jac.T @ np.linalg.solve(H, self._err.ravel())
        if self._qp0 is not None:
            # Nullspace bias toward the reference posture qpos0.
            jac_pinv = np.linalg.pinv(H)
            q_err = angle_diff(self._qp0, self._data.qpos)
            x += (self._eye - (self._jac.T @ jac_pinv) @ self._jac) @ q_err
        return x

    def _scale_update(self, update: np.ndarray) -> np.ndarray:
        """Scale down update so that the max allowable angle change is not exceeded."""
        update_max = np.max(np.abs(update))
        if update_max > self._max_angle_change:
            update *= self._max_angle_change / update_max
        return update

    def solve(
        self,
        pos: np.ndarray,
        quat: np.ndarray,
        curr_qpos: np.ndarray,
        max_iters: int = 20,
        pos_thresh: float = 1e-4,
        ori_thresh: float = 1e-4,
    ) -> np.ndarray:
        self._data.qpos = curr_qpos

        for _ in range(max_iters):
            self._forward_kinematics()

            self._compute_translational_error(np.atleast_2d(pos))
            self._compute_rotational_error(np.atleast_2d(quat))
            if self._error_threshold_reached(pos_thresh, ori_thresh):
                break

            self._compute_jacobian()
            update = self._scale_update(self._solve())
            self._integrate(update)

        return self._data.qpos.copy()
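A hedged usage sketch of `DiffIKController.solve`. The model path and site name below are placeholders for illustration, not files or names from this repository:

import mujoco
import numpy as np

model = mujoco.MjModel.from_xml_path('arm.xml')  # Placeholder path.
controller = DiffIKController(model=model, sites=['attachment_site'])  # Placeholder site.

# One (x, y, z) position and one (w, x, y, z) quaternion target per site.
target_pos = np.array([[0.4, 0.0, 0.3]])
target_quat = np.array([[1.0, 0.0, 0.0, 0.0]])
qpos = controller.solve(pos=target_pos, quat=target_quat, curr_qpos=np.zeros(model.nq))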
ogbench/manipspace/descriptions/button_inner.xml
ADDED
@@ -0,0 +1,26 @@
<mujoco model="button_inner">
  <worldbody>
    <body childclass="buttonbox_base" name="buttonbox_0" pos="0.58 -0.05 0.048" euler="-1.57 0 0">
      <geom material="btn_black" mesh="stopbot" pos="0.0 -0.024 0.0"/>
      <geom material="btn_metal" euler="1.57 0 0" mesh="stopbuttonrim" pos="0.0 -0.0356 0.0"/>
      <geom material="btn_top" mesh="stoptop" pos="0.0 -0.024 0.0"/>

      <geom class="buttonbox_col" pos="0.0 0.0048 0.0288" size="0.048 0.0408 0.0192" type="box"/>
      <geom class="buttonbox_col" pos="0.0 0.0048 -0.0288" size="0.048 0.0408 0.0192" type="box"/>
      <geom class="buttonbox_col" pos="-0.0292 0.0048 0.0" size="0.0188 0.0408 0.0096" type="box"/>
      <geom class="buttonbox_col" pos="0.0292 0.0048 0.0" size="0.0188 0.0408 0.0096" type="box"/>
      <site name="btntop_0" pos="0.0 -0.0774 0.0" group="5"/>

      <body childclass="buttonbox_base" name="button_0">
        <inertial pos="0.0 -0.0774 0.0" mass=".01" diaginertia="0.001 0.001 0.001"/>
        <joint name="buttonbox_joint_0" pos="0.0 0.0 0.0" axis="0 -1 0" type="slide" springref=".5" limited="true" stiffness="0.5" range="-0.024 0.0" damping="1"/>
        <geom material="btn_red" euler="1.57 0 0" mesh="stopbutton" pos="0.0 -0.0632 0.0" name="btngeom_0"/>
        <geom material="btn_black" euler="1.57 0 0" mesh="stopbuttonrod" pos="0.0 -0.0504 0.0"/>

        <geom class="buttonbox_col" euler="1.57 0 0" pos="0.0 -0.0512 0.0" size="0.0084 0.0156" type="cylinder"/>
        <geom class="buttonbox_col" euler="1.57 0 0" pos="0.0 -0.0664 0.0" size="0.0172 0.0032" type="cylinder"/>
        <geom class="buttonbox_col" euler="1.57 0 0" pos="0.0 -0.0732 0.0" size="0.0172 0.0044" type="cylinder"/>
      </body>
    </body>
  </worldbody>
</mujoco>
ogbench/manipspace/descriptions/button_outer.xml
ADDED
@@ -0,0 +1,39 @@
<mujoco model="button_outer">
  <compiler angle="radian" inertiafromgeom="auto" inertiagrouprange="1 5"/>

  <asset>
    <texture name="T_btn" type="cube" file="metaworld/button/metal1.png"/>

    <material name="btn_col" rgba="0.96 0.26 0.33 0.5" shininess="0" specular="0"/>
    <material name="btn_red" rgba="0.96 0.26 0.33 1" shininess="1" reflectance=".7" specular=".5"/>
    <material name="btn_top" rgba="1 1 1 1" shininess="1" reflectance=".7" specular=".5"/>
    <material name="btn_black" rgba=".15 .15 .15 1" shininess="1" reflectance=".7" specular=".5"/>
    <material name="btn_metal" rgba=".8 .8 .8 1" texture="T_btn" shininess="1" reflectance="1" specular="1"/>
  </asset>

  <default>
    <default class="buttonbox_base">
      <joint armature="0.001" damping="2" limited="true"/>
      <geom conaffinity="0" contype="0" group="1" type="mesh"/>
      <position ctrllimited="true" ctrlrange="0 1.57"/>
      <default class="buttonbox_viz">
        <geom condim="4" type="mesh"/>
      </default>
      <default class="buttonbox_col">
        <geom conaffinity="1" condim="3" contype="1" group="4" material="btn_col" solimp="0.99 0.99 0.01" solref="0.01 1"/>
      </default>
      <site type="sphere" size=".01" rgba="0 1 0 1" group="5"/>
    </default>
  </default>

  <asset>
    <mesh file="metaworld/button/stopbot.stl" name="stopbot" scale="0.4 0.4 0.4"/>
    <mesh file="metaworld/button/stopbutton.stl" name="stopbutton" scale="0.4 0.4 0.4"/>
    <mesh file="metaworld/button/stopbuttonrim.stl" name="stopbuttonrim" scale="0.4 0.4 0.4"/>
    <mesh file="metaworld/button/stopbuttonrod.stl" name="stopbuttonrod" scale="0.4 0.4 0.4"/>
    <mesh file="metaworld/button/stoptop.stl" name="stoptop" scale="0.4 0.4 0.4"/>
  </asset>

  <worldbody>
  </worldbody>
</mujoco>
ogbench/manipspace/descriptions/buttons.xml
ADDED
@@ -0,0 +1,84 @@
<mujoco model="buttonbox">
  <compiler angle="radian" inertiafromgeom="auto" inertiagrouprange="1 5"/>

  <asset>
    <texture name="T_btn" type="cube" file="metaworld/button/metal1.png"/>

    <material name="btn_col" rgba="0.96 0.26 0.33 0.5" shininess="0" specular="0"/>
    <material name="btn_red" rgba="0.96 0.26 0.33 1" shininess="1" reflectance=".7" specular=".5"/>
    <material name="btn_top_0" rgba="0.6 0.72 0.94 1" shininess="1" reflectance=".7" specular=".5"/>
    <material name="btn_top_1" rgba="1 1 1 1" shininess="1" reflectance=".7" specular=".5"/>
    <material name="btn_black" rgba=".15 .15 .15 1" shininess="1" reflectance=".7" specular=".5"/>
    <material name="btn_metal" rgba=".8 .8 .8 1" texture="T_btn" shininess="1" reflectance="1" specular="1"/>
  </asset>

  <default>
    <default class="buttonbox_base">
      <joint armature="0.001" damping="2" limited="true"/>
      <geom conaffinity="0" contype="0" group="1" type="mesh"/>
      <position ctrllimited="true" ctrlrange="0 1.57"/>
      <default class="buttonbox_viz">
        <geom condim="4" type="mesh"/>
      </default>
      <default class="buttonbox_col">
        <geom conaffinity="1" condim="3" contype="1" group="4" material="btn_col" solimp="0.99 0.99 0.01" solref="0.01 1"/>
      </default>
      <site type="sphere" size=".01" rgba="0 1 0 1" group="5"/>
    </default>
  </default>

  <asset>
    <mesh file="metaworld/button/stopbot.stl" name="stopbot" scale="0.4 0.4 0.4"/>
    <mesh file="metaworld/button/stopbutton.stl" name="stopbutton" scale="0.4 0.4 0.4"/>
    <mesh file="metaworld/button/stopbuttonrim.stl" name="stopbuttonrim" scale="0.4 0.4 0.4"/>
    <mesh file="metaworld/button/stopbuttonrod.stl" name="stopbuttonrod" scale="0.4 0.4 0.4"/>
    <mesh file="metaworld/button/stoptop.stl" name="stoptop" scale="0.4 0.4 0.4"/>
  </asset>

  <worldbody>
    <body childclass="buttonbox_base" name="buttonbox_0" pos="0.58 -0.05 0.048" euler="-1.57 0 0">
      <geom material="btn_black" mesh="stopbot" pos="0.0 -0.024 0.0"/>
      <geom material="btn_metal" euler="1.57 0 0" mesh="stopbuttonrim" pos="0.0 -0.0356 0.0"/>
      <geom material="btn_top_0" mesh="stoptop" pos="0.0 -0.024 0.0"/>

      <geom class="buttonbox_col" pos="0.0 0.0048 0.0288" size="0.048 0.0408 0.0192" type="box"/>
      <geom class="buttonbox_col" pos="0.0 0.0048 -0.0288" size="0.048 0.0408 0.0192" type="box"/>
      <geom class="buttonbox_col" pos="-0.0292 0.0048 0.0" size="0.0188 0.0408 0.0096" type="box"/>
      <geom class="buttonbox_col" pos="0.0292 0.0048 0.0" size="0.0188 0.0408 0.0096" type="box"/>
      <site name="btntop_0" pos="0.0 -0.0774 0.0" group="5"/>

      <body childclass="buttonbox_base" name="button_0">
        <inertial pos="0.0 -0.0774 0.0" mass=".01" diaginertia="0.001 0.001 0.001"/>
        <joint name="buttonbox_joint_0" pos="0.0 0.0 0.0" axis="0 -1 0" type="slide" springref=".5" limited="true" stiffness="0.5" range="-0.024 0.0" damping="1"/>
        <geom material="btn_red" euler="1.57 0 0" mesh="stopbutton" pos="0.0 -0.0632 0.0" name="btngeom_0"/>
        <geom material="btn_black" euler="1.57 0 0" mesh="stopbuttonrod" pos="0.0 -0.0504 0.0"/>

        <geom class="buttonbox_col" euler="1.57 0 0" pos="0.0 -0.0512 0.0" size="0.0084 0.0156" type="cylinder"/>
        <geom class="buttonbox_col" euler="1.57 0 0" pos="0.0 -0.0664 0.0" size="0.0172 0.0032" type="cylinder"/>
        <geom class="buttonbox_col" euler="1.57 0 0" pos="0.0 -0.0732 0.0" size="0.0172 0.0044" type="cylinder"/>
      </body>
    </body>
    <body childclass="buttonbox_base" name="buttonbox_1" pos="0.58 0.05 0.048" euler="-1.57 0 0">
      <geom material="btn_black" mesh="stopbot" pos="0.0 -0.024 0.0"/>
      <geom material="btn_metal" euler="1.57 0 0" mesh="stopbuttonrim" pos="0.0 -0.0356 0.0"/>
      <geom material="btn_top_1" mesh="stoptop" pos="0.0 -0.024 0.0"/>

      <geom class="buttonbox_col" pos="0.0 0.0048 0.0288" size="0.048 0.0408 0.0192" type="box"/>
      <geom class="buttonbox_col" pos="0.0 0.0048 -0.0288" size="0.048 0.0408 0.0192" type="box"/>
      <geom class="buttonbox_col" pos="-0.0292 0.0048 0.0" size="0.0188 0.0408 0.0096" type="box"/>
      <geom class="buttonbox_col" pos="0.0292 0.0048 0.0" size="0.0188 0.0408 0.0096" type="box"/>
      <site name="btntop_1" pos="0.0 -0.0732 0.0" group="5"/>

      <body childclass="buttonbox_base" name="button_1">
        <inertial pos="0.0 -0.0774 0.0" mass=".01" diaginertia="0.001 0.001 0.001"/>
        <joint name="buttonbox_joint_1" pos="0.0 0.0 0.0" axis="0 -1 0" type="slide" springref=".5" limited="true" stiffness="0.5" range="-0.024 0.0" damping="1"/>
        <geom material="btn_red" euler="1.57 0 0" mesh="stopbutton" pos="0.0 -0.0632 0.0" name="btngeom_1"/>
        <geom material="btn_black" euler="1.57 0 0" mesh="stopbuttonrod" pos="0.0 -0.0504 0.0"/>

        <geom class="buttonbox_col" euler="1.57 0 0" pos="0.0 -0.0512 0.0" size="0.0084 0.0156" type="cylinder"/>
        <geom class="buttonbox_col" euler="1.57 0 0" pos="0.0 -0.0664 0.0" size="0.0172 0.0032" type="cylinder"/>
        <geom class="buttonbox_col" euler="1.57 0 0" pos="0.0 -0.0732 0.0" size="0.0172 0.0044" type="cylinder"/>
      </body>
    </body>
  </worldbody>
</mujoco>
ogbench/manipspace/descriptions/cube.xml
ADDED
@@ -0,0 +1,19 @@
<mujoco model="cube">
  <default>
    <default class="cube">
      <geom type="box" size="0.02 0.02 0.02" rgba="0.96 0.26 0.33 1.0" density="1240" solref="0.004 1" contype="3" group="1"/>
      <site type="sphere" size=".005" rgba="0 1 0 1" group="5"/>
    </default>
  </default>

  <worldbody>
    <body name="object_0" pos="0.3 0 .02">
      <freejoint name="object_joint_0"/>
      <geom name="object_0" class="cube"/>
      <site name="com_0" group="5"/>
    </body>
    <body name="object_target_0" pos="0.45 0 .02" mocap="true">
      <geom name="target_object_0" class="cube" rgba=".5 .5 .5 .2" contype="0" conaffinity="0"/>
    </body>
  </worldbody>
</mujoco>
ogbench/manipspace/descriptions/cube_inner.xml
ADDED
@@ -0,0 +1,12 @@
<mujoco model="cube_inner">
  <worldbody>
    <body name="object_0" pos="0.3 0 .02">
      <freejoint name="object_joint_0"/>
      <geom name="object_0" class="cube"/>
      <site name="com_0" group="5"/>
    </body>
    <body name="object_target_0" pos="0.45 0 .02" mocap="true">
      <geom name="target_object_0" class="cube" rgba=".5 .5 .5 .2" contype="0" conaffinity="0"/>
    </body>
  </worldbody>
</mujoco>
|