Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- maniskill3_environment_assets/.gitignore +1 -0
- maniskill3_environment_assets/racks/bin.gltf +207 -0
- maniskill3_environment_assets/racks/dish_rack.mtl +2 -0
- maniskill3_environment_assets/racks/dish_rack.obj +0 -0
- maniskill3_environment_assets/racks/sockerbit_box.mtl +12 -0
- maniskill3_environment_assets/racks/sockerbit_box.obj +0 -0
- maniskill3_environment_assets/racks/uppsnofsad_box.mtl +12 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/README.md +9 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/.gitignore +2 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/README.md +61 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/detr/backbone.py +129 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/detr/detr_vae.py +141 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/detr/position_encoding.py +93 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/detr/transformer.py +313 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/evaluate.py +98 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/make_env.py +46 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/utils.py +161 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/baselines.sh +77 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/setup.py +17 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/train.py +456 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/train_rgbd.py +612 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/bc/.gitignore +4 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/.gitignore +4 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/README.md +64 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/baselines.sh +78 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/diffusion_policy/evaluate.py +52 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/diffusion_policy/make_env.py +48 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/diffusion_policy/utils.py +119 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/train.py +418 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/.gitignore +4 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/README.md +119 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/baselines.sh +185 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/examples.sh +133 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/ppo.py +470 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/ppo_fast.py +522 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/ppo_rgb.py +594 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/README.md +77 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/common/buffer.py +101 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/common/init.py +22 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/common/logger.py +208 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/common/scale.py +48 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/common/seed.py +12 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/common/world_model.py +178 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/config.yaml +120 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/environment.yaml +67 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/evaluate.py +124 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/tdmpc2.py +313 -0
- project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/train.py +86 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/tabletop/__init__.py +46 -0
- project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/tabletop/__pycache__/assembling_kits.cpython-310.pyc +0 -0
maniskill3_environment_assets/.gitignore
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
*.zip
|
maniskill3_environment_assets/racks/bin.gltf
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"asset":{
|
| 3 |
+
"generator":"Khronos glTF Blender I/O v4.3.47",
|
| 4 |
+
"version":"2.0"
|
| 5 |
+
},
|
| 6 |
+
"scene":0,
|
| 7 |
+
"scenes":[
|
| 8 |
+
{
|
| 9 |
+
"name":"Scene",
|
| 10 |
+
"nodes":[
|
| 11 |
+
0,
|
| 12 |
+
1
|
| 13 |
+
]
|
| 14 |
+
}
|
| 15 |
+
],
|
| 16 |
+
"nodes":[
|
| 17 |
+
{
|
| 18 |
+
"mesh":0,
|
| 19 |
+
"name":"Model"
|
| 20 |
+
},
|
| 21 |
+
{
|
| 22 |
+
"mesh":1,
|
| 23 |
+
"name":"Mesh_0",
|
| 24 |
+
"rotation":[
|
| 25 |
+
0.7071068286895752,
|
| 26 |
+
0,
|
| 27 |
+
0,
|
| 28 |
+
0.7071068286895752
|
| 29 |
+
]
|
| 30 |
+
}
|
| 31 |
+
],
|
| 32 |
+
"materials":[
|
| 33 |
+
{
|
| 34 |
+
"name":"Material.001"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"doubleSided":true,
|
| 38 |
+
"name":"Material_0.006",
|
| 39 |
+
"pbrMetallicRoughness":{
|
| 40 |
+
"baseColorFactor":[
|
| 41 |
+
0.800000011920929,
|
| 42 |
+
0.800000011920929,
|
| 43 |
+
0.800000011920929,
|
| 44 |
+
1
|
| 45 |
+
],
|
| 46 |
+
"metallicFactor":0,
|
| 47 |
+
"roughnessFactor":0.5
|
| 48 |
+
}
|
| 49 |
+
}
|
| 50 |
+
],
|
| 51 |
+
"meshes":[
|
| 52 |
+
{
|
| 53 |
+
"name":"Model.002",
|
| 54 |
+
"primitives":[
|
| 55 |
+
{
|
| 56 |
+
"attributes":{
|
| 57 |
+
"POSITION":0,
|
| 58 |
+
"NORMAL":1,
|
| 59 |
+
"TEXCOORD_0":2
|
| 60 |
+
},
|
| 61 |
+
"indices":3,
|
| 62 |
+
"material":0
|
| 63 |
+
}
|
| 64 |
+
]
|
| 65 |
+
},
|
| 66 |
+
{
|
| 67 |
+
"name":"Mesh_0.005",
|
| 68 |
+
"primitives":[
|
| 69 |
+
{
|
| 70 |
+
"attributes":{
|
| 71 |
+
"POSITION":4,
|
| 72 |
+
"NORMAL":5,
|
| 73 |
+
"TEXCOORD_0":6
|
| 74 |
+
},
|
| 75 |
+
"indices":7,
|
| 76 |
+
"material":1
|
| 77 |
+
}
|
| 78 |
+
]
|
| 79 |
+
}
|
| 80 |
+
],
|
| 81 |
+
"accessors":[
|
| 82 |
+
{
|
| 83 |
+
"bufferView":0,
|
| 84 |
+
"componentType":5126,
|
| 85 |
+
"count":96746,
|
| 86 |
+
"max":[
|
| 87 |
+
366.4075012207031,
|
| 88 |
+
1.4419732093811035,
|
| 89 |
+
192.4498291015625
|
| 90 |
+
],
|
| 91 |
+
"min":[
|
| 92 |
+
352.80267333984375,
|
| 93 |
+
-9.999999974752427e-07,
|
| 94 |
+
174.88624572753906
|
| 95 |
+
],
|
| 96 |
+
"type":"VEC3"
|
| 97 |
+
},
|
| 98 |
+
{
|
| 99 |
+
"bufferView":1,
|
| 100 |
+
"componentType":5126,
|
| 101 |
+
"count":96746,
|
| 102 |
+
"type":"VEC3"
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"bufferView":2,
|
| 106 |
+
"componentType":5126,
|
| 107 |
+
"count":96746,
|
| 108 |
+
"type":"VEC2"
|
| 109 |
+
},
|
| 110 |
+
{
|
| 111 |
+
"bufferView":3,
|
| 112 |
+
"componentType":5125,
|
| 113 |
+
"count":398523,
|
| 114 |
+
"type":"SCALAR"
|
| 115 |
+
},
|
| 116 |
+
{
|
| 117 |
+
"bufferView":4,
|
| 118 |
+
"componentType":5126,
|
| 119 |
+
"count":18533,
|
| 120 |
+
"max":[
|
| 121 |
+
0.12296000123023987,
|
| 122 |
+
0.11509200185537338,
|
| 123 |
+
0.08574099838733673
|
| 124 |
+
],
|
| 125 |
+
"min":[
|
| 126 |
+
-0.12296000123023987,
|
| 127 |
+
9.999999974752427e-07,
|
| 128 |
+
-0.08569499850273132
|
| 129 |
+
],
|
| 130 |
+
"type":"VEC3"
|
| 131 |
+
},
|
| 132 |
+
{
|
| 133 |
+
"bufferView":5,
|
| 134 |
+
"componentType":5126,
|
| 135 |
+
"count":18533,
|
| 136 |
+
"type":"VEC3"
|
| 137 |
+
},
|
| 138 |
+
{
|
| 139 |
+
"bufferView":6,
|
| 140 |
+
"componentType":5126,
|
| 141 |
+
"count":18533,
|
| 142 |
+
"type":"VEC2"
|
| 143 |
+
},
|
| 144 |
+
{
|
| 145 |
+
"bufferView":7,
|
| 146 |
+
"componentType":5123,
|
| 147 |
+
"count":89001,
|
| 148 |
+
"type":"SCALAR"
|
| 149 |
+
}
|
| 150 |
+
],
|
| 151 |
+
"bufferViews":[
|
| 152 |
+
{
|
| 153 |
+
"buffer":0,
|
| 154 |
+
"byteLength":1160952,
|
| 155 |
+
"byteOffset":0,
|
| 156 |
+
"target":34962
|
| 157 |
+
},
|
| 158 |
+
{
|
| 159 |
+
"buffer":0,
|
| 160 |
+
"byteLength":1160952,
|
| 161 |
+
"byteOffset":1160952,
|
| 162 |
+
"target":34962
|
| 163 |
+
},
|
| 164 |
+
{
|
| 165 |
+
"buffer":0,
|
| 166 |
+
"byteLength":773968,
|
| 167 |
+
"byteOffset":2321904,
|
| 168 |
+
"target":34962
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"buffer":0,
|
| 172 |
+
"byteLength":1594092,
|
| 173 |
+
"byteOffset":3095872,
|
| 174 |
+
"target":34963
|
| 175 |
+
},
|
| 176 |
+
{
|
| 177 |
+
"buffer":0,
|
| 178 |
+
"byteLength":222396,
|
| 179 |
+
"byteOffset":4689964,
|
| 180 |
+
"target":34962
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"buffer":0,
|
| 184 |
+
"byteLength":222396,
|
| 185 |
+
"byteOffset":4912360,
|
| 186 |
+
"target":34962
|
| 187 |
+
},
|
| 188 |
+
{
|
| 189 |
+
"buffer":0,
|
| 190 |
+
"byteLength":148264,
|
| 191 |
+
"byteOffset":5134756,
|
| 192 |
+
"target":34962
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"buffer":0,
|
| 196 |
+
"byteLength":178002,
|
| 197 |
+
"byteOffset":5283020,
|
| 198 |
+
"target":34963
|
| 199 |
+
}
|
| 200 |
+
],
|
| 201 |
+
"buffers":[
|
| 202 |
+
{
|
| 203 |
+
"byteLength":5461024,
|
| 204 |
+
"uri":"bin.bin"
|
| 205 |
+
}
|
| 206 |
+
]
|
| 207 |
+
}
|
maniskill3_environment_assets/racks/dish_rack.mtl
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Blender 4.3.2 MTL File: 'None'
|
| 2 |
+
# www.blender.org
|
maniskill3_environment_assets/racks/dish_rack.obj
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
maniskill3_environment_assets/racks/sockerbit_box.mtl
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Blender 4.3.2 MTL File: 'None'
|
| 2 |
+
# www.blender.org
|
| 3 |
+
|
| 4 |
+
newmtl SimplygonCastMaterial.004
|
| 5 |
+
Ns 250.000000
|
| 6 |
+
Ka 1.000000 1.000000 1.000000
|
| 7 |
+
Kd 0.800000 0.800000 0.800000
|
| 8 |
+
Ks 0.500000 0.500000 0.500000
|
| 9 |
+
Ke 0.000000 0.000000 0.000000
|
| 10 |
+
Ni 1.500000
|
| 11 |
+
d 1.000000
|
| 12 |
+
illum 2
|
maniskill3_environment_assets/racks/sockerbit_box.obj
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
maniskill3_environment_assets/racks/uppsnofsad_box.mtl
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Blender 4.3.2 MTL File: 'None'
|
| 2 |
+
# www.blender.org
|
| 3 |
+
|
| 4 |
+
newmtl Material_0.004
|
| 5 |
+
Ns 250.000000
|
| 6 |
+
Ka 1.000000 1.000000 1.000000
|
| 7 |
+
Kd 0.800000 0.800000 0.800000
|
| 8 |
+
Ks 0.500000 0.500000 0.500000
|
| 9 |
+
Ke 0.000000 0.000000 0.000000
|
| 10 |
+
Ni 1.500000
|
| 11 |
+
d 1.000000
|
| 12 |
+
illum 2
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/README.md
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ManiSkill Baselines
|
| 2 |
+
|
| 3 |
+
This folder contains code for all implemented ManiSkill baselines which currently include online Reinforcement Learning and Imitation Learning. All baseline results are published to our [public wandb page](https://wandb.ai/stonet2000/ManiSkill).
|
| 4 |
+
|
| 5 |
+
For more details on baselines (e.g. how to setup maniskill for RL, run baselines etc.) follow the links below in our documentation:
|
| 6 |
+
|
| 7 |
+
- Online Reinforcement Learning: https://maniskill.readthedocs.io/en/latest/user_guide/reinforcement_learning/index.html
|
| 8 |
+
- Learning From Demonstrations / Imitation Learning: https://maniskill.readthedocs.io/en/latest/user_guide/learning_from_demos/index.html
|
| 9 |
+
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/.gitignore
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
runs/
|
| 2 |
+
wandb/
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/README.md
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Action Chunking with Transformers (ACT)
|
| 2 |
+
|
| 3 |
+
Code for running the ACT algorithm based on ["Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware"](https://arxiv.org/pdf/2304.13705). It is adapted from the [original code](https://github.com/tonyzhaozh/act).
|
| 4 |
+
|
| 5 |
+
## Installation
|
| 6 |
+
|
| 7 |
+
To get started, we recommend using conda/mamba to create a new environment and install the dependencies
|
| 8 |
+
|
| 9 |
+
```bash
|
| 10 |
+
conda create -n act-ms python=3.9
|
| 11 |
+
conda activate act-ms
|
| 12 |
+
pip install -e .
|
| 13 |
+
```
|
| 14 |
+
|
| 15 |
+
## Setup
|
| 16 |
+
|
| 17 |
+
Read through the [imitation learning setup documentation](https://maniskill.readthedocs.io/en/latest/user_guide/learning_from_demos/setup.html) which details everything you need to know regarding running imitation learning baselines in ManiSkill. It includes details on how to download demonstration datasets, preprocess them, evaluate policies fairly for comparison, as well as suggestions to improve performance and avoid bugs.
|
| 18 |
+
|
| 19 |
+
## Training
|
| 20 |
+
|
| 21 |
+
We provide scripts to train ACT on demonstrations.
|
| 22 |
+
|
| 23 |
+
Note that some demonstrations are slow (e.g. motion planning or human teleoperated) and can exceed the default max episode steps which can be an issue as imitation learning algorithms learn to solve the task at the same speed the demonstrations solve it. In this case, you can use the `--max-episode-steps` flag to set a higher value so that the policy can solve the task in time. General recommendation is to set `--max-episode-steps` to about 2x the length of the mean demonstrations length you are using for training. We have tuned baselines in the `baselines.sh` script that set a recommended `--max-episode-steps` for each task.
|
| 24 |
+
|
| 25 |
+
Example state-based training, learning from 100 demonstrations generated via motionplanning in the PickCube-v1 task.
|
| 26 |
+
|
| 27 |
+
```bash
|
| 28 |
+
seed=1
|
| 29 |
+
demos=100
|
| 30 |
+
python train.py --env-id PickCube-v1 \
|
| 31 |
+
--demo-path ~/.maniskill/demos/PickCube-v1/motionplanning/trajectory.state.pd_ee_delta_pos.physx_cpu.h5 \
|
| 32 |
+
--control-mode "pd_ee_delta_pos" --sim-backend "physx_cpu" --num_demos $demos --max_episode_steps 100 \
|
| 33 |
+
--total_iters 30000 --log_freq 100 --eval_freq 5000 \
|
| 34 |
+
--exp-name=act-PickCube-v1-state-${demos}_motionplanning_demos-$seed \
|
| 35 |
+
--track # track training on wandb
|
| 36 |
+
```
|
| 37 |
+
|
| 38 |
+
## Citation
|
| 39 |
+
|
| 40 |
+
If you use this baseline please cite the following
|
| 41 |
+
```
|
| 42 |
+
@inproceedings{DBLP:conf/rss/ZhaoKLF23,
|
| 43 |
+
author = {Tony Z. Zhao and
|
| 44 |
+
Vikash Kumar and
|
| 45 |
+
Sergey Levine and
|
| 46 |
+
Chelsea Finn},
|
| 47 |
+
editor = {Kostas E. Bekris and
|
| 48 |
+
Kris Hauser and
|
| 49 |
+
Sylvia L. Herbert and
|
| 50 |
+
Jingjin Yu},
|
| 51 |
+
title = {Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware},
|
| 52 |
+
booktitle = {Robotics: Science and Systems XIX, Daegu, Republic of Korea, July
|
| 53 |
+
10-14, 2023},
|
| 54 |
+
year = {2023},
|
| 55 |
+
url = {https://doi.org/10.15607/RSS.2023.XIX.016},
|
| 56 |
+
doi = {10.15607/RSS.2023.XIX.016},
|
| 57 |
+
timestamp = {Thu, 20 Jul 2023 15:37:49 +0200},
|
| 58 |
+
biburl = {https://dblp.org/rec/conf/rss/ZhaoKLF23.bib},
|
| 59 |
+
bibsource = {dblp computer science bibliography, https://dblp.org}
|
| 60 |
+
}
|
| 61 |
+
```
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/detr/backbone.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
Backbone modules.
|
| 4 |
+
"""
|
| 5 |
+
from collections import OrderedDict
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn.functional as F
|
| 9 |
+
import torchvision
|
| 10 |
+
from torch import nn
|
| 11 |
+
from torchvision.models._utils import IntermediateLayerGetter
|
| 12 |
+
from typing import Dict, List
|
| 13 |
+
|
| 14 |
+
from act.utils import NestedTensor, is_main_process
|
| 15 |
+
from act.detr.position_encoding import build_position_encoding
|
| 16 |
+
|
| 17 |
+
import IPython
|
| 18 |
+
e = IPython.embed
|
| 19 |
+
|
| 20 |
+
class FrozenBatchNorm2d(torch.nn.Module):
|
| 21 |
+
"""
|
| 22 |
+
BatchNorm2d where the batch statistics and the affine parameters are fixed.
|
| 23 |
+
|
| 24 |
+
Copy-paste from torchvision.misc.ops with added eps before rqsrt,
|
| 25 |
+
without which any other policy_models than torchvision.policy_models.resnet[18,34,50,101]
|
| 26 |
+
produce nans.
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
def __init__(self, n):
|
| 30 |
+
super(FrozenBatchNorm2d, self).__init__()
|
| 31 |
+
self.register_buffer("weight", torch.ones(n))
|
| 32 |
+
self.register_buffer("bias", torch.zeros(n))
|
| 33 |
+
self.register_buffer("running_mean", torch.zeros(n))
|
| 34 |
+
self.register_buffer("running_var", torch.ones(n))
|
| 35 |
+
|
| 36 |
+
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
|
| 37 |
+
missing_keys, unexpected_keys, error_msgs):
|
| 38 |
+
num_batches_tracked_key = prefix + 'num_batches_tracked'
|
| 39 |
+
if num_batches_tracked_key in state_dict:
|
| 40 |
+
del state_dict[num_batches_tracked_key]
|
| 41 |
+
|
| 42 |
+
super(FrozenBatchNorm2d, self)._load_from_state_dict(
|
| 43 |
+
state_dict, prefix, local_metadata, strict,
|
| 44 |
+
missing_keys, unexpected_keys, error_msgs)
|
| 45 |
+
|
| 46 |
+
def forward(self, x):
|
| 47 |
+
# move reshapes to the beginning
|
| 48 |
+
# to make it fuser-friendly
|
| 49 |
+
w = self.weight.reshape(1, -1, 1, 1)
|
| 50 |
+
b = self.bias.reshape(1, -1, 1, 1)
|
| 51 |
+
rv = self.running_var.reshape(1, -1, 1, 1)
|
| 52 |
+
rm = self.running_mean.reshape(1, -1, 1, 1)
|
| 53 |
+
eps = 1e-5
|
| 54 |
+
scale = w * (rv + eps).rsqrt()
|
| 55 |
+
bias = b - rm * scale
|
| 56 |
+
return x * scale + bias
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class BackboneBase(nn.Module):
|
| 60 |
+
|
| 61 |
+
def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
|
| 62 |
+
super().__init__()
|
| 63 |
+
# for name, parameter in backbone.named_parameters(): # only train later layers # TODO do we want this?
|
| 64 |
+
# if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
|
| 65 |
+
# parameter.requires_grad_(False)
|
| 66 |
+
if return_interm_layers:
|
| 67 |
+
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
|
| 68 |
+
else:
|
| 69 |
+
return_layers = {'layer4': "0"}
|
| 70 |
+
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
|
| 71 |
+
self.num_channels = num_channels
|
| 72 |
+
|
| 73 |
+
def forward(self, tensor):
|
| 74 |
+
xs = self.body(tensor)
|
| 75 |
+
return xs
|
| 76 |
+
# out: Dict[str, NestedTensor] = {}
|
| 77 |
+
# for name, x in xs.items():
|
| 78 |
+
# m = tensor_list.mask
|
| 79 |
+
# assert m is not None
|
| 80 |
+
# mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
|
| 81 |
+
# out[name] = NestedTensor(x, mask)
|
| 82 |
+
# return out
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class Backbone(BackboneBase):
|
| 86 |
+
"""ResNet backbone with frozen BatchNorm."""
|
| 87 |
+
def __init__(self, name: str,
|
| 88 |
+
train_backbone: bool,
|
| 89 |
+
return_interm_layers: bool,
|
| 90 |
+
dilation: bool,
|
| 91 |
+
include_depth: bool):
|
| 92 |
+
backbone = getattr(torchvision.models, name)(
|
| 93 |
+
replace_stride_with_dilation=[False, False, dilation],
|
| 94 |
+
pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d) # pretrained # TODO do we want frozen batch_norm??
|
| 95 |
+
|
| 96 |
+
# for rgbd data
|
| 97 |
+
if include_depth:
|
| 98 |
+
w = backbone.conv1.weight
|
| 99 |
+
w = torch.cat([w, torch.full((64, 1, 7, 7), 0)], dim=1)
|
| 100 |
+
backbone.conv1.weight = nn.Parameter(w)
|
| 101 |
+
|
| 102 |
+
num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
|
| 103 |
+
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class Joiner(nn.Sequential):
|
| 107 |
+
def __init__(self, backbone, position_embedding):
|
| 108 |
+
super().__init__(backbone, position_embedding)
|
| 109 |
+
|
| 110 |
+
def forward(self, tensor_list: NestedTensor):
|
| 111 |
+
xs = self[0](tensor_list)
|
| 112 |
+
out: List[NestedTensor] = []
|
| 113 |
+
pos = []
|
| 114 |
+
for name, x in xs.items():
|
| 115 |
+
out.append(x)
|
| 116 |
+
# position encoding
|
| 117 |
+
pos.append(self[1](x).to(x.dtype))
|
| 118 |
+
|
| 119 |
+
return out, pos
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def build_backbone(args):
|
| 123 |
+
position_embedding = build_position_encoding(args)
|
| 124 |
+
train_backbone = args.lr_backbone > 0
|
| 125 |
+
return_interm_layers = args.masks
|
| 126 |
+
backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation, args.include_depth)
|
| 127 |
+
model = Joiner(backbone, position_embedding)
|
| 128 |
+
model.num_channels = backbone.num_channels
|
| 129 |
+
return model
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/detr/detr_vae.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
DETR model and criterion classes.
|
| 4 |
+
"""
|
| 5 |
+
import torch
|
| 6 |
+
from torch import nn
|
| 7 |
+
from torch.autograd import Variable
|
| 8 |
+
from act.detr.transformer import build_transformer, TransformerEncoder, TransformerEncoderLayer
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
import IPython
|
| 13 |
+
e = IPython.embed
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def reparametrize(mu, logvar):
|
| 17 |
+
std = logvar.div(2).exp()
|
| 18 |
+
eps = Variable(std.data.new(std.size()).normal_())
|
| 19 |
+
return mu + std * eps
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_sinusoid_encoding_table(n_position, d_hid):
|
| 23 |
+
def get_position_angle_vec(position):
|
| 24 |
+
return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
|
| 25 |
+
|
| 26 |
+
sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
|
| 27 |
+
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
|
| 28 |
+
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
|
| 29 |
+
|
| 30 |
+
return torch.FloatTensor(sinusoid_table).unsqueeze(0)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class DETRVAE(nn.Module):
|
| 34 |
+
""" This is the DETR module that performs object detection """
|
| 35 |
+
def __init__(self, backbones, transformer, encoder, state_dim, action_dim, num_queries):
|
| 36 |
+
super().__init__()
|
| 37 |
+
self.num_queries = num_queries
|
| 38 |
+
self.transformer = transformer
|
| 39 |
+
self.encoder = encoder
|
| 40 |
+
hidden_dim = transformer.d_model
|
| 41 |
+
self.action_head = nn.Linear(hidden_dim, action_dim)
|
| 42 |
+
self.query_embed = nn.Embedding(num_queries, hidden_dim)
|
| 43 |
+
if backbones is not None:
|
| 44 |
+
self.input_proj = nn.Conv2d(backbones[0].num_channels, hidden_dim, kernel_size=1)
|
| 45 |
+
self.backbones = nn.ModuleList(backbones)
|
| 46 |
+
self.input_proj_robot_state = nn.Linear(state_dim, hidden_dim)
|
| 47 |
+
else:
|
| 48 |
+
self.input_proj_robot_state = nn.Linear(state_dim, hidden_dim)
|
| 49 |
+
self.backbones = None
|
| 50 |
+
|
| 51 |
+
# encoder extra parameters
|
| 52 |
+
self.latent_dim = 32 # size of latent z
|
| 53 |
+
self.cls_embed = nn.Embedding(1, hidden_dim) # extra cls token embedding
|
| 54 |
+
self.encoder_state_proj = nn.Linear(state_dim, hidden_dim) # project state to embedding
|
| 55 |
+
self.encoder_action_proj = nn.Linear(action_dim, hidden_dim) # project action to embedding
|
| 56 |
+
self.latent_proj = nn.Linear(hidden_dim, self.latent_dim*2) # project hidden state to latent std, var
|
| 57 |
+
self.register_buffer('pos_table', get_sinusoid_encoding_table(1+1+num_queries, hidden_dim)) # [CLS], state, actions
|
| 58 |
+
|
| 59 |
+
# decoder extra parameters
|
| 60 |
+
self.latent_out_proj = nn.Linear(self.latent_dim, hidden_dim) # project latent sample to embedding
|
| 61 |
+
self.additional_pos_embed = nn.Embedding(2, hidden_dim) # learned position embedding for state and proprio
|
| 62 |
+
|
| 63 |
+
def forward(self, obs, actions=None):
|
| 64 |
+
is_training = actions is not None
|
| 65 |
+
state = obs['state'] if self.backbones is not None else obs
|
| 66 |
+
bs = state.shape[0]
|
| 67 |
+
|
| 68 |
+
if is_training:
|
| 69 |
+
# project CLS token, state sequence, and action sequence to embedding dim
|
| 70 |
+
cls_embed = self.cls_embed.weight # (1, hidden_dim)
|
| 71 |
+
cls_embed = torch.unsqueeze(cls_embed, axis=0).repeat(bs, 1, 1) # (bs, 1, hidden_dim)
|
| 72 |
+
state_embed = self.encoder_state_proj(state) # (bs, hidden_dim)
|
| 73 |
+
state_embed = torch.unsqueeze(state_embed, axis=1) # (bs, 1, hidden_dim)
|
| 74 |
+
action_embed = self.encoder_action_proj(actions) # (bs, seq, hidden_dim)
|
| 75 |
+
# concat them together to form an input to the CVAE encoder
|
| 76 |
+
encoder_input = torch.cat([cls_embed, state_embed, action_embed], axis=1) # (bs, seq+2, hidden_dim)
|
| 77 |
+
encoder_input = encoder_input.permute(1, 0, 2) # (seq+2, bs, hidden_dim)
|
| 78 |
+
# no masking is applied to all parts of the CVAE encoder input
|
| 79 |
+
is_pad = torch.full((bs, encoder_input.shape[0]), False).to(state.device) # False: not a padding
|
| 80 |
+
# obtain position embedding
|
| 81 |
+
pos_embed = self.pos_table.clone().detach()
|
| 82 |
+
pos_embed = pos_embed.permute(1, 0, 2) # (seq+2, 1, hidden_dim)
|
| 83 |
+
# query CVAE encoder
|
| 84 |
+
encoder_output = self.encoder(encoder_input, pos=pos_embed, src_key_padding_mask=is_pad)
|
| 85 |
+
encoder_output = encoder_output[0] # take cls output only
|
| 86 |
+
latent_info = self.latent_proj(encoder_output)
|
| 87 |
+
mu = latent_info[:, :self.latent_dim]
|
| 88 |
+
logvar = latent_info[:, self.latent_dim:]
|
| 89 |
+
latent_sample = reparametrize(mu, logvar)
|
| 90 |
+
latent_input = self.latent_out_proj(latent_sample)
|
| 91 |
+
else:
|
| 92 |
+
mu = logvar = None
|
| 93 |
+
latent_sample = torch.zeros([bs, self.latent_dim], dtype=torch.float32).to(state.device)
|
| 94 |
+
latent_input = self.latent_out_proj(latent_sample)
|
| 95 |
+
|
| 96 |
+
# CVAE decoder
|
| 97 |
+
if self.backbones is not None:
|
| 98 |
+
vis_data = obs['rgb']
|
| 99 |
+
if "depth" in obs:
|
| 100 |
+
vis_data = torch.cat([vis_data, obs['depth']], dim=2)
|
| 101 |
+
num_cams = vis_data.shape[1]
|
| 102 |
+
|
| 103 |
+
# Image observation features and position embeddings
|
| 104 |
+
all_cam_features = []
|
| 105 |
+
all_cam_pos = []
|
| 106 |
+
for cam_id in range(num_cams):
|
| 107 |
+
features, pos = self.backbones[0](vis_data[:, cam_id]) # HARDCODED
|
| 108 |
+
features = features[0] # take the last layer feature # (batch, hidden_dim, H, W)
|
| 109 |
+
pos = pos[0] # (1, hidden_dim, H, W)
|
| 110 |
+
all_cam_features.append(self.input_proj(features))
|
| 111 |
+
all_cam_pos.append(pos)
|
| 112 |
+
|
| 113 |
+
# proprioception features (state)
|
| 114 |
+
proprio_input = self.input_proj_robot_state(state)
|
| 115 |
+
# fold camera dimension into width dimension
|
| 116 |
+
src = torch.cat(all_cam_features, axis=3) # (batch, hidden_dim, 4, 8)
|
| 117 |
+
pos = torch.cat(all_cam_pos, axis=3) # (batch, hidden_dim, 4, 8)
|
| 118 |
+
hs = self.transformer(src, None, self.query_embed.weight, pos, latent_input, proprio_input, self.additional_pos_embed.weight)[0] # (batch, num_queries, hidden_dim)
|
| 119 |
+
else:
|
| 120 |
+
state = self.input_proj_robot_state(state)
|
| 121 |
+
hs = self.transformer(None, None, self.query_embed.weight, None, latent_input, state, self.additional_pos_embed.weight)[0]
|
| 122 |
+
|
| 123 |
+
a_hat = self.action_head(hs)
|
| 124 |
+
return a_hat, [mu, logvar]
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def build_encoder(args):
|
| 128 |
+
d_model = args.hidden_dim # 256
|
| 129 |
+
dropout = args.dropout # 0.1
|
| 130 |
+
nhead = args.nheads # 8
|
| 131 |
+
dim_feedforward = args.dim_feedforward # 2048
|
| 132 |
+
num_encoder_layers = args.enc_layers # 4 # TODO shared with VAE decoder
|
| 133 |
+
normalize_before = args.pre_norm # False
|
| 134 |
+
activation = "relu"
|
| 135 |
+
|
| 136 |
+
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
|
| 137 |
+
dropout, activation, normalize_before)
|
| 138 |
+
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
|
| 139 |
+
encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
|
| 140 |
+
|
| 141 |
+
return encoder
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/detr/position_encoding.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
Various positional encodings for the transformer.
|
| 4 |
+
"""
|
| 5 |
+
import math
|
| 6 |
+
import torch
|
| 7 |
+
from torch import nn
|
| 8 |
+
|
| 9 |
+
from act.utils import NestedTensor
|
| 10 |
+
|
| 11 |
+
import IPython
|
| 12 |
+
e = IPython.embed
|
| 13 |
+
|
| 14 |
+
class PositionEmbeddingSine(nn.Module):
    """Sinusoidal 2-D positional embedding.

    A DETR-style generalization of the "Attention Is All You Need" encoding to
    image feature maps: independent sin/cos embeddings per row and per column,
    concatenated on the channel axis.
    """

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        self.scale = 2 * math.pi if scale is None else scale

    def forward(self, tensor):
        x = tensor
        # Every position is valid here (no padding mask); shape (1, H, W).
        valid = torch.ones_like(x[0, [0]])
        y_pos = valid.cumsum(1, dtype=torch.float32)  # 1-based row index
        x_pos = valid.cumsum(2, dtype=torch.float32)  # 1-based column index
        if self.normalize:
            eps = 1e-6
            y_pos = y_pos / (y_pos[:, -1:, :] + eps) * self.scale
            x_pos = x_pos / (x_pos[:, :, -1:] + eps) * self.scale

        # Geometric frequency ladder; even/odd channel pairs share a frequency.
        freq = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        freq = self.temperature ** (2 * (freq // 2) / self.num_pos_feats)

        px = x_pos[:, :, :, None] / freq
        py = y_pos[:, :, :, None] / freq
        # Interleave: sin on even channels, cos on odd channels, then flatten.
        px = torch.stack((px[:, :, :, 0::2].sin(), px[:, :, :, 1::2].cos()), dim=4).flatten(3)
        py = torch.stack((py[:, :, :, 0::2].sin(), py[:, :, :, 1::2].cos()), dim=4).flatten(3)
        # Result: (1, 2 * num_pos_feats, H, W), rows first then columns.
        return torch.cat((py, px), dim=3).permute(0, 3, 1, 2)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class PositionEmbeddingLearned(nn.Module):
    """Learned absolute 2-D positional embedding.

    One learnable table per axis (max 50 positions each); the row and column
    embeddings are broadcast over the opposite axis and concatenated on channels.
    """

    def __init__(self, num_pos_feats=256):
        super().__init__()
        # 50 = maximum supported spatial extent per axis.
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init, matching the DETR reference implementation.
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)

    def forward(self, tensor_list: NestedTensor):
        feats = tensor_list.tensors
        h, w = feats.shape[-2:]
        cols = self.col_embed(torch.arange(w, device=feats.device))  # (w, C)
        rows = self.row_embed(torch.arange(h, device=feats.device))  # (h, C)
        # Tile each axis embedding across the other axis and join on channels.
        grid = torch.cat(
            [cols.unsqueeze(0).repeat(h, 1, 1), rows.unsqueeze(1).repeat(1, w, 1)],
            dim=-1,
        )
        # (h, w, 2C) -> (1, 2C, h, w) broadcast over the batch.
        return grid.permute(2, 0, 1).unsqueeze(0).repeat(feats.shape[0], 1, 1, 1)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def build_position_encoding(args):
    """Factory selecting the positional-embedding flavor from args.position_embedding."""
    # Each spatial axis receives half of the hidden dimension.
    steps = args.hidden_dim // 2
    kind = args.position_embedding
    if kind in ('v2', 'sine'):
        # TODO find a better way of exposing other arguments
        return PositionEmbeddingSine(steps, normalize=True)
    if kind in ('v3', 'learned'):
        return PositionEmbeddingLearned(steps)
    raise ValueError(f"not supported {kind}")
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/detr/transformer.py
ADDED
|
@@ -0,0 +1,313 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
DETR Transformer class.
|
| 4 |
+
|
| 5 |
+
Copy-paste from torch.nn.Transformer with modifications:
|
| 6 |
+
* positional encodings are passed in MHattention
|
| 7 |
+
* extra LN at the end of encoder is removed
|
| 8 |
+
* decoder returns a stack of activations from all decoding layers
|
| 9 |
+
"""
|
| 10 |
+
import copy
|
| 11 |
+
from typing import Optional, List
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
import torch.nn.functional as F
|
| 15 |
+
from torch import nn, Tensor
|
| 16 |
+
|
| 17 |
+
import IPython
|
| 18 |
+
e = IPython.embed
|
| 19 |
+
|
| 20 |
+
class Transformer(nn.Module):
    """DETR-style encoder-decoder transformer adapted for ACT.

    Differences from the stock DETR transformer:
    * two conditioning tokens — a CVAE latent and a proprioceptive state
      embedding — are prepended to the encoder input, with their own learned
      positional embeddings (``additional_pos_embed``);
    * when no visual features are given (``src is None``), the encoder runs on
      those two tokens alone (state-only policy).
    """

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False,
                 return_intermediate_dec=False):
        super().__init__()

        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        # Final encoder norm is only needed for the pre-norm layout.
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
                                          return_intermediate=return_intermediate_dec)

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        # Xavier-initialize all weight matrices; 1-D params (biases, norms) keep defaults.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask, query_embed, pos_embed, latent_input=None, proprio_input=None, additional_pos_embed=None):
        """Encode [latent, proprio (, visual tokens)] and decode the action queries.

        Args:
            src: camera feature map (B, C, H, W), or None for state-only input.
            mask: key-padding mask forwarded to encoder/decoder (callers pass None).
            query_embed: learned query embeddings, (num_queries, C).
            pos_embed: positional embedding for ``src`` (ignored when src is None).
            latent_input: CVAE latent token, (B, C).
            proprio_input: proprioception token, (B, C).
            additional_pos_embed: learned positions for the two prepended tokens.

        Returns:
            Decoder activations of shape (num_layers_or_1, B, num_queries, C).
        """
        if src is None:
            bs = proprio_input.shape[0]
            query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
            pos_embed = additional_pos_embed.unsqueeze(1).repeat(1, bs, 1) # seq, bs, dim
            # Encoder input is just the two conditioning tokens: (2, B, C).
            src = torch.stack([latent_input, proprio_input], axis=0)
        # TODO flatten only when input has H and W
        elif len(src.shape) == 4: # has H and W
            # flatten NxCxHxW to HWxNxC
            bs, c, h, w = src.shape
            src = src.flatten(2).permute(2, 0, 1)
            pos_embed = pos_embed.flatten(2).permute(2, 0, 1).repeat(1, bs, 1)
            query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
            # mask = mask.flatten(1)

            additional_pos_embed = additional_pos_embed.unsqueeze(1).repeat(1, bs, 1) # seq, bs, dim
            pos_embed = torch.cat([additional_pos_embed, pos_embed], axis=0)

            # Prepend the latent and proprioception tokens to the visual tokens.
            addition_input = torch.stack([latent_input, proprio_input], axis=0)
            src = torch.cat([addition_input, src], axis=0)

        # Queries start from zeros; all content comes from query_pos + memory.
        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        hs = self.decoder(tgt, memory, memory_key_padding_mask=mask,
                          pos=pos_embed, query_pos=query_embed)
        # (layers, num_queries, B, C) -> (layers, B, num_queries, C)
        hs = hs.transpose(1, 2)
        return hs
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class TransformerEncoder(nn.Module):
    """A stack of identical encoder layers with an optional final LayerNorm."""

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        # Each layer is an independent deep copy of the prototype layer.
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src,
                mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        out = src
        for enc_layer in self.layers:
            out = enc_layer(out, src_mask=mask,
                            src_key_padding_mask=src_key_padding_mask, pos=pos)
        return out if self.norm is None else self.norm(out)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class TransformerDecoder(nn.Module):
    """A stack of decoder layers; can return every layer's (normalized) output."""

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        out = tgt
        per_layer = []

        for dec_layer in self.layers:
            out = dec_layer(out, memory, tgt_mask=tgt_mask,
                            memory_mask=memory_mask,
                            tgt_key_padding_mask=tgt_key_padding_mask,
                            memory_key_padding_mask=memory_key_padding_mask,
                            pos=pos, query_pos=query_pos)
            if self.return_intermediate:
                per_layer.append(self.norm(out))

        if self.norm is not None:
            out = self.norm(out)
            if self.return_intermediate:
                # The last entry was already normalized above; swap it in so
                # it is not normalized twice.
                per_layer[-1] = out

        if self.return_intermediate:
            return torch.stack(per_layer)
        # Keep a leading "layer" axis for API symmetry with the stacked case.
        return out.unsqueeze(0)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
class TransformerEncoderLayer(nn.Module):
    """Single transformer encoder layer supporting pre-norm and post-norm layouts."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional terms are added to queries/keys only, never to values.
        return tensor + pos if pos is not None else tensor

    def forward_post(self,
                     src,
                     src_mask: Optional[Tensor] = None,
                     src_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None):
        # Post-norm: residual add first, LayerNorm after.
        qk = self.with_pos_embed(src, pos)
        attn_out = self.self_attn(qk, qk, value=src, attn_mask=src_mask,
                                  key_padding_mask=src_key_padding_mask)[0]
        src = self.norm1(src + self.dropout1(attn_out))
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(src))))
        return self.norm2(src + self.dropout2(ffn_out))

    def forward_pre(self, src,
                    src_mask: Optional[Tensor] = None,
                    src_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None):
        # Pre-norm: LayerNorm on the branch input, residual add after.
        normed = self.norm1(src)
        qk = self.with_pos_embed(normed, pos)
        attn_out = self.self_attn(qk, qk, value=normed, attn_mask=src_mask,
                                  key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(attn_out)
        normed = self.norm2(src)
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(normed))))
        return src + self.dropout2(ffn_out)

    def forward(self, src,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        branch = self.forward_pre if self.normalize_before else self.forward_post
        return branch(src, src_mask, src_key_padding_mask, pos)
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
class TransformerDecoderLayer(nn.Module):
    """Single transformer decoder layer: query self-attention, cross-attention
    into the encoder memory, then a position-wise FFN. Supports pre-norm and
    post-norm layouts."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional terms are added to queries/keys only, never to values.
        return tensor + pos if pos is not None else tensor

    def forward_post(self, tgt, memory,
                     tgt_mask: Optional[Tensor] = None,
                     memory_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        # Post-norm layout: residual add, then LayerNorm, three times.
        qk = self.with_pos_embed(tgt, query_pos)
        sa_out = self.self_attn(qk, qk, value=tgt, attn_mask=tgt_mask,
                                key_padding_mask=tgt_key_padding_mask)[0]
        tgt = self.norm1(tgt + self.dropout1(sa_out))
        ca_out = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                     key=self.with_pos_embed(memory, pos),
                                     value=memory, attn_mask=memory_mask,
                                     key_padding_mask=memory_key_padding_mask)[0]
        tgt = self.norm2(tgt + self.dropout2(ca_out))
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        return self.norm3(tgt + self.dropout3(ffn_out))

    def forward_pre(self, tgt, memory,
                    tgt_mask: Optional[Tensor] = None,
                    memory_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        # Pre-norm layout: LayerNorm on each branch input, residual add after.
        normed = self.norm1(tgt)
        qk = self.with_pos_embed(normed, query_pos)
        sa_out = self.self_attn(qk, qk, value=normed, attn_mask=tgt_mask,
                                key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(sa_out)
        normed = self.norm2(tgt)
        ca_out = self.multihead_attn(query=self.with_pos_embed(normed, query_pos),
                                     key=self.with_pos_embed(memory, pos),
                                     value=memory, attn_mask=memory_mask,
                                     key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(ca_out)
        normed = self.norm3(tgt)
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(normed))))
        return tgt + self.dropout3(ffn_out)

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        branch = self.forward_pre if self.normalize_before else self.forward_post
        return branch(tgt, memory, tgt_mask, memory_mask,
                      tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
def _get_clones(module, N):
|
| 289 |
+
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def build_transformer(args):
    """Construct the ACT encoder-decoder Transformer from CLI arguments."""
    return Transformer(
        d_model=args.hidden_dim,
        nhead=args.nheads,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        dim_feedforward=args.dim_feedforward,
        dropout=args.dropout,
        normalize_before=args.pre_norm,
        # All decoder layers' outputs are stacked; callers index into them.
        return_intermediate_dec=True,
    )
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def _get_activation_fn(activation):
|
| 306 |
+
"""Return an activation function given a string"""
|
| 307 |
+
if activation == "relu":
|
| 308 |
+
return F.relu
|
| 309 |
+
if activation == "gelu":
|
| 310 |
+
return F.gelu
|
| 311 |
+
if activation == "glu":
|
| 312 |
+
return F.glu
|
| 313 |
+
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/evaluate.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict
|
| 2 |
+
import gymnasium
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from mani_skill.utils import common
|
| 7 |
+
|
| 8 |
+
def evaluate(n: int, agent, eval_envs, eval_kwargs):
    """Roll out ``agent`` in ``eval_envs`` until at least ``n`` episodes finish.

    Supports ACT-style chunked action prediction: the policy emits a chunk of
    ``num_queries`` actions per query, consumed either open-loop (replay the
    chunk) or via temporal ensembling (exponentially weighted average over
    overlapping chunk predictions for the current timestep).

    Args:
        n: minimum number of episodes to evaluate.
        agent: policy exposing eval()/train() and
            ``get_action(obs) -> (num_envs, num_queries, action_dim)``.
        eval_envs: vectorized environment (CPU or GPU ManiSkill backend).
        eval_kwargs: dict whose values, in order, are
            (stats, num_queries, temporal_agg, max_timesteps, device, sim_backend).
            NOTE(review): this relies on dict insertion order via ``.values()``;
            callers must build the dict with exactly these keys in this order.

    Returns:
        dict mapping episode-metric names to stacked numpy arrays.
    """
    stats, num_queries, temporal_agg, max_timesteps, device, sim_backend = eval_kwargs.values()

    # A dict observation space indicates visual obs (images + 'state' key);
    # a flat space is state-only.
    use_visual_obs = isinstance(eval_envs.single_observation_space.sample(), dict)
    # Empty/None stats means delta control: obs and actions are used un-normalized.
    delta_control = not stats
    if not delta_control:
        if sim_backend == "physx_cpu":
            # CPU envs yield numpy observations, so bring the stats to numpy too.
            pre_process = lambda s_obs: (s_obs - stats['state_mean'].cpu().numpy()) / stats['state_std'].cpu().numpy()
        else:
            pre_process = lambda s_obs: (s_obs - stats['state_mean']) / stats['state_std']
        post_process = lambda a: a * stats['action_std'] + stats['action_mean']

    # create action table for temporal ensembling
    action_dim = eval_envs.action_space.shape[-1]
    num_envs = eval_envs.num_envs
    if temporal_agg:
        # Query every step and remember every chunk prediction made for each step.
        query_frequency = 1
        all_time_actions = torch.zeros([num_envs, max_timesteps, max_timesteps+num_queries, action_dim], device=device)
    else:
        # Query every num_queries steps and replay the chunk open-loop.
        query_frequency = num_queries
        actions_to_take = torch.zeros([num_envs, num_queries, action_dim], device=device)

    agent.eval()
    with torch.no_grad():
        eval_metrics = defaultdict(list)
        obs, info = eval_envs.reset()
        ts, eps_count = 0, 0
        while eps_count < n:
            # pre-process obs
            if use_visual_obs:
                obs['state'] = pre_process(obs['state']) if not delta_control else obs['state'] # (num_envs, obs_dim)
                obs = {k: common.to_tensor(v, device) for k, v in obs.items()}
            else:
                obs = pre_process(obs) if not delta_control else obs # (num_envs, obs_dim)
                obs = common.to_tensor(obs, device)

            # query policy
            if ts % query_frequency == 0:
                action_seq = agent.get_action(obs) # (num_envs, num_queries, action_dim)

            # we assume ignore_terminations=True. Otherwise, some envs could be done
            # earlier, so we would need to temporally ensemble at corresponding timestep
            # for each env.
            if temporal_agg:
                assert query_frequency == 1, "query_frequency != 1 has not been implemented for temporal_agg==1."
                all_time_actions[:, ts, ts:ts+num_queries] = action_seq # (num_envs, num_queries, act_dim)
                actions_for_curr_step = all_time_actions[:, :, ts] # (num_envs, max_timesteps, act_dim)
                # since we pad the action with 0 in 'delta_pos' control mode, this causes error.
                #actions_populated = torch.all(actions_for_curr_step[0] != 0, axis=1) # (max_timesteps,)
                # Explicitly mark which past queries have a prediction for step ts.
                actions_populated = torch.zeros(max_timesteps, dtype=torch.bool, device=device) # (max_timesteps,)
                actions_populated[max(0, ts + 1 - num_queries):ts+1] = True
                actions_for_curr_step = actions_for_curr_step[:, actions_populated] # (num_envs, num_populated, act_dim)
                k = 0.01
                if ts < num_queries:
                    # Exponential weights: oldest prediction weighted highest.
                    # Recomputed only while the populated count is still growing;
                    # afterwards the previous (full-size) weights are reused.
                    exp_weights = torch.exp(-k * torch.arange(len(actions_for_curr_step[0]), device=device)) # (num_populated,)
                    exp_weights = exp_weights / exp_weights.sum() # (num_populated,)
                    exp_weights = torch.tile(exp_weights, (num_envs, 1)) # (num_envs, num_populated)
                    exp_weights = torch.unsqueeze(exp_weights, -1) # (num_envs, num_populated, 1)
                raw_action = (actions_for_curr_step * exp_weights).sum(dim=1) # (num_envs, act_dim)
            else:
                if ts % query_frequency == 0:
                    actions_to_take = action_seq
                raw_action = actions_to_take[:, ts % query_frequency]

            action = post_process(raw_action) if not delta_control else raw_action # (num_envs, act_dim)
            if sim_backend == "physx_cpu":
                action = action.cpu().numpy()

            # step the environment
            obs, rew, terminated, truncated, info = eval_envs.step(action)
            ts += 1

            # collect episode info
            if truncated.any():
                assert truncated.all() == truncated.any(), "all episodes should truncate at the same time for fair evaluation with other algorithms"
                if isinstance(info["final_info"], dict):
                    # GPU vectorized envs: batched final_info dict of tensors.
                    for k, v in info["final_info"]["episode"].items():
                        eval_metrics[k].append(v.float().cpu().numpy())
                else:
                    # CPU vectorized envs: one final_info dict per sub-env.
                    for final_info in info["final_info"]:
                        for k, v in final_info["episode"].items():
                            eval_metrics[k].append(v)
                # new episodes begin
                eps_count += num_envs
                ts = 0
                # Clear the ensembling table so stale predictions don't leak
                # into the next batch of episodes.
                all_time_actions = torch.zeros([num_envs, max_timesteps, max_timesteps+num_queries, action_dim], device=device)

    agent.train()
    for k in eval_metrics.keys():
        eval_metrics[k] = np.stack(eval_metrics[k])
    return eval_metrics
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/make_env.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
import gymnasium as gym
|
| 3 |
+
import mani_skill.envs
|
| 4 |
+
from mani_skill.utils import gym_utils
|
| 5 |
+
from mani_skill.vector.wrappers.gymnasium import ManiSkillVectorEnv
|
| 6 |
+
from mani_skill.utils.wrappers import RecordEpisode, CPUGymWrapper
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def make_eval_envs(env_id, num_envs: int, sim_backend: str, env_kwargs: dict, other_kwargs: dict, video_dir: Optional[str] = None, wrappers: Optional[list] = None):
    """Create vectorized environment for evaluation and/or recording videos.

    For CPU vectorized environments only the first parallel environment is used
    to record videos. For GPU vectorized environments all parallel environments
    are used to record videos.

    Args:
        env_id: the environment id
        num_envs: the number of parallel environments
        sim_backend: the simulation backend to use. "physx_cpu" selects the CPU
            path (gym Sync/AsyncVectorEnv); any other value is forwarded to
            ``gym.make`` as a GPU backend.
        env_kwargs: the environment kwargs. You can also pass in max_episode_steps
            in env_kwargs to override the default max episode steps for the environment.
        other_kwargs: extra algorithm-specific kwargs, forwarded to the per-env
            factory on the CPU path.
        video_dir: the directory to save the videos. If None no videos are recorded.
        wrappers: the list of gym.Wrapper classes to apply to the environment.
            Defaults to none. (Was a mutable default ``[]``; now a None sentinel.)
    """
    # Avoid the shared-mutable-default pitfall while keeping behavior identical.
    wrappers = [] if wrappers is None else wrappers
    if sim_backend == "physx_cpu":
        def cpu_make_env(env_id, seed, video_dir=None, env_kwargs=dict(), other_kwargs=dict()):
            def thunk():
                env = gym.make(env_id, reconfiguration_freq=1, **env_kwargs)
                for wrapper in wrappers:
                    env = wrapper(env)
                env = CPUGymWrapper(env, ignore_terminations=True, record_metrics=True)
                if video_dir:
                    env = RecordEpisode(env, output_dir=video_dir, save_trajectory=False, info_on_video=True, source_type="act", source_desc="act evaluation rollout")
                env.action_space.seed(seed)
                env.observation_space.seed(seed)
                return env

            return thunk
        # Only the seed-0 env records video on the CPU path.
        vector_cls = gym.vector.SyncVectorEnv if num_envs == 1 else lambda x: gym.vector.AsyncVectorEnv(x, context="forkserver")
        env = vector_cls([cpu_make_env(env_id, seed, video_dir if seed == 0 else None, env_kwargs, other_kwargs) for seed in range(num_envs)])
    else:
        env = gym.make(env_id, num_envs=num_envs, sim_backend=sim_backend, reconfiguration_freq=1, **env_kwargs)
        # Determined before wrapping so RecordEpisode can cap video length.
        max_episode_steps = gym_utils.find_max_episode_steps_value(env)
        for wrapper in wrappers:
            env = wrapper(env)
        if video_dir:
            env = RecordEpisode(env, output_dir=video_dir, save_trajectory=False, save_video=True, source_type="act", source_desc="act evaluation rollout", max_steps_per_video=max_episode_steps)
        env = ManiSkillVectorEnv(env, ignore_terminations=True, record_metrics=True)
    return env
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/act/utils.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.utils.data.sampler import Sampler
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
import torch.distributed as dist
|
| 5 |
+
from torch import Tensor
|
| 6 |
+
from h5py import File, Group, Dataset
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class NestedTensor(object):
    """A tensor bundled with an optional padding mask (DETR convention)."""

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        """Return a new NestedTensor with both parts moved to ``device``."""
        moved = self.tensors.to(device)
        moved_mask = None if self.mask is None else self.mask.to(device)
        return NestedTensor(moved, moved_mask)

    def decompose(self):
        """Split back into (tensors, mask)."""
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
|
| 31 |
+
|
| 32 |
+
def is_dist_avail_and_initialized():
    """True iff torch.distributed is compiled in AND a process group is initialized."""
    return dist.is_available() and dist.is_initialized()
|
| 38 |
+
|
| 39 |
+
def get_rank():
    """Rank of this process in the distributed group, or 0 when not distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
|
| 43 |
+
|
| 44 |
+
def is_main_process():
    """True on rank 0 — the process that should write logs/checkpoints."""
    return get_rank() == 0
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class IterationBasedBatchSampler(Sampler):
    """Wraps a BatchSampler.
    Resampling from it until a specified number of iterations have been sampled
    References:
        https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py
    """

    def __init__(self, batch_sampler, num_iterations, start_iter=0):
        self.batch_sampler = batch_sampler
        self.num_iterations = num_iterations
        self.start_iter = start_iter

    def __iter__(self):
        step = self.start_iter
        while step < self.num_iterations:
            # DistributedSampler-style samplers expose set_epoch so each
            # pass (and each process) sees a different shuffle; call it
            # with the current iteration when available.
            inner = self.batch_sampler.sampler
            if hasattr(inner, "set_epoch"):
                inner.set_epoch(step)
            for batch in self.batch_sampler:
                yield batch
                step += 1
                if step >= self.num_iterations:
                    break

    def __len__(self):
        # Number of batches this sampler will actually emit.
        return self.num_iterations - self.start_iter
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def worker_init_fn(worker_id, base_seed=None):
    """The function is designed for pytorch multi-process dataloader.
    Note that we use the pytorch random generator to generate a base_seed.
    Please try to be consistent.
    References:
        https://pytorch.org/docs/stable/notes/faq.html#dataloader-workers-random-seed
    """
    # Draw a base seed from torch's RNG when none is given, then offset it
    # per worker so forked workers do not share an identical numpy state.
    seed = base_seed if base_seed is not None else torch.IntTensor(1).random_().item()
    np.random.seed(seed + worker_id)
|
| 89 |
+
|
| 90 |
+
# Maps the dataset keys this codebase consumes (targets) to the key names
# stored inside each 'traj_<i>' group of a ManiSkill trajectory h5 file
# (sources). Note 'observations' and 'next_observations' both read 'obs';
# load_demo_dataset shifts them by one step when concatenating.
TARGET_KEY_TO_SOURCE_KEY = {
    'states': 'env_states',
    'observations': 'obs',
    'success': 'success',
    'next_observations': 'obs',
    # 'dones': 'dones',
    # 'rewards': 'rewards',
    'actions': 'actions',
}
|
| 99 |
+
def load_content_from_h5_file(file):
    """Recursively convert an open h5py node into plain in-memory objects.

    Files/Groups become dicts keyed by child name; Datasets are read fully
    into memory via `file[()]`. Raises NotImplementedError for any other
    h5py node type.
    """
    if isinstance(file, (File, Group)):
        return {key: load_content_from_h5_file(file[key]) for key in list(file.keys())}
    elif isinstance(file, Dataset):
        return file[()]
    else:
        # Fixed typo in the error message ("Unspported" -> "Unsupported").
        raise NotImplementedError(f"Unsupported h5 file type: {type(file)}")
|
| 106 |
+
|
| 107 |
+
def load_hdf5(path):
    """Load an entire HDF5 file into nested dicts of in-memory arrays.

    Uses a context manager so the file handle is closed even when reading
    raises (the previous open/close pair leaked the handle on error).
    """
    print('Loading HDF5 file', path)
    with File(path, 'r') as file:
        ret = load_content_from_h5_file(file)
    print('Loaded')
    return ret
|
| 114 |
+
|
| 115 |
+
def load_traj_hdf5(path, num_traj=None):
    """Load up to `num_traj` trajectories from a ManiSkill trajectory h5 file.

    Top-level keys are 'traj_<i>' groups; when `num_traj` is given they are
    sorted by their integer suffix before truncation so the selection is
    deterministic. Returns {traj_name: nested dict of arrays}.

    The file handle is closed via a context manager even if reading raises
    (the previous open/close pair leaked the handle on error).
    """
    print('Loading HDF5 file', path)
    with File(path, 'r') as file:
        keys = list(file.keys())
        if num_traj is not None:
            assert num_traj <= len(keys), f"num_traj: {num_traj} > len(keys): {len(keys)}"
            keys = sorted(keys, key=lambda x: int(x.split('_')[-1]))
            keys = keys[:num_traj]
        ret = {
            key: load_content_from_h5_file(file[key]) for key in keys
        }
    print('Loaded')
    return ret
|
| 129 |
+
def load_demo_dataset(path, keys=('observations', 'actions'), num_traj=None, concat=True):
    """Load demonstration trajectories and regroup them per target key.

    Args:
        path: h5 trajectory file (see `load_traj_hdf5`).
        keys: target keys to extract; each must map through
            TARGET_KEY_TO_SOURCE_KEY to a key present in every trajectory.
            (Default changed from a mutable list to a tuple — same values,
            avoids the shared-mutable-default pitfall.)
        num_traj: optionally limit the number of trajectories loaded.
        concat: when True and values are arrays, concatenate across
            trajectories along axis 0; otherwise keep per-trajectory lists.

    Observations stored with one extra terminal step (len == len(actions)+1)
    are trimmed: 'observations'/'states' drop the last step,
    'next_observations'/'next_states' drop the first, so lengths align with
    actions after concatenation.
    """
    raw_data = load_traj_hdf5(path, num_traj)
    # raw_data has keys like: ['traj_0', 'traj_1', ...]
    # raw_data['traj_0'] has keys like: ['actions', 'dones', 'env_states', 'infos', ...]
    _traj = raw_data['traj_0']
    # Validate up front that every requested key exists in the data.
    for key in keys:
        source_key = TARGET_KEY_TO_SOURCE_KEY[key]
        assert source_key in _traj, f"key: {source_key} not in traj_0: {_traj.keys()}"
    dataset = {}
    for target_key in keys:
        source_key = TARGET_KEY_TO_SOURCE_KEY[target_key]
        dataset[target_key] = [ raw_data[idx][source_key] for idx in raw_data ]
        if isinstance(dataset[target_key][0], np.ndarray) and concat:
            if target_key in ['observations', 'states'] and \
                    len(dataset[target_key][0]) > len(raw_data['traj_0']['actions']):
                dataset[target_key] = np.concatenate([
                    t[:-1] for t in dataset[target_key]
                ], axis=0)
            elif target_key in ['next_observations', 'next_states'] and \
                    len(dataset[target_key][0]) > len(raw_data['traj_0']['actions']):
                dataset[target_key] = np.concatenate([
                    t[1:] for t in dataset[target_key]
                ], axis=0)
            else:
                dataset[target_key] = np.concatenate(dataset[target_key], axis=0)

            print('Load', target_key, dataset[target_key].shape)
        else:
            print('Load', target_key, len(dataset[target_key]), type(dataset[target_key][0]))
    return dataset
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/baselines.sh
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Baseline training runs for ACT on ManiSkill tasks.
# Fix: the RGB runs below previously reused "-state-" in --exp-name; they are
# now labeled "-rgb-" for consistency with the PushT rgb run.
seed=1
# State based
for demos in 100; do
  python train.py --env-id PickCube-v1 \
    --demo-path ~/.maniskill/demos/PickCube-v1/motionplanning/trajectory.state.pd_ee_delta_pos.physx_cpu.h5 \
    --control-mode "pd_ee_delta_pos" --sim-backend "physx_cpu" --num_demos $demos --max_episode_steps 100 \
    --total_iters 30000 --log_freq 100 --eval_freq 5000 \
    --exp-name=act-PickCube-v1-state-${demos}_motionplanning_demos-$seed \
    --demo_type motionplanning --track

  python train.py --env-id PushCube-v1 \
    --demo-path ~/.maniskill/demos/PushCube-v1/motionplanning/trajectory.state.pd_ee_delta_pos.physx_cpu.h5 \
    --control-mode "pd_ee_delta_pos" --sim-backend "physx_cpu" --num_demos $demos --max_episode_steps 100 \
    --total_iters 30000 --log_freq 100 --eval_freq 5000 \
    --exp-name=act-PushCube-v1-state-${demos}_motionplanning_demos-$seed \
    --demo_type motionplanning --track

  python train.py --env-id StackCube-v1 \
    --demo-path ~/.maniskill/demos/StackCube-v1/motionplanning/trajectory.state.pd_ee_delta_pos.physx_cpu.h5 \
    --control-mode "pd_ee_delta_pos" --sim-backend "physx_cpu" --num_demos $demos --max_episode_steps 200 \
    --total_iters 30000 --log_freq 100 --eval_freq 5000 \
    --exp-name=act-StackCube-v1-state-${demos}_motionplanning_demos-$seed \
    --demo_type motionplanning --track

  python train.py --env-id PegInsertionSide-v1 \
    --demo-path ~/.maniskill/demos/PegInsertionSide-v1/motionplanning/trajectory.state.pd_ee_delta_pose.physx_cpu.h5 \
    --control-mode "pd_ee_delta_pose" --sim-backend "physx_cpu" --num_demos $demos --max_episode_steps 300 \
    --total_iters 100000 --log_freq 100 --eval_freq 5000 \
    --exp-name=act-PegInsertionSide-v1-state-${demos}_motionplanning_demos-$seed \
    --demo_type motionplanning --track

  python train.py --env-id PushT-v1 \
    --demo-path ~/.maniskill/demos/PushT-v1/rl/trajectory.state.pd_ee_delta_pose.physx_cuda.h5 \
    --control-mode "pd_ee_delta_pose" --sim-backend "physx_cuda" --num_demos $demos --max_episode_steps 150 \
    --total_iters 100000 --log_freq 100 --eval_freq 5000 \
    --exp-name=act-PushT-v1-state-${demos}_rl_demos-$seed \
    --demo_type rl --track
done

# RGB based

for demos in 100; do
  python train_rgbd.py --env-id PickCube-v1 --no_include_depth \
    --demo-path ~/.maniskill/demos/PickCube-v1/motionplanning/trajectory.rgb.pd_ee_delta_pos.physx_cpu.h5 \
    --control-mode "pd_ee_delta_pos" --sim-backend "physx_cuda" --num_demos $demos --max_episode_steps 100 --num_eval_envs 100 --no-capture-video \
    --total_iters 30000 --log_freq 100 --eval_freq 5000 \
    --exp-name=act-PickCube-v1-rgb-${demos}_motionplanning_demos-$seed \
    --demo_type motionplanning --track

  python train_rgbd.py --env-id PushCube-v1 --no_include_depth \
    --demo-path ~/.maniskill/demos/PushCube-v1/motionplanning/trajectory.rgb.pd_ee_delta_pos.physx_cpu.h5 \
    --control-mode "pd_ee_delta_pos" --sim-backend "physx_cuda" --num_demos $demos --max_episode_steps 100 --num_eval_envs 100 --no-capture-video \
    --total_iters 30000 --log_freq 100 --eval_freq 5000 \
    --exp-name=act-PushCube-v1-rgb-${demos}_motionplanning_demos-$seed \
    --demo_type motionplanning --track

  python train_rgbd.py --env-id StackCube-v1 --no_include_depth \
    --demo-path ~/.maniskill/demos/StackCube-v1/motionplanning/trajectory.rgb.pd_ee_delta_pos.physx_cpu.h5 \
    --control-mode "pd_ee_delta_pos" --sim-backend "physx_cuda" --num_demos $demos --max_episode_steps 200 --num_eval_envs 100 --no-capture-video \
    --total_iters 30000 --log_freq 100 --eval_freq 5000 \
    --exp-name=act-StackCube-v1-rgb-${demos}_motionplanning_demos-$seed \
    --demo_type motionplanning --track

  python train_rgbd.py --env-id PegInsertionSide-v1 --no_include_depth \
    --demo-path ~/.maniskill/demos/PegInsertionSide-v1/motionplanning/trajectory.rgb.pd_ee_delta_pose.physx_cpu.h5 \
    --control-mode "pd_ee_delta_pose" --sim-backend "physx_cpu" --num_demos $demos --max_episode_steps 300 \
    --total_iters 100000 --log_freq 100 --eval_freq 5000 \
    --exp-name=act-PegInsertionSide-v1-rgb-${demos}_motionplanning_demos-$seed \
    --demo_type motionplanning --track

  python train_rgbd.py --env-id PushT-v1 --no_include_depth \
    --demo-path ~/.maniskill/demos/PushT-v1/rl/trajectory.rgb.pd_ee_delta_pose.physx_cuda.h5 \
    --control-mode "pd_ee_delta_pose" --sim-backend "physx_cuda" --num_demos $demos --max_episode_steps 150 \
    --total_iters 100000 --log_freq 100 --eval_freq 5000 \
    --exp-name=act-PushT-v1-rgb-${demos}_rl_demos-$seed \
    --demo_type rl --track
done
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/setup.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from setuptools import setup, find_packages

# Read the long description with an explicit encoding inside a context
# manager so the file handle is closed deterministically (the previous
# bare open().read() leaked the handle and relied on the locale encoding).
with open("README.md", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="act",
    version="0.1.0",
    packages=find_packages(),
    install_requires=[
        "torchvision",
        "diffusers",
        "tensorboard",
        "wandb",
        "mani_skill"
    ],
    description="A minimal setup for ACT for ManiSkill",
    long_description=long_description,
    long_description_content_type="text/markdown",
)
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/train.py
ADDED
|
@@ -0,0 +1,456 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ALGO_NAME = 'BC_ACT_state'
|
| 2 |
+
|
| 3 |
+
import argparse
|
| 4 |
+
import os
|
| 5 |
+
import random
|
| 6 |
+
from distutils.util import strtobool
|
| 7 |
+
import time
|
| 8 |
+
import gymnasium as gym
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
import torch.nn as nn
|
| 12 |
+
import torch.optim as optim
|
| 13 |
+
import torch.nn.functional as F
|
| 14 |
+
import torchvision.transforms as T
|
| 15 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 16 |
+
from act.evaluate import evaluate
|
| 17 |
+
from mani_skill.utils import common, gym_utils
|
| 18 |
+
from mani_skill.utils.registration import REGISTERED_ENVS
|
| 19 |
+
|
| 20 |
+
from collections import defaultdict
|
| 21 |
+
|
| 22 |
+
from torch.utils.data.dataset import Dataset
|
| 23 |
+
from torch.utils.data.sampler import RandomSampler, BatchSampler
|
| 24 |
+
from torch.utils.data.dataloader import DataLoader
|
| 25 |
+
from act.utils import IterationBasedBatchSampler, worker_init_fn
|
| 26 |
+
from act.make_env import make_eval_envs
|
| 27 |
+
from diffusers.training_utils import EMAModel
|
| 28 |
+
from act.detr.transformer import build_transformer
|
| 29 |
+
from act.detr.detr_vae import build_encoder, DETRVAE
|
| 30 |
+
from dataclasses import dataclass, field
|
| 31 |
+
from typing import Optional, List
|
| 32 |
+
import tyro
|
| 33 |
+
|
| 34 |
+
@dataclass
class Args:
    """Command-line arguments for state-based ACT behavior cloning (parsed by tyro)."""

    exp_name: Optional[str] = None
    """the name of this experiment"""
    seed: int = 1
    """seed of the experiment"""
    torch_deterministic: bool = True
    """if toggled, `torch.backends.cudnn.deterministic=False`"""
    cuda: bool = True
    """if toggled, cuda will be enabled by default"""
    track: bool = False
    """if toggled, this experiment will be tracked with Weights and Biases"""
    wandb_project_name: str = "ManiSkill"
    """the wandb's project name"""
    wandb_entity: Optional[str] = None
    """the entity (team) of wandb's project"""
    capture_video: bool = True
    """whether to capture videos of the agent performances (check out `videos` folder)"""

    env_id: str = "PickCube-v1"
    """the id of the environment"""
    demo_path: str = 'pickcube.trajectory.state.pd_joint_delta_pos.cpu.h5'
    """the path of demo dataset (pkl or h5)"""
    num_demos: Optional[int] = None
    """number of trajectories to load from the demo dataset"""
    total_iters: int = 1_000_000
    """total timesteps of the experiment"""
    batch_size: int = 1024
    """the batch size of sample from the replay memory"""

    # ACT specific arguments
    lr: float = 1e-4
    """the learning rate of the Action Chunking with Transformers"""
    kl_weight: float = 10
    """weight for the kl loss term"""
    temporal_agg: bool = True
    """if toggled, temporal ensembling will be performed"""

    # Backbone — forwarded to the DETR builders; the state-based Agent builds
    # no CNN backbone (backbones=None), lr_backbone is still used to form a
    # (then empty) optimizer parameter group.
    position_embedding: str = 'sine'
    backbone: str = 'resnet18'
    lr_backbone: float = 1e-5
    masks: bool = False
    dilation: bool = False

    # Transformer — CVAE encoder/decoder hyperparameters passed to
    # build_encoder/build_transformer.
    enc_layers: int = 2
    dec_layers: int = 4
    dim_feedforward: int = 512
    hidden_dim: int = 256
    dropout: float = 0.1
    nheads: int = 4
    num_queries: int = 30
    pre_norm: bool = False

    # Environment/experiment specific arguments
    max_episode_steps: Optional[int] = None
    """Change the environments' max_episode_steps to this value. Sometimes necessary if the demonstrations being imitated are too short. Typically the default
    max episode steps of environments in ManiSkill are tuned lower so reinforcement learning agents can learn faster."""
    log_freq: int = 1000
    """the frequency of logging the training metrics"""
    eval_freq: int = 5000
    """the frequency of evaluating the agent on the evaluation environments"""
    save_freq: Optional[int] = None
    """the frequency of saving the model checkpoints. By default this is None and will only save checkpoints based on the best evaluation metrics."""
    num_eval_episodes: int = 100
    """the number of episodes to evaluate the agent on"""
    num_eval_envs: int = 10
    """the number of parallel environments to evaluate the agent on"""
    sim_backend: str = "physx_cpu"
    """the simulation backend to use for evaluation environments. can be "physx_cpu" or "physx_cuda" """
    num_dataload_workers: int = 0
    """the number of workers to use for loading the training data in the torch dataloader"""
    control_mode: str = 'pd_joint_delta_pos'
    """the control mode to use for the evaluation environments. Must match the control mode of the demonstration dataset."""

    # additional tags/configs for logging purposes to wandb and shared comparisons with other algorithms
    demo_type: Optional[str] = None
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
class SmallDemoDataset_ACTPolicy(Dataset):  # Load everything into GPU memory
    """Demonstration dataset for ACT training, held entirely on `device`.

    Each sample is one timestep's observation plus the next `num_queries`
    actions (padded at trajectory end). For non-delta control modes,
    observations and actions are normalized with dataset-wide statistics.

    NOTE(review): this class reads the module-level `args` (tyro CLI) for the
    control mode — it assumes the training script is run as a program.
    """

    def __init__(self, data_path, num_queries, device, num_traj):
        if data_path[-4:] == '.pkl':
            raise NotImplementedError()
        else:
            from act.utils import load_demo_dataset
            trajectories = load_demo_dataset(data_path, num_traj=num_traj, concat=False)
            # trajectories['observations'] is a list of np.ndarray (L+1, obs_dim)
            # trajectories['actions'] is a list of np.ndarray (L, act_dim)

        # Move every trajectory tensor onto the target device up front.
        for k, v in trajectories.items():
            for i in range(len(v)):
                trajectories[k][i] = torch.Tensor(v[i]).to(device)

        # When the robot reaches the goal state, its joints and gripper fingers need to remain stationary
        if 'delta_pos' in args.control_mode or args.control_mode == 'base_pd_joint_vel_arm_pd_joint_vel':
            self.pad_action_arm = torch.zeros((trajectories['actions'][0].shape[1]-1,), device=device)
            # to make the arm stay still, we pad the action with 0 in 'delta_pos' control mode
            # gripper action needs to be copied from the last action

        # One sample per (trajectory, timestep) pair.
        self.slices = []
        self.num_traj = len(trajectories['actions'])
        for traj_idx in range(self.num_traj):
            episode_len = trajectories['actions'][traj_idx].shape[0]
            self.slices += [
                (traj_idx, ts) for ts in range(episode_len)
            ]

        print(f"Length of Dataset: {len(self.slices)}")

        self.num_queries = num_queries
        self.trajectories = trajectories
        self.delta_control = 'delta' in args.control_mode
        self.norm_stats = self.get_norm_stats() if not self.delta_control else None

    def __getitem__(self, index):
        traj_idx, ts = self.slices[index]

        # get observation at ts only
        obs = self.trajectories['observations'][traj_idx][ts]
        # get num_queries actions
        act_seq = self.trajectories['actions'][traj_idx][ts:ts+self.num_queries]
        action_len = act_seq.shape[0]

        # Pad after the trajectory, so all the observations are utilized in training
        if action_len < self.num_queries:
            if 'delta_pos' in args.control_mode or args.control_mode == 'base_pd_joint_vel_arm_pd_joint_vel':
                gripper_action = act_seq[-1, -1]
                pad_action = torch.cat((self.pad_action_arm, gripper_action[None]), dim=0)
                act_seq = torch.cat([act_seq, pad_action.repeat(self.num_queries-action_len, 1)], dim=0)
                # making the robot (arm and gripper) stay still
            elif not self.delta_control:
                target = act_seq[-1]
                act_seq = torch.cat([act_seq, target.repeat(self.num_queries-action_len, 1)], dim=0)

        # normalize obs and act_seq
        if not self.delta_control:
            obs = (obs - self.norm_stats["state_mean"][0]) / self.norm_stats["state_std"][0]
            act_seq = (act_seq - self.norm_stats["action_mean"]) / self.norm_stats["action_std"]

        return {
            'observations': obs,
            'actions': act_seq,
        }

    def __len__(self):
        return len(self.slices)

    def get_norm_stats(self):
        """Compute per-dimension mean/std of states and actions over the dataset.

        BUG FIX: the previous body was a copy-paste of __getitem__ that
        referenced an undefined `index` (NameError as soon as a non-delta
        control mode was used) and never produced statistics. This now
        returns the stats dict that __getitem__ consumes — each entry shaped
        (1, dim) so it broadcasts over action sequences and can be indexed
        with [0] for a single observation. Std is clamped from below to
        avoid division by near-zero for constant dimensions.
        """
        all_obs = torch.cat(self.trajectories['observations'], dim=0)
        all_act = torch.cat(self.trajectories['actions'], dim=0)

        state_mean = all_obs.mean(dim=0, keepdim=True)
        state_std = all_obs.std(dim=0, keepdim=True).clamp(min=1e-2)
        action_mean = all_act.mean(dim=0, keepdim=True)
        action_std = all_act.std(dim=0, keepdim=True).clamp(min=1e-2)

        return {
            "state_mean": state_mean,
            "state_std": state_std,
            "action_mean": action_mean,
            "action_std": action_std,
        }
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
class Agent(nn.Module):
    """ACT agent for flat state observations: a CVAE whose decoder predicts a
    chunk of `num_queries` future actions conditioned on the current state."""

    def __init__(self, env, args):
        super().__init__()
        # State-based variant: observation and action spaces must be flat vectors.
        assert len(env.single_observation_space.shape) == 1  # (obs_dim,)
        assert len(env.single_action_space.shape) == 1  # (act_dim,)
        #assert (env.single_action_space.high == 1).all() and (env.single_action_space.low == -1).all()

        self.kl_weight = args.kl_weight
        self.state_dim = env.single_observation_space.shape[0]
        self.act_dim = env.single_action_space.shape[0]

        # No CNN backbone here — images are not used in the state-based script.
        backbones = None
        decoder = build_transformer(args)  # CVAE decoder
        cvae_encoder = build_encoder(args)  # CVAE encoder

        # ACT ( CVAE encoder + (CNN backbones + CVAE decoder) )
        self.model = DETRVAE(
            backbones,
            decoder,
            cvae_encoder,
            state_dim=self.state_dim,
            action_dim=self.act_dim,
            num_queries=args.num_queries,
        )

    def compute_loss(self, obs, action_seq):
        """Return {'l1', 'kl', 'loss'} for a batch of (obs, action chunk)."""
        a_hat, (mu, logvar) = self.model(obs, action_seq)

        total_kld, _, _ = kl_divergence(mu, logvar)
        l1 = F.l1_loss(action_seq, a_hat, reduction='none').mean()

        return {
            'l1': l1,
            'kl': total_kld[0],
            'loss': l1 + total_kld[0] * self.kl_weight,
        }

    def get_action(self, obs):
        """Predict an action chunk from obs alone (latent sampled from the prior)."""
        a_hat, _ = self.model(obs)
        return a_hat
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
def kl_divergence(mu, logvar):
    """KL( N(mu, exp(logvar)) || N(0, I) ), reported three ways.

    Returns:
        total_kld: shape (1,) — KL summed over dims, averaged over the batch.
        dimension_wise_kld: shape (D,) — per-dimension KL averaged over batch.
        mean_kld: shape (1,) — KL averaged over both dims and batch.
    """
    assert mu.size(0) != 0, "empty batch"
    # Conv-style (B, C, 1, 1) inputs are flattened to (B, C) first.
    if mu.data.ndimension() == 4:
        mu = mu.view(mu.size(0), mu.size(1))
    if logvar.data.ndimension() == 4:
        logvar = logvar.view(logvar.size(0), logvar.size(1))

    # Closed-form KL of a diagonal Gaussian against the standard normal.
    klds = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp())

    total_kld = klds.sum(dim=1).mean(dim=0, keepdim=True)
    dimension_wise_kld = klds.mean(dim=0)
    mean_kld = klds.mean(dim=1).mean(dim=0, keepdim=True)
    return total_kld, dimension_wise_kld, mean_kld
|
| 281 |
+
|
| 282 |
+
def save_ckpt(run_name, tag):
    """Save a checkpoint to runs/<run_name>/checkpoints/<tag>.pt.

    Stores the dataset normalization stats plus both the raw and the
    EMA-averaged agent weights.

    NOTE: relies on module-level globals created in the __main__ block:
    `ema`, `ema_agent`, `agent`, and `dataset`.
    """
    os.makedirs(f'runs/{run_name}/checkpoints', exist_ok=True)
    # Materialize the EMA weights into ema_agent before serializing.
    ema.copy_to(ema_agent.parameters())
    torch.save({
        'norm_stats': dataset.norm_stats,
        'agent': agent.state_dict(),
        'ema_agent': ema_agent.state_dict(),
    }, f'runs/{run_name}/checkpoints/{tag}.pt')
|
| 290 |
+
|
| 291 |
+
if __name__ == "__main__":
|
| 292 |
+
args = tyro.cli(Args)
|
| 293 |
+
if args.exp_name is None:
|
| 294 |
+
args.exp_name = os.path.basename(__file__)[: -len(".py")]
|
| 295 |
+
run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
|
| 296 |
+
else:
|
| 297 |
+
run_name = args.exp_name
|
| 298 |
+
|
| 299 |
+
if args.demo_path.endswith('.h5'):
|
| 300 |
+
import json
|
| 301 |
+
json_file = args.demo_path[:-2] + 'json'
|
| 302 |
+
with open(json_file, 'r') as f:
|
| 303 |
+
demo_info = json.load(f)
|
| 304 |
+
if 'control_mode' in demo_info['env_info']['env_kwargs']:
|
| 305 |
+
control_mode = demo_info['env_info']['env_kwargs']['control_mode']
|
| 306 |
+
elif 'control_mode' in demo_info['episodes'][0]:
|
| 307 |
+
control_mode = demo_info['episodes'][0]['control_mode']
|
| 308 |
+
else:
|
| 309 |
+
raise Exception('Control mode not found in json')
|
| 310 |
+
assert control_mode == args.control_mode, f"Control mode mismatched. Dataset has control mode {control_mode}, but args has control mode {args.control_mode}"
|
| 311 |
+
|
| 312 |
+
# TRY NOT TO MODIFY: seeding
|
| 313 |
+
random.seed(args.seed)
|
| 314 |
+
np.random.seed(args.seed)
|
| 315 |
+
torch.manual_seed(args.seed)
|
| 316 |
+
torch.backends.cudnn.deterministic = args.torch_deterministic
|
| 317 |
+
|
| 318 |
+
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
|
| 319 |
+
|
| 320 |
+
# env setup
|
| 321 |
+
env_kwargs = dict(control_mode=args.control_mode, reward_mode="sparse", obs_mode="state", render_mode="rgb_array")
|
| 322 |
+
if args.max_episode_steps is not None:
|
| 323 |
+
env_kwargs["max_episode_steps"] = args.max_episode_steps
|
| 324 |
+
other_kwargs = None
|
| 325 |
+
envs = make_eval_envs(args.env_id, args.num_eval_envs, args.sim_backend, env_kwargs, other_kwargs, video_dir=f'runs/{run_name}/videos' if args.capture_video else None)
|
| 326 |
+
|
| 327 |
+
# dataloader setup
|
| 328 |
+
dataset = SmallDemoDataset_ACTPolicy(args.demo_path, args.num_queries, device, num_traj=args.num_demos)
|
| 329 |
+
sampler = RandomSampler(dataset, replacement=False)
|
| 330 |
+
batch_sampler = BatchSampler(sampler, batch_size=args.batch_size, drop_last=True)
|
| 331 |
+
batch_sampler = IterationBasedBatchSampler(batch_sampler, args.total_iters)
|
| 332 |
+
train_dataloader = DataLoader(
|
| 333 |
+
dataset,
|
| 334 |
+
batch_sampler=batch_sampler,
|
| 335 |
+
num_workers=args.num_dataload_workers,
|
| 336 |
+
worker_init_fn=lambda worker_id: worker_init_fn(worker_id, base_seed=args.seed),
|
| 337 |
+
)
|
| 338 |
+
if args.num_demos is None:
|
| 339 |
+
args.num_demos = dataset.num_traj
|
| 340 |
+
|
| 341 |
+
if args.track:
|
| 342 |
+
import wandb
|
| 343 |
+
config = vars(args)
|
| 344 |
+
config["eval_env_cfg"] = dict(**env_kwargs, num_envs=args.num_eval_envs, env_id=args.env_id, env_horizon=args.max_episode_steps)
|
| 345 |
+
wandb.init(
|
| 346 |
+
project=args.wandb_project_name,
|
| 347 |
+
entity=args.wandb_entity,
|
| 348 |
+
sync_tensorboard=True,
|
| 349 |
+
config=config,
|
| 350 |
+
name=run_name,
|
| 351 |
+
save_code=True,
|
| 352 |
+
group="ACT",
|
| 353 |
+
tags=["act"]
|
| 354 |
+
)
|
| 355 |
+
writer = SummaryWriter(f"runs/{run_name}")
|
| 356 |
+
writer.add_text(
|
| 357 |
+
"hyperparameters",
|
| 358 |
+
"|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
|
| 359 |
+
)
|
| 360 |
+
|
| 361 |
+
# agent setup
|
| 362 |
+
agent = Agent(envs, args).to(device)
|
| 363 |
+
|
| 364 |
+
# optimizer setup
|
| 365 |
+
param_dicts = [
|
| 366 |
+
{"params": [p for n, p in agent.named_parameters() if "backbone" not in n and p.requires_grad]},
|
| 367 |
+
{
|
| 368 |
+
"params": [p for n, p in agent.named_parameters() if "backbone" in n and p.requires_grad],
|
| 369 |
+
"lr": args.lr_backbone,
|
| 370 |
+
},
|
| 371 |
+
]
|
| 372 |
+
optimizer = optim.AdamW(param_dicts, lr=args.lr, weight_decay=1e-4)
|
| 373 |
+
|
| 374 |
+
# LR drop by a factor of 10 after lr_drop iters
|
| 375 |
+
lr_drop = int((2/3)*args.total_iters)
|
| 376 |
+
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, lr_drop)
|
| 377 |
+
|
| 378 |
+
# Exponential Moving Average
|
| 379 |
+
# accelerates training and improves stability
|
| 380 |
+
# holds a copy of the model weights
|
| 381 |
+
ema = EMAModel(parameters=agent.parameters(), power=0.75)
|
| 382 |
+
ema_agent = Agent(envs, args).to(device)
|
| 383 |
+
|
| 384 |
+
# Evaluation
|
| 385 |
+
#eval_kwargs = dict(
|
| 386 |
+
# stats=dataset.norm_stats, num_queries=args.num_queries, temporal_agg=args.temporal_agg,
|
| 387 |
+
# max_timesteps=gym_utils.find_max_episode_steps_value(envs), device=device, sim_backend=args.sim_backend
|
| 388 |
+
#)
|
| 389 |
+
eval_kwargs = dict(
|
| 390 |
+
stats=dataset.norm_stats, num_queries=args.num_queries, temporal_agg=args.temporal_agg,
|
| 391 |
+
max_timesteps=args.max_episode_steps, device=device, sim_backend=args.sim_backend
|
| 392 |
+
)
|
| 393 |
+
|
| 394 |
+
# ---------------------------------------------------------------------------- #
|
| 395 |
+
# Training begins.
|
| 396 |
+
# ---------------------------------------------------------------------------- #
|
| 397 |
+
print("Training begins...")
|
| 398 |
+
agent.train()
|
| 399 |
+
|
| 400 |
+
best_eval_metrics = defaultdict(float)
|
| 401 |
+
timings = defaultdict(float)
|
| 402 |
+
|
| 403 |
+
for cur_iter, data_batch in enumerate(train_dataloader):
|
| 404 |
+
last_tick = time.time()
|
| 405 |
+
# forward and compute loss
|
| 406 |
+
loss_dict = agent.compute_loss(
|
| 407 |
+
obs=data_batch['observations'], # (B, obs_dim)
|
| 408 |
+
action_seq=data_batch['actions'], # (B, num_queries, act_dim)
|
| 409 |
+
)
|
| 410 |
+
total_loss = loss_dict['loss'] # total_loss = l1 + kl * self.kl_weight
|
| 411 |
+
|
| 412 |
+
# backward
|
| 413 |
+
optimizer.zero_grad()
|
| 414 |
+
total_loss.backward()
|
| 415 |
+
optimizer.step()
|
| 416 |
+
lr_scheduler.step() # step lr scheduler every batch, this is different from standard pytorch behavior
|
| 417 |
+
|
| 418 |
+
# update Exponential Moving Average of the model weights
|
| 419 |
+
ema.step(agent.parameters())
|
| 420 |
+
timings["update"] += time.time() - last_tick
|
| 421 |
+
|
| 422 |
+
# Evaluation
|
| 423 |
+
if cur_iter % args.eval_freq == 0:
|
| 424 |
+
last_tick = time.time()
|
| 425 |
+
|
| 426 |
+
ema.copy_to(ema_agent.parameters())
|
| 427 |
+
|
| 428 |
+
eval_metrics = evaluate(args.num_eval_episodes, ema_agent, envs, eval_kwargs)
|
| 429 |
+
timings["eval"] += time.time() - last_tick
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
print(f"Evaluated {len(eval_metrics['success_at_end'])} episodes")
|
| 433 |
+
for k in eval_metrics.keys():
|
| 434 |
+
eval_metrics[k] = np.mean(eval_metrics[k])
|
| 435 |
+
writer.add_scalar(f"eval/{k}", eval_metrics[k], cur_iter)
|
| 436 |
+
print(f"{k}: {eval_metrics[k]:.4f}")
|
| 437 |
+
|
| 438 |
+
save_on_best_metrics = ["success_once", "success_at_end"]
|
| 439 |
+
for k in save_on_best_metrics:
|
| 440 |
+
if k in eval_metrics and eval_metrics[k] > best_eval_metrics[k]:
|
| 441 |
+
best_eval_metrics[k] = eval_metrics[k]
|
| 442 |
+
save_ckpt(run_name, f"best_eval_{k}")
|
| 443 |
+
print(f'New best {k}_rate: {eval_metrics[k]:.4f}. Saving checkpoint.')
|
| 444 |
+
|
| 445 |
+
if cur_iter % args.log_freq == 0:
|
| 446 |
+
print(f"Iteration {cur_iter}, loss: {total_loss.item()}")
|
| 447 |
+
writer.add_scalar("charts/learning_rate", optimizer.param_groups[0]["lr"], cur_iter)
|
| 448 |
+
writer.add_scalar("losses/total_loss", total_loss.item(), cur_iter)
|
| 449 |
+
for k, v in timings.items():
|
| 450 |
+
writer.add_scalar(f"time/{k}", v, cur_iter)
|
| 451 |
+
# Checkpoint
|
| 452 |
+
if args.save_freq is not None and cur_iter % args.save_freq == 0:
|
| 453 |
+
save_ckpt(run_name, str(cur_iter))
|
| 454 |
+
|
| 455 |
+
envs.close()
|
| 456 |
+
writer.close()
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/act/train_rgbd.py
ADDED
|
@@ -0,0 +1,612 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ALGO_NAME = 'BC_ACT_rgbd'
|
| 2 |
+
|
| 3 |
+
import argparse
|
| 4 |
+
import os
|
| 5 |
+
import random
|
| 6 |
+
from distutils.util import strtobool
|
| 7 |
+
from functools import partial
|
| 8 |
+
import time
|
| 9 |
+
import gymnasium as gym
|
| 10 |
+
import numpy as np
|
| 11 |
+
import torch
|
| 12 |
+
import torch.nn as nn
|
| 13 |
+
import torch.optim as optim
|
| 14 |
+
import torch.nn.functional as F
|
| 15 |
+
import torchvision.transforms as T
|
| 16 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 17 |
+
from act.evaluate import evaluate
|
| 18 |
+
from mani_skill.envs.sapien_env import BaseEnv
|
| 19 |
+
from mani_skill.utils import common, gym_utils
|
| 20 |
+
from mani_skill.utils.registration import REGISTERED_ENVS
|
| 21 |
+
|
| 22 |
+
from collections import defaultdict
|
| 23 |
+
|
| 24 |
+
from torch.utils.data.dataset import Dataset
|
| 25 |
+
from torch.utils.data.sampler import RandomSampler, BatchSampler
|
| 26 |
+
from torch.utils.data.dataloader import DataLoader
|
| 27 |
+
from act.utils import IterationBasedBatchSampler, worker_init_fn
|
| 28 |
+
from act.make_env import make_eval_envs
|
| 29 |
+
from diffusers.training_utils import EMAModel
|
| 30 |
+
from act.detr.backbone import build_backbone
|
| 31 |
+
from act.detr.transformer import build_transformer
|
| 32 |
+
from act.detr.detr_vae import build_encoder, DETRVAE
|
| 33 |
+
from dataclasses import dataclass, field
|
| 34 |
+
from typing import Optional, List, Dict
|
| 35 |
+
import tyro
|
| 36 |
+
|
| 37 |
+
@dataclass
class Args:
    """Command-line configuration for ACT behavior cloning on RGB(-D) observations.

    Parsed by ``tyro.cli`` in ``__main__``; each field's trailing bare string is
    surfaced by tyro as the CLI help text, so those strings are left unchanged.
    """

    exp_name: Optional[str] = None
    """the name of this experiment"""
    seed: int = 1
    """seed of the experiment"""
    torch_deterministic: bool = True
    """if toggled, `torch.backends.cudnn.deterministic=False`"""
    # NOTE(review): the flag is assigned directly to torch.backends.cudnn.deterministic
    # in __main__, so toggling it ON sets deterministic=True; the help text above is inverted.
    cuda: bool = True
    """if toggled, cuda will be enabled by default"""
    track: bool = False
    """if toggled, this experiment will be tracked with Weights and Biases"""
    wandb_project_name: str = "ManiSkill"
    """the wandb's project name"""
    wandb_entity: Optional[str] = None
    """the entity (team) of wandb's project"""
    capture_video: bool = True
    """whether to capture videos of the agent performances (check out `videos` folder)"""

    env_id: str = "PickCube-v1"
    """the id of the environment"""
    demo_path: str = 'pickcube.trajectory.rgbd.pd_joint_delta_pos.cpu.h5'
    """the path of demo dataset (pkl or h5)"""
    num_demos: Optional[int] = None
    """number of trajectories to load from the demo dataset"""
    total_iters: int = 1_000_000
    """total timesteps of the experiment"""
    batch_size: int = 256
    """the batch size of sample from the replay memory"""

    # ACT specific arguments
    lr: float = 1e-4
    """the learning rate of the Action Chunking with Transformers"""
    kl_weight: float = 10
    """weight for the kl loss term"""
    temporal_agg: bool = True
    """if toggled, temporal ensembling will be performed"""

    # Backbone
    position_embedding: str = 'sine'  # positional-encoding variant passed to build_backbone
    backbone: str = 'resnet18'  # torchvision CNN backbone name
    lr_backbone: float = 1e-5  # separate (lower) learning rate for backbone params
    masks: bool = False  # DETR mask-head flag, unused for ACT policy learning
    dilation: bool = False  # replace stride with dilation in the backbone's last block
    include_depth: bool = True  # if toggled, depth images are part of the observation

    # Transformer
    enc_layers: int = 2  # CVAE-encoder transformer layers
    dec_layers: int = 4  # decoder transformer layers
    dim_feedforward: int = 512  # transformer feed-forward width
    hidden_dim: int = 256  # transformer embedding dimension
    dropout: float = 0.1
    nheads: int = 8  # attention heads
    num_queries: int = 30  # action-chunk length predicted per forward pass
    pre_norm: bool = False  # pre-LayerNorm transformer variant

    # Environment/experiment specific arguments
    max_episode_steps: Optional[int] = None
    """Change the environments' max_episode_steps to this value. Sometimes necessary if the demonstrations being imitated are too short. Typically the default
    max episode steps of environments in ManiSkill are tuned lower so reinforcement learning agents can learn faster."""
    log_freq: int = 1000
    """the frequency of logging the training metrics"""
    eval_freq: int = 5000
    """the frequency of evaluating the agent on the evaluation environments"""
    save_freq: Optional[int] = None
    """the frequency of saving the model checkpoints. By default this is None and will only save checkpoints based on the best evaluation metrics."""
    num_eval_episodes: int = 100
    """the number of episodes to evaluate the agent on"""
    num_eval_envs: int = 10
    """the number of parallel environments to evaluate the agent on"""
    sim_backend: str = "cpu"
    """the simulation backend to use for evaluation environments. can be "cpu" or "gpu"""
    num_dataload_workers: int = 0
    """the number of workers to use for loading the training data in the torch dataloader"""
    control_mode: str = 'pd_joint_delta_pos'
    """the control mode to use for the evaluation environments. Must match the control mode of the demonstration dataset."""

    # additional tags/configs for logging purposes to wandb and shared comparisons with other algorithms
    demo_type: Optional[str] = None
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class FlattenRGBDObservationWrapper(gym.ObservationWrapper):
    """
    Flattens the rgbd mode observations into a dictionary with two keys, "rgbd" and "state"

    Args:
        rgb (bool): Whether to include rgb images in the observation
        depth (bool): Whether to include depth images in the observation
        state (bool): Whether to include state data in the observation

    Note that the returned observations will have a "rgbd" or "rgb" or "depth" key depending on the rgb/depth bool flags.
    """

    def __init__(self, env, rgb=True, depth=True, state=True) -> None:
        self.base_env: BaseEnv = env.unwrapped
        super().__init__(env)
        self.include_rgb = rgb
        self.include_depth = depth
        self.include_state = state
        # resize the input image to be at least 224x224, as expected by
        # pre-trained torchvision backbones
        self.transforms = T.Compose(
            [
                T.Resize((224, 224), antialias=True),
            ]
        )
        # run one observation through the wrapper so the env's observation
        # space can be updated to match the flattened layout
        new_obs = self.observation(self.base_env._init_raw_obs)
        self.base_env.update_obs_space(new_obs)

    def observation(self, observation: Dict):
        """Convert a raw sensor observation dict into {"state", "rgb", "depth"} (keys per flags)."""
        sensor_data = observation.pop("sensor_data")
        del observation["sensor_param"]
        images_rgb = []
        images_depth = []
        for cam_data in sensor_data.values():
            if self.include_rgb:
                resized_rgb = self.transforms(
                    cam_data["rgb"].permute(0, 3, 1, 2)
                )  # (1, 3, 224, 224)
                images_rgb.append(resized_rgb)
            if self.include_depth:
                # scale raw depth and store compactly as float16
                depth = (cam_data["depth"].to(torch.float32) / 1024).to(torch.float16)
                resized_depth = self.transforms(
                    depth.permute(0, 3, 1, 2)
                )  # (1, 1, 224, 224)
                images_depth.append(resized_depth)

        # BUGFIX: only stack rgb images when rgb is enabled. The original code
        # called torch.stack(images_rgb, ...) unconditionally, which raised on
        # the empty list in depth-only mode (rgb=False).
        if self.include_rgb:
            rgb = torch.stack(images_rgb, dim=1)  # (1, num_cams, C, 224, 224), uint8
        if self.include_depth:
            depth = torch.stack(images_depth, dim=1)  # (1, num_cams, C, 224, 224), float16

        # flatten the rest of the data which should just be state data
        observation = common.flatten_state_dict(observation, use_torch=True)
        ret = dict()
        if self.include_state:
            ret["state"] = observation
        # independent guards produce the same key sets as the original
        # rgb/depth if-elif ladder, without the unreachable combinations
        if self.include_rgb:
            ret["rgb"] = rgb
        if self.include_depth:
            ret["depth"] = depth
        return ret
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
class SmallDemoDataset_ACTPolicy(Dataset):  # Load everything into memory
    """In-memory ACT training dataset over ManiSkill demonstration trajectories.

    Each sample is the observation at one timestep paired with the chunk of the
    next ``num_queries`` actions (padded at episode end).

    NOTE(review): this class also reads the module-level ``args`` global
    (``args.control_mode``) in ``__init__`` and ``__getitem__`` — it can only
    be instantiated after ``args`` is defined in ``__main__``.
    """

    def __init__(self, data_path, num_queries, num_traj, include_depth=True):
        # only ManiSkill .h5 demo datasets are implemented; .pkl is recognized but unsupported
        if data_path[-4:] == '.pkl':
            raise NotImplementedError()
        else:
            from act.utils import load_demo_dataset
            trajectories = load_demo_dataset(data_path, num_traj=num_traj, concat=False)
            # trajectories['observations'] is a list of np.ndarray (L+1, obs_dim)
            # trajectories['actions'] is a list of np.ndarray (L, act_dim)
        print('Raw trajectory loaded, start to pre-process the observations...')

        self.include_depth = include_depth
        self.transforms = T.Compose(
            [
                T.Resize((224, 224), antialias=True),
            ]
        )  # pre-trained models from torchvision.models expect input image to be at least 224x224

        # Pre-process the observations, make them align with the obs returned by the FlattenRGBDObservationWrapper
        obs_traj_dict_list = []
        for obs_traj_dict in trajectories['observations']:
            obs_traj_dict = self.process_obs(obs_traj_dict)
            obs_traj_dict_list.append(obs_traj_dict)
        trajectories['observations'] = obs_traj_dict_list
        # NOTE: relies on the loop variable leaking — keys of the last processed trajectory
        self.obs_keys = list(obs_traj_dict.keys())

        # Pre-process the actions
        for i in range(len(trajectories['actions'])):
            trajectories['actions'][i] = torch.Tensor(trajectories['actions'][i])
        print('Obs/action pre-processing is done.')

        # When the robot reaches the goal state, its joints and gripper fingers need to remain stationary
        if 'delta_pos' in args.control_mode or args.control_mode == 'base_pd_joint_vel_arm_pd_joint_vel':
            self.pad_action_arm = torch.zeros((trajectories['actions'][0].shape[1]-1,))
            # to make the arm stay still, we pad the action with 0 in 'delta_pos' control mode
            # gripper action needs to be copied from the last action
        # else:
        #     raise NotImplementedError(f'Control Mode {args.control_mode} not supported')

        # every (trajectory, timestep) pair is one training sample
        self.slices = []
        self.num_traj = len(trajectories['actions'])
        for traj_idx in range(self.num_traj):
            episode_len = trajectories['actions'][traj_idx].shape[0]
            self.slices += [
                (traj_idx, ts) for ts in range(episode_len)
            ]

        print(f"Length of Dataset: {len(self.slices)}")

        self.num_queries = num_queries
        self.trajectories = trajectories
        self.delta_control = 'delta' in args.control_mode
        # absolute-control data is normalized; delta actions are already near zero mean
        self.norm_stats = self.get_norm_stats() if not self.delta_control else None

    def __getitem__(self, index):
        """Return {'observations': obs dict, 'actions': (num_queries, act_dim) chunk} for one slice."""
        traj_idx, ts = self.slices[index]

        # get state at start_ts only
        state = self.trajectories['observations'][traj_idx]['state'][ts]
        # get num_queries actions
        act_seq = self.trajectories['actions'][traj_idx][ts:ts+self.num_queries]
        action_len = act_seq.shape[0]

        # Pad after the trajectory, so all the observations are utilized in training
        if action_len < self.num_queries:
            if 'delta_pos' in args.control_mode or args.control_mode == 'base_pd_joint_vel_arm_pd_joint_vel':
                # zero arm deltas + repeated last gripper action keep the robot still
                gripper_action = act_seq[-1, -1]
                pad_action = torch.cat((self.pad_action_arm, gripper_action[None]), dim=0)
                act_seq = torch.cat([act_seq, pad_action.repeat(self.num_queries-action_len, 1)], dim=0)
                # making the robot (arm and gripper) stay still
            elif not self.delta_control:
                # absolute control: repeating the last target keeps the robot at the goal pose
                target = act_seq[-1]
                act_seq = torch.cat([act_seq, target.repeat(self.num_queries-action_len, 1)], dim=0)

        # normalize state and act_seq
        if not self.delta_control:
            # stats are stored with a leading keepdim axis, hence the [0]
            state = (state - self.norm_stats["state_mean"][0]) / self.norm_stats["state_std"][0]
            act_seq = (act_seq - self.norm_stats["action_mean"]) / self.norm_stats["action_std"]

        # get rgb or rgbd data at start_ts and combine with state to form obs
        if self.include_depth:
            rgb = self.trajectories['observations'][traj_idx]['rgb'][ts]
            depth = self.trajectories['observations'][traj_idx]['depth'][ts]
            obs = dict(state=state, rgb=rgb, depth=depth)
        else:
            rgb = self.trajectories['observations'][traj_idx]['rgb'][ts]
            obs = dict(state=state, rgb=rgb)

        return {
            'observations': obs,
            'actions': act_seq,
        }

    def __len__(self):
        return len(self.slices)

    def process_obs(self, obs_dict):
        """Mirror FlattenRGBDObservationWrapper for offline data: return {'state', 'rgb'[, 'depth']}."""
        # get rgbd data
        sensor_data = obs_dict.pop("sensor_data")
        del obs_dict["sensor_param"]
        images_rgb = []
        images_depth = []
        for cam_data in sensor_data.values():
            rgb = torch.from_numpy(cam_data["rgb"])  # (ep_len, H, W, 3)
            resized_rgb = self.transforms(
                rgb.permute(0, 3, 1, 2)
            )  # (ep_len, 3, 224, 224); pre-trained models from torchvision.models expect input image to be at least 224x224
            images_rgb.append(resized_rgb)
            if self.include_depth:
                depth = torch.Tensor(cam_data["depth"].astype(np.float32) / 1024).to(torch.float16)  # (ep_len, H, W, 1)
                resized_depth = self.transforms(
                    depth.permute(0, 3, 1, 2)
                )  # (ep_len, 1, 224, 224); pre-trained models from torchvision.models expect input image to be at least 224x224
                images_depth.append(resized_depth)
        rgb = torch.stack(images_rgb, dim=1)  # (ep_len, num_cams, 3, 224, 224) # still uint8
        if self.include_depth:
            depth = torch.stack(images_depth, dim=1)  # (ep_len, num_cams, 1, 224, 224) # float16

        # flatten the rest of the data which should just be state data
        obs_dict['extra'] = {k: v[:, None] if len(v.shape) == 1 else v for k, v in obs_dict['extra'].items()}  # dirty fix for data that has one dimension (e.g. is_grasped)
        obs_dict = common.flatten_state_dict(obs_dict, use_torch=True)

        processed_obs = dict(state=obs_dict, rgb=rgb, depth=depth) if self.include_depth else dict(state=obs_dict, rgb=rgb)

        return processed_obs

    def get_norm_stats(self):
        """Compute mean/std (std clipped to >= 1e-2) of states and padded action chunks."""
        all_state_data = []
        all_action_data = []
        for traj_idx, ts in self.slices:
            state = self.trajectories['observations'][traj_idx]['state'][ts]
            act_seq = self.trajectories['actions'][traj_idx][ts:ts+self.num_queries]
            action_len = act_seq.shape[0]
            if action_len < self.num_queries:
                # same end-of-episode padding as __getitem__ (absolute control)
                target_pos = act_seq[-1]
                act_seq = torch.cat([act_seq, target_pos.repeat(self.num_queries-action_len, 1)], dim=0)
            all_state_data.append(state)
            all_action_data.append(act_seq)

        all_state_data = torch.stack(all_state_data)
        all_action_data = torch.concatenate(all_action_data)

        # normalize obs (state) data
        state_mean = all_state_data.mean(dim=0, keepdim=True)
        state_std = all_state_data.std(dim=0, keepdim=True)
        state_std = torch.clip(state_std, 1e-2, np.inf)  # clipping

        # normalize action data
        action_mean = all_action_data.mean(dim=0, keepdim=True)
        action_std = all_action_data.std(dim=0, keepdim=True)
        action_std = torch.clip(action_std, 1e-2, np.inf)  # clipping

        # NOTE: "example_state" relies on the loop variable leaking — it is the
        # state of the final slice, kept as a shape/dtype exemplar.
        stats = {"action_mean": action_mean, "action_std": action_std,
                 "state_mean": state_mean, "state_std": state_std,
                 "example_state": state}

        return stats
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
class Agent(nn.Module):
    """ACT agent: a DETR-style conditional VAE (DETRVAE) that maps an RGB(-D) +
    state observation to a chunk of ``num_queries`` future actions.

    Components: CVAE encoder (consumes action sequences during training),
    CNN backbone(s) for images, and a transformer CVAE decoder.
    """

    def __init__(self, env, args):
        super().__init__()
        assert len(env.single_observation_space['state'].shape) == 1  # (obs_dim,)
        assert len(env.single_observation_space['rgb'].shape) == 4  # (num_cams, C, H, W)
        assert len(env.single_action_space.shape) == 1  # (act_dim,)
        #assert (env.single_action_space.high == 1).all() and (env.single_action_space.low == -1).all()

        self.state_dim = env.single_observation_space['state'].shape[0]
        self.act_dim = env.single_action_space.shape[0]
        self.kl_weight = args.kl_weight
        # IMPROVEMENT: capture the depth flag on the instance so compute_loss /
        # get_action no longer read the module-level `args` global.
        self.include_depth = args.include_depth
        # ImageNet statistics expected by the pre-trained torchvision backbone
        self.normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

        # CNN backbone
        backbones = []
        backbone = build_backbone(args)
        backbones.append(backbone)

        # CVAE decoder
        transformer = build_transformer(args)

        # CVAE encoder
        encoder = build_encoder(args)

        # ACT ( CVAE encoder + (CNN backbones + CVAE decoder) )
        self.model = DETRVAE(
            backbones,
            transformer,
            encoder,
            state_dim=self.state_dim,
            action_dim=self.act_dim,
            num_queries=args.num_queries,
        )

    def compute_loss(self, obs, action_seq):
        """Return {'l1', 'kl', 'loss'} where loss = l1 + kl * kl_weight.

        Note: mutates ``obs`` in place (rgb scaled to [0, 1] and ImageNet
        normalized; depth cast to float32).
        """
        # normalize rgb data
        obs['rgb'] = obs['rgb'].float() / 255.0
        obs['rgb'] = self.normalize(obs['rgb'])

        # depth data
        if self.include_depth:
            obs['depth'] = obs['depth'].float()

        # forward pass
        a_hat, (mu, logvar) = self.model(obs, action_seq)

        # compute l1 loss and kl loss
        total_kld, dim_wise_kld, mean_kld = kl_divergence(mu, logvar)
        all_l1 = F.l1_loss(action_seq, a_hat, reduction='none')
        l1 = all_l1.mean()

        # store all loss
        loss_dict = dict()
        loss_dict['l1'] = l1
        loss_dict['kl'] = total_kld[0]
        loss_dict['loss'] = loss_dict['l1'] + loss_dict['kl'] * self.kl_weight
        return loss_dict

    def get_action(self, obs):
        """Predict an action chunk from an observation; the latent z is sampled from the prior."""
        # normalize rgb data
        obs['rgb'] = obs['rgb'].float() / 255.0
        obs['rgb'] = self.normalize(obs['rgb'])

        # depth data
        if self.include_depth:
            obs['depth'] = obs['depth'].float()

        # forward pass
        a_hat, (_, _) = self.model(obs)  # no action, sample from prior

        return a_hat
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
def kl_divergence(mu, logvar):
    """Return (total, per-dimension, mean) KL( N(mu, exp(logvar)) || N(0, I) ).

    total: (1,) sum over latent dims, averaged over the batch.
    per-dimension: (D,) averaged over the batch only.
    mean: (1,) averaged over both dims and batch.
    """
    assert mu.size(0) != 0
    # collapse trailing singleton spatial axes if stats arrive as (B, D, 1, 1)
    if mu.data.ndimension() == 4:
        mu = mu.view(mu.size(0), mu.size(1))
    if logvar.data.ndimension() == 4:
        logvar = logvar.view(logvar.size(0), logvar.size(1))

    # elementwise KL for a diagonal Gaussian vs the standard normal
    kld_per_element = 0.5 * (mu.pow(2) + logvar.exp() - logvar - 1)
    total_kld = kld_per_element.sum(1).mean(0, True)
    dimension_wise_kld = kld_per_element.mean(0)
    mean_kld = kld_per_element.mean(1).mean(0, True)

    return total_kld, dimension_wise_kld, mean_kld
|
| 428 |
+
|
| 429 |
+
def save_ckpt(run_name, tag):
    """Serialize agent + EMA weights and dataset norm stats to runs/<run_name>/checkpoints/<tag>.pt.

    Uses the module-level globals ``ema``, ``ema_agent``, ``agent``, ``dataset``.
    """
    os.makedirs(f'runs/{run_name}/checkpoints', exist_ok=True)
    # sync the EMA shadow weights into ema_agent before serializing
    ema.copy_to(ema_agent.parameters())
    payload = {
        'norm_stats': dataset.norm_stats,
        'agent': agent.state_dict(),
        'ema_agent': ema_agent.state_dict(),
    }
    torch.save(payload, f'runs/{run_name}/checkpoints/{tag}.pt')
|
| 437 |
+
|
| 438 |
+
if __name__ == "__main__":
|
| 439 |
+
args = tyro.cli(Args)
|
| 440 |
+
|
| 441 |
+
if args.exp_name is None:
|
| 442 |
+
args.exp_name = os.path.basename(__file__)[: -len(".py")]
|
| 443 |
+
run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
|
| 444 |
+
else:
|
| 445 |
+
run_name = args.exp_name
|
| 446 |
+
|
| 447 |
+
if args.demo_path.endswith('.h5'):
|
| 448 |
+
import json
|
| 449 |
+
json_file = args.demo_path[:-2] + 'json'
|
| 450 |
+
with open(json_file, 'r') as f:
|
| 451 |
+
demo_info = json.load(f)
|
| 452 |
+
if 'control_mode' in demo_info['env_info']['env_kwargs']:
|
| 453 |
+
control_mode = demo_info['env_info']['env_kwargs']['control_mode']
|
| 454 |
+
elif 'control_mode' in demo_info['episodes'][0]:
|
| 455 |
+
control_mode = demo_info['episodes'][0]['control_mode']
|
| 456 |
+
else:
|
| 457 |
+
raise Exception('Control mode not found in json')
|
| 458 |
+
assert control_mode == args.control_mode, f"Control mode mismatched. Dataset has control mode {control_mode}, but args has control mode {args.control_mode}"
|
| 459 |
+
|
| 460 |
+
# TRY NOT TO MODIFY: seeding
|
| 461 |
+
random.seed(args.seed)
|
| 462 |
+
np.random.seed(args.seed)
|
| 463 |
+
torch.manual_seed(args.seed)
|
| 464 |
+
torch.backends.cudnn.deterministic = args.torch_deterministic
|
| 465 |
+
|
| 466 |
+
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
|
| 467 |
+
|
| 468 |
+
# env setup
|
| 469 |
+
env_kwargs = dict(control_mode=args.control_mode, reward_mode="sparse", obs_mode="rgbd" if args.include_depth else "rgb", render_mode="rgb_array")
|
| 470 |
+
if args.max_episode_steps is not None:
|
| 471 |
+
env_kwargs["max_episode_steps"] = args.max_episode_steps
|
| 472 |
+
other_kwargs = None
|
| 473 |
+
wrappers = [partial(FlattenRGBDObservationWrapper, depth=args.include_depth)]
|
| 474 |
+
envs = make_eval_envs(args.env_id, args.num_eval_envs, args.sim_backend, env_kwargs, other_kwargs, video_dir=f'runs/{run_name}/videos' if args.capture_video else None, wrappers=wrappers)
|
| 475 |
+
|
| 476 |
+
# dataloader setup
|
| 477 |
+
dataset = SmallDemoDataset_ACTPolicy(args.demo_path, args.num_queries, num_traj=args.num_demos, include_depth=args.include_depth)
|
| 478 |
+
sampler = RandomSampler(dataset, replacement=False)
|
| 479 |
+
batch_sampler = BatchSampler(sampler, batch_size=args.batch_size, drop_last=True)
|
| 480 |
+
batch_sampler = IterationBasedBatchSampler(batch_sampler, args.total_iters)
|
| 481 |
+
train_dataloader = DataLoader(
|
| 482 |
+
dataset,
|
| 483 |
+
batch_sampler=batch_sampler,
|
| 484 |
+
num_workers=args.num_dataload_workers,
|
| 485 |
+
worker_init_fn=lambda worker_id: worker_init_fn(worker_id, base_seed=args.seed),
|
| 486 |
+
)
|
| 487 |
+
if args.num_demos is None:
|
| 488 |
+
args.num_demos = dataset.num_traj
|
| 489 |
+
|
| 490 |
+
obs_mode = "rgb+depth" if args.include_depth else "rgb"
|
| 491 |
+
|
| 492 |
+
if args.track:
|
| 493 |
+
import wandb
|
| 494 |
+
config = vars(args)
|
| 495 |
+
config["eval_env_cfg"] = dict(**env_kwargs, num_envs=args.num_eval_envs, env_id=args.env_id, env_horizon=args.max_episode_steps)
|
| 496 |
+
wandb.init(
|
| 497 |
+
project=args.wandb_project_name,
|
| 498 |
+
entity=args.wandb_entity,
|
| 499 |
+
sync_tensorboard=True,
|
| 500 |
+
config=config,
|
| 501 |
+
name=run_name,
|
| 502 |
+
save_code=True,
|
| 503 |
+
group="ACT",
|
| 504 |
+
tags=["act"]
|
| 505 |
+
)
|
| 506 |
+
writer = SummaryWriter(f"runs/{run_name}")
|
| 507 |
+
writer.add_text(
|
| 508 |
+
"hyperparameters",
|
| 509 |
+
"|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
|
| 510 |
+
)
|
| 511 |
+
|
| 512 |
+
# agent setup
|
| 513 |
+
agent = Agent(envs, args).to(device)
|
| 514 |
+
|
| 515 |
+
# optimizer setup
|
| 516 |
+
param_dicts = [
|
| 517 |
+
{"params": [p for n, p in agent.named_parameters() if "backbone" not in n and p.requires_grad]},
|
| 518 |
+
{
|
| 519 |
+
"params": [p for n, p in agent.named_parameters() if "backbone" in n and p.requires_grad],
|
| 520 |
+
"lr": args.lr_backbone,
|
| 521 |
+
},
|
| 522 |
+
]
|
| 523 |
+
optimizer = optim.AdamW(param_dicts, lr=args.lr, weight_decay=1e-4)
|
| 524 |
+
|
| 525 |
+
# LR drop by a factor of 10 after lr_drop iters
|
| 526 |
+
lr_drop = int((2/3)*args.total_iters)
|
| 527 |
+
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, lr_drop)
|
| 528 |
+
|
| 529 |
+
# Exponential Moving Average
|
| 530 |
+
# accelerates training and improves stability
|
| 531 |
+
# holds a copy of the model weights
|
| 532 |
+
ema = EMAModel(parameters=agent.parameters(), power=0.75)
|
| 533 |
+
ema_agent = Agent(envs, args).to(device)
|
| 534 |
+
|
| 535 |
+
# Evaluation
|
| 536 |
+
#eval_kwargs = dict(
|
| 537 |
+
# stats=dataset.norm_stats, num_queries=args.num_queries, temporal_agg=args.temporal_agg,
|
| 538 |
+
# max_timesteps=gym_utils.find_max_episode_steps_value(envs), device=device, sim_backend=args.sim_backend
|
| 539 |
+
#)
|
| 540 |
+
eval_kwargs = dict(
|
| 541 |
+
stats=dataset.norm_stats, num_queries=args.num_queries, temporal_agg=args.temporal_agg,
|
| 542 |
+
max_timesteps=args.max_episode_steps, device=device, sim_backend=args.sim_backend
|
| 543 |
+
)
|
| 544 |
+
|
| 545 |
+
# ---------------------------------------------------------------------------- #
|
| 546 |
+
# Training begins.
|
| 547 |
+
# ---------------------------------------------------------------------------- #
|
| 548 |
+
agent.train()
|
| 549 |
+
|
| 550 |
+
best_eval_metrics = defaultdict(float)
|
| 551 |
+
timings = defaultdict(float)
|
| 552 |
+
|
| 553 |
+
for cur_iter, data_batch in enumerate(train_dataloader):
|
| 554 |
+
last_tick = time.time()
|
| 555 |
+
# copy data from cpu to gpu
|
| 556 |
+
obs_batch_dict = data_batch['observations']
|
| 557 |
+
obs_batch_dict = {k: v.cuda(non_blocking=True) for k, v in obs_batch_dict.items()}
|
| 558 |
+
act_batch = data_batch['actions'].cuda(non_blocking=True)
|
| 559 |
+
|
| 560 |
+
# forward and compute loss
|
| 561 |
+
loss_dict = agent.compute_loss(
|
| 562 |
+
obs=obs_batch_dict, # obs_batch_dict['state'] is (B, obs_dim)
|
| 563 |
+
action_seq=act_batch, # (B, num_queries, act_dim)
|
| 564 |
+
)
|
| 565 |
+
total_loss = loss_dict['loss'] # total_loss = l1 + kl * self.kl_weight
|
| 566 |
+
|
| 567 |
+
# backward
|
| 568 |
+
optimizer.zero_grad()
|
| 569 |
+
total_loss.backward()
|
| 570 |
+
optimizer.step()
|
| 571 |
+
lr_scheduler.step() # step lr scheduler every batch, this is different from standard pytorch behavior
|
| 572 |
+
|
| 573 |
+
# update Exponential Moving Average of the model weights
|
| 574 |
+
ema.step(agent.parameters())
|
| 575 |
+
timings["update"] += time.time() - last_tick
|
| 576 |
+
|
| 577 |
+
# Evaluation
|
| 578 |
+
if cur_iter % args.eval_freq == 0:
|
| 579 |
+
last_tick = time.time()
|
| 580 |
+
|
| 581 |
+
ema.copy_to(ema_agent.parameters())
|
| 582 |
+
|
| 583 |
+
eval_metrics = evaluate(args.num_eval_episodes, ema_agent, envs, eval_kwargs)
|
| 584 |
+
timings["eval"] += time.time() - last_tick
|
| 585 |
+
|
| 586 |
+
print(f"Evaluated {len(eval_metrics['success_at_end'])} episodes")
|
| 587 |
+
for k in eval_metrics.keys():
|
| 588 |
+
eval_metrics[k] = np.mean(eval_metrics[k])
|
| 589 |
+
writer.add_scalar(f"eval/{k}", eval_metrics[k], cur_iter)
|
| 590 |
+
print(f"{k}: {eval_metrics[k]:.4f}")
|
| 591 |
+
|
| 592 |
+
save_on_best_metrics = ["success_once", "success_at_end"]
|
| 593 |
+
for k in save_on_best_metrics:
|
| 594 |
+
if k in eval_metrics and eval_metrics[k] > best_eval_metrics[k]:
|
| 595 |
+
best_eval_metrics[k] = eval_metrics[k]
|
| 596 |
+
save_ckpt(run_name, f"best_eval_{k}")
|
| 597 |
+
print(f'New best {k}_rate: {eval_metrics[k]:.4f}. Saving checkpoint.')
|
| 598 |
+
|
| 599 |
+
if cur_iter % args.log_freq == 0:
|
| 600 |
+
print(f"Iteration {cur_iter}, loss: {total_loss.item()}")
|
| 601 |
+
writer.add_scalar("charts/learning_rate", optimizer.param_groups[0]["lr"], cur_iter)
|
| 602 |
+
writer.add_scalar("charts/backbone_learning_rate", optimizer.param_groups[1]["lr"], cur_iter)
|
| 603 |
+
writer.add_scalar("losses/total_loss", total_loss.item(), cur_iter)
|
| 604 |
+
for k, v in timings.items():
|
| 605 |
+
writer.add_scalar(f"time/{k}", v, cur_iter)
|
| 606 |
+
|
| 607 |
+
# Checkpoint
|
| 608 |
+
if args.save_freq is not None and cur_iter % args.save_freq == 0:
|
| 609 |
+
save_ckpt(run_name, str(cur_iter))
|
| 610 |
+
|
| 611 |
+
envs.close()
|
| 612 |
+
writer.close()
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/bc/.gitignore
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
runs/
|
| 3 |
+
wandb/
|
| 4 |
+
*.egg-info/
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/.gitignore
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
runs/
|
| 3 |
+
wandb/
|
| 4 |
+
*.egg-info/
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/README.md
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Diffusion Policy
|
| 2 |
+
|
| 3 |
+
Code for running the Diffusion Policy algorithm based on ["Diffusion Policy: Visuomotor Policy Learning via Action Diffusion"](https://arxiv.org/abs/2303.04137v4). It is adapted from the [original code](https://github.com/real-stanford/diffusion_policy).
|
| 4 |
+
|
| 5 |
+
## Installation
|
| 6 |
+
|
| 7 |
+
To get started, we recommend using conda/mamba to create a new environment and install the dependencies
|
| 8 |
+
|
| 9 |
+
```bash
|
| 10 |
+
conda create -n diffusion-policy-ms python=3.9
|
| 11 |
+
conda activate diffusion-policy-ms
|
| 12 |
+
pip install -e .
|
| 13 |
+
```
|
| 14 |
+
|
| 15 |
+
## Setup
|
| 16 |
+
|
| 17 |
+
Read through the [imitation learning setup documentation](https://maniskill.readthedocs.io/en/latest/user_guide/learning_from_demos/setup.html) which details everything you need to know regarding running imitation learning baselines in ManiSkill. It includes details on how to download demonstration datasets, preprocess them, evaluate policies fairly for comparison, as well as suggestions to improve performance and avoid bugs.
|
| 18 |
+
|
| 19 |
+
## Training
|
| 20 |
+
|
| 21 |
+
We provide scripts to train Diffusion Policy on demonstrations.
|
| 22 |
+
|
| 23 |
+
Note that some demonstrations are slow (e.g. motion planning or human teleoperated) and can exceed the default max episode steps, which can be an issue as imitation learning algorithms learn to solve the task at the same speed the demonstrations solve it. In this case, you can use the `--max-episode-steps` flag to set a higher value so that the policy can solve the task in time. A general recommendation is to set `--max-episode-steps` to about 2x the mean length of the demonstrations you are using for training. We have tuned baselines in the `baselines.sh` script that set a recommended `--max-episode-steps` for each task.
|
| 24 |
+
|
| 25 |
+
Example state based training, learning from 100 demonstrations generated via motionplanning in the PickCube-v1 task
|
| 26 |
+
|
| 27 |
+
```bash
|
| 28 |
+
seed=1
|
| 29 |
+
demos=100
|
| 30 |
+
python train.py --env-id PickCube-v1 \
|
| 31 |
+
--demo-path ~/.maniskill/demos/PickCube-v1/motionplanning/trajectory.state.pd_ee_delta_pos.physx_cpu.h5 \
|
| 32 |
+
--control-mode "pd_ee_delta_pos" --sim-backend "physx_cpu" --num-demos ${demos} --max_episode_steps 100 \
|
| 33 |
+
--total_iters 30000 \
|
| 34 |
+
--exp-name diffusion_policy-PickCube-v1-state-${demos}_motionplanning_demos-${seed} \
|
| 35 |
+
--track # track training on wandb
|
| 36 |
+
```
|
| 37 |
+
|
| 38 |
+
## Citation
|
| 39 |
+
|
| 40 |
+
If you use this baseline please cite the following
|
| 41 |
+
```
|
| 42 |
+
@inproceedings{DBLP:conf/rss/ChiFDXCBS23,
|
| 43 |
+
author = {Cheng Chi and
|
| 44 |
+
Siyuan Feng and
|
| 45 |
+
Yilun Du and
|
| 46 |
+
Zhenjia Xu and
|
| 47 |
+
Eric Cousineau and
|
| 48 |
+
Benjamin Burchfiel and
|
| 49 |
+
Shuran Song},
|
| 50 |
+
editor = {Kostas E. Bekris and
|
| 51 |
+
Kris Hauser and
|
| 52 |
+
Sylvia L. Herbert and
|
| 53 |
+
Jingjin Yu},
|
| 54 |
+
title = {Diffusion Policy: Visuomotor Policy Learning via Action Diffusion},
|
| 55 |
+
booktitle = {Robotics: Science and Systems XIX, Daegu, Republic of Korea, July
|
| 56 |
+
10-14, 2023},
|
| 57 |
+
year = {2023},
|
| 58 |
+
url = {https://doi.org/10.15607/RSS.2023.XIX.026},
|
| 59 |
+
doi = {10.15607/RSS.2023.XIX.026},
|
| 60 |
+
timestamp = {Mon, 29 Apr 2024 21:28:50 +0200},
|
| 61 |
+
biburl = {https://dblp.org/rec/conf/rss/ChiFDXCBS23.bib},
|
| 62 |
+
bibsource = {dblp computer science bibliography, https://dblp.org}
|
| 63 |
+
}
|
| 64 |
+
```
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/baselines.sh
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Baseline scripts
|
| 2 |
+
|
| 3 |
+
# state based baselines
|
| 4 |
+
seed=1
|
| 5 |
+
demos=100
|
| 6 |
+
for demo_type in "motionplanning" "rl"
|
| 7 |
+
do
|
| 8 |
+
demo_path=~/.maniskill/demos/PickCube-v1/${demo_type}/trajectory.state.pd_ee_delta_pos.physx_cpu.h5
|
| 9 |
+
if [ -f "$demo_path" ]; then
|
| 10 |
+
python train.py --env-id PickCube-v1 \
|
| 11 |
+
--demo-path $demo_path \
|
| 12 |
+
--control-mode "pd_ee_delta_pos" --sim-backend "physx_cpu" --num-demos ${demos} --max_episode_steps 100 \
|
| 13 |
+
--total_iters 30000 \
|
| 14 |
+
--exp-name diffusion_policy-PickCube-v1-state-${demos}_motionplanning_demos-${seed} \
|
| 15 |
+
--demo_type=${demo_type} --track # additional tag for logging purposes on wandb
|
| 16 |
+
else
|
| 17 |
+
echo "Demo path $demo_path does not exist. Skipping PickCube-v1 for ${demo_type}."
|
| 18 |
+
fi
|
| 19 |
+
|
| 20 |
+
demo_path=~/.maniskill/demos/PushCube-v1/${demo_type}/trajectory.state.pd_ee_delta_pos.physx_cpu.h5
|
| 21 |
+
if [ -f "$demo_path" ]; then
|
| 22 |
+
python train.py --env-id PushCube-v1 \
|
| 23 |
+
--demo-path $demo_path \
|
| 24 |
+
--control-mode "pd_ee_delta_pos" --sim-backend "physx_cpu" --num-demos ${demos} --max_episode_steps 100 \
|
| 25 |
+
--total_iters 30000 \
|
| 26 |
+
--exp-name diffusion_policy-PushCube-v1-state-${demos}_motionplanning_demos-${seed} \
|
| 27 |
+
--demo_type=${demo_type} --track # additional tag for logging purposes on wandb
|
| 28 |
+
else
|
| 29 |
+
echo "Demo path $demo_path does not exist. Skipping PushCube-v1 for ${demo_type}."
|
| 30 |
+
fi
|
| 31 |
+
demo_path=~/.maniskill/demos/AnymalCReach-v1/${demo_type}/trajectory.state.pd_joint_delta_pos.physx_cuda.h5
|
| 32 |
+
if [ -f "$demo_path" ]; then
|
| 33 |
+
python train.py --env-id AnymalCReach-v1 \
|
| 34 |
+
--demo-path $demo_path \
|
| 35 |
+
--control-mode "pd_joint_delta_pos" --sim-backend "physx_cuda" --num-demos ${demos} --max_episode_steps 200 --num_eval_envs 100 \
|
| 36 |
+
--total_iters 30000 --act_horizon 1 \
|
| 37 |
+
--exp-name diffusion_policy-AnymalCReach-v1-state-${demos}_${demo_type}_demos-${seed} --no_capture_video \
|
| 38 |
+
--demo_type=${demo_type} --track # additional tag for logging purposes on wandb
|
| 39 |
+
else
|
| 40 |
+
echo "Demo path $demo_path does not exist. Skipping AnymalCReach-v1 for ${demo_type}."
|
| 41 |
+
fi
|
| 42 |
+
|
| 43 |
+
demo_path=~/.maniskill/demos/PushT-v1/${demo_type}/trajectory.state.pd_ee_delta_pose.physx_cuda.h5
|
| 44 |
+
if [ -f "$demo_path" ]; then
|
| 45 |
+
python train.py --env-id PushT-v1 \
|
| 46 |
+
--demo-path $demo_path \
|
| 47 |
+
--control-mode "pd_ee_delta_pose" --sim-backend "physx_cuda" --num-demos ${demos} --max_episode_steps 150 --num_eval_envs 100 \
|
| 48 |
+
--total_iters 50000 --act_horizon 1 \
|
| 49 |
+
--exp-name diffusion_policy-PushT-v1-state-${demos}_${demo_type}_demos-${seed} --no_capture_video \
|
| 50 |
+
--demo_type=${demo_type} --track # additional tag for logging purposes on wandb
|
| 51 |
+
else
|
| 52 |
+
echo "Demo path $demo_path does not exist. Skipping PushT-v1 for ${demo_type}."
|
| 53 |
+
fi
|
| 54 |
+
|
| 55 |
+
demo_path=~/.maniskill/demos/StackCube-v1/${demo_type}/trajectory.state.pd_ee_delta_pos.physx_cpu.h5
|
| 56 |
+
if [ -f "$demo_path" ]; then
|
| 57 |
+
python train.py --env-id StackCube-v1 \
|
| 58 |
+
--demo-path $demo_path \
|
| 59 |
+
--control-mode "pd_ee_delta_pos" --sim-backend "physx_cpu" --num-demos ${demos} --max_episode_steps 200 \
|
| 60 |
+
--total_iters 30000 \
|
| 61 |
+
--exp-name diffusion_policy-StackCube-v1-state-${demos}_${demo_type}_demos-${seed} \
|
| 62 |
+
--demo_type=${demo_type} --track # additional tag for logging purposes on wandb
|
| 63 |
+
else
|
| 64 |
+
echo "Demo path $demo_path does not exist. Skipping StackCube-v1 for ${demo_type}."
|
| 65 |
+
fi
|
| 66 |
+
|
| 67 |
+
demo_path=~/.maniskill/demos/PegInsertionSide-v1/${demo_type}/trajectory.state.pd_ee_delta_pose.physx_cpu.h5
|
| 68 |
+
if [ -f "$demo_path" ]; then
|
| 69 |
+
python train.py --env-id PegInsertionSide-v1 \
|
| 70 |
+
--demo-path $demo_path \
|
| 71 |
+
--control-mode "pd_ee_delta_pose" --sim-backend "physx_cpu" --num-demos ${demos} --max_episode_steps 300 \
|
| 72 |
+
--total_iters 100000 \
|
| 73 |
+
--exp-name diffusion_policy-PegInsertionSide-v1-state-${demos}_motionplanning_demos-${seed} \
|
| 74 |
+
--demo_type=${demo_type} --track # additional tag for logging purposes on wandb
|
| 75 |
+
else
|
| 76 |
+
echo "Demo path $demo_path does not exist. Skipping PegInsertionSide-v1 for ${demo_type}."
|
| 77 |
+
fi
|
| 78 |
+
done
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/diffusion_policy/evaluate.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict
|
| 2 |
+
import gymnasium
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from mani_skill.utils import common
|
| 7 |
+
|
| 8 |
+
def collect_episode_info(infos, result):
    """Accumulate per-episode statistics from vectorized-env `infos` into `result`.

    Only envs flagged done via ``infos["_final_info"]`` contribute; other
    entries of ``infos["final_info"]`` are ignored. Returns the (mutated)
    `result` mapping of metric name -> list of values.
    """
    if "final_info" not in infos:  # infos is a dict
        return result
    # Not every parallel env finishes on the same step; the boolean mask
    # "_final_info" marks which entries of "final_info" are valid.
    for idx in np.flatnonzero(infos["_final_info"]):
        env_info = infos["final_info"][idx]  # env_info is also a dict
        episode = env_info['episode']
        result['return'].append(episode['r'][0])
        result['episode_len'].append(episode["l"][0])
        for flag in ("success", "fail"):
            if flag in env_info:
                result[flag].append(env_info[flag])
    return result
| 22 |
+
|
| 23 |
+
def evaluate(n: int, agent, eval_envs, device, sim_backend: str):
    """Roll out `agent` in `eval_envs` until at least `n` episodes finish.

    Assumes the envs ignore terminations and only end via time-limit
    truncation, with every sub-env truncating on the same step (asserted
    below) so results are comparable across algorithms.

    Args:
        n: minimum number of episodes to evaluate.
        agent: policy exposing ``get_action(obs)`` returning a batched action
            sequence; assumed shape (B, horizon, act_dim) — TODO confirm.
        eval_envs: vectorized gym environment.
        device: torch device to move observations to.
        sim_backend: "cpu" converts predicted actions to numpy for CPU envs.

    Returns:
        dict of metric name -> np.ndarray stacked over finished episode batches.
    """
    agent.eval()
    with torch.no_grad():
        eval_metrics = defaultdict(list)
        obs, info = eval_envs.reset()
        eps_count = 0
        while eps_count < n:
            obs = common.to_tensor(obs, device)
            # Predict a whole action sequence, then execute it step by step
            # (open-loop within the sequence) until the envs truncate.
            action_seq = agent.get_action(obs)
            if sim_backend == "cpu":
                action_seq = action_seq.cpu().numpy()
            for i in range(action_seq.shape[1]):
                obs, rew, terminated, truncated, info = eval_envs.step(action_seq[:, i])
                if truncated.any():
                    break

            if truncated.any():
                assert truncated.all() == truncated.any(), "all episodes should truncate at the same time for fair evaluation with other algorithms"
                if isinstance(info["final_info"], dict):
                    # GPU vectorized env path: final_info is a dict of batched tensors.
                    for k, v in info["final_info"]["episode"].items():
                        eval_metrics[k].append(v.float().cpu().numpy())
                else:
                    # CPU vectorized env path: final_info is a per-env sequence of dicts.
                    for final_info in info["final_info"]:
                        for k, v in final_info["episode"].items():
                            eval_metrics[k].append(v)
                eps_count += eval_envs.num_envs
    agent.train()
    # Stack the per-reset metric batches into single arrays.
    for k in eval_metrics.keys():
        eval_metrics[k] = np.stack(eval_metrics[k])
    return eval_metrics
project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/diffusion_policy/make_env.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
import gymnasium as gym
|
| 3 |
+
import mani_skill.envs
|
| 4 |
+
from mani_skill.utils import gym_utils
|
| 5 |
+
from mani_skill.vector.wrappers.gymnasium import ManiSkillVectorEnv
|
| 6 |
+
from mani_skill.utils.wrappers import RecordEpisode, FrameStack, CPUGymWrapper
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def make_eval_envs(env_id, num_envs: int, sim_backend: str, env_kwargs: dict, other_kwargs: dict, video_dir: Optional[str] = None, wrappers: list[gym.Wrapper] = []):
    """Create vectorized environment for evaluation and/or recording videos.
    For CPU vectorized environments only the first parallel environment is used to record videos.
    For GPU vectorized environments all parallel environments are used to record videos.

    Args:
        env_id: the environment id
        num_envs: the number of parallel environments
        sim_backend: the simulation backend to use. can be "cpu" or "gpu"
        env_kwargs: the environment kwargs. You can also pass in max_episode_steps in env_kwargs to override the default max episode steps for the environment.
        other_kwargs: extra options; only 'obs_horizon' (frame-stack depth) is read here.
        video_dir: the directory to save the videos. If None no videos are recorded.
        wrappers: the list of wrappers to apply to the environment.
    """
    # NOTE(review): `wrappers=[]` is a shared mutable default; it is never
    # mutated here, but callers should still prefer passing their own list.
    if sim_backend == "cpu":
        def cpu_make_env(env_id, seed, video_dir=None, env_kwargs = dict(), other_kwargs = dict()):
            def thunk():
                env = gym.make(env_id, reconfiguration_freq=1, **env_kwargs)
                for wrapper in wrappers:
                    env = wrapper(env)
                env = CPUGymWrapper(env, ignore_terminations=True, record_metrics=True)
                if video_dir:
                    env = RecordEpisode(env, output_dir=video_dir, save_trajectory=False, info_on_video=True, source_type="diffusion_policy", source_desc="diffusion_policy evaluation rollout")
                # Stack the last obs_horizon observations so the policy sees history.
                env = gym.wrappers.FrameStack(env, other_kwargs['obs_horizon'])
                env.action_space.seed(seed)
                env.observation_space.seed(seed)
                return env

            return thunk
        # AsyncVectorEnv forks worker processes; SyncVectorEnv suffices for one env.
        vector_cls = gym.vector.SyncVectorEnv if num_envs == 1 else lambda x : gym.vector.AsyncVectorEnv(x, context="forkserver")
        # Only the env seeded 0 records video (see docstring).
        env = vector_cls([cpu_make_env(env_id, seed, video_dir if seed == 0 else None, env_kwargs, other_kwargs) for seed in range(num_envs)])
    else:
        env = gym.make(env_id, num_envs=num_envs, sim_backend=sim_backend, reconfiguration_freq=1, **env_kwargs)
        # Capture the time limit before wrappers potentially hide it.
        max_episode_steps = gym_utils.find_max_episode_steps_value(env)
        for wrapper in wrappers:
            env = wrapper(env)
        env = FrameStack(env, num_stack=other_kwargs['obs_horizon'])
        if video_dir:
            env = RecordEpisode(env, output_dir=video_dir, save_trajectory=False, save_video=True, source_type="diffusion_policy", source_desc="diffusion_policy evaluation rollout", max_steps_per_video=max_episode_steps)
        env = ManiSkillVectorEnv(env, ignore_terminations=True, record_metrics=True)
    return env
project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/diffusion_policy/utils.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.utils.data.sampler import Sampler
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
from h5py import File, Group, Dataset
|
| 5 |
+
|
| 6 |
+
class IterationBasedBatchSampler(Sampler):
    """Wrap a BatchSampler so it is re-iterated until a fixed number of
    batches (iterations) has been produced, regardless of dataset size.

    References:
        https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py
    """

    def __init__(self, batch_sampler, num_iterations, start_iter=0):
        self.batch_sampler = batch_sampler
        self.num_iterations = num_iterations
        self.start_iter = start_iter

    def __iter__(self):
        iteration = self.start_iter
        while iteration < self.num_iterations:
            # DistributedSampler-style samplers reshuffle per epoch; tell them
            # which "epoch" we are on so each pass sees a different ordering.
            inner_sampler = self.batch_sampler.sampler
            if hasattr(inner_sampler, "set_epoch"):
                inner_sampler.set_epoch(iteration)
            for batch in self.batch_sampler:
                yield batch
                iteration += 1
                if iteration >= self.num_iterations:
                    break

    def __len__(self):
        # Number of batches this sampler will still yield.
        return self.num_iterations - self.start_iter
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def worker_init_fn(worker_id, base_seed=None):
    """Seed numpy differently in each pytorch DataLoader worker process.

    Without this, forked workers inherit identical numpy RNG state and
    produce the same "random" numbers. We use the pytorch random generator
    to derive a base_seed when none is supplied, to stay consistent with
    pytorch's own per-worker seeding.
    References:
        https://pytorch.org/docs/stable/notes/faq.html#dataloader-workers-random-seed
    """
    seed = base_seed if base_seed is not None else torch.IntTensor(1).random_().item()
    # print(worker_id, seed)
    np.random.seed(seed + worker_id)
| 47 |
+
|
| 48 |
+
# Maps the dataset field names used by this codebase to the key names stored
# inside ManiSkill trajectory .h5 files.
TARGET_KEY_TO_SOURCE_KEY = {
    'states': 'env_states',
    'observations': 'obs',
    'success': 'success',
    'next_observations': 'obs',
    # 'dones': 'dones',
    # 'rewards': 'rewards',
    'actions': 'actions',
}
def load_content_from_h5_file(file):
    """Recursively read an h5py node into plain Python objects.

    Args:
        file: an h5py ``File``, ``Group``, or ``Dataset`` node.

    Returns:
        A nested dict mirroring the group hierarchy; ``Dataset`` leaves are
        fully materialized into memory via ``file[()]``.

    Raises:
        NotImplementedError: if the node is not a File/Group/Dataset.
    """
    if isinstance(file, (File, Group)):
        return {key: load_content_from_h5_file(file[key]) for key in list(file.keys())}
    elif isinstance(file, Dataset):
        return file[()]
    else:
        # Fixed typo in the original message ("Unspported").
        raise NotImplementedError(f"Unsupported h5 file type: {type(file)}")
| 64 |
+
|
| 65 |
+
def load_hdf5(path, ):
    """Read an entire HDF5 file into nested dicts of in-memory arrays.

    Args:
        path: filesystem path to the .h5 file.

    Returns:
        Nested dict of the file's contents (see ``load_content_from_h5_file``).
    """
    print('Loading HDF5 file', path)
    # Use a context manager so the file handle is released even if a read
    # fails partway through (the original leaked the handle on exception).
    with File(path, 'r') as file:
        ret = load_content_from_h5_file(file)
    print('Loaded')
    return ret
| 72 |
+
|
| 73 |
+
def load_traj_hdf5(path, num_traj=None):
    """Load up to `num_traj` trajectories from a ManiSkill trajectory .h5 file.

    Args:
        path: filesystem path to the .h5 file whose top-level keys are
            'traj_0', 'traj_1', ...
        num_traj: if given, keep only the first `num_traj` trajectories,
            ordered by their numeric suffix.

    Returns:
        dict mapping 'traj_<i>' -> nested dict of that trajectory's contents.
    """
    print('Loading HDF5 file', path)
    # Context manager guarantees the handle is released even if a read fails
    # (the original left the file open on exception).
    with File(path, 'r') as file:
        keys = list(file.keys())
        if num_traj is not None:
            assert num_traj <= len(keys), f"num_traj: {num_traj} > len(keys): {len(keys)}"
            # 'traj_10' sorts before 'traj_2' lexicographically, so sort by
            # the numeric suffix before truncating.
            keys = sorted(keys, key=lambda x: int(x.split('_')[-1]))
            keys = keys[:num_traj]
        ret = {
            key: load_content_from_h5_file(file[key]) for key in keys
        }
    print('Loaded')
    return ret
| 87 |
+
def load_demo_dataset(path, keys=['observations', 'actions'], num_traj=None, concat=True):
    """Load demonstration trajectories from an .h5 file and regroup by field.

    Args:
        path: path to the ManiSkill trajectory .h5 file.
        keys: which target fields to extract (see TARGET_KEY_TO_SOURCE_KEY).
            NOTE(review): mutable default list — never mutated here, but
            callers should avoid relying on it being shared.
        num_traj: optional cap on the number of trajectories loaded.
        concat: if True, concatenate per-trajectory arrays along axis 0.

    Returns:
        dict mapping each target key to either a single concatenated
        np.ndarray (concat=True) or a list of per-trajectory arrays.
    """
    # assert num_traj is None
    raw_data = load_traj_hdf5(path, num_traj)
    # raw_data has keys like: ['traj_0', 'traj_1', ...]
    # raw_data['traj_0'] has keys like: ['actions', 'dones', 'env_states', 'infos', ...]
    _traj = raw_data['traj_0']
    # Validate up front that every requested field exists (checked on traj_0;
    # assumes all trajectories share the same schema — TODO confirm).
    for key in keys:
        source_key = TARGET_KEY_TO_SOURCE_KEY[key]
        assert source_key in _traj, f"key: {source_key} not in traj_0: {_traj.keys()}"
    dataset = {}
    for target_key in keys:
        # if 'next' in target_key:
        #     raise NotImplementedError('Please carefully deal with the length of trajectory')
        source_key = TARGET_KEY_TO_SOURCE_KEY[target_key]
        dataset[target_key] = [ raw_data[idx][source_key] for idx in raw_data ]
        if isinstance(dataset[target_key][0], np.ndarray) and concat:
            # Observations have L+1 entries per L actions; drop the last obs
            # (or the first, for next_observations) so lengths line up.
            # NOTE(review): the length check compares the FIRST loaded traj
            # against traj_0's actions — assumes consistent dict ordering.
            if target_key in ['observations', 'states'] and \
                len(dataset[target_key][0]) > len(raw_data['traj_0']['actions']):
                dataset[target_key] = np.concatenate([
                    t[:-1] for t in dataset[target_key]
                ], axis=0)
            elif target_key in ['next_observations', 'next_states'] and \
                len(dataset[target_key][0]) > len(raw_data['traj_0']['actions']):
                dataset[target_key] = np.concatenate([
                    t[1:] for t in dataset[target_key]
                ], axis=0)
            else:
                dataset[target_key] = np.concatenate(dataset[target_key], axis=0)

            print('Load', target_key, dataset[target_key].shape)
        else:
            print('Load', target_key, len(dataset[target_key]), type(dataset[target_key][0]))
    return dataset
project/ManiSkill3/src/maniskill3_environment/examples/baselines/diffusion_policy/train.py
ADDED
|
@@ -0,0 +1,418 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ALGO_NAME = 'BC_Diffusion_state_UNet'
|
| 2 |
+
|
| 3 |
+
import argparse
|
| 4 |
+
import os
|
| 5 |
+
import random
|
| 6 |
+
from distutils.util import strtobool
|
| 7 |
+
import time
|
| 8 |
+
import gymnasium as gym
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
import torch.nn as nn
|
| 12 |
+
import torch.optim as optim
|
| 13 |
+
import torch.nn.functional as F
|
| 14 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 15 |
+
from diffusion_policy.evaluate import evaluate
|
| 16 |
+
from mani_skill.utils import gym_utils
|
| 17 |
+
from mani_skill.utils.registration import REGISTERED_ENVS
|
| 18 |
+
|
| 19 |
+
from collections import defaultdict
|
| 20 |
+
|
| 21 |
+
from torch.utils.data.dataset import Dataset
|
| 22 |
+
from torch.utils.data.sampler import RandomSampler, BatchSampler
|
| 23 |
+
from torch.utils.data.dataloader import DataLoader
|
| 24 |
+
from diffusion_policy.utils import IterationBasedBatchSampler, worker_init_fn
|
| 25 |
+
from diffusion_policy.make_env import make_eval_envs
|
| 26 |
+
from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
|
| 27 |
+
from diffusers.training_utils import EMAModel
|
| 28 |
+
from diffusers.optimization import get_scheduler
|
| 29 |
+
from diffusion_policy.conditional_unet1d import ConditionalUnet1D
|
| 30 |
+
from dataclasses import dataclass, field
|
| 31 |
+
from typing import Optional, List
|
| 32 |
+
import tyro
|
| 33 |
+
|
| 34 |
+
@dataclass
class Args:
    # Command-line arguments for state-based Diffusion Policy training,
    # parsed by tyro (field docstrings become CLI help text).
    exp_name: Optional[str] = None
    """the name of this experiment"""
    seed: int = 1
    """seed of the experiment"""
    torch_deterministic: bool = True
    """if toggled, `torch.backends.cudnn.deterministic=False`"""
    cuda: bool = True
    """if toggled, cuda will be enabled by default"""
    track: bool = False
    """if toggled, this experiment will be tracked with Weights and Biases"""
    wandb_project_name: str = "ManiSkill"
    """the wandb's project name"""
    wandb_entity: Optional[str] = None
    """the entity (team) of wandb's project"""
    capture_video: bool = True
    """whether to capture videos of the agent performances (check out `videos` folder)"""

    env_id: str = "PegInsertionSide-v0"
    """the id of the environment"""
    demo_path: str = 'data/ms2_official_demos/rigid_body/PegInsertionSide-v0/trajectory.state.pd_ee_delta_pose.h5'
    """the path of demo dataset (pkl or h5)"""
    num_demos: Optional[int] = None
    """number of trajectories to load from the demo dataset"""
    total_iters: int = 1_000_000
    """total timesteps of the experiment"""
    batch_size: int = 1024
    """the batch size of sample from the replay memory"""

    # Diffusion Policy specific arguments
    lr: float = 1e-4
    """the learning rate of the diffusion policy"""
    obs_horizon: int = 2 # Seems not very important in ManiSkill, 1, 2, 4 work well
    act_horizon: int = 8 # Seems not very important in ManiSkill, 4, 8, 15 work well
    pred_horizon: int = 16 # 16->8 leads to worse performance, maybe it is like generate a half image; 16->32, improvement is very marginal
    diffusion_step_embed_dim: int = 64 # not very important
    unet_dims: List[int] = field(default_factory=lambda: [64, 128, 256]) # default setting is about ~4.5M params
    n_groups: int = 8 # jigu says it is better to let each group have at least 8 channels; it seems 4 and 8 are similar

    # Environment/experiment specific arguments
    max_episode_steps: Optional[int] = None
    """Change the environments' max_episode_steps to this value. Sometimes necessary if the demonstrations being imitated are too short. Typically the default
    max episode steps of environments in ManiSkill are tuned lower so reinforcement learning agents can learn faster."""
    log_freq: int = 1000
    """the frequency of logging the training metrics"""
    eval_freq: int = 5000
    """the frequency of evaluating the agent on the evaluation environments"""
    save_freq: Optional[int] = None
    """the frequency of saving the model checkpoints. By default this is None and will only save checkpoints based on the best evaluation metrics."""
    num_eval_episodes: int = 100
    """the number of episodes to evaluate the agent on"""
    num_eval_envs: int = 10
    """the number of parallel environments to evaluate the agent on"""
    sim_backend: str = "cpu"
    """the simulation backend to use for evaluation environments. can be "cpu" or "gpu"."""
    num_dataload_workers: int = 0
    """the number of workers to use for loading the training data in the torch dataloader"""
    control_mode: str = 'pd_joint_delta_pos'
    """the control mode to use for the evaluation environments. Must match the control mode of the demonstration dataset."""

    # additional tags/configs for logging purposes to wandb and shared comparisons with other algorithms
    demo_type: Optional[str] = None
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class SmallDemoDataset_DiffusionPolicy(Dataset): # Load everything into GPU memory
    """Demonstration dataset for Diffusion Policy training.

    Loads an entire ManiSkill demonstration dataset into device memory and
    pre-computes every fixed-length (obs, action) training window, so
    ``__getitem__`` is a cheap slice + pad.

    NOTE(review): this class reads the module-level global ``args``
    (``args.control_mode``, ``args.obs_horizon``, ``args.pred_horizon``)
    in addition to its constructor parameters — it only works when
    instantiated from the training script below.
    """

    def __init__(self, data_path, device, num_traj):
        # data_path: path to a ManiSkill .h5 demo file (pickle input not supported)
        # device: torch device the whole dataset is moved onto
        # num_traj: number of trajectories to load (None presumably means all — confirm in load_demo_dataset)
        if data_path[-4:] == '.pkl':
            raise NotImplementedError()
        else:
            from diffusion_policy.utils import load_demo_dataset
            trajectories = load_demo_dataset(data_path, num_traj=num_traj, concat=False)
            # trajectories['observations'] is a list of np.ndarray (L+1, obs_dim)
            # trajectories['actions'] is a list of np.ndarray (L, act_dim)

        # Move every trajectory array to the target device up front; training
        # then never pays a host-to-device copy per batch.
        for k, v in trajectories.items():
            for i in range(len(v)):
                trajectories[k][i] = torch.Tensor(v[i]).to(device)

        # Pre-compute all possible (traj_idx, start, end) tuples, this is very specific to Diffusion Policy
        if 'delta_pos' in args.control_mode or args.control_mode == 'base_pd_joint_vel_arm_pd_joint_vel':
            # Zero action for every arm joint (all action dims except the last,
            # which is the gripper): keeps the arm still when padding past the
            # end of a trajectory in 'delta_pos' control mode.
            self.pad_action_arm = torch.zeros((trajectories['actions'][0].shape[1]-1,), device=device)
            # gripper action needs to be copied from the last action
        # else:
        #     raise NotImplementedError(f'Control Mode {args.control_mode} not supported')
        self.obs_horizon, self.pred_horizon = obs_horizon, pred_horizon = args.obs_horizon, args.pred_horizon
        self.slices = []
        num_traj = len(trajectories['actions'])
        total_transitions = 0
        for traj_idx in range(num_traj):
            L = trajectories['actions'][traj_idx].shape[0]
            assert trajectories['observations'][traj_idx].shape[0] == L + 1
            total_transitions += L

            # Window layout (example with obs_horizon=2, act_horizon=8, pred_horizon=16):
            # |o|o|                              observations: 2
            # | |a|a|a|a|a|a|a|a|                actions executed: 8
            # |p|p|p|p|p|p|p|p|p|p|p|p|p|p|p|p|  actions predicted: 16
            pad_before = obs_horizon - 1
            # Pad before the trajectory, so the first action of an episode is in "actions executed"
            # obs_horizon - 1 is the number of "not used actions"
            pad_after = pred_horizon - obs_horizon
            # Pad after the trajectory, so all the observations are utilized in training
            # Note that in the original code, pad_after = act_horizon - 1, but I think this is not the best choice
            self.slices += [
                (traj_idx, start, start + pred_horizon) for start in range(-pad_before, L - pred_horizon + pad_after)
            ]  # slice indices follow convention [start, end)

        print(f"Total transitions: {total_transitions}, Total obs sequences: {len(self.slices)}")

        self.trajectories = trajectories

    def __getitem__(self, index):
        """Return one training window: obs of shape (obs_horizon, obs_dim) and
        actions of shape (pred_horizon, act_dim), replicating edge frames to
        pad before the episode start and holding the robot still to pad after
        the episode end."""
        traj_idx, start, end = self.slices[index]
        L, act_dim = self.trajectories['actions'][traj_idx].shape

        obs_seq = self.trajectories['observations'][traj_idx][max(0, start):start+self.obs_horizon]
        # start+self.obs_horizon is at least 1
        act_seq = self.trajectories['actions'][traj_idx][max(0, start):end]
        if start < 0: # pad before the trajectory: repeat the first obs/action
            obs_seq = torch.cat([obs_seq[0].repeat(-start, 1), obs_seq], dim=0)
            act_seq = torch.cat([act_seq[0].repeat(-start, 1), act_seq], dim=0)
        if end > L: # pad after the trajectory: zero arm deltas, keep gripper command
            gripper_action = act_seq[-1, -1]
            pad_action = torch.cat((self.pad_action_arm, gripper_action[None]), dim=0)
            act_seq = torch.cat([act_seq, pad_action.repeat(end-L, 1)], dim=0)
            # making the robot (arm and gripper) stay still
        assert obs_seq.shape[0] == self.obs_horizon and act_seq.shape[0] == self.pred_horizon
        return {
            'observations': obs_seq,
            'actions': act_seq,
        }

    def __len__(self):
        # One sample per pre-computed (traj_idx, start, end) slice.
        return len(self.slices)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
class Agent(nn.Module):
    """Diffusion Policy agent: a conditional 1D U-Net noise-prediction
    network driven by a DDPM scheduler.

    The agent conditions on a flattened history of ``obs_horizon`` state
    observations (FiLM global conditioning) and denoises an action sequence
    of length ``pred_horizon``, of which only ``act_horizon`` actions are
    returned for execution.
    """

    def __init__(self, env, args):
        """
        Args:
            env: vectorized evaluation env; its single_observation_space must
                be (obs_horizon, obs_dim) and single_action_space must be a
                1-D box normalized to [-1, 1].
            args: parsed Args dataclass with the horizons and U-Net sizes.
        """
        super().__init__()
        self.obs_horizon = args.obs_horizon
        self.act_horizon = args.act_horizon
        self.pred_horizon = args.pred_horizon
        assert len(env.single_observation_space.shape) == 2  # (obs_horizon, obs_dim)
        assert len(env.single_action_space.shape) == 1  # (act_dim, )
        assert (env.single_action_space.high == 1).all() and (env.single_action_space.low == -1).all()
        # denoising results will be clipped to [-1,1], so the action should be in [-1,1] as well
        self.act_dim = env.single_action_space.shape[0]

        self.noise_pred_net = ConditionalUnet1D(
            input_dim=self.act_dim,  # act_horizon is not used (U-Net doesn't care)
            global_cond_dim=np.prod(env.single_observation_space.shape),  # obs_horizon * obs_dim
            diffusion_step_embed_dim=args.diffusion_step_embed_dim,
            down_dims=args.unet_dims,
            n_groups=args.n_groups,
        )
        self.num_diffusion_iters = 100
        self.noise_scheduler = DDPMScheduler(
            num_train_timesteps=self.num_diffusion_iters,
            beta_schedule='squaredcos_cap_v2',  # has big impact on performance, try not to change
            clip_sample=True,  # clip output to [-1,1] to improve stability
            prediction_type='epsilon'  # predict noise (instead of denoised action)
        )

    def compute_loss(self, obs_seq, action_seq):
        """Standard DDPM training loss (MSE between true and predicted noise).

        Args:
            obs_seq: (B, obs_horizon, obs_dim) observation windows.
            action_seq: (B, pred_horizon, act_dim) ground-truth action windows.

        Returns:
            Scalar MSE loss tensor.
        """
        B = obs_seq.shape[0]
        # FIX: derive the device from the input tensor instead of relying on a
        # module-level global `device` (which only exists when this file is run
        # as a script); behavior is identical in the training script.
        device = obs_seq.device

        # observation as FiLM conditioning
        obs_cond = obs_seq.flatten(start_dim=1)  # (B, obs_horizon * obs_dim)

        # sample noise to add to actions
        noise = torch.randn((B, self.pred_horizon, self.act_dim), device=device)

        # sample a diffusion iteration for each data point
        timesteps = torch.randint(
            0, self.noise_scheduler.config.num_train_timesteps,
            (B,), device=device
        ).long()

        # add noise to the clean actions according to the noise magnitude at
        # each diffusion iteration (this is the forward diffusion process)
        noisy_action_seq = self.noise_scheduler.add_noise(
            action_seq, noise, timesteps)

        # predict the noise residual
        noise_pred = self.noise_pred_net(
            noisy_action_seq, timesteps, global_cond=obs_cond)

        return F.mse_loss(noise_pred, noise)

    def get_action(self, obs_seq):
        """Run the full reverse-diffusion loop and return executable actions.

        Note: noise_scheduler.set_timesteps is not called because DDPM with
        inference_diffusion_steps == train_diffusion_steps can reuse the
        default timestep schedule.

        Args:
            obs_seq: (B, obs_horizon, obs_dim) observation windows.

        Returns:
            (B, act_horizon, act_dim) actions — the act_horizon-length slice
            of the denoised pred_horizon window aligned to the latest obs.
        """
        B = obs_seq.shape[0]
        with torch.no_grad():
            obs_cond = obs_seq.flatten(start_dim=1)  # (B, obs_horizon * obs_dim)

            # initialize action from Gaussian noise
            noisy_action_seq = torch.randn((B, self.pred_horizon, self.act_dim), device=obs_seq.device)

            for k in self.noise_scheduler.timesteps:
                # predict noise
                noise_pred = self.noise_pred_net(
                    sample=noisy_action_seq,
                    timestep=k,
                    global_cond=obs_cond,
                )

                # inverse diffusion step (remove noise)
                noisy_action_seq = self.noise_scheduler.step(
                    model_output=noise_pred,
                    timestep=k,
                    sample=noisy_action_seq,
                ).prev_sample

        # only take act_horizon number of actions
        start = self.obs_horizon - 1
        end = start + self.act_horizon
        return noisy_action_seq[:, start:end]  # (B, act_horizon, act_dim)
|
| 258 |
+
|
| 259 |
+
def save_ckpt(run_name, tag):
    """Write the current agent and EMA-agent weights to
    ``runs/<run_name>/checkpoints/<tag>.pt``.

    Relies on the module-level globals ``agent``, ``ema`` and ``ema_agent``
    created in the training entry point below.
    """
    ckpt_dir = f'runs/{run_name}/checkpoints'
    os.makedirs(ckpt_dir, exist_ok=True)
    # Sync the EMA shadow weights into ema_agent before serializing.
    ema.copy_to(ema_agent.parameters())
    state = {
        'agent': agent.state_dict(),
        'ema_agent': ema_agent.state_dict(),
    }
    torch.save(state, f'{ckpt_dir}/{tag}.pt')
|
| 266 |
+
|
| 267 |
+
if __name__ == "__main__":
    # Parse CLI arguments into Args and derive a unique run name
    # (env__exp__seed__timestamp) unless an explicit experiment name was given.
    args = tyro.cli(Args)
    if args.exp_name is None:
        args.exp_name = os.path.basename(__file__)[: -len(".py")]
        run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
    else:
        run_name = args.exp_name

    # Verify the demo dataset was collected with the same control mode we will
    # evaluate with, using the sidecar .json metadata next to the .h5 file.
    if args.demo_path.endswith('.h5'):
        import json
        json_file = args.demo_path[:-2] + 'json'
        with open(json_file, 'r') as f:
            demo_info = json.load(f)
        if 'control_mode' in demo_info['env_info']['env_kwargs']:
            control_mode = demo_info['env_info']['env_kwargs']['control_mode']
        elif 'control_mode' in demo_info['episodes'][0]:
            control_mode = demo_info['episodes'][0]['control_mode']
        else:
            raise Exception('Control mode not found in json')
        assert control_mode == args.control_mode, f"Control mode mismatched. Dataset has control mode {control_mode}, but args has control mode {args.control_mode}"
    # Horizon consistency: every executed action must fall inside the predicted window.
    assert args.obs_horizon + args.act_horizon - 1 <= args.pred_horizon
    assert args.obs_horizon >= 1 and args.act_horizon >= 1 and args.pred_horizon >= 1

    # TRY NOT TO MODIFY: seeding
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = args.torch_deterministic

    device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")

    # env setup (evaluation environments only — training is pure behavior cloning)
    env_kwargs = dict(control_mode=args.control_mode, reward_mode="sparse", obs_mode="state", render_mode="rgb_array")
    if args.max_episode_steps is not None:
        env_kwargs["max_episode_steps"] = args.max_episode_steps
    other_kwargs = dict(obs_horizon=args.obs_horizon)
    envs = make_eval_envs(args.env_id, args.num_eval_envs, args.sim_backend, env_kwargs, other_kwargs, video_dir=f'runs/{run_name}/videos' if args.capture_video else None)

    # Experiment tracking: optional Weights & Biases run + TensorBoard writer.
    if args.track:
        import wandb
        config = vars(args)
        config["eval_env_cfg"] = dict(**env_kwargs, num_envs=args.num_eval_envs, env_id=args.env_id, env_horizon=args.max_episode_steps or gym_utils.find_max_episode_steps_value(envs))
        wandb.init(
            project=args.wandb_project_name,
            entity=args.wandb_entity,
            sync_tensorboard=True,
            config=config,
            name=run_name,
            save_code=True,
            group="DiffusionPolicy",
            tags=["diffusion_policy"]
        )
    writer = SummaryWriter(f"runs/{run_name}")
    writer.add_text(
        "hyperparameters",
        "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
    )

    # dataloader setup: random windows without replacement;
    # IterationBasedBatchSampler caps training at exactly total_iters batches.
    dataset = SmallDemoDataset_DiffusionPolicy(args.demo_path, device, num_traj=args.num_demos)
    sampler = RandomSampler(dataset, replacement=False)
    batch_sampler = BatchSampler(sampler, batch_size=args.batch_size, drop_last=True)
    batch_sampler = IterationBasedBatchSampler(batch_sampler, args.total_iters)
    train_dataloader = DataLoader(
        dataset,
        batch_sampler=batch_sampler,
        num_workers=args.num_dataload_workers,
        worker_init_fn=lambda worker_id: worker_init_fn(worker_id, base_seed=args.seed),
    )
    if args.num_demos is None:
        args.num_demos = len(dataset)

    # agent setup
    agent = Agent(envs, args).to(device)
    optimizer = optim.AdamW(params=agent.parameters(),
                            lr=args.lr, betas=(0.95, 0.999), weight_decay=1e-6)

    # Cosine LR schedule with linear warmup
    lr_scheduler = get_scheduler(
        name='cosine',
        optimizer=optimizer,
        num_warmup_steps=500,
        num_training_steps=args.total_iters,
    )

    # Exponential Moving Average of the model weights: accelerates training
    # and improves stability; ema_agent holds the shadow copy used for eval.
    ema = EMAModel(parameters=agent.parameters(), power=0.75)
    ema_agent = Agent(envs, args).to(device)

    # ---------------------------------------------------------------------------- #
    # Training begins.
    # ---------------------------------------------------------------------------- #
    agent.train()

    best_eval_metrics = defaultdict(float)
    timings = defaultdict(float)

    for iteration, data_batch in enumerate(train_dataloader):
        # forward and compute loss (dataset tensors already live on `device`,
        # so no per-batch host-to-device copy is needed)
        total_loss = agent.compute_loss(
            obs_seq=data_batch['observations'],  # (B, obs_horizon, obs_dim)
            action_seq=data_batch['actions'],  # (B, pred_horizon, act_dim)
        )

        # backward
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        lr_scheduler.step()  # step lr scheduler every batch, this is different from standard pytorch behavior
        last_tick = time.time()

        # update Exponential Moving Average of the model weights
        ema.step(agent.parameters())
        # TRY NOT TO MODIFY: record rewards for plotting purposes
        if iteration % args.log_freq == 0:
            print(f"Iteration {iteration}, loss: {total_loss.item()}")
            writer.add_scalar("charts/learning_rate", optimizer.param_groups[0]["lr"], iteration)
            writer.add_scalar("losses/total_loss", total_loss.item(), iteration)
            for k, v in timings.items():
                writer.add_scalar(f"time/{k}", v, iteration)
        # Evaluation: roll out the EMA weights (not the raw weights) in the envs.
        if iteration % args.eval_freq == 0:
            last_tick = time.time()

            ema.copy_to(ema_agent.parameters())

            eval_metrics = evaluate(args.num_eval_episodes, ema_agent, envs, device, args.sim_backend)
            timings["eval"] += time.time() - last_tick

            print(f"Evaluated {len(eval_metrics['success_at_end'])} episodes")
            for k in eval_metrics.keys():
                eval_metrics[k] = np.mean(eval_metrics[k])
                writer.add_scalar(f"eval/{k}", eval_metrics[k], iteration)
                print(f"{k}: {eval_metrics[k]:.4f}")

            # Keep a "best so far" checkpoint per success metric.
            save_on_best_metrics = ["success_once", "success_at_end"]
            for k in save_on_best_metrics:
                if k in eval_metrics and eval_metrics[k] > best_eval_metrics[k]:
                    best_eval_metrics[k] = eval_metrics[k]
                    save_ckpt(run_name, f"best_eval_{k}")
                    print(f'New best {k}_rate: {eval_metrics[k]:.4f}. Saving checkpoint.')
        # Periodic checkpoint (only when --save-freq is given).
        if args.save_freq is not None and iteration % args.save_freq == 0:
            save_ckpt(run_name, str(iteration))
    envs.close()
    writer.close()
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/.gitignore
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/runs
|
| 2 |
+
/videos
|
| 3 |
+
/pretrained
|
| 4 |
+
/wandb
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/README.md
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Proximal Policy Optimization (PPO)
|
| 2 |
+
|
| 3 |
+
Code for running the PPO RL algorithm is adapted from [CleanRL](https://github.com/vwxyzjn/cleanrl/) and [LeanRL](https://github.com/pytorch-labs/LeanRL/). It is written to be single-file and easy to follow/read, and supports state-based RL and visual-based RL code.
|
| 4 |
+
|
| 5 |
+
Note that ManiSkill is still in beta, so we have not finalized training scripts for every pre-built task (some of which are simply too hard to solve with RL anyway).
|
| 6 |
+
|
| 7 |
+
Official baseline results can be run by using the scripts in the baselines.sh file. Results are organized and published to our [wandb report](https://api.wandb.ai/links/stonet2000/k6lz966q)
|
| 8 |
+
|
| 9 |
+
There is also now experimental support for PPO compiled and with CUDA Graphs enabled based on LeanRL. The code is in ppo_fast.py and you need to install [torchrl](https://github.com/pytorch/rl) and [tensordict](https://github.com/pytorch/tensordict/):
|
| 10 |
+
|
| 11 |
+
```bash
|
| 12 |
+
pip install torchrl tensordict
|
| 13 |
+
```
|
| 14 |
+
|
| 15 |
+
## State Based RL
|
| 16 |
+
|
| 17 |
+
Below is a sample of various commands you can run to train a state-based policy to solve various tasks with PPO that are lightly tuned already. The fastest one is the PushCube-v1 task which can take less than a minute to train on the GPU and the PickCube-v1 task which can take 2-5 minutes on the GPU.
|
| 18 |
+
|
| 19 |
+
The PPO baseline is not guaranteed to work for all tasks as some tasks do not have dense rewards yet or well tuned ones, or simply are too hard with standard PPO.
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
```bash
|
| 23 |
+
python ppo.py --env_id="PushCube-v1" \
|
| 24 |
+
--num_envs=2048 --update_epochs=8 --num_minibatches=32 \
|
| 25 |
+
--total_timesteps=2_000_000 --eval_freq=10 --num-steps=20
|
| 26 |
+
```
|
| 27 |
+
|
| 28 |
+
To evaluate, you can run
|
| 29 |
+
```bash
|
| 30 |
+
python ppo.py --env_id="PushCube-v1" \
|
| 31 |
+
--evaluate --checkpoint=path/to/model.pt \
|
| 32 |
+
--num_eval_envs=1 --num-eval-steps=1000
|
| 33 |
+
```
|
| 34 |
+
|
| 35 |
+
Note that with `--evaluate`, trajectories are saved from a GPU simulation. In order to support replaying these trajectories correctly with the `mani_skill.trajectory.replay_trajectory` tool for some tasks, the number of evaluation environments must be fixed to `1`. This is necessary in order to ensure reproducibility for tasks that have randomizations on geometry (e.g. PickSingleYCB). Other tasks without geometrical randomization like PushCube are fine and you can increase the number of evaluation environments.
|
| 36 |
+
|
| 37 |
+
The examples.sh file has a full list of tested commands for running state based PPO successfully on many tasks.
|
| 38 |
+
|
| 39 |
+
The results of running the baseline scripts for state based PPO are here: https://api.wandb.ai/links/stonet2000/k6lz966q.
|
| 40 |
+
|
| 41 |
+
## Visual (RGB) Based RL
|
| 42 |
+
|
| 43 |
+
Below is a sample of various commands for training an image-based policy with PPO that are lightly tuned. The fastest again is also PushCube-v1 which can take about 1-5 minutes and PickCube-v1 which takes 15-45 minutes. You will need to tune the `--num_envs` argument according to how much GPU memory you have as rendering visual observations uses a lot of memory. The settings below should all take less than 15GB of GPU memory. The examples.sh file has a full list of tested commands for running visual based PPO successfully on many tasks.
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
```bash
|
| 47 |
+
python ppo_rgb.py --env_id="PushCube-v1" \
|
| 48 |
+
--num_envs=256 --update_epochs=8 --num_minibatches=8 \
|
| 49 |
+
--total_timesteps=1_000_000 --eval_freq=10 --num-steps=20
|
| 50 |
+
python ppo_rgb.py --env_id="PickCube-v1" \
|
| 51 |
+
--num_envs=256 --update_epochs=8 --num_minibatches=8 \
|
| 52 |
+
--total_timesteps=10_000_000
|
| 53 |
+
python ppo_rgb.py --env_id="AnymalC-Reach-v1" \
|
| 54 |
+
--num_envs=256 --update_epochs=8 --num_minibatches=32 \
|
| 55 |
+
--total_timesteps=10_000_000 --num-steps=200 --num-eval-steps=200 \
|
| 56 |
+
--gamma=0.99 --gae_lambda=0.95
|
| 57 |
+
```
|
| 58 |
+
|
| 59 |
+
To evaluate a trained policy you can run
|
| 60 |
+
|
| 61 |
+
```bash
|
| 62 |
+
python ppo_rgb.py --env_id="PickCube-v1" \
|
| 63 |
+
--evaluate --checkpoint=path/to/model.pt \
|
| 64 |
+
--num_eval_envs=1 --num-eval-steps=1000
|
| 65 |
+
```
|
| 66 |
+
|
| 67 |
+
and it will save videos to the `path/to/test_videos`.
|
| 68 |
+
|
| 69 |
+
The examples.sh file has a full list of tested commands for running RGB based PPO successfully on many tasks.
|
| 70 |
+
|
| 71 |
+
The results of running the baseline scripts for RGB based PPO are here: https://api.wandb.ai/links/stonet2000/k6lz966q
|
| 72 |
+
|
| 73 |
+
## Visual (RGB+Depth) Based RL
|
| 74 |
+
|
| 75 |
+
WIP
|
| 76 |
+
|
| 77 |
+
## Visual (Pointcloud) Based RL
|
| 78 |
+
|
| 79 |
+
WIP
|
| 80 |
+
|
| 81 |
+
## Replaying Evaluation Trajectories
|
| 82 |
+
|
| 83 |
+
It might be useful to get some nicer looking videos. A simple way to do that is to first use the evaluation scripts provided above. It will then save a .h5 and .json file with a name equal to the date and time that you can then replay with different settings as so
|
| 84 |
+
|
| 85 |
+
```bash
|
| 86 |
+
python -m mani_skill.trajectory.replay_trajectory \
|
| 87 |
+
--traj-path=path/to/trajectory.h5 --use-env-states --shader="rt-fast" \
|
| 88 |
+
--save-video --allow-failure -o "none"
|
| 89 |
+
```
|
| 90 |
+
|
| 91 |
+
This will use environment states to replay trajectories, turn on the ray-tracer (There is also "rt" which is higher quality but slower), and save all videos including failed trajectories.
|
| 92 |
+
|
| 93 |
+
## Some Notes
|
| 94 |
+
|
| 95 |
+
- Evaluation with GPU simulation (especially with randomized objects) is a bit tricky. We recommend reading through [our docs](https://maniskill.readthedocs.io/en/latest/user_guide/reinforcement_learning/baselines.html#evaluation) on online RL evaluation in order to understand how to fairly evaluate policies with GPU simulation.
|
| 96 |
+
- Many tasks support visual observations, however we have not carefully verified yet if the camera poses for the tasks are setup in a way that makes it possible to solve some tasks from visual observations.
|
| 97 |
+
|
| 98 |
+
## Citation
|
| 99 |
+
|
| 100 |
+
If you use this baseline please cite the following
|
| 101 |
+
```
|
| 102 |
+
@article{DBLP:journals/corr/SchulmanWDRK17,
|
| 103 |
+
author = {John Schulman and
|
| 104 |
+
Filip Wolski and
|
| 105 |
+
Prafulla Dhariwal and
|
| 106 |
+
Alec Radford and
|
| 107 |
+
Oleg Klimov},
|
| 108 |
+
title = {Proximal Policy Optimization Algorithms},
|
| 109 |
+
journal = {CoRR},
|
| 110 |
+
volume = {abs/1707.06347},
|
| 111 |
+
year = {2017},
|
| 112 |
+
url = {http://arxiv.org/abs/1707.06347},
|
| 113 |
+
eprinttype = {arXiv},
|
| 114 |
+
eprint = {1707.06347},
|
| 115 |
+
timestamp = {Mon, 13 Aug 2018 16:47:34 +0200},
|
| 116 |
+
biburl = {https://dblp.org/rec/journals/corr/SchulmanWDRK17.bib},
|
| 117 |
+
bibsource = {dblp computer science bibliography, https://dblp.org}
|
| 118 |
+
}
|
| 119 |
+
```
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/baselines.sh
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Baseline results for PPO
#
# Launches the official ManiSkill PPO baseline runs: each task is trained
# once per seed with ppo_fast.py (state observations, CUDA-graph capture)
# or ppo_rgb.py (image observations), logging to Weights & Biases.

# Three fixed seeds used for every baseline run.
seeds=(9351 4796 1788)

### State Based PPO Baselines ###
for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="PushCube-v1" --seed=${seed} \
    --num_envs=4096 --num-steps=4 --update_epochs=8 --num_minibatches=32 \
    --total_timesteps=50_000_000 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-PushCube-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="PickCube-v1" --seed=${seed} \
    --num_envs=4096 --num-steps=4 --update_epochs=8 --num_minibatches=32 \
    --total_timesteps=50_000_000 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-PickCube-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="PushT-v1" --seed=${seed} \
    --num_envs=4096 --num-steps=16 --update_epochs=8 --num_minibatches=32 --gamma=0.99 \
    --total_timesteps=50_000_000 --num_eval_steps=100 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-PushT-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="StackCube-v1" --seed=${seed} \
    --num_envs=4096 --num-steps=16 --update_epochs=8 --num_minibatches=32 \
    --total_timesteps=50_000_000 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-StackCube-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="RollBall-v1" --seed=${seed} \
    --num_envs=4096 --num-steps=16 --update_epochs=8 --num_minibatches=32 --gamma=0.95 \
    --total_timesteps=50_000_000 --num-eval-steps=80 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-RollBall-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="PullCube-v1" --seed=${seed} \
    --num_envs=4096 --num-steps=4 --update_epochs=8 --num_minibatches=32 \
    --total_timesteps=50_000_000 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-PullCube-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="PokeCube-v1" --seed=${seed} \
    --num_envs=4096 --num-steps=4 --update_epochs=8 --num_minibatches=32 \
    --total_timesteps=50_000_000 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-PokeCube-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="LiftPegUpright-v1" --seed=${seed} \
    --num_envs=4096 --num-steps=4 --update_epochs=8 --num_minibatches=32 \
    --total_timesteps=50_000_000 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-LiftPegUpright-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="AnymalC-Reach-v1" --seed=${seed} \
    --num_envs=4096 --update_epochs=8 --num_minibatches=32 --gamma=0.99 --gae_lambda=0.95 \
    --total_timesteps=50_000_000 --num-steps=16 --num-eval-steps=200 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-AnymalC-Reach-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

# Harder task: longer budget (75M steps) and fewer parallel envs.
for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="PegInsertionSide-v1" --seed=${seed} \
    --num_envs=2048 --update_epochs=8 --num_minibatches=32 --gamma=0.97 --gae_lambda=0.95 \
    --total_timesteps=75_000_000 --num-steps=16 --num-eval-steps=100 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-PegInsertionSide-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="TwoRobotPickCube-v1" --seed=${seed} \
    --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
    --total_timesteps=50_000_000 --num-steps=100 --num-eval-steps=100 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-TwoRobotPickCube-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="UnitreeG1PlaceAppleInBowl-v1" --seed=${seed} \
    --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
    --total_timesteps=50_000_000 --num-steps=32 --num-eval-steps=100 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-UnitreeG1PlaceAppleInBowl-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

# Longest budget (100M steps) for the box-transport humanoid task.
for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="UnitreeG1TransportBox-v1" --seed=${seed} \
    --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
    --total_timesteps=100_000_000 --num-steps=32 --num-eval-steps=100 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-UnitreeG1TransportBox-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_fast.py --env_id="OpenCabinetDrawer-v1" --seed=${seed} \
    --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
    --total_timesteps=50_000_000 --num-steps=16 --num-eval-steps=100 \
    --num_eval_envs=16 \
    --save-model --cudagraphs --exp-name="ppo-OpenCabinetDrawer-v1-state-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

### RGB Based PPO Baselines ###
# Fewer parallel envs than the state runs: rendering image observations is
# memory-heavy.
for seed in ${seeds[@]}
do
  python ppo_rgb.py --env_id="PushCube-v1" --seed=${seed} \
    --num_envs=256 --update_epochs=8 --num_minibatches=8 \
    --total_timesteps=50_000_000 \
    --num_eval_envs=16 \
    --exp-name="ppo-PushCube-v1-rgb-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_rgb.py --env_id="PickCube-v1" --seed=${seed} \
    --num_envs=1024 --num-steps=16 --update_epochs=8 --num_minibatches=32 \
    --total_timesteps=50_000_000 \
    --num_eval_envs=16 \
    --exp-name="ppo-PickCube-v1-rgb-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_rgb.py --env_id="PushT-v1" --seed=${seed} \
    --num_envs=1024 --num-steps=16 --update_epochs=8 --num_minibatches=32 \
    --total_timesteps=50_000_000 --num_eval_steps=100 --gamma=0.99 \
    --num_eval_envs=16 \
    --exp-name="ppo-PushT-v1-rgb-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done

for seed in ${seeds[@]}
do
  python ppo_rgb.py --env_id="AnymalC-Reach-v1" --seed=${seed} \
    --num_envs=1024 --update_epochs=8 --num_minibatches=32 --gamma=0.99 --gae_lambda=0.95 \
    --total_timesteps=50_000_000 --num-steps=16 --num-eval-steps=200 \
    --num_eval_envs=16 --eval-reconfiguration-freq=0 \
    --exp-name="ppo-AnymalC-Reach-v1-rgb-${seed}-walltime_efficient" \
    --wandb_entity="stonet2000" --track
done
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/examples.sh
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is a giant collection of tested example commands for PPO.
# Note these are tuned for wall-time speed. For official baseline results, which
# run fairer comparisons of RL algorithms, see the baselines.sh file.

### State Based PPO ###
python ppo.py --env_id="PickCube-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=10_000_000
python ppo.py --env_id="StackCube-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=25_000_000
python ppo.py --env_id="PushT-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=25_000_000 --num-steps=100 --num_eval_steps=100 --gamma=0.99
python ppo.py --env_id="PickSingleYCB-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=25_000_000
python ppo.py --env_id="PegInsertionSide-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=250_000_000 --num-steps=100 --num-eval-steps=100
python ppo.py --env_id="TwoRobotPickCube-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=20_000_000 --num-steps=100 --num-eval-steps=100
python ppo.py --env_id="TwoRobotStackCube-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=40_000_000 --num-steps=100 --num-eval-steps=100
# TriFinger cube rotation, increasing difficulty levels; level 4 uses far more
# parallel envs and total timesteps than levels 0-3.
python ppo.py --env_id="TriFingerRotateCubeLevel0-v1" \
  --num_envs=128 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=50_000_000 --num-steps=250 --num-eval-steps=250
python ppo.py --env_id="TriFingerRotateCubeLevel1-v1" \
  --num_envs=128 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=50_000_000 --num-steps=250 --num-eval-steps=250
python ppo.py --env_id="TriFingerRotateCubeLevel2-v1" \
  --num_envs=128 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=50_000_000 --num-steps=250 --num-eval-steps=250
python ppo.py --env_id="TriFingerRotateCubeLevel3-v1" \
  --num_envs=128 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=50_000_000 --num-steps=250 --num-eval-steps=250
python ppo.py --env_id="TriFingerRotateCubeLevel4-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=500_000_000 --num-steps=250 --num-eval-steps=250
python ppo.py --env_id="PokeCube-v1" --update_epochs=8 --num_minibatches=32 \
  --num_envs=1024 --total_timesteps=5_000_000 --eval_freq=10 --num-steps=20
# dm-control-style (MS-*) tasks: long eval horizons and task-specific gammas.
python ppo.py --env_id="MS-CartpoleBalance-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=4_000_000 --num-steps=250 --num-eval-steps=1000 \
  --gamma=0.99 --gae_lambda=0.95 \
  --eval_freq=5

python ppo.py --env_id="MS-CartpoleSwingUp-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=10_000_000 --num-steps=250 --num-eval-steps=1000 \
  --gamma=0.99 --gae_lambda=0.95 \
  --eval_freq=5
python ppo.py --env_id="MS-AntWalk-v1" --num_envs=2048 --eval_freq=10 \
  --update_epochs=8 --num_minibatches=32 --total_timesteps=20_000_000 \
  --num_eval_steps=1000 --num_steps=200 --gamma=0.97 --ent_coef=1e-3
python ppo.py --env_id="MS-AntRun-v1" --num_envs=2048 --eval_freq=10 \
  --update_epochs=8 --num_minibatches=32 --total_timesteps=20_000_000 \
  --num_eval_steps=1000 --num_steps=200 --gamma=0.97 --ent_coef=1e-3
python ppo.py --env_id="MS-HumanoidStand-v1" --num_envs=2048 --eval_freq=10 \
  --update_epochs=8 --num_minibatches=32 --total_timesteps=40_000_000 \
  --num_eval_steps=1000 --num_steps=200 --gamma=0.95
python ppo.py --env_id="MS-HumanoidWalk-v1" --num_envs=2048 --eval_freq=10 \
  --update_epochs=8 --num_minibatches=32 --total_timesteps=80_000_000 \
  --num_eval_steps=1000 --num_steps=200 --gamma=0.97 --ent_coef=1e-3
python ppo.py --env_id="MS-HumanoidRun-v1" --num_envs=2048 --eval_freq=10 \
  --update_epochs=8 --num_minibatches=32 --total_timesteps=60_000_000 \
  --num_eval_steps=1000 --num_steps=200 --gamma=0.97 --ent_coef=1e-3
python ppo.py --env_id="UnitreeG1PlaceAppleInBowl-v1" \
  --num_envs=512 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=50_000_000 --num-steps=100 --num-eval-steps=100
# Legged-robot locomotion tasks.
python ppo.py --env_id="AnymalC-Reach-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=25_000_000 --num-steps=200 --num-eval-steps=200 \
  --gamma=0.99 --gae_lambda=0.95
python ppo.py --env_id="AnymalC-Spin-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=50_000_000 --num-steps=200 --num-eval-steps=200 \
  --gamma=0.99 --gae_lambda=0.95
python ppo.py --env_id="UnitreeGo2-Reach-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=50_000_000 --num-steps=200 --num-eval-steps=200 \
  --gamma=0.99 --gae_lambda=0.95
python ppo.py --env_id="UnitreeH1Stand-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=100_000_000 --num-steps=100 --num-eval-steps=1000 \
  --gamma=0.99 --gae_lambda=0.95
python ppo.py --env_id="UnitreeG1Stand-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=100_000_000 --num-steps=100 --num-eval-steps=1000 \
  --gamma=0.99 --gae_lambda=0.95

python ppo.py --env_id="OpenCabinetDrawer-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=10_000_000 --num-steps=100 --num-eval-steps=100

python ppo.py --env_id="RollBall-v1" \
  --num_envs=1024 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=20_000_000 --num-steps=80 --num_eval_steps=80 --gamma=0.95

### RGB Based PPO ###
# Image observations: fewer parallel envs than the state-based runs above.
python ppo_rgb.py --env_id="PushCube-v1" \
  --num_envs=256 --update_epochs=8 --num_minibatches=8 \
  --total_timesteps=1_000_000 --eval_freq=10 --num-steps=20
python ppo_rgb.py --env_id="PickCube-v1" \
  --num_envs=256 --update_epochs=8 --num_minibatches=8 \
  --total_timesteps=10_000_000
python ppo_rgb.py --env_id="AnymalC-Reach-v1" \
  --num_envs=256 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=10_000_000 --num-steps=200 --num-eval-steps=200 \
  --gamma=0.99 --gae_lambda=0.95
python ppo_rgb.py --env_id="PickSingleYCB-v1" \
  --num_envs=256 --update_epochs=8 --num_minibatches=8 \
  --total_timesteps=10_000_000
python ppo_rgb.py --env_id="PushT-v1" \
  --num_envs=256 --update_epochs=8 --num_minibatches=8 \
  --total_timesteps=25_000_000 --num-steps=100 --num_eval_steps=100 --gamma=0.99
# MS-* visual locomotion: state features excluded (--no-include-state).
python ppo_rgb.py --env_id="MS-AntWalk-v1" \
  --num_envs=256 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=5_000_000 --eval_freq=15 --num_eval_steps=1000 \
  --num_steps=200 --gamma=0.97 --no-include-state --render_mode="rgb_array" \
  --ent_coef=1e-3
python ppo_rgb.py --env_id="MS-AntRun-v1" \
  --num_envs=256 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=15_000_000 --eval_freq=15 --num_eval_steps=1000 \
  --num_steps=200 --gamma=0.97 --no-include-state --render_mode="rgb_array" \
  --ent_coef=1e-3
python ppo_rgb.py --env_id="MS-HumanoidRun-v1" \
  --num_envs=256 --update_epochs=8 --num_minibatches=32 \
  --total_timesteps=80_000_000 --eval_freq=15 --num_eval_steps=1000 \
  --num_steps=200 --gamma=0.98 --no-include-state --render_mode="rgb_array" \
  --ent_coef=1e-3
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/ppo.py
ADDED
|
@@ -0,0 +1,470 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict
|
| 2 |
+
import os
|
| 3 |
+
import random
|
| 4 |
+
import time
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from typing import Optional
|
| 7 |
+
|
| 8 |
+
import gymnasium as gym
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
import torch.nn as nn
|
| 12 |
+
import torch.optim as optim
|
| 13 |
+
import tyro
|
| 14 |
+
from torch.distributions.normal import Normal
|
| 15 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 16 |
+
|
| 17 |
+
# ManiSkill specific imports
|
| 18 |
+
import mani_skill.envs
|
| 19 |
+
from mani_skill.utils import gym_utils
|
| 20 |
+
from mani_skill.utils.wrappers.flatten import FlattenActionSpaceWrapper
|
| 21 |
+
from mani_skill.utils.wrappers.record import RecordEpisode
|
| 22 |
+
from mani_skill.vector.wrappers.gymnasium import ManiSkillVectorEnv
|
| 23 |
+
|
| 24 |
+
@dataclass
class Args:
    """Command-line configuration for state-based PPO, parsed with ``tyro.cli``.

    NOTE(review): the bare-string field docstrings below are consumed by tyro as
    per-flag help text, so their exact wording is part of the CLI surface.
    """

    exp_name: Optional[str] = None
    """the name of this experiment"""
    seed: int = 1
    """seed of the experiment"""
    torch_deterministic: bool = True
    """if toggled, `torch.backends.cudnn.deterministic=True`"""
    cuda: bool = True
    """if toggled, cuda will be enabled by default"""
    track: bool = False
    """if toggled, this experiment will be tracked with Weights and Biases"""
    wandb_project_name: str = "ManiSkill"
    """the wandb's project name"""
    wandb_entity: Optional[str] = None
    """the entity (team) of wandb's project"""
    capture_video: bool = True
    """whether to capture videos of the agent performances (check out `videos` folder)"""
    save_model: bool = True
    """whether to save model into the `runs/{run_name}` folder"""
    evaluate: bool = False
    """if toggled, only runs evaluation with the given model checkpoint and saves the evaluation trajectories"""
    checkpoint: Optional[str] = None
    """path to a pretrained checkpoint file to start evaluation/training from"""

    # Algorithm specific arguments
    env_id: str = "PickCube-v1"
    """the id of the environment"""
    total_timesteps: int = 10000000
    """total timesteps of the experiments"""
    learning_rate: float = 3e-4
    """the learning rate of the optimizer"""
    num_envs: int = 512
    """the number of parallel environments"""
    num_eval_envs: int = 8
    """the number of parallel evaluation environments"""
    partial_reset: bool = True
    """whether to let parallel environments reset upon termination instead of truncation"""
    eval_partial_reset: bool = False
    """whether to let parallel evaluation environments reset upon termination instead of truncation"""
    num_steps: int = 50
    """the number of steps to run in each environment per policy rollout"""
    num_eval_steps: int = 50
    """the number of steps to run in each evaluation environment during evaluation"""
    reconfiguration_freq: Optional[int] = None
    """how often to reconfigure the environment during training"""
    eval_reconfiguration_freq: Optional[int] = 1
    """for benchmarking purposes we want to reconfigure the eval environment each reset to ensure objects are randomized in some tasks"""
    control_mode: Optional[str] = "pd_joint_delta_pos"
    """the control mode to use for the environment"""
    anneal_lr: bool = False
    """Toggle learning rate annealing for policy and value networks"""
    gamma: float = 0.8
    """the discount factor gamma"""
    gae_lambda: float = 0.9
    """the lambda for the general advantage estimation"""
    num_minibatches: int = 32
    """the number of mini-batches"""
    update_epochs: int = 4
    """the K epochs to update the policy"""
    norm_adv: bool = True
    """Toggles advantages normalization"""
    clip_coef: float = 0.2
    """the surrogate clipping coefficient"""
    clip_vloss: bool = False
    """Toggles whether or not to use a clipped loss for the value function, as per the paper."""
    ent_coef: float = 0.0
    """coefficient of the entropy"""
    vf_coef: float = 0.5
    """coefficient of the value function"""
    max_grad_norm: float = 0.5
    """the maximum norm for the gradient clipping"""
    target_kl: float = 0.1
    """the target KL divergence threshold"""
    reward_scale: float = 1.0
    """Scale the reward by this factor"""
    eval_freq: int = 25
    """evaluation frequency in terms of iterations"""
    save_train_video_freq: Optional[int] = None
    """frequency to save training videos in terms of iterations"""
    # If toggled, use the finite-horizon GAE variant computed in the training
    # loop (normalized by the running sum of lambda powers) instead of the
    # standard recursive GAE.
    finite_horizon_gae: bool = False


    # to be filled in runtime
    batch_size: int = 0
    """the batch size (computed in runtime)"""
    minibatch_size: int = 0
    """the mini-batch size (computed in runtime)"""
    num_iterations: int = 0
    """the number of iterations (computed in runtime)"""
|
| 114 |
+
|
| 115 |
+
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    """Re-initialize ``layer`` in place and return it for call chaining.

    The weight matrix gets an orthogonal initialization scaled by ``std`` and
    the bias is filled with the constant ``bias_const``.
    """
    torch.nn.init.orthogonal_(layer.weight, gain=std)
    torch.nn.init.constant_(layer.bias, val=bias_const)
    return layer
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class Agent(nn.Module):
    """Actor-critic for continuous control.

    Two separate 3-hidden-layer tanh MLPs (width 256) map flat observations to
    a scalar value estimate and to the mean of a diagonal Gaussian policy; the
    log-std is a single learned parameter shared across states.
    """

    def __init__(self, envs):
        super().__init__()
        in_features = np.array(envs.single_observation_space.shape).prod()
        act_features = np.prod(envs.single_action_space.shape)

        def build_mlp(out_features, out_std):
            # Orthogonally-initialized tanh MLP; the output layer uses a
            # caller-supplied gain (small for the policy head).
            return nn.Sequential(
                layer_init(nn.Linear(in_features, 256)),
                nn.Tanh(),
                layer_init(nn.Linear(256, 256)),
                nn.Tanh(),
                layer_init(nn.Linear(256, 256)),
                nn.Tanh(),
                layer_init(nn.Linear(256, out_features), std=out_std),
            )

        # NOTE: critic is built before the actor so the RNG draws used for
        # initialization happen in the same order as before.
        self.critic = build_mlp(1, np.sqrt(2))
        self.actor_mean = build_mlp(act_features, 0.01 * np.sqrt(2))
        self.actor_logstd = nn.Parameter(torch.ones(1, act_features) * -0.5)

    def get_value(self, x):
        """Return the state-value estimate V(x)."""
        return self.critic(x)

    def _policy_dist(self, x):
        # Build the diagonal Gaussian over actions for observations x.
        mean = self.actor_mean(x)
        std = self.actor_logstd.expand_as(mean).exp()
        return Normal(mean, std), mean

    def get_action(self, x, deterministic=False):
        """Sample an action (or return the policy mean when deterministic)."""
        dist, mean = self._policy_dist(x)
        if deterministic:
            return mean
        return dist.sample()

    def get_action_and_value(self, x, action=None):
        """Return (action, log-prob, entropy, value); samples if no action given."""
        dist, _ = self._policy_dist(x)
        if action is None:
            action = dist.sample()
        log_prob = dist.log_prob(action).sum(1)
        entropy = dist.entropy().sum(1)
        return action, log_prob, entropy, self.critic(x)
|
| 162 |
+
|
| 163 |
+
class Logger:
    """Scalar-metric logger that writes to TensorBoard and, optionally, wandb."""

    def __init__(self, log_wandb=False, tensorboard: SummaryWriter = None) -> None:
        # Keep both sinks; wandb is only touched when log_wandb is True.
        self.log_wandb = log_wandb
        self.writer = tensorboard

    def add_scalar(self, tag, scalar_value, step):
        """Record one scalar under ``tag`` at global step ``step``."""
        if self.log_wandb:
            # `wandb` is imported at module scope only when tracking is enabled.
            wandb.log({tag: scalar_value}, step=step)
        self.writer.add_scalar(tag, scalar_value, step)

    def close(self):
        """Flush and close the underlying TensorBoard writer."""
        self.writer.close()
|
| 173 |
+
|
| 174 |
+
if __name__ == "__main__":
|
| 175 |
+
args = tyro.cli(Args)
|
| 176 |
+
args.batch_size = int(args.num_envs * args.num_steps)
|
| 177 |
+
args.minibatch_size = int(args.batch_size // args.num_minibatches)
|
| 178 |
+
args.num_iterations = args.total_timesteps // args.batch_size
|
| 179 |
+
if args.exp_name is None:
|
| 180 |
+
args.exp_name = os.path.basename(__file__)[: -len(".py")]
|
| 181 |
+
run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
|
| 182 |
+
else:
|
| 183 |
+
run_name = args.exp_name
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
# TRY NOT TO MODIFY: seeding
|
| 187 |
+
random.seed(args.seed)
|
| 188 |
+
np.random.seed(args.seed)
|
| 189 |
+
torch.manual_seed(args.seed)
|
| 190 |
+
torch.backends.cudnn.deterministic = args.torch_deterministic
|
| 191 |
+
|
| 192 |
+
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
|
| 193 |
+
|
| 194 |
+
# env setup
|
| 195 |
+
env_kwargs = dict(obs_mode="state", render_mode="rgb_array", sim_backend="physx_cuda")
|
| 196 |
+
if args.control_mode is not None:
|
| 197 |
+
env_kwargs["control_mode"] = args.control_mode
|
| 198 |
+
envs = gym.make(args.env_id, num_envs=args.num_envs if not args.evaluate else 1, reconfiguration_freq=args.reconfiguration_freq, **env_kwargs)
|
| 199 |
+
eval_envs = gym.make(args.env_id, num_envs=args.num_eval_envs, reconfiguration_freq=args.eval_reconfiguration_freq, **env_kwargs)
|
| 200 |
+
if isinstance(envs.action_space, gym.spaces.Dict):
|
| 201 |
+
envs = FlattenActionSpaceWrapper(envs)
|
| 202 |
+
eval_envs = FlattenActionSpaceWrapper(eval_envs)
|
| 203 |
+
if args.capture_video:
|
| 204 |
+
eval_output_dir = f"runs/{run_name}/videos"
|
| 205 |
+
if args.evaluate:
|
| 206 |
+
eval_output_dir = f"{os.path.dirname(args.checkpoint)}/test_videos"
|
| 207 |
+
print(f"Saving eval videos to {eval_output_dir}")
|
| 208 |
+
if args.save_train_video_freq is not None:
|
| 209 |
+
save_video_trigger = lambda x : (x // args.num_steps) % args.save_train_video_freq == 0
|
| 210 |
+
envs = RecordEpisode(envs, output_dir=f"runs/{run_name}/train_videos", save_trajectory=False, save_video_trigger=save_video_trigger, max_steps_per_video=args.num_steps, video_fps=30)
|
| 211 |
+
eval_envs = RecordEpisode(eval_envs, output_dir=eval_output_dir, save_trajectory=args.evaluate, trajectory_name="trajectory", max_steps_per_video=args.num_eval_steps, video_fps=30)
|
| 212 |
+
envs = ManiSkillVectorEnv(envs, args.num_envs, ignore_terminations=not args.partial_reset, record_metrics=True)
|
| 213 |
+
eval_envs = ManiSkillVectorEnv(eval_envs, args.num_eval_envs, ignore_terminations=not args.eval_partial_reset, record_metrics=True)
|
| 214 |
+
assert isinstance(envs.single_action_space, gym.spaces.Box), "only continuous action space is supported"
|
| 215 |
+
|
| 216 |
+
max_episode_steps = gym_utils.find_max_episode_steps_value(envs._env)
|
| 217 |
+
logger = None
|
| 218 |
+
if not args.evaluate:
|
| 219 |
+
print("Running training")
|
| 220 |
+
if args.track:
|
| 221 |
+
import wandb
|
| 222 |
+
config = vars(args)
|
| 223 |
+
config["env_cfg"] = dict(**env_kwargs, num_envs=args.num_envs, env_id=args.env_id, reward_mode="normalized_dense", env_horizon=max_episode_steps, partial_reset=args.partial_reset)
|
| 224 |
+
config["eval_env_cfg"] = dict(**env_kwargs, num_envs=args.num_eval_envs, env_id=args.env_id, reward_mode="normalized_dense", env_horizon=max_episode_steps, partial_reset=False)
|
| 225 |
+
wandb.init(
|
| 226 |
+
project=args.wandb_project_name,
|
| 227 |
+
entity=args.wandb_entity,
|
| 228 |
+
sync_tensorboard=False,
|
| 229 |
+
config=config,
|
| 230 |
+
name=run_name,
|
| 231 |
+
save_code=True,
|
| 232 |
+
group="PPO",
|
| 233 |
+
tags=["ppo", "walltime_efficient"]
|
| 234 |
+
)
|
| 235 |
+
writer = SummaryWriter(f"runs/{run_name}")
|
| 236 |
+
writer.add_text(
|
| 237 |
+
"hyperparameters",
|
| 238 |
+
"|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
|
| 239 |
+
)
|
| 240 |
+
logger = Logger(log_wandb=args.track, tensorboard=writer)
|
| 241 |
+
else:
|
| 242 |
+
print("Running evaluation")
|
| 243 |
+
|
| 244 |
+
agent = Agent(envs).to(device)
|
| 245 |
+
optimizer = optim.Adam(agent.parameters(), lr=args.learning_rate, eps=1e-5)
|
| 246 |
+
|
| 247 |
+
# ALGO Logic: Storage setup
|
| 248 |
+
obs = torch.zeros((args.num_steps, args.num_envs) + envs.single_observation_space.shape).to(device)
|
| 249 |
+
actions = torch.zeros((args.num_steps, args.num_envs) + envs.single_action_space.shape).to(device)
|
| 250 |
+
logprobs = torch.zeros((args.num_steps, args.num_envs)).to(device)
|
| 251 |
+
rewards = torch.zeros((args.num_steps, args.num_envs)).to(device)
|
| 252 |
+
dones = torch.zeros((args.num_steps, args.num_envs)).to(device)
|
| 253 |
+
values = torch.zeros((args.num_steps, args.num_envs)).to(device)
|
| 254 |
+
|
| 255 |
+
# TRY NOT TO MODIFY: start the game
|
| 256 |
+
global_step = 0
|
| 257 |
+
start_time = time.time()
|
| 258 |
+
next_obs, _ = envs.reset(seed=args.seed)
|
| 259 |
+
eval_obs, _ = eval_envs.reset(seed=args.seed)
|
| 260 |
+
next_done = torch.zeros(args.num_envs, device=device)
|
| 261 |
+
print(f"####")
|
| 262 |
+
print(f"args.num_iterations={args.num_iterations} args.num_envs={args.num_envs} args.num_eval_envs={args.num_eval_envs}")
|
| 263 |
+
print(f"args.minibatch_size={args.minibatch_size} args.batch_size={args.batch_size} args.update_epochs={args.update_epochs}")
|
| 264 |
+
print(f"####")
|
| 265 |
+
action_space_low, action_space_high = torch.from_numpy(envs.single_action_space.low).to(device), torch.from_numpy(envs.single_action_space.high).to(device)
|
| 266 |
+
def clip_action(action: torch.Tensor):
|
| 267 |
+
return torch.clamp(action.detach(), action_space_low, action_space_high)
|
| 268 |
+
|
| 269 |
+
if args.checkpoint:
|
| 270 |
+
agent.load_state_dict(torch.load(args.checkpoint))
|
| 271 |
+
|
| 272 |
+
for iteration in range(1, args.num_iterations + 1):
|
| 273 |
+
print(f"Epoch: {iteration}, global_step={global_step}")
|
| 274 |
+
final_values = torch.zeros((args.num_steps, args.num_envs), device=device)
|
| 275 |
+
agent.eval()
|
| 276 |
+
if iteration % args.eval_freq == 1:
|
| 277 |
+
print("Evaluating")
|
| 278 |
+
eval_obs, _ = eval_envs.reset()
|
| 279 |
+
eval_metrics = defaultdict(list)
|
| 280 |
+
num_episodes = 0
|
| 281 |
+
for _ in range(args.num_eval_steps):
|
| 282 |
+
with torch.no_grad():
|
| 283 |
+
eval_obs, eval_rew, eval_terminations, eval_truncations, eval_infos = eval_envs.step(agent.get_action(eval_obs, deterministic=True))
|
| 284 |
+
if "final_info" in eval_infos:
|
| 285 |
+
mask = eval_infos["_final_info"]
|
| 286 |
+
num_episodes += mask.sum()
|
| 287 |
+
for k, v in eval_infos["final_info"]["episode"].items():
|
| 288 |
+
eval_metrics[k].append(v)
|
| 289 |
+
print(f"Evaluated {args.num_eval_steps * args.num_eval_envs} steps resulting in {num_episodes} episodes")
|
| 290 |
+
for k, v in eval_metrics.items():
|
| 291 |
+
mean = torch.stack(v).float().mean()
|
| 292 |
+
if logger is not None:
|
| 293 |
+
logger.add_scalar(f"eval/{k}", mean, global_step)
|
| 294 |
+
print(f"eval_{k}_mean={mean}")
|
| 295 |
+
if args.evaluate:
|
| 296 |
+
break
|
| 297 |
+
if args.save_model and iteration % args.eval_freq == 1:
|
| 298 |
+
model_path = f"runs/{run_name}/ckpt_{iteration}.pt"
|
| 299 |
+
torch.save(agent.state_dict(), model_path)
|
| 300 |
+
print(f"model saved to {model_path}")
|
| 301 |
+
# Annealing the rate if instructed to do so.
|
| 302 |
+
if args.anneal_lr:
|
| 303 |
+
frac = 1.0 - (iteration - 1.0) / args.num_iterations
|
| 304 |
+
lrnow = frac * args.learning_rate
|
| 305 |
+
optimizer.param_groups[0]["lr"] = lrnow
|
| 306 |
+
|
| 307 |
+
rollout_time = time.time()
|
| 308 |
+
for step in range(0, args.num_steps):
|
| 309 |
+
global_step += args.num_envs
|
| 310 |
+
obs[step] = next_obs
|
| 311 |
+
dones[step] = next_done
|
| 312 |
+
|
| 313 |
+
# ALGO LOGIC: action logic
|
| 314 |
+
with torch.no_grad():
|
| 315 |
+
action, logprob, _, value = agent.get_action_and_value(next_obs)
|
| 316 |
+
values[step] = value.flatten()
|
| 317 |
+
actions[step] = action
|
| 318 |
+
logprobs[step] = logprob
|
| 319 |
+
|
| 320 |
+
# TRY NOT TO MODIFY: execute the game and log data.
|
| 321 |
+
next_obs, reward, terminations, truncations, infos = envs.step(clip_action(action))
|
| 322 |
+
next_done = torch.logical_or(terminations, truncations).to(torch.float32)
|
| 323 |
+
rewards[step] = reward.view(-1) * args.reward_scale
|
| 324 |
+
|
| 325 |
+
if "final_info" in infos:
|
| 326 |
+
final_info = infos["final_info"]
|
| 327 |
+
done_mask = infos["_final_info"]
|
| 328 |
+
for k, v in final_info["episode"].items():
|
| 329 |
+
logger.add_scalar(f"train/{k}", v[done_mask].float().mean(), global_step)
|
| 330 |
+
with torch.no_grad():
|
| 331 |
+
final_values[step, torch.arange(args.num_envs, device=device)[done_mask]] = agent.get_value(infos["final_observation"][done_mask]).view(-1)
|
| 332 |
+
rollout_time = time.time() - rollout_time
|
| 333 |
+
# bootstrap value according to termination and truncation
|
| 334 |
+
with torch.no_grad():
|
| 335 |
+
next_value = agent.get_value(next_obs).reshape(1, -1)
|
| 336 |
+
advantages = torch.zeros_like(rewards).to(device)
|
| 337 |
+
lastgaelam = 0
|
| 338 |
+
for t in reversed(range(args.num_steps)):
|
| 339 |
+
if t == args.num_steps - 1:
|
| 340 |
+
next_not_done = 1.0 - next_done
|
| 341 |
+
nextvalues = next_value
|
| 342 |
+
else:
|
| 343 |
+
next_not_done = 1.0 - dones[t + 1]
|
| 344 |
+
nextvalues = values[t + 1]
|
| 345 |
+
real_next_values = next_not_done * nextvalues + final_values[t] # t instead of t+1
|
| 346 |
+
# next_not_done means nextvalues is computed from the correct next_obs
|
| 347 |
+
# if next_not_done is 1, final_values is always 0
|
| 348 |
+
# if next_not_done is 0, then use final_values, which is computed according to bootstrap_at_done
|
| 349 |
+
if args.finite_horizon_gae:
|
| 350 |
+
"""
|
| 351 |
+
See GAE paper equation(16) line 1, we will compute the GAE based on this line only
|
| 352 |
+
1 *( -V(s_t) + r_t + gamma * V(s_{t+1}) )
|
| 353 |
+
lambda *( -V(s_t) + r_t + gamma * r_{t+1} + gamma^2 * V(s_{t+2}) )
|
| 354 |
+
lambda^2 *( -V(s_t) + r_t + gamma * r_{t+1} + gamma^2 * r_{t+2} + ... )
|
| 355 |
+
lambda^3 *( -V(s_t) + r_t + gamma * r_{t+1} + gamma^2 * r_{t+2} + gamma^3 * r_{t+3}
|
| 356 |
+
We then normalize it by the sum of the lambda^i (instead of 1-lambda)
|
| 357 |
+
"""
|
| 358 |
+
if t == args.num_steps - 1: # initialize
|
| 359 |
+
lam_coef_sum = 0.
|
| 360 |
+
reward_term_sum = 0. # the sum of the second term
|
| 361 |
+
value_term_sum = 0. # the sum of the third term
|
| 362 |
+
lam_coef_sum = lam_coef_sum * next_not_done
|
| 363 |
+
reward_term_sum = reward_term_sum * next_not_done
|
| 364 |
+
value_term_sum = value_term_sum * next_not_done
|
| 365 |
+
|
| 366 |
+
lam_coef_sum = 1 + args.gae_lambda * lam_coef_sum
|
| 367 |
+
reward_term_sum = args.gae_lambda * args.gamma * reward_term_sum + lam_coef_sum * rewards[t]
|
| 368 |
+
value_term_sum = args.gae_lambda * args.gamma * value_term_sum + args.gamma * real_next_values
|
| 369 |
+
|
| 370 |
+
advantages[t] = (reward_term_sum + value_term_sum) / lam_coef_sum - values[t]
|
| 371 |
+
else:
|
| 372 |
+
delta = rewards[t] + args.gamma * real_next_values - values[t]
|
| 373 |
+
advantages[t] = lastgaelam = delta + args.gamma * args.gae_lambda * next_not_done * lastgaelam # Here actually we should use next_not_terminated, but we don't have lastgamlam if terminated
|
| 374 |
+
returns = advantages + values
|
| 375 |
+
|
| 376 |
+
# flatten the batch
|
| 377 |
+
b_obs = obs.reshape((-1,) + envs.single_observation_space.shape)
|
| 378 |
+
b_logprobs = logprobs.reshape(-1)
|
| 379 |
+
b_actions = actions.reshape((-1,) + envs.single_action_space.shape)
|
| 380 |
+
b_advantages = advantages.reshape(-1)
|
| 381 |
+
b_returns = returns.reshape(-1)
|
| 382 |
+
b_values = values.reshape(-1)
|
| 383 |
+
|
| 384 |
+
# Optimizing the policy and value network
|
| 385 |
+
agent.train()
|
| 386 |
+
b_inds = np.arange(args.batch_size)
|
| 387 |
+
clipfracs = []
|
| 388 |
+
update_time = time.time()
|
| 389 |
+
for epoch in range(args.update_epochs):
|
| 390 |
+
np.random.shuffle(b_inds)
|
| 391 |
+
for start in range(0, args.batch_size, args.minibatch_size):
|
| 392 |
+
end = start + args.minibatch_size
|
| 393 |
+
mb_inds = b_inds[start:end]
|
| 394 |
+
|
| 395 |
+
_, newlogprob, entropy, newvalue = agent.get_action_and_value(b_obs[mb_inds], b_actions[mb_inds])
|
| 396 |
+
logratio = newlogprob - b_logprobs[mb_inds]
|
| 397 |
+
ratio = logratio.exp()
|
| 398 |
+
|
| 399 |
+
with torch.no_grad():
|
| 400 |
+
# calculate approx_kl http://joschu.net/blog/kl-approx.html
|
| 401 |
+
old_approx_kl = (-logratio).mean()
|
| 402 |
+
approx_kl = ((ratio - 1) - logratio).mean()
|
| 403 |
+
clipfracs += [((ratio - 1.0).abs() > args.clip_coef).float().mean().item()]
|
| 404 |
+
|
| 405 |
+
if args.target_kl is not None and approx_kl > args.target_kl:
|
| 406 |
+
break
|
| 407 |
+
|
| 408 |
+
mb_advantages = b_advantages[mb_inds]
|
| 409 |
+
if args.norm_adv:
|
| 410 |
+
mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)
|
| 411 |
+
|
| 412 |
+
# Policy loss
|
| 413 |
+
pg_loss1 = -mb_advantages * ratio
|
| 414 |
+
pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - args.clip_coef, 1 + args.clip_coef)
|
| 415 |
+
pg_loss = torch.max(pg_loss1, pg_loss2).mean()
|
| 416 |
+
|
| 417 |
+
# Value loss
|
| 418 |
+
newvalue = newvalue.view(-1)
|
| 419 |
+
if args.clip_vloss:
|
| 420 |
+
v_loss_unclipped = (newvalue - b_returns[mb_inds]) ** 2
|
| 421 |
+
v_clipped = b_values[mb_inds] + torch.clamp(
|
| 422 |
+
newvalue - b_values[mb_inds],
|
| 423 |
+
-args.clip_coef,
|
| 424 |
+
args.clip_coef,
|
| 425 |
+
)
|
| 426 |
+
v_loss_clipped = (v_clipped - b_returns[mb_inds]) ** 2
|
| 427 |
+
v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
|
| 428 |
+
v_loss = 0.5 * v_loss_max.mean()
|
| 429 |
+
else:
|
| 430 |
+
v_loss = 0.5 * ((newvalue - b_returns[mb_inds]) ** 2).mean()
|
| 431 |
+
|
| 432 |
+
entropy_loss = entropy.mean()
|
| 433 |
+
loss = pg_loss - args.ent_coef * entropy_loss + v_loss * args.vf_coef
|
| 434 |
+
|
| 435 |
+
optimizer.zero_grad()
|
| 436 |
+
loss.backward()
|
| 437 |
+
nn.utils.clip_grad_norm_(agent.parameters(), args.max_grad_norm)
|
| 438 |
+
optimizer.step()
|
| 439 |
+
|
| 440 |
+
if args.target_kl is not None and approx_kl > args.target_kl:
|
| 441 |
+
break
|
| 442 |
+
|
| 443 |
+
update_time = time.time() - update_time
|
| 444 |
+
|
| 445 |
+
y_pred, y_true = b_values.cpu().numpy(), b_returns.cpu().numpy()
|
| 446 |
+
var_y = np.var(y_true)
|
| 447 |
+
explained_var = np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y
|
| 448 |
+
|
| 449 |
+
logger.add_scalar("charts/learning_rate", optimizer.param_groups[0]["lr"], global_step)
|
| 450 |
+
logger.add_scalar("losses/value_loss", v_loss.item(), global_step)
|
| 451 |
+
logger.add_scalar("losses/policy_loss", pg_loss.item(), global_step)
|
| 452 |
+
logger.add_scalar("losses/entropy", entropy_loss.item(), global_step)
|
| 453 |
+
logger.add_scalar("losses/old_approx_kl", old_approx_kl.item(), global_step)
|
| 454 |
+
logger.add_scalar("losses/approx_kl", approx_kl.item(), global_step)
|
| 455 |
+
logger.add_scalar("losses/clipfrac", np.mean(clipfracs), global_step)
|
| 456 |
+
logger.add_scalar("losses/explained_variance", explained_var, global_step)
|
| 457 |
+
print("SPS:", int(global_step / (time.time() - start_time)))
|
| 458 |
+
logger.add_scalar("charts/SPS", int(global_step / (time.time() - start_time)), global_step)
|
| 459 |
+
logger.add_scalar("time/step", global_step, global_step)
|
| 460 |
+
logger.add_scalar("time/update_time", update_time, global_step)
|
| 461 |
+
logger.add_scalar("time/rollout_time", rollout_time, global_step)
|
| 462 |
+
logger.add_scalar("time/rollout_fps", args.num_envs * args.num_steps / rollout_time, global_step)
|
| 463 |
+
if not args.evaluate:
|
| 464 |
+
if args.save_model:
|
| 465 |
+
model_path = f"runs/{run_name}/final_ckpt.pt"
|
| 466 |
+
torch.save(agent.state_dict(), model_path)
|
| 467 |
+
print(f"model saved to {model_path}")
|
| 468 |
+
logger.close()
|
| 469 |
+
envs.close()
|
| 470 |
+
eval_envs.close()
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/ppo_fast.py
ADDED
|
@@ -0,0 +1,522 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
from mani_skill.utils import gym_utils
|
| 4 |
+
from mani_skill.utils.wrappers.flatten import FlattenActionSpaceWrapper
|
| 5 |
+
from mani_skill.utils.wrappers.record import RecordEpisode
|
| 6 |
+
from mani_skill.vector.wrappers.gymnasium import ManiSkillVectorEnv
|
| 7 |
+
|
| 8 |
+
os.environ["TORCHDYNAMO_INLINE_INBUILT_NN_MODULES"] = "1"
|
| 9 |
+
|
| 10 |
+
import math
|
| 11 |
+
import os
|
| 12 |
+
import random
|
| 13 |
+
import time
|
| 14 |
+
from collections import defaultdict
|
| 15 |
+
from dataclasses import dataclass
|
| 16 |
+
from typing import Optional, Tuple
|
| 17 |
+
|
| 18 |
+
import gymnasium as gym
|
| 19 |
+
import numpy as np
|
| 20 |
+
import tensordict
|
| 21 |
+
import torch
|
| 22 |
+
import torch.nn as nn
|
| 23 |
+
import torch.optim as optim
|
| 24 |
+
import tqdm
|
| 25 |
+
import tyro
|
| 26 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 27 |
+
import wandb
|
| 28 |
+
from tensordict import from_module
|
| 29 |
+
from tensordict.nn import CudaGraphModule
|
| 30 |
+
from torch.distributions.normal import Normal
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@dataclass
class Args:
    """Command-line configuration for PPO training (parsed with `tyro.cli`)."""

    exp_name: Optional[str] = None
    """the name of this experiment"""
    seed: int = 1
    """seed of the experiment"""
    torch_deterministic: bool = True
    """if toggled, `torch.backends.cudnn.deterministic=False`"""
    cuda: bool = True
    """if toggled, cuda will be enabled by default"""
    track: bool = False
    """if toggled, this experiment will be tracked with Weights and Biases"""
    wandb_project_name: str = "ManiSkill"
    """the wandb's project name"""
    wandb_entity: Optional[str] = None
    """the entity (team) of wandb's project"""
    wandb_group: str = "PPO"
    """the group of the run for wandb"""
    capture_video: bool = True
    """whether to capture videos of the agent performances (check out `videos` folder)"""
    save_trajectory: bool = False
    """whether to save trajectory data into the `videos` folder"""
    save_model: bool = False
    """whether to save model into the `runs/{run_name}` folder"""
    evaluate: bool = False
    """if toggled, only runs evaluation with the given model checkpoint and saves the evaluation trajectories"""
    checkpoint: Optional[str] = None
    """path to a pretrained checkpoint file to start evaluation/training from"""

    # Environment specific arguments
    env_id: str = "PickCube-v1"
    """the id of the environment"""
    env_vectorization: str = "gpu"
    """the type of environment vectorization to use"""
    num_envs: int = 512
    """the number of parallel environments"""
    num_eval_envs: int = 16
    """the number of parallel evaluation environments"""
    partial_reset: bool = True
    """whether to let parallel environments reset upon termination instead of truncation"""
    eval_partial_reset: bool = False
    """whether to let parallel evaluation environments reset upon termination instead of truncation"""
    num_steps: int = 50
    """the number of steps to run in each environment per policy rollout"""
    num_eval_steps: int = 50
    """the number of steps to run in each evaluation environment during evaluation"""
    reconfiguration_freq: Optional[int] = None
    """how often to reconfigure the environment during training"""
    eval_reconfiguration_freq: Optional[int] = 1
    """for benchmarking purposes we want to reconfigure the eval environment each reset to ensure objects are randomized in some tasks"""
    eval_freq: int = 25
    """evaluation frequency in terms of iterations"""
    save_train_video_freq: Optional[int] = None
    """frequency to save training videos in terms of iterations"""
    control_mode: Optional[str] = "pd_joint_delta_pos"
    """the control mode to use for the environment"""

    # Algorithm specific arguments
    total_timesteps: int = 10000000
    """total timesteps of the experiments"""
    learning_rate: float = 3e-4
    """the learning rate of the optimizer"""
    anneal_lr: bool = False
    """Toggle learning rate annealing for policy and value networks"""
    gamma: float = 0.8
    """the discount factor gamma"""
    gae_lambda: float = 0.9
    """the lambda for the general advantage estimation"""
    num_minibatches: int = 32
    """the number of mini-batches"""
    update_epochs: int = 4
    """the K epochs to update the policy"""
    norm_adv: bool = True
    """Toggles advantages normalization"""
    clip_coef: float = 0.2
    """the surrogate clipping coefficient"""
    clip_vloss: bool = False
    """Toggles whether or not to use a clipped loss for the value function, as per the paper."""
    ent_coef: float = 0.0
    """coefficient of the entropy"""
    vf_coef: float = 0.5
    """coefficient of the value function"""
    max_grad_norm: float = 0.5
    """the maximum norm for the gradient clipping"""
    target_kl: float = 0.1
    """the target KL divergence threshold"""
    reward_scale: float = 1.0
    """Scale the reward by this factor"""
    # NOTE(review): undocumented flag; in this file's `gae` it is not read
    # (the finite-horizon variant only appears in the sibling ppo.py) — confirm before use.
    finite_horizon_gae: bool = False

    # to be filled in runtime
    batch_size: int = 0
    """the batch size (computed in runtime)"""
    minibatch_size: int = 0
    """the mini-batch size (computed in runtime)"""
    num_iterations: int = 0
    """the number of iterations (computed in runtime)"""

    # Torch optimizations
    compile: bool = False
    """whether to use torch.compile."""
    cudagraphs: bool = False
    """whether to use cudagraphs on top of compile."""
|
| 136 |
+
|
| 137 |
+
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    """Initialize ``layer`` in place and return it for call-chaining.

    Weights get an orthogonal init with gain ``std``; the bias is filled
    with the constant ``bias_const``. Assumes ``layer`` has both a
    ``weight`` and a ``bias`` tensor (e.g. ``nn.Linear``).
    """
    nn.init.orthogonal_(layer.weight, gain=std)
    nn.init.constant_(layer.bias, val=bias_const)
    return layer
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
class Agent(nn.Module):
    """Actor-critic MLP for PPO over flat state observations.

    Both heads are 3-hidden-layer (256-wide, Tanh) MLPs; the actor outputs a
    Gaussian mean per action dimension and a state-independent log-std
    parameter shared across the batch.
    """

    def __init__(self, n_obs, n_act, device=None):
        super().__init__()
        # Value head: obs -> scalar V(s).
        self.critic = nn.Sequential(
            layer_init(nn.Linear(n_obs, 256, device=device)),
            nn.Tanh(),
            layer_init(nn.Linear(256, 256, device=device)),
            nn.Tanh(),
            layer_init(nn.Linear(256, 256, device=device)),
            nn.Tanh(),
            layer_init(nn.Linear(256, 1, device=device)),
        )
        # Policy head: obs -> per-dimension action mean.
        self.actor_mean = nn.Sequential(
            layer_init(nn.Linear(n_obs, 256, device=device)),
            nn.Tanh(),
            layer_init(nn.Linear(256, 256, device=device)),
            nn.Tanh(),
            layer_init(nn.Linear(256, 256, device=device)),
            nn.Tanh(),
            # Small final-layer gain so initial actions stay near zero.
            layer_init(nn.Linear(256, n_act, device=device), std=0.01*np.sqrt(2)),
        )
        # Log standard deviation, broadcast over the batch dimension.
        self.actor_logstd = nn.Parameter(torch.zeros(1, n_act, device=device))

    def get_value(self, x):
        """Return the critic's value estimate V(x), shape (batch, 1)."""
        return self.critic(x)

    def get_action_and_value(self, obs, action=None):
        """Sample (or score a given) action and return PPO quantities.

        Returns (action, log_prob, entropy, value); log_prob and entropy are
        summed over action dimensions (independent-Gaussian policy).
        """
        action_mean = self.actor_mean(obs)
        action_logstd = self.actor_logstd.expand_as(action_mean)
        action_std = torch.exp(action_logstd)
        probs = Normal(action_mean, action_std)
        if action is None:
            # Manual reparameterized sampling (mean + std * N(0, 1)) — same
            # distribution as probs.sample(); presumably written this way to
            # stay capture/compile-friendly — TODO confirm.
            action = action_mean + action_std * torch.randn_like(action_mean)
        return action, probs.log_prob(action).sum(1), probs.entropy().sum(1), self.critic(obs)
|
| 177 |
+
|
| 178 |
+
class Logger:
    """Tiny logging facade that mirrors scalar metrics to TensorBoard and,
    optionally, Weights & Biases.

    Fix vs. original: `tensorboard` defaults to None, but the original
    dereferenced `self.writer` unconditionally, so a default-constructed
    Logger crashed with AttributeError on the first `add_scalar`/`close`.
    A missing writer is now treated as a no-op sink.
    """

    def __init__(self, log_wandb=False, tensorboard: "SummaryWriter" = None) -> None:
        # `tensorboard` should be a torch.utils.tensorboard.SummaryWriter
        # (or any object exposing the same add_scalar/close interface).
        self.writer = tensorboard
        self.log_wandb = log_wandb

    def add_scalar(self, tag, scalar_value, step):
        """Record one scalar under `tag` at global step `step`."""
        if self.log_wandb:
            # Only touches wandb when tracking was requested, so the module
            # is never imported/required otherwise.
            wandb.log({tag: scalar_value}, step=step)
        if self.writer is not None:
            self.writer.add_scalar(tag, scalar_value, step)

    def close(self):
        """Flush and close the underlying TensorBoard writer, if any."""
        if self.writer is not None:
            self.writer.close()
|
| 188 |
+
|
| 189 |
+
def gae(next_obs, next_done, container, final_values):
    """Compute GAE advantages and returns for one rollout, in place.

    Scans the rollout backwards (standard GAE recursion) and writes
    ``container["advantages"]`` and ``container["returns"]``.

    Relies on module-level globals: ``get_value`` (critic forward) and
    ``args`` (``num_steps``, ``gamma``, ``gae_lambda``).

    Args:
        next_obs: observation after the last rollout step, used to bootstrap.
        next_done: done flags after the last step (bool tensor, one per env).
        container: stacked TensorDict with "dones", "vals", "rewards"
            of shape (num_steps, num_envs) — assumed from usage; confirm.
        final_values: (num_steps, num_envs) value estimates of final
            observations for envs that terminated at each step, 0 elsewhere.
    """
    # bootstrap value if not done
    next_value = get_value(next_obs).reshape(-1)
    lastgaelam = 0
    # Per-step non-terminal masks, unbound along time for the reverse scan.
    nextnonterminals = (~container["dones"]).float().unbind(0)
    vals = container["vals"]
    vals_unbind = vals.unbind(0)
    rewards = container["rewards"].unbind(0)

    advantages = []
    nextnonterminal = (~next_done).float()
    nextvalues = next_value
    for t in range(args.num_steps - 1, -1, -1):
        cur_val = vals_unbind[t]
        # real_next_values = nextvalues * nextnonterminal
        # For terminated envs the bootstrap comes from final_values instead
        # of the (post-reset) next value.
        real_next_values = nextnonterminal * nextvalues + final_values[t] # t instead of t+1
        # TD residual: r_t + gamma * V(s_{t+1}) - V(s_t).
        delta = rewards[t] + args.gamma * real_next_values - cur_val
        advantages.append(delta + args.gamma * args.gae_lambda * nextnonterminal * lastgaelam)
        lastgaelam = advantages[-1]

        # Shift the "next" quantities one step back in time for iteration t-1.
        nextnonterminal = nextnonterminals[t]
        nextvalues = cur_val

    # Advantages were appended in reverse time order; restore t = 0..T-1.
    advantages = container["advantages"] = torch.stack(list(reversed(advantages)))
    container["returns"] = advantages + vals
    return container
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def rollout(obs, done):
    """Collect ``args.num_steps`` environment steps starting from ``obs``.

    Uses module-level globals: ``policy``, ``step_func``, ``agent``,
    ``logger``, ``args``, ``device``, ``global_step``.

    Returns:
        (next_obs, done, container, final_values) where ``container`` is a
        (num_steps, num_envs) stacked TensorDict of transitions and
        ``final_values`` holds critic estimates of final observations for
        envs that finished mid-rollout (0 elsewhere) — consumed by ``gae``.
    """
    ts = []
    final_values = torch.zeros((args.num_steps, args.num_envs), device=device)
    for step in range(args.num_steps):
        # ALGO LOGIC: action logic
        action, logprob, _, value = policy(obs=obs)

        # TRY NOT TO MODIFY: execute the game and log data.
        next_obs, reward, next_done, infos = step_func(action)

        if "final_info" in infos:
            # Some envs finished this step: log their episode stats and
            # record the bootstrap value of their final observation.
            final_info = infos["final_info"]
            done_mask = infos["_final_info"]
            for k, v in final_info["episode"].items():
                logger.add_scalar(f"train/{k}", v[done_mask].float().mean(), global_step)
            with torch.no_grad():
                final_values[step, torch.arange(args.num_envs, device=device)[done_mask]] = agent.get_value(infos["final_observation"][done_mask]).view(-1)

        ts.append(
            tensordict.TensorDict._new_unsafe(
                obs=obs,
                # cleanrl ppo examples associate the done with the previous obs (not the done resulting from action)
                dones=done,
                vals=value.flatten(),
                actions=action,
                logprobs=logprob,
                rewards=reward,
                batch_size=(args.num_envs,),
            )
        )
        # NOTE (stao): change here for gpu env
        # (redundant chained assignment — equivalent to `obs = next_obs`)
        obs = next_obs = next_obs
        done = next_done
    # NOTE (stao): need to do .to(device) i think? otherwise container.device is None, not sure if this affects anything
    container = torch.stack(ts, 0).to(device)
    return next_obs, done, container, final_values
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def update(obs, actions, logprobs, advantages, returns, vals):
    """Run one PPO minibatch gradient step (clipped surrogate + value loss).

    Uses module-level globals: ``agent``, ``optimizer``, ``args``.
    Inputs are flat minibatch tensors from the rollout container; the old
    log-probs/values (``logprobs``, ``vals``) anchor the clipping.

    Returns diagnostic tensors:
        (approx_kl, v_loss, pg_loss, entropy_loss, old_approx_kl,
         clipfrac, grad_norm).
    """
    optimizer.zero_grad()
    _, newlogprob, entropy, newvalue = agent.get_action_and_value(obs, actions)
    logratio = newlogprob - logprobs
    ratio = logratio.exp()

    with torch.no_grad():
        # calculate approx_kl http://joschu.net/blog/kl-approx.html
        old_approx_kl = (-logratio).mean()
        approx_kl = ((ratio - 1) - logratio).mean()
        # Fraction of samples whose ratio fell outside the clip range.
        clipfrac = ((ratio - 1.0).abs() > args.clip_coef).float().mean()

    if args.norm_adv:
        # Per-minibatch advantage standardization.
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

    # Policy loss
    pg_loss1 = -advantages * ratio
    pg_loss2 = -advantages * torch.clamp(ratio, 1 - args.clip_coef, 1 + args.clip_coef)
    pg_loss = torch.max(pg_loss1, pg_loss2).mean()

    # Value loss
    newvalue = newvalue.view(-1)
    if args.clip_vloss:
        # Clipped value loss: bound how far the new value can move from the
        # rollout-time value, take the worse of the two errors.
        v_loss_unclipped = (newvalue - returns) ** 2
        v_clipped = vals + torch.clamp(
            newvalue - vals,
            -args.clip_coef,
            args.clip_coef,
        )
        v_loss_clipped = (v_clipped - returns) ** 2
        v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
        v_loss = 0.5 * v_loss_max.mean()
    else:
        v_loss = 0.5 * ((newvalue - returns) ** 2).mean()

    entropy_loss = entropy.mean()
    loss = pg_loss - args.ent_coef * entropy_loss + v_loss * args.vf_coef

    loss.backward()
    # Global-norm gradient clipping; gn is the pre-clip norm.
    gn = nn.utils.clip_grad_norm_(agent.parameters(), args.max_grad_norm)
    optimizer.step()

    return approx_kl, v_loss.detach(), pg_loss.detach(), entropy_loss.detach(), old_approx_kl, clipfrac, gn
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
# Wrap `update` so it reads its inputs from / writes its outputs to a
# TensorDict by key — this lets the whole minibatch step be driven with a
# single TensorDict argument (and, presumably, makes it compatible with
# torch.compile / CudaGraphModule below — confirm).
update = tensordict.nn.TensorDictModule(
    update,
    in_keys=["obs", "actions", "logprobs", "advantages", "returns", "vals"],
    out_keys=["approx_kl", "v_loss", "pg_loss", "entropy_loss", "old_approx_kl", "clipfrac", "gn"],
)
|
| 305 |
+
|
| 306 |
+
# Script entry point: parse args, build (eval) envs, set up agent/optimizer,
# then alternate rollout -> GAE -> minibatch PPO updates, with periodic
# evaluation, checkpointing, and scalar logging.
if __name__ == "__main__":
    args = tyro.cli(Args)
    # if not args.evaluate: exit()

    # Derived sizes: keep batch_size an exact multiple of minibatch_size.
    batch_size = int(args.num_envs * args.num_steps)
    args.minibatch_size = batch_size // args.num_minibatches
    args.batch_size = args.num_minibatches * args.minibatch_size
    args.num_iterations = args.total_timesteps // args.batch_size
    if args.exp_name is None:
        args.exp_name = os.path.basename(__file__)[: -len(".py")]
        run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
    else:
        run_name = args.exp_name

    # TRY NOT TO MODIFY: seeding
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = args.torch_deterministic

    device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")

    ####### Environment setup #######
    env_kwargs = dict(obs_mode="state", render_mode="rgb_array", sim_backend="physx_cuda")
    if args.control_mode is not None:
        env_kwargs["control_mode"] = args.control_mode
    # In evaluate-only mode a single training env is created (it is unused for learning).
    envs = gym.make(args.env_id, num_envs=args.num_envs if not args.evaluate else 1, reconfiguration_freq=args.reconfiguration_freq, **env_kwargs)
    eval_envs = gym.make(args.env_id, num_envs=args.num_eval_envs, reconfiguration_freq=args.eval_reconfiguration_freq, human_render_camera_configs=dict(shader_pack="default"), **env_kwargs)
    if isinstance(envs.action_space, gym.spaces.Dict):
        envs = FlattenActionSpaceWrapper(envs)
        eval_envs = FlattenActionSpaceWrapper(eval_envs)
    if args.capture_video or args.save_trajectory:
        eval_output_dir = f"runs/{run_name}/videos"
        if args.evaluate:
            eval_output_dir = f"{os.path.dirname(args.checkpoint)}/test_videos"
        print(f"Saving eval trajectories/videos to {eval_output_dir}")
        if args.save_train_video_freq is not None:
            save_video_trigger = lambda x : (x // args.num_steps) % args.save_train_video_freq == 0
            envs = RecordEpisode(envs, output_dir=f"runs/{run_name}/train_videos", save_trajectory=False, save_video_trigger=save_video_trigger, max_steps_per_video=args.num_steps, video_fps=30)
        eval_envs = RecordEpisode(eval_envs, output_dir=eval_output_dir, save_trajectory=args.save_trajectory, save_video=args.capture_video, trajectory_name="trajectory", max_steps_per_video=args.num_eval_steps, video_fps=30)
    envs = ManiSkillVectorEnv(envs, args.num_envs, ignore_terminations=not args.partial_reset, record_metrics=True)
    eval_envs = ManiSkillVectorEnv(eval_envs, args.num_eval_envs, ignore_terminations=not args.eval_partial_reset, record_metrics=True)
    assert isinstance(envs.single_action_space, gym.spaces.Box), "only continuous action space is supported"

    max_episode_steps = gym_utils.find_max_episode_steps_value(envs._env)
    logger = None
    if not args.evaluate:
        print("Running training")
        if args.track:
            import wandb
            config = vars(args)
            config["env_cfg"] = dict(**env_kwargs, num_envs=args.num_envs, env_id=args.env_id, reward_mode="normalized_dense", env_horizon=max_episode_steps, partial_reset=args.partial_reset)
            config["eval_env_cfg"] = dict(**env_kwargs, num_envs=args.num_eval_envs, env_id=args.env_id, reward_mode="normalized_dense", env_horizon=max_episode_steps, partial_reset=False)
            wandb.init(
                project=args.wandb_project_name,
                entity=args.wandb_entity,
                sync_tensorboard=False,
                config=config,
                name=run_name,
                save_code=True,
                group=args.wandb_group,
                tags=["ppo", "walltime_efficient", f"GPU:{torch.cuda.get_device_name()}"]
            )
        writer = SummaryWriter(f"runs/{run_name}")
        writer.add_text(
            "hyperparameters",
            "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
        )
        logger = Logger(log_wandb=args.track, tensorboard=writer)
    else:
        print("Running evaluation")
    n_act = math.prod(envs.single_action_space.shape)
    n_obs = math.prod(envs.single_observation_space.shape)
    assert isinstance(envs.single_action_space, gym.spaces.Box), "only continuous action space is supported"

    # Register step as a special op not to graph break
    # @torch.library.custom_op("mylib::step", mutates_args=())
    def step_func(action: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # NOTE (stao): change here for gpu env
        # Collapse terminations/truncations into one "done" flag for rollout.
        next_obs, reward, terminations, truncations, info = envs.step(action)
        next_done = torch.logical_or(terminations, truncations)
        return next_obs, reward, next_done, info

    ####### Agent #######
    agent = Agent(n_obs, n_act, device=device)
    if args.checkpoint:
        agent.load_state_dict(torch.load(args.checkpoint))
    # Make a version of agent with detached params
    agent_inference = Agent(n_obs, n_act, device=device)
    agent_inference_p = from_module(agent).data
    agent_inference_p.to_module(agent_inference)

    ####### Optimizer #######
    optimizer = optim.Adam(
        agent.parameters(),
        # lr as a tensor + capturable=True are required for cudagraph capture
        # of optimizer.step() — presumably why this differs from the usual
        # float lr; confirm against torch.optim docs.
        lr=torch.tensor(args.learning_rate, device=device),
        eps=1e-5,
        capturable=args.cudagraphs and not args.compile,
    )

    ####### Executables #######
    # Define networks: wrapping the policy in a TensorDictModule allows us to use CudaGraphModule
    policy = agent_inference.get_action_and_value
    get_value = agent_inference.get_value

    # Compile policy
    if args.compile:
        policy = torch.compile(policy)
        gae = torch.compile(gae, fullgraph=True)
        update = torch.compile(update)

    if args.cudagraphs:
        policy = CudaGraphModule(policy)
        gae = CudaGraphModule(gae)
        update = CudaGraphModule(update)

    global_step = 0
    start_time = time.time()
    container_local = None
    next_obs = envs.reset()[0]
    next_done = torch.zeros(args.num_envs, device=device, dtype=torch.bool)
    pbar = tqdm.tqdm(range(1, args.num_iterations + 1))

    cumulative_times = defaultdict(float)

    for iteration in pbar:
        agent.eval()
        # Periodic evaluation (deterministic: acts with the policy mean).
        if iteration % args.eval_freq == 1:
            stime = time.perf_counter()
            eval_obs, _ = eval_envs.reset()
            eval_metrics = defaultdict(list)
            num_episodes = 0
            for _ in range(args.num_eval_steps):
                with torch.no_grad():
                    eval_obs, eval_rew, eval_terminations, eval_truncations, eval_infos = eval_envs.step(agent.actor_mean(eval_obs))
                    if "final_info" in eval_infos:
                        mask = eval_infos["_final_info"]
                        num_episodes += mask.sum()
                        for k, v in eval_infos["final_info"]["episode"].items():
                            eval_metrics[k].append(v)
            eval_metrics_mean = {}
            for k, v in eval_metrics.items():
                mean = torch.stack(v).float().mean()
                eval_metrics_mean[k] = mean
                if logger is not None:
                    logger.add_scalar(f"eval/{k}", mean, global_step)
            pbar.set_description(
                f"success_once: {eval_metrics_mean['success_once']:.2f}, "
                f"return: {eval_metrics_mean['return']:.2f}"
            )
            if logger is not None:
                eval_time = time.perf_counter() - stime
                cumulative_times["eval_time"] += eval_time
                logger.add_scalar("time/eval_time", eval_time, global_step)
            if args.evaluate:
                break
        if args.save_model and iteration % args.eval_freq == 1:
            model_path = f"runs/{run_name}/ckpt_{iteration}.pt"
            torch.save(agent.state_dict(), model_path)
            print(f"model saved to {model_path}")
        # Annealing the rate if instructed to do so.
        if args.anneal_lr:
            frac = 1.0 - (iteration - 1.0) / args.num_iterations
            lrnow = frac * args.learning_rate
            # lr is a tensor (see optimizer setup), so update it in place.
            optimizer.param_groups[0]["lr"].copy_(lrnow)

        torch.compiler.cudagraph_mark_step_begin()
        rollout_time = time.perf_counter()
        next_obs, next_done, container, final_values = rollout(next_obs, next_done)
        rollout_time = time.perf_counter() - rollout_time
        cumulative_times["rollout_time"] += rollout_time
        global_step += container.numel()

        update_time = time.perf_counter()
        container = gae(next_obs, next_done, container, final_values)
        # Flatten (num_steps, num_envs) into one batch dimension.
        container_flat = container.view(-1)

        # Optimizing the policy and value network
        clipfracs = []
        for epoch in range(args.update_epochs):
            b_inds = torch.randperm(container_flat.shape[0], device=device).split(args.minibatch_size)
            for b in b_inds:
                container_local = container_flat[b]

                out = update(container_local, tensordict_out=tensordict.TensorDict())
                clipfracs.append(out["clipfrac"])
                if args.target_kl is not None and out["approx_kl"] > args.target_kl:
                    break
            else:
                continue
            # for/else: reach here only via the inner break (KL exceeded) —
            # stop the remaining epochs too.
            break
        update_time = time.perf_counter() - update_time
        cumulative_times["update_time"] += update_time

        logger.add_scalar("charts/learning_rate", optimizer.param_groups[0]["lr"], global_step)
        logger.add_scalar("losses/value_loss", out["v_loss"].item(), global_step)
        logger.add_scalar("losses/policy_loss", out["pg_loss"].item(), global_step)
        logger.add_scalar("losses/entropy", out["entropy_loss"].item(), global_step)
        logger.add_scalar("losses/old_approx_kl", out["old_approx_kl"].item(), global_step)
        logger.add_scalar("losses/approx_kl", out["approx_kl"].item(), global_step)
        logger.add_scalar("losses/clipfrac", torch.stack(clipfracs).mean().cpu().item(), global_step)
        logger.add_scalar("charts/SPS", int(global_step / (time.time() - start_time)), global_step)
        logger.add_scalar("time/step", global_step, global_step)
        logger.add_scalar("time/update_time", update_time, global_step)
        logger.add_scalar("time/rollout_time", rollout_time, global_step)
        logger.add_scalar("time/rollout_fps", args.num_envs * args.num_steps / rollout_time, global_step)
        for k, v in cumulative_times.items():
            logger.add_scalar(f"time/total_{k}", v, global_step)
        logger.add_scalar("time/total_rollout+update_time", cumulative_times["rollout_time"] + cumulative_times["update_time"], global_step)
    if not args.evaluate:
        if args.save_model:
            model_path = f"runs/{run_name}/final_ckpt.pt"
            torch.save(agent.state_dict(), model_path)
            print(f"model saved to {model_path}")
        logger.close()
    envs.close()
    eval_envs.close()
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/ppo/ppo_rgb.py
ADDED
|
@@ -0,0 +1,594 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# docs and experiment results can be found at https://docs.cleanrl.dev/rl-algorithms/ppo/#ppo_continuous_actionpy
|
| 2 |
+
from collections import defaultdict
|
| 3 |
+
import os
|
| 4 |
+
import random
|
| 5 |
+
import time
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
import gymnasium as gym
|
| 10 |
+
import numpy as np
|
| 11 |
+
import torch
|
| 12 |
+
import torch.nn as nn
|
| 13 |
+
import torch.optim as optim
|
| 14 |
+
import tyro
|
| 15 |
+
from torch.distributions.normal import Normal
|
| 16 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 17 |
+
|
| 18 |
+
# ManiSkill specific imports
|
| 19 |
+
import mani_skill.envs
|
| 20 |
+
from mani_skill.utils import gym_utils
|
| 21 |
+
from mani_skill.utils.wrappers.flatten import FlattenActionSpaceWrapper, FlattenRGBDObservationWrapper
|
| 22 |
+
from mani_skill.utils.wrappers.record import RecordEpisode
|
| 23 |
+
from mani_skill.vector.wrappers.gymnasium import ManiSkillVectorEnv
|
| 24 |
+
|
| 25 |
+
@dataclass
class Args:
    """CLI configuration for RGB-observation PPO training (parsed by tyro).

    The triple-quoted string after each field is intentional: tyro renders it
    as the ``--help`` text for that flag.
    """

    exp_name: Optional[str] = None
    """the name of this experiment"""
    seed: int = 1
    """seed of the experiment"""
    torch_deterministic: bool = True
    """if toggled, `torch.backends.cudnn.deterministic=False`"""
    cuda: bool = True
    """if toggled, cuda will be enabled by default"""
    track: bool = False
    """if toggled, this experiment will be tracked with Weights and Biases"""
    wandb_project_name: str = "ManiSkill"
    """the wandb's project name"""
    wandb_entity: Optional[str] = None
    """the entity (team) of wandb's project"""
    wandb_group: str = "PPO"
    """the group of the run for wandb"""
    capture_video: bool = True
    """whether to capture videos of the agent performances (check out `videos` folder)"""
    save_model: bool = True
    """whether to save model into the `runs/{run_name}` folder"""
    evaluate: bool = False
    """if toggled, only runs evaluation with the given model checkpoint and saves the evaluation trajectories"""
    checkpoint: Optional[str] = None
    """path to a pretrained checkpoint file to start evaluation/training from"""
    render_mode: str = "all"
    """the environment rendering mode"""

    # Algorithm specific arguments
    env_id: str = "PickCube-v1"
    """the id of the environment"""
    include_state: bool = True
    """whether to include state information in observations"""
    total_timesteps: int = 10000000
    """total timesteps of the experiments"""
    learning_rate: float = 3e-4
    """the learning rate of the optimizer"""
    num_envs: int = 512
    """the number of parallel environments"""
    num_eval_envs: int = 8
    """the number of parallel evaluation environments"""
    partial_reset: bool = True
    """whether to let parallel environments reset upon termination instead of truncation"""
    eval_partial_reset: bool = False
    """whether to let parallel evaluation environments reset upon termination instead of truncation"""
    num_steps: int = 50
    """the number of steps to run in each environment per policy rollout"""
    num_eval_steps: int = 50
    """the number of steps to run in each evaluation environment during evaluation"""
    reconfiguration_freq: Optional[int] = None
    """how often to reconfigure the environment during training"""
    eval_reconfiguration_freq: Optional[int] = 1
    """for benchmarking purposes we want to reconfigure the eval environment each reset to ensure objects are randomized in some tasks"""
    control_mode: Optional[str] = "pd_joint_delta_pos"
    """the control mode to use for the environment"""
    anneal_lr: bool = False
    """Toggle learning rate annealing for policy and value networks"""
    gamma: float = 0.8
    """the discount factor gamma"""
    gae_lambda: float = 0.9
    """the lambda for the general advantage estimation"""
    num_minibatches: int = 32
    """the number of mini-batches"""
    update_epochs: int = 4
    """the K epochs to update the policy"""
    norm_adv: bool = True
    """Toggles advantages normalization"""
    clip_coef: float = 0.2
    """the surrogate clipping coefficient"""
    clip_vloss: bool = False
    """Toggles whether or not to use a clipped loss for the value function, as per the paper."""
    ent_coef: float = 0.0
    """coefficient of the entropy"""
    vf_coef: float = 0.5
    """coefficient of the value function"""
    max_grad_norm: float = 0.5
    """the maximum norm for the gradient clipping"""
    target_kl: float = 0.2
    """the target KL divergence threshold"""
    reward_scale: float = 1.0
    """Scale the reward by this factor"""
    eval_freq: int = 25
    """evaluation frequency in terms of iterations"""
    save_train_video_freq: Optional[int] = None
    """frequency to save training videos in terms of iterations"""
    finite_horizon_gae: bool = False
    """whether to compute GAE with the finite-horizon formulation (GAE paper, eq. 16) instead of the standard recursion"""

    # to be filled in runtime
    batch_size: int = 0
    """the batch size (computed in runtime)"""
    minibatch_size: int = 0
    """the mini-batch size (computed in runtime)"""
    num_iterations: int = 0
    """the number of iterations (computed in runtime)"""
|
| 120 |
+
|
| 121 |
+
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    """Orthogonally initialize ``layer.weight`` (gain ``std``) and set ``layer.bias`` to ``bias_const``.

    Returns the same layer so calls can be nested inside ``nn.Sequential(...)``.
    """
    nn.init.orthogonal_(layer.weight, gain=std)
    nn.init.constant_(layer.bias, bias_const)
    return layer
|
| 125 |
+
|
| 126 |
+
class DictArray(object):
    """A nested, dict-keyed buffer of torch tensors sharing a common leading shape.

    Mirrors a ``gym.spaces.Dict`` observation space: each leaf is a tensor of
    shape ``buffer_shape + leaf_space.shape``, and nested Dict spaces become
    nested ``DictArray`` instances. Used as rollout storage for dict observations.
    """

    def __init__(self, buffer_shape, element_space, data_dict=None, device=None):
        """Allocate (or wrap) the buffer.

        Args:
            buffer_shape: leading dimensions shared by every leaf tensor,
                e.g. ``(num_steps, num_envs)``.
            element_space: ``gym.spaces.Dict`` describing the per-element
                layout; ignored when ``data_dict`` is given.
            data_dict: pre-built mapping of tensors / DictArrays to wrap
                directly (used by ``reshape``).
            device: torch device for newly allocated tensors.
        """
        self.buffer_shape = buffer_shape
        if data_dict:
            self.data = data_dict
        else:
            assert isinstance(element_space, gym.spaces.dict.Dict)
            self.data = {}
            for k, v in element_space.items():
                if isinstance(v, gym.spaces.dict.Dict):
                    self.data[k] = DictArray(buffer_shape, v, device=device)
                else:
                    # Map numpy dtypes onto torch dtypes; float64 is downcast
                    # to float32 to keep rollout storage compact.
                    dtype = (torch.float32 if v.dtype in (np.float32, np.float64) else
                             torch.uint8 if v.dtype == np.uint8 else
                             torch.int16 if v.dtype == np.int16 else
                             torch.int32 if v.dtype == np.int32 else
                             v.dtype)
                    self.data[k] = torch.zeros(buffer_shape + v.shape, dtype=dtype, device=device)

    def keys(self):
        return self.data.keys()

    def __getitem__(self, index):
        # A string index addresses a whole sub-buffer; any other index
        # (int / slice / tensor) indexes every leaf along the buffer dims.
        if isinstance(index, str):
            return self.data[index]
        return {
            k: v[index] for k, v in self.data.items()
        }

    def __setitem__(self, index, value):
        if isinstance(index, str):
            self.data[index] = value
            # BUGFIX: must return here — previously execution fell through to
            # the loop below and called ``.items()`` on the just-stored
            # tensor, raising AttributeError on any string-key assignment.
            return
        for k, v in value.items():
            self.data[k][index] = v

    @property
    def shape(self):
        return self.buffer_shape

    def reshape(self, shape):
        """Return a new DictArray whose buffer dims are reshaped to ``shape``.

        Trailing (per-leaf) dims are preserved; tensors are reshaped with
        torch semantics, so views are returned where possible.
        """
        t = len(self.buffer_shape)
        new_dict = {}
        for k, v in self.data.items():
            if isinstance(v, DictArray):
                new_dict[k] = v.reshape(shape)
            else:
                new_dict[k] = v.reshape(shape + v.shape[t:])
        new_buffer_shape = next(iter(new_dict.values())).shape[:len(shape)]
        return DictArray(new_buffer_shape, None, data_dict=new_dict)
|
| 175 |
+
|
| 176 |
+
class NatureCNN(nn.Module):
    """Feature extractor for dict observations with an ``"rgb"`` image key and an optional ``"state"`` vector key.

    Images pass through a NatureCNN-style conv stack (Mnih et al., 2015) into a
    256-d embedding; state vectors pass through one linear layer into a 256-d
    embedding. ``forward`` concatenates the embeddings; ``self.out_features``
    holds the total output width.
    """

    def __init__(self, sample_obs):
        super().__init__()

        extractors = {}

        self.out_features = 0
        feature_size = 256
        # sample_obs["rgb"] is channels-last: (batch, H, W, C)
        in_channels = sample_obs["rgb"].shape[-1]

        # here we use a NatureCNN architecture to process images, but any architecture is permissible here
        cnn = nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=32,
                kernel_size=8,
                stride=4,
                padding=0,
            ),
            nn.ReLU(),
            nn.Conv2d(
                in_channels=32, out_channels=64, kernel_size=4, stride=2, padding=0
            ),
            nn.ReLU(),
            nn.Conv2d(
                in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=0
            ),
            nn.ReLU(),
            nn.Flatten(),
        )

        # to easily figure out the dimensions after flattening, we pass a test tensor
        with torch.no_grad():
            n_flatten = cnn(sample_obs["rgb"].float().permute(0, 3, 1, 2).cpu()).shape[1]
        fc = nn.Sequential(nn.Linear(n_flatten, feature_size), nn.ReLU())
        extractors["rgb"] = nn.Sequential(cnn, fc)
        self.out_features += feature_size

        if "state" in sample_obs:
            # for state data we simply pass it through a single linear layer
            state_size = sample_obs["state"].shape[-1]
            extractors["state"] = nn.Linear(state_size, 256)
            self.out_features += 256

        self.extractors = nn.ModuleDict(extractors)

    def forward(self, observations) -> torch.Tensor:
        encoded_tensor_list = []
        # self.extractors contain nn.Modules that do all the processing.
        for key, extractor in self.extractors.items():
            obs = observations[key]
            if key == "rgb":
                # channels-last uint8 image -> normalized channels-first float
                obs = obs.float().permute(0, 3, 1, 2)
                obs = obs / 255
            encoded_tensor_list.append(extractor(obs))
        return torch.cat(encoded_tensor_list, dim=1)
|
| 234 |
+
|
| 235 |
+
class Agent(nn.Module):
    """PPO actor-critic over shared NatureCNN features with a diagonal-Gaussian policy."""

    def __init__(self, envs, sample_obs):
        super().__init__()
        self.feature_net = NatureCNN(sample_obs=sample_obs)
        latent_size = self.feature_net.out_features
        action_dim = np.prod(envs.unwrapped.single_action_space.shape)
        self.critic = nn.Sequential(
            layer_init(nn.Linear(latent_size, 512)),
            nn.ReLU(inplace=True),
            layer_init(nn.Linear(512, 1)),
        )
        self.actor_mean = nn.Sequential(
            layer_init(nn.Linear(latent_size, 512)),
            nn.ReLU(inplace=True),
            layer_init(nn.Linear(512, action_dim), std=0.01 * np.sqrt(2)),
        )
        # State-independent log standard deviation, broadcast over the batch.
        self.actor_logstd = nn.Parameter(torch.ones(1, action_dim) * -0.5)

    def get_features(self, x):
        """Encode a dict observation into a flat feature vector."""
        return self.feature_net(x)

    def get_value(self, x):
        """Return the critic's value estimate for observation ``x``."""
        return self.critic(self.feature_net(x))

    def get_action(self, x, deterministic=False):
        """Sample an action from the policy (or return its mean when ``deterministic``)."""
        features = self.feature_net(x)
        mean = self.actor_mean(features)
        if deterministic:
            return mean
        std = torch.exp(self.actor_logstd.expand_as(mean))
        return Normal(mean, std).sample()

    def get_action_and_value(self, x, action=None):
        """Return ``(action, log_prob, entropy, value)``; samples ``action`` when not provided."""
        features = self.feature_net(x)
        mean = self.actor_mean(features)
        std = torch.exp(self.actor_logstd.expand_as(mean))
        dist = Normal(mean, std)
        if action is None:
            action = dist.sample()
        return action, dist.log_prob(action).sum(1), dist.entropy().sum(1), self.critic(features)
|
| 275 |
+
|
| 276 |
+
class Logger:
    """Thin logging facade that writes scalars to TensorBoard and, optionally, mirrors them to Weights & Biases."""

    def __init__(self, log_wandb=False, tensorboard: SummaryWriter = None) -> None:
        self.writer = tensorboard
        self.log_wandb = log_wandb

    def add_scalar(self, tag, scalar_value, step):
        # Record one scalar under `tag` at `step` on every enabled backend.
        if self.log_wandb:
            # NOTE(review): relies on `wandb` being imported and initialized by
            # the caller (done under `if args.track:` in the main script) —
            # calling this with log_wandb=True otherwise raises NameError.
            wandb.log({tag: scalar_value}, step=step)
        self.writer.add_scalar(tag, scalar_value, step)

    def close(self):
        # Flush and close the underlying TensorBoard writer.
        self.writer.close()
|
| 286 |
+
|
| 287 |
+
if __name__ == "__main__":
|
| 288 |
+
args = tyro.cli(Args)
|
| 289 |
+
args.batch_size = int(args.num_envs * args.num_steps)
|
| 290 |
+
args.minibatch_size = int(args.batch_size // args.num_minibatches)
|
| 291 |
+
args.num_iterations = args.total_timesteps // args.batch_size
|
| 292 |
+
if args.exp_name is None:
|
| 293 |
+
args.exp_name = os.path.basename(__file__)[: -len(".py")]
|
| 294 |
+
run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
|
| 295 |
+
else:
|
| 296 |
+
run_name = args.exp_name
|
| 297 |
+
|
| 298 |
+
# TRY NOT TO MODIFY: seeding
|
| 299 |
+
random.seed(args.seed)
|
| 300 |
+
np.random.seed(args.seed)
|
| 301 |
+
torch.manual_seed(args.seed)
|
| 302 |
+
torch.backends.cudnn.deterministic = args.torch_deterministic
|
| 303 |
+
|
| 304 |
+
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
|
| 305 |
+
|
| 306 |
+
# env setup
|
| 307 |
+
env_kwargs = dict(obs_mode="rgb", render_mode=args.render_mode, sim_backend="physx_cuda")
|
| 308 |
+
if args.control_mode is not None:
|
| 309 |
+
env_kwargs["control_mode"] = args.control_mode
|
| 310 |
+
eval_envs = gym.make(args.env_id, num_envs=args.num_eval_envs, reconfiguration_freq=args.eval_reconfiguration_freq, **env_kwargs)
|
| 311 |
+
envs = gym.make(args.env_id, num_envs=args.num_envs if not args.evaluate else 1, reconfiguration_freq=args.reconfiguration_freq, **env_kwargs)
|
| 312 |
+
|
| 313 |
+
# rgbd obs mode returns a dict of data, we flatten it so there is just a rgbd key and state key
|
| 314 |
+
envs = FlattenRGBDObservationWrapper(envs, rgb=True, depth=False, state=args.include_state)
|
| 315 |
+
eval_envs = FlattenRGBDObservationWrapper(eval_envs, rgb=True, depth=False, state=args.include_state)
|
| 316 |
+
|
| 317 |
+
if isinstance(envs.action_space, gym.spaces.Dict):
|
| 318 |
+
envs = FlattenActionSpaceWrapper(envs)
|
| 319 |
+
eval_envs = FlattenActionSpaceWrapper(eval_envs)
|
| 320 |
+
if args.capture_video:
|
| 321 |
+
eval_output_dir = f"runs/{run_name}/videos"
|
| 322 |
+
if args.evaluate:
|
| 323 |
+
eval_output_dir = f"{os.path.dirname(args.checkpoint)}/test_videos"
|
| 324 |
+
print(f"Saving eval videos to {eval_output_dir}")
|
| 325 |
+
if args.save_train_video_freq is not None:
|
| 326 |
+
save_video_trigger = lambda x : (x // args.num_steps) % args.save_train_video_freq == 0
|
| 327 |
+
envs = RecordEpisode(envs, output_dir=f"runs/{run_name}/train_videos", save_trajectory=False, save_video_trigger=save_video_trigger, max_steps_per_video=args.num_steps, video_fps=30)
|
| 328 |
+
eval_envs = RecordEpisode(eval_envs, output_dir=eval_output_dir, save_trajectory=args.evaluate, trajectory_name="trajectory", max_steps_per_video=args.num_eval_steps, video_fps=30)
|
| 329 |
+
envs = ManiSkillVectorEnv(envs, args.num_envs, ignore_terminations=not args.partial_reset, record_metrics=True)
|
| 330 |
+
eval_envs = ManiSkillVectorEnv(eval_envs, args.num_eval_envs, ignore_terminations=not args.eval_partial_reset, record_metrics=True)
|
| 331 |
+
assert isinstance(envs.single_action_space, gym.spaces.Box), "only continuous action space is supported"
|
| 332 |
+
|
| 333 |
+
max_episode_steps = gym_utils.find_max_episode_steps_value(envs._env)
|
| 334 |
+
logger = None
|
| 335 |
+
if not args.evaluate:
|
| 336 |
+
print("Running training")
|
| 337 |
+
if args.track:
|
| 338 |
+
import wandb
|
| 339 |
+
config = vars(args)
|
| 340 |
+
config["env_cfg"] = dict(**env_kwargs, num_envs=args.num_envs, env_id=args.env_id, reward_mode="normalized_dense", env_horizon=max_episode_steps, partial_reset=args.partial_reset)
|
| 341 |
+
config["eval_env_cfg"] = dict(**env_kwargs, num_envs=args.num_eval_envs, env_id=args.env_id, reward_mode="normalized_dense", env_horizon=max_episode_steps, partial_reset=args.partial_reset)
|
| 342 |
+
wandb.init(
|
| 343 |
+
project=args.wandb_project_name,
|
| 344 |
+
entity=args.wandb_entity,
|
| 345 |
+
sync_tensorboard=False,
|
| 346 |
+
config=config,
|
| 347 |
+
name=run_name,
|
| 348 |
+
save_code=True,
|
| 349 |
+
group=args.wandb_group,
|
| 350 |
+
tags=["ppo", "walltime_efficient"]
|
| 351 |
+
)
|
| 352 |
+
writer = SummaryWriter(f"runs/{run_name}")
|
| 353 |
+
writer.add_text(
|
| 354 |
+
"hyperparameters",
|
| 355 |
+
"|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
|
| 356 |
+
)
|
| 357 |
+
logger = Logger(log_wandb=args.track, tensorboard=writer)
|
| 358 |
+
else:
|
| 359 |
+
print("Running evaluation")
|
| 360 |
+
|
| 361 |
+
# ALGO Logic: Storage setup
|
| 362 |
+
obs = DictArray((args.num_steps, args.num_envs), envs.single_observation_space, device=device)
|
| 363 |
+
actions = torch.zeros((args.num_steps, args.num_envs) + envs.single_action_space.shape).to(device)
|
| 364 |
+
logprobs = torch.zeros((args.num_steps, args.num_envs)).to(device)
|
| 365 |
+
rewards = torch.zeros((args.num_steps, args.num_envs)).to(device)
|
| 366 |
+
dones = torch.zeros((args.num_steps, args.num_envs)).to(device)
|
| 367 |
+
values = torch.zeros((args.num_steps, args.num_envs)).to(device)
|
| 368 |
+
|
| 369 |
+
# TRY NOT TO MODIFY: start the game
|
| 370 |
+
global_step = 0
|
| 371 |
+
start_time = time.time()
|
| 372 |
+
next_obs, _ = envs.reset(seed=args.seed)
|
| 373 |
+
eval_obs, _ = eval_envs.reset(seed=args.seed)
|
| 374 |
+
next_done = torch.zeros(args.num_envs, device=device)
|
| 375 |
+
print(f"####")
|
| 376 |
+
print(f"args.num_iterations={args.num_iterations} args.num_envs={args.num_envs} args.num_eval_envs={args.num_eval_envs}")
|
| 377 |
+
print(f"args.minibatch_size={args.minibatch_size} args.batch_size={args.batch_size} args.update_epochs={args.update_epochs}")
|
| 378 |
+
print(f"####")
|
| 379 |
+
agent = Agent(envs, sample_obs=next_obs).to(device)
|
| 380 |
+
optimizer = optim.Adam(agent.parameters(), lr=args.learning_rate, eps=1e-5)
|
| 381 |
+
|
| 382 |
+
if args.checkpoint:
|
| 383 |
+
agent.load_state_dict(torch.load(args.checkpoint))
|
| 384 |
+
|
| 385 |
+
cumulative_times = defaultdict(float)
|
| 386 |
+
|
| 387 |
+
for iteration in range(1, args.num_iterations + 1):
|
| 388 |
+
print(f"Epoch: {iteration}, global_step={global_step}")
|
| 389 |
+
final_values = torch.zeros((args.num_steps, args.num_envs), device=device)
|
| 390 |
+
agent.eval()
|
| 391 |
+
if iteration % args.eval_freq == 1:
|
| 392 |
+
print("Evaluating")
|
| 393 |
+
stime = time.perf_counter()
|
| 394 |
+
eval_obs, _ = eval_envs.reset()
|
| 395 |
+
eval_metrics = defaultdict(list)
|
| 396 |
+
num_episodes = 0
|
| 397 |
+
for _ in range(args.num_eval_steps):
|
| 398 |
+
with torch.no_grad():
|
| 399 |
+
eval_obs, eval_rew, eval_terminations, eval_truncations, eval_infos = eval_envs.step(agent.get_action(eval_obs, deterministic=True))
|
| 400 |
+
if "final_info" in eval_infos:
|
| 401 |
+
mask = eval_infos["_final_info"]
|
| 402 |
+
num_episodes += mask.sum()
|
| 403 |
+
for k, v in eval_infos["final_info"]["episode"].items():
|
| 404 |
+
eval_metrics[k].append(v)
|
| 405 |
+
print(f"Evaluated {args.num_eval_steps * args.num_eval_envs} steps resulting in {num_episodes} episodes")
|
| 406 |
+
for k, v in eval_metrics.items():
|
| 407 |
+
mean = torch.stack(v).float().mean()
|
| 408 |
+
if logger is not None:
|
| 409 |
+
logger.add_scalar(f"eval/{k}", mean, global_step)
|
| 410 |
+
print(f"eval_{k}_mean={mean}")
|
| 411 |
+
if logger is not None:
|
| 412 |
+
eval_time = time.perf_counter() - stime
|
| 413 |
+
cumulative_times["eval_time"] += eval_time
|
| 414 |
+
logger.add_scalar("time/eval_time", eval_time, global_step)
|
| 415 |
+
if args.evaluate:
|
| 416 |
+
break
|
| 417 |
+
if args.save_model and iteration % args.eval_freq == 1:
|
| 418 |
+
model_path = f"runs/{run_name}/ckpt_{iteration}.pt"
|
| 419 |
+
torch.save(agent.state_dict(), model_path)
|
| 420 |
+
print(f"model saved to {model_path}")
|
| 421 |
+
# Annealing the rate if instructed to do so.
|
| 422 |
+
if args.anneal_lr:
|
| 423 |
+
frac = 1.0 - (iteration - 1.0) / args.num_iterations
|
| 424 |
+
lrnow = frac * args.learning_rate
|
| 425 |
+
optimizer.param_groups[0]["lr"] = lrnow
|
| 426 |
+
rollout_time = time.perf_counter()
|
| 427 |
+
for step in range(0, args.num_steps):
|
| 428 |
+
global_step += args.num_envs
|
| 429 |
+
obs[step] = next_obs
|
| 430 |
+
dones[step] = next_done
|
| 431 |
+
|
| 432 |
+
# ALGO LOGIC: action logic
|
| 433 |
+
with torch.no_grad():
|
| 434 |
+
action, logprob, _, value = agent.get_action_and_value(next_obs)
|
| 435 |
+
values[step] = value.flatten()
|
| 436 |
+
actions[step] = action
|
| 437 |
+
logprobs[step] = logprob
|
| 438 |
+
|
| 439 |
+
# TRY NOT TO MODIFY: execute the game and log data.
|
| 440 |
+
next_obs, reward, terminations, truncations, infos = envs.step(action)
|
| 441 |
+
next_done = torch.logical_or(terminations, truncations).to(torch.float32)
|
| 442 |
+
rewards[step] = reward.view(-1) * args.reward_scale
|
| 443 |
+
|
| 444 |
+
if "final_info" in infos:
|
| 445 |
+
final_info = infos["final_info"]
|
| 446 |
+
done_mask = infos["_final_info"]
|
| 447 |
+
for k, v in final_info["episode"].items():
|
| 448 |
+
logger.add_scalar(f"train/{k}", v[done_mask].float().mean(), global_step)
|
| 449 |
+
|
| 450 |
+
for k in infos["final_observation"]:
|
| 451 |
+
infos["final_observation"][k] = infos["final_observation"][k][done_mask]
|
| 452 |
+
with torch.no_grad():
|
| 453 |
+
final_values[step, torch.arange(args.num_envs, device=device)[done_mask]] = agent.get_value(infos["final_observation"]).view(-1)
|
| 454 |
+
rollout_time = time.perf_counter() - rollout_time
|
| 455 |
+
cumulative_times["rollout_time"] += rollout_time
|
| 456 |
+
# bootstrap value according to termination and truncation
|
| 457 |
+
with torch.no_grad():
|
| 458 |
+
next_value = agent.get_value(next_obs).reshape(1, -1)
|
| 459 |
+
advantages = torch.zeros_like(rewards).to(device)
|
| 460 |
+
lastgaelam = 0
|
| 461 |
+
for t in reversed(range(args.num_steps)):
|
| 462 |
+
if t == args.num_steps - 1:
|
| 463 |
+
next_not_done = 1.0 - next_done
|
| 464 |
+
nextvalues = next_value
|
| 465 |
+
else:
|
| 466 |
+
next_not_done = 1.0 - dones[t + 1]
|
| 467 |
+
nextvalues = values[t + 1]
|
| 468 |
+
real_next_values = next_not_done * nextvalues + final_values[t] # t instead of t+1
|
| 469 |
+
# next_not_done means nextvalues is computed from the correct next_obs
|
| 470 |
+
# if next_not_done is 1, final_values is always 0
|
| 471 |
+
# if next_not_done is 0, then use final_values, which is computed according to bootstrap_at_done
|
| 472 |
+
if args.finite_horizon_gae:
|
| 473 |
+
"""
|
| 474 |
+
See GAE paper equation(16) line 1, we will compute the GAE based on this line only
|
| 475 |
+
1 *( -V(s_t) + r_t + gamma * V(s_{t+1}) )
|
| 476 |
+
lambda *( -V(s_t) + r_t + gamma * r_{t+1} + gamma^2 * V(s_{t+2}) )
|
| 477 |
+
lambda^2 *( -V(s_t) + r_t + gamma * r_{t+1} + gamma^2 * r_{t+2} + ... )
|
| 478 |
+
lambda^3 *( -V(s_t) + r_t + gamma * r_{t+1} + gamma^2 * r_{t+2} + gamma^3 * r_{t+3}
|
| 479 |
+
We then normalize it by the sum of the lambda^i (instead of 1-lambda)
|
| 480 |
+
"""
|
| 481 |
+
if t == args.num_steps - 1: # initialize
|
| 482 |
+
lam_coef_sum = 0.
|
| 483 |
+
reward_term_sum = 0. # the sum of the second term
|
| 484 |
+
value_term_sum = 0. # the sum of the third term
|
| 485 |
+
lam_coef_sum = lam_coef_sum * next_not_done
|
| 486 |
+
reward_term_sum = reward_term_sum * next_not_done
|
| 487 |
+
value_term_sum = value_term_sum * next_not_done
|
| 488 |
+
|
| 489 |
+
lam_coef_sum = 1 + args.gae_lambda * lam_coef_sum
|
| 490 |
+
reward_term_sum = args.gae_lambda * args.gamma * reward_term_sum + lam_coef_sum * rewards[t]
|
| 491 |
+
value_term_sum = args.gae_lambda * args.gamma * value_term_sum + args.gamma * real_next_values
|
| 492 |
+
|
| 493 |
+
advantages[t] = (reward_term_sum + value_term_sum) / lam_coef_sum - values[t]
|
| 494 |
+
else:
|
| 495 |
+
delta = rewards[t] + args.gamma * real_next_values - values[t]
|
| 496 |
+
advantages[t] = lastgaelam = delta + args.gamma * args.gae_lambda * next_not_done * lastgaelam # Here actually we should use next_not_terminated, but we don't have lastgamlam if terminated
|
| 497 |
+
returns = advantages + values
|
| 498 |
+
|
| 499 |
+
# flatten the batch
|
| 500 |
+
b_obs = obs.reshape((-1,))
|
| 501 |
+
b_logprobs = logprobs.reshape(-1)
|
| 502 |
+
b_actions = actions.reshape((-1,) + envs.single_action_space.shape)
|
| 503 |
+
b_advantages = advantages.reshape(-1)
|
| 504 |
+
b_returns = returns.reshape(-1)
|
| 505 |
+
b_values = values.reshape(-1)
|
| 506 |
+
|
| 507 |
+
# Optimizing the policy and value network
|
| 508 |
+
agent.train()
|
| 509 |
+
b_inds = np.arange(args.batch_size)
|
| 510 |
+
clipfracs = []
|
| 511 |
+
update_time = time.perf_counter()
|
| 512 |
+
for epoch in range(args.update_epochs):
|
| 513 |
+
np.random.shuffle(b_inds)
|
| 514 |
+
for start in range(0, args.batch_size, args.minibatch_size):
|
| 515 |
+
end = start + args.minibatch_size
|
| 516 |
+
mb_inds = b_inds[start:end]
|
| 517 |
+
|
| 518 |
+
_, newlogprob, entropy, newvalue = agent.get_action_and_value(b_obs[mb_inds], b_actions[mb_inds])
|
| 519 |
+
logratio = newlogprob - b_logprobs[mb_inds]
|
| 520 |
+
ratio = logratio.exp()
|
| 521 |
+
|
| 522 |
+
with torch.no_grad():
|
| 523 |
+
# calculate approx_kl http://joschu.net/blog/kl-approx.html
|
| 524 |
+
old_approx_kl = (-logratio).mean()
|
| 525 |
+
approx_kl = ((ratio - 1) - logratio).mean()
|
| 526 |
+
clipfracs += [((ratio - 1.0).abs() > args.clip_coef).float().mean().item()]
|
| 527 |
+
|
| 528 |
+
if args.target_kl is not None and approx_kl > args.target_kl:
|
| 529 |
+
break
|
| 530 |
+
|
| 531 |
+
mb_advantages = b_advantages[mb_inds]
|
| 532 |
+
if args.norm_adv:
|
| 533 |
+
mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)
|
| 534 |
+
|
| 535 |
+
# Policy loss
|
| 536 |
+
pg_loss1 = -mb_advantages * ratio
|
| 537 |
+
pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - args.clip_coef, 1 + args.clip_coef)
|
| 538 |
+
pg_loss = torch.max(pg_loss1, pg_loss2).mean()
|
| 539 |
+
|
| 540 |
+
# Value loss
|
| 541 |
+
newvalue = newvalue.view(-1)
|
| 542 |
+
if args.clip_vloss:
|
| 543 |
+
v_loss_unclipped = (newvalue - b_returns[mb_inds]) ** 2
|
| 544 |
+
v_clipped = b_values[mb_inds] + torch.clamp(
|
| 545 |
+
newvalue - b_values[mb_inds],
|
| 546 |
+
-args.clip_coef,
|
| 547 |
+
args.clip_coef,
|
| 548 |
+
)
|
| 549 |
+
v_loss_clipped = (v_clipped - b_returns[mb_inds]) ** 2
|
| 550 |
+
v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
|
| 551 |
+
v_loss = 0.5 * v_loss_max.mean()
|
| 552 |
+
else:
|
| 553 |
+
v_loss = 0.5 * ((newvalue - b_returns[mb_inds]) ** 2).mean()
|
| 554 |
+
|
| 555 |
+
entropy_loss = entropy.mean()
|
| 556 |
+
loss = pg_loss - args.ent_coef * entropy_loss + v_loss * args.vf_coef
|
| 557 |
+
|
| 558 |
+
optimizer.zero_grad()
|
| 559 |
+
loss.backward()
|
| 560 |
+
nn.utils.clip_grad_norm_(agent.parameters(), args.max_grad_norm)
|
| 561 |
+
optimizer.step()
|
| 562 |
+
|
| 563 |
+
if args.target_kl is not None and approx_kl > args.target_kl:
|
| 564 |
+
break
|
| 565 |
+
update_time = time.perf_counter() - update_time
|
| 566 |
+
cumulative_times["update_time"] += update_time
|
| 567 |
+
y_pred, y_true = b_values.cpu().numpy(), b_returns.cpu().numpy()
|
| 568 |
+
var_y = np.var(y_true)
|
| 569 |
+
explained_var = np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y
|
| 570 |
+
|
| 571 |
+
logger.add_scalar("charts/learning_rate", optimizer.param_groups[0]["lr"], global_step)
|
| 572 |
+
logger.add_scalar("losses/value_loss", v_loss.item(), global_step)
|
| 573 |
+
logger.add_scalar("losses/policy_loss", pg_loss.item(), global_step)
|
| 574 |
+
logger.add_scalar("losses/entropy", entropy_loss.item(), global_step)
|
| 575 |
+
logger.add_scalar("losses/old_approx_kl", old_approx_kl.item(), global_step)
|
| 576 |
+
logger.add_scalar("losses/approx_kl", approx_kl.item(), global_step)
|
| 577 |
+
logger.add_scalar("losses/clipfrac", np.mean(clipfracs), global_step)
|
| 578 |
+
logger.add_scalar("losses/explained_variance", explained_var, global_step)
|
| 579 |
+
print("SPS:", int(global_step / (time.time() - start_time)))
|
| 580 |
+
logger.add_scalar("charts/SPS", int(global_step / (time.time() - start_time)), global_step)
|
| 581 |
+
logger.add_scalar("time/step", global_step, global_step)
|
| 582 |
+
logger.add_scalar("time/update_time", update_time, global_step)
|
| 583 |
+
logger.add_scalar("time/rollout_time", rollout_time, global_step)
|
| 584 |
+
logger.add_scalar("time/rollout_fps", args.num_envs * args.num_steps / rollout_time, global_step)
|
| 585 |
+
for k, v in cumulative_times.items():
|
| 586 |
+
logger.add_scalar(f"time/total_{k}", v, global_step)
|
| 587 |
+
logger.add_scalar("time/total_rollout+update_time", cumulative_times["rollout_time"] + cumulative_times["update_time"], global_step)
|
| 588 |
+
if args.save_model and not args.evaluate:
|
| 589 |
+
model_path = f"runs/{run_name}/final_ckpt.pt"
|
| 590 |
+
torch.save(agent.state_dict(), model_path)
|
| 591 |
+
print(f"model saved to {model_path}")
|
| 592 |
+
|
| 593 |
+
envs.close()
|
| 594 |
+
if logger is not None: logger.close()
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/README.md
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Temporal Difference Learning for Model Predictive Control 2 (TD-MPC2)
|
| 2 |
+
|
| 3 |
+
Scalable, robust model-based RL algorithm based on ["TD-MPC2: Scalable, Robust World Models for Continuous Control"](https://arxiv.org/abs/2310.16828). Code adapted from https://github.com/nicklashansen/tdmpc2. It is written to work with the new Maniskill update, and supports vectorized state-based and visual-based RL environment.
|
| 4 |
+
|
| 5 |
+
## Installation
|
| 6 |
+
We recommend using conda/mamba and you can install the dependencies as so :
|
| 7 |
+
|
| 8 |
+
```bash
|
| 9 |
+
conda env create -f environment.yaml
|
| 10 |
+
conda activate tdmpc2-ms
|
| 11 |
+
```
|
| 12 |
+
|
| 13 |
+
or follow the [original repo](https://github.com/nicklashansen/tdmpc2)'s guide to build the docker image.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
## State Based RL
|
| 17 |
+
|
| 18 |
+
Simple command to run the algorithm with default configs (5M params, 1M steps, default control mode, 32 envs, state obs mode) :
|
| 19 |
+
```bash
|
| 20 |
+
python train.py env_id=PushCube-v1
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
More advanced command with optional configs : (More can be found in config.yaml)
|
| 24 |
+
```bash
|
| 25 |
+
python train.py model_size=5 steps=1_000_000 seed=1 exp_name=default \
|
| 26 |
+
env_id=PushCube-v1 env_type=gpu num_envs=32 control_mode=pd_ee_delta_pose obs=state \
|
| 27 |
+
save_video_local=false wandb=true wandb_entity=??? wandb_project=??? wandb_group=??? wandb_name=??? setting_tag=???
|
| 28 |
+
```
|
| 29 |
+
(*) The optional *setting_tag* is for adding a specific tag in the wandb log (e.g. sample_efficient, walltime_efficient, etc.)
|
| 30 |
+
|
| 31 |
+
## Visual (RGB) Based RL
|
| 32 |
+
|
| 33 |
+
The visual based RL expects model_size = 5. Also, make sure you have sufficient CPU memory, otherwise lower the buffer_size and use gpu env.
|
| 34 |
+
```bash
|
| 35 |
+
python train.py buffer_size=500_000 steps=5_000_000 seed=1 exp_name=default \
|
| 36 |
+
env_id=PushCube-v1 env_type=gpu num_envs=32 control_mode=pd_ee_delta_pose obs=rgb \
|
| 37 |
+
save_video_local=false wandb=true wandb_entity=??? wandb_project=??? wandb_group=??? wandb_name=??? setting_tag=???
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
## Replaying Evaluation Trajectories
|
| 41 |
+
|
| 42 |
+
To create videos of a checkpoint model, use the following command.
|
| 43 |
+
|
| 44 |
+
```bash
|
| 45 |
+
python evaluate.py model_size=5 seed=1 exp_name=default \
|
| 46 |
+
env_id=PushCube-v1 control_mode=pd_ee_delta_pose obs=state \
|
| 47 |
+
save_video_local=true checkpoint=/absolute/path/to/checkpoint.pt
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
* Make sure you specify the same control_mode the model was trained on if it's not default.
|
| 51 |
+
* The video are saved under ```logs/{env_id}/{seed}/{exp_name}/videos```
|
| 52 |
+
* The number of video saved is determined by ```num_envs * eval_episodes_per_env```
|
| 53 |
+
|
| 54 |
+
## Some Notes
|
| 55 |
+
|
| 56 |
+
- Multi-task TD-MPC2 isn't supported for Maniskill at the moment.
|
| 57 |
+
|
| 58 |
+
## Citation
|
| 59 |
+
|
| 60 |
+
If you use this baseline please cite the following
|
| 61 |
+
```
|
| 62 |
+
@inproceedings{hansen2024tdmpc2,
|
| 63 |
+
title={TD-MPC2: Scalable, Robust World Models for Continuous Control},
|
| 64 |
+
author={Nicklas Hansen and Hao Su and Xiaolong Wang},
|
| 65 |
+
booktitle={International Conference on Learning Representations (ICLR)},
|
| 66 |
+
year={2024}
|
| 67 |
+
}
|
| 68 |
+
```
|
| 69 |
+
as well as the original TD-MPC paper:
|
| 70 |
+
```
|
| 71 |
+
@inproceedings{hansen2022tdmpc,
|
| 72 |
+
title={Temporal Difference Learning for Model Predictive Control},
|
| 73 |
+
author={Nicklas Hansen and Xiaolong Wang and Hao Su},
|
| 74 |
+
booktitle={International Conference on Machine Learning (ICML)},
|
| 75 |
+
year={2022}
|
| 76 |
+
}
|
| 77 |
+
```
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/common/buffer.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from tensordict.tensordict import TensorDict
|
| 3 |
+
from torchrl.data.replay_buffers import ReplayBuffer, LazyTensorStorage
|
| 4 |
+
from torchrl.data.replay_buffers.samplers import SliceSampler
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Buffer():
|
| 8 |
+
"""
|
| 9 |
+
Replay buffer for TD-MPC2 training. Based on torchrl.
|
| 10 |
+
Uses CUDA memory if available, and CPU memory otherwise.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
def __init__(self, cfg):
|
| 14 |
+
self.cfg = cfg
|
| 15 |
+
self._device = torch.device('cuda')
|
| 16 |
+
self._capacity = min(cfg.buffer_size, cfg.steps)
|
| 17 |
+
self._sampler = SliceSampler(
|
| 18 |
+
num_slices=self.cfg.batch_size,
|
| 19 |
+
end_key=None,
|
| 20 |
+
traj_key='episode',
|
| 21 |
+
truncated_key=None,
|
| 22 |
+
strict_length=True,
|
| 23 |
+
)
|
| 24 |
+
self._batch_size = cfg.batch_size * (cfg.horizon+1)
|
| 25 |
+
self._num_eps = 0
|
| 26 |
+
|
| 27 |
+
@property
|
| 28 |
+
def capacity(self):
|
| 29 |
+
"""Return the capacity of the buffer."""
|
| 30 |
+
return self._capacity
|
| 31 |
+
|
| 32 |
+
@property
|
| 33 |
+
def num_eps(self):
|
| 34 |
+
"""Return the number of episodes in the buffer."""
|
| 35 |
+
return self._num_eps
|
| 36 |
+
|
| 37 |
+
def _reserve_buffer(self, storage):
|
| 38 |
+
"""
|
| 39 |
+
Reserve a buffer with the given storage.
|
| 40 |
+
"""
|
| 41 |
+
return ReplayBuffer(
|
| 42 |
+
storage=storage,
|
| 43 |
+
sampler=self._sampler,
|
| 44 |
+
pin_memory=True,
|
| 45 |
+
prefetch=int(self.cfg.num_envs / self.cfg.steps_per_update),
|
| 46 |
+
batch_size=self._batch_size,
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
def _init(self, tds):
|
| 50 |
+
"""Initialize the replay buffer. Use the first episode to estimate storage requirements."""
|
| 51 |
+
print(f'Buffer capacity: {self._capacity:,}')
|
| 52 |
+
mem_free, _ = torch.cuda.mem_get_info()
|
| 53 |
+
bytes_per_step = sum([
|
| 54 |
+
(v.numel()*v.element_size() if not isinstance(v, TensorDict) \
|
| 55 |
+
else sum([x.numel()*x.element_size() for x in v.values()])) \
|
| 56 |
+
for v in tds.values()
|
| 57 |
+
]) / len(tds)
|
| 58 |
+
total_bytes = bytes_per_step*self._capacity
|
| 59 |
+
print(f'Storage required: {total_bytes/1e9:.2f} GB')
|
| 60 |
+
# Heuristic: decide whether to use CUDA or CPU memory
|
| 61 |
+
storage_device = 'cuda' if 2.5*total_bytes < mem_free else 'cpu'
|
| 62 |
+
print(f'Using {storage_device.upper()} memory for storage.')
|
| 63 |
+
return self._reserve_buffer(
|
| 64 |
+
LazyTensorStorage(self._capacity, device=torch.device(storage_device))
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
def _to_device(self, *args, device=None):
|
| 68 |
+
if device is None:
|
| 69 |
+
device = self._device
|
| 70 |
+
return (arg.to(device, non_blocking=True) \
|
| 71 |
+
if arg is not None else None for arg in args)
|
| 72 |
+
|
| 73 |
+
def _prepare_batch(self, td):
|
| 74 |
+
"""
|
| 75 |
+
Prepare a sampled batch for training (post-processing).
|
| 76 |
+
Expects `td` to be a TensorDict with batch size TxB.
|
| 77 |
+
"""
|
| 78 |
+
obs = td['obs']
|
| 79 |
+
action = td['action'][1:]
|
| 80 |
+
reward = td['reward'][1:].unsqueeze(-1)
|
| 81 |
+
task = td['task'][0] if 'task' in td.keys() else None
|
| 82 |
+
return self._to_device(obs, action, reward, task)
|
| 83 |
+
|
| 84 |
+
def add(self, td):
|
| 85 |
+
"""Add an episode to the buffer.
|
| 86 |
+
Before vec: td[episode_len+1, ..] ..=act_dim, obs_dim, None
|
| 87 |
+
After: add num_env to the batch dimension
|
| 88 |
+
Note: for official vec code @51d6b8d, it seems to have batch dimension [episode_len+1, num_env]"""
|
| 89 |
+
|
| 90 |
+
for _td in td:
|
| 91 |
+
_td['episode'] = torch.ones_like(_td['reward'], dtype=torch.int64) * self._num_eps
|
| 92 |
+
if self._num_eps == 0:
|
| 93 |
+
self._buffer = self._init(_td)
|
| 94 |
+
self._buffer.extend(_td)
|
| 95 |
+
self._num_eps += 1
|
| 96 |
+
return self._num_eps
|
| 97 |
+
|
| 98 |
+
def sample(self):
|
| 99 |
+
"""Sample a batch of subsequences from the buffer."""
|
| 100 |
+
td = self._buffer.sample().view(-1, self.cfg.horizon+1).permute(1, 0)
|
| 101 |
+
return self._prepare_batch(td)
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/common/init.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def weight_init(m):
|
| 5 |
+
"""Custom weight initialization for TD-MPC2."""
|
| 6 |
+
if isinstance(m, nn.Linear):
|
| 7 |
+
nn.init.trunc_normal_(m.weight, std=0.02)
|
| 8 |
+
if m.bias is not None:
|
| 9 |
+
nn.init.constant_(m.bias, 0)
|
| 10 |
+
elif isinstance(m, nn.Embedding):
|
| 11 |
+
nn.init.uniform_(m.weight, -0.02, 0.02)
|
| 12 |
+
elif isinstance(m, nn.ParameterList):
|
| 13 |
+
for i,p in enumerate(m):
|
| 14 |
+
if p.dim() == 3: # Linear
|
| 15 |
+
nn.init.trunc_normal_(p, std=0.02) # Weight
|
| 16 |
+
nn.init.constant_(m[i+1], 0) # Bias
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def zero_(params):
|
| 20 |
+
"""Initialize parameters to zero."""
|
| 21 |
+
for p in params:
|
| 22 |
+
p.data.fill_(0)
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/common/logger.py
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import datetime
|
| 3 |
+
import re
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pandas as pd
|
| 6 |
+
from termcolor import colored
|
| 7 |
+
from omegaconf import OmegaConf
|
| 8 |
+
from mani_skill.utils.visualization.misc import tile_images
|
| 9 |
+
import wandb
|
| 10 |
+
from common import TASK_SET
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
CONSOLE_FORMAT = [
|
| 14 |
+
("iteration", "I", "int"),
|
| 15 |
+
("episode", "E", "int"),
|
| 16 |
+
("step", "I", "int"),
|
| 17 |
+
("return", "R", "float"),
|
| 18 |
+
("success_once", "S", "float"),
|
| 19 |
+
("fail_once", "F", "float"),
|
| 20 |
+
("total_time", "T", "time"),
|
| 21 |
+
# Added for maniskill rl baselines matrics
|
| 22 |
+
# ("reward", "RET", "float"),
|
| 23 |
+
# ("episode_len", "L", "int"),
|
| 24 |
+
# ("rollout_time", "RT", "float"),
|
| 25 |
+
# ("rollout_fps", "RF", "float"),
|
| 26 |
+
# ("update_time", "U", "float"),
|
| 27 |
+
|
| 28 |
+
]
|
| 29 |
+
|
| 30 |
+
CAT_TO_COLOR = {
|
| 31 |
+
"pretrain": "yellow",
|
| 32 |
+
"train": "blue",
|
| 33 |
+
"eval": "green",
|
| 34 |
+
# Added for maniskill rl baselines matrics
|
| 35 |
+
"time" : "magenta",
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def make_dir(dir_path):
|
| 40 |
+
"""Create directory if it does not already exist."""
|
| 41 |
+
try:
|
| 42 |
+
os.makedirs(dir_path)
|
| 43 |
+
except OSError:
|
| 44 |
+
pass
|
| 45 |
+
return dir_path
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def print_run(cfg): # this function has to be called after make_env
|
| 49 |
+
"""
|
| 50 |
+
Pretty-printing of current run information.
|
| 51 |
+
Logger calls this method at initialization.
|
| 52 |
+
"""
|
| 53 |
+
prefix, color, attrs = " ", "green", ["bold"]
|
| 54 |
+
|
| 55 |
+
def _limstr(s, maxlen=36):
|
| 56 |
+
return str(s[:maxlen]) + "..." if len(str(s)) > maxlen else s
|
| 57 |
+
|
| 58 |
+
def _pprint(k, v):
|
| 59 |
+
print(
|
| 60 |
+
prefix + colored(f'{k.capitalize()+":":<15}', color, attrs=attrs), _limstr(v)
|
| 61 |
+
)
|
| 62 |
+
|
| 63 |
+
observations = ", ".join([str(v) for v in cfg.obs_shape.values()])
|
| 64 |
+
kvs = [
|
| 65 |
+
("task", cfg.env_id),
|
| 66 |
+
("sim backend", cfg.env_type),
|
| 67 |
+
("steps", f"{int(cfg.steps):,}"),
|
| 68 |
+
("observations", observations),
|
| 69 |
+
("actions", cfg.action_dim),
|
| 70 |
+
("experiment", cfg.exp_name),
|
| 71 |
+
]
|
| 72 |
+
w = np.max([len(_limstr(str(kv[1]))) for kv in kvs]) + 25
|
| 73 |
+
div = "-" * w
|
| 74 |
+
print(div)
|
| 75 |
+
for k, v in kvs:
|
| 76 |
+
_pprint(k, v)
|
| 77 |
+
print(div)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def cfg_to_group(cfg, return_list=False):
|
| 81 |
+
"""
|
| 82 |
+
Return a wandb-safe group name for logging.
|
| 83 |
+
Optionally returns group name as list.
|
| 84 |
+
"""
|
| 85 |
+
lst = [cfg.env_id, re.sub("[^0-9a-zA-Z]+", "-", cfg.exp_name)]
|
| 86 |
+
return lst if return_list else "-".join(lst)
|
| 87 |
+
class Logger:
|
| 88 |
+
"""Primary logging object. Logs either locally or using wandb."""
|
| 89 |
+
|
| 90 |
+
def __init__(self, cfg, manager = None):
|
| 91 |
+
self.cfg = cfg
|
| 92 |
+
self._log_dir = make_dir(cfg.work_dir)
|
| 93 |
+
self._model_dir = make_dir(self._log_dir / "models")
|
| 94 |
+
self._save_csv = cfg.save_csv
|
| 95 |
+
self._save_agent = cfg.save_agent
|
| 96 |
+
self._group = cfg_to_group(cfg)
|
| 97 |
+
self._seed = cfg.seed
|
| 98 |
+
self._eval = []
|
| 99 |
+
self.save_video_local = cfg.save_video_local
|
| 100 |
+
# Set up wandb
|
| 101 |
+
self.project = cfg.get("wandb_project", "none")
|
| 102 |
+
self.entity = cfg.get("wandb_entity", "none")
|
| 103 |
+
self.name = cfg.get("wandb_name", "none")
|
| 104 |
+
self.group = cfg.get("wandb_group", "none")
|
| 105 |
+
if not cfg.wandb or self.project == "none" or self.entity == "none":
|
| 106 |
+
print(colored("Wandb disabled.", "blue", attrs=["bold"]))
|
| 107 |
+
self._wandb = None
|
| 108 |
+
else:
|
| 109 |
+
print(colored("Logs will be synced with wandb.", "blue", attrs=["bold"]))
|
| 110 |
+
os.environ["WANDB_SILENT"] = "true" if cfg.wandb_silent else "false"
|
| 111 |
+
# Modified for Maniskill RL Baseline Logging Convention
|
| 112 |
+
wandb_tags = cfg_to_group(cfg, return_list=True) + [f"seed:{cfg.seed}"] + ["tdmpc2"]
|
| 113 |
+
if cfg.setting_tag != 'none':
|
| 114 |
+
wandb_tags += [cfg.setting_tag]
|
| 115 |
+
self._wandb = wandb.init(
|
| 116 |
+
project=self.project,
|
| 117 |
+
entity=self.entity,
|
| 118 |
+
name=self.name,
|
| 119 |
+
group=self.group,
|
| 120 |
+
tags=wandb_tags,
|
| 121 |
+
dir=self._log_dir,
|
| 122 |
+
config=OmegaConf.to_container(cfg, resolve=True),
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
self.wandb_videos = manager.list()
|
| 126 |
+
self.lock = manager.Lock()
|
| 127 |
+
|
| 128 |
+
@property
|
| 129 |
+
def model_dir(self):
|
| 130 |
+
return self._model_dir
|
| 131 |
+
|
| 132 |
+
def save_agent(self, agent=None, identifier='final'):
|
| 133 |
+
if self._save_agent and agent:
|
| 134 |
+
fp = self._model_dir / f'{str(identifier)}.pt'
|
| 135 |
+
agent.save(fp)
|
| 136 |
+
if self._wandb:
|
| 137 |
+
artifact = wandb.Artifact(
|
| 138 |
+
self.group + '-' + str(self._seed) + '-' + str(identifier),
|
| 139 |
+
type='model',
|
| 140 |
+
)
|
| 141 |
+
artifact.add_file(fp)
|
| 142 |
+
self._wandb.log_artifact(artifact)
|
| 143 |
+
|
| 144 |
+
def finish(self, agent=None):
|
| 145 |
+
try:
|
| 146 |
+
self.save_agent(agent)
|
| 147 |
+
except Exception as e:
|
| 148 |
+
print(colored(f"Failed to save model: {e}", "red"))
|
| 149 |
+
if self._wandb:
|
| 150 |
+
self._wandb.finish()
|
| 151 |
+
|
| 152 |
+
def _format(self, key, value, ty):
|
| 153 |
+
if ty == "int":
|
| 154 |
+
return f'{colored(key+":", "blue")} {int(value):,}'
|
| 155 |
+
elif ty == "float":
|
| 156 |
+
return f'{colored(key+":", "blue")} {value:.02f}'
|
| 157 |
+
elif ty == "time":
|
| 158 |
+
value = str(datetime.timedelta(seconds=int(value)))
|
| 159 |
+
return f'{colored(key+":", "blue")} {value}'
|
| 160 |
+
else:
|
| 161 |
+
raise f"invalid log format type: {ty}"
|
| 162 |
+
|
| 163 |
+
def _print(self, d, category):
|
| 164 |
+
category = colored(category, CAT_TO_COLOR[category])
|
| 165 |
+
pieces = [f" {category:<14}"]
|
| 166 |
+
for k, disp_k, ty in CONSOLE_FORMAT:
|
| 167 |
+
if k in d:
|
| 168 |
+
pieces.append(f"{self._format(disp_k, d[k], ty):<22}")
|
| 169 |
+
print(" ".join(pieces))
|
| 170 |
+
|
| 171 |
+
def add_wandb_video(self, frames: np.ndarray): # (num_envs, num_frames, h, w, 3)
|
| 172 |
+
with self.lock:
|
| 173 |
+
if self.cfg.wandb and len(frames) > 0:
|
| 174 |
+
self.wandb_videos.extend(frames)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def log_wandb_video(self, step, fps=15, key='videos/eval_video'):
|
| 178 |
+
with self.lock:
|
| 179 |
+
if self.cfg.wandb and len(self.wandb_videos) > 0 :
|
| 180 |
+
nrows = int(np.sqrt(len(self.wandb_videos)))
|
| 181 |
+
wandb_video = np.stack(self.wandb_videos)
|
| 182 |
+
wandb_video = wandb_video.transpose(1, 0, 2, 3, 4)
|
| 183 |
+
wandb_video = [tile_images(rgbs, nrows=nrows) for rgbs in wandb_video]
|
| 184 |
+
wandb_video = np.stack(wandb_video)
|
| 185 |
+
self.wandb_videos[:] = []
|
| 186 |
+
return self._wandb.log(
|
| 187 |
+
{key: wandb.Video(wandb_video.transpose(0, 3, 1, 2), fps=fps, format='mp4')}, step=step
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
def log(self, d, category="train"):
|
| 191 |
+
assert category in CAT_TO_COLOR.keys(), f"invalid category: {category}"
|
| 192 |
+
if self._wandb:
|
| 193 |
+
if category in {"train", "eval", "time"}:
|
| 194 |
+
xkey = "step"
|
| 195 |
+
elif category == "pretrain":
|
| 196 |
+
xkey = "iteration"
|
| 197 |
+
_d = dict()
|
| 198 |
+
for k, v in d.items():
|
| 199 |
+
_d[category + "/" + k] = v
|
| 200 |
+
self._wandb.log(_d, step=d[xkey])
|
| 201 |
+
if category == "eval" and self._save_csv:
|
| 202 |
+
keys = ["step", "return"]
|
| 203 |
+
self._eval.append(np.array([d[keys[0]], d[keys[1]]]))
|
| 204 |
+
pd.DataFrame(np.array(self._eval)).to_csv(
|
| 205 |
+
self._log_dir / "eval.csv", header=keys, index=None
|
| 206 |
+
)
|
| 207 |
+
if category != 'time':
|
| 208 |
+
self._print(d, category)
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/common/scale.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class RunningScale:
|
| 5 |
+
"""Running trimmed scale estimator."""
|
| 6 |
+
|
| 7 |
+
def __init__(self, cfg):
|
| 8 |
+
self.cfg = cfg
|
| 9 |
+
self._value = torch.ones(1, dtype=torch.float32, device=torch.device('cuda'))
|
| 10 |
+
self._percentiles = torch.tensor([5, 95], dtype=torch.float32, device=torch.device('cuda'))
|
| 11 |
+
|
| 12 |
+
def state_dict(self):
|
| 13 |
+
return dict(value=self._value, percentiles=self._percentiles)
|
| 14 |
+
|
| 15 |
+
def load_state_dict(self, state_dict):
|
| 16 |
+
self._value.data.copy_(state_dict['value'])
|
| 17 |
+
self._percentiles.data.copy_(state_dict['percentiles'])
|
| 18 |
+
|
| 19 |
+
@property
|
| 20 |
+
def value(self):
|
| 21 |
+
return self._value.cpu().item()
|
| 22 |
+
|
| 23 |
+
def _percentile(self, x):
|
| 24 |
+
x_dtype, x_shape = x.dtype, x.shape
|
| 25 |
+
x = x.view(x.shape[0], -1)
|
| 26 |
+
in_sorted, _ = torch.sort(x, dim=0)
|
| 27 |
+
positions = self._percentiles * (x.shape[0]-1) / 100
|
| 28 |
+
floored = torch.floor(positions)
|
| 29 |
+
ceiled = floored + 1
|
| 30 |
+
ceiled[ceiled > x.shape[0] - 1] = x.shape[0] - 1
|
| 31 |
+
weight_ceiled = positions-floored
|
| 32 |
+
weight_floored = 1.0 - weight_ceiled
|
| 33 |
+
d0 = in_sorted[floored.long(), :] * weight_floored[:, None]
|
| 34 |
+
d1 = in_sorted[ceiled.long(), :] * weight_ceiled[:, None]
|
| 35 |
+
return (d0+d1).view(-1, *x_shape[1:]).type(x_dtype)
|
| 36 |
+
|
| 37 |
+
def update(self, x):
|
| 38 |
+
percentiles = self._percentile(x.detach())
|
| 39 |
+
value = torch.clamp(percentiles[1] - percentiles[0], min=1.)
|
| 40 |
+
self._value.data.lerp_(value, self.cfg.tau)
|
| 41 |
+
|
| 42 |
+
def __call__(self, x, update=False):
|
| 43 |
+
if update:
|
| 44 |
+
self.update(x)
|
| 45 |
+
return x * (1/self.value)
|
| 46 |
+
|
| 47 |
+
def __repr__(self):
|
| 48 |
+
return f'RunningScale(S: {self.value})'
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/common/seed.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def set_seed(seed):
|
| 8 |
+
"""Set seed for reproducibility."""
|
| 9 |
+
random.seed(seed)
|
| 10 |
+
np.random.seed(seed)
|
| 11 |
+
torch.manual_seed(seed)
|
| 12 |
+
torch.cuda.manual_seed_all(seed)
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/common/world_model.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from copy import deepcopy
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
|
| 7 |
+
from common import layers, math, init
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class WorldModel(nn.Module):
|
| 11 |
+
"""
|
| 12 |
+
TD-MPC2 implicit world model architecture.
|
| 13 |
+
Can be used for both single-task and multi-task experiments.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
def __init__(self, cfg):
|
| 17 |
+
super().__init__()
|
| 18 |
+
self.cfg = cfg
|
| 19 |
+
if cfg.multitask:
|
| 20 |
+
self._task_emb = nn.Embedding(len(cfg.tasks), cfg.task_dim, max_norm=1)
|
| 21 |
+
self._action_masks = torch.zeros(len(cfg.tasks), cfg.action_dim)
|
| 22 |
+
for i in range(len(cfg.tasks)):
|
| 23 |
+
self._action_masks[i, :cfg.action_dims[i]] = 1.
|
| 24 |
+
self._encoder = layers.enc(cfg)
|
| 25 |
+
self._dynamics = layers.mlp(cfg.latent_dim + cfg.action_dim + cfg.task_dim, 2*[cfg.mlp_dim], cfg.latent_dim, act=layers.SimNorm(cfg))
|
| 26 |
+
self._reward = layers.mlp(cfg.latent_dim + cfg.action_dim + cfg.task_dim, 2*[cfg.mlp_dim], max(cfg.num_bins, 1))
|
| 27 |
+
self._pi = layers.mlp(cfg.latent_dim + cfg.task_dim, 2*[cfg.mlp_dim], 2*cfg.action_dim)
|
| 28 |
+
self._Qs = layers.Ensemble([layers.mlp(cfg.latent_dim + cfg.action_dim + cfg.task_dim, 2*[cfg.mlp_dim],
|
| 29 |
+
max(cfg.num_bins, 1), dropout=cfg.dropout) for _ in range(cfg.num_q)])
|
| 30 |
+
self.apply(init.weight_init)
|
| 31 |
+
init.zero_([self._reward[-1].weight, self._Qs.params[-2]])
|
| 32 |
+
self._target_Qs = deepcopy(self._Qs).requires_grad_(False)
|
| 33 |
+
self.log_std_min = torch.tensor(cfg.log_std_min)
|
| 34 |
+
self.log_std_dif = torch.tensor(cfg.log_std_max) - self.log_std_min
|
| 35 |
+
|
| 36 |
+
@property
|
| 37 |
+
def total_params(self):
|
| 38 |
+
return sum(p.numel() for p in self.parameters() if p.requires_grad)
|
| 39 |
+
|
| 40 |
+
def to(self, *args, **kwargs):
|
| 41 |
+
"""
|
| 42 |
+
Overriding `to` method to also move additional tensors to device.
|
| 43 |
+
"""
|
| 44 |
+
super().to(*args, **kwargs)
|
| 45 |
+
if self.cfg.multitask:
|
| 46 |
+
self._action_masks = self._action_masks.to(*args, **kwargs)
|
| 47 |
+
self.log_std_min = self.log_std_min.to(*args, **kwargs)
|
| 48 |
+
self.log_std_dif = self.log_std_dif.to(*args, **kwargs)
|
| 49 |
+
return self
|
| 50 |
+
|
| 51 |
+
def train(self, mode=True):
|
| 52 |
+
"""
|
| 53 |
+
Overriding `train` method to keep target Q-networks in eval mode.
|
| 54 |
+
"""
|
| 55 |
+
super().train(mode)
|
| 56 |
+
self._target_Qs.train(False)
|
| 57 |
+
return self
|
| 58 |
+
|
| 59 |
+
def track_q_grad(self, mode=True):
|
| 60 |
+
"""
|
| 61 |
+
Enables/disables gradient tracking of Q-networks.
|
| 62 |
+
Avoids unnecessary computation during policy optimization.
|
| 63 |
+
This method also enables/disables gradients for task embeddings.
|
| 64 |
+
"""
|
| 65 |
+
for p in self._Qs.parameters():
|
| 66 |
+
p.requires_grad_(mode)
|
| 67 |
+
if self.cfg.multitask:
|
| 68 |
+
for p in self._task_emb.parameters():
|
| 69 |
+
p.requires_grad_(mode)
|
| 70 |
+
|
| 71 |
+
def soft_update_target_Q(self):
|
| 72 |
+
"""
|
| 73 |
+
Soft-update target Q-networks using Polyak averaging.
|
| 74 |
+
"""
|
| 75 |
+
with torch.no_grad():
|
| 76 |
+
for p, p_target in zip(self._Qs.parameters(), self._target_Qs.parameters()):
|
| 77 |
+
p_target.data.lerp_(p.data, self.cfg.tau)
|
| 78 |
+
|
| 79 |
+
def task_emb(self, x, task):
|
| 80 |
+
"""
|
| 81 |
+
Continuous task embedding for multi-task experiments.
|
| 82 |
+
Retrieves the task embedding for a given task ID `task`
|
| 83 |
+
and concatenates it to the input `x`.
|
| 84 |
+
"""
|
| 85 |
+
if isinstance(task, int):
|
| 86 |
+
task = torch.tensor([task], device=x.device)
|
| 87 |
+
emb = self._task_emb(task.long())
|
| 88 |
+
if x.ndim == 3:
|
| 89 |
+
emb = emb.unsqueeze(0).repeat(x.shape[0], 1, 1)
|
| 90 |
+
elif emb.shape[0] == 1:
|
| 91 |
+
emb = emb.repeat(x.shape[0], 1)
|
| 92 |
+
return torch.cat([x, emb], dim=-1)
|
| 93 |
+
|
| 94 |
+
def encode(self, obs, task):
|
| 95 |
+
"""
|
| 96 |
+
Encodes an observation into its latent representation. Online trainer obs is [1, obs_shape], task is None
|
| 97 |
+
This implementation assumes a single state-based observation. Should be already batched.
|
| 98 |
+
Should be ok.
|
| 99 |
+
"""
|
| 100 |
+
if self.cfg.multitask:
|
| 101 |
+
obs = self.task_emb(obs, task)
|
| 102 |
+
if self.cfg.obs == 'rgb' and obs.ndim == 5:
|
| 103 |
+
return torch.stack([self._encoder[self.cfg.obs](o) for o in obs])
|
| 104 |
+
return self._encoder[self.cfg.obs](obs)
|
| 105 |
+
|
| 106 |
+
def next(self, z, a, task):
|
| 107 |
+
"""
|
| 108 |
+
z[]
|
| 109 |
+
Predicts the next latent state given the current latent state and action.
|
| 110 |
+
"""
|
| 111 |
+
if self.cfg.multitask:
|
| 112 |
+
z = self.task_emb(z, task)
|
| 113 |
+
z = torch.cat([z, a], dim=-1)
|
| 114 |
+
return self._dynamics(z)
|
| 115 |
+
|
| 116 |
+
def reward(self, z, a, task):
|
| 117 |
+
"""
|
| 118 |
+
Predicts instantaneous (single-step) reward.
|
| 119 |
+
"""
|
| 120 |
+
if self.cfg.multitask:
|
| 121 |
+
z = self.task_emb(z, task)
|
| 122 |
+
z = torch.cat([z, a], dim=-1)
|
| 123 |
+
return self._reward(z)
|
| 124 |
+
|
| 125 |
+
def pi(self, z, task):
|
| 126 |
+
"""
|
| 127 |
+
z[~, 1]
|
| 128 |
+
Return mu[~, action_dim], pi[~, action_dim], log_pi[~, 1], log_std[~, action_dim]
|
| 129 |
+
|
| 130 |
+
Samples an action from the policy prior.
|
| 131 |
+
The policy prior is a Gaussian distribution with
|
| 132 |
+
mean and (log) std predicted by a neural network.
|
| 133 |
+
"""
|
| 134 |
+
if self.cfg.multitask:
|
| 135 |
+
z = self.task_emb(z, task)
|
| 136 |
+
|
| 137 |
+
# Gaussian policy prior
|
| 138 |
+
mu, log_std = self._pi(z).chunk(2, dim=-1)
|
| 139 |
+
log_std = math.log_std(log_std, self.log_std_min, self.log_std_dif)
|
| 140 |
+
eps = torch.randn_like(mu)
|
| 141 |
+
|
| 142 |
+
if self.cfg.multitask: # Mask out unused action dimensions
|
| 143 |
+
mu = mu * self._action_masks[task]
|
| 144 |
+
log_std = log_std * self._action_masks[task]
|
| 145 |
+
eps = eps * self._action_masks[task]
|
| 146 |
+
action_dims = self._action_masks.sum(-1)[task].unsqueeze(-1)
|
| 147 |
+
else: # No masking
|
| 148 |
+
action_dims = None
|
| 149 |
+
|
| 150 |
+
log_pi = math.gaussian_logprob(eps, log_std, size=action_dims)
|
| 151 |
+
pi = mu + eps * log_std.exp()
|
| 152 |
+
mu, pi, log_pi = math.squash(mu, pi, log_pi)
|
| 153 |
+
|
| 154 |
+
return mu, pi, log_pi, log_std
|
| 155 |
+
|
| 156 |
+
def Q(self, z, a, task, return_type='min', target=False):
|
| 157 |
+
"""
|
| 158 |
+
Predict state-action value. z[~, latent_dim], a[~, action_dim] -> [num_q, ~, num_bins] if all else [~, 1]
|
| 159 |
+
`return_type` can be one of [`min`, `avg`, `all`]:
|
| 160 |
+
- `min`: return the minimum of two randomly subsampled Q-values.
|
| 161 |
+
- `avg`: return the average of two randomly subsampled Q-values.
|
| 162 |
+
- `all`: return all Q-values.
|
| 163 |
+
`target` specifies whether to use the target Q-networks or not.
|
| 164 |
+
"""
|
| 165 |
+
assert return_type in {'min', 'avg', 'all'}
|
| 166 |
+
|
| 167 |
+
if self.cfg.multitask:
|
| 168 |
+
z = self.task_emb(z, task)
|
| 169 |
+
|
| 170 |
+
z = torch.cat([z, a], dim=-1)
|
| 171 |
+
out = (self._target_Qs if target else self._Qs)(z)
|
| 172 |
+
|
| 173 |
+
if return_type == 'all':
|
| 174 |
+
return out
|
| 175 |
+
|
| 176 |
+
Q1, Q2 = out[np.random.choice(self.cfg.num_q, 2, replace=False)]
|
| 177 |
+
Q1, Q2 = math.two_hot_inv(Q1, self.cfg), math.two_hot_inv(Q2, self.cfg)
|
| 178 |
+
return torch.min(Q1, Q2) if return_type == 'min' else (Q1 + Q2) / 2
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/config.yaml
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- override hydra/launcher: submitit_local
|
| 3 |
+
|
| 4 |
+
# environment
|
| 5 |
+
env_id: PushCube-v1
|
| 6 |
+
obs: state # or rgb
|
| 7 |
+
control_mode: default # or pd_joint_delta_pos or pd_ee_delta_pose
|
| 8 |
+
num_envs: 32
|
| 9 |
+
num_eval_envs: 4
|
| 10 |
+
env_type: gpu # cpu
|
| 11 |
+
render_mode: rgb_array # ['rgb_array' for quality, or 'sensors' for speed]
|
| 12 |
+
render_size: 64
|
| 13 |
+
setting_tag: none # ['none', 'walltime_efficient', 'sample_efficient', ...] for wandb tags
|
| 14 |
+
|
| 15 |
+
# evaluation
|
| 16 |
+
checkpoint: ???
|
| 17 |
+
eval_episodes_per_env: 2 # total (eval_episodes_per_env * num_eval_envs number) of eval episodes
|
| 18 |
+
eval_freq: 50000
|
| 19 |
+
|
| 20 |
+
# training
|
| 21 |
+
steps: 1_000_000
|
| 22 |
+
batch_size: 256
|
| 23 |
+
reward_coef: 0.1
|
| 24 |
+
value_coef: 0.1
|
| 25 |
+
consistency_coef: 20
|
| 26 |
+
rho: 0.5
|
| 27 |
+
lr: 3e-4
|
| 28 |
+
enc_lr_scale: 0.3
|
| 29 |
+
grad_clip_norm: 20
|
| 30 |
+
tau: 0.01
|
| 31 |
+
discount_denom: 5
|
| 32 |
+
discount_min: 0.95
|
| 33 |
+
discount_max: 0.995
|
| 34 |
+
buffer_size: 1_000_000
|
| 35 |
+
exp_name: default
|
| 36 |
+
data_dir: ???
|
| 37 |
+
steps_per_update: 1
|
| 38 |
+
|
| 39 |
+
# planning
|
| 40 |
+
mpc: true
|
| 41 |
+
iterations: 6
|
| 42 |
+
num_samples: 512
|
| 43 |
+
num_elites: 64
|
| 44 |
+
num_pi_trajs: 24
|
| 45 |
+
horizon: 3
|
| 46 |
+
min_std: 0.05
|
| 47 |
+
max_std: 2
|
| 48 |
+
temperature: 0.5
|
| 49 |
+
|
| 50 |
+
# actor
|
| 51 |
+
log_std_min: -10
|
| 52 |
+
log_std_max: 2
|
| 53 |
+
entropy_coef: 1e-4
|
| 54 |
+
|
| 55 |
+
# critic
|
| 56 |
+
num_bins: 101
|
| 57 |
+
vmin: -10
|
| 58 |
+
vmax: +10
|
| 59 |
+
|
| 60 |
+
# architecture
|
| 61 |
+
model_size: ???
|
| 62 |
+
num_enc_layers: 2
|
| 63 |
+
enc_dim: 256
|
| 64 |
+
num_channels: 32
|
| 65 |
+
mlp_dim: 512
|
| 66 |
+
latent_dim: 512
|
| 67 |
+
task_dim: 0
|
| 68 |
+
num_q: 5
|
| 69 |
+
dropout: 0.01
|
| 70 |
+
simnorm_dim: 8
|
| 71 |
+
|
| 72 |
+
# logging
|
| 73 |
+
wandb_project:
|
| 74 |
+
wandb_group:
|
| 75 |
+
wandb_name:
|
| 76 |
+
wandb_entity:
|
| 77 |
+
wandb_silent: false
|
| 78 |
+
wandb: false # enable wandb
|
| 79 |
+
save_csv: true
|
| 80 |
+
|
| 81 |
+
# misc
|
| 82 |
+
save_video_local: false # save video in eval_video for evaluation during training
|
| 83 |
+
save_agent: true
|
| 84 |
+
seed: 1
|
| 85 |
+
|
| 86 |
+
# convenience
|
| 87 |
+
work_dir: ???
|
| 88 |
+
task_title: ???
|
| 89 |
+
multitask: ???
|
| 90 |
+
tasks: ???
|
| 91 |
+
obs_shape: ???
|
| 92 |
+
action_dim: ???
|
| 93 |
+
episode_length: ???
|
| 94 |
+
obs_shapes: ???
|
| 95 |
+
action_dims: ???
|
| 96 |
+
episode_lengths: ???
|
| 97 |
+
seed_steps: ???
|
| 98 |
+
bin_size: ???
|
| 99 |
+
|
| 100 |
+
# Added for Maniskill RL Baselines Config Convention (don't assign to them)
|
| 101 |
+
env_cfg:
|
| 102 |
+
env_id: ???
|
| 103 |
+
control_mode: ??? # pd_joint_delta_pos or pd_ee_delta_pose
|
| 104 |
+
obs_mode: ???
|
| 105 |
+
reward_mode: ???
|
| 106 |
+
num_envs: ???
|
| 107 |
+
sim_backend: ??? # cpu or gpu
|
| 108 |
+
partial_reset: false
|
| 109 |
+
env_horizon: ???
|
| 110 |
+
eval_env_cfg:
|
| 111 |
+
env_id: ???
|
| 112 |
+
control_mode: ???
|
| 113 |
+
obs_mode: ???
|
| 114 |
+
reward_mode: ???
|
| 115 |
+
num_envs: ???
|
| 116 |
+
sim_backend: ???
|
| 117 |
+
env_horizon: ???
|
| 118 |
+
partial_reset: false
|
| 119 |
+
num_eval_episodes: ???
|
| 120 |
+
discount: ???
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/environment.yaml
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: tdmpc2-ms
|
| 2 |
+
channels:
|
| 3 |
+
- pytorch-nightly
|
| 4 |
+
- nvidia
|
| 5 |
+
- conda-forge
|
| 6 |
+
- defaults
|
| 7 |
+
dependencies:
|
| 8 |
+
- cudatoolkit=11.7
|
| 9 |
+
- glew=2.1.0
|
| 10 |
+
- glib=2.68.4
|
| 11 |
+
- pip=21.0
|
| 12 |
+
- python=3.9.0
|
| 13 |
+
- pytorch>=2.2.2
|
| 14 |
+
- torchvision>=0.16.2
|
| 15 |
+
- pip:
|
| 16 |
+
- absl-py==2.0.0
|
| 17 |
+
- "cython<3"
|
| 18 |
+
- dm-control==1.0.8
|
| 19 |
+
- ffmpeg==1.4
|
| 20 |
+
- glfw==2.6.4
|
| 21 |
+
- hydra-core==1.3.2
|
| 22 |
+
- hydra-submitit-launcher==1.2.0
|
| 23 |
+
- imageio==2.33.1
|
| 24 |
+
- imageio-ffmpeg==0.4.9
|
| 25 |
+
- kornia==0.7.1
|
| 26 |
+
- moviepy==1.0.3
|
| 27 |
+
- mujoco==2.3.1
|
| 28 |
+
- mujoco-py==2.1.2.14
|
| 29 |
+
- numpy==1.23.5
|
| 30 |
+
- omegaconf==2.3.0
|
| 31 |
+
- open3d==0.18.0
|
| 32 |
+
- opencv-contrib-python==4.9.0.80
|
| 33 |
+
- opencv-python==4.9.0.80
|
| 34 |
+
- pandas==2.1.4
|
| 35 |
+
- sapien==3.0.0.b1
|
| 36 |
+
- submitit==1.5.1
|
| 37 |
+
- setuptools==65.5.0
|
| 38 |
+
- patchelf==0.17.2.1
|
| 39 |
+
- protobuf==4.25.2
|
| 40 |
+
- pillow==10.2.0
|
| 41 |
+
- pyquaternion==0.9.9
|
| 42 |
+
- tensordict-nightly==2024.3.26
|
| 43 |
+
- termcolor==2.4.0
|
| 44 |
+
- torchrl-nightly==2024.3.26
|
| 45 |
+
- transforms3d==0.4.1
|
| 46 |
+
- trimesh==4.0.9
|
| 47 |
+
- tqdm==4.66.1
|
| 48 |
+
- wandb==0.16.2
|
| 49 |
+
- wheel==0.38.0
|
| 50 |
+
- mani_skill>=3.0.0b12
|
| 51 |
+
####################
|
| 52 |
+
# Gym:
|
| 53 |
+
# (unmaintained but required for maniskill2/meta-world/myosuite)
|
| 54 |
+
# - gym==0.21.0
|
| 55 |
+
####################
|
| 56 |
+
# ManiSkill2:
|
| 57 |
+
# (requires gym==0.21.0 which occasionally breaks)
|
| 58 |
+
# - mani-skill2==0.4.1
|
| 59 |
+
####################
|
| 60 |
+
# Meta-World:
|
| 61 |
+
# (requires gym==0.21.0 which occasionally breaks)
|
| 62 |
+
# - git+https://github.com/Farama-Foundation/Metaworld.git@04be337a12305e393c0caf0cbf5ec7755c7c8feb
|
| 63 |
+
####################
|
| 64 |
+
# MyoSuite:
|
| 65 |
+
# (requires gym==0.13 which conflicts with meta-world / mani-skill2)
|
| 66 |
+
# - myosuite
|
| 67 |
+
####################
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/evaluate.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
os.environ['MUJOCO_GL'] = 'egl'
|
| 3 |
+
import warnings
|
| 4 |
+
warnings.filterwarnings('ignore')
|
| 5 |
+
|
| 6 |
+
import hydra
|
| 7 |
+
import imageio
|
| 8 |
+
import numpy as np
|
| 9 |
+
import torch
|
| 10 |
+
from termcolor import colored
|
| 11 |
+
|
| 12 |
+
from common.parser import parse_cfg
|
| 13 |
+
from common.seed import set_seed
|
| 14 |
+
from envs import make_envs
|
| 15 |
+
from tdmpc2 import TDMPC2
|
| 16 |
+
|
| 17 |
+
torch.backends.cudnn.benchmark = True
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@hydra.main(config_name='config', config_path='.')
def evaluate(cfg: dict):
    """
    Script for evaluating a single-task / multi-task TD-MPC2 checkpoint.

    Most relevant args:
        `env_id`: task name (eg. PickCube-v0)
        `model_size`: model size, must be one of `[1, 5, 19, 48, 317]` (default: 5)
        `checkpoint`: path to model checkpoint to load
        `eval_episodes`: number of episodes to evaluate on per task (default: 10)
        `save_video_local`: whether to save a video of the evaluation (default: True)
        `seed`: random seed (default: 1)

    See config.yaml for a full list of args.

    Example usage:
    ```
    $ python evaluate.py task=mt80 model_size=48 checkpoint=/path/to/mt80-48M.pt
    $ python evaluate.py task=mt30 model_size=317 checkpoint=/path/to/mt30-317M.pt
    $ python evaluate.py task=dog-run checkpoint=/path/to/dog-1.pt save_video_local=true
    ```
    """
    assert torch.cuda.is_available()
    assert cfg.eval_episodes_per_env > 0, 'Must evaluate at least 1 episode.'
    # Total episode count is spread over the (single) env sequentially.
    eval_episodes = cfg.eval_episodes_per_env * cfg.num_eval_envs
    cfg.num_envs = 1 # to keep the code similar and logging video simpler
    cfg = parse_cfg(cfg)
    assert not cfg.multitask, colored('Warning: multi-task models is not currently supported for maniskill.', 'red', attrs=['bold'])
    set_seed(cfg.seed)
    print(colored(f'Task: {cfg.env_id}', 'blue', attrs=['bold']))
    print(colored(f'Model size: {cfg.get("model_size", "default")}', 'blue', attrs=['bold']))
    print(colored(f'Checkpoint: {cfg.checkpoint}', 'blue', attrs=['bold']))

    # Make environment
    env = make_envs(cfg, cfg.num_envs, is_eval=True)

    # Load agent from the given checkpoint (must exist on disk).
    agent = TDMPC2(cfg)
    assert os.path.exists(cfg.checkpoint), f'Checkpoint {cfg.checkpoint} not found! Must be a valid filepath.'
    agent.load(cfg.checkpoint)

    # Evaluate
    if cfg.multitask:
        print(colored(f'Evaluating agent on {len(cfg.tasks)} tasks:', 'yellow', attrs=['bold']))
    else:
        print(colored(f'Evaluating agent on {cfg.env_id}:', 'yellow', attrs=['bold']))
    if cfg.save_video_local:
        video_dir = os.path.join(cfg.work_dir, 'videos')
        os.makedirs(video_dir, exist_ok=True)
    scores = []
    tasks = cfg.tasks if cfg.multitask else [cfg.env_id]
    for task_idx, task in enumerate(tasks):
        if not cfg.multitask:
            task_idx = None
        has_success, has_fail = False, False # if task has success or/and fail (added for maniskill)
        ep_rewards, ep_successes, ep_fails = [], [], []
        for i in range(eval_episodes):
            obs, _ = env.reset()
            done = False # ms3: done is truncated since the ms3 ignore_terminations.
            ep_reward, t = 0, 0
            if cfg.save_video_local:
                frames = [env.render().squeeze()]
            while not done: # done is truncated and should be the same
                # t0 flags the first step of an episode so the planner resets
                # its warm-started mean (see TDMPC2.plan).
                action = agent.act(obs, t0=t==0)
                obs, reward, terminated, truncated, info = env.step(action)
                # NOTE(review): `|` assumes tensor/array flags; with num_envs=1
                # `not done` relies on single-element truthiness — confirm.
                done = terminated | truncated
                ep_reward += reward
                t += 1
                if cfg.save_video_local:
                    frames.append(env.render().squeeze())
            ep_rewards.append(ep_reward.mean().item())
            # 'success'/'fail' keys are only present for envs that define them.
            if 'success' in info:
                has_success = True
                ep_successes.append(info['success'].float().mean().item())
            if 'fail' in info:
                has_fail = True
                ep_fails.append(info['fail'].float().mean().item())
            if cfg.save_video_local:
                imageio.mimsave(
                    os.path.join(video_dir, f'{task}-{i}.mp4'), frames, fps=15)
        # Aggregate per-task statistics; nanmean tolerates empty metric lists.
        ep_rewards = np.nanmean(ep_rewards)
        ep_successes = np.nanmean(ep_successes)
        ep_fails = np.nanmean(ep_fails)
        if cfg.multitask:
            scores.append(ep_successes*100 if task.startswith('mw-') else ep_rewards/10)
        if has_success and has_fail:
            print(colored(f'  {task:<22}' \
                f'\tR: {ep_rewards:.01f}  ' \
                f'\tS: {ep_successes:.02f}' \
                f'\tF: {ep_fails:.02f}', 'yellow'))
        elif has_success:
            print(colored(f'  {task:<22}' \
                f'\tR: {ep_rewards:.01f}  ' \
                f'\tS: {ep_successes:.02f}', 'yellow'))
        elif has_fail:
            print(colored(f'  {task:<22}' \
                f'\tR: {ep_rewards:.01f}  ' \
                f'\tF: {ep_fails:.02f}', 'yellow'))

    if cfg.multitask:
        print(colored(f'Normalized score: {np.mean(scores):.02f}', 'yellow', attrs=['bold']))


if __name__ == '__main__':
    evaluate()
|
project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/tdmpc2.py
ADDED
|
@@ -0,0 +1,313 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
|
| 5 |
+
from common import math
|
| 6 |
+
from common.scale import RunningScale
|
| 7 |
+
from common.world_model import WorldModel
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TDMPC2:
    """
    TD-MPC2 agent. Implements training + inference.
    Can be used for both single-task and multi-task experiments,
    and supports both state and pixel observations.
    """

    def __init__(self, cfg):
        self.cfg = cfg
        self.device = torch.device('cuda')
        self.model = WorldModel(cfg).to(self.device)
        # Encoder gets a scaled-down learning rate; the remaining param
        # groups inherit the default `lr` below.
        self.optim = torch.optim.Adam([
            {'params': self.model._encoder.parameters(), 'lr': self.cfg.lr*self.cfg.enc_lr_scale},
            {'params': self.model._dynamics.parameters()},
            {'params': self.model._reward.parameters()},
            {'params': self.model._Qs.parameters()},
            {'params': self.model._task_emb.parameters() if self.cfg.multitask else []}
        ], lr=self.cfg.lr)
        # Policy has its own optimizer so Q-learning and policy updates
        # don't share optimizer state.
        self.pi_optim = torch.optim.Adam(self.model._pi.parameters(), lr=self.cfg.lr, eps=1e-5)
        self.model.eval()
        self.scale = RunningScale(cfg)
        self.cfg.iterations += 2*int(cfg.action_dim >= 20) # Heuristic for large action spaces
        # Per-task discounts in multi-task mode, single scalar otherwise.
        self.discount = torch.tensor(
            [self._get_discount(ep_len) for ep_len in cfg.episode_lengths], device='cuda'
        ) if self.cfg.multitask else self._get_discount(cfg.episode_length)

        self.cfg.discount = self.discount

    def _get_discount(self, episode_length):
        """
        Returns discount factor for a given episode length.
        Simple heuristic that scales discount linearly with episode length.
        Default values should work well for most tasks, but can be changed as needed.

        Args:
            episode_length (int): Length of the episode. Assumes episodes are of fixed length.

        Returns:
            float: Discount factor for the task.
        """
        frac = episode_length/self.cfg.discount_denom
        # (frac-1)/frac approaches 1 for long episodes; clamp to configured bounds.
        return min(max((frac-1)/(frac), self.cfg.discount_min), self.cfg.discount_max)

    def save(self, fp):
        """
        Save state dict of the agent to filepath.

        Args:
            fp (str): Filepath to save state dict to.
        """
        torch.save({"model": self.model.state_dict()}, fp)

    def load(self, fp):
        """
        Load a saved state dict from filepath (or dictionary) into current agent.

        Args:
            fp (str or dict): Filepath or state dict to load.
        """
        state_dict = fp if isinstance(fp, dict) else torch.load(fp)
        self.model.load_state_dict(state_dict["model"])

    @torch.no_grad()
    def act(self, obs, t0=False, eval_mode=False, task=None):
        """
        Before: obs is 1d, return seems to be mu(1, action_dim)
        After: obs is batched with num_env, return still 2d

        Select an action by planning in the latent space of the world model.

        Args:
            obs (torch.Tensor): Observation from the environment. 1d for online trainer
            t0 (bool): Whether this is the first observation in the episode.
            eval_mode (bool): Whether to use the mean of the action distribution.
            task (int): Task index (only used for multi-task experiments).

        Returns:
            torch.Tensor: Action to take in the environment.
        """
        obs = obs.to(self.device, non_blocking=True)
        if task is not None:
            task = torch.tensor([task], device=self.device)
        z = self.model.encode(obs, task) # [num_envs, latent_dim]
        if self.cfg.mpc:
            a = self.plan(z, t0=t0, eval_mode=eval_mode, task=task)
        else:
            a = self.model.pi(z, task)[int(not eval_mode)] # [int(not eval_mode)] selects mu or pi
        return a.cpu()

    @torch.no_grad()
    def _estimate_value(self, z, actions, task):
        """z[num_samples, latent_dim], actions[horizon, num_samples, action_dim] -> [num_samples, 1]
        Estimate value of a trajectory starting at latent state z and executing given actions.
        NOTE(review): as called from `plan`, z is (num_envs, num_samples, latent_dim) and
        actions is (num_envs, horizon, num_samples, action_dim); `actions[:, t]` indexes the
        horizon axis — the docstring shapes above look stale; confirm against caller."""
        G, discount = 0, 1
        # Roll the model forward, accumulating discounted predicted rewards...
        for t in range(self.cfg.horizon):
            reward = math.two_hot_inv(self.model.reward(z, actions[:, t], task), self.cfg)
            z = self.model.next(z, actions[:, t], task)
            G += discount * reward
            discount *= self.discount[torch.tensor(task)] if self.cfg.multitask else self.discount
        # ...then bootstrap with the policy's Q-value at the terminal latent.
        return G + discount * self.model.Q(z, self.model.pi(z, task)[1], task, return_type='avg')

    @torch.no_grad()
    def plan(self, z, t0=False, eval_mode=False, task=None):
        """
        Before: For online, z[1, latent_dim]
        After: For online z[num_envs, latent_dim]. Should be ok
        Plan a sequence of actions using the learned world model.

        Args:
            z (torch.Tensor): Latent state from which to plan.
            t0 (bool): Whether this is the first observation in the episode.
            eval_mode (bool): Whether to use the mean of the action distribution.
            task (Torch.Tensor): Task index (only used for multi-task experiments).

        Returns:
            torch.Tensor: Action to take in the environment.
        """
        num_envs = self.cfg.num_eval_envs if eval_mode else self.cfg.num_envs
        # Sample policy trajectories (warm-start a subset of MPPI samples with the policy)
        if self.cfg.num_pi_trajs > 0:
            pi_actions = torch.empty(num_envs, self.cfg.horizon, self.cfg.num_pi_trajs, self.cfg.action_dim, device=self.device)
            _z = z.unsqueeze(1).repeat(1, self.cfg.num_pi_trajs, 1) # (num_envs, num_pi_trajs, latent_dim)
            for t in range(self.cfg.horizon-1):
                pi_actions[:, t] = self.model.pi(_z, task)[1]
                _z = self.model.next(_z, pi_actions[:, t], task)
            pi_actions[:, -1] = self.model.pi(_z, task)[1]

        # Initialize state and parameters
        z = z.unsqueeze(1).repeat(1, self.cfg.num_samples, 1) # (num_envs, num_samples, latent_dim)
        mean = torch.zeros(num_envs, self.cfg.horizon, self.cfg.action_dim, device=self.device)
        std = self.cfg.max_std*torch.ones(num_envs, self.cfg.horizon, self.cfg.action_dim, device=self.device)
        # Warm-start the sampling mean from the previous step (shifted by one),
        # kept separate for train/eval since their batch sizes differ.
        if not t0 and hasattr(self, '_prev_mean'):
            if eval_mode: # Added to avoid the problem with shape (num_envs) mismatch with train and eval env
                mean[:, :-1] = self._prev_mean_eval[:, 1:]
            else:
                mean[:, :-1] = self._prev_mean[:, 1:]
        actions = torch.empty(num_envs, self.cfg.horizon, self.cfg.num_samples,
            self.cfg.action_dim, device=self.device) # (num_envs, horizon, num_samples, action_dim)
        if self.cfg.num_pi_trajs > 0:
            actions[:, :, :self.cfg.num_pi_trajs] = pi_actions

        # Iterate MPPI
        for _ in range(self.cfg.iterations):

            # Sample actions: Gaussian around current (mean, std), clamped to action bounds.
            actions[:, :, self.cfg.num_pi_trajs:] = (mean.unsqueeze(2) + std.unsqueeze(2) * \
                torch.randn(num_envs, self.cfg.horizon, self.cfg.num_samples-self.cfg.num_pi_trajs, self.cfg.action_dim, device=std.device)) \
                .clamp(-1, 1)
            if self.cfg.multitask:
                actions = actions * self.model._action_masks[task]

            # Compute elite actions
            value = self._estimate_value(z, actions, task).nan_to_num_(0) # (num_envs, num_samples, 1)
            elite_idxs = torch.topk(value.squeeze(2), self.cfg.num_elites, dim=1).indices # (num_envs, num_elites)
            elite_value = value[torch.arange(num_envs).unsqueeze(1), elite_idxs] # (num_envs, num_elites, 1)
            # Batched gather replaces the per-env loop kept below for reference.
            # elite_actions = torch.zeros(num_envs, self.cfg.horizon, self.cfg.num_elites, self.cfg.action_dim, dtype=actions.dtype, device=actions.device)
            # for j, curr_elites in enumerate(elite_idxs):
            #     elite_actions[j] = actions[j, :, curr_elites]
            elite_actions = torch.gather(actions, 2, elite_idxs.unsqueeze(1).unsqueeze(3).expand(-1, self.cfg.horizon, -1, self.cfg.action_dim))

            # Update parameters: softmax-weighted mean/std over elites.
            max_value = elite_value.max(1)[0] # (num_envs, 1)
            score = torch.exp(self.cfg.temperature*(elite_value - max_value.unsqueeze(1)))
            score /= score.sum(1, keepdim=True) # (num_envs, num_elites, 1)
            # NOTE: score is already normalized; the extra division below is ~1 (kept for stability).
            mean = torch.sum(score.unsqueeze(1) * elite_actions, dim=2) / (score.sum(1, keepdim=True) + 1e-9) # (num_envs, horizon, action_dim)
            std = torch.sqrt(torch.sum(score.unsqueeze(1) * (elite_actions - mean.unsqueeze(2)) ** 2, dim=2) / (score.sum(1, keepdim=True) + 1e-9)) \
                .clamp_(self.cfg.min_std, self.cfg.max_std) # (num_envs, horizon, action_dim)
            if self.cfg.multitask:
                mean = mean * self.model._action_masks[task]
                std = std * self.model._action_masks[task]

        # Select action: sample one elite trajectory per env, proportional to score.
        score = score.squeeze(2).cpu().numpy() # (num_envs, num_elites)
        # (num_envs, horizon, num_elites, action_dim) for elite_actions
        actions = torch.zeros(num_envs, self.cfg.horizon, self.cfg.action_dim, dtype=actions.dtype, device=actions.device)
        for i in range(len(score)):
            actions[i] = elite_actions[i, :, np.random.choice(np.arange(score.shape[1]), p=score[i])]
        if eval_mode:
            self._prev_mean_eval = mean # (num_eval_envs, horizon, action_dim)
        else:
            self._prev_mean = mean # (num_envs, horizon, action_dim)
        a, std = actions[:, 0], std[:, 0]
        if not eval_mode:
            # Exploration noise on the first action only (training rollouts).
            a += std * torch.randn(num_envs, self.cfg.action_dim, device=std.device)
        return a.clamp_(-1, 1)

    def update_pi(self, zs, task):
        """
        Update policy using a sequence of latent states.

        Args:
            zs (torch.Tensor): Sequence of latent states.
            task (torch.Tensor): Task index (only used for multi-task experiments).

        Returns:
            float: Loss of the policy update.
        """
        self.pi_optim.zero_grad(set_to_none=True)
        # Freeze Q gradients so the policy loss only updates the policy.
        self.model.track_q_grad(False)
        _, pis, log_pis, _ = self.model.pi(zs, task)
        qs = self.model.Q(zs, pis, task, return_type='avg')
        self.scale.update(qs[0])
        qs = self.scale(qs)

        # Loss is a weighted sum of Q-values
        rho = torch.pow(self.cfg.rho, torch.arange(len(qs), device=self.device))
        pi_loss = ((self.cfg.entropy_coef * log_pis - qs).mean(dim=(1,2)) * rho).mean()
        pi_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.model._pi.parameters(), self.cfg.grad_clip_norm)
        self.pi_optim.step()
        self.model.track_q_grad(True)

        return pi_loss.item()

    @torch.no_grad()
    def _td_target(self, next_z, reward, task):
        """
        Compute the TD-target from a reward and the observation at the following time step.

        Args:
            next_z (torch.Tensor): Latent state at the following time step.
            reward (torch.Tensor): Reward at the current time step.
            task (torch.Tensor): Task index (only used for multi-task experiments).

        Returns:
            torch.Tensor: TD-target.
        """
        pi = self.model.pi(next_z, task)[1]
        discount = self.discount[task].unsqueeze(-1) if self.cfg.multitask else self.discount
        # Min over two subsampled target Q-heads for a conservative bootstrap.
        return reward + discount * self.model.Q(next_z, pi, task, return_type='min', target=True)

    def update(self, buffer):
        """
        Main update function. Corresponds to one iteration of model learning.

        Args:
            buffer (common.buffer.Buffer): Replay buffer.

        Returns:
            dict: Dictionary of training statistics.
        """
        obs, action, reward, task = buffer.sample()

        # Compute targets
        with torch.no_grad():
            next_z = self.model.encode(obs[1:], task)
            td_targets = self._td_target(next_z, reward, task)

        # Prepare for update
        self.optim.zero_grad(set_to_none=True)
        self.model.train()

        # Latent rollout: predict latents open-loop and penalize drift from
        # the encoder's latents (consistency loss), discounted by rho^t.
        zs = torch.empty(self.cfg.horizon+1, self.cfg.batch_size, self.cfg.latent_dim, device=self.device)
        z = self.model.encode(obs[0], task)
        zs[0] = z
        consistency_loss = 0
        for t in range(self.cfg.horizon):
            z = self.model.next(z, action[t], task)
            consistency_loss += F.mse_loss(z, next_z[t]) * self.cfg.rho**t
            zs[t+1] = z

        # Predictions
        _zs = zs[:-1]
        qs = self.model.Q(_zs, action, task, return_type='all')
        reward_preds = self.model.reward(_zs, action, task)

        # Compute losses (soft cross-entropy on two-hot encoded targets)
        reward_loss, value_loss = 0, 0
        for t in range(self.cfg.horizon):
            reward_loss += math.soft_ce(reward_preds[t], reward[t], self.cfg).mean() * self.cfg.rho**t
            for q in range(self.cfg.num_q):
                value_loss += math.soft_ce(qs[q][t], td_targets[t], self.cfg).mean() * self.cfg.rho**t
        consistency_loss *= (1/self.cfg.horizon)
        reward_loss *= (1/self.cfg.horizon)
        value_loss *= (1/(self.cfg.horizon * self.cfg.num_q))
        total_loss = (
            self.cfg.consistency_coef * consistency_loss +
            self.cfg.reward_coef * reward_loss +
            self.cfg.value_coef * value_loss
        )

        # Update model
        total_loss.backward()
        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.cfg.grad_clip_norm)
        self.optim.step()

        # Update policy on detached latents (policy loss must not reach the model).
        pi_loss = self.update_pi(zs.detach(), task)

        # Update target Q-functions
        self.model.soft_update_target_Q()

        # Return training statistics
        self.model.eval()
        return {
            "consistency_loss": float(consistency_loss.mean().item()),
            "reward_loss": float(reward_loss.mean().item()),
            "value_loss": float(value_loss.mean().item()),
            "pi_loss": pi_loss,
            "total_loss": float(total_loss.mean().item()),
            "grad_norm": float(grad_norm),
            "pi_scale": float(self.scale.value),
        }
project/ManiSkill3/src/maniskill3_environment/examples/baselines/tdmpc2/train.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
# Headless rendering backend; must be set before any MuJoCo import.
os.environ['MUJOCO_GL'] = 'egl'
os.environ['LAZY_LEGACY_OP'] = '0'
import warnings
warnings.filterwarnings('ignore')
import torch

import hydra
from termcolor import colored
from omegaconf import OmegaConf

from common.parser import parse_cfg
from common.seed import set_seed
from common.buffer import Buffer
from envs import make_envs
from tdmpc2 import TDMPC2
from trainer.offline_trainer import OfflineTrainer
from trainer.online_trainer import OnlineTrainer
from common.logger import Logger, print_run
import multiprocessing

import gymnasium as gym

# Fixed-size network inputs -> let cuDNN pick the fastest algorithms.
torch.backends.cudnn.benchmark = True


@hydra.main(config_name='config', config_path='.')
def train(cfg: dict):
    """
    Script for training single-task / multi-task TD-MPC2 agents.

    Most relevant args:
        `task`: task name (or mt30/mt80 for multi-task training)
        `model_size`: model size, must be one of `[1, 5, 19, 48, 317]` (default: 5)
        `steps`: number of training/environment steps (default: 10M)
        `seed`: random seed (default: 1)

    See config.yaml for a full list of args.

    Example usage:
    ```
    $ python train.py task=mt80 model_size=48
    $ python train.py task=mt30 model_size=317
    $ python train.py task=dog-run steps=7000000
    ```
    """
    assert torch.cuda.is_available()
    assert cfg.steps > 0, 'Must train for at least 1 step.'
    cfg = parse_cfg(cfg)
    assert not cfg.multitask, colored('Warning: multi-task models is not currently supported for maniskill.', 'red', attrs=['bold'])
    set_seed(cfg.seed)
    print(colored('Work dir:', 'yellow', attrs=['bold']), cfg.work_dir)

    # Need to initiate logger before make env to wrap record episode wrapper into async vec cpu env
    manager = multiprocessing.Manager()
    video_path = cfg.work_dir / 'eval_video'
    if cfg.save_video_local:
        # exist_ok=True replaces the former bare `except: pass`, which also
        # silently swallowed real failures (e.g. permission errors).
        os.makedirs(video_path, exist_ok=True)
    logger = Logger(cfg, manager)
    # Init env
    env = make_envs(cfg, cfg.num_envs)
    eval_env = make_envs(cfg, cfg.num_eval_envs, video_path=video_path, is_eval=True, logger=logger)
    print_run(cfg)
    # Init agent
    agent = TDMPC2(cfg)
    # Update wandb config, for control_mode, env_horizon, discount are set after logger init
    if logger._wandb is not None:
        logger._wandb.config.update(OmegaConf.to_container(cfg, resolve=True), allow_val_change=True)
    trainer_cls = OnlineTrainer  # OfflineTrainer not available
    trainer = trainer_cls(
        cfg=cfg,
        env=env,
        eval_env=eval_env,
        agent=agent,
        buffer=Buffer(cfg),
        logger=logger,
    )
    trainer.train()
    print('\nTraining completed successfully')


if __name__ == '__main__':
    train()
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/tabletop/__init__.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .assembling_kits import AssemblingKitsEnv
|
| 2 |
+
from .lift_peg_upright import LiftPegUprightEnv
|
| 3 |
+
from .peg_insertion_side import PegInsertionSideEnv
|
| 4 |
+
from .pick_clutter_ycb import PickClutterYCBEnv
|
| 5 |
+
from .pick_cube import PickCubeEnv
|
| 6 |
+
from .pick_single_ycb import PickSingleYCBEnv
|
| 7 |
+
from .plug_charger import PlugChargerEnv
|
| 8 |
+
from .pull_cube import PullCubeEnv
|
| 9 |
+
from .push_cube import PushCubeEnv
|
| 10 |
+
from .stack_cube import StackCubeEnv
|
| 11 |
+
from .turn_faucet import TurnFaucetEnv
|
| 12 |
+
from .two_robot_pick_cube import TwoRobotPickCube
|
| 13 |
+
from .two_robot_stack_cube import TwoRobotStackCube
|
| 14 |
+
from .poke_cube import PokeCubeEnv
|
| 15 |
+
from .place_sphere import PlaceSphereEnv
|
| 16 |
+
from .roll_ball import RollBallEnv
|
| 17 |
+
from .push_t import PushTEnv
|
| 18 |
+
from .pull_cube_tool import PullCubeToolEnv
|
| 19 |
+
from .mug_on_rack import PlaceMugOnRackEnv
|
| 20 |
+
from .stack_mug_on_rack import StackMugOnRackEnv
|
| 21 |
+
from .stack_bowl import StackBowlEnv
|
| 22 |
+
from .fork_from_rack import PickForkFromRackEnv
|
| 23 |
+
from .stack_plate_on_rack import StackPlateOnRackEnv
|
| 24 |
+
from .mug_on_coffee_machine import PlaceMugOnCoffeeMachineEnv
|
| 25 |
+
from .mug_from_coffee_machine import PickMugFromCoffeeMachineEnv
|
| 26 |
+
from .spoon_on_rack import PlaceSpoonOnRackEnv
|
| 27 |
+
from .bowl_on_rack import PlaceBowlOnRackEnv
|
| 28 |
+
from .bowl_on_rack_v2 import PlaceBowlOnRackEnv
|
| 29 |
+
from .bowl_on_rack_v3 import PlaceBowlOnRackEnv
|
| 30 |
+
from .bowl_on_rack_v4 import PlaceBowlOnRackEnv
|
| 31 |
+
from .plate_on_rack import PlacePlateOnRackEnv
|
| 32 |
+
from .plate_on_rack_v2 import PlacePlateOnRackEnv
|
| 33 |
+
from .plate_on_rack_v3 import PlacePlateOnRackEnv
|
| 34 |
+
from .plate_on_rack_v4 import PlacePlateOnRackEnv
|
| 35 |
+
from .fork_on_rack import PlaceForkOnRackEnv
|
| 36 |
+
from .fork_on_rack_v2 import PlaceForkOnRackEnv
|
| 37 |
+
from .fork_on_rack_v3 import PlaceForkOnRackEnv
|
| 38 |
+
from .fork_on_rack_v4 import PlaceForkOnRackEnv
|
| 39 |
+
from .knife_on_rack import PlaceKnifeOnRackEnv
|
| 40 |
+
from .knife_on_rack_v2 import PlaceKnifeOnRackEnv
|
| 41 |
+
from .knife_on_rack_v3 import PlaceKnifeOnRackEnv
|
| 42 |
+
from .knife_on_rack_v4 import PlaceKnifeOnRackEnv
|
| 43 |
+
from .grasp_fork_v0 import GraspForkEnv
|
| 44 |
+
from .grasp_bowl_v0 import GraspBowlEnv
|
| 45 |
+
from .grasp_plate_v0 import GraspPlateEnv
|
| 46 |
+
from .grasp_cup_v0 import GraspCupEnv
|
project/ManiSkill3/src/maniskill3_environment/mani_skill/envs/tasks/tabletop/__pycache__/assembling_kits.cpython-310.pyc
ADDED
|
Binary file (10.5 kB). View file
|
|
|