Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- mjlab/docs/source/_static/css/custom.css +172 -0
- mjlab/docs/source/_static/favicon.ico +0 -0
- mjlab/docs/source/_static/refs.bib +0 -0
- mjlab/docs/source/api/actuator.rst +10 -0
- mjlab/docs/source/api/entity.rst +10 -0
- mjlab/docs/source/api/envs.rst +10 -0
- mjlab/docs/source/api/index.rst +16 -0
- mjlab/docs/source/api/managers.rst +10 -0
- mjlab/docs/source/api/scene.rst +10 -0
- mjlab/docs/source/api/sensor.rst +10 -0
- mjlab/docs/source/api/sim.rst +10 -0
- mjlab/docs/source/api/terrains.rst +10 -0
- mjlab/src/mjlab/__init__.py +59 -0
- mjlab/src/mjlab/actuator/__init__.py +48 -0
- mjlab/src/mjlab/actuator/actuator.py +269 -0
- mjlab/src/mjlab/actuator/builtin_actuator.py +279 -0
- mjlab/src/mjlab/actuator/builtin_group.py +112 -0
- mjlab/src/mjlab/actuator/dc_actuator.py +162 -0
- mjlab/src/mjlab/actuator/delayed_actuator.py +174 -0
- mjlab/src/mjlab/actuator/learned_actuator.py +207 -0
- mjlab/src/mjlab/actuator/pd_actuator.py +150 -0
- mjlab/src/mjlab/actuator/xml_actuator.py +131 -0
- mjlab/src/mjlab/asset_zoo/README.md +7 -0
- mjlab/src/mjlab/asset_zoo/__init__.py +0 -0
- mjlab/src/mjlab/entity/__init__.py +5 -0
- mjlab/src/mjlab/entity/data.py +576 -0
- mjlab/src/mjlab/entity/entity.py +985 -0
- mjlab/src/mjlab/envs/__init__.py +4 -0
- mjlab/src/mjlab/envs/manager_based_rl_env.py +524 -0
- mjlab/src/mjlab/envs/types.py +6 -0
- mjlab/src/mjlab/managers/__init__.py +33 -0
- mjlab/src/mjlab/managers/action_manager.py +186 -0
- mjlab/src/mjlab/managers/command_manager.py +229 -0
- mjlab/src/mjlab/managers/curriculum_manager.py +155 -0
- mjlab/src/mjlab/managers/manager_base.py +139 -0
- mjlab/src/mjlab/managers/metrics_manager.py +144 -0
- mjlab/src/mjlab/managers/observation_manager.py +485 -0
- mjlab/src/mjlab/managers/reward_manager.py +153 -0
- mjlab/src/mjlab/managers/scene_entity_config.py +190 -0
- mjlab/src/mjlab/managers/termination_manager.py +140 -0
- mjlab/src/mjlab/py.typed +0 -0
- mjlab/src/mjlab/rl/__init__.py +6 -0
- mjlab/src/mjlab/rl/config.py +128 -0
- mjlab/src/mjlab/rl/exporter_utils.py +82 -0
- mjlab/src/mjlab/rl/runner.py +121 -0
- mjlab/src/mjlab/rl/spatial_softmax.py +205 -0
- mjlab/src/mjlab/rl/vecenv_wrapper.py +107 -0
- mjlab/src/mjlab/scene/__init__.py +2 -0
- mjlab/src/mjlab/scene/scene.py +227 -0
- mjlab/src/mjlab/scene/scene.xml +12 -0
mjlab/docs/source/_static/css/custom.css
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* PyData Sphinx Theme — Option A (Indigo/Teal)
|
| 3 |
+
* Aesthetic: modern lab — indigo primary, teal accent, neutral grays
|
| 4 |
+
*/
|
| 5 |
+
|
| 6 |
+
/* LIGHT THEME */
|
| 7 |
+
html[data-theme="light"] {
|
| 8 |
+
/* Brand */
|
| 9 |
+
--pst-color-primary: #4F46E5;
|
| 10 |
+
/* Indigo-600 */
|
| 11 |
+
--pst-color-secondary: #14B8A6;
|
| 12 |
+
/* Teal-500 */
|
| 13 |
+
--pst-color-secondary-highlight: #2DD4BF;
|
| 14 |
+
/* Teal-400 */
|
| 15 |
+
|
| 16 |
+
/* Links / code links */
|
| 17 |
+
--pst-color-inline-code-links: #0D9488;
|
| 18 |
+
/* Teal-600 */
|
| 19 |
+
--pst-color-link: var(--pst-color-primary);
|
| 20 |
+
--pst-color-link-hover: #4338CA;
|
| 21 |
+
/* Indigo-700 */
|
| 22 |
+
|
| 23 |
+
/* Semantic */
|
| 24 |
+
--pst-color-info: var(--pst-color-secondary);
|
| 25 |
+
--pst-color-info-highlight: var(--pst-color-secondary);
|
| 26 |
+
--pst-color-info-bg: #D1FAE5;
|
| 27 |
+
/* Teal-50 */
|
| 28 |
+
--pst-color-attention: #F59E0B;
|
| 29 |
+
/* Amber-500 */
|
| 30 |
+
--pst-color-target: #EEF2FF;
|
| 31 |
+
/* Indigo-50 */
|
| 32 |
+
|
| 33 |
+
/* Text */
|
| 34 |
+
--pst-color-text-base: #1F2937;
|
| 35 |
+
/* Slate-800 */
|
| 36 |
+
--pst-color-text-muted: #6B7280;
|
| 37 |
+
/* Slate-500 */
|
| 38 |
+
|
| 39 |
+
/* Surfaces */
|
| 40 |
+
--pst-color-background: #FFFFFF;
|
| 41 |
+
--pst-color-on-background: #FFFFFF;
|
| 42 |
+
--pst-color-surface: #F3F4F6;
|
| 43 |
+
/* Gray-100 */
|
| 44 |
+
--pst-color-on-surface: #E5E7EB;
|
| 45 |
+
/* Gray-200 */
|
| 46 |
+
--pst-color-shadow: #D1D5DB;
|
| 47 |
+
--pst-color-border: #E5E7EB;
|
| 48 |
+
|
| 49 |
+
/* Inline code */
|
| 50 |
+
--pst-color-inline-code: #0D9488;
|
| 51 |
+
/* Teal-600 */
|
| 52 |
+
|
| 53 |
+
/* Tables / hovers */
|
| 54 |
+
--pst-color-table-row-hover-bg: #EEF2FF;
|
| 55 |
+
/* Indigo-50 */
|
| 56 |
+
|
| 57 |
+
/* Accent (sparingly) */
|
| 58 |
+
--pst-color-accent: #10B981;
|
| 59 |
+
/* Emerald-500 */
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
/* DARK THEME */
|
| 63 |
+
html[data-theme="dark"] {
|
| 64 |
+
/* Brand */
|
| 65 |
+
--pst-color-primary: #A5B4FC;
|
| 66 |
+
/* Indigo-300/200 mix for readability */
|
| 67 |
+
--pst-color-secondary: #5EEAD4;
|
| 68 |
+
/* Teal-300 */
|
| 69 |
+
--pst-color-secondary-highlight: #2DD4BF;
|
| 70 |
+
|
| 71 |
+
/* Links / code links */
|
| 72 |
+
--pst-color-inline-code-links: #93C5FD;
|
| 73 |
+
/* Indigo-300 */
|
| 74 |
+
--pst-color-link: var(--pst-color-primary);
|
| 75 |
+
--pst-color-link-hover: #818CF8;
|
| 76 |
+
/* Indigo-400 */
|
| 77 |
+
|
| 78 |
+
/* Semantic */
|
| 79 |
+
--pst-color-info: var(--pst-color-secondary);
|
| 80 |
+
--pst-color-info-highlight: var(--pst-color-secondary);
|
| 81 |
+
--pst-color-info-bg: #042F2E;
|
| 82 |
+
/* Deep teal */
|
| 83 |
+
--pst-color-attention: #F59E0B;
|
| 84 |
+
--pst-color-target: #1B1C2A;
|
| 85 |
+
/* Indigo-tinted surface */
|
| 86 |
+
|
| 87 |
+
/* Text */
|
| 88 |
+
--pst-color-text-base: #E5E7EB;
|
| 89 |
+
/* Gray-200 */
|
| 90 |
+
--pst-color-text-muted: #9CA3AF;
|
| 91 |
+
/* Gray-400 */
|
| 92 |
+
|
| 93 |
+
/* Surfaces */
|
| 94 |
+
--pst-color-background: #0B0C10;
|
| 95 |
+
/* Deep graphite */
|
| 96 |
+
--pst-color-on-background: #12131A;
|
| 97 |
+
--pst-color-surface: #111827;
|
| 98 |
+
/* Slate-900 */
|
| 99 |
+
--pst-color-on-surface: #1F2937;
|
| 100 |
+
/* Slate-800 */
|
| 101 |
+
--pst-color-shadow: #0F172A;
|
| 102 |
+
--pst-color-border: #2A2D3A;
|
| 103 |
+
|
| 104 |
+
/* Inline code */
|
| 105 |
+
--pst-color-inline-code: #5EEAD4;
|
| 106 |
+
/* Teal-300 */
|
| 107 |
+
|
| 108 |
+
/* Tables / hovers */
|
| 109 |
+
--pst-color-table-row-hover-bg: #1B1C2A;
|
| 110 |
+
|
| 111 |
+
/* Accent */
|
| 112 |
+
--pst-color-accent: #34D399;
|
| 113 |
+
/* Emerald-400 */
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
/* General tweaks */
|
| 117 |
+
a {
|
| 118 |
+
text-decoration: none !important;
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
.bd-header-announcement a,
|
| 122 |
+
.bd-header-version-warning a {
|
| 123 |
+
color: #5EEAD4;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
.form-control {
|
| 127 |
+
border-radius: 0 !important;
|
| 128 |
+
border: none !important;
|
| 129 |
+
outline: none !important;
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
.navbar-brand,
|
| 133 |
+
.navbar-icon-links {
|
| 134 |
+
padding-top: 0rem !important;
|
| 135 |
+
padding-bottom: 0rem !important;
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
/* Version switcher */
|
| 139 |
+
.sidebar-version-switcher {
|
| 140 |
+
display: flex;
|
| 141 |
+
align-items: center;
|
| 142 |
+
gap: 0.5rem;
|
| 143 |
+
padding: 0.4rem 1rem;
|
| 144 |
+
margin-bottom: 0.5rem;
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
.sidebar-version-label {
|
| 148 |
+
font-size: 0.8rem;
|
| 149 |
+
font-weight: 600;
|
| 150 |
+
color: var(--pst-color-text-muted);
|
| 151 |
+
white-space: nowrap;
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
.sidebar-version-select {
|
| 155 |
+
flex: 1;
|
| 156 |
+
font-size: 0.8rem;
|
| 157 |
+
padding: 0.25rem 0.5rem;
|
| 158 |
+
border: 1px solid var(--pst-color-border);
|
| 159 |
+
border-radius: 4px;
|
| 160 |
+
background: var(--pst-color-background);
|
| 161 |
+
color: var(--pst-color-text-base);
|
| 162 |
+
cursor: pointer;
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
.sidebar-version-select:hover {
|
| 166 |
+
border-color: var(--pst-color-primary);
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
/* Sidebar section spacing */
|
| 170 |
+
.bd-sidebar .navbar-icon-links {
|
| 171 |
+
padding: 0 1rem 0.25rem !important;
|
| 172 |
+
}
|
mjlab/docs/source/_static/favicon.ico
ADDED
|
|
mjlab/docs/source/_static/refs.bib
ADDED
|
File without changes
|
mjlab/docs/source/api/actuator.rst
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
mjlab.actuator
|
| 2 |
+
==============
|
| 3 |
+
|
| 4 |
+
Actuator implementations for motor control.
|
| 5 |
+
|
| 6 |
+
.. automodule:: mjlab.actuator
|
| 7 |
+
:members:
|
| 8 |
+
:undoc-members:
|
| 9 |
+
:imported-members:
|
| 10 |
+
:show-inheritance:
|
mjlab/docs/source/api/entity.rst
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
mjlab.entity
|
| 2 |
+
============
|
| 3 |
+
|
| 4 |
+
Entity models and data structures.
|
| 5 |
+
|
| 6 |
+
.. automodule:: mjlab.entity
|
| 7 |
+
:members:
|
| 8 |
+
:undoc-members:
|
| 9 |
+
:imported-members:
|
| 10 |
+
:show-inheritance:
|
mjlab/docs/source/api/envs.rst
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
mjlab.envs
|
| 2 |
+
==========
|
| 3 |
+
|
| 4 |
+
RL environment classes.
|
| 5 |
+
|
| 6 |
+
.. automodule:: mjlab.envs
|
| 7 |
+
:members:
|
| 8 |
+
:undoc-members:
|
| 9 |
+
:imported-members:
|
| 10 |
+
:show-inheritance:
|
mjlab/docs/source/api/index.rst
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
API Reference
|
| 2 |
+
=============
|
| 3 |
+
|
| 4 |
+
This section provides detailed API documentation for all public modules in mjlab.
|
| 5 |
+
|
| 6 |
+
.. toctree::
|
| 7 |
+
:maxdepth: 1
|
| 8 |
+
|
| 9 |
+
envs
|
| 10 |
+
scene
|
| 11 |
+
sim
|
| 12 |
+
entity
|
| 13 |
+
actuator
|
| 14 |
+
sensor
|
| 15 |
+
managers
|
| 16 |
+
terrains
|
mjlab/docs/source/api/managers.rst
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
mjlab.managers
|
| 2 |
+
==============
|
| 3 |
+
|
| 4 |
+
Environment managers for actions, observations, rewards, terminations, commands, and curriculum.
|
| 5 |
+
|
| 6 |
+
.. automodule:: mjlab.managers
|
| 7 |
+
:members:
|
| 8 |
+
:undoc-members:
|
| 9 |
+
:imported-members:
|
| 10 |
+
:show-inheritance:
|
mjlab/docs/source/api/scene.rst
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
mjlab.scene
|
| 2 |
+
===========
|
| 3 |
+
|
| 4 |
+
Scene management.
|
| 5 |
+
|
| 6 |
+
.. automodule:: mjlab.scene
|
| 7 |
+
:members:
|
| 8 |
+
:undoc-members:
|
| 9 |
+
:imported-members:
|
| 10 |
+
:show-inheritance:
|
mjlab/docs/source/api/sensor.rst
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
mjlab.sensor
|
| 2 |
+
============
|
| 3 |
+
|
| 4 |
+
Sensor implementations.
|
| 5 |
+
|
| 6 |
+
.. automodule:: mjlab.sensor
|
| 7 |
+
:members:
|
| 8 |
+
:undoc-members:
|
| 9 |
+
:imported-members:
|
| 10 |
+
:show-inheritance:
|
mjlab/docs/source/api/sim.rst
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
mjlab.sim
|
| 2 |
+
=========
|
| 3 |
+
|
| 4 |
+
Simulation core.
|
| 5 |
+
|
| 6 |
+
.. automodule:: mjlab.sim
|
| 7 |
+
:members:
|
| 8 |
+
:undoc-members:
|
| 9 |
+
:imported-members:
|
| 10 |
+
:show-inheritance:
|
mjlab/docs/source/api/terrains.rst
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
mjlab.terrains
|
| 2 |
+
==============
|
| 3 |
+
|
| 4 |
+
Terrain generation and importing.
|
| 5 |
+
|
| 6 |
+
.. automodule:: mjlab.terrains
|
| 7 |
+
:members:
|
| 8 |
+
:undoc-members:
|
| 9 |
+
:imported-members:
|
| 10 |
+
:show-inheritance:
|
mjlab/src/mjlab/__init__.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from importlib.metadata import entry_points
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
|
| 5 |
+
import tyro
|
| 6 |
+
import warp as wp
|
| 7 |
+
|
| 8 |
+
MJLAB_SRC_PATH: Path = Path(__file__).parent
|
| 9 |
+
|
| 10 |
+
TYRO_FLAGS = (
|
| 11 |
+
# Don't let users switch between types in unions. This produces a simpler CLI
|
| 12 |
+
# with flatter helptext, at the cost of some flexibility. Type changes can
|
| 13 |
+
# just be done in code.
|
| 14 |
+
tyro.conf.AvoidSubcommands,
|
| 15 |
+
# Disable automatic flag conversion (e.g., use `--flag False` instead of
|
| 16 |
+
# `--no-flag` for booleans).
|
| 17 |
+
tyro.conf.FlagConversionOff,
|
| 18 |
+
# Use Python syntax for collections: --tuple (1,2,3) instead of --tuple 1 2 3.
|
| 19 |
+
# Helps with wandb sweep compatibility: https://brentyi.github.io/tyro/wandb_sweeps/
|
| 20 |
+
tyro.conf.UsePythonSyntaxForLiteralCollections,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def _configure_warp() -> None:
|
| 25 |
+
"""Configure Warp globally for mjlab."""
|
| 26 |
+
wp.config.enable_backward = False
|
| 27 |
+
|
| 28 |
+
# Keep warp verbose by default to show kernel compilation progress.
|
| 29 |
+
# Override with MJLAB_WARP_QUIET=1 environment variable if needed.
|
| 30 |
+
quiet = os.environ.get("MJLAB_WARP_QUIET", "0").lower() in ("1", "true", "yes")
|
| 31 |
+
wp.config.quiet = quiet
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _import_registered_packages() -> None:
|
| 35 |
+
"""Auto-discover and import packages registered via entry points.
|
| 36 |
+
|
| 37 |
+
Looks for packages registered under the 'mjlab.tasks' entry point group.
|
| 38 |
+
Each discovered package is imported, which allows it to register custom
|
| 39 |
+
environments with gymnasium.
|
| 40 |
+
"""
|
| 41 |
+
mjlab_tasks = entry_points().select(group="mjlab.tasks")
|
| 42 |
+
for entry_point in mjlab_tasks:
|
| 43 |
+
try:
|
| 44 |
+
entry_point.load()
|
| 45 |
+
except Exception as e:
|
| 46 |
+
print(f"[WARN] Failed to load task package {entry_point.name}: {e}")
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def _configure_mediapy() -> None:
|
| 50 |
+
"""Point mediapy at the bundled imageio-ffmpeg binary."""
|
| 51 |
+
import imageio_ffmpeg
|
| 52 |
+
import mediapy
|
| 53 |
+
|
| 54 |
+
mediapy.set_ffmpeg(imageio_ffmpeg.get_ffmpeg_exe())
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
_configure_warp()
|
| 58 |
+
_configure_mediapy()
|
| 59 |
+
_import_registered_packages()
|
mjlab/src/mjlab/actuator/__init__.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Actuator implementations for mjlab."""
|
| 2 |
+
|
| 3 |
+
from mjlab.actuator.actuator import Actuator as Actuator
|
| 4 |
+
from mjlab.actuator.actuator import ActuatorCfg as ActuatorCfg
|
| 5 |
+
from mjlab.actuator.actuator import ActuatorCmd as ActuatorCmd
|
| 6 |
+
from mjlab.actuator.builtin_actuator import (
|
| 7 |
+
BuiltinMotorActuator as BuiltinMotorActuator,
|
| 8 |
+
)
|
| 9 |
+
from mjlab.actuator.builtin_actuator import (
|
| 10 |
+
BuiltinMotorActuatorCfg as BuiltinMotorActuatorCfg,
|
| 11 |
+
)
|
| 12 |
+
from mjlab.actuator.builtin_actuator import (
|
| 13 |
+
BuiltinMuscleActuator as BuiltinMuscleActuator,
|
| 14 |
+
)
|
| 15 |
+
from mjlab.actuator.builtin_actuator import (
|
| 16 |
+
BuiltinMuscleActuatorCfg as BuiltinMuscleActuatorCfg,
|
| 17 |
+
)
|
| 18 |
+
from mjlab.actuator.builtin_actuator import (
|
| 19 |
+
BuiltinPositionActuator as BuiltinPositionActuator,
|
| 20 |
+
)
|
| 21 |
+
from mjlab.actuator.builtin_actuator import (
|
| 22 |
+
BuiltinPositionActuatorCfg as BuiltinPositionActuatorCfg,
|
| 23 |
+
)
|
| 24 |
+
from mjlab.actuator.builtin_actuator import (
|
| 25 |
+
BuiltinVelocityActuator as BuiltinVelocityActuator,
|
| 26 |
+
)
|
| 27 |
+
from mjlab.actuator.builtin_actuator import (
|
| 28 |
+
BuiltinVelocityActuatorCfg as BuiltinVelocityActuatorCfg,
|
| 29 |
+
)
|
| 30 |
+
from mjlab.actuator.builtin_group import BuiltinActuatorGroup as BuiltinActuatorGroup
|
| 31 |
+
from mjlab.actuator.dc_actuator import DcMotorActuator as DcMotorActuator
|
| 32 |
+
from mjlab.actuator.dc_actuator import DcMotorActuatorCfg as DcMotorActuatorCfg
|
| 33 |
+
from mjlab.actuator.delayed_actuator import DelayedActuator as DelayedActuator
|
| 34 |
+
from mjlab.actuator.delayed_actuator import DelayedActuatorCfg as DelayedActuatorCfg
|
| 35 |
+
from mjlab.actuator.learned_actuator import LearnedMlpActuator as LearnedMlpActuator
|
| 36 |
+
from mjlab.actuator.learned_actuator import (
|
| 37 |
+
LearnedMlpActuatorCfg as LearnedMlpActuatorCfg,
|
| 38 |
+
)
|
| 39 |
+
from mjlab.actuator.pd_actuator import IdealPdActuator as IdealPdActuator
|
| 40 |
+
from mjlab.actuator.pd_actuator import IdealPdActuatorCfg as IdealPdActuatorCfg
|
| 41 |
+
from mjlab.actuator.xml_actuator import XmlMotorActuator as XmlMotorActuator
|
| 42 |
+
from mjlab.actuator.xml_actuator import XmlMotorActuatorCfg as XmlMotorActuatorCfg
|
| 43 |
+
from mjlab.actuator.xml_actuator import XmlMuscleActuator as XmlMuscleActuator
|
| 44 |
+
from mjlab.actuator.xml_actuator import XmlMuscleActuatorCfg as XmlMuscleActuatorCfg
|
| 45 |
+
from mjlab.actuator.xml_actuator import XmlPositionActuator as XmlPositionActuator
|
| 46 |
+
from mjlab.actuator.xml_actuator import XmlPositionActuatorCfg as XmlPositionActuatorCfg
|
| 47 |
+
from mjlab.actuator.xml_actuator import XmlVelocityActuator as XmlVelocityActuator
|
| 48 |
+
from mjlab.actuator.xml_actuator import XmlVelocityActuatorCfg as XmlVelocityActuatorCfg
|
mjlab/src/mjlab/actuator/actuator.py
ADDED
|
@@ -0,0 +1,269 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Base actuator interface."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from abc import ABC, abstractmethod
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from enum import Enum
|
| 8 |
+
from typing import TYPE_CHECKING, Generic, TypeVar
|
| 9 |
+
|
| 10 |
+
import mujoco
|
| 11 |
+
import mujoco_warp as mjwarp
|
| 12 |
+
import torch
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from mjlab.entity import Entity
|
| 16 |
+
from mjlab.entity.data import EntityData
|
| 17 |
+
|
| 18 |
+
ActuatorCfgT = TypeVar("ActuatorCfgT", bound="ActuatorCfg")
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class TransmissionType(str, Enum):
|
| 22 |
+
"""Transmission types for actuators."""
|
| 23 |
+
|
| 24 |
+
JOINT = "joint"
|
| 25 |
+
TENDON = "tendon"
|
| 26 |
+
SITE = "site"
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass(kw_only=True)
|
| 30 |
+
class ActuatorCfg(ABC):
|
| 31 |
+
target_names_expr: tuple[str, ...]
|
| 32 |
+
"""Targets that are part of this actuator group.
|
| 33 |
+
|
| 34 |
+
Can be a tuple of names or tuple of regex expressions.
|
| 35 |
+
Interpreted based on transmission_type (joint/tendon/site).
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
transmission_type: TransmissionType = TransmissionType.JOINT
|
| 39 |
+
"""Transmission type. Defaults to JOINT."""
|
| 40 |
+
|
| 41 |
+
armature: float = 0.0
|
| 42 |
+
"""Reflected rotor inertia."""
|
| 43 |
+
|
| 44 |
+
frictionloss: float = 0.0
|
| 45 |
+
"""Friction loss force limit.
|
| 46 |
+
|
| 47 |
+
Applies a constant friction force opposing motion, independent of load or velocity.
|
| 48 |
+
Also known as dry friction or load-independent friction.
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
def __post_init__(self) -> None:
|
| 52 |
+
assert self.armature >= 0.0, "armature must be non-negative."
|
| 53 |
+
assert self.frictionloss >= 0.0, "frictionloss must be non-negative."
|
| 54 |
+
if self.transmission_type == TransmissionType.SITE:
|
| 55 |
+
if self.armature > 0.0 or self.frictionloss > 0.0:
|
| 56 |
+
raise ValueError(
|
| 57 |
+
f"{self.__class__.__name__}: armature and frictionloss are not supported for "
|
| 58 |
+
"SITE transmission type."
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
@abstractmethod
|
| 62 |
+
def build(
|
| 63 |
+
self, entity: Entity, target_ids: list[int], target_names: list[str]
|
| 64 |
+
) -> Actuator:
|
| 65 |
+
"""Build actuator instance.
|
| 66 |
+
|
| 67 |
+
Args:
|
| 68 |
+
entity: Entity this actuator belongs to.
|
| 69 |
+
target_ids: Local target indices (for indexing entity arrays).
|
| 70 |
+
target_names: Target names corresponding to target_ids.
|
| 71 |
+
|
| 72 |
+
Returns:
|
| 73 |
+
Actuator instance.
|
| 74 |
+
"""
|
| 75 |
+
raise NotImplementedError
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
@dataclass
|
| 79 |
+
class ActuatorCmd:
|
| 80 |
+
"""High-level actuator command with targets and current state.
|
| 81 |
+
|
| 82 |
+
Passed to actuator's `compute()` method to generate low-level control signals.
|
| 83 |
+
All tensors have shape (num_envs, num_targets).
|
| 84 |
+
"""
|
| 85 |
+
|
| 86 |
+
position_target: torch.Tensor
|
| 87 |
+
"""Desired positions (joint positions, tendon lengths, or site positions)."""
|
| 88 |
+
velocity_target: torch.Tensor
|
| 89 |
+
"""Desired velocities (joint velocities, tendon velocities, or site velocities)."""
|
| 90 |
+
effort_target: torch.Tensor
|
| 91 |
+
"""Feedforward effort (torques or forces)."""
|
| 92 |
+
pos: torch.Tensor
|
| 93 |
+
"""Current positions (joint positions, tendon lengths, or site positions)."""
|
| 94 |
+
vel: torch.Tensor
|
| 95 |
+
"""Current velocities (joint velocities, tendon velocities, or site velocities)."""
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class Actuator(ABC, Generic[ActuatorCfgT]):
|
| 99 |
+
"""Base actuator interface."""
|
| 100 |
+
|
| 101 |
+
def __init__(
|
| 102 |
+
self,
|
| 103 |
+
cfg: ActuatorCfgT,
|
| 104 |
+
entity: Entity,
|
| 105 |
+
target_ids: list[int],
|
| 106 |
+
target_names: list[str],
|
| 107 |
+
) -> None:
|
| 108 |
+
self.cfg = cfg
|
| 109 |
+
self.entity = entity
|
| 110 |
+
self._target_ids_list = target_ids
|
| 111 |
+
self._target_names = target_names
|
| 112 |
+
self._target_ids: torch.Tensor | None = None
|
| 113 |
+
self._ctrl_ids: torch.Tensor | None = None
|
| 114 |
+
self._global_ctrl_ids: torch.Tensor | None = None
|
| 115 |
+
self._mjs_actuators: list[mujoco.MjsActuator] = []
|
| 116 |
+
self._site_zeros: torch.Tensor | None = None
|
| 117 |
+
|
| 118 |
+
@property
|
| 119 |
+
def target_ids(self) -> torch.Tensor:
|
| 120 |
+
"""Local indices of targets controlled by this actuator."""
|
| 121 |
+
assert self._target_ids is not None
|
| 122 |
+
return self._target_ids
|
| 123 |
+
|
| 124 |
+
@property
|
| 125 |
+
def target_names(self) -> list[str]:
|
| 126 |
+
"""Names of targets controlled by this actuator."""
|
| 127 |
+
return self._target_names
|
| 128 |
+
|
| 129 |
+
@property
|
| 130 |
+
def transmission_type(self) -> TransmissionType:
|
| 131 |
+
"""Transmission type of this actuator."""
|
| 132 |
+
return self.cfg.transmission_type
|
| 133 |
+
|
| 134 |
+
@property
|
| 135 |
+
def ctrl_ids(self) -> torch.Tensor:
|
| 136 |
+
"""Local indices of control inputs within the entity."""
|
| 137 |
+
assert self._ctrl_ids is not None
|
| 138 |
+
return self._ctrl_ids
|
| 139 |
+
|
| 140 |
+
@property
|
| 141 |
+
def global_ctrl_ids(self) -> torch.Tensor:
|
| 142 |
+
"""Global indices of control inputs in the MuJoCo model."""
|
| 143 |
+
assert self._global_ctrl_ids is not None
|
| 144 |
+
return self._global_ctrl_ids
|
| 145 |
+
|
| 146 |
+
@abstractmethod
|
| 147 |
+
def edit_spec(self, spec: mujoco.MjSpec, target_names: list[str]) -> None:
|
| 148 |
+
"""Edit the MjSpec to add actuators.
|
| 149 |
+
|
| 150 |
+
This is called during entity construction, before the model is compiled.
|
| 151 |
+
|
| 152 |
+
Args:
|
| 153 |
+
spec: The entity's MjSpec to edit.
|
| 154 |
+
target_names: Names of targets (joints, tendons, or sites) controlled by
|
| 155 |
+
this actuator.
|
| 156 |
+
"""
|
| 157 |
+
raise NotImplementedError
|
| 158 |
+
|
| 159 |
+
def initialize(
|
| 160 |
+
self,
|
| 161 |
+
mj_model: mujoco.MjModel,
|
| 162 |
+
model: mjwarp.Model,
|
| 163 |
+
data: mjwarp.Data,
|
| 164 |
+
device: str,
|
| 165 |
+
) -> None:
|
| 166 |
+
"""Initialize the actuator after model compilation.
|
| 167 |
+
|
| 168 |
+
This is called after the MjSpec is compiled into an MjModel.
|
| 169 |
+
|
| 170 |
+
Args:
|
| 171 |
+
mj_model: The compiled MuJoCo model.
|
| 172 |
+
model: The compiled mjwarp model.
|
| 173 |
+
data: The mjwarp data arrays.
|
| 174 |
+
device: Device for tensor operations (e.g., "cuda", "cpu").
|
| 175 |
+
"""
|
| 176 |
+
del mj_model, model # Unused.
|
| 177 |
+
self._target_ids = torch.tensor(
|
| 178 |
+
self._target_ids_list, dtype=torch.long, device=device
|
| 179 |
+
)
|
| 180 |
+
global_ctrl_ids_list = [act.id for act in self._mjs_actuators]
|
| 181 |
+
self._global_ctrl_ids = torch.tensor(
|
| 182 |
+
global_ctrl_ids_list, dtype=torch.long, device=device
|
| 183 |
+
)
|
| 184 |
+
entity_ctrl_ids = self.entity.indexing.ctrl_ids
|
| 185 |
+
global_to_local = {gid.item(): i for i, gid in enumerate(entity_ctrl_ids)}
|
| 186 |
+
self._ctrl_ids = torch.tensor(
|
| 187 |
+
[global_to_local[gid] for gid in global_ctrl_ids_list],
|
| 188 |
+
dtype=torch.long,
|
| 189 |
+
device=device,
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
# Pre-allocate zeros for SITE transmission type to avoid repeated allocations.
|
| 193 |
+
if self.transmission_type == TransmissionType.SITE:
|
| 194 |
+
nenvs = data.nworld
|
| 195 |
+
ntargets = len(self._target_ids_list)
|
| 196 |
+
self._site_zeros = torch.zeros((nenvs, ntargets), device=device)
|
| 197 |
+
|
| 198 |
+
def get_command(self, data: EntityData) -> ActuatorCmd:
|
| 199 |
+
"""Extract command data for this actuator from entity data.
|
| 200 |
+
|
| 201 |
+
Args:
|
| 202 |
+
data: The entity data containing all state and target information.
|
| 203 |
+
|
| 204 |
+
Returns:
|
| 205 |
+
ActuatorCmd with appropriate data based on transmission type.
|
| 206 |
+
"""
|
| 207 |
+
if self.transmission_type == TransmissionType.JOINT:
|
| 208 |
+
return ActuatorCmd(
|
| 209 |
+
position_target=data.joint_pos_target[:, self.target_ids],
|
| 210 |
+
velocity_target=data.joint_vel_target[:, self.target_ids],
|
| 211 |
+
effort_target=data.joint_effort_target[:, self.target_ids],
|
| 212 |
+
pos=data.joint_pos[:, self.target_ids],
|
| 213 |
+
vel=data.joint_vel[:, self.target_ids],
|
| 214 |
+
)
|
| 215 |
+
elif self.transmission_type == TransmissionType.TENDON:
|
| 216 |
+
return ActuatorCmd(
|
| 217 |
+
position_target=data.tendon_len_target[:, self.target_ids],
|
| 218 |
+
velocity_target=data.tendon_vel_target[:, self.target_ids],
|
| 219 |
+
effort_target=data.tendon_effort_target[:, self.target_ids],
|
| 220 |
+
pos=data.tendon_len[:, self.target_ids],
|
| 221 |
+
vel=data.tendon_vel[:, self.target_ids],
|
| 222 |
+
)
|
| 223 |
+
elif self.transmission_type == TransmissionType.SITE:
|
| 224 |
+
assert self._site_zeros is not None
|
| 225 |
+
return ActuatorCmd(
|
| 226 |
+
position_target=self._site_zeros,
|
| 227 |
+
velocity_target=self._site_zeros,
|
| 228 |
+
effort_target=data.site_effort_target[:, self.target_ids],
|
| 229 |
+
pos=self._site_zeros,
|
| 230 |
+
vel=self._site_zeros,
|
| 231 |
+
)
|
| 232 |
+
else:
|
| 233 |
+
raise ValueError(f"Unknown transmission type: {self.transmission_type}")
|
| 234 |
+
|
| 235 |
+
@abstractmethod
|
| 236 |
+
def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
|
| 237 |
+
"""Compute low-level actuator control signal from high-level commands.
|
| 238 |
+
|
| 239 |
+
Args:
|
| 240 |
+
cmd: High-level actuator command.
|
| 241 |
+
|
| 242 |
+
Returns:
|
| 243 |
+
Control signal tensor of shape (num_envs, num_actuators).
|
| 244 |
+
"""
|
| 245 |
+
raise NotImplementedError
|
| 246 |
+
|
| 247 |
+
# Optional methods.
|
| 248 |
+
|
| 249 |
+
def reset(self, env_ids: torch.Tensor | slice | None = None) -> None:
|
| 250 |
+
"""Reset actuator state for specified environments.
|
| 251 |
+
|
| 252 |
+
Base implementation does nothing. Override in subclasses that maintain
|
| 253 |
+
internal state.
|
| 254 |
+
|
| 255 |
+
Args:
|
| 256 |
+
env_ids: Environment indices to reset. If None, reset all environments.
|
| 257 |
+
"""
|
| 258 |
+
del env_ids # Unused.
|
| 259 |
+
|
| 260 |
+
def update(self, dt: float) -> None:
|
| 261 |
+
"""Update actuator state after a simulation step.
|
| 262 |
+
|
| 263 |
+
Base implementation does nothing. Override in subclasses that need
|
| 264 |
+
per-step updates.
|
| 265 |
+
|
| 266 |
+
Args:
|
| 267 |
+
dt: Time step in seconds.
|
| 268 |
+
"""
|
| 269 |
+
del dt # Unused.
|
mjlab/src/mjlab/actuator/builtin_actuator.py
ADDED
|
@@ -0,0 +1,279 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""MuJoCo built-in actuators.
|
| 2 |
+
|
| 3 |
+
This module provides actuators that use MuJoCo's native actuator implementations,
|
| 4 |
+
created programmatically via the MjSpec API.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
from dataclasses import dataclass
|
| 10 |
+
from typing import TYPE_CHECKING
|
| 11 |
+
|
| 12 |
+
import mujoco
|
| 13 |
+
import torch
|
| 14 |
+
|
| 15 |
+
from mjlab.actuator.actuator import (
|
| 16 |
+
Actuator,
|
| 17 |
+
ActuatorCfg,
|
| 18 |
+
ActuatorCmd,
|
| 19 |
+
TransmissionType,
|
| 20 |
+
)
|
| 21 |
+
from mjlab.utils.spec import (
|
| 22 |
+
create_motor_actuator,
|
| 23 |
+
create_muscle_actuator,
|
| 24 |
+
create_position_actuator,
|
| 25 |
+
create_velocity_actuator,
|
| 26 |
+
)
|
| 27 |
+
|
| 28 |
+
if TYPE_CHECKING:
|
| 29 |
+
from mjlab.entity import Entity
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@dataclass(kw_only=True)
class BuiltinPositionActuatorCfg(ActuatorCfg):
  """Configuration for MuJoCo's built-in <position> actuator.

  Creates one <position> actuator per target with the given stiffness,
  damping and effort limit. Also edits the target's properties, namely
  armature and frictionloss.
  """

  stiffness: float
  """PD proportional gain."""
  damping: float
  """PD derivative gain."""
  effort_limit: float | None = None
  """Maximum actuator force/torque. If None, no limit is applied."""

  def __post_init__(self) -> None:
    super().__post_init__()
    # Position servos have no site transmission; only joints/tendons.
    if self.transmission_type == TransmissionType.SITE:
      raise ValueError(
        "BuiltinPositionActuatorCfg does not support SITE transmission. "
        "Use BuiltinMotorActuatorCfg for site transmission."
      )

  def build(
    self, entity: Entity, target_ids: list[int], target_names: list[str]
  ) -> BuiltinPositionActuator:
    return BuiltinPositionActuator(self, entity, target_ids, target_names)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class BuiltinPositionActuator(Actuator[BuiltinPositionActuatorCfg]):
  """MuJoCo built-in position actuator (PD law applied by the engine)."""

  def __init__(
    self,
    cfg: BuiltinPositionActuatorCfg,
    entity: Entity,
    target_ids: list[int],
    target_names: list[str],
  ) -> None:
    super().__init__(cfg, entity, target_ids, target_names)

  def edit_spec(self, spec: mujoco.MjSpec, target_names: list[str]) -> None:
    # One <position> actuator per target.
    self._mjs_actuators.extend(
      create_position_actuator(
        spec,
        name,
        stiffness=self.cfg.stiffness,
        damping=self.cfg.damping,
        effort_limit=self.cfg.effort_limit,
        armature=self.cfg.armature,
        frictionloss=self.cfg.frictionloss,
        transmission_type=self.cfg.transmission_type,
      )
      for name in target_names
    )

  def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
    # MuJoCo consumes the raw position target directly as ctrl.
    return cmd.position_target
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
@dataclass(kw_only=True)
class BuiltinMotorActuatorCfg(ActuatorCfg):
  """Configuration for MuJoCo's built-in <motor> actuator.

  Creates one <motor> actuator per target with the given effort limit and
  gear ratio. Also edits the target's properties, namely armature and
  frictionloss.
  """

  effort_limit: float
  """Maximum actuator effort."""
  gear: float = 1.0
  """Actuator gear ratio."""

  def build(
    self, entity: Entity, target_ids: list[int], target_names: list[str]
  ) -> BuiltinMotorActuator:
    return BuiltinMotorActuator(self, entity, target_ids, target_names)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class BuiltinMotorActuator(Actuator[BuiltinMotorActuatorCfg]):
  """MuJoCo built-in motor actuator (direct torque/force passthrough)."""

  def __init__(
    self,
    cfg: BuiltinMotorActuatorCfg,
    entity: Entity,
    target_ids: list[int],
    target_names: list[str],
  ) -> None:
    super().__init__(cfg, entity, target_ids, target_names)

  def edit_spec(self, spec: mujoco.MjSpec, target_names: list[str]) -> None:
    # One <motor> actuator per target.
    self._mjs_actuators.extend(
      create_motor_actuator(
        spec,
        name,
        effort_limit=self.cfg.effort_limit,
        gear=self.cfg.gear,
        armature=self.cfg.armature,
        frictionloss=self.cfg.frictionloss,
        transmission_type=self.cfg.transmission_type,
      )
      for name in target_names
    )

  def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
    # MuJoCo consumes the raw effort target directly as ctrl.
    return cmd.effort_target
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
@dataclass(kw_only=True)
class BuiltinVelocityActuatorCfg(ActuatorCfg):
  """Configuration for MuJoCo's built-in <velocity> actuator.

  Creates one <velocity> actuator per target with the given damping gain.
  Also edits the target's properties, namely armature and frictionloss.
  """

  damping: float
  """Damping gain."""
  effort_limit: float | None = None
  """Maximum actuator force/torque. If None, no limit is applied."""

  def __post_init__(self) -> None:
    super().__post_init__()
    # Velocity servos have no site transmission; only joints/tendons.
    if self.transmission_type == TransmissionType.SITE:
      raise ValueError(
        "BuiltinVelocityActuatorCfg does not support SITE transmission. "
        "Use BuiltinMotorActuatorCfg for site transmission."
      )

  def build(
    self, entity: Entity, target_ids: list[int], target_names: list[str]
  ) -> BuiltinVelocityActuator:
    return BuiltinVelocityActuator(self, entity, target_ids, target_names)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
class BuiltinVelocityActuator(Actuator[BuiltinVelocityActuatorCfg]):
  """MuJoCo built-in velocity actuator (velocity servo applied by the engine)."""

  def __init__(
    self,
    cfg: BuiltinVelocityActuatorCfg,
    entity: Entity,
    target_ids: list[int],
    target_names: list[str],
  ) -> None:
    super().__init__(cfg, entity, target_ids, target_names)

  def edit_spec(self, spec: mujoco.MjSpec, target_names: list[str]) -> None:
    # One <velocity> actuator per target.
    self._mjs_actuators.extend(
      create_velocity_actuator(
        spec,
        name,
        damping=self.cfg.damping,
        effort_limit=self.cfg.effort_limit,
        armature=self.cfg.armature,
        frictionloss=self.cfg.frictionloss,
        transmission_type=self.cfg.transmission_type,
      )
      for name in target_names
    )

  def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
    # MuJoCo consumes the raw velocity target directly as ctrl.
    return cmd.velocity_target
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
@dataclass(kw_only=True)
class BuiltinMuscleActuatorCfg(ActuatorCfg):
  """Configuration for MuJoCo's built-in <muscle> actuator."""

  length_range: tuple[float, float] = (0.0, 0.0)
  """Length range for muscle actuator."""
  gear: float = 1.0
  """Gear ratio."""
  timeconst: tuple[float, float] = (0.01, 0.04)
  """Activation and deactivation time constants."""
  tausmooth: float = 0.0
  """Smoothing time constant."""
  range: tuple[float, float] = (0.75, 1.05)
  """Operating range of normalized muscle length."""
  force: float = -1.0
  """Peak force (if -1, defaults to scale * FLV)."""
  scale: float = 200.0
  """Force scaling factor."""
  lmin: float = 0.5
  """Minimum normalized muscle length."""
  lmax: float = 1.6
  """Maximum normalized muscle length."""
  vmax: float = 1.5
  """Maximum normalized muscle velocity."""
  fpmax: float = 1.3
  """Passive force at lmax."""
  fvmax: float = 1.2
  """Active force at -vmax."""

  def __post_init__(self) -> None:
    super().__post_init__()
    # Muscles have no site transmission; only joints/tendons.
    if self.transmission_type == TransmissionType.SITE:
      raise ValueError(
        "BuiltinMuscleActuatorCfg does not support SITE transmission. "
        "Use BuiltinMotorActuatorCfg for site transmission."
      )

  def build(
    self, entity: Entity, target_ids: list[int], target_names: list[str]
  ) -> BuiltinMuscleActuator:
    return BuiltinMuscleActuator(self, entity, target_ids, target_names)
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
class BuiltinMuscleActuator(Actuator[BuiltinMuscleActuatorCfg]):
  """MuJoCo built-in muscle actuator."""

  def __init__(
    self,
    cfg: BuiltinMuscleActuatorCfg,
    entity: Entity,
    target_ids: list[int],
    target_names: list[str],
  ) -> None:
    super().__init__(cfg, entity, target_ids, target_names)

  def edit_spec(self, spec: mujoco.MjSpec, target_names: list[str]) -> None:
    # One <muscle> actuator per target; all FLV-curve parameters come from
    # the config.
    self._mjs_actuators.extend(
      create_muscle_actuator(
        spec,
        name,
        length_range=self.cfg.length_range,
        gear=self.cfg.gear,
        timeconst=self.cfg.timeconst,
        tausmooth=self.cfg.tausmooth,
        range=self.cfg.range,
        force=self.cfg.force,
        scale=self.cfg.scale,
        lmin=self.cfg.lmin,
        lmax=self.cfg.lmax,
        vmax=self.cfg.vmax,
        fpmax=self.cfg.fpmax,
        fvmax=self.cfg.fvmax,
        transmission_type=self.cfg.transmission_type,
      )
      for name in target_names
    )

  def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
    # The effort target is used as the muscle excitation ctrl.
    return cmd.effort_target
|
mjlab/src/mjlab/actuator/builtin_group.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import TYPE_CHECKING
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from mjlab.actuator.actuator import TransmissionType
|
| 9 |
+
from mjlab.actuator.builtin_actuator import (
|
| 10 |
+
BuiltinMotorActuator,
|
| 11 |
+
BuiltinMuscleActuator,
|
| 12 |
+
BuiltinPositionActuator,
|
| 13 |
+
BuiltinVelocityActuator,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from mjlab.actuator.actuator import Actuator
|
| 18 |
+
from mjlab.entity.data import EntityData
|
| 19 |
+
|
| 20 |
+
# Union of all builtin actuator classes; also usable with isinstance().
BuiltinActuatorType = (
  BuiltinMotorActuator
  | BuiltinMuscleActuator
  | BuiltinPositionActuator
  | BuiltinVelocityActuator
)

# Maps (actuator_type, transmission_type) to the EntityData attribute that
# holds the corresponding target tensor.
_TARGET_TENSOR_MAP: dict[tuple[type[BuiltinActuatorType], TransmissionType], str] = {
  # Joint transmission.
  (BuiltinPositionActuator, TransmissionType.JOINT): "joint_pos_target",
  (BuiltinVelocityActuator, TransmissionType.JOINT): "joint_vel_target",
  (BuiltinMotorActuator, TransmissionType.JOINT): "joint_effort_target",
  (BuiltinMuscleActuator, TransmissionType.JOINT): "joint_effort_target",
  # Tendon transmission.
  (BuiltinPositionActuator, TransmissionType.TENDON): "tendon_len_target",
  (BuiltinVelocityActuator, TransmissionType.TENDON): "tendon_vel_target",
  (BuiltinMotorActuator, TransmissionType.TENDON): "tendon_effort_target",
  (BuiltinMuscleActuator, TransmissionType.TENDON): "tendon_effort_target",
  # Site transmission (effort only).
  (BuiltinMotorActuator, TransmissionType.SITE): "site_effort_target",
}
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@dataclass(frozen=True)
class BuiltinActuatorGroup:
  """Groups builtin actuators for batch processing.

  Builtin actuators (position, velocity, motor) simply forward target values
  from entity data to control signals. Pre-computing the index mappings lets
  controls be written with one batched operation per group instead of one
  call per actuator.
  """

  # (BuiltinActuator type, transmission_type) -> (target_ids, ctrl_ids).
  _index_groups: dict[
    tuple[type[BuiltinActuatorType], TransmissionType],
    tuple[torch.Tensor, torch.Tensor],
  ]

  @staticmethod
  def process(
    actuators: list[Actuator],
  ) -> tuple[BuiltinActuatorGroup, tuple[Actuator, ...]]:
    """Register builtin actuators and pre-compute their mappings.

    Args:
      actuators: List of initialized actuators to process.

    Returns:
      A tuple containing:
      - BuiltinActuatorGroup with pre-computed mappings.
      - List of custom (non-builtin) actuators.
    """
    grouped: dict[
      tuple[type[BuiltinActuatorType], TransmissionType], list[Actuator]
    ] = {}
    custom: list[Actuator] = []

    # Partition: builtins are grouped by (type, transmission_type), everything
    # else is passed through as a custom actuator.
    for actuator in actuators:
      if not isinstance(actuator, BuiltinActuatorType):
        custom.append(actuator)
        continue
      key = (type(actuator), actuator.cfg.transmission_type)
      grouped.setdefault(key, []).append(actuator)

    # Stack per-group indices once, up front.
    index_groups = {
      key: (
        torch.cat([member.target_ids for member in members], dim=0),
        torch.cat([member.ctrl_ids for member in members], dim=0),
      )
      for key, members in grouped.items()
    }
    return BuiltinActuatorGroup(index_groups), tuple(custom)

  def apply_controls(self, data: EntityData) -> None:
    """Write builtin actuator controls directly to simulation data.

    Args:
      data: Entity data containing targets and control arrays.
    """
    for key, (target_ids, ctrl_ids) in self._index_groups.items():
      target_tensor = getattr(data, _TARGET_TENSOR_MAP[key])
      data.write_ctrl(target_tensor[:, target_ids], ctrl_ids)
|
mjlab/src/mjlab/actuator/dc_actuator.py
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""DC motor actuator with velocity-based saturation model.
|
| 2 |
+
|
| 3 |
+
This module provides a DC motor actuator that implements a realistic torque-speed
|
| 4 |
+
curve for more accurate motor behavior simulation.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
from dataclasses import dataclass
|
| 10 |
+
from typing import TYPE_CHECKING, Generic, TypeVar
|
| 11 |
+
|
| 12 |
+
import mujoco
|
| 13 |
+
import mujoco_warp as mjwarp
|
| 14 |
+
import torch
|
| 15 |
+
|
| 16 |
+
from mjlab.actuator.actuator import ActuatorCmd
|
| 17 |
+
from mjlab.actuator.pd_actuator import IdealPdActuator, IdealPdActuatorCfg
|
| 18 |
+
|
| 19 |
+
if TYPE_CHECKING:
|
| 20 |
+
from mjlab.entity import Entity
|
| 21 |
+
|
| 22 |
+
DcMotorCfgT = TypeVar("DcMotorCfgT", bound="DcMotorActuatorCfg")
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@dataclass(kw_only=True)
class DcMotorActuatorCfg(IdealPdActuatorCfg):
  """Configuration for DC motor actuator with velocity-based saturation.

  This actuator implements a DC motor torque-speed curve for more realistic
  actuator behavior. The motor produces maximum torque (saturation_effort) at
  zero velocity and reduces linearly to zero torque at maximum velocity.

  Note: effort_limit should be explicitly set to a realistic value for proper
  motor modeling. Using the default (inf) will trigger a warning. Use
  IdealPdActuator if unlimited torque is desired.
  """

  saturation_effort: float
  """Peak motor torque at zero velocity (stall torque)."""

  velocity_limit: float
  """Maximum motor velocity (no-load speed)."""

  def __post_init__(self) -> None:
    """Validate DC motor parameters."""
    # Chain to base-class validation first. Every other ActuatorCfg subclass
    # in this package calls super().__post_init__(); omitting it here would
    # silently skip the inherited checks.
    super().__post_init__()

    import warnings

    if self.effort_limit == float("inf"):
      warnings.warn(
        "effort_limit is set to inf for DcMotorActuator, which creates an "
        "unrealistic motor with unlimited continuous torque. Consider setting "
        "effort_limit to your motor's continuous rating (<= saturation_effort). "
        "Use IdealPdActuator if you truly want unlimited torque.",
        UserWarning,
        stacklevel=2,
      )

    if self.effort_limit > self.saturation_effort:
      warnings.warn(
        f"effort_limit ({self.effort_limit}) exceeds saturation_effort "
        f"({self.saturation_effort}). For realistic motors, continuous torque "
        "should be <= peak torque.",
        UserWarning,
        stacklevel=2,
      )

  def build(
    self, entity: Entity, target_ids: list[int], target_names: list[str]
  ) -> DcMotorActuator:
    return DcMotorActuator(self, entity, target_ids, target_names)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class DcMotorActuator(IdealPdActuator[DcMotorCfgT], Generic[DcMotorCfgT]):
  """DC motor actuator with velocity-based saturation model.

  Extends IdealPdActuator with a linear torque-speed curve that limits the
  achievable torque as a function of the current joint velocity:
  - At zero velocity: full saturation_effort (stall torque) is available.
  - At max velocity: zero torque is available.
  - In between: the limit varies linearly.

  The continuous torque limit (effort_limit) further constrains the output.
  """

  def __init__(
    self,
    cfg: DcMotorCfgT,
    entity: Entity,
    target_ids: list[int],
    target_names: list[str],
  ) -> None:
    super().__init__(cfg, entity, target_ids, target_names)
    # Populated lazily in initialize(), once nworld/device are known.
    self.saturation_effort: torch.Tensor | None = None
    self.velocity_limit_motor: torch.Tensor | None = None
    self._vel_at_effort_lim: torch.Tensor | None = None
    self._joint_vel_clipped: torch.Tensor | None = None

  def initialize(
    self,
    mj_model: mujoco.MjModel,
    model: mjwarp.Model,
    data: mjwarp.Data,
    device: str,
  ) -> None:
    super().initialize(mj_model, model, data, device)

    shape = (data.nworld, len(self._target_names))

    self.saturation_effort = torch.full(
      shape, self.cfg.saturation_effort, dtype=torch.float, device=device
    )
    self.velocity_limit_motor = torch.full(
      shape, self.cfg.velocity_limit, dtype=torch.float, device=device
    )

    # Corner velocity: where the torque-speed line crosses the continuous
    # effort limit.
    assert self.force_limit is not None
    self._vel_at_effort_lim = self.velocity_limit_motor * (
      1 + self.force_limit / self.saturation_effort
    )
    self._joint_vel_clipped = torch.zeros(*shape, device=device)

  def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
    # Stash the current velocity so _clip_effort can apply the speed-dependent
    # torque limit.
    assert self._joint_vel_clipped is not None
    self._joint_vel_clipped[:] = cmd.vel
    return super().compute(cmd)

  def _clip_effort(self, effort: torch.Tensor) -> torch.Tensor:
    assert self.saturation_effort is not None
    assert self.velocity_limit_motor is not None
    assert self.force_limit is not None
    assert self._vel_at_effort_lim is not None
    assert self._joint_vel_clipped is not None

    # Restrict velocity to the corner-velocity range.
    vel = torch.clamp(
      self._joint_vel_clipped,
      min=-self._vel_at_effort_lim,
      max=self._vel_at_effort_lim,
    )

    # Linear torque-speed envelope (upper and lower branch).
    envelope_hi = self.saturation_effort * (1.0 - vel / self.velocity_limit_motor)
    envelope_lo = self.saturation_effort * (-1.0 - vel / self.velocity_limit_motor)

    # Intersect with the continuous torque rating.
    upper = torch.clamp(envelope_hi, max=self.force_limit)
    lower = torch.clamp(envelope_lo, min=-self.force_limit)

    return torch.clamp(effort, min=lower, max=upper)
|
mjlab/src/mjlab/actuator/delayed_actuator.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Generic delayed actuator wrapper."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from dataclasses import dataclass, field
|
| 6 |
+
from typing import TYPE_CHECKING, Literal
|
| 7 |
+
|
| 8 |
+
import mujoco
|
| 9 |
+
import mujoco_warp as mjwarp
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
from mjlab.actuator.actuator import Actuator, ActuatorCfg, ActuatorCmd
|
| 13 |
+
from mjlab.utils.buffers import DelayBuffer
|
| 14 |
+
|
| 15 |
+
if TYPE_CHECKING:
|
| 16 |
+
from mjlab.entity import Entity
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@dataclass(kw_only=True)
class DelayedActuatorCfg(ActuatorCfg):
  """Configuration for delayed actuator wrapper.

  Wraps any actuator config to add delay functionality. Delays are quantized
  to physics timesteps (not control timesteps). For example, with 500Hz
  physics and 50Hz control (decimation=10), a lag of 2 represents a 4ms delay
  (2 physics steps).
  """

  # Derived from base_cfg in __post_init__; never set by callers.
  target_names_expr: tuple[str, ...] = field(init=False, default=())

  base_cfg: ActuatorCfg
  """Configuration for the underlying actuator."""

  delay_target: (
    Literal["position", "velocity", "effort"]
    | tuple[Literal["position", "velocity", "effort"], ...]
  ) = "position"
  """Which command target(s) to delay.

  Can be a single string like 'position', or a tuple of strings like
  ('position', 'velocity', 'effort') to delay multiple targets together.
  """

  delay_min_lag: int = 0
  """Minimum delay lag in physics timesteps."""

  delay_max_lag: int = 0
  """Maximum delay lag in physics timesteps."""

  delay_hold_prob: float = 0.0
  """Probability of keeping previous lag when updating."""

  delay_update_period: int = 0
  """Period for updating delays in physics timesteps (0 = every step)."""

  delay_per_env_phase: bool = True
  """Whether each environment has a different phase offset."""

  def __post_init__(self):
    # Mirror the wrapped config's targeting so resolution sees the same
    # targets. object.__setattr__ is used so this also works if the dataclass
    # is made frozen — presumably mirroring ActuatorCfg; TODO confirm.
    # NOTE(review): unlike the other ActuatorCfg subclasses, this does not
    # chain to super().__post_init__() — confirm whether base validation
    # should run here (ordering with the setattr overrides matters).
    object.__setattr__(self, "target_names_expr", self.base_cfg.target_names_expr)
    object.__setattr__(self, "transmission_type", self.base_cfg.transmission_type)

  def build(
    self, entity: Entity, target_ids: list[int], target_names: list[str]
  ) -> DelayedActuator:
    # Build the wrapped actuator first, then wrap it.
    inner = self.base_cfg.build(entity, target_ids, target_names)
    return DelayedActuator(self, inner)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class DelayedActuator(Actuator[DelayedActuatorCfg]):
|
| 71 |
+
"""Generic wrapper that adds delay to any actuator.
|
| 72 |
+
|
| 73 |
+
Delays the specified command target(s) (position, velocity, and/or effort)
|
| 74 |
+
before passing it to the underlying actuator's compute method.
|
| 75 |
+
"""
|
| 76 |
+
|
| 77 |
+
def __init__(self, cfg: DelayedActuatorCfg, base_actuator: Actuator) -> None:
|
| 78 |
+
super().__init__(
|
| 79 |
+
cfg,
|
| 80 |
+
base_actuator.entity,
|
| 81 |
+
base_actuator._target_ids_list,
|
| 82 |
+
base_actuator._target_names,
|
| 83 |
+
)
|
| 84 |
+
self._base_actuator = base_actuator
|
| 85 |
+
self._delay_buffers: dict[str, DelayBuffer] = {}
|
| 86 |
+
|
| 87 |
+
@property
|
| 88 |
+
def base_actuator(self) -> Actuator:
|
| 89 |
+
"""The underlying actuator being wrapped."""
|
| 90 |
+
return self._base_actuator
|
| 91 |
+
|
| 92 |
+
def edit_spec(self, spec: mujoco.MjSpec, target_names: list[str]) -> None:
|
| 93 |
+
self._base_actuator.edit_spec(spec, target_names)
|
| 94 |
+
self._mjs_actuators = self._base_actuator._mjs_actuators
|
| 95 |
+
|
| 96 |
+
def initialize(
|
| 97 |
+
self,
|
| 98 |
+
mj_model: mujoco.MjModel,
|
| 99 |
+
model: mjwarp.Model,
|
| 100 |
+
data: mjwarp.Data,
|
| 101 |
+
device: str,
|
| 102 |
+
) -> None:
|
| 103 |
+
self._base_actuator.initialize(mj_model, model, data, device)
|
| 104 |
+
|
| 105 |
+
self._target_ids = self._base_actuator._target_ids
|
| 106 |
+
self._ctrl_ids = self._base_actuator._ctrl_ids
|
| 107 |
+
self._global_ctrl_ids = self._base_actuator._global_ctrl_ids
|
| 108 |
+
|
| 109 |
+
targets = (
|
| 110 |
+
(self.cfg.delay_target,)
|
| 111 |
+
if isinstance(self.cfg.delay_target, str)
|
| 112 |
+
else self.cfg.delay_target
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
# Create independent delay buffer for each target.
|
| 116 |
+
for target in targets:
|
| 117 |
+
self._delay_buffers[target] = DelayBuffer(
|
| 118 |
+
min_lag=self.cfg.delay_min_lag,
|
| 119 |
+
max_lag=self.cfg.delay_max_lag,
|
| 120 |
+
batch_size=data.nworld,
|
| 121 |
+
device=device,
|
| 122 |
+
hold_prob=self.cfg.delay_hold_prob,
|
| 123 |
+
update_period=self.cfg.delay_update_period,
|
| 124 |
+
per_env_phase=self.cfg.delay_per_env_phase,
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
|
| 128 |
+
position_target = cmd.position_target
|
| 129 |
+
velocity_target = cmd.velocity_target
|
| 130 |
+
effort_target = cmd.effort_target
|
| 131 |
+
|
| 132 |
+
if "position" in self._delay_buffers:
|
| 133 |
+
self._delay_buffers["position"].append(cmd.position_target)
|
| 134 |
+
position_target = self._delay_buffers["position"].compute()
|
| 135 |
+
|
| 136 |
+
if "velocity" in self._delay_buffers:
|
| 137 |
+
self._delay_buffers["velocity"].append(cmd.velocity_target)
|
| 138 |
+
velocity_target = self._delay_buffers["velocity"].compute()
|
| 139 |
+
|
| 140 |
+
if "effort" in self._delay_buffers:
|
| 141 |
+
self._delay_buffers["effort"].append(cmd.effort_target)
|
| 142 |
+
effort_target = self._delay_buffers["effort"].compute()
|
| 143 |
+
|
| 144 |
+
delayed_cmd = ActuatorCmd(
|
| 145 |
+
position_target=position_target,
|
| 146 |
+
velocity_target=velocity_target,
|
| 147 |
+
effort_target=effort_target,
|
| 148 |
+
pos=cmd.pos,
|
| 149 |
+
vel=cmd.vel,
|
| 150 |
+
)
|
| 151 |
+
|
| 152 |
+
return self._base_actuator.compute(delayed_cmd)
|
| 153 |
+
|
| 154 |
+
  def reset(self, env_ids: torch.Tensor | slice | None = None) -> None:
    """Reset all delay buffers and the wrapped actuator.

    Args:
      env_ids: Environment indices to reset. If None, resets all environments.
    """
    for buffer in self._delay_buffers.values():
      buffer.reset(env_ids)
    self._base_actuator.reset(env_ids)
|
| 158 |
+
|
| 159 |
+
  def set_lags(
    self,
    lags: torch.Tensor,
    env_ids: torch.Tensor | slice | None = None,
  ) -> None:
    """Set delay lag values for specified environments.

    The same lag values are applied to every delayed channel's buffer.

    Args:
      lags: Lag values in physics timesteps. Shape: (num_env_ids,) or scalar.
      env_ids: Environment indices to set. If None, sets all environments.
    """
    for buffer in self._delay_buffers.values():
      buffer.set_lags(lags, env_ids)
|
| 172 |
+
|
| 173 |
+
  def update(self, dt: float) -> None:
    """Forward the per-step update to the wrapped actuator (no delay logic here)."""
    self._base_actuator.update(dt)
|
mjlab/src/mjlab/actuator/learned_actuator.py
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Learned actuator models."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from typing import TYPE_CHECKING, Literal
|
| 7 |
+
|
| 8 |
+
import mujoco
|
| 9 |
+
import mujoco_warp as mjwarp
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
from mjlab.actuator.actuator import ActuatorCmd
|
| 13 |
+
from mjlab.actuator.dc_actuator import DcMotorActuator, DcMotorActuatorCfg
|
| 14 |
+
from mjlab.utils.buffers import CircularBuffer
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from mjlab.entity import Entity
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@dataclass(kw_only=True)
class LearnedMlpActuatorCfg(DcMotorActuatorCfg):
  """Configuration for MLP-based learned actuator model.

  This actuator learns the mapping from joint commands and state history to
  actual torque output. It's useful for capturing actuator dynamics that are
  difficult to model analytically, such as delays, non-linearities, and
  friction effects.

  The network is trained offline using data from the real system and loaded
  as a TorchScript file. The model uses a sliding window of historical joint
  position errors and velocities as inputs.
  """

  network_file: str
  """Path to the TorchScript file containing the trained MLP model."""

  pos_scale: float
  """Scaling factor for joint position error inputs to the network."""

  vel_scale: float
  """Scaling factor for joint velocity inputs to the network."""

  torque_scale: float
  """Scaling factor for torque outputs from the network."""

  input_order: Literal["pos_vel", "vel_pos"] = "pos_vel"
  """Order of inputs to the network.

  - "pos_vel": position errors followed by velocities
  - "vel_pos": velocities followed by position errors
  """

  history_length: int = 3
  """Number of timesteps of history to use as network inputs.

  For example, history_length=3 uses the current timestep plus the previous
  2 timesteps (total of 3 frames).
  """

  # Learned actuators don't use stiffness/damping from PD controller.
  stiffness: float = 0.0
  damping: float = 0.0

  def build(
    self, entity: Entity, target_ids: list[int], target_names: list[str]
  ) -> LearnedMlpActuator:
    """Instantiate a LearnedMlpActuator from this configuration."""
    return LearnedMlpActuator(self, entity, target_ids, target_names)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class LearnedMlpActuator(DcMotorActuator[LearnedMlpActuatorCfg]):
  """MLP-based learned actuator with joint history.

  This actuator uses a trained neural network to map from joint commands and
  state history to torque outputs. The network captures complex actuator
  dynamics that are difficult to model analytically.

  The actuator maintains circular buffers of joint position errors and
  velocities, which are used as inputs to the MLP. The network outputs are
  then scaled and clipped using the DC motor limits from the parent class.
  """

  def __init__(
    self,
    cfg: LearnedMlpActuatorCfg,
    entity: Entity,
    target_ids: list[int],
    target_names: list[str],
  ) -> None:
    super().__init__(cfg, entity, target_ids, target_names)
    # All three are populated in initialize(), once batch size/device known.
    self.network: torch.jit.ScriptModule | None = None
    self._pos_error_history: CircularBuffer | None = None
    self._vel_history: CircularBuffer | None = None

  def initialize(
    self,
    mj_model: mujoco.MjModel,
    model: mjwarp.Model,
    data: mjwarp.Data,
    device: str,
  ) -> None:
    """Load the TorchScript network and allocate per-env history buffers."""
    super().initialize(mj_model, model, data, device)

    # Load the trained network from TorchScript file.
    self.network = torch.jit.load(self.cfg.network_file, map_location=device)
    assert self.network is not None
    self.network.eval()  # Inference only.

    # Create history buffers.
    num_envs = data.nworld

    self._pos_error_history = CircularBuffer(
      max_len=self.cfg.history_length,
      batch_size=num_envs,
      device=device,
    )
    self._vel_history = CircularBuffer(
      max_len=self.cfg.history_length,
      batch_size=num_envs,
      device=device,
    )

  def reset(self, env_ids: torch.Tensor | slice | None = None) -> None:
    """Reset history buffers for specified environments.

    Args:
      env_ids: Environment indices to reset. If None, reset all environments.
    """
    assert self._pos_error_history is not None
    assert self._vel_history is not None

    if env_ids is None:
      self._pos_error_history.reset()
      self._vel_history.reset()
    elif isinstance(env_ids, slice):
      # Convert slice to indices for CircularBuffer.
      batch_size = self._pos_error_history.batch_size
      indices = list(range(*env_ids.indices(batch_size)))
      self._pos_error_history.reset(indices)
      self._vel_history.reset(indices)
    else:
      self._pos_error_history.reset(env_ids)
      self._vel_history.reset(env_ids)

  def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
    """Compute actuator torques using the learned MLP model.

    Args:
      cmd: High-level actuator command containing targets and current state.

    Returns:
      Computed torque tensor of shape (num_envs, num_joints).
    """
    assert self.network is not None
    assert self._pos_error_history is not None
    assert self._vel_history is not None
    assert self._joint_vel_clipped is not None

    # Update history buffers with current state.
    pos_error = cmd.position_target - cmd.pos
    self._pos_error_history.append(pos_error)
    self._vel_history.append(cmd.vel)

    # Save velocity for DC motor clipping in parent class.
    self._joint_vel_clipped[:] = cmd.vel

    num_envs = cmd.pos.shape[0]
    num_joints = cmd.pos.shape[1]

    # Extract history from current to history_length-1 steps back.
    # Each returns shape: (num_envs, num_joints).
    pos_inputs = [
      self._pos_error_history[lag] for lag in range(self.cfg.history_length)
    ]
    vel_inputs = [self._vel_history[lag] for lag in range(self.cfg.history_length)]

    # Stack along feature dimension: (num_envs, num_joints, history_length).
    pos_stacked = torch.stack(pos_inputs, dim=2)
    vel_stacked = torch.stack(vel_inputs, dim=2)

    # Reshape to (num_envs * num_joints, num_history_steps) for network.
    # The MLP is applied per joint, sharing weights across joints and envs.
    pos_flat = pos_stacked.reshape(num_envs * num_joints, -1)
    vel_flat = vel_stacked.reshape(num_envs * num_joints, -1)

    # Scale and concatenate inputs based on specified order.
    if self.cfg.input_order == "pos_vel":
      network_input = torch.cat(
        [pos_flat * self.cfg.pos_scale, vel_flat * self.cfg.vel_scale], dim=1
      )
    elif self.cfg.input_order == "vel_pos":
      network_input = torch.cat(
        [vel_flat * self.cfg.vel_scale, pos_flat * self.cfg.pos_scale], dim=1
      )
    else:
      # Defensive: Literal typing should prevent this at config time.
      raise ValueError(
        f"Invalid input order: {self.cfg.input_order}. Must be 'pos_vel' or 'vel_pos'."
      )

    # Run network inference.
    with torch.inference_mode():
      torques_flat = self.network(network_input)

    # Reshape and scale output torques.
    computed_torques = torques_flat.reshape(num_envs, num_joints)
    computed_torques = computed_torques * self.cfg.torque_scale

    # Clip using DC motor limits from parent class.
    return self._clip_effort(computed_torques)
|
mjlab/src/mjlab/actuator/pd_actuator.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""An ideal PD control actuator."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from typing import TYPE_CHECKING, Generic, TypeVar
|
| 7 |
+
|
| 8 |
+
import mujoco
|
| 9 |
+
import mujoco_warp as mjwarp
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
from mjlab.actuator.actuator import Actuator, ActuatorCfg, ActuatorCmd
|
| 13 |
+
from mjlab.utils.spec import create_motor_actuator
|
| 14 |
+
|
| 15 |
+
if TYPE_CHECKING:
|
| 16 |
+
from mjlab.entity import Entity
|
| 17 |
+
|
| 18 |
+
IdealPdCfgT = TypeVar("IdealPdCfgT", bound="IdealPdActuatorCfg")
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@dataclass(kw_only=True)
class IdealPdActuatorCfg(ActuatorCfg):
  """Configuration for ideal PD actuator."""

  stiffness: float
  """PD stiffness (proportional gain)."""
  damping: float
  """PD damping (derivative gain)."""
  effort_limit: float = float("inf")
  """Maximum force/torque limit (symmetric; inf disables clipping)."""

  def build(
    self, entity: Entity, target_ids: list[int], target_names: list[str]
  ) -> IdealPdActuator:
    """Instantiate an IdealPdActuator from this configuration."""
    return IdealPdActuator(self, entity, target_ids, target_names)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class IdealPdActuator(Actuator, Generic[IdealPdCfgT]):
  """Ideal PD control actuator.

  Computes torque = stiffness * (pos_target - pos) + damping * (vel_target -
  vel) + feedforward effort, clamped to a symmetric effort limit. Gains and
  limits are per-env, per-joint tensors that can be changed at runtime.
  """

  def __init__(
    self,
    cfg: IdealPdCfgT,
    entity: Entity,
    target_ids: list[int],
    target_names: list[str],
  ) -> None:
    super().__init__(cfg, entity, target_ids, target_names)
    # Allocated in initialize(); the default_* snapshots preserve the
    # configured values so randomized gains/limits can be restored later.
    self.stiffness: torch.Tensor | None = None
    self.damping: torch.Tensor | None = None
    self.force_limit: torch.Tensor | None = None
    self.default_stiffness: torch.Tensor | None = None
    self.default_damping: torch.Tensor | None = None
    self.default_force_limit: torch.Tensor | None = None

  def edit_spec(self, spec: mujoco.MjSpec, target_names: list[str]) -> None:
    """Add one <motor> actuator per target to the MuJoCo spec."""
    # Add <motor> actuator to spec, one per target.
    for target_name in target_names:
      actuator = create_motor_actuator(
        spec,
        target_name,
        effort_limit=self.cfg.effort_limit,
        armature=self.cfg.armature,
        frictionloss=self.cfg.frictionloss,
        transmission_type=self.cfg.transmission_type,
      )
      self._mjs_actuators.append(actuator)

  def initialize(
    self,
    mj_model: mujoco.MjModel,
    model: mjwarp.Model,
    data: mjwarp.Data,
    device: str,
  ) -> None:
    """Allocate per-env gain/limit tensors from the scalar config values."""
    super().initialize(mj_model, model, data, device)

    num_envs = data.nworld
    num_joints = len(self._target_names)
    self.stiffness = torch.full(
      (num_envs, num_joints), self.cfg.stiffness, dtype=torch.float, device=device
    )
    self.damping = torch.full(
      (num_envs, num_joints), self.cfg.damping, dtype=torch.float, device=device
    )
    self.force_limit = torch.full(
      (num_envs, num_joints), self.cfg.effort_limit, dtype=torch.float, device=device
    )

    # Snapshot initial values so callers can restore them after randomization.
    self.default_stiffness = self.stiffness.clone()
    self.default_damping = self.damping.clone()
    self.default_force_limit = self.force_limit.clone()

  def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
    """PD law plus feedforward effort, clamped to the effort limit."""
    assert self.stiffness is not None
    assert self.damping is not None

    pos_error = cmd.position_target - cmd.pos
    vel_error = cmd.velocity_target - cmd.vel

    computed_torques = self.stiffness * pos_error
    computed_torques += self.damping * vel_error
    computed_torques += cmd.effort_target

    return self._clip_effort(computed_torques)

  def _clip_effort(self, effort: torch.Tensor) -> torch.Tensor:
    """Clamp effort elementwise to [-force_limit, +force_limit]."""
    assert self.force_limit is not None
    return torch.clamp(effort, -self.force_limit, self.force_limit)

  def set_gains(
    self,
    env_ids: torch.Tensor | slice,
    kp: torch.Tensor | None = None,
    kd: torch.Tensor | None = None,
  ) -> None:
    """Set PD gains for specified environments.

    Args:
      env_ids: Environment indices to update.
      kp: New proportional gains. Shape: (num_envs, num_actuators) or (num_envs,).
      kd: New derivative gains. Shape: (num_envs, num_actuators) or (num_envs,).
    """
    assert self.stiffness is not None
    assert self.damping is not None

    if kp is not None:
      if kp.ndim == 1:
        # (num_envs,) input broadcasts the same gain across all actuators.
        kp = kp.unsqueeze(-1)
      self.stiffness[env_ids] = kp

    if kd is not None:
      if kd.ndim == 1:
        kd = kd.unsqueeze(-1)
      self.damping[env_ids] = kd

  def set_effort_limit(
    self, env_ids: torch.Tensor | slice, effort_limit: torch.Tensor
  ) -> None:
    """Set effort limits for specified environments.

    Args:
      env_ids: Environment indices to update.
      effort_limit: New effort limits. Shape: (num_envs, num_actuators) or (num_envs,).
    """
    assert self.force_limit is not None

    if effort_limit.ndim == 1:
      # (num_envs,) input broadcasts the same limit across all actuators.
      effort_limit = effort_limit.unsqueeze(-1)
    self.force_limit[env_ids] = effort_limit
|
mjlab/src/mjlab/actuator/xml_actuator.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Wrappers for XML-defined actuators.
|
| 2 |
+
|
| 3 |
+
This module provides wrappers for actuators already defined in robot XML/MJCF files.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from __future__ import annotations
|
| 7 |
+
|
| 8 |
+
from dataclasses import dataclass
|
| 9 |
+
from typing import TYPE_CHECKING, Generic, TypeVar
|
| 10 |
+
|
| 11 |
+
import mujoco
|
| 12 |
+
import torch
|
| 13 |
+
|
| 14 |
+
from mjlab.actuator.actuator import Actuator, ActuatorCfg, ActuatorCmd
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from mjlab.entity import Entity
|
| 18 |
+
|
| 19 |
+
XmlActuatorCfgT = TypeVar("XmlActuatorCfgT", bound=ActuatorCfg)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class XmlActuator(Actuator[XmlActuatorCfgT], Generic[XmlActuatorCfgT]):
  """Base class for XML-defined actuators.

  Instead of adding actuators to the spec, subclasses locate actuators that
  already exist in the robot's XML and forward targets to them in compute().
  """

  def __init__(
    self,
    cfg: XmlActuatorCfgT,
    entity: Entity,
    target_ids: list[int],
    target_names: list[str],
  ) -> None:
    super().__init__(cfg, entity, target_ids, target_names)

  def edit_spec(self, spec: mujoco.MjSpec, target_names: list[str]) -> None:
    """Collect existing XML actuators instead of creating new ones.

    Targets without a matching actuator in the spec are silently dropped from
    this actuator's target lists; it is an error if none match at all.
    """
    # Filter to only targets that have corresponding XML actuators.
    filtered_target_ids = []
    filtered_target_names = []
    for i, target_name in enumerate(target_names):
      actuator = self._find_actuator_for_target(spec, target_name)
      if actuator is not None:
        self._mjs_actuators.append(actuator)
        filtered_target_ids.append(self._target_ids_list[i])
        filtered_target_names.append(target_name)

    if len(filtered_target_names) == 0:
      raise ValueError(
        f"No XML actuators found for any targets matching the patterns. "
        f"Searched targets: {target_names}. "
        f"XML actuator config expects actuators to already exist in the XML."
      )

    # Update target IDs and names to only include those with actuators.
    self._target_ids_list = filtered_target_ids
    self._target_names = filtered_target_names

  def _find_actuator_for_target(
    self, spec: mujoco.MjSpec, target_name: str
  ) -> mujoco.MjsActuator | None:
    """Find an actuator that targets the given target (joint, tendon, or site)."""
    # Linear scan; spec actuator counts are small enough for this to be fine.
    for actuator in spec.actuators:
      if actuator.target == target_name:
        return actuator
    return None
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@dataclass(kw_only=True)
class XmlPositionActuatorCfg(ActuatorCfg):
  """Wrap existing XML-defined <position> actuators."""

  def build(
    self, entity: Entity, target_ids: list[int], target_names: list[str]
  ) -> XmlPositionActuator:
    """Instantiate the wrapper; no new actuators are added to the spec."""
    return XmlPositionActuator(self, entity, target_ids, target_names)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class XmlPositionActuator(XmlActuator[XmlPositionActuatorCfg]):
  """Wrapper for XML-defined <position> actuators."""

  def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
    # The position servo lives in the XML actuator; pass the target through
    # unchanged as the ctrl value.
    return cmd.position_target
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@dataclass(kw_only=True)
class XmlMotorActuatorCfg(ActuatorCfg):
  """Wrap existing XML-defined <motor> actuators."""

  def build(
    self, entity: Entity, target_ids: list[int], target_names: list[str]
  ) -> XmlMotorActuator:
    """Instantiate the wrapper; no new actuators are added to the spec."""
    return XmlMotorActuator(self, entity, target_ids, target_names)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class XmlMotorActuator(XmlActuator[XmlMotorActuatorCfg]):
  """Wrapper for XML-defined <motor> actuators."""

  def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
    # Direct torque/force control: forward the effort target as ctrl.
    return cmd.effort_target
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
@dataclass(kw_only=True)
class XmlVelocityActuatorCfg(ActuatorCfg):
  """Wrap existing XML-defined <velocity> actuators."""

  def build(
    self, entity: Entity, target_ids: list[int], target_names: list[str]
  ) -> XmlVelocityActuator:
    """Instantiate the wrapper; no new actuators are added to the spec."""
    return XmlVelocityActuator(self, entity, target_ids, target_names)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class XmlVelocityActuator(XmlActuator[XmlVelocityActuatorCfg]):
  """Wrapper for XML-defined <velocity> actuators."""

  def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
    # The velocity servo lives in the XML actuator; forward the target.
    return cmd.velocity_target
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
@dataclass(kw_only=True)
class XmlMuscleActuatorCfg(ActuatorCfg):
  """Wrap existing XML-defined <muscle> actuators."""

  def build(
    self, entity: Entity, target_ids: list[int], target_names: list[str]
  ) -> XmlMuscleActuator:
    """Instantiate the wrapper; no new actuators are added to the spec."""
    return XmlMuscleActuator(self, entity, target_ids, target_names)
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
class XmlMuscleActuator(XmlActuator[XmlMuscleActuatorCfg]):
  """Wrapper for XML-defined <muscle> actuators."""

  def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
    # Muscle activation is driven by the effort target channel.
    return cmd.effort_target
|
mjlab/src/mjlab/asset_zoo/README.md
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Asset Zoo
|
| 2 |
+
|
| 3 |
+
| Robot | Image |
|
| 4 |
+
|-------|-------|
|
| 5 |
+
| **Unitree G1** | <img src="../../../docs/source/_static/content/g1.png" width="200" height="150" alt="Unitree G1 Humanoid"> |
|
| 6 |
+
| **Unitree Go1** | <img src="../../../docs/source/_static/content/go1.png" width="200" height="150" alt="Unitree Go1 Quadruped"> |
|
| 7 |
+
| **i2rt YAM** | <img src="../../../docs/source/_static/content/yam.png" width="200" height="150" alt="i2rt YAM Robot Arm"> |
|
mjlab/src/mjlab/asset_zoo/__init__.py
ADDED
|
File without changes
|
mjlab/src/mjlab/entity/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from mjlab.entity.data import EntityData as EntityData
|
| 2 |
+
from mjlab.entity.entity import Entity as Entity
|
| 3 |
+
from mjlab.entity.entity import EntityArticulationInfoCfg as EntityArticulationInfoCfg
|
| 4 |
+
from mjlab.entity.entity import EntityCfg as EntityCfg
|
| 5 |
+
from mjlab.entity.entity import EntityIndexing as EntityIndexing
|
mjlab/src/mjlab/entity/data.py
ADDED
|
@@ -0,0 +1,576 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import TYPE_CHECKING, Sequence
|
| 5 |
+
|
| 6 |
+
import mujoco_warp as mjwarp
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
from mjlab.utils.lab_api.math import (
|
| 10 |
+
quat_apply,
|
| 11 |
+
quat_apply_inverse,
|
| 12 |
+
quat_from_matrix,
|
| 13 |
+
quat_mul,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from mjlab.entity.entity import EntityIndexing
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def compute_velocity_from_cvel(
  pos: torch.Tensor,
  subtree_com: torch.Tensor,
  cvel: torch.Tensor,
) -> torch.Tensor:
  """Convert cvel quantities to world-frame velocities.

  ``cvel`` packs spatial velocity as (angular[0:3], linear[3:6]), with the
  linear part expressed at ``subtree_com``. The linear velocity is transported
  to ``pos`` via the rigid-body relation v_pos = v_com + omega x (pos - com);
  the angular part is frame-point independent and is passed through.

  Returns:
    Tensor with linear velocity in components 0:3 and angular velocity in
    components 3:6, matching the input batch shape.
  """
  omega_w = cvel[..., 0:3]
  v_com_w = cvel[..., 3:6]
  arm = pos - subtree_com
  v_pos_w = v_com_w + torch.cross(omega_w, arm, dim=-1)
  return torch.cat([v_pos_w, omega_w], dim=-1)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@dataclass
class EntityData:
  """Data container for an entity.

  Note: Write methods (write_*) modify state directly. Read properties (e.g.,
  root_link_pose_w) require sim.forward() to be current. If you write then read,
  call sim.forward() in between. Event order matters when mixing reads and writes.
  All inputs/outputs use world frame.
  """

  # Mapping from entity-local elements to global simulation indices/addresses.
  indexing: EntityIndexing
  # Batched simulation state and model (shared across entities).
  data: mjwarp.Data
  model: mjwarp.Model
  device: str

  # Default (reset) state.
  default_root_state: torch.Tensor
  default_joint_pos: torch.Tensor
  default_joint_vel: torch.Tensor

  # Joint position limits; the "soft" limits are the working range used by
  # controllers/penalties (typically a scaled subset of the hard limits).
  default_joint_pos_limits: torch.Tensor
  joint_pos_limits: torch.Tensor
  soft_joint_pos_limits: torch.Tensor

  # Constant direction vectors: gravity in world frame, forward in body frame.
  gravity_vec_w: torch.Tensor
  forward_vec_b: torch.Tensor

  # Structural flags resolved at build time.
  is_fixed_base: bool
  is_articulated: bool
  is_actuated: bool

  # Actuation targets in joint space.
  joint_pos_target: torch.Tensor
  joint_vel_target: torch.Tensor
  joint_effort_target: torch.Tensor

  # Actuation targets in tendon space.
  tendon_len_target: torch.Tensor
  tendon_vel_target: torch.Tensor
  tendon_effort_target: torch.Tensor

  # Actuation target in site space.
  site_effort_target: torch.Tensor

  # Additive offset applied by joint_pos_biased (simulated encoder error).
  encoder_bias: torch.Tensor

  # State dimensions.
  POS_DIM = 3
  QUAT_DIM = 4
  LIN_VEL_DIM = 3
  ANG_VEL_DIM = 3
  ROOT_POSE_DIM = POS_DIM + QUAT_DIM  # 7
  ROOT_VEL_DIM = LIN_VEL_DIM + ANG_VEL_DIM  # 6
  ROOT_STATE_DIM = ROOT_POSE_DIM + ROOT_VEL_DIM  # 13

  def write_root_state(
    self, root_state: torch.Tensor, env_ids: torch.Tensor | slice | None = None
  ) -> None:
    """Write root pose and velocity. Shape (num_envs, 13): pos(3)+quat(4)+lin(3)+ang(3)."""
    if self.is_fixed_base:
      raise ValueError("Cannot write root state for fixed-base entity.")
    assert root_state.shape[-1] == self.ROOT_STATE_DIM

    self.write_root_pose(root_state[:, : self.ROOT_POSE_DIM], env_ids)
    self.write_root_velocity(root_state[:, self.ROOT_POSE_DIM :], env_ids)

  def write_root_pose(
    self, pose: torch.Tensor, env_ids: torch.Tensor | slice | None = None
  ) -> None:
    """Write root link pose into qpos. Shape (num_envs, 7): pos(3)+quat(4)."""
    if self.is_fixed_base:
      raise ValueError("Cannot write root pose for fixed-base entity.")
    assert pose.shape[-1] == self.ROOT_POSE_DIM

    env_ids = self._resolve_env_ids(env_ids)
    self.data.qpos[env_ids, self.indexing.free_joint_q_adr] = pose

  def write_root_velocity(
    self, velocity: torch.Tensor, env_ids: torch.Tensor | slice | None = None
  ) -> None:
    """Write root link velocity. Shape (num_envs, 6): lin(3)+ang(3), world frame.

    The free joint stores angular velocity in the body frame, so the angular
    part is rotated from world to body before writing into qvel.
    """
    if self.is_fixed_base:
      raise ValueError("Cannot write root velocity for fixed-base entity.")
    assert velocity.shape[-1] == self.ROOT_VEL_DIM

    env_ids = self._resolve_env_ids(env_ids)
    quat_w = self.data.qpos[env_ids, self.indexing.free_joint_q_adr[3:7]]
    ang_vel_b = quat_apply_inverse(quat_w, velocity[:, 3:])
    velocity_qvel = torch.cat([velocity[:, :3], ang_vel_b], dim=-1)
    self.data.qvel[env_ids, self.indexing.free_joint_v_adr] = velocity_qvel

  def write_root_com_velocity(
    self, velocity: torch.Tensor, env_ids: torch.Tensor | slice | None = None
  ) -> None:
    """Write root COM velocity. Shape (num_envs, 6): lin(3)+ang(3), world frame.

    Converts the COM-frame linear velocity to the link frame
    (v_link = v_com - w x r, with r the world-frame COM offset) and delegates
    to write_root_velocity.
    """
    if self.is_fixed_base:
      raise ValueError("Cannot write root COM velocity for fixed-base entity.")
    assert velocity.shape[-1] == self.ROOT_VEL_DIM

    env_ids = self._resolve_env_ids(env_ids)
    com_offset_b = self.model.body_ipos[:, self.indexing.root_body_id]
    quat_w = self.data.qpos[env_ids, self.indexing.free_joint_q_adr[3:7]]
    com_offset_w = quat_apply(quat_w, com_offset_b[env_ids])
    lin_vel_com = velocity[:, :3]
    ang_vel_w = velocity[:, 3:]
    lin_vel_link = lin_vel_com - torch.cross(ang_vel_w, com_offset_w, dim=-1)
    link_velocity = torch.cat([lin_vel_link, ang_vel_w], dim=-1)
    self.write_root_velocity(link_velocity, env_ids)

  def write_joint_state(
    self,
    position: torch.Tensor,
    velocity: torch.Tensor,
    joint_ids: torch.Tensor | slice | None = None,
    env_ids: torch.Tensor | slice | None = None,
  ) -> None:
    """Write joint positions and velocities (convenience wrapper)."""
    if not self.is_articulated:
      raise ValueError("Cannot write joint state for non-articulated entity.")

    self.write_joint_position(position, joint_ids, env_ids)
    self.write_joint_velocity(velocity, joint_ids, env_ids)

  def write_joint_position(
    self,
    position: torch.Tensor,
    joint_ids: torch.Tensor | slice | None = None,
    env_ids: torch.Tensor | slice | None = None,
  ) -> None:
    """Write joint positions into qpos at this entity's joint addresses."""
    if not self.is_articulated:
      raise ValueError("Cannot write joint position for non-articulated entity.")

    env_ids = self._resolve_env_ids(env_ids)
    joint_ids = joint_ids if joint_ids is not None else slice(None)
    q_slice = self.indexing.joint_q_adr[joint_ids]
    self.data.qpos[env_ids, q_slice] = position

  def write_joint_velocity(
    self,
    velocity: torch.Tensor,
    joint_ids: torch.Tensor | slice | None = None,
    env_ids: torch.Tensor | slice | None = None,
  ) -> None:
    """Write joint velocities into qvel at this entity's joint addresses."""
    if not self.is_articulated:
      raise ValueError("Cannot write joint velocity for non-articulated entity.")

    env_ids = self._resolve_env_ids(env_ids)
    joint_ids = joint_ids if joint_ids is not None else slice(None)
    v_slice = self.indexing.joint_v_adr[joint_ids]
    self.data.qvel[env_ids, v_slice] = velocity

  def write_external_wrench(
    self,
    force: torch.Tensor | None,
    torque: torch.Tensor | None,
    body_ids: Sequence[int] | slice | None = None,
    env_ids: torch.Tensor | slice | None = None,
  ) -> None:
    """Write external force/torque (world frame) into xfrc_applied.

    Either component may be None to leave it unchanged.
    """
    env_ids = self._resolve_env_ids(env_ids)
    local_body_ids = body_ids if body_ids is not None else slice(None)
    global_body_ids = self.indexing.body_ids[local_body_ids]
    if force is not None:
      self.data.xfrc_applied[env_ids, global_body_ids, 0:3] = force
    if torque is not None:
      self.data.xfrc_applied[env_ids, global_body_ids, 3:6] = torque

  def write_ctrl(
    self,
    ctrl: torch.Tensor,
    ctrl_ids: torch.Tensor | slice | None = None,
    env_ids: torch.Tensor | slice | None = None,
  ) -> None:
    """Write actuator control values into data.ctrl."""
    if not self.is_actuated:
      raise ValueError("Cannot write control for non-actuated entity.")

    env_ids = self._resolve_env_ids(env_ids)
    local_ctrl_ids = ctrl_ids if ctrl_ids is not None else slice(None)
    global_ctrl_ids = self.indexing.ctrl_ids[local_ctrl_ids]
    self.data.ctrl[env_ids, global_ctrl_ids] = ctrl

  def write_mocap_pose(
    self, pose: torch.Tensor, env_ids: torch.Tensor | slice | None = None
  ) -> None:
    """Write mocap body pose. Shape (num_envs, 7): pos(3)+quat(4)."""
    if self.indexing.mocap_id is None:
      raise ValueError("Cannot write mocap pose for non-mocap entity.")
    assert pose.shape[-1] == self.ROOT_POSE_DIM

    env_ids = self._resolve_env_ids(env_ids)
    self.data.mocap_pos[env_ids, self.indexing.mocap_id] = pose[:, 0:3].unsqueeze(1)
    self.data.mocap_quat[env_ids, self.indexing.mocap_id] = pose[:, 3:7].unsqueeze(1)

  def clear_state(self, env_ids: torch.Tensor | slice | None = None) -> None:
    """Zero all actuation targets for the given environments (no-op if not actuated)."""
    if self.is_actuated:
      env_ids = self._resolve_env_ids(env_ids)
      self.joint_pos_target[env_ids] = 0.0
      self.joint_vel_target[env_ids] = 0.0
      self.joint_effort_target[env_ids] = 0.0
      self.tendon_len_target[env_ids] = 0.0
      self.tendon_vel_target[env_ids] = 0.0
      self.tendon_effort_target[env_ids] = 0.0
      self.site_effort_target[env_ids] = 0.0

  def _resolve_env_ids(
    self, env_ids: torch.Tensor | slice | None
  ) -> torch.Tensor | slice:
    """Convert env_ids to consistent indexing format."""
    if env_ids is None:
      return slice(None)
    if isinstance(env_ids, torch.Tensor):
      # Add a trailing axis so env indices broadcast against element indices.
      return env_ids[:, None]
    return env_ids

  # Root properties

  @property
  def root_link_pose_w(self) -> torch.Tensor:
    """Root link pose in world frame. Shape (num_envs, 7)."""
    pos_w = self.data.xpos[:, self.indexing.root_body_id]  # (num_envs, 3)
    quat_w = self.data.xquat[:, self.indexing.root_body_id]  # (num_envs, 4)
    return torch.cat([pos_w, quat_w], dim=-1)  # (num_envs, 7)

  @property
  def root_link_vel_w(self) -> torch.Tensor:
    """Root link velocity in world frame. Shape (num_envs, 6)."""
    # NOTE: Equivalently, can read this from qvel[:6] but the angular part
    # will be in body frame and needs to be rotated to world frame.
    # Note also that an extra forward() call might be required to make
    # both values equal.
    pos = self.data.xpos[:, self.indexing.root_body_id]  # (num_envs, 3)
    subtree_com = self.data.subtree_com[:, self.indexing.root_body_id]
    cvel = self.data.cvel[:, self.indexing.root_body_id]  # (num_envs, 6)
    return compute_velocity_from_cvel(pos, subtree_com, cvel)  # (num_envs, 6)

  @property
  def root_com_pose_w(self) -> torch.Tensor:
    """Root center-of-mass pose in world frame. Shape (num_envs, 7)."""
    pos_w = self.data.xipos[:, self.indexing.root_body_id]
    quat = self.data.xquat[:, self.indexing.root_body_id]
    # Compose body orientation with the inertial-frame orientation offset.
    body_iquat = self.model.body_iquat[:, self.indexing.root_body_id]
    assert body_iquat is not None
    quat_w = quat_mul(quat, body_iquat.squeeze(1))
    return torch.cat([pos_w, quat_w], dim=-1)

  @property
  def root_com_vel_w(self) -> torch.Tensor:
    """Root center-of-mass velocity in world frame. Shape (num_envs, 6)."""
    # NOTE: Equivalent sensor is framelinvel/frameangvel with objtype="body".
    pos = self.data.xipos[:, self.indexing.root_body_id]  # (num_envs, 3)
    subtree_com = self.data.subtree_com[:, self.indexing.root_body_id]
    cvel = self.data.cvel[:, self.indexing.root_body_id]  # (num_envs, 6)
    return compute_velocity_from_cvel(pos, subtree_com, cvel)  # (num_envs, 6)

  # Body properties

  @property
  def body_link_pose_w(self) -> torch.Tensor:
    """Body link pose in world frame. Shape (num_envs, num_bodies, 7)."""
    pos_w = self.data.xpos[:, self.indexing.body_ids]
    quat_w = self.data.xquat[:, self.indexing.body_ids]
    return torch.cat([pos_w, quat_w], dim=-1)

  @property
  def body_link_vel_w(self) -> torch.Tensor:
    """Body link velocity in world frame. Shape (num_envs, num_bodies, 6)."""
    # NOTE: Equivalent sensor is framelinvel/frameangvel with objtype="xbody".
    pos = self.data.xpos[:, self.indexing.body_ids]  # (num_envs, num_bodies, 3)
    subtree_com = self.data.subtree_com[:, self.indexing.root_body_id]
    cvel = self.data.cvel[:, self.indexing.body_ids]
    return compute_velocity_from_cvel(pos, subtree_com.unsqueeze(1), cvel)

  @property
  def body_com_pose_w(self) -> torch.Tensor:
    """Body center-of-mass pose in world frame. Shape (num_envs, num_bodies, 7)."""
    pos_w = self.data.xipos[:, self.indexing.body_ids]
    quat = self.data.xquat[:, self.indexing.body_ids]
    body_iquat = self.model.body_iquat[:, self.indexing.body_ids]
    quat_w = quat_mul(quat, body_iquat)
    return torch.cat([pos_w, quat_w], dim=-1)

  @property
  def body_com_vel_w(self) -> torch.Tensor:
    """Body center-of-mass velocity in world frame. Shape (num_envs, num_bodies, 6)."""
    # NOTE: Equivalent sensor is framelinvel/frameangvel with objtype="body".
    pos = self.data.xipos[:, self.indexing.body_ids]
    subtree_com = self.data.subtree_com[:, self.indexing.root_body_id]
    cvel = self.data.cvel[:, self.indexing.body_ids]
    return compute_velocity_from_cvel(pos, subtree_com.unsqueeze(1), cvel)

  @property
  def body_external_wrench(self) -> torch.Tensor:
    """Body external wrench in world frame. Shape (num_envs, num_bodies, 6)."""
    return self.data.xfrc_applied[:, self.indexing.body_ids]

  # Geom properties

  @property
  def geom_pose_w(self) -> torch.Tensor:
    """Geom pose in world frame. Shape (num_envs, num_geoms, 7)."""
    pos_w = self.data.geom_xpos[:, self.indexing.geom_ids]
    xmat = self.data.geom_xmat[:, self.indexing.geom_ids]
    quat_w = quat_from_matrix(xmat)
    return torch.cat([pos_w, quat_w], dim=-1)

  @property
  def geom_vel_w(self) -> torch.Tensor:
    """Geom velocity in world frame. Shape (num_envs, num_geoms, 6)."""
    pos = self.data.geom_xpos[:, self.indexing.geom_ids]
    # Geoms have no own DoFs; use the velocity of the owning body.
    body_ids = self.model.geom_bodyid[self.indexing.geom_ids]  # (num_geoms,)
    subtree_com = self.data.subtree_com[:, self.indexing.root_body_id]
    cvel = self.data.cvel[:, body_ids]
    return compute_velocity_from_cvel(pos, subtree_com.unsqueeze(1), cvel)

  # Site properties

  @property
  def site_pose_w(self) -> torch.Tensor:
    """Site pose in world frame. Shape (num_envs, num_sites, 7)."""
    pos_w = self.data.site_xpos[:, self.indexing.site_ids]
    mat_w = self.data.site_xmat[:, self.indexing.site_ids]
    quat_w = quat_from_matrix(mat_w)
    return torch.cat([pos_w, quat_w], dim=-1)

  @property
  def site_vel_w(self) -> torch.Tensor:
    """Site velocity in world frame. Shape (num_envs, num_sites, 6)."""
    pos = self.data.site_xpos[:, self.indexing.site_ids]
    # Sites have no own DoFs; use the velocity of the owning body.
    body_ids = self.model.site_bodyid[self.indexing.site_ids]  # (num_sites,)
    subtree_com = self.data.subtree_com[:, self.indexing.root_body_id]
    cvel = self.data.cvel[:, body_ids]
    return compute_velocity_from_cvel(pos, subtree_com.unsqueeze(1), cvel)

  # Joint properties

  @property
  def joint_pos(self) -> torch.Tensor:
    """Joint positions. Shape (num_envs, num_joints)."""
    return self.data.qpos[:, self.indexing.joint_q_adr]

  @property
  def joint_pos_biased(self) -> torch.Tensor:
    """Joint positions with encoder bias applied. Shape (num_envs, num_joints)."""
    return self.joint_pos + self.encoder_bias

  @property
  def joint_vel(self) -> torch.Tensor:
    """Joint velocities. Shape (num_envs, nv)."""
    return self.data.qvel[:, self.indexing.joint_v_adr]

  @property
  def joint_acc(self) -> torch.Tensor:
    """Joint accelerations. Shape (num_envs, nv)."""
    return self.data.qacc[:, self.indexing.joint_v_adr]

  # Tendon properties

  @property
  def tendon_len(self) -> torch.Tensor:
    """Tendon lengths. Shape (num_envs, num_tendons)."""
    return self.data.ten_length[:, self.indexing.tendon_ids]

  @property
  def tendon_vel(self) -> torch.Tensor:
    """Tendon velocities. Shape (num_envs, num_tendons)."""
    return self.data.ten_velocity[:, self.indexing.tendon_ids]

  @property
  def joint_torques(self) -> torch.Tensor:
    """Joint torques. Shape (num_envs, nv)."""
    raise NotImplementedError(
      "Joint torques are not currently available. "
      "Consider using 'actuator_force' property for actuation forces, "
      "or 'generalized_force' property for generalized forces applied to the DoFs."
    )

  @property
  def actuator_force(self) -> torch.Tensor:
    """Scalar actuation force in actuation space. Shape (num_envs, nu)."""
    return self.data.actuator_force[:, self.indexing.ctrl_ids]

  @property
  def generalized_force(self) -> torch.Tensor:
    """Generalized forces applied to the DoFs. Shape (num_envs, nv)."""
    # NOTE(review): indexes free_joint_v_adr (root DoFs only) while the
    # docstring says "the DoFs" — confirm whether joint_v_adr was intended.
    return self.data.qfrc_applied[:, self.indexing.free_joint_v_adr]

  # Pose and velocity component accessors.

  @property
  def root_link_pos_w(self) -> torch.Tensor:
    """Root link position in world frame. Shape (num_envs, 3)."""
    return self.root_link_pose_w[:, 0:3]

  @property
  def root_link_quat_w(self) -> torch.Tensor:
    """Root link quaternion in world frame. Shape (num_envs, 4)."""
    return self.root_link_pose_w[:, 3:7]

  @property
  def root_link_lin_vel_w(self) -> torch.Tensor:
    """Root link linear velocity in world frame. Shape (num_envs, 3)."""
    return self.root_link_vel_w[:, 0:3]

  @property
  def root_link_ang_vel_w(self) -> torch.Tensor:
    """Root link angular velocity in world frame. Shape (num_envs, 3)."""
    # cvel stores the angular component in the first three entries.
    return self.data.cvel[:, self.indexing.root_body_id, 0:3]

  @property
  def root_com_pos_w(self) -> torch.Tensor:
    """Root COM position in world frame. Shape (num_envs, 3)."""
    return self.root_com_pose_w[:, 0:3]

  @property
  def root_com_quat_w(self) -> torch.Tensor:
    """Root COM quaternion in world frame. Shape (num_envs, 4)."""
    return self.root_com_pose_w[:, 3:7]

  @property
  def root_com_lin_vel_w(self) -> torch.Tensor:
    """Root COM linear velocity in world frame. Shape (num_envs, 3)."""
    return self.root_com_vel_w[:, 0:3]

  @property
  def root_com_ang_vel_w(self) -> torch.Tensor:
    """Root COM angular velocity in world frame. Shape (num_envs, 3)."""
    # Angular velocity is the same for link and COM frames.
    return self.data.cvel[:, self.indexing.root_body_id, 0:3]

  @property
  def body_link_pos_w(self) -> torch.Tensor:
    """Body link positions in world frame. Shape (num_envs, num_bodies, 3)."""
    return self.body_link_pose_w[..., 0:3]

  @property
  def body_link_quat_w(self) -> torch.Tensor:
    """Body link quaternions in world frame. Shape (num_envs, num_bodies, 4)."""
    return self.body_link_pose_w[..., 3:7]

  @property
  def body_link_lin_vel_w(self) -> torch.Tensor:
    """Body link linear velocities in world frame. Shape (num_envs, num_bodies, 3)."""
    return self.body_link_vel_w[..., 0:3]

  @property
  def body_link_ang_vel_w(self) -> torch.Tensor:
    """Body link angular velocities in world frame. Shape (num_envs, num_bodies, 3)."""
    return self.data.cvel[:, self.indexing.body_ids, 0:3]

  @property
  def body_com_pos_w(self) -> torch.Tensor:
    """Body COM positions in world frame. Shape (num_envs, num_bodies, 3)."""
    return self.body_com_pose_w[..., 0:3]

  @property
  def body_com_quat_w(self) -> torch.Tensor:
    """Body COM quaternions in world frame. Shape (num_envs, num_bodies, 4)."""
    return self.body_com_pose_w[..., 3:7]

  @property
  def body_com_lin_vel_w(self) -> torch.Tensor:
    """Body COM linear velocities in world frame. Shape (num_envs, num_bodies, 3)."""
    return self.body_com_vel_w[..., 0:3]

  @property
  def body_com_ang_vel_w(self) -> torch.Tensor:
    """Body COM angular velocities in world frame. Shape (num_envs, num_bodies, 3)."""
    # Angular velocity is the same for link and COM frames.
    return self.data.cvel[:, self.indexing.body_ids, 0:3]

  @property
  def body_external_force(self) -> torch.Tensor:
    """Body external forces in world frame. Shape (num_envs, num_bodies, 3)."""
    return self.body_external_wrench[..., 0:3]

  @property
  def body_external_torque(self) -> torch.Tensor:
    """Body external torques in world frame. Shape (num_envs, num_bodies, 3)."""
    return self.body_external_wrench[..., 3:6]

  @property
  def geom_pos_w(self) -> torch.Tensor:
    """Geom positions in world frame. Shape (num_envs, num_geoms, 3)."""
    return self.geom_pose_w[..., 0:3]

  @property
  def geom_quat_w(self) -> torch.Tensor:
    """Geom quaternions in world frame. Shape (num_envs, num_geoms, 4)."""
    return self.geom_pose_w[..., 3:7]

  @property
  def geom_lin_vel_w(self) -> torch.Tensor:
    """Geom linear velocities in world frame. Shape (num_envs, num_geoms, 3)."""
    return self.geom_vel_w[..., 0:3]

  @property
  def geom_ang_vel_w(self) -> torch.Tensor:
    """Geom angular velocities in world frame. Shape (num_envs, num_geoms, 3)."""
    body_ids = self.model.geom_bodyid[self.indexing.geom_ids]
    return self.data.cvel[:, body_ids, 0:3]

  @property
  def site_pos_w(self) -> torch.Tensor:
    """Site positions in world frame. Shape (num_envs, num_sites, 3)."""
    return self.site_pose_w[..., 0:3]

  @property
  def site_quat_w(self) -> torch.Tensor:
    """Site quaternions in world frame. Shape (num_envs, num_sites, 4)."""
    return self.site_pose_w[..., 3:7]

  @property
  def site_lin_vel_w(self) -> torch.Tensor:
    """Site linear velocities in world frame. Shape (num_envs, num_sites, 3)."""
    return self.site_vel_w[..., 0:3]

  @property
  def site_ang_vel_w(self) -> torch.Tensor:
    """Site angular velocities in world frame. Shape (num_envs, num_sites, 3)."""
    body_ids = self.model.site_bodyid[self.indexing.site_ids]
    return self.data.cvel[:, body_ids, 0:3]

  # Derived properties.

  @property
  def projected_gravity_b(self) -> torch.Tensor:
    """Gravity vector projected into body frame. Shape (num_envs, 3)."""
    return quat_apply_inverse(self.root_link_quat_w, self.gravity_vec_w)

  @property
  def heading_w(self) -> torch.Tensor:
    """Heading angle in world frame. Shape (num_envs,)."""
    forward_w = quat_apply(self.root_link_quat_w, self.forward_vec_b)
    return torch.atan2(forward_w[:, 1], forward_w[:, 0])

  @property
  def root_link_lin_vel_b(self) -> torch.Tensor:
    """Root link linear velocity in body frame. Shape (num_envs, 3)."""
    return quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w)

  @property
  def root_link_ang_vel_b(self) -> torch.Tensor:
    """Root link angular velocity in body frame. Shape (num_envs, 3)."""
    return quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w)

  @property
  def root_com_lin_vel_b(self) -> torch.Tensor:
    """Root COM linear velocity in body frame. Shape (num_envs, 3)."""
    return quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w)

  @property
  def root_com_ang_vel_b(self) -> torch.Tensor:
    """Root COM angular velocity in body frame. Shape (num_envs, 3)."""
    return quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w)
|
mjlab/src/mjlab/entity/entity.py
ADDED
|
@@ -0,0 +1,985 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass, field
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from typing import Callable, Sequence
|
| 6 |
+
|
| 7 |
+
import mujoco
|
| 8 |
+
import mujoco_warp as mjwarp
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
from mjlab import actuator
|
| 13 |
+
from mjlab.actuator import BuiltinActuatorGroup
|
| 14 |
+
from mjlab.actuator.actuator import TransmissionType
|
| 15 |
+
from mjlab.entity.data import EntityData
|
| 16 |
+
from mjlab.utils import spec_config as spec_cfg
|
| 17 |
+
from mjlab.utils.lab_api.string import resolve_matching_names
|
| 18 |
+
from mjlab.utils.mujoco import dof_width, qpos_width
|
| 19 |
+
from mjlab.utils.spec import auto_wrap_fixed_base_mocap
|
| 20 |
+
from mjlab.utils.string import resolve_expr
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@dataclass(frozen=False)
class EntityIndexing:
  """Maps entity elements to global indices and addresses in the simulation."""

  # Elements (MjSpec handles for this entity's model elements).
  bodies: tuple[mujoco.MjsBody, ...]
  joints: tuple[mujoco.MjsJoint, ...]
  geoms: tuple[mujoco.MjsGeom, ...]
  sites: tuple[mujoco.MjsSite, ...]
  tendons: tuple[mujoco.MjsTendon, ...]
  actuators: tuple[mujoco.MjsActuator, ...] | None

  # Indices (global IDs into the compiled model's arrays).
  body_ids: torch.Tensor
  geom_ids: torch.Tensor
  site_ids: torch.Tensor
  tendon_ids: torch.Tensor
  ctrl_ids: torch.Tensor
  joint_ids: torch.Tensor
  mocap_id: int | None

  # Addresses (global qpos/qvel offsets for this entity's joints).
  joint_q_adr: torch.Tensor
  joint_v_adr: torch.Tensor
  free_joint_q_adr: torch.Tensor
  free_joint_v_adr: torch.Tensor

  @property
  def root_body_id(self) -> int:
    """Global body ID of the entity root (first listed body)."""
    return self.bodies[0].id
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@dataclass
class EntityCfg:
  """Configuration for constructing an :class:`Entity`."""

  @dataclass
  class InitialStateCfg:
    """Initial (reset) state of the entity."""

    # Root position and orientation.
    pos: tuple[float, float, float] = (0.0, 0.0, 0.0)
    rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0)
    # Root linear and angular velocity (only for floating base entities).
    lin_vel: tuple[float, float, float] = (0.0, 0.0, 0.0)
    ang_vel: tuple[float, float, float] = (0.0, 0.0, 0.0)
    # Articulation (only for articulated entities).
    # Set to None to use the model's existing keyframe (errors if none exists).
    joint_pos: dict[str, float] | None = field(default_factory=lambda: {".*": 0.0})
    joint_vel: dict[str, float] = field(default_factory=lambda: {".*": 0.0})

  init_state: InitialStateCfg = field(default_factory=InitialStateCfg)
  # Factory producing the entity's MjSpec; defaults to an empty spec.
  spec_fn: Callable[[], mujoco.MjSpec] = field(
    default_factory=lambda: (lambda: mujoco.MjSpec())
  )
  # Actuator/limit configuration; None for non-articulated entities.
  articulation: EntityArticulationInfoCfg | None = None

  # Editors (applied to the spec during entity construction).
  lights: tuple[spec_cfg.LightCfg, ...] = field(default_factory=tuple)
  cameras: tuple[spec_cfg.CameraCfg, ...] = field(default_factory=tuple)
  textures: tuple[spec_cfg.TextureCfg, ...] = field(default_factory=tuple)
  materials: tuple[spec_cfg.MaterialCfg, ...] = field(default_factory=tuple)
  collisions: tuple[spec_cfg.CollisionCfg, ...] = field(default_factory=tuple)

  def build(self) -> Entity:
    """Build entity instance from this config.

    Override in subclasses to return custom Entity types.
    """
    return Entity(self)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@dataclass
class EntityArticulationInfoCfg:
  """Articulation settings: actuator configs and soft joint-limit scaling."""

  # Actuator configurations; each is resolved against the entity's joints,
  # tendons, or sites depending on its transmission type.
  actuators: tuple[actuator.ActuatorCfg, ...] = field(default_factory=tuple)
  # Fraction of each joint's position range used for the soft limits
  # (1.0 keeps the full range; see Entity.initialize).
  soft_joint_pos_limit_factor: float = 1.0
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class Entity:
|
| 98 |
+
"""An entity represents a physical object in the simulation.
|
| 99 |
+
|
| 100 |
+
Entity Type Matrix
|
| 101 |
+
==================
|
| 102 |
+
MuJoCo entities can be categorized along two dimensions:
|
| 103 |
+
|
| 104 |
+
1. Base Type:
|
| 105 |
+
- Fixed Base: Entity is welded to the world (no freejoint)
|
| 106 |
+
- Floating Base: Entity has 6 DOF movement (has freejoint)
|
| 107 |
+
|
| 108 |
+
2. Articulation:
|
| 109 |
+
- Non-articulated: No joints other than freejoint
|
| 110 |
+
- Articulated: Has joints in kinematic tree (may or may not be actuated)
|
| 111 |
+
|
| 112 |
+
Fixed non-articulated entities can optionally be mocap bodies, whereby their
|
| 113 |
+
position and orientation can be set directly each timestep rather than being
|
| 114 |
+
determined by physics. This property can be useful for creating props with
|
| 115 |
+
adjustable position and orientation.
|
| 116 |
+
|
| 117 |
+
Supported Combinations:
|
| 118 |
+
----------------------
|
| 119 |
+
| Type | Example | is_fixed_base | is_articulated | is_actuated |
|
| 120 |
+
|---------------------------|---------------------|---------------|----------------|-------------|
|
| 121 |
+
| Fixed Non-articulated | Table, wall | True | False | False |
|
| 122 |
+
| Fixed Articulated | Robot arm, door | True | True | True/False |
|
| 123 |
+
| Floating Non-articulated | Box, ball, mug | False | False | False |
|
| 124 |
+
| Floating Articulated | Humanoid, quadruped | False | True | True/False |
|
| 125 |
+
"""
|
| 126 |
+
|
| 127 |
+
def __init__(self, cfg: EntityCfg) -> None:
  """Build the entity's MjSpec from *cfg* and run all spec-editing steps.

  Args:
    cfg: Entity configuration; its ``spec_fn`` is invoked once to produce
      the underlying MjSpec.
  """
  self.cfg = cfg
  self._spec = auto_wrap_fixed_base_mocap(cfg.spec_fn)()

  # Split the joint list into an optional leading free joint and the rest.
  joints = self._spec.joints
  self._all_joints = joints
  if joints and joints[0].type == mujoco.mjtJoint.mjJNT_FREE:
    free = joints[0]
    if not free.name:
      # Give an unnamed free joint a stable name so it can be referenced.
      free.name = "floating_base_joint"
    self._free_joint = free
    self._non_free_joints = tuple(joints[1:])
  else:
    self._free_joint = None
    self._non_free_joints = tuple(joints)

  self._actuators: list[actuator.Actuator] = []

  # Order matters: editors may add elements the actuators then target, and
  # the keyframe must reflect the final actuator list.
  self._apply_spec_editors()
  self._add_actuators()
  self._add_initial_state_keyframe()
|
| 145 |
+
|
| 146 |
+
def _apply_spec_editors(self) -> None:
|
| 147 |
+
for cfg_list in [
|
| 148 |
+
self.cfg.lights,
|
| 149 |
+
self.cfg.cameras,
|
| 150 |
+
self.cfg.textures,
|
| 151 |
+
self.cfg.materials,
|
| 152 |
+
self.cfg.collisions,
|
| 153 |
+
]:
|
| 154 |
+
for cfg in cfg_list:
|
| 155 |
+
cfg.edit_spec(self._spec)
|
| 156 |
+
|
| 157 |
+
def _add_actuators(self) -> None:
  """Resolve each actuator config's targets, build it, and let it edit the spec."""
  articulation = self.cfg.articulation
  if articulation is None:
    return

  for actuator_cfg in articulation.actuators:
    # Pick the target resolver based on the transmission type.
    transmission = actuator_cfg.transmission_type
    if transmission == TransmissionType.JOINT:
      finder = self.find_joints
    elif transmission == TransmissionType.TENDON:
      finder = self.find_tendons
    elif transmission == TransmissionType.SITE:
      finder = self.find_sites
    else:
      raise ValueError(
        f"Invalid transmission_type: {actuator_cfg.transmission_type}. "
        f"Must be TransmissionType.JOINT, TransmissionType.TENDON, or TransmissionType.SITE."
      )
    target_ids, target_names = finder(actuator_cfg.target_names_expr)

    if len(target_names) == 0:
      raise ValueError(
        f"No {actuator_cfg.transmission_type}s found for actuator with "
        f"expressions: {actuator_cfg.target_names_expr}"
      )

    actuator_instance = actuator_cfg.build(self, target_ids, target_names)
    actuator_instance.edit_spec(self._spec, target_names)
    self._actuators.append(actuator_instance)
|
| 183 |
+
|
| 184 |
+
def _add_initial_state_keyframe(self) -> None:
  """Create (or adopt) an "init_state" keyframe encoding the configured initial state.

  For fixed-base entities the root pose is written directly onto the root
  body instead of into qpos.
  """
  # If joint_pos is None, use existing keyframe from the model.
  if self.cfg.init_state.joint_pos is None:
    if not self._spec.keys:
      raise ValueError(
        "joint_pos=None requires the model to have a keyframe, but none exists."
      )
    # Keep the existing keyframe, just rename it.
    self._spec.keys[0].name = "init_state"
    if self.is_fixed_base:
      self.root_body.pos[:] = self.cfg.init_state.pos
      self.root_body.quat[:] = self.cfg.init_state.rot
    return

  qpos_components = []

  # Free joint contributes pos (3) + quat (4) at the front of qpos.
  if self._free_joint is not None:
    qpos_components.extend([self.cfg.init_state.pos, self.cfg.init_state.rot])

  joint_pos = None
  if self._non_free_joints:
    # Resolve the {pattern: value} dict against joint names (default 0.0),
    # ordered like self.joint_names.
    joint_pos = resolve_expr(self.cfg.init_state.joint_pos, self.joint_names, 0.0)
    qpos_components.append(joint_pos)

  key_qpos = np.hstack(qpos_components) if qpos_components else np.array([])
  key = self._spec.add_key(name="init_state", qpos=key_qpos.tolist())

  if self.is_actuated and joint_pos is not None:
    # Seed ctrl so position-style actuators start at the keyframe pose.
    name_to_pos = {name: joint_pos[i] for i, name in enumerate(self.joint_names)}
    ctrl = []
    for act in self._spec.actuators:
      # NOTE(review): assumes act.target matches entries of self.joint_names
      # (which are suffixes after the last "/"); non-joint or namespaced
      # targets fall back to 0.0 — confirm this is intended.
      joint_name = act.target
      ctrl.append(name_to_pos.get(joint_name, 0.0))
    key.ctrl = np.array(ctrl)

  if self.is_fixed_base:
    # Fixed-base: root pose lives on the body, not in qpos.
    self.root_body.pos[:] = self.cfg.init_state.pos
    self.root_body.quat[:] = self.cfg.init_state.rot
|
| 222 |
+
|
| 223 |
+
# Attributes.
|
| 224 |
+
|
| 225 |
+
@property
|
| 226 |
+
def is_fixed_base(self) -> bool:
|
| 227 |
+
"""Entity is welded to the world."""
|
| 228 |
+
return self._free_joint is None
|
| 229 |
+
|
| 230 |
+
@property
|
| 231 |
+
def is_articulated(self) -> bool:
|
| 232 |
+
"""Entity is articulated (has fixed or actuated joints)."""
|
| 233 |
+
return len(self._non_free_joints) > 0
|
| 234 |
+
|
| 235 |
+
@property
|
| 236 |
+
def is_actuated(self) -> bool:
|
| 237 |
+
"""Entity has actuated joints."""
|
| 238 |
+
return len(self._actuators) > 0
|
| 239 |
+
|
| 240 |
+
@property
|
| 241 |
+
def has_tendon_actuators(self) -> bool:
|
| 242 |
+
"""Entity has actuators using tendon transmission."""
|
| 243 |
+
if self.cfg.articulation is None:
|
| 244 |
+
return False
|
| 245 |
+
return any(
|
| 246 |
+
act.transmission_type == TransmissionType.TENDON
|
| 247 |
+
for act in self.cfg.articulation.actuators
|
| 248 |
+
)
|
| 249 |
+
|
| 250 |
+
@property
|
| 251 |
+
def has_site_actuators(self) -> bool:
|
| 252 |
+
"""Entity has actuators using site transmission."""
|
| 253 |
+
if self.cfg.articulation is None:
|
| 254 |
+
return False
|
| 255 |
+
return any(
|
| 256 |
+
act.transmission_type == TransmissionType.SITE
|
| 257 |
+
for act in self.cfg.articulation.actuators
|
| 258 |
+
)
|
| 259 |
+
|
| 260 |
+
@property
|
| 261 |
+
def is_mocap(self) -> bool:
|
| 262 |
+
"""Entity root body is a mocap body (only for fixed-base entities)."""
|
| 263 |
+
return bool(self.root_body.mocap) if self.is_fixed_base else False
|
| 264 |
+
|
| 265 |
+
@property
|
| 266 |
+
def spec(self) -> mujoco.MjSpec:
|
| 267 |
+
return self._spec
|
| 268 |
+
|
| 269 |
+
@property
|
| 270 |
+
def data(self) -> EntityData:
|
| 271 |
+
return self._data
|
| 272 |
+
|
| 273 |
+
@property
|
| 274 |
+
def actuators(self) -> list[actuator.Actuator]:
|
| 275 |
+
return self._actuators
|
| 276 |
+
|
| 277 |
+
@property
|
| 278 |
+
def all_joint_names(self) -> tuple[str, ...]:
|
| 279 |
+
return tuple(j.name.split("/")[-1] for j in self._all_joints)
|
| 280 |
+
|
| 281 |
+
@property
|
| 282 |
+
def joint_names(self) -> tuple[str, ...]:
|
| 283 |
+
return tuple(j.name.split("/")[-1] for j in self._non_free_joints)
|
| 284 |
+
|
| 285 |
+
@property
|
| 286 |
+
def body_names(self) -> tuple[str, ...]:
|
| 287 |
+
return tuple(b.name.split("/")[-1] for b in self.spec.bodies[1:])
|
| 288 |
+
|
| 289 |
+
@property
|
| 290 |
+
def geom_names(self) -> tuple[str, ...]:
|
| 291 |
+
return tuple(g.name.split("/")[-1] for g in self.spec.geoms)
|
| 292 |
+
|
| 293 |
+
@property
|
| 294 |
+
def tendon_names(self) -> tuple[str, ...]:
|
| 295 |
+
return tuple(t.name.split("/")[-1] for t in self._spec.tendons)
|
| 296 |
+
|
| 297 |
+
@property
|
| 298 |
+
def site_names(self) -> tuple[str, ...]:
|
| 299 |
+
return tuple(s.name.split("/")[-1] for s in self.spec.sites)
|
| 300 |
+
|
| 301 |
+
@property
|
| 302 |
+
def actuator_names(self) -> tuple[str, ...]:
|
| 303 |
+
return tuple(a.name.split("/")[-1] for a in self.spec.actuators)
|
| 304 |
+
|
| 305 |
+
@property
|
| 306 |
+
def num_joints(self) -> int:
|
| 307 |
+
return len(self.joint_names)
|
| 308 |
+
|
| 309 |
+
@property
|
| 310 |
+
def num_bodies(self) -> int:
|
| 311 |
+
return len(self.body_names)
|
| 312 |
+
|
| 313 |
+
@property
|
| 314 |
+
def num_geoms(self) -> int:
|
| 315 |
+
return len(self.geom_names)
|
| 316 |
+
|
| 317 |
+
@property
|
| 318 |
+
def num_sites(self) -> int:
|
| 319 |
+
return len(self.site_names)
|
| 320 |
+
|
| 321 |
+
@property
|
| 322 |
+
def num_actuators(self) -> int:
|
| 323 |
+
return len(self.actuator_names)
|
| 324 |
+
|
| 325 |
+
@property
|
| 326 |
+
def root_body(self) -> mujoco.MjsBody:
|
| 327 |
+
return self.spec.bodies[1]
|
| 328 |
+
|
| 329 |
+
# Methods.
|
| 330 |
+
|
| 331 |
+
def find_bodies(
  self, name_keys: str | Sequence[str], preserve_order: bool = False
) -> tuple[list[int], list[str]]:
  """Resolve bodies whose names match ``name_keys``.

  Returns entity-local indices and the matched names.
  """
  return resolve_matching_names(name_keys, self.body_names, preserve_order)
|
| 335 |
+
|
| 336 |
+
def find_joints(
  self,
  name_keys: str | Sequence[str],
  joint_subset: Sequence[str] | None = None,
  preserve_order: bool = False,
) -> tuple[list[int], list[str]]:
  """Resolve joints whose names match ``name_keys`` within an optional subset.

  Returns indices into the searched subset and the matched names.
  """
  subset = self.joint_names if joint_subset is None else joint_subset
  return resolve_matching_names(name_keys, subset, preserve_order)
|
| 345 |
+
|
| 346 |
+
def find_actuators(
  self,
  name_keys: str | Sequence[str],
  actuator_subset: Sequence[str] | None = None,
  preserve_order: bool = False,
) -> tuple[list[int], list[str]]:
  """Resolve actuators whose names match ``name_keys`` within an optional subset."""
  subset = self.actuator_names if actuator_subset is None else actuator_subset
  return resolve_matching_names(name_keys, subset, preserve_order)
|
| 355 |
+
|
| 356 |
+
def find_tendons(
  self,
  name_keys: str | Sequence[str],
  tendon_subset: Sequence[str] | None = None,
  preserve_order: bool = False,
) -> tuple[list[int], list[str]]:
  """Resolve tendons whose names match ``name_keys`` within an optional subset."""
  subset = self.tendon_names if tendon_subset is None else tendon_subset
  return resolve_matching_names(name_keys, subset, preserve_order)
|
| 365 |
+
|
| 366 |
+
def find_joints_by_actuator_names(
  self,
  actuator_name_keys: str | Sequence[str],
) -> tuple[list[int], list[str]]:
  """Resolve joints targeted by actuators whose names match ``actuator_name_keys``.

  Returns entity-local joint indices (into ``self.joint_names``) and the
  matched joint names.
  """
  # Union of every joint name any actuator targets.
  actuated: set[str] = set()
  for act in self._actuators:
    actuated.update(act.target_names)

  # Restrict to actuated joints while keeping the entity's natural order.
  ordered_actuated = [name for name in self.joint_names if name in actuated]

  # Match the pattern only within the actuated joints.
  _, matched = self.find_joints(
    actuator_name_keys, joint_subset=ordered_actuated, preserve_order=False
  )

  # Translate matched names back to entity-local joint indices.
  index_of = {name: i for i, name in enumerate(self.joint_names)}
  return [index_of[name] for name in matched], matched
|
| 389 |
+
|
| 390 |
+
def find_geoms(
  self,
  name_keys: str | Sequence[str],
  geom_subset: Sequence[str] | None = None,
  preserve_order: bool = False,
) -> tuple[list[int], list[str]]:
  """Resolve geoms whose names match ``name_keys`` within an optional subset."""
  subset = self.geom_names if geom_subset is None else geom_subset
  return resolve_matching_names(name_keys, subset, preserve_order)
|
| 399 |
+
|
| 400 |
+
def find_sites(
  self,
  name_keys: str | Sequence[str],
  site_subset: Sequence[str] | None = None,
  preserve_order: bool = False,
) -> tuple[list[int], list[str]]:
  """Resolve sites whose names match ``name_keys`` within an optional subset."""
  subset = self.site_names if site_subset is None else site_subset
  return resolve_matching_names(name_keys, subset, preserve_order)
|
| 409 |
+
|
| 410 |
+
def compile(self) -> mujoco.MjModel:
|
| 411 |
+
"""Compile the underlying MjSpec into an MjModel."""
|
| 412 |
+
return self.spec.compile()
|
| 413 |
+
|
| 414 |
+
def write_xml(self, xml_path: Path) -> None:
|
| 415 |
+
"""Write the MjSpec to disk."""
|
| 416 |
+
with open(xml_path, "w") as f:
|
| 417 |
+
f.write(self.spec.to_xml())
|
| 418 |
+
|
| 419 |
+
def to_zip(self, path: Path) -> None:
  """Write the MjSpec and its assets to a zip archive at ``path``."""
  with path.open("wb") as fp:
    mujoco.MjSpec.to_zip(self.spec, fp)
|
| 423 |
+
|
| 424 |
+
def initialize(
  self,
  mj_model: mujoco.MjModel,
  model: mjwarp.Model,
  data: mjwarp.Data,
  device: str,
) -> None:
  """Allocate per-environment buffers and bind the entity to the simulation.

  Computes global indexing, initializes actuators, and builds the
  EntityData container with default states, limits, and target buffers.

  Args:
    mj_model: Compiled CPU-side MuJoCo model.
    model: Warp-side model.
    data: Warp-side data; ``data.nworld`` gives the environment count.
    device: Torch device string for all allocated tensors.
  """
  indexing = self._compute_indexing(mj_model, device)
  self.indexing = indexing
  nworld = data.nworld

  for act in self._actuators:
    act.initialize(mj_model, model, data, device)

  # Vectorize built-in actuators; we'll loop through custom ones.
  builtin_group, custom_actuators = BuiltinActuatorGroup.process(self._actuators)
  self._builtin_group = builtin_group
  self._custom_actuators = custom_actuators

  # Root state.
  # Fixed-base: pose only (7 values). Floating-base: pose + velocities (13).
  root_state_components = [self.cfg.init_state.pos, self.cfg.init_state.rot]
  if not self.is_fixed_base:
    root_state_components.extend(
      [self.cfg.init_state.lin_vel, self.cfg.init_state.ang_vel]
    )
  default_root_state = torch.tensor(
    sum((tuple(c) for c in root_state_components), ()),
    dtype=torch.float,
    device=device,
  ).repeat(nworld, 1)

  # Joint state.
  if self.is_articulated:
    if self.cfg.init_state.joint_pos is None:
      # Use keyframe joint positions.
      key_qpos = mj_model.key("init_state").qpos
      # Skip the free joint's 7 qpos entries for floating-base entities.
      nq_root = 7 if not self.is_fixed_base else 0
      default_joint_pos = torch.tensor(key_qpos[nq_root:], device=device)[
        None
      ].repeat(nworld, 1)
    else:
      default_joint_pos = torch.tensor(
        resolve_expr(self.cfg.init_state.joint_pos, self.joint_names, 0.0),
        device=device,
      )[None].repeat(nworld, 1)
    default_joint_vel = torch.tensor(
      resolve_expr(self.cfg.init_state.joint_vel, self.joint_names, 0.0),
      device=device,
    )[None].repeat(nworld, 1)

    # Joint limits.
    joint_ids_global = torch.tensor(
      [j.id for j in self._non_free_joints], device=device
    )
    dof_limits = model.jnt_range[:, joint_ids_global]
    default_joint_pos_limits = dof_limits.clone()
    joint_pos_limits = default_joint_pos_limits.clone()
    joint_pos_mean = (joint_pos_limits[..., 0] + joint_pos_limits[..., 1]) / 2
    joint_pos_range = joint_pos_limits[..., 1] - joint_pos_limits[..., 0]

    # Soft limits.
    # Shrink the usable range symmetrically about its midpoint by the
    # configured factor (1.0 keeps the full range).
    soft_limit_factor = (
      self.cfg.articulation.soft_joint_pos_limit_factor
      if self.cfg.articulation
      else 1.0
    )
    soft_joint_pos_limits = torch.stack(
      [
        joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor,
        joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor,
      ],
      dim=-1,
    )
  else:
    # Non-articulated: allocate empty (nworld, 0) joint buffers.
    empty_shape = (nworld, 0)
    default_joint_pos = torch.empty(*empty_shape, dtype=torch.float, device=device)
    default_joint_vel = torch.empty(*empty_shape, dtype=torch.float, device=device)
    default_joint_pos_limits = torch.empty(
      *empty_shape, 2, dtype=torch.float, device=device
    )
    joint_pos_limits = torch.empty(*empty_shape, 2, dtype=torch.float, device=device)
    soft_joint_pos_limits = torch.empty(
      *empty_shape, 2, dtype=torch.float, device=device
    )

  if self.is_actuated:
    joint_pos_target = torch.zeros(
      (nworld, self.num_joints), dtype=torch.float, device=device
    )
    joint_vel_target = torch.zeros(
      (nworld, self.num_joints), dtype=torch.float, device=device
    )
    joint_effort_target = torch.zeros(
      (nworld, self.num_joints), dtype=torch.float, device=device
    )
  else:
    joint_pos_target = torch.empty(nworld, 0, dtype=torch.float, device=device)
    joint_vel_target = torch.empty(nworld, 0, dtype=torch.float, device=device)
    joint_effort_target = torch.empty(nworld, 0, dtype=torch.float, device=device)

  # Only allocate tendon targets if there are actuators using tendon transmission.
  if self.has_tendon_actuators:
    num_tendons = len(self.tendon_names)
    tendon_len_target = torch.zeros(
      (nworld, num_tendons), dtype=torch.float, device=device
    )
    tendon_vel_target = torch.zeros(
      (nworld, num_tendons), dtype=torch.float, device=device
    )
    tendon_effort_target = torch.zeros(
      (nworld, num_tendons), dtype=torch.float, device=device
    )
  else:
    tendon_len_target = torch.empty(nworld, 0, dtype=torch.float, device=device)
    tendon_vel_target = torch.empty(nworld, 0, dtype=torch.float, device=device)
    tendon_effort_target = torch.empty(nworld, 0, dtype=torch.float, device=device)

  # Only allocate site targets if there are actuators using site transmission.
  if self.has_site_actuators:
    num_sites = len(self.site_names)
    site_effort_target = torch.zeros(
      (nworld, num_sites), dtype=torch.float, device=device
    )
  else:
    site_effort_target = torch.empty(nworld, 0, dtype=torch.float, device=device)

  # Encoder bias for simulating encoder calibration errors.
  # Shape: (num_envs, num_joints). Defaults to zero (no bias).
  if self.is_articulated:
    encoder_bias = torch.zeros(
      (nworld, self.num_joints), dtype=torch.float, device=device
    )
  else:
    encoder_bias = torch.empty(nworld, 0, dtype=torch.float, device=device)

  self._data = EntityData(
    indexing=indexing,
    data=data,
    model=model,
    device=device,
    default_root_state=default_root_state,
    default_joint_pos=default_joint_pos,
    default_joint_vel=default_joint_vel,
    default_joint_pos_limits=default_joint_pos_limits,
    joint_pos_limits=joint_pos_limits,
    soft_joint_pos_limits=soft_joint_pos_limits,
    gravity_vec_w=torch.tensor([0.0, 0.0, -1.0], device=device).repeat(nworld, 1),
    forward_vec_b=torch.tensor([1.0, 0.0, 0.0], device=device).repeat(nworld, 1),
    is_fixed_base=self.is_fixed_base,
    is_articulated=self.is_articulated,
    is_actuated=self.is_actuated,
    joint_pos_target=joint_pos_target,
    joint_vel_target=joint_vel_target,
    joint_effort_target=joint_effort_target,
    tendon_len_target=tendon_len_target,
    tendon_vel_target=tendon_vel_target,
    tendon_effort_target=tendon_effort_target,
    site_effort_target=site_effort_target,
    encoder_bias=encoder_bias,
  )
|
| 584 |
+
|
| 585 |
+
def update(self, dt: float) -> None:
|
| 586 |
+
for act in self._actuators:
|
| 587 |
+
act.update(dt)
|
| 588 |
+
|
| 589 |
+
def reset(self, env_ids: torch.Tensor | slice | None = None) -> None:
|
| 590 |
+
self.clear_state(env_ids)
|
| 591 |
+
|
| 592 |
+
for act in self._actuators:
|
| 593 |
+
act.reset(env_ids)
|
| 594 |
+
|
| 595 |
+
def write_data_to_sim(self) -> None:
|
| 596 |
+
self._apply_actuator_controls()
|
| 597 |
+
|
| 598 |
+
def clear_state(self, env_ids: torch.Tensor | slice | None = None) -> None:
|
| 599 |
+
self._data.clear_state(env_ids)
|
| 600 |
+
|
| 601 |
+
def write_ctrl_to_sim(
|
| 602 |
+
self,
|
| 603 |
+
ctrl: torch.Tensor,
|
| 604 |
+
ctrl_ids: torch.Tensor | slice | None = None,
|
| 605 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 606 |
+
) -> None:
|
| 607 |
+
"""Write control inputs to the simulation.
|
| 608 |
+
|
| 609 |
+
Args:
|
| 610 |
+
ctrl: A tensor of control inputs.
|
| 611 |
+
ctrl_ids: A tensor of control indices.
|
| 612 |
+
env_ids: Optional tensor or slice specifying which environments to set.
|
| 613 |
+
If None, all environments are set.
|
| 614 |
+
"""
|
| 615 |
+
self._data.write_ctrl(ctrl, ctrl_ids, env_ids)
|
| 616 |
+
|
| 617 |
+
def write_root_state_to_sim(
|
| 618 |
+
self, root_state: torch.Tensor, env_ids: torch.Tensor | slice | None = None
|
| 619 |
+
) -> None:
|
| 620 |
+
"""Set the root state into the simulation.
|
| 621 |
+
|
| 622 |
+
The root state consists of position (3), orientation as a (w, x, y, z)
|
| 623 |
+
quaternion (4), linear velocity (3), and angular velocity (3), for a total
|
| 624 |
+
of 13 values. All of the quantities are in the world frame.
|
| 625 |
+
|
| 626 |
+
Args:
|
| 627 |
+
root_state: Tensor of shape (N, 13) where N is the number of environments.
|
| 628 |
+
env_ids: Optional tensor or slice specifying which environments to set. If
|
| 629 |
+
None, all environments are set.
|
| 630 |
+
"""
|
| 631 |
+
self._data.write_root_state(root_state, env_ids)
|
| 632 |
+
|
| 633 |
+
def write_root_link_pose_to_sim(
|
| 634 |
+
self,
|
| 635 |
+
root_pose: torch.Tensor,
|
| 636 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 637 |
+
):
|
| 638 |
+
"""Set the root pose into the simulation. Like `write_root_state_to_sim()`
|
| 639 |
+
but only sets position and orientation.
|
| 640 |
+
|
| 641 |
+
Args:
|
| 642 |
+
root_pose: Tensor of shape (N, 7) where N is the number of environments.
|
| 643 |
+
env_ids: Optional tensor or slice specifying which environments to set. If
|
| 644 |
+
None, all environments are set.
|
| 645 |
+
"""
|
| 646 |
+
self._data.write_root_pose(root_pose, env_ids)
|
| 647 |
+
|
| 648 |
+
def write_root_link_velocity_to_sim(
|
| 649 |
+
self,
|
| 650 |
+
root_velocity: torch.Tensor,
|
| 651 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 652 |
+
):
|
| 653 |
+
"""Set the root link (body origin) velocity into the simulation. Like
|
| 654 |
+
`write_root_state_to_sim()` but only sets linear and angular velocity.
|
| 655 |
+
|
| 656 |
+
Args:
|
| 657 |
+
root_velocity: Tensor of shape (N, 6) where N is the number of environments.
|
| 658 |
+
Contains linear velocity (3) at body origin and angular velocity (3),
|
| 659 |
+
both in world frame.
|
| 660 |
+
env_ids: Optional tensor or slice specifying which environments to set. If
|
| 661 |
+
None, all environments are set.
|
| 662 |
+
"""
|
| 663 |
+
self._data.write_root_velocity(root_velocity, env_ids)
|
| 664 |
+
|
| 665 |
+
def write_root_com_velocity_to_sim(
|
| 666 |
+
self,
|
| 667 |
+
root_velocity: torch.Tensor,
|
| 668 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 669 |
+
):
|
| 670 |
+
"""Set the root COM velocity into the simulation.
|
| 671 |
+
|
| 672 |
+
Args:
|
| 673 |
+
root_velocity: Tensor of shape (N, 6) where N is the number of environments.
|
| 674 |
+
Contains linear velocity (3) at COM and angular velocity (3),
|
| 675 |
+
both in world frame.
|
| 676 |
+
env_ids: Optional tensor or slice specifying which environments to set. If
|
| 677 |
+
None, all environments are set.
|
| 678 |
+
"""
|
| 679 |
+
self._data.write_root_com_velocity(root_velocity, env_ids)
|
| 680 |
+
|
| 681 |
+
def write_joint_state_to_sim(
|
| 682 |
+
self,
|
| 683 |
+
position: torch.Tensor,
|
| 684 |
+
velocity: torch.Tensor,
|
| 685 |
+
joint_ids: torch.Tensor | slice | None = None,
|
| 686 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 687 |
+
):
|
| 688 |
+
"""Set the joint state into the simulation.
|
| 689 |
+
|
| 690 |
+
The joint state consists of joint positions and velocities. It does not include
|
| 691 |
+
the root state.
|
| 692 |
+
|
| 693 |
+
Args:
|
| 694 |
+
position: Tensor of shape (N, num_joints) where N is the number of environments.
|
| 695 |
+
velocity: Tensor of shape (N, num_joints) where N is the number of environments.
|
| 696 |
+
joint_ids: Optional tensor or slice specifying which joints to set. If None,
|
| 697 |
+
all joints are set.
|
| 698 |
+
env_ids: Optional tensor or slice specifying which environments to set. If
|
| 699 |
+
None, all environments are set.
|
| 700 |
+
"""
|
| 701 |
+
self._data.write_joint_state(position, velocity, joint_ids, env_ids)
|
| 702 |
+
|
| 703 |
+
def write_joint_position_to_sim(
|
| 704 |
+
self,
|
| 705 |
+
position: torch.Tensor,
|
| 706 |
+
joint_ids: torch.Tensor | slice | None = None,
|
| 707 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 708 |
+
):
|
| 709 |
+
"""Set the joint positions into the simulation. Like `write_joint_state_to_sim()`
|
| 710 |
+
but only sets joint positions.
|
| 711 |
+
|
| 712 |
+
Args:
|
| 713 |
+
position: Tensor of shape (N, num_joints) where N is the number of environments.
|
| 714 |
+
joint_ids: Optional tensor or slice specifying which joints to set. If None,
|
| 715 |
+
all joints are set.
|
| 716 |
+
env_ids: Optional tensor or slice specifying which environments to set. If
|
| 717 |
+
None, all environments are set.
|
| 718 |
+
"""
|
| 719 |
+
self._data.write_joint_position(position, joint_ids, env_ids)
|
| 720 |
+
|
| 721 |
+
def write_joint_velocity_to_sim(
|
| 722 |
+
self,
|
| 723 |
+
velocity: torch.Tensor,
|
| 724 |
+
joint_ids: torch.Tensor | slice | None = None,
|
| 725 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 726 |
+
):
|
| 727 |
+
"""Set the joint velocities into the simulation. Like `write_joint_state_to_sim()`
|
| 728 |
+
but only sets joint velocities.
|
| 729 |
+
|
| 730 |
+
Args:
|
| 731 |
+
velocity: Tensor of shape (N, num_joints) where N is the number of environments.
|
| 732 |
+
joint_ids: Optional tensor or slice specifying which joints to set. If None,
|
| 733 |
+
all joints are set.
|
| 734 |
+
env_ids: Optional tensor or slice specifying which environments to set. If
|
| 735 |
+
None, all environments are set.
|
| 736 |
+
"""
|
| 737 |
+
self._data.write_joint_velocity(velocity, joint_ids, env_ids)
|
| 738 |
+
|
| 739 |
+
def set_joint_position_target(
|
| 740 |
+
self,
|
| 741 |
+
position: torch.Tensor,
|
| 742 |
+
joint_ids: torch.Tensor | slice | None = None,
|
| 743 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 744 |
+
) -> None:
|
| 745 |
+
"""Set joint position targets.
|
| 746 |
+
|
| 747 |
+
Args:
|
| 748 |
+
position: Target joint poisitions with shape (N, num_joints).
|
| 749 |
+
joint_ids: Optional joint indices to set. If None, set all joints.
|
| 750 |
+
env_ids: Optional environment indices. If None, set all environments.
|
| 751 |
+
"""
|
| 752 |
+
if env_ids is None:
|
| 753 |
+
env_ids = slice(None)
|
| 754 |
+
if joint_ids is None:
|
| 755 |
+
joint_ids = slice(None)
|
| 756 |
+
self._data.joint_pos_target[env_ids, joint_ids] = position
|
| 757 |
+
|
| 758 |
+
def set_joint_velocity_target(
|
| 759 |
+
self,
|
| 760 |
+
velocity: torch.Tensor,
|
| 761 |
+
joint_ids: torch.Tensor | slice | None = None,
|
| 762 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 763 |
+
) -> None:
|
| 764 |
+
"""Set joint velocity targets.
|
| 765 |
+
|
| 766 |
+
Args:
|
| 767 |
+
velocity: Target joint velocities with shape (N, num_joints).
|
| 768 |
+
joint_ids: Optional joint indices to set. If None, set all joints.
|
| 769 |
+
env_ids: Optional environment indices. If None, set all environments.
|
| 770 |
+
"""
|
| 771 |
+
if env_ids is None:
|
| 772 |
+
env_ids = slice(None)
|
| 773 |
+
if joint_ids is None:
|
| 774 |
+
joint_ids = slice(None)
|
| 775 |
+
self._data.joint_vel_target[env_ids, joint_ids] = velocity
|
| 776 |
+
|
| 777 |
+
def set_joint_effort_target(
|
| 778 |
+
self,
|
| 779 |
+
effort: torch.Tensor,
|
| 780 |
+
joint_ids: torch.Tensor | slice | None = None,
|
| 781 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 782 |
+
) -> None:
|
| 783 |
+
"""Set joint effort targets.
|
| 784 |
+
|
| 785 |
+
Args:
|
| 786 |
+
effort: Target joint efforts with shape (N, num_joints).
|
| 787 |
+
joint_ids: Optional joint indices to set. If None, set all joints.
|
| 788 |
+
env_ids: Optional environment indices. If None, set all environments.
|
| 789 |
+
"""
|
| 790 |
+
if env_ids is None:
|
| 791 |
+
env_ids = slice(None)
|
| 792 |
+
if joint_ids is None:
|
| 793 |
+
joint_ids = slice(None)
|
| 794 |
+
self._data.joint_effort_target[env_ids, joint_ids] = effort
|
| 795 |
+
|
| 796 |
+
def set_tendon_len_target(
|
| 797 |
+
self,
|
| 798 |
+
length: torch.Tensor,
|
| 799 |
+
tendon_ids: torch.Tensor | slice | None = None,
|
| 800 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 801 |
+
) -> None:
|
| 802 |
+
"""Set tendon length targets.
|
| 803 |
+
|
| 804 |
+
Args:
|
| 805 |
+
length: Target tendon lengths with shape (N, num_tendons).
|
| 806 |
+
tendon_ids: Optional tendon indices to set. If None, set all tendons.
|
| 807 |
+
env_ids: Optional environment indices. If None, set all environments.
|
| 808 |
+
"""
|
| 809 |
+
if env_ids is None:
|
| 810 |
+
env_ids = slice(None)
|
| 811 |
+
if tendon_ids is None:
|
| 812 |
+
tendon_ids = slice(None)
|
| 813 |
+
self._data.tendon_len_target[env_ids, tendon_ids] = length
|
| 814 |
+
|
| 815 |
+
def set_tendon_vel_target(
|
| 816 |
+
self,
|
| 817 |
+
velocity: torch.Tensor,
|
| 818 |
+
tendon_ids: torch.Tensor | slice | None = None,
|
| 819 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 820 |
+
) -> None:
|
| 821 |
+
"""Set tendon velocity targets.
|
| 822 |
+
|
| 823 |
+
Args:
|
| 824 |
+
velocity: Target tendon velocities with shape (N, num_tendons).
|
| 825 |
+
tendon_ids: Optional tendon indices to set. If None, set all tendons.
|
| 826 |
+
env_ids: Optional environment indices. If None, set all environments.
|
| 827 |
+
"""
|
| 828 |
+
if env_ids is None:
|
| 829 |
+
env_ids = slice(None)
|
| 830 |
+
if tendon_ids is None:
|
| 831 |
+
tendon_ids = slice(None)
|
| 832 |
+
self._data.tendon_vel_target[env_ids, tendon_ids] = velocity
|
| 833 |
+
|
| 834 |
+
def set_tendon_effort_target(
|
| 835 |
+
self,
|
| 836 |
+
effort: torch.Tensor,
|
| 837 |
+
tendon_ids: torch.Tensor | slice | None = None,
|
| 838 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 839 |
+
) -> None:
|
| 840 |
+
"""Set tendon effort targets.
|
| 841 |
+
|
| 842 |
+
Args:
|
| 843 |
+
effort: Target tendon efforts with shape (N, num_tendons).
|
| 844 |
+
tendon_ids: Optional tendon indices to set. If None, set all tendons.
|
| 845 |
+
env_ids: Optional environment indices. If None, set all environments.
|
| 846 |
+
"""
|
| 847 |
+
if env_ids is None:
|
| 848 |
+
env_ids = slice(None)
|
| 849 |
+
if tendon_ids is None:
|
| 850 |
+
tendon_ids = slice(None)
|
| 851 |
+
self._data.tendon_effort_target[env_ids, tendon_ids] = effort
|
| 852 |
+
|
| 853 |
+
def set_site_effort_target(
|
| 854 |
+
self,
|
| 855 |
+
effort: torch.Tensor,
|
| 856 |
+
site_ids: torch.Tensor | slice | None = None,
|
| 857 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 858 |
+
) -> None:
|
| 859 |
+
"""Set site effort targets.
|
| 860 |
+
|
| 861 |
+
Args:
|
| 862 |
+
effort: Target site efforts with shape (N, num_sites).
|
| 863 |
+
site_ids: Optional site indices to set. If None, set all sites.
|
| 864 |
+
env_ids: Optional environment indices. If None, set all environments.
|
| 865 |
+
"""
|
| 866 |
+
if env_ids is None:
|
| 867 |
+
env_ids = slice(None)
|
| 868 |
+
if site_ids is None:
|
| 869 |
+
site_ids = slice(None)
|
| 870 |
+
self._data.site_effort_target[env_ids, site_ids] = effort
|
| 871 |
+
|
| 872 |
+
def write_external_wrench_to_sim(
|
| 873 |
+
self,
|
| 874 |
+
forces: torch.Tensor,
|
| 875 |
+
torques: torch.Tensor,
|
| 876 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 877 |
+
body_ids: Sequence[int] | slice | None = None,
|
| 878 |
+
) -> None:
|
| 879 |
+
"""Apply external wrenches to bodies in the simulation.
|
| 880 |
+
|
| 881 |
+
Underneath the hood, this sets the `xfrc_applied` field in the MuJoCo data
|
| 882 |
+
structure. The wrenches are specified in the world frame and persist until
|
| 883 |
+
the next call to this function or until the simulation is reset.
|
| 884 |
+
|
| 885 |
+
Args:
|
| 886 |
+
forces: Tensor of shape (N, num_bodies, 3) where N is the number of
|
| 887 |
+
environments.
|
| 888 |
+
torques: Tensor of shape (N, num_bodies, 3) where N is the number of
|
| 889 |
+
environments.
|
| 890 |
+
env_ids: Optional tensor or slice specifying which environments to set. If
|
| 891 |
+
None, all environments are set.
|
| 892 |
+
body_ids: Optional list of body indices or slice specifying which bodies to
|
| 893 |
+
apply the wrenches to. If None, wrenches are applied to all bodies.
|
| 894 |
+
"""
|
| 895 |
+
self._data.write_external_wrench(forces, torques, body_ids, env_ids)
|
| 896 |
+
|
| 897 |
+
def write_mocap_pose_to_sim(
|
| 898 |
+
self,
|
| 899 |
+
mocap_pose: torch.Tensor,
|
| 900 |
+
env_ids: torch.Tensor | slice | None = None,
|
| 901 |
+
) -> None:
|
| 902 |
+
"""Set the pose of a mocap body into the simulation.
|
| 903 |
+
|
| 904 |
+
Args:
|
| 905 |
+
mocap_pose: Tensor of shape (N, 7) where N is the number of environments.
|
| 906 |
+
Format: [pos_x, pos_y, pos_z, quat_w, quat_x, quat_y, quat_z]
|
| 907 |
+
env_ids: Optional tensor or slice specifying which environments to set. If
|
| 908 |
+
None, all environments are set.
|
| 909 |
+
"""
|
| 910 |
+
self._data.write_mocap_pose(mocap_pose, env_ids)
|
| 911 |
+
|
| 912 |
+
##
|
| 913 |
+
# Private methods.
|
| 914 |
+
##
|
| 915 |
+
|
| 916 |
+
def _compute_indexing(self, model: mujoco.MjModel, device: str) -> EntityIndexing:
  """Build index tensors mapping this entity's elements into the compiled model.

  Args:
    model: Compiled MuJoCo model used to resolve joint qpos/dof addresses.
    device: Torch device on which the index tensors are allocated.

  Returns:
    An ``EntityIndexing`` bundle of element tuples and int index tensors.
  """

  def _ids(elems) -> torch.Tensor:
    # Every compiled MjSpec element exposes an integer `id`.
    return torch.tensor([e.id for e in elems], dtype=torch.int, device=device)

  def _adr(addresses: list[int]) -> torch.Tensor:
    return torch.tensor(addresses, dtype=torch.int, device=device)

  # Skip index 0: `spec.bodies[0]` is the world body, not part of the entity.
  bodies = tuple(self.spec.bodies[1:])
  joints = self._non_free_joints
  geoms = tuple(self.spec.geoms)
  sites = tuple(self.spec.sites)
  tendons = tuple(self.spec.tendons)

  body_ids = _ids(bodies)
  geom_ids = _ids(geoms)
  site_ids = _ids(sites)
  tendon_ids = _ids(tendons)
  joint_ids = _ids(joints)

  if self.is_actuated:
    actuators = tuple(self.spec.actuators)
    ctrl_ids = _ids(actuators)
  else:
    actuators = None
    ctrl_ids = torch.empty(0, dtype=torch.int, device=device)

  # Collect qpos/qvel addresses, keeping the free joint (if any) separate
  # from the remaining joints. A free joint spans 7 qpos / 6 dof entries.
  joint_q_adr: list[int] = []
  joint_v_adr: list[int] = []
  free_joint_q_adr: list[int] = []
  free_joint_v_adr: list[int] = []
  for joint in self.spec.joints:
    jnt = model.joint(joint.name)
    jnt_type = jnt.type[0]
    vadr = jnt.dofadr[0]
    qadr = jnt.qposadr[0]
    if jnt_type == mujoco.mjtJoint.mjJNT_FREE:
      free_joint_v_adr.extend(range(vadr, vadr + 6))
      free_joint_q_adr.extend(range(qadr, qadr + 7))
    else:
      joint_v_adr.extend(range(vadr, vadr + dof_width(jnt_type)))
      joint_q_adr.extend(range(qadr, qadr + qpos_width(jnt_type)))

  # Only fixed-base mocap entities have a mocap body to track.
  if self.is_fixed_base and self.is_mocap:
    mocap_id = int(model.body_mocapid[self.root_body.id])
  else:
    mocap_id = None

  return EntityIndexing(
    bodies=bodies,
    joints=joints,
    geoms=geoms,
    sites=sites,
    tendons=tendons,
    actuators=actuators,
    body_ids=body_ids,
    geom_ids=geom_ids,
    site_ids=site_ids,
    tendon_ids=tendon_ids,
    ctrl_ids=ctrl_ids,
    joint_ids=joint_ids,
    mocap_id=mocap_id,
    joint_q_adr=_adr(joint_q_adr),
    joint_v_adr=_adr(joint_v_adr),
    free_joint_q_adr=_adr(free_joint_q_adr),
    free_joint_v_adr=_adr(free_joint_v_adr),
  )
|
| 980 |
+
|
| 981 |
+
def _apply_actuator_controls(self) -> None:
  """Compute and write control signals from built-in and custom actuators."""
  # Built-in actuators write their controls in one batched call.
  self._builtin_group.apply_controls(self._data)
  # Custom actuators compute per-actuator commands and write them individually.
  for actuator in self._custom_actuators:
    command = actuator.get_command(self._data)
    self._data.write_ctrl(actuator.compute(command), actuator.ctrl_ids)
|
mjlab/src/mjlab/envs/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from mjlab.envs.manager_based_rl_env import ManagerBasedRlEnv as ManagerBasedRlEnv
|
| 2 |
+
from mjlab.envs.manager_based_rl_env import ManagerBasedRlEnvCfg as ManagerBasedRlEnvCfg
|
| 3 |
+
from mjlab.envs.types import VecEnvObs as VecEnvObs
|
| 4 |
+
from mjlab.envs.types import VecEnvStepReturn as VecEnvStepReturn
|
mjlab/src/mjlab/envs/manager_based_rl_env.py
ADDED
|
@@ -0,0 +1,524 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from dataclasses import dataclass, field
|
| 3 |
+
from typing import Any
|
| 4 |
+
|
| 5 |
+
import mujoco
|
| 6 |
+
import numpy as np
|
| 7 |
+
import torch
|
| 8 |
+
import warp as wp
|
| 9 |
+
from prettytable import PrettyTable
|
| 10 |
+
|
| 11 |
+
from mjlab.envs import types
|
| 12 |
+
from mjlab.envs.mdp.events import reset_scene_to_default
|
| 13 |
+
from mjlab.managers.action_manager import ActionManager, ActionTermCfg
|
| 14 |
+
from mjlab.managers.command_manager import (
|
| 15 |
+
CommandManager,
|
| 16 |
+
CommandTermCfg,
|
| 17 |
+
NullCommandManager,
|
| 18 |
+
)
|
| 19 |
+
from mjlab.managers.curriculum_manager import (
|
| 20 |
+
CurriculumManager,
|
| 21 |
+
CurriculumTermCfg,
|
| 22 |
+
NullCurriculumManager,
|
| 23 |
+
)
|
| 24 |
+
from mjlab.managers.event_manager import EventManager, EventTermCfg
|
| 25 |
+
from mjlab.managers.metrics_manager import (
|
| 26 |
+
MetricsManager,
|
| 27 |
+
MetricsTermCfg,
|
| 28 |
+
NullMetricsManager,
|
| 29 |
+
)
|
| 30 |
+
from mjlab.managers.observation_manager import ObservationGroupCfg, ObservationManager
|
| 31 |
+
from mjlab.managers.reward_manager import RewardManager, RewardTermCfg
|
| 32 |
+
from mjlab.managers.termination_manager import TerminationManager, TerminationTermCfg
|
| 33 |
+
from mjlab.scene import Scene
|
| 34 |
+
from mjlab.scene.scene import SceneCfg
|
| 35 |
+
from mjlab.sim import SimulationCfg
|
| 36 |
+
from mjlab.sim.sim import Simulation
|
| 37 |
+
from mjlab.utils import random as random_utils
|
| 38 |
+
from mjlab.utils.logging import print_info
|
| 39 |
+
from mjlab.utils.spaces import Box
|
| 40 |
+
from mjlab.utils.spaces import Dict as DictSpace
|
| 41 |
+
from mjlab.viewer.debug_visualizer import DebugVisualizer
|
| 42 |
+
from mjlab.viewer.offscreen_renderer import OffscreenRenderer
|
| 43 |
+
from mjlab.viewer.viewer_config import ViewerConfig
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@dataclass(kw_only=True)
class ManagerBasedRlEnvCfg:
  """Configuration for a manager-based RL environment.

  This config defines all aspects of an RL environment: the physical scene,
  observations, actions, rewards, terminations, and optional features like
  commands and curriculum learning.

  The environment step size is ``sim.mujoco.timestep * decimation``. For example,
  with a 2ms physics timestep and decimation=10, the environment runs at 50Hz.
  """

  # Base environment configuration.

  decimation: int
  """Number of physics simulation steps per environment step. Higher values mean
  coarser control frequency. Environment step duration = physics_dt * decimation."""

  scene: SceneCfg
  """Scene configuration defining terrain, entities, and sensors. The scene
  specifies ``num_envs``, the number of parallel environments."""

  observations: dict[str, ObservationGroupCfg] = field(default_factory=dict)
  """Observation groups configuration. Each group (e.g., "actor", "critic") contains
  observation terms that are concatenated. Groups can have different settings for
  noise, history, and delay."""

  actions: dict[str, ActionTermCfg] = field(default_factory=dict)
  """Action terms configuration. Each term controls a specific entity/aspect
  (e.g., joint positions). Action dimensions are concatenated across terms."""

  # NOTE: a lambda default_factory is required here (not a shared dict literal)
  # so each config instance gets its own event-term mapping.
  events: dict[str, EventTermCfg] = field(
    default_factory=lambda: {
      "reset_scene_to_default": EventTermCfg(
        func=reset_scene_to_default,
        mode="reset",
      )
    }
  )
  """Event terms for domain randomization and state resets. Default includes
  ``reset_scene_to_default`` which resets entities to their initial state.
  Can be set to empty to disable all events including default reset."""

  seed: int | None = None
  """Random seed for reproducibility. If None, a random seed is used. The actual
  seed used is stored back into this field after initialization."""

  sim: SimulationCfg = field(default_factory=SimulationCfg)
  """Simulation configuration including physics timestep, solver iterations,
  contact parameters, and NaN guarding."""

  viewer: ViewerConfig = field(default_factory=ViewerConfig)
  """Viewer configuration for rendering (camera position, resolution, etc.)."""

  # RL-specific configuration.

  episode_length_s: float = 0.0
  """Duration of an episode (in seconds).

  Episode length in steps is computed as:
    ceil(episode_length_s / (sim.mujoco.timestep * decimation))
  """

  rewards: dict[str, RewardTermCfg] = field(default_factory=dict)
  """Reward terms configuration."""

  terminations: dict[str, TerminationTermCfg] = field(default_factory=dict)
  """Termination terms configuration. If empty, episodes never reset. Use
  ``mdp.time_out`` with ``time_out=True`` for episode time limits."""

  commands: dict[str, CommandTermCfg] = field(default_factory=dict)
  """Command generator terms (e.g., velocity targets)."""

  curriculum: dict[str, CurriculumTermCfg] = field(default_factory=dict)
  """Curriculum terms for adaptive difficulty."""

  metrics: dict[str, MetricsTermCfg] = field(default_factory=dict)
  """Custom metric terms for logging per-step values as episode averages."""

  is_finite_horizon: bool = False
  """Whether the task has a finite or infinite horizon. Defaults to False (infinite).

  - **Finite horizon (True)**: The time limit defines the task boundary. When reached,
    no future value exists beyond it, so the agent receives a terminal done signal.
  - **Infinite horizon (False)**: The time limit is an artificial cutoff. The agent
    receives a truncated done signal to bootstrap the value of continuing beyond the
    limit.
  """

  scale_rewards_by_dt: bool = True
  """Whether to multiply rewards by the environment step duration (dt).

  When True (default), reward values are scaled by step_dt to normalize cumulative
  episodic rewards across different simulation frequencies. Set to False for
  algorithms that expect unscaled reward signals (e.g., HER, static reward scaling).
  """
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
class ManagerBasedRlEnv:
|
| 145 |
+
"""Manager-based RL environment."""
|
| 146 |
+
|
| 147 |
+
is_vector_env = True
|
| 148 |
+
metadata = {
|
| 149 |
+
"render_modes": [None, "rgb_array"],
|
| 150 |
+
"mujoco_version": mujoco.__version__,
|
| 151 |
+
"warp_version": wp.config.version,
|
| 152 |
+
}
|
| 153 |
+
cfg: ManagerBasedRlEnvCfg
|
| 154 |
+
|
| 155 |
+
  def __init__(
    self,
    cfg: ManagerBasedRlEnvCfg,
    device: str,
    render_mode: str | None = None,
    **kwargs,
  ) -> None:
    """Create the environment: scene, simulation, renderer, and managers.

    Args:
      cfg: Environment configuration.
      device: Device string for simulation and tensors (e.g. "cuda:0").
      render_mode: One of ``metadata["render_modes"]``; "rgb_array" creates
        an offscreen renderer.
      **kwargs: Ignored; accepted for gym-style constructor compatibility.
    """
    # Initialize base environment state.
    self.cfg = cfg
    if self.cfg.seed is not None:
      # Store the seed actually used back into the config (seed() may draw one).
      self.cfg.seed = self.seed(self.cfg.seed)
    self._sim_step_counter = 0
    self.extras = {}
    self.obs_buf = {}

    # Initialize scene and simulation.
    self.scene = Scene(self.cfg.scene, device=device)
    self.sim = Simulation(
      num_envs=self.scene.num_envs,
      cfg=self.cfg.sim,
      model=self.scene.compile(),
      device=device,
    )

    self.scene.initialize(
      mj_model=self.sim.mj_model,
      model=self.sim.model,
      data=self.sim.data,
    )

    # Wire sensor context to simulation for sense_graph.
    if self.scene.sensor_context is not None:
      self.sim.set_sensor_context(self.scene.sensor_context)

    # Print environment info.
    print_info("")
    table = PrettyTable()
    table.title = "Base Environment"
    table.field_names = ["Property", "Value"]
    table.align["Property"] = "l"
    table.align["Value"] = "l"
    table.add_row(["Number of environments", self.num_envs])
    table.add_row(["Environment device", self.device])
    table.add_row(["Environment seed", self.cfg.seed])
    table.add_row(["Physics step-size", self.physics_dt])
    table.add_row(["Environment step-size", self.step_dt])
    print_info(table.get_string())
    print_info("")

    # Initialize RL-specific state.
    self.common_step_counter = 0
    self.episode_length_buf = torch.zeros(
      cfg.scene.num_envs, device=device, dtype=torch.long
    )
    self.render_mode = render_mode
    self._offline_renderer: OffscreenRenderer | None = None
    if self.render_mode == "rgb_array":
      renderer = OffscreenRenderer(
        model=self.sim.mj_model, cfg=self.cfg.viewer, scene=self.scene
      )
      renderer.initialize()
      self._offline_renderer = renderer
      self.metadata["render_fps"] = 1.0 / self.step_dt

    # Load all managers.
    self.load_managers()
    self.setup_manager_visualizers()
|
| 222 |
+
|
| 223 |
+
# Properties.
|
| 224 |
+
|
| 225 |
+
@property
|
| 226 |
+
def num_envs(self) -> int:
|
| 227 |
+
"""Number of parallel environments."""
|
| 228 |
+
return self.scene.num_envs
|
| 229 |
+
|
| 230 |
+
@property
|
| 231 |
+
def physics_dt(self) -> float:
|
| 232 |
+
"""Physics simulation step size."""
|
| 233 |
+
return self.cfg.sim.mujoco.timestep
|
| 234 |
+
|
| 235 |
+
@property
|
| 236 |
+
def step_dt(self) -> float:
|
| 237 |
+
"""Environment step size (physics_dt * decimation)."""
|
| 238 |
+
return self.cfg.sim.mujoco.timestep * self.cfg.decimation
|
| 239 |
+
|
| 240 |
+
@property
|
| 241 |
+
def device(self) -> str:
|
| 242 |
+
"""Device for computation."""
|
| 243 |
+
return self.sim.device
|
| 244 |
+
|
| 245 |
+
@property
|
| 246 |
+
def max_episode_length_s(self) -> float:
|
| 247 |
+
"""Maximum episode length in seconds."""
|
| 248 |
+
return self.cfg.episode_length_s
|
| 249 |
+
|
| 250 |
+
@property
|
| 251 |
+
def max_episode_length(self) -> int:
|
| 252 |
+
"""Maximum episode length in steps."""
|
| 253 |
+
return math.ceil(self.max_episode_length_s / self.step_dt)
|
| 254 |
+
|
| 255 |
+
@property
|
| 256 |
+
def unwrapped(self) -> "ManagerBasedRlEnv":
|
| 257 |
+
"""Get the unwrapped environment (base case for wrapper chains)."""
|
| 258 |
+
return self
|
| 259 |
+
|
| 260 |
+
# Methods.
|
| 261 |
+
|
| 262 |
+
def setup_manager_visualizers(self) -> None:
|
| 263 |
+
self.manager_visualizers = {}
|
| 264 |
+
if getattr(self.command_manager, "active_terms", None):
|
| 265 |
+
self.manager_visualizers["command_manager"] = self.command_manager
|
| 266 |
+
|
| 267 |
+
def load_managers(self) -> None:
|
| 268 |
+
"""Load and initialize all managers.
|
| 269 |
+
|
| 270 |
+
Order is important! Event and command managers must be loaded first,
|
| 271 |
+
then action and observation managers, then other RL managers.
|
| 272 |
+
"""
|
| 273 |
+
# Event manager (required before everything else for domain randomization).
|
| 274 |
+
self.event_manager = EventManager(self.cfg.events, self)
|
| 275 |
+
print_info(f"[INFO] {self.event_manager}")
|
| 276 |
+
|
| 277 |
+
self.sim.expand_model_fields(self.event_manager.domain_randomization_fields)
|
| 278 |
+
|
| 279 |
+
# Command manager (must be before observation manager since observations
|
| 280 |
+
# may reference commands).
|
| 281 |
+
if len(self.cfg.commands) > 0:
|
| 282 |
+
self.command_manager = CommandManager(self.cfg.commands, self)
|
| 283 |
+
else:
|
| 284 |
+
self.command_manager = NullCommandManager()
|
| 285 |
+
print_info(f"[INFO] {self.command_manager}")
|
| 286 |
+
|
| 287 |
+
# Action and observation managers.
|
| 288 |
+
self.action_manager = ActionManager(self.cfg.actions, self)
|
| 289 |
+
print_info(f"[INFO] {self.action_manager}")
|
| 290 |
+
self.observation_manager = ObservationManager(self.cfg.observations, self)
|
| 291 |
+
print_info(f"[INFO] {self.observation_manager}")
|
| 292 |
+
|
| 293 |
+
# Other RL-specific managers.
|
| 294 |
+
|
| 295 |
+
self.termination_manager = TerminationManager(self.cfg.terminations, self)
|
| 296 |
+
print_info(f"[INFO] {self.termination_manager}")
|
| 297 |
+
self.reward_manager = RewardManager(
|
| 298 |
+
self.cfg.rewards, self, scale_by_dt=self.cfg.scale_rewards_by_dt
|
| 299 |
+
)
|
| 300 |
+
print_info(f"[INFO] {self.reward_manager}")
|
| 301 |
+
if len(self.cfg.curriculum) > 0:
|
| 302 |
+
self.curriculum_manager = CurriculumManager(self.cfg.curriculum, self)
|
| 303 |
+
else:
|
| 304 |
+
self.curriculum_manager = NullCurriculumManager()
|
| 305 |
+
print_info(f"[INFO] {self.curriculum_manager}")
|
| 306 |
+
if len(self.cfg.metrics) > 0:
|
| 307 |
+
self.metrics_manager = MetricsManager(self.cfg.metrics, self)
|
| 308 |
+
else:
|
| 309 |
+
self.metrics_manager = NullMetricsManager()
|
| 310 |
+
print_info(f"[INFO] {self.metrics_manager}")
|
| 311 |
+
|
| 312 |
+
# Configure spaces for the environment.
|
| 313 |
+
self._configure_gym_env_spaces()
|
| 314 |
+
|
| 315 |
+
# Initialize startup events if defined.
|
| 316 |
+
if "startup" in self.event_manager.available_modes:
|
| 317 |
+
self.event_manager.apply(mode="startup")
|
| 318 |
+
|
| 319 |
+
  def reset(
    self,
    *,
    seed: int | None = None,
    env_ids: torch.Tensor | None = None,
    options: dict[str, Any] | None = None,
  ) -> tuple[types.VecEnvObs, dict]:
    """Reset environments and compute initial observations.

    Args:
      seed: Optional seed applied to all RNGs before resetting.
      env_ids: Environments to reset. If None, all environments are reset.
      options: Unused; present for gymnasium API compatibility.

    Returns:
      Tuple of (observation buffer, extras dict).
    """
    del options  # Unused.
    if env_ids is None:
      env_ids = torch.arange(self.num_envs, dtype=torch.int64, device=self.device)
    if seed is not None:
      self.seed(seed)
    self._reset_idx(env_ids)
    self.scene.write_data_to_sim()
    # Refresh derived quantities and sensors before computing observations.
    self.sim.forward()
    self.sim.sense()
    self.obs_buf = self.observation_manager.compute(update_history=True)
    return self.obs_buf, self.extras
|
| 337 |
+
|
| 338 |
+
  def step(self, action: torch.Tensor) -> types.VecEnvStepReturn:
    """Run one environment step: apply actions, simulate, compute RL signals.

    **Forward-call placement.** MuJoCo's ``mj_step`` runs forward kinematics
    *before* integration, so after stepping, derived quantities (``xpos``,
    ``xquat``, ``site_xpos``, ``cvel``, ``sensordata``) lag ``qpos``/``qvel``
    by one physics substep. Rather than calling ``sim.forward()`` twice (once
    after the decimation loop and once after the reset block), this method
    calls it **once**, right before observation computation. This single call
    refreshes derived quantities for *all* envs: non-reset envs pick up
    post-decimation kinematics, reset envs pick up post-reset kinematics.

    The tradeoff is that termination and reward managers see derived
    quantities that are stale by one physics substep (the last ``mj_step``
    ran ``mj_forward`` from *pre*-integration ``qpos``). In practice, the
    staleness is negligible for reward shaping and termination
    checks. Critically, the staleness is *consistent*: every env,
    every step, always sees the same lag, so the MDP is well-defined
    and the value function can learn the correct mapping.

    .. note::

      Event and command authors do not need to call ``sim.forward()``
      themselves. This method handles it. The only constraint is: do not
      read derived quantities (``root_link_pose_w``, ``body_link_vel_w``,
      etc.) in the same function that writes state
      (``write_root_state_to_sim``, ``write_joint_state_to_sim``, etc.).
      See :ref:`faq` for details.

    Args:
      action: Batched action tensor; moved to the simulation device before
        being processed by the action manager.

    Returns:
      Tuple of (observations, rewards, terminated flags, time-out flags,
      extras).
    """
    self.action_manager.process_action(action.to(self.device))

    # Run `decimation` physics substeps per environment step.
    for _ in range(self.cfg.decimation):
      self._sim_step_counter += 1
      self.action_manager.apply_action()
      self.scene.write_data_to_sim()
      self.sim.step()
      self.scene.update(dt=self.physics_dt)

    # Update env counters.
    self.episode_length_buf += 1
    self.common_step_counter += 1

    # Check terminations and compute rewards.
    # NOTE: Derived quantities (xpos, xquat, ...) are stale by one physics
    # substep here. See the docstring above for why this is acceptable.
    self.reset_buf = self.termination_manager.compute()
    self.reset_terminated = self.termination_manager.terminated
    self.reset_time_outs = self.termination_manager.time_outs

    self.reward_buf = self.reward_manager.compute(dt=self.step_dt)
    self.metrics_manager.compute()

    # Reset envs that terminated/timed-out and log the episode info.
    reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
    if len(reset_env_ids) > 0:
      self._reset_idx(reset_env_ids)
      self.scene.write_data_to_sim()

    # Single forward() call: recompute derived quantities from current
    # qpos/qvel for every env. For non-reset envs this resolves the
    # one-substep staleness left by mj_step; for reset envs it picks up
    # the freshly written reset state.
    self.sim.forward()

    self.command_manager.compute(dt=self.step_dt)

    if "interval" in self.event_manager.available_modes:
      self.event_manager.apply(mode="interval", dt=self.step_dt)

    self.sim.sense()
    self.obs_buf = self.observation_manager.compute(update_history=True)

    return (
      self.obs_buf,
      self.reward_buf,
      self.reset_terminated,
      self.reset_time_outs,
      self.extras,
    )
|
| 417 |
+
|
| 418 |
+
def render(self) -> np.ndarray | None:
|
| 419 |
+
if self.render_mode == "human" or self.render_mode is None:
|
| 420 |
+
return None
|
| 421 |
+
elif self.render_mode == "rgb_array":
|
| 422 |
+
if self._offline_renderer is None:
|
| 423 |
+
raise ValueError("Offline renderer not initialized")
|
| 424 |
+
debug_callback = (
|
| 425 |
+
self.update_visualizers if hasattr(self, "update_visualizers") else None
|
| 426 |
+
)
|
| 427 |
+
self._offline_renderer.update(self.sim.data, debug_vis_callback=debug_callback)
|
| 428 |
+
return self._offline_renderer.render()
|
| 429 |
+
else:
|
| 430 |
+
raise NotImplementedError(
|
| 431 |
+
f"Render mode {self.render_mode} is not supported. "
|
| 432 |
+
f"Please use: {self.metadata['render_modes']}."
|
| 433 |
+
)
|
| 434 |
+
|
| 435 |
+
def close(self) -> None:
|
| 436 |
+
if self._offline_renderer is not None:
|
| 437 |
+
self._offline_renderer.close()
|
| 438 |
+
|
| 439 |
+
@staticmethod
|
| 440 |
+
def seed(seed: int = -1) -> int:
|
| 441 |
+
if seed == -1:
|
| 442 |
+
seed = np.random.randint(0, 10_000)
|
| 443 |
+
print_info(f"Setting seed: {seed}")
|
| 444 |
+
random_utils.seed_rng(seed)
|
| 445 |
+
return seed
|
| 446 |
+
|
| 447 |
+
def update_visualizers(self, visualizer: DebugVisualizer) -> None:
  """Run one debug-visualization pass over all manager and sensor visualizers."""
  for vis_source in (*self.manager_visualizers.values(), *self.scene.sensors.values()):
    vis_source.debug_vis(visualizer)
|
| 452 |
+
|
| 453 |
+
# Private methods.
|
| 454 |
+
|
| 455 |
+
def _configure_gym_env_spaces(self) -> None:
  """Build single-env and batched Gym spaces from the managers' term metadata."""
  from mjlab.utils.spaces import batch_space

  def _unbounded_box(shape) -> Box:
    # All observation/action entries are unbounded floats.
    return Box(shape=shape, low=-math.inf, high=math.inf)

  obs_manager = self.observation_manager
  self.single_observation_space = DictSpace()
  for group_name, term_names in obs_manager.active_terms.items():
    group_dim = obs_manager.group_obs_dim[group_name]
    if obs_manager.group_obs_concatenate[group_name]:
      # Concatenated groups expose one flat Box per group.
      assert isinstance(group_dim, tuple)
      self.single_observation_space.spaces[group_name] = _unbounded_box(group_dim)
    else:
      # Non-concatenated groups expose a nested dict of per-term Boxes.
      assert not isinstance(group_dim, tuple)
      term_cfgs = obs_manager._group_obs_term_cfgs[group_name]
      nested = DictSpace()
      for term_name, term_dim, _cfg in zip(
        term_names, group_dim, term_cfgs, strict=False
      ):
        nested.spaces[term_name] = _unbounded_box(term_dim)
      self.single_observation_space.spaces[group_name] = nested

  total_action_dim = sum(self.action_manager.action_term_dim)
  self.single_action_space = _unbounded_box((total_action_dim,))

  # Replicate the per-env spaces across the vectorized batch dimension.
  self.observation_space = batch_space(self.single_observation_space, self.num_envs)
  self.action_space = batch_space(self.single_action_space, self.num_envs)
|
| 485 |
+
|
| 486 |
+
def _reset_idx(self, env_ids: torch.Tensor | None = None) -> None:
  """Reset the given environments and collect per-manager logging info.

  Args:
    env_ids: Indices of environments to reset, or ``None`` for all.
  """
  self.curriculum_manager.compute(env_ids=env_ids)
  self.sim.reset(env_ids)
  self.scene.reset(env_ids)

  if "reset" in self.event_manager.available_modes:
    env_step_count = self._sim_step_counter // self.cfg.decimation
    self.event_manager.apply(
      mode="reset", env_ids=env_ids, global_env_step_count=env_step_count
    )

  # NOTE: This is order sensitive.
  self.extras["log"] = dict()
  for manager in (
    self.observation_manager,
    self.action_manager,
    self.reward_manager,
    self.metrics_manager,
    self.curriculum_manager,
    self.command_manager,
    self.event_manager,
    self.termination_manager,
  ):
    self.extras["log"].update(manager.reset(env_ids))

  # Restart episode clocks for the reset environments.
  self.episode_length_buf[env_ids] = 0
|
mjlab/src/mjlab/envs/types.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
VecEnvObs = Dict[str, torch.Tensor | Dict[str, torch.Tensor]]
|
| 6 |
+
VecEnvStepReturn = tuple[VecEnvObs, torch.Tensor, torch.Tensor, torch.Tensor, dict]
|
mjlab/src/mjlab/managers/__init__.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Environment managers."""
|
| 2 |
+
|
| 3 |
+
from mjlab.managers.action_manager import ActionManager as ActionManager
|
| 4 |
+
from mjlab.managers.action_manager import ActionTerm as ActionTerm
|
| 5 |
+
from mjlab.managers.action_manager import ActionTermCfg as ActionTermCfg
|
| 6 |
+
from mjlab.managers.command_manager import CommandManager as CommandManager
|
| 7 |
+
from mjlab.managers.command_manager import CommandTerm as CommandTerm
|
| 8 |
+
from mjlab.managers.command_manager import CommandTermCfg as CommandTermCfg
|
| 9 |
+
from mjlab.managers.command_manager import NullCommandManager as NullCommandManager
|
| 10 |
+
from mjlab.managers.curriculum_manager import CurriculumManager as CurriculumManager
|
| 11 |
+
from mjlab.managers.curriculum_manager import CurriculumTermCfg as CurriculumTermCfg
|
| 12 |
+
from mjlab.managers.curriculum_manager import (
|
| 13 |
+
NullCurriculumManager as NullCurriculumManager,
|
| 14 |
+
)
|
| 15 |
+
from mjlab.managers.event_manager import EventManager as EventManager
|
| 16 |
+
from mjlab.managers.event_manager import EventMode as EventMode
|
| 17 |
+
from mjlab.managers.event_manager import EventTermCfg as EventTermCfg
|
| 18 |
+
from mjlab.managers.manager_base import ManagerBase as ManagerBase
|
| 19 |
+
from mjlab.managers.manager_base import ManagerTermBase as ManagerTermBase
|
| 20 |
+
from mjlab.managers.manager_base import ManagerTermBaseCfg as ManagerTermBaseCfg
|
| 21 |
+
from mjlab.managers.metrics_manager import MetricsManager as MetricsManager
|
| 22 |
+
from mjlab.managers.metrics_manager import MetricsTermCfg as MetricsTermCfg
|
| 23 |
+
from mjlab.managers.metrics_manager import NullMetricsManager as NullMetricsManager
|
| 24 |
+
from mjlab.managers.observation_manager import (
|
| 25 |
+
ObservationGroupCfg as ObservationGroupCfg,
|
| 26 |
+
)
|
| 27 |
+
from mjlab.managers.observation_manager import ObservationManager as ObservationManager
|
| 28 |
+
from mjlab.managers.observation_manager import ObservationTermCfg as ObservationTermCfg
|
| 29 |
+
from mjlab.managers.reward_manager import RewardManager as RewardManager
|
| 30 |
+
from mjlab.managers.reward_manager import RewardTermCfg as RewardTermCfg
|
| 31 |
+
from mjlab.managers.scene_entity_config import SceneEntityCfg as SceneEntityCfg
|
| 32 |
+
from mjlab.managers.termination_manager import TerminationManager as TerminationManager
|
| 33 |
+
from mjlab.managers.termination_manager import TerminationTermCfg as TerminationTermCfg
|
mjlab/src/mjlab/managers/action_manager.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Action manager for processing actions sent to the environment."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import abc
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import TYPE_CHECKING, Sequence
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from prettytable import PrettyTable
|
| 11 |
+
|
| 12 |
+
from mjlab.managers.manager_base import ManagerBase, ManagerTermBase
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from mjlab.envs import ManagerBasedRlEnv
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@dataclass(kw_only=True)
|
| 19 |
+
class ActionTermCfg(abc.ABC):
|
| 20 |
+
"""Configuration for an action term.
|
| 21 |
+
|
| 22 |
+
Action terms process raw actions from the policy and apply them to entities
|
| 23 |
+
in the scene (e.g., setting joint positions, velocities, or efforts).
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
entity_name: str
|
| 27 |
+
"""Name of the entity in the scene that this action term controls."""
|
| 28 |
+
|
| 29 |
+
clip: dict[str, tuple] | None = None
|
| 30 |
+
"""Optional clipping bounds per transmission type. Maps transmission name
|
| 31 |
+
(e.g., 'position', 'velocity') to (min, max) tuple."""
|
| 32 |
+
|
| 33 |
+
@abc.abstractmethod
|
| 34 |
+
def build(self, env: ManagerBasedRlEnv) -> ActionTerm:
|
| 35 |
+
"""Build the action term from this config."""
|
| 36 |
+
raise NotImplementedError
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ActionTerm(ManagerTermBase):
  """Base class for action terms.

  An action term receives its slice of the raw actions sent to the environment
  and applies them to the entity it manages.
  """

  def __init__(self, cfg: ActionTermCfg, env: ManagerBasedRlEnv):
    self.cfg = cfg
    super().__init__(env)
    # Resolve the controlled entity once, up front.
    self._entity = self._env.scene[self.cfg.entity_name]

  @property
  @abc.abstractmethod
  def action_dim(self) -> int:
    """Number of action dimensions consumed by this term."""
    raise NotImplementedError

  @property
  @abc.abstractmethod
  def raw_action(self) -> torch.Tensor:
    """Unprocessed action slice most recently routed to this term."""
    raise NotImplementedError

  @abc.abstractmethod
  def process_actions(self, actions: torch.Tensor) -> None:
    """Pre-process a new batch of raw actions for this term."""
    raise NotImplementedError

  @abc.abstractmethod
  def apply_actions(self) -> None:
    """Apply the processed actions to the managed entity."""
    raise NotImplementedError
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class ActionManager(ManagerBase):
  """Manages action processing for the environment.

  Aggregates several action terms, each controlling a different entity or
  aspect of the simulation. The policy's flat action tensor is split into
  contiguous slices which are routed to the corresponding terms.
  """

  def __init__(self, cfg: dict[str, ActionTermCfg], env: ManagerBasedRlEnv):
    self.cfg = cfg
    super().__init__(env=env)

    # Action history buffers: current, previous, and the one before that.
    self._action = torch.zeros(
      (self.num_envs, self.total_action_dim), device=self.device
    )
    self._prev_action = torch.zeros_like(self._action)
    self._prev_prev_action = torch.zeros_like(self._action)

  def __str__(self) -> str:
    table = PrettyTable()
    table.title = f"Active Action Terms (shape: {self.total_action_dim})"
    table.field_names = ["Index", "Name", "Dimension"]
    table.align["Name"] = "l"
    table.align["Dimension"] = "r"
    for index, (name, term) in enumerate(self._terms.items()):
      table.add_row([index, name, term.action_dim])
    header = f"<ActionManager> contains {len(self._term_names)} active terms.\n"
    return header + table.get_string() + "\n"

  # Properties.

  @property
  def total_action_dim(self) -> int:
    return sum(self.action_term_dim)

  @property
  def action_term_dim(self) -> list[int]:
    return [term.action_dim for term in self._terms.values()]

  @property
  def action(self) -> torch.Tensor:
    return self._action

  @property
  def prev_action(self) -> torch.Tensor:
    return self._prev_action

  @property
  def prev_prev_action(self) -> torch.Tensor:
    return self._prev_prev_action

  @property
  def active_terms(self) -> list[str]:
    return self._term_names

  # Methods.

  def get_term(self, name: str) -> ActionTerm:
    return self._terms[name]

  def reset(self, env_ids: torch.Tensor | slice | None = None) -> dict[str, float]:
    if env_ids is None:
      env_ids = slice(None)
    # Clear the action history for the selected environments.
    for buf in (self._prev_action, self._prev_prev_action, self._action):
      buf[env_ids] = 0.0
    # Reset each action term as well.
    for term in self._terms.values():
      term.reset(env_ids=env_ids)
    return {}

  def process_action(self, action: torch.Tensor) -> None:
    if self.total_action_dim != action.shape[1]:
      raise ValueError(
        f"Invalid action shape, expected: {self.total_action_dim}, received: {action.shape[1]}."
      )
    # Shift the history before storing the new action.
    self._prev_prev_action[:] = self._prev_action
    self._prev_action[:] = self._action
    self._action[:] = action.to(self.device)
    # Route each contiguous slice to its term.
    offset = 0
    for term in self._terms.values():
      term.process_actions(action[:, offset : offset + term.action_dim])
      offset += term.action_dim

  def apply_action(self) -> None:
    for term in self._terms.values():
      term.apply_actions()

  def get_active_iterable_terms(
    self, env_idx: int
  ) -> Sequence[tuple[str, Sequence[float]]]:
    rows = []
    offset = 0
    for name, term in self._terms.items():
      values = self._action[env_idx, offset : offset + term.action_dim].cpu()
      rows.append((name, values.tolist()))
      offset += term.action_dim
    return rows

  def _prepare_terms(self):
    self._term_names: list[str] = []
    self._terms: dict[str, ActionTerm] = {}

    for term_name, term_cfg in self.cfg.items():
      term_cfg: ActionTermCfg | None
      if term_cfg is None:
        print(f"term: {term_name} set to None, skipping...")
        continue
      term = term_cfg.build(self._env)
      self._term_names.append(term_name)
      self._terms[term_name] = term
|
mjlab/src/mjlab/managers/command_manager.py
ADDED
|
@@ -0,0 +1,229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Command manager for generating and updating commands."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import abc
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import TYPE_CHECKING, Any, Sequence
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from prettytable import PrettyTable
|
| 11 |
+
|
| 12 |
+
from mjlab.managers.manager_base import ManagerBase, ManagerTermBase
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from mjlab.envs.manager_based_rl_env import ManagerBasedRlEnv
|
| 16 |
+
from mjlab.viewer.debug_visualizer import DebugVisualizer
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@dataclass(kw_only=True)
|
| 20 |
+
class CommandTermCfg(abc.ABC):
|
| 21 |
+
"""Configuration for a command generator term.
|
| 22 |
+
|
| 23 |
+
Command terms generate goal commands for the agent (e.g., target velocity,
|
| 24 |
+
target position). Commands are automatically resampled at configurable
|
| 25 |
+
intervals and can track metrics for logging.
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
resampling_time_range: tuple[float, float]
|
| 29 |
+
"""Time range in seconds for command resampling. When the timer expires, a new
|
| 30 |
+
command is sampled and the timer is reset to a value uniformly drawn from
|
| 31 |
+
``[min, max]``. Set both values equal for fixed-interval resampling."""
|
| 32 |
+
|
| 33 |
+
debug_vis: bool = False
|
| 34 |
+
"""Whether to enable debug visualization for this command term. When True,
|
| 35 |
+
the command term's ``_debug_vis_impl`` method is called each frame to render
|
| 36 |
+
visual aids (e.g., velocity arrows, target markers)."""
|
| 37 |
+
|
| 38 |
+
@abc.abstractmethod
|
| 39 |
+
def build(self, env: ManagerBasedRlEnv) -> CommandTerm:
|
| 40 |
+
"""Build the command term from this config."""
|
| 41 |
+
raise NotImplementedError
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class CommandTerm(ManagerTermBase):
  """Base class for command terms."""

  def __init__(self, cfg: CommandTermCfg, env: ManagerBasedRlEnv):
    self.cfg = cfg
    super().__init__(env)
    self.metrics = dict()
    # Per-env countdown (seconds) until the next command resample.
    self.time_left = torch.zeros(self.num_envs, device=self.device)
    # Number of commands sampled per env since the last reset.
    self.command_counter = torch.zeros(
      self.num_envs, device=self.device, dtype=torch.long
    )

  def debug_vis(self, visualizer: "DebugVisualizer") -> None:
    if self.cfg.debug_vis:
      self._debug_vis_impl(visualizer)

  def _debug_vis_impl(self, visualizer: "DebugVisualizer") -> None:
    pass

  @property
  @abc.abstractmethod
  def command(self):
    raise NotImplementedError

  def reset(self, env_ids: torch.Tensor | slice | None) -> dict[str, float]:
    assert isinstance(env_ids, torch.Tensor)
    # Log the mean of each metric over the resetting envs, then zero it.
    extras = {}
    for metric_name, metric_value in self.metrics.items():
      extras[metric_name] = torch.mean(metric_value[env_ids]).item()
      metric_value[env_ids] = 0.0
    self.command_counter[env_ids] = 0
    self._resample(env_ids)
    return extras

  def compute(self, dt: float) -> None:
    self._update_metrics()
    # Tick the resampling timers and redraw commands whose timer expired.
    self.time_left -= dt
    expired = (self.time_left <= 0.0).nonzero().flatten()
    if len(expired) > 0:
      self._resample(expired)
    self._update_command()

  def _resample(self, env_ids: torch.Tensor) -> None:
    if len(env_ids) != 0:
      # Draw a fresh countdown uniformly from the configured range.
      self.time_left[env_ids] = self.time_left[env_ids].uniform_(
        *self.cfg.resampling_time_range
      )
      self._resample_command(env_ids)
      self.command_counter[env_ids] += 1

  @abc.abstractmethod
  def _update_metrics(self) -> None:
    """Update the metrics based on the current state."""
    raise NotImplementedError

  @abc.abstractmethod
  def _resample_command(self, env_ids: torch.Tensor) -> None:
    """Resample the command for the specified environments."""
    raise NotImplementedError

  @abc.abstractmethod
  def _update_command(self) -> None:
    """Update the command based on the current state."""
    raise NotImplementedError
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class CommandManager(ManagerBase):
  """Manages command generation for the environment.

  The command manager generates and updates goal commands for the agent (e.g.,
  target velocity, target position). Commands are resampled at configurable
  intervals and can track metrics for logging.
  """

  _env: ManagerBasedRlEnv

  def __init__(self, cfg: dict[str, CommandTermCfg], env: ManagerBasedRlEnv):
    self._terms: dict[str, CommandTerm] = dict()

    self.cfg = cfg
    super().__init__(env)
    self._commands = dict()

  def __str__(self) -> str:
    msg = f"<CommandManager> contains {len(self._terms.values())} active terms.\n"
    table = PrettyTable()
    table.title = "Active Command Terms"
    table.field_names = ["Index", "Name", "Type"]
    table.align["Name"] = "l"
    for index, (name, term) in enumerate(self._terms.items()):
      table.add_row([index, name, term.__class__.__name__])
    msg += table.get_string()
    msg += "\n"
    return msg

  def debug_vis(self, visualizer: "DebugVisualizer") -> None:
    """Forward the debug-vis pass to every term (each checks its own flag)."""
    for term in self._terms.values():
      term.debug_vis(visualizer)

  # Properties.

  @property
  def active_terms(self) -> list[str]:
    return list(self._terms.keys())

  def get_active_iterable_terms(
    self, env_idx: int
  ) -> Sequence[tuple[str, Sequence[float]]]:
    """Return ``(term_name, command_values)`` pairs for one environment."""
    terms = []
    for name, term in self._terms.items():
      terms.append((name, term.command[env_idx].cpu().tolist()))
    return terms

  def reset(self, env_ids: torch.Tensor | None) -> dict[str, torch.Tensor]:
    """Reset all terms and collect their metrics under ``Metrics/<term>/...``."""
    extras = {}
    for name, term in self._terms.items():
      metrics = term.reset(env_ids=env_ids)
      for metric_name, metric_value in metrics.items():
        extras[f"Metrics/{name}/{metric_name}"] = metric_value
    return extras

  def compute(self, dt: float):
    for term in self._terms.values():
      term.compute(dt)

  def get_command(self, name: str) -> torch.Tensor:
    return self._terms[name].command

  def get_term(self, name: str) -> CommandTerm:
    return self._terms[name]

  def get_term_cfg(self, name: str) -> CommandTermCfg:
    return self.cfg[name]

  def _prepare_terms(self):
    for term_name, term_cfg in self.cfg.items():
      term_cfg: CommandTermCfg | None
      if term_cfg is None:
        print(f"term: {term_name} set to None, skipping...")
        continue
      term = term_cfg.build(self._env)
      if not isinstance(term, CommandTerm):
        # BUGFIX: the message previously referenced a non-existent
        # "CommandType"; the required base class is CommandTerm.
        raise TypeError(
          f"Returned object for the term {term_name} is not of type CommandTerm."
        )
      self._terms[term_name] = term
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
class NullCommandManager:
  """Placeholder for absent command manager that safely no-ops all operations."""

  def __init__(self):
    self.active_terms: list[str] = []
    self._terms: dict[str, Any] = {}
    self.cfg = None

  def __repr__(self) -> str:
    return "NullCommandManager()"

  def __str__(self) -> str:
    return "<NullCommandManager> (inactive)"

  # All operations below intentionally do nothing / return empty results.

  def debug_vis(self, visualizer: "DebugVisualizer") -> None:
    pass

  def compute(self, dt: float) -> None:
    pass

  def reset(self, env_ids: torch.Tensor | None = None) -> dict[str, torch.Tensor]:
    return {}

  def get_active_iterable_terms(
    self, env_idx: int
  ) -> Sequence[tuple[str, Sequence[float]]]:
    return []

  def get_command(self, name: str) -> None:
    return None

  def get_term(self, name: str) -> None:
    return None

  def get_term_cfg(self, name: str) -> None:
    return None
|
mjlab/src/mjlab/managers/curriculum_manager.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Curriculum manager for updating environment quantities subject to a training curriculum."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from copy import deepcopy
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import TYPE_CHECKING, Any, Sequence
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from prettytable import PrettyTable
|
| 11 |
+
|
| 12 |
+
from mjlab.managers.manager_base import ManagerBase, ManagerTermBaseCfg
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from mjlab.envs.manager_based_rl_env import ManagerBasedRlEnv
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@dataclass(kw_only=True)
class CurriculumTermCfg(ManagerTermBaseCfg):
  """Configuration for a curriculum term.

  Curriculum terms adjust environment parameters over the course of training
  to realize curriculum-learning strategies (e.g., gradually ramping up task
  difficulty).
  """

  pass
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class CurriculumManager(ManagerBase):
  """Manages curriculum learning for the environment.

  The curriculum manager updates environment parameters during training based
  on agent performance. Each term can modify different aspects of the task
  difficulty (e.g., terrain complexity, command ranges).
  """

  _env: ManagerBasedRlEnv

  def __init__(self, cfg: dict[str, CurriculumTermCfg], env: ManagerBasedRlEnv):
    self._term_names: list[str] = list()
    self._term_cfgs: list[CurriculumTermCfg] = list()
    self._class_term_cfgs: list[CurriculumTermCfg] = list()

    self.cfg = deepcopy(cfg)
    super().__init__(env)

    # Most recent state returned by each term's update function; None until
    # the term has been computed at least once.
    self._curriculum_state = dict()
    for term_name in self._term_names:
      self._curriculum_state[term_name] = None

  def __str__(self) -> str:
    msg = f"<CurriculumManager> contains {len(self._term_names)} active terms.\n"
    table = PrettyTable()
    table.title = "Active Curriculum Terms"
    table.field_names = ["Index", "Name"]
    table.align["Name"] = "l"
    for index, name in enumerate(self._term_names):
      table.add_row([index, name])
    msg += table.get_string()
    msg += "\n"
    return msg

  # Properties.

  @property
  def active_terms(self) -> list[str]:
    return self._term_names

  # Methods.

  def get_term_cfg(self, term_name: str) -> CurriculumTermCfg:
    """Return the config of an active term.

    Raises:
      ValueError: If ``term_name`` is not an active term.
    """
    if term_name not in self._term_names:
      raise ValueError(f"Term '{term_name}' not found in active terms.")
    return self._term_cfgs[self._term_names.index(term_name)]

  def get_active_iterable_terms(
    self, env_idx: int
  ) -> Sequence[tuple[str, Sequence[float]]]:
    """Return ``(term_name, values)`` pairs for all terms with recorded state."""
    terms = []
    for term_name, term_state in self._curriculum_state.items():
      if term_state is None:
        continue
      data = []
      if isinstance(term_state, dict):
        for _key, value in term_state.items():
          if isinstance(value, torch.Tensor):
            value = value.item()
          # BUGFIX: previously appended via ``terms[term_name]``, which raised
          # TypeError (indexing a list with a str) and never populated the
          # row. Accumulate into ``data`` like the non-dict branch does.
          data.append(value)
      else:
        if isinstance(term_state, torch.Tensor):
          term_state = term_state.item()
        data.append(term_state)
      terms.append((term_name, data))
    return terms

  def reset(self, env_ids: torch.Tensor | slice | None = None) -> dict[str, float]:
    """Log the current curriculum state and reset class-based terms."""
    extras = {}
    for term_name, term_state in self._curriculum_state.items():
      if term_state is not None:
        if isinstance(term_state, dict):
          for key, value in term_state.items():
            if isinstance(value, torch.Tensor):
              value = value.item()
            extras[f"Curriculum/{term_name}/{key}"] = value
        else:
          if isinstance(term_state, torch.Tensor):
            term_state = term_state.item()
          extras[f"Curriculum/{term_name}"] = term_state
    for term_cfg in self._class_term_cfgs:
      term_cfg.func.reset(env_ids=env_ids)
    return extras

  def compute(self, env_ids: torch.Tensor | slice | None = None):
    """Evaluate every term and record its returned curriculum state."""
    if env_ids is None:
      env_ids = slice(None)
    for name, term_cfg in zip(self._term_names, self._term_cfgs, strict=False):
      state = term_cfg.func(self._env, env_ids, **term_cfg.params)
      self._curriculum_state[name] = state

  def _prepare_terms(self):
    """Resolve term configs, skipping entries explicitly set to None."""
    for term_name, term_cfg in self.cfg.items():
      term_cfg: CurriculumTermCfg | None
      if term_cfg is None:
        print(f"term: {term_name} set to None, skipping...")
        continue
      self._resolve_common_term_cfg(term_name, term_cfg)
      self._term_names.append(term_name)
      self._term_cfgs.append(term_cfg)
      # Class-based terms may carry internal state that must be reset.
      if hasattr(term_cfg.func, "reset") and callable(term_cfg.func.reset):
        self._class_term_cfgs.append(term_cfg)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
class NullCurriculumManager:
|
| 133 |
+
"""Placeholder for absent curriculum manager that safely no-ops all operations."""
|
| 134 |
+
|
| 135 |
+
def __init__(self):
|
| 136 |
+
self.active_terms: list[str] = []
|
| 137 |
+
self._curriculum_state: dict[str, Any] = {}
|
| 138 |
+
self.cfg = None
|
| 139 |
+
|
| 140 |
+
def __str__(self) -> str:
|
| 141 |
+
return "<NullCurriculumManager> (inactive)"
|
| 142 |
+
|
| 143 |
+
def __repr__(self) -> str:
|
| 144 |
+
return "NullCurriculumManager()"
|
| 145 |
+
|
| 146 |
+
def get_active_iterable_terms(
|
| 147 |
+
self, env_idx: int
|
| 148 |
+
) -> Sequence[tuple[str, Sequence[float]]]:
|
| 149 |
+
return []
|
| 150 |
+
|
| 151 |
+
def reset(self, env_ids: torch.Tensor | None = None) -> dict[str, float]:
|
| 152 |
+
return {}
|
| 153 |
+
|
| 154 |
+
def compute(self, env_ids: torch.Tensor | None = None) -> None:
|
| 155 |
+
pass
|
mjlab/src/mjlab/managers/manager_base.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import abc
|
| 4 |
+
import inspect
|
| 5 |
+
from collections.abc import Sequence
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
from typing import TYPE_CHECKING, Any
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
from mjlab.managers.scene_entity_config import SceneEntityCfg
|
| 12 |
+
|
| 13 |
+
if TYPE_CHECKING:
|
| 14 |
+
from mjlab.envs import ManagerBasedRlEnv
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@dataclass
class ManagerTermBaseCfg:
  """Base configuration for manager terms.

  This is the base config for terms in observation, reward, termination, curriculum,
  and event managers. It provides a common interface for specifying a callable
  and its parameters.

  The ``func`` field accepts either a function or a class:

  **Function-based terms** are simpler and suitable for stateless computations:

  .. code-block:: python

    RewardTermCfg(func=mdp.joint_torques_l2, weight=-0.01)

  **Class-based terms** are instantiated with ``(cfg, env)`` and useful when you need
  to:

  - Cache computed values at initialization (e.g., resolve regex patterns to indices)
  - Maintain state across calls
  - Perform expensive setup once rather than every call

  .. code-block:: python

    class posture:
      def __init__(self, cfg: RewardTermCfg, env: ManagerBasedRlEnv):
        # Resolve std dict to tensor once at init
        self.std = resolve_std_to_tensor(cfg.params["std"], env)

      def __call__(self, env, **kwargs) -> torch.Tensor:
        # Use cached self.std
        return compute_posture_reward(env, self.std)

    RewardTermCfg(func=posture, params={"std": {".*knee.*": 0.3}}, weight=1.0)

  Class-based terms can optionally implement ``reset(env_ids)`` for per-episode state.
  """

  func: Any
  """The callable that computes this term's value. Can be a function or a class.
  Classes are auto-instantiated with ``(cfg=term_cfg, env=env)``."""

  # Idiom fix: ``default_factory=dict`` replaces the equivalent but
  # unidiomatic ``default_factory=lambda: {}``.
  params: dict[str, Any] = field(default_factory=dict)
  """Additional keyword arguments passed to func when called."""
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class ManagerTermBase:
  """Base class for class-based manager terms bound to an environment."""

  def __init__(self, env: ManagerBasedRlEnv):
    self._env = env

  # Properties.

  @property
  def num_envs(self) -> int:
    """Number of parallel environments."""
    return self._env.num_envs

  @property
  def device(self) -> str:
    """Device on which the environment's tensors live."""
    return self._env.device

  @property
  def name(self) -> str:
    """Name of the term, taken from the concrete class."""
    return type(self).__name__

  # Methods.

  def reset(self, env_ids: torch.Tensor | slice | None) -> Any:
    """Resets the manager term. Subclasses override when they hold state."""
    del env_ids  # Unused.

  def __call__(self, *args, **kwargs) -> Any:
    """Returns the value of the term required by the manager."""
    raise NotImplementedError
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class ManagerBase(abc.ABC):
  """Base class for all managers.

  Subclasses must implement ``_prepare_terms`` (called once from ``__init__``)
  and the ``active_terms`` property.
  """

  def __init__(self, env: ManagerBasedRlEnv):
    self._env = env

    # Subclass hook: parses self.cfg and registers terms.
    self._prepare_terms()

  # Properties.

  @property
  def num_envs(self) -> int:
    """Number of parallel environments."""
    return self._env.num_envs

  @property
  def device(self) -> str:
    """Device on which the environment's tensors live."""
    return self._env.device

  @property
  @abc.abstractmethod
  def active_terms(self) -> list[str] | dict[Any, list[str]]:
    """Names of the manager's active terms (per group, for grouped managers)."""
    raise NotImplementedError

  # Methods.

  def reset(self, env_ids: torch.Tensor) -> dict[str, Any]:
    """Resets the manager and returns logging info for the current step."""
    del env_ids  # Unused.
    return {}

  def get_active_iterable_terms(
    self, env_idx: int
  ) -> Sequence[tuple[str, Sequence[float]]]:
    """Returns (name, values) pairs for one environment; optional override."""
    raise NotImplementedError

  @abc.abstractmethod
  def _prepare_terms(self):
    """Parses the config and registers terms; called once from ``__init__``."""
    raise NotImplementedError

  def _resolve_common_term_cfg(self, term_name: str, term_cfg: ManagerTermBaseCfg):
    """Resolves SceneEntityCfg params in place and instantiates class-based terms."""
    del term_name  # Unused.
    for value in term_cfg.params.values():
      if isinstance(value, SceneEntityCfg):
        value.resolve(self._env.scene)
    # A class passed as ``func`` is constructed once with (cfg, env) and the
    # resulting instance replaces the class object in the config.
    if inspect.isclass(term_cfg.func):
      term_cfg.func = term_cfg.func(cfg=term_cfg, env=self._env)
|
mjlab/src/mjlab/managers/metrics_manager.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Metrics manager for logging custom per-step metrics during training."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from copy import deepcopy
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import TYPE_CHECKING, Sequence
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from prettytable import PrettyTable
|
| 11 |
+
|
| 12 |
+
from mjlab.managers.manager_base import ManagerBase, ManagerTermBaseCfg
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from mjlab.envs.manager_based_rl_env import ManagerBasedRlEnv
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@dataclass(kw_only=True)
class MetricsTermCfg(ManagerTermBaseCfg):
  """Configuration for a metrics term.

  Adds no fields beyond :class:`ManagerTermBaseCfg`; it is a distinct type so
  metrics-specific fields can be added later without affecting other managers.
  """

  pass
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class MetricsManager(ManagerBase):
  """Accumulates per-step metric values, reports episode averages.

  Unlike rewards, metrics have no weight, no dt scaling, and no
  normalization by episode length. Episode values are true per-step
  averages (sum / step_count), so a metric in [0,1] stays in [0,1]
  in the logger.
  """

  _env: ManagerBasedRlEnv

  def __init__(self, cfg: dict[str, MetricsTermCfg], env: ManagerBasedRlEnv):
    """Initializes term lists, episode accumulators, and step buffers.

    Args:
      cfg: Mapping from term name to its configuration (None entries skipped).
      env: The environment whose metrics are tracked.
    """
    self._term_names: list[str] = list()
    self._term_cfgs: list[MetricsTermCfg] = list()
    self._class_term_cfgs: list[MetricsTermCfg] = list()

    # Deep-copy: _resolve_common_term_cfg mutates cfgs (e.g. replaces class
    # funcs with instances), which must not leak back to the caller's config.
    self.cfg = deepcopy(cfg)
    # Base-class init calls _prepare_terms(), which fills the lists above.
    super().__init__(env=env)

    # Running per-env sum of each metric over the current episode.
    self._episode_sums: dict[str, torch.Tensor] = {}
    for term_name in self._term_names:
      self._episode_sums[term_name] = torch.zeros(
        self.num_envs, dtype=torch.float, device=self.device
      )
    # Number of compute() calls since the last reset, per env.
    self._step_count = torch.zeros(self.num_envs, dtype=torch.long, device=self.device)
    # Most recent per-term values (used by get_active_iterable_terms).
    self._step_values = torch.zeros(
      (self.num_envs, len(self._term_names)), dtype=torch.float, device=self.device
    )

  def __str__(self) -> str:
    """Returns a table listing the active metric terms."""
    msg = f"<MetricsManager> contains {len(self._term_names)} active terms.\n"
    table = PrettyTable()
    table.title = "Active Metrics Terms"
    table.field_names = ["Index", "Name"]
    table.align["Name"] = "l"
    for index, name in enumerate(self._term_names):
      table.add_row([index, name])
    msg += table.get_string()
    msg += "\n"
    return msg

  # Properties.

  @property
  def active_terms(self) -> list[str]:
    """Names of the active metric terms."""
    return self._term_names

  # Methods.

  def reset(
    self, env_ids: torch.Tensor | slice | None = None
  ) -> dict[str, torch.Tensor]:
    """Finalizes episode averages for the given envs and zeroes accumulators.

    Args:
      env_ids: Environments to reset. None resets all.

    Returns:
      Mapping "Episode_Metrics/<name>" -> mean per-step value over the episode,
      averaged across the reset environments.
    """
    if env_ids is None:
      env_ids = slice(None)
    extras = {}
    counts = self._step_count[env_ids].float()
    # Avoid division by zero for envs that haven't stepped.
    safe_counts = torch.clamp(counts, min=1.0)
    for key in self._episode_sums:
      episode_avg = torch.mean(self._episode_sums[key][env_ids] / safe_counts)
      extras["Episode_Metrics/" + key] = episode_avg
      self._episode_sums[key][env_ids] = 0.0
    self._step_count[env_ids] = 0
    # Class-based terms may hold per-episode state of their own.
    for term_cfg in self._class_term_cfgs:
      term_cfg.func.reset(env_ids=env_ids)
    return extras

  def compute(self) -> None:
    """Evaluates every metric term once and accumulates into episode sums."""
    self._step_count += 1
    for term_idx, (name, term_cfg) in enumerate(
      zip(self._term_names, self._term_cfgs, strict=False)
    ):
      value = term_cfg.func(self._env, **term_cfg.params)
      self._episode_sums[name] += value
      self._step_values[:, term_idx] = value

  def get_active_iterable_terms(
    self, env_idx: int
  ) -> Sequence[tuple[str, Sequence[float]]]:
    """Returns the most recently computed value of each term for one env."""
    terms = []
    for idx, name in enumerate(self._term_names):
      terms.append((name, [self._step_values[env_idx, idx].cpu().item()]))
    return terms

  def _prepare_terms(self):
    """Registers configured terms, skipping entries explicitly set to None."""
    for term_name, term_cfg in self.cfg.items():
      term_cfg: MetricsTermCfg | None
      if term_cfg is None:
        print(f"term: {term_name} set to None, skipping...")
        continue
      self._resolve_common_term_cfg(term_name, term_cfg)
      self._term_names.append(term_name)
      self._term_cfgs.append(term_cfg)
      # Terms that expose a callable ``reset`` are class-based and stateful.
      if hasattr(term_cfg.func, "reset") and callable(term_cfg.func.reset):
        self._class_term_cfgs.append(term_cfg)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
class NullMetricsManager:
|
| 123 |
+
"""Placeholder for absent metrics manager that safely no-ops all operations."""
|
| 124 |
+
|
| 125 |
+
def __init__(self):
|
| 126 |
+
self.active_terms: list[str] = []
|
| 127 |
+
self.cfg = None
|
| 128 |
+
|
| 129 |
+
def __str__(self) -> str:
|
| 130 |
+
return "<NullMetricsManager> (inactive)"
|
| 131 |
+
|
| 132 |
+
def __repr__(self) -> str:
|
| 133 |
+
return "NullMetricsManager()"
|
| 134 |
+
|
| 135 |
+
def get_active_iterable_terms(
|
| 136 |
+
self, env_idx: int
|
| 137 |
+
) -> Sequence[tuple[str, Sequence[float]]]:
|
| 138 |
+
return []
|
| 139 |
+
|
| 140 |
+
def reset(self, env_ids: torch.Tensor | None = None) -> dict[str, float]:
|
| 141 |
+
return {}
|
| 142 |
+
|
| 143 |
+
def compute(self) -> None:
|
| 144 |
+
pass
|
mjlab/src/mjlab/managers/observation_manager.py
ADDED
|
@@ -0,0 +1,485 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Observation manager for computing observations."""
|
| 2 |
+
|
| 3 |
+
from copy import deepcopy
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Literal, Sequence
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
from prettytable import PrettyTable
|
| 10 |
+
|
| 11 |
+
from mjlab.managers.manager_base import ManagerBase, ManagerTermBaseCfg
|
| 12 |
+
from mjlab.utils.buffers import CircularBuffer, DelayBuffer
|
| 13 |
+
from mjlab.utils.noise import noise_cfg, noise_model
|
| 14 |
+
from mjlab.utils.noise.noise_cfg import NoiseCfg, NoiseModelCfg
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@dataclass
class ObservationTermCfg(ManagerTermBaseCfg):
  """Configuration for an observation term.

  Processing pipeline: compute → noise → clip → scale → delay → history.
  Delay models sensor latency. History provides temporal context. Both are optional
  and can be combined; delay is applied first, so the history buffer stores
  already-delayed samples.
  """

  noise: NoiseCfg | NoiseModelCfg | None = None
  """Noise model to apply to the observation."""

  clip: tuple[float, float] | None = None
  """Range (min, max) to clip the observation values."""

  scale: tuple[float, ...] | float | torch.Tensor | None = None
  """Scaling factor(s) to multiply the observation by."""

  delay_min_lag: int = 0
  """Minimum lag (in steps) for delayed observations. Lag sampled uniformly from
  [min_lag, max_lag]. Convert to ms: lag * (1000 / control_hz)."""

  delay_max_lag: int = 0
  """Maximum lag (in steps) for delayed observations. Use min=max for constant delay."""

  delay_per_env: bool = True
  """If True, each environment samples its own lag. If False, all environments share
  the same lag at each step."""

  delay_hold_prob: float = 0.0
  """Probability of reusing the previous lag instead of resampling. Useful for
  temporally correlated latency patterns."""

  delay_update_period: int = 0
  """Resample lag every N steps (models multi-rate sensors). If 0, update every step."""

  delay_per_env_phase: bool = True
  """If True and update_period > 0, stagger update timing across envs to avoid
  synchronized resampling."""

  history_length: int = 0
  """Number of past observations to keep in history. 0 = no history."""

  flatten_history_dim: bool = True
  """Whether to flatten the history dimension into observation.

  When True and concatenate_terms=True, uses term-major ordering:
  [A_t0, A_t1, ..., A_tH-1, B_t0, B_t1, ..., B_tH-1, ...]
  See docs/source/observation.rst for details on ordering."""
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@dataclass
class ObservationGroupCfg:
  """Configuration for an observation group.

  An observation group bundles multiple observation terms together. Groups are
  typically used to separate observations for different purposes (e.g., "actor"
  for the actor, "critic" for the value function).
  """

  terms: dict[str, ObservationTermCfg]
  """Dictionary mapping term names to their configurations."""

  concatenate_terms: bool = True
  """Whether to concatenate all terms into a single tensor. If False, returns
  a dict mapping term names to their individual tensors."""

  concatenate_dim: int = -1
  """Dimension along which to concatenate terms. Default -1 (last dimension)."""

  enable_corruption: bool = False
  """Whether to apply noise corruption to observations. Set to True during
  training for domain randomization, False during evaluation."""

  history_length: int | None = None
  """Group-level history length override. If set, applies to all terms in
  this group. If None, each term uses its own ``history_length`` setting."""

  flatten_history_dim: bool = True
  """Whether to flatten history into the observation dimension. If True,
  observations have shape ``(num_envs, obs_dim * history_length)``. If False,
  shape is ``(num_envs, history_length, obs_dim)``."""

  nan_policy: Literal["disabled", "warn", "sanitize", "error"] = "disabled"
  """NaN/Inf handling policy for observations in this group.

  - 'disabled': No checks (default, fastest)
  - 'warn': Log warning with term name and env IDs, then sanitize (debugging)
  - 'sanitize': Silent sanitization to 0.0 like reward manager (safe for production)
  - 'error': Raise ValueError on NaN/Inf (strict development mode)
  """

  nan_check_per_term: bool = True
  """If True, check each observation term individually to identify NaN source.
  If False, check only the final concatenated output (faster but less informative).
  Only applies when nan_policy != 'disabled'."""
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class ObservationManager(ManagerBase):
|
| 116 |
+
"""Manages observation computation for the environment.
|
| 117 |
+
|
| 118 |
+
The observation manager computes observations from multiple terms organized
|
| 119 |
+
into groups. Each term can have noise, clipping, scaling, delay, and history
|
| 120 |
+
applied. Groups can optionally concatenate their terms into a single tensor.
|
| 121 |
+
"""
|
| 122 |
+
|
| 123 |
+
  def __init__(self, cfg: dict[str, ObservationGroupCfg], env):
    """Initializes term structures and precomputes per-group output shapes.

    Args:
      cfg: Mapping from group name to its configuration.
      env: The environment observations are computed from.
    """
    # Deep-copy so term resolution cannot mutate the caller's config.
    self.cfg = deepcopy(cfg)
    # Base-class init calls _prepare_terms(), which populates the
    # _group_obs_* structures read below.
    super().__init__(env=env)

    self._group_obs_dim: dict[str, tuple[int, ...] | list[tuple[int, ...]]] = dict()

    for group_name, group_term_dims in self._group_obs_term_dim.items():
      if self._group_obs_concatenate[group_name]:
        # Stack each term's shape tuple into a (num_terms, rank) tensor so the
        # concatenated size can be summed along the concat axis.
        term_dims = torch.stack(
          [torch.tensor(dims, device="cpu") for dims in group_term_dims], dim=0
        )
        if len(term_dims.shape) > 1:
          # Term shapes here exclude the batch dimension, so a non-negative
          # concatenate_dim is shifted down by one; a negative one indexes
          # from the end and needs no adjustment (presumably — confirm against
          # compute_group's concatenation).
          if self._group_obs_concatenate_dim[group_name] >= 0:
            dim = self._group_obs_concatenate_dim[group_name] - 1
          else:
            dim = self._group_obs_concatenate_dim[group_name]
          dim_sum = torch.sum(term_dims[:, dim], dim=0)
          # Reuse the first term's shape with the concat axis replaced by the sum.
          term_dims[0, dim] = dim_sum
          term_dims = term_dims[0]
        else:
          # All terms are 1-D: total size is the plain sum of lengths.
          term_dims = torch.sum(term_dims, dim=0)
        self._group_obs_dim[group_name] = tuple(term_dims.tolist())
      else:
        # Non-concatenated groups keep the per-term shape list.
        self._group_obs_dim[group_name] = group_term_dims

    # Cache of the last computed observations; None means stale/absent.
    self._obs_buffer: dict[str, torch.Tensor | dict[str, torch.Tensor]] | None = None
|
| 149 |
+
|
| 150 |
+
  def __str__(self) -> str:
    """Returns one table per group listing term names and shapes."""
    msg = f"<ObservationManager> contains {len(self._group_obs_term_names)} groups.\n"
    for group_name, group_dim in self._group_obs_dim.items():
      table = PrettyTable()
      table.title = f"Active Observation Terms in Group: '{group_name}'"
      if self._group_obs_concatenate[group_name]:
        table.title += f" (shape: {group_dim})"  # type: ignore
      table.field_names = ["Index", "Name", "Shape"]
      table.align["Name"] = "l"
      obs_terms = zip(
        self._group_obs_term_names[group_name],
        self._group_obs_term_dim[group_name],
        self._group_obs_term_cfgs[group_name],
        strict=False,
      )
      for index, (name, dims, term_cfg) in enumerate(obs_terms):
        if term_cfg.history_length > 0 and term_cfg.flatten_history_dim:
          # Flattened history: show (9,) ← 3×(3,)
          original_size = int(np.prod(dims)) // term_cfg.history_length
          original_shape = (original_size,) if len(dims) == 1 else dims[1:]
          shape_str = f"{dims} ← {term_cfg.history_length}×{original_shape}"
        else:
          shape_str = str(tuple(dims))
        table.add_row([index, name, shape_str])
      msg += table.get_string()
      msg += "\n"
    return msg
|
| 177 |
+
|
| 178 |
+
  def get_active_iterable_terms(
    self, env_idx: int
  ) -> Sequence[tuple[str, Sequence[float]]]:
    """Returns ("group-term", values) pairs for one environment.

    Computes observations first if no cached buffer exists. For concatenated
    groups, the flat group tensor is sliced back into per-term segments.
    """
    terms = []

    if self._obs_buffer is None:
      self.compute()
    assert self._obs_buffer is not None
    obs_buffer: dict[str, torch.Tensor | dict[str, torch.Tensor]] = self._obs_buffer

    for group_name, _ in self.group_obs_dim.items():
      if not self.group_obs_concatenate[group_name]:
        # Dict-valued group: one entry per term already.
        buffers = obs_buffer[group_name]
        assert isinstance(buffers, dict)
        for name, term in buffers.items():
          terms.append((group_name + "-" + name, term[env_idx].cpu().tolist()))  # type: ignore[unsupported-operator]
        continue

      # Concatenated group: walk the flat tensor, slicing each term's span.
      idx = 0
      data = obs_buffer[group_name]
      assert isinstance(data, torch.Tensor)
      for name, shape in zip(
        self._group_obs_term_names[group_name],
        self._group_obs_term_dim[group_name],
        strict=False,
      ):
        data_length = np.prod(shape)
        term = data[env_idx, idx : idx + data_length]
        terms.append((group_name + "-" + name, term.cpu().tolist()))
        idx += data_length

    return terms
|
| 210 |
+
|
| 211 |
+
  # Properties.

  @property
  def active_terms(self) -> dict[str, list[str]]:
    """Mapping from group name to the list of active term names."""
    return self._group_obs_term_names

  @property
  def group_obs_dim(self) -> dict[str, tuple[int, ...] | list[tuple[int, ...]]]:
    """Per-group output shape (a single tuple when terms are concatenated)."""
    return self._group_obs_dim

  @property
  def group_obs_term_dim(self) -> dict[str, list[tuple[int, ...]]]:
    """Per-group list of individual term shapes."""
    return self._group_obs_term_dim

  @property
  def group_obs_concatenate(self) -> dict[str, bool]:
    """Per-group flag: whether terms are concatenated into one tensor."""
    return self._group_obs_concatenate
|
| 228 |
+
|
| 229 |
+
# Methods.
|
| 230 |
+
|
| 231 |
+
def get_term_cfg(self, group_name: str, term_name: str) -> ObservationTermCfg:
|
| 232 |
+
if group_name not in self._group_obs_term_names:
|
| 233 |
+
raise ValueError(f"Group '{group_name}' not found in active groups.")
|
| 234 |
+
if term_name not in self._group_obs_term_names[group_name]:
|
| 235 |
+
raise ValueError(f"Term '{term_name}' not found in group '{group_name}'.")
|
| 236 |
+
index = self._group_obs_term_names[group_name].index(term_name)
|
| 237 |
+
return self._group_obs_term_cfgs[group_name][index]
|
| 238 |
+
|
| 239 |
+
  def reset(self, env_ids: torch.Tensor | slice | None = None) -> dict[str, float]:
    """Resets cached observations and per-term buffers for the given envs.

    Args:
      env_ids: Environments to reset. A slice (or None) is treated as "all"
        by the buffer APIs.

    Returns:
      An empty dict (the observation manager logs nothing on reset).
    """
    # Invalidate cache since reset envs will have different observations.
    self._obs_buffer = None

    for group_name, group_cfg in self._group_obs_class_term_cfgs.items():
      # Reset class-based terms first, then each term's delay/history buffers.
      for term_cfg in group_cfg:
        term_cfg.func.reset(env_ids=env_ids)
      for term_name in self._group_obs_term_names[group_name]:
        # Buffers take explicit ids or None for "all"; translate slices.
        batch_ids = None if isinstance(env_ids, slice) else env_ids
        if term_name in self._group_obs_term_delay_buffer[group_name]:
          self._group_obs_term_delay_buffer[group_name][term_name].reset(
            batch_ids=batch_ids
          )
        if term_name in self._group_obs_term_history_buffer[group_name]:
          self._group_obs_term_history_buffer[group_name][term_name].reset(
            batch_ids=batch_ids
          )
    # Reset stateful noise-model instances (used for NoiseModelCfg terms).
    for mod in self._group_obs_class_instances.values():
      mod.reset(env_ids=env_ids)
    return {}
|
| 259 |
+
|
| 260 |
+
def _check_and_handle_nans(
|
| 261 |
+
self, tensor: torch.Tensor, context: str, policy: str
|
| 262 |
+
) -> torch.Tensor:
|
| 263 |
+
"""Check for NaN/Inf and handle according to policy.
|
| 264 |
+
|
| 265 |
+
Args:
|
| 266 |
+
tensor: Observation tensor to check.
|
| 267 |
+
context: Context string for error/warning messages (e.g., "actor/base_lin_vel").
|
| 268 |
+
policy: NaN handling policy ("disabled", "warn", "sanitize", "error").
|
| 269 |
+
|
| 270 |
+
Returns:
|
| 271 |
+
The tensor, potentially sanitized depending on policy.
|
| 272 |
+
|
| 273 |
+
Raises:
|
| 274 |
+
ValueError: If policy is "error" and NaN/Inf detected.
|
| 275 |
+
"""
|
| 276 |
+
if policy == "disabled":
|
| 277 |
+
return tensor
|
| 278 |
+
|
| 279 |
+
has_nan = torch.isnan(tensor).any()
|
| 280 |
+
has_inf = torch.isinf(tensor).any()
|
| 281 |
+
|
| 282 |
+
if not (has_nan or has_inf):
|
| 283 |
+
return tensor
|
| 284 |
+
|
| 285 |
+
if policy == "error":
|
| 286 |
+
nan_mask = torch.isnan(tensor).any(dim=-1) | torch.isinf(tensor).any(dim=-1)
|
| 287 |
+
nan_env_ids = torch.where(nan_mask)[0].cpu().tolist()
|
| 288 |
+
raise ValueError(
|
| 289 |
+
f"NaN/Inf detected in observation '{context}' "
|
| 290 |
+
f"for environments: {nan_env_ids[:10]}"
|
| 291 |
+
)
|
| 292 |
+
|
| 293 |
+
if policy == "warn":
|
| 294 |
+
nan_mask = torch.isnan(tensor).any(dim=-1) | torch.isinf(tensor).any(dim=-1)
|
| 295 |
+
nan_env_ids = torch.where(nan_mask)[0].cpu().tolist()
|
| 296 |
+
print(
|
| 297 |
+
f"[ObservationManager] NaN/Inf in '{context}' "
|
| 298 |
+
f"(envs: {nan_env_ids[:5]}). Sanitizing to 0."
|
| 299 |
+
)
|
| 300 |
+
|
| 301 |
+
# Sanitize (applies to both "warn" and "sanitize" policies).
|
| 302 |
+
return torch.nan_to_num(tensor, nan=0.0, posinf=0.0, neginf=0.0)
|
| 303 |
+
|
| 304 |
+
  def compute(
    self, update_history: bool = False
  ) -> dict[str, torch.Tensor | dict[str, torch.Tensor]]:
    """Computes observations for every group and caches the result.

    Args:
      update_history: If True, new samples are pushed into history buffers
        (forwarded to compute_group).

    Returns:
      Mapping from group name to that group's observations (a tensor when the
      group concatenates terms, otherwise a dict of per-term tensors).
    """
    # Return cached observations if not updating and cache exists.
    # This prevents double-pushing to delay buffers when compute() is called
    # multiple times per control step (e.g., in get_observations() after step()).
    if not update_history and self._obs_buffer is not None:
      return self._obs_buffer

    obs_buffer: dict[str, torch.Tensor | dict[str, torch.Tensor]] = dict()
    for group_name in self._group_obs_term_names:
      obs_buffer[group_name] = self.compute_group(group_name, update_history)
    self._obs_buffer = obs_buffer
    return obs_buffer
|
| 318 |
+
|
| 319 |
+
  def compute_group(
    self, group_name: str, update_history: bool = False
  ) -> torch.Tensor | dict[str, torch.Tensor]:
    """Compute observations for a single group.

    For each active term: evaluate the term function, apply optional noise,
    clipping, and scaling (clip/scale mutate the cloned tensor in place), run
    the optional per-term NaN check, then route the result through the term's
    delay buffer and/or history buffer.

    Args:
      group_name: Name of the observation group in ``self.cfg``.
      update_history: When True, new samples are pushed into the per-term
        history buffers. Note that delay buffers are always appended to,
        regardless of this flag.

    Returns:
      A single concatenated tensor when the group is configured with
      ``concatenate_terms``, otherwise a dict mapping term name to tensor.
    """
    group_cfg = self.cfg[group_name]
    group_term_names = self._group_obs_term_names[group_name]
    group_obs: dict[str, torch.Tensor] = {}
    obs_terms = zip(
      group_term_names, self._group_obs_term_cfgs[group_name], strict=False
    )
    for term_name, term_cfg in obs_terms:
      # Clone so in-place clip_/mul_ below cannot corrupt the term's source tensor.
      obs: torch.Tensor = term_cfg.func(self._env, **term_cfg.params).clone()
      if isinstance(term_cfg.noise, noise_cfg.NoiseCfg):
        obs = term_cfg.noise.apply(obs)
      elif isinstance(term_cfg.noise, noise_cfg.NoiseModelCfg):
        # NOTE(review): noise-model instances are keyed by term name only (not
        # group); a same-named term in two groups would share one instance —
        # confirm intended.
        obs = self._group_obs_class_instances[term_name](obs)
      if term_cfg.clip:
        obs = obs.clip_(min=term_cfg.clip[0], max=term_cfg.clip[1])
      if term_cfg.scale is not None:
        scale = term_cfg.scale
        # Scale was converted to a tensor during _prepare_terms.
        assert isinstance(scale, torch.Tensor)
        obs = obs.mul_(scale)

      # Check for NaN/Inf before delay/history buffers (per-term checking).
      if group_cfg.nan_check_per_term and group_cfg.nan_policy != "disabled":
        obs = self._check_and_handle_nans(
          obs, context=f"{group_name}/{term_name}", policy=group_cfg.nan_policy
        )

      # Delay is applied before history, so history stores delayed samples.
      if term_cfg.delay_max_lag > 0:
        delay_buffer = self._group_obs_term_delay_buffer[group_name][term_name]
        delay_buffer.append(obs)
        obs = delay_buffer.compute()
      if term_cfg.history_length > 0:
        circular_buffer = self._group_obs_term_history_buffer[group_name][term_name]
        # Always seed an uninitialized buffer so reads below are valid.
        if update_history or not circular_buffer.is_initialized:
          circular_buffer.append(obs)

        if term_cfg.flatten_history_dim:
          group_obs[term_name] = circular_buffer.buffer.reshape(self._env.num_envs, -1)
        else:
          group_obs[term_name] = circular_buffer.buffer
      else:
        group_obs[term_name] = obs

    # Final NaN check for non-per-term checking.
    if not group_cfg.nan_check_per_term and group_cfg.nan_policy != "disabled":
      if self._group_obs_concatenate[group_name]:
        # Will check after concatenation below.
        pass
      else:
        for term_name in group_obs:
          group_obs[term_name] = self._check_and_handle_nans(
            group_obs[term_name],
            context=f"{group_name}/{term_name}",
            policy=group_cfg.nan_policy,
          )

    if self._group_obs_concatenate[group_name]:
      result = torch.cat(
        list(group_obs.values()), dim=self._group_obs_concatenate_dim[group_name]
      )
      # Final check for concatenated result (non-per-term checking).
      if not group_cfg.nan_check_per_term and group_cfg.nan_policy != "disabled":
        result = self._check_and_handle_nans(
          result, context=group_name, policy=group_cfg.nan_policy
        )
      return result
    return group_obs
| 388 |
+
  def _prepare_terms(self) -> None:
    """Resolve all observation group/term configs into manager state.

    Populates the per-group bookkeeping dicts (term names, dims, configs),
    converts scales to tensors, instantiates noise models, and allocates
    delay/history buffers. Groups or terms configured as None are skipped.
    """
    self._group_obs_term_names: dict[str, list[str]] = dict()
    self._group_obs_term_dim: dict[str, list[tuple[int, ...]]] = dict()
    self._group_obs_term_cfgs: dict[str, list[ObservationTermCfg]] = dict()
    self._group_obs_class_term_cfgs: dict[str, list[ObservationTermCfg]] = dict()
    self._group_obs_concatenate: dict[str, bool] = dict()
    self._group_obs_concatenate_dim: dict[str, int] = dict()
    self._group_obs_class_instances: dict[str, noise_model.NoiseModel] = {}
    self._group_obs_term_delay_buffer: dict[str, dict[str, DelayBuffer]] = dict()
    self._group_obs_term_history_buffer: dict[str, dict[str, CircularBuffer]] = dict()

    for group_name, group_cfg in self.cfg.items():
      group_cfg: ObservationGroupCfg | None
      if group_cfg is None:
        print(f"group: {group_name} set to None, skipping...")
        continue

      self._group_obs_term_names[group_name] = list()
      self._group_obs_term_dim[group_name] = list()
      self._group_obs_term_cfgs[group_name] = list()
      self._group_obs_class_term_cfgs[group_name] = list()
      group_entry_delay_buffer: dict[str, DelayBuffer] = dict()
      group_entry_history_buffer: dict[str, CircularBuffer] = dict()

      self._group_obs_concatenate[group_name] = group_cfg.concatenate_terms
      # Shift non-negative concat dims by one to skip the leading batch (env) dim;
      # negative dims already count from the end, so they pass through unchanged.
      self._group_obs_concatenate_dim[group_name] = (
        group_cfg.concatenate_dim + 1
        if group_cfg.concatenate_dim >= 0
        else group_cfg.concatenate_dim
      )

      for term_name, term_cfg in group_cfg.terms.items():
        term_cfg: ObservationTermCfg | None
        if term_cfg is None:
          print(f"term: {term_name} set to None, skipping...")
          continue

        # NOTE: This deepcopy is important to avoid cross-group contamination of term
        # configs.
        term_cfg = deepcopy(term_cfg)
        self._resolve_common_term_cfg(term_name, term_cfg)

        if not group_cfg.enable_corruption:
          term_cfg.noise = None
        # Group-level history settings override per-term settings.
        if group_cfg.history_length is not None:
          term_cfg.history_length = group_cfg.history_length
          term_cfg.flatten_history_dim = group_cfg.flatten_history_dim
        self._group_obs_term_names[group_name].append(term_name)
        self._group_obs_term_cfgs[group_name].append(term_cfg)
        # Terms backed by classes with a reset() hook get reset with the env.
        if hasattr(term_cfg.func, "reset") and callable(term_cfg.func.reset):
          self._group_obs_class_term_cfgs[group_name].append(term_cfg)

        # Evaluate the term once to discover its output shape.
        obs_dims = tuple(term_cfg.func(self._env, **term_cfg.params).shape)

        if term_cfg.scale is not None:
          term_cfg.scale = torch.tensor(
            term_cfg.scale, dtype=torch.float, device=self._env.device
          )

        if term_cfg.noise is not None and isinstance(
          term_cfg.noise, noise_cfg.NoiseModelCfg
        ):
          noise_model_cls = term_cfg.noise.class_type
          assert issubclass(noise_model_cls, noise_model.NoiseModel), (
            f"Class type for observation term '{term_name}' NoiseModelCfg"
            f" is not a subclass of 'NoiseModel'. Received: '{type(noise_model_cls)}'."
          )
          # NOTE(review): instances are keyed by term name only; a same-named
          # term in two groups would overwrite the earlier instance — confirm.
          self._group_obs_class_instances[term_name] = noise_model_cls(
            term_cfg.noise, num_envs=self._env.num_envs, device=self._env.device
          )

        if term_cfg.delay_max_lag > 0:
          group_entry_delay_buffer[term_name] = DelayBuffer(
            min_lag=term_cfg.delay_min_lag,
            max_lag=term_cfg.delay_max_lag,
            batch_size=self._env.num_envs,
            device=self._env.device,
            per_env=term_cfg.delay_per_env,
            hold_prob=term_cfg.delay_hold_prob,
            update_period=term_cfg.delay_update_period,
            per_env_phase=term_cfg.delay_per_env_phase,
          )

        if term_cfg.history_length > 0:
          group_entry_history_buffer[term_name] = CircularBuffer(
            max_len=term_cfg.history_length,
            batch_size=self._env.num_envs,
            device=self._env.device,
          )
          # History inserts a time axis after the batch dim; optionally flatten
          # everything past the batch dim into one feature axis.
          old_dims = list(obs_dims)
          old_dims.insert(1, term_cfg.history_length)
          obs_dims = tuple(old_dims)
          if term_cfg.flatten_history_dim:
            obs_dims = (obs_dims[0], int(np.prod(obs_dims[1:])))

        # Stored dims exclude the leading batch (env) dimension.
        self._group_obs_term_dim[group_name].append(obs_dims[1:])
      self._group_obs_term_delay_buffer[group_name] = group_entry_delay_buffer
      self._group_obs_term_history_buffer[group_name] = group_entry_history_buffer
mjlab/src/mjlab/managers/reward_manager.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Reward manager for computing reward signals."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from copy import deepcopy
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import TYPE_CHECKING, Any
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from prettytable import PrettyTable
|
| 11 |
+
|
| 12 |
+
from mjlab.managers.manager_base import ManagerBase, ManagerTermBaseCfg
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from mjlab.envs.manager_based_rl_env import ManagerBasedRlEnv
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@dataclass(kw_only=True)
class RewardTermCfg(ManagerTermBaseCfg):
  """Configuration for a reward term.

  A term with ``weight == 0.0`` is skipped entirely during reward
  computation (its function is not called).
  """

  func: Any
  """The callable that computes this reward term's value."""

  weight: float
  """Weight multiplier for this reward term."""
|
| 29 |
+
class RewardManager(ManagerBase):
  """Manages reward computation by aggregating weighted reward terms.

  Reward Scaling Behavior:
    By default, rewards are scaled by the environment step duration (dt). This
    normalizes cumulative episodic rewards across different simulation frequencies.
    The scaling can be disabled via the ``scale_by_dt`` parameter.

    When ``scale_by_dt=True`` (default):
      - ``reward_buf`` (returned by ``compute()``) = raw_value * weight * dt
      - ``_episode_sums`` (cumulative rewards) are scaled by dt
      - ``Episode_Reward/*`` logged metrics are scaled by dt

    When ``scale_by_dt=False``:
      - ``reward_buf`` = raw_value * weight (no dt scaling)

    Regardless of the scaling setting:
      - ``_step_reward`` (via ``get_active_iterable_terms()``) always contains
        the unscaled reward rate (raw_value * weight)
  """

  _env: ManagerBasedRlEnv

  def __init__(
    self,
    cfg: dict[str, RewardTermCfg],
    env: ManagerBasedRlEnv,
    *,
    scale_by_dt: bool = True,
  ):
    """Initialize the reward manager.

    Args:
      cfg: Mapping from term name to term config (None values are skipped).
      env: The environment the terms are evaluated against.
      scale_by_dt: Whether per-step rewards are multiplied by the step dt.
    """
    self._term_names: list[str] = list()
    self._term_cfgs: list[RewardTermCfg] = list()
    self._class_term_cfgs: list[RewardTermCfg] = list()
    self._scale_by_dt = scale_by_dt

    # Deepcopy so _prepare_terms (called by the base __init__) can mutate
    # term configs without affecting the caller's dict.
    self.cfg = deepcopy(cfg)
    super().__init__(env=env)
    # Per-term cumulative episode rewards, reset per environment.
    self._episode_sums: dict[str, torch.Tensor] = dict()
    for term_name in self._term_names:
      self._episode_sums[term_name] = torch.zeros(
        self.num_envs, dtype=torch.float, device=self.device
      )
    self._reward_buf = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
    # Unscaled per-term reward rate of the most recent step (for introspection).
    self._step_reward = torch.zeros(
      (self.num_envs, len(self._term_names)), dtype=torch.float, device=self.device
    )

  def __str__(self) -> str:
    """Return a human-readable table of active reward terms."""
    msg = f"<RewardManager> contains {len(self._term_names)} active terms.\n"
    table = PrettyTable()
    table.title = "Active Reward Terms"
    table.field_names = ["Index", "Name", "Weight"]
    table.align["Name"] = "l"
    table.align["Weight"] = "r"
    for index, (name, term_cfg) in enumerate(
      zip(self._term_names, self._term_cfgs, strict=False)
    ):
      table.add_row([index, name, term_cfg.weight])
    msg += table.get_string()
    msg += "\n"
    return msg

  # Properties.

  @property
  def active_terms(self) -> list[str]:
    """Names of the active (non-None) reward terms, in registration order."""
    return self._term_names

  # Methods.

  def reset(
    self, env_ids: torch.Tensor | slice | None = None
  ) -> dict[str, torch.Tensor]:
    """Reset episode sums for the given envs and return logging extras.

    Args:
      env_ids: Environments to reset; None resets all.

    Returns:
      Dict of ``Episode_Reward/<term>`` -> mean episodic sum normalized by
      the maximum episode length in seconds.
    """
    if env_ids is None:
      env_ids = slice(None)
    extras = {}
    for key in self._episode_sums.keys():
      episodic_sum_avg = torch.mean(self._episode_sums[key][env_ids])
      extras["Episode_Reward/" + key] = (
        episodic_sum_avg / self._env.max_episode_length_s
      )
      self._episode_sums[key][env_ids] = 0.0
    # Class-based terms carry internal state that must be reset too.
    for term_cfg in self._class_term_cfgs:
      term_cfg.func.reset(env_ids=env_ids)
    return extras

  def compute(self, dt: float) -> torch.Tensor:
    """Compute the total per-env reward for this step.

    Args:
      dt: Environment step duration; applied only when ``scale_by_dt`` is True.

    Returns:
      Tensor of shape (num_envs,) with the summed, weighted (and possibly
      dt-scaled) reward.
    """
    self._reward_buf[:] = 0.0
    scale = dt if self._scale_by_dt else 1.0
    for term_idx, (name, term_cfg) in enumerate(
      zip(self._term_names, self._term_cfgs, strict=False)
    ):
      # Zero-weight terms are skipped without evaluating their function.
      if term_cfg.weight == 0.0:
        self._step_reward[:, term_idx] = 0.0
        continue
      value = term_cfg.func(self._env, **term_cfg.params) * term_cfg.weight * scale
      # NaN/Inf can occur from corrupted physics state; zero them to avoid policy crash.
      value = torch.nan_to_num(value, nan=0.0, posinf=0.0, neginf=0.0)
      self._reward_buf += value
      self._episode_sums[name] += value
      # Undo dt scaling so _step_reward always holds the raw reward rate.
      self._step_reward[:, term_idx] = value / scale
    return self._reward_buf

  def get_active_iterable_terms(self, env_idx: int) -> list[tuple[str, list[float]]]:
    """Return (name, [unscaled step reward]) pairs for one environment."""
    terms = []
    for idx, name in enumerate(self._term_names):
      terms.append((name, [self._step_reward[env_idx, idx].cpu().item()]))
    return terms

  def get_term_cfg(self, term_name: str) -> RewardTermCfg:
    """Return the config of an active term.

    Raises:
      ValueError: If ``term_name`` is not an active term.
    """
    if term_name not in self._term_names:
      raise ValueError(f"Term '{term_name}' not found in active terms.")
    return self._term_cfgs[self._term_names.index(term_name)]

  def _prepare_terms(self) -> None:
    """Resolve configs into the active term lists; None terms are skipped."""
    for term_name, term_cfg in self.cfg.items():
      term_cfg: RewardTermCfg | None
      if term_cfg is None:
        print(f"term: {term_name} set to None, skipping...")
        continue
      self._resolve_common_term_cfg(term_name, term_cfg)
      self._term_names.append(term_name)
      self._term_cfgs.append(term_cfg)
      # Terms backed by classes with a reset() hook get reset with the env.
      if hasattr(term_cfg.func, "reset") and callable(term_cfg.func.reset):
        self._class_term_cfgs.append(term_cfg)
mjlab/src/mjlab/managers/scene_entity_config.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Configuration for scene entities used by manager terms."""
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass, field
|
| 4 |
+
from typing import NamedTuple
|
| 5 |
+
|
| 6 |
+
from mjlab.entity import Entity
|
| 7 |
+
from mjlab.scene import Scene
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class _FieldConfig(NamedTuple):
  """Configuration for a resolvable entity field.

  Bundles the attribute/method names used to resolve one component kind
  (joints, bodies, geoms, sites, actuators) on both SceneEntityCfg and Entity.
  """

  names_attr: str  # SceneEntityCfg/Entity attribute holding component names.
  ids_attr: str  # SceneEntityCfg attribute holding component IDs.
  find_method: str  # Entity method that maps names -> (ids, names).
  num_attr: str  # Entity attribute with the total component count.
  kind_label: str  # Human-readable kind used in error messages.
|
| 20 |
+
# One entry per resolvable component kind; SceneEntityCfg.resolve() iterates
# this list to resolve every field uniformly.
_FIELD_CONFIGS = [
  _FieldConfig("joint_names", "joint_ids", "find_joints", "num_joints", "joint"),
  _FieldConfig("body_names", "body_ids", "find_bodies", "num_bodies", "body"),
  _FieldConfig("geom_names", "geom_ids", "find_geoms", "num_geoms", "geom"),
  _FieldConfig("site_names", "site_ids", "find_sites", "num_sites", "site"),
  _FieldConfig(
    "actuator_names", "actuator_ids", "find_actuators", "num_actuators", "actuator"
  ),
]
|
| 30 |
+
|
| 31 |
+
@dataclass
class SceneEntityCfg:
  """Configuration for a scene entity that is used by the manager's term.

  This configuration allows flexible specification of entity components either by name
  or by ID. During resolution, it ensures consistency between names and IDs, and can
  optimize to slice(None) when all components are selected.
  """

  name: str
  """The name of the entity in the scene."""

  joint_names: str | tuple[str, ...] | None = None
  """Names of joints to include. Can be a single string or tuple."""

  joint_ids: list[int] | slice = field(default_factory=lambda: slice(None))
  """IDs of joints to include. Can be a list or slice."""

  body_names: str | tuple[str, ...] | None = None
  """Names of bodies to include. Can be a single string or tuple."""

  body_ids: list[int] | slice = field(default_factory=lambda: slice(None))
  """IDs of bodies to include. Can be a list or slice."""

  geom_names: str | tuple[str, ...] | None = None
  """Names of geometries to include. Can be a single string or tuple."""

  geom_ids: list[int] | slice = field(default_factory=lambda: slice(None))
  """IDs of geometries to include. Can be a list or slice."""

  site_names: str | tuple[str, ...] | None = None
  """Names of sites to include. Can be a single string or tuple."""

  site_ids: list[int] | slice = field(default_factory=lambda: slice(None))
  """IDs of sites to include. Can be a list or slice."""

  actuator_names: str | list[str] | None = None
  """Names of actuators to include. Can be a single string or list."""

  actuator_ids: list[int] | slice = field(default_factory=lambda: slice(None))
  """IDs of actuators to include. Can be a list or slice."""

  preserve_order: bool = False
  """If True, maintains the order of components as specified."""

  def resolve(self, scene: Scene) -> None:
    """Resolve names and IDs for all configured fields.

    This method ensures consistency between names and IDs for each field type.
    It handles three cases:
    1. Both names and IDs provided: Validates they match
    2. Only names provided: Computes IDs (optimizes to slice(None) if all selected)
    3. Only IDs provided: Computes names

    Args:
      scene: The scene containing the entity to resolve against.

    Raises:
      ValueError: If provided names and IDs are inconsistent.
      KeyError: If the entity name is not found in the scene.
    """
    entity = scene[self.name]

    for config in _FIELD_CONFIGS:
      self._resolve_field(entity, config)

  def _resolve_field(self, entity: Entity, config: _FieldConfig) -> None:
    """Resolve a single field's names and IDs.

    Mutates the corresponding ``*_names`` / ``*_ids`` attributes in place.

    Args:
      entity: The entity to resolve against.
      config: Field configuration specifying attribute names and methods.
    """
    names = getattr(self, config.names_attr)
    ids = getattr(self, config.ids_attr)

    # Early return if nothing to resolve.
    # (A non-list id value means the slice(None) default is still in place.)
    if names is None and not isinstance(ids, list):
      return

    # Get entity metadata.
    entity_all_names = getattr(entity, config.names_attr)
    entity_count = getattr(entity, config.num_attr)
    find_method = getattr(entity, config.find_method)

    # Normalize single values to lists for uniform processing.
    names = self._normalize_to_list(names)
    if names is not None:
      setattr(self, config.names_attr, names)

    if isinstance(ids, (int, list)):
      ids = self._normalize_to_list(ids)
      setattr(self, config.ids_attr, ids)

    # Handle three resolution cases.
    if names is not None and isinstance(ids, list):
      self._validate_consistency(
        names, ids, entity_all_names, find_method, config.kind_label
      )
    elif names is not None:
      self._resolve_names_to_ids(
        names, entity_all_names, entity_count, find_method, config.ids_attr
      )
    elif isinstance(ids, list):
      self._resolve_ids_to_names(ids, entity_all_names, config.names_attr)

  def _normalize_to_list(self, value: str | int | list | None) -> list | None:
    """Convert single values to lists for uniform processing."""
    if value is None:
      return None
    if isinstance(value, (str, int)):
      return [value]
    return value

  def _validate_consistency(
    self,
    names: list[str],
    ids: list[int],
    entity_all_names: list[str],
    find_method,
    kind_label: str,
  ) -> None:
    """Validate that provided names and IDs are consistent.

    Raises:
      ValueError: If names and IDs don't match.
    """
    found_ids, _ = find_method(names, preserve_order=self.preserve_order)
    computed_names = [entity_all_names[i] for i in ids]

    # Cross-check both directions: names -> ids and ids -> names.
    if found_ids != ids or computed_names != names:
      raise ValueError(
        f"Inconsistent {kind_label} names and indices. "
        f"Names {names} resolved to indices {found_ids}, "
        f"but indices {ids} (mapping to names {computed_names}) were provided."
      )

  def _resolve_names_to_ids(
    self,
    names: list[str],
    entity_all_names: list[str],
    entity_count: int,
    find_method,
    ids_attr: str,
  ) -> None:
    """Resolve names to IDs, optimizing to slice(None) when all are selected."""
    found_ids, _ = find_method(names, preserve_order=self.preserve_order)

    # Optimize to slice(None) if all components are selected in order.
    if len(found_ids) == entity_count and names == entity_all_names:
      setattr(self, ids_attr, slice(None))
    else:
      setattr(self, ids_attr, found_ids)

  def _resolve_ids_to_names(
    self, ids: list[int], entity_all_names: list[str], names_attr: str
  ) -> None:
    """Resolve IDs to their corresponding names."""
    resolved_names = [entity_all_names[i] for i in ids]
    setattr(self, names_attr, resolved_names)
|
mjlab/src/mjlab/managers/termination_manager.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Termination manager for computing done signals."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from copy import deepcopy
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import TYPE_CHECKING, Sequence
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from prettytable import PrettyTable
|
| 11 |
+
|
| 12 |
+
from mjlab.managers.manager_base import ManagerBase, ManagerTermBaseCfg
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from mjlab.envs.manager_based_rl_env import ManagerBasedRlEnv
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@dataclass
class TerminationTermCfg(ManagerTermBaseCfg):
  """Configuration for a termination term.

  Terms with ``time_out=True`` feed the truncated (time-limit) buffer;
  all others feed the terminated (failure) buffer.
  """

  time_out: bool = False
  """Whether the term contributes towards episodic timeouts."""
| 26 |
+
class TerminationManager(ManagerBase):
  """Manages termination conditions for the environment.

  The termination manager aggregates multiple termination terms to compute
  episode done signals. Terms can be either truncations (time-based) or
  terminations (failure conditions).
  """

  _env: ManagerBasedRlEnv

  def __init__(self, cfg: dict[str, TerminationTermCfg], env: ManagerBasedRlEnv):
    """Initialize the termination manager.

    Args:
      cfg: Mapping from term name to term config (None values are skipped).
      env: The environment the terms are evaluated against.
    """
    self._term_names: list[str] = list()
    self._term_cfgs: list[TerminationTermCfg] = list()
    self._class_term_cfgs: list[TerminationTermCfg] = list()

    # Deepcopy so _prepare_terms (called by the base __init__) can mutate
    # term configs without affecting the caller's dict.
    self.cfg = deepcopy(cfg)
    super().__init__(env)

    # Per-term done flags of the most recent compute() call.
    self._term_dones: dict[str, torch.Tensor] = dict()
    for term_name in self._term_names:
      self._term_dones[term_name] = torch.zeros(
        self.num_envs, device=self.device, dtype=torch.bool
      )
    self._truncated_buf = torch.zeros(
      self.num_envs, device=self.device, dtype=torch.bool
    )
    self._terminated_buf = torch.zeros_like(self._truncated_buf)

  def __str__(self) -> str:
    """Return a human-readable table of active termination terms."""
    msg = f"<TerminationManager> contains {len(self._term_names)} active terms.\n"
    table = PrettyTable()
    table.title = "Active Termination Terms"
    table.field_names = ["Index", "Name", "Time Out"]
    table.align["Name"] = "l"
    for index, (name, term_cfg) in enumerate(
      zip(self._term_names, self._term_cfgs, strict=False)
    ):
      table.add_row([index, name, term_cfg.time_out])
    msg += table.get_string()
    msg += "\n"
    return msg

  # Properties.

  @property
  def active_terms(self) -> list[str]:
    """Names of the active (non-None) termination terms."""
    return self._term_names

  @property
  def dones(self) -> torch.Tensor:
    """Combined done signal: truncated OR terminated, per env."""
    return self._truncated_buf | self._terminated_buf

  @property
  def time_outs(self) -> torch.Tensor:
    """Per-env truncation (time-limit) flags from the last compute()."""
    return self._truncated_buf

  @property
  def terminated(self) -> torch.Tensor:
    """Per-env failure-termination flags from the last compute()."""
    return self._terminated_buf

  # Methods.

  def reset(
    self, env_ids: torch.Tensor | slice | None = None
  ) -> dict[str, torch.Tensor]:
    """Collect per-term done counts for logging and reset class-based terms.

    Args:
      env_ids: Environments to account for; None selects all.

    Returns:
      Dict of ``Episode_Termination/<term>`` -> count of envs that term
      terminated among ``env_ids``.
    """
    if env_ids is None:
      env_ids = slice(None)
    extras = {}
    for key in self._term_dones.keys():
      extras["Episode_Termination/" + key] = torch.count_nonzero(
        self._term_dones[key][env_ids]
      ).item()
    # Class-based terms carry internal state that must be reset too.
    for term_cfg in self._class_term_cfgs:
      term_cfg.func.reset(env_ids=env_ids)
    return extras

  def compute(self) -> torch.Tensor:
    """Evaluate all terms and return the combined done signal.

    Returns:
      Bool tensor of shape (num_envs,): truncated OR terminated.
    """
    self._truncated_buf[:] = False
    self._terminated_buf[:] = False
    for name, term_cfg in zip(self._term_names, self._term_cfgs, strict=False):
      value = term_cfg.func(self._env, **term_cfg.params)
      # Route to the truncation or termination buffer based on the config.
      if term_cfg.time_out:
        self._truncated_buf |= value
      else:
        self._terminated_buf |= value
      self._term_dones[name][:] = value
    return self._truncated_buf | self._terminated_buf

  def get_term(self, name: str) -> torch.Tensor:
    """Return the per-env done flags of a single term (last compute())."""
    return self._term_dones[name]

  def get_term_cfg(self, term_name: str) -> TerminationTermCfg:
    """Return the config of an active term.

    Raises:
      ValueError: If ``term_name`` is not an active term.
    """
    if term_name not in self._term_names:
      raise ValueError(f"Term '{term_name}' not found in active terms.")
    return self._term_cfgs[self._term_names.index(term_name)]

  def get_active_iterable_terms(
    self, env_idx: int
  ) -> Sequence[tuple[str, Sequence[float]]]:
    """Return (name, [done flag as float]) pairs for one environment."""
    terms = []
    for key in self._term_dones.keys():
      terms.append((key, [self._term_dones[key][env_idx].float().cpu().item()]))
    return terms

  def _prepare_terms(self) -> None:
    """Resolve configs into the active term lists; None terms are skipped."""
    for term_name, term_cfg in self.cfg.items():
      term_cfg: TerminationTermCfg | None
      if term_cfg is None:
        print(f"term: {term_name} set to None, skipping...")
        continue
      self._resolve_common_term_cfg(term_name, term_cfg)
      self._term_names.append(term_name)
      self._term_cfgs.append(term_cfg)
      # Terms backed by classes with a reset() hook get reset with the env.
      if hasattr(term_cfg.func, "reset") and callable(term_cfg.func.reset):
        self._class_term_cfgs.append(term_cfg)
|
mjlab/src/mjlab/py.typed
ADDED
|
File without changes
|
mjlab/src/mjlab/rl/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from mjlab.rl.config import RslRlBaseRunnerCfg as RslRlBaseRunnerCfg
|
| 2 |
+
from mjlab.rl.config import RslRlModelCfg as RslRlModelCfg
|
| 3 |
+
from mjlab.rl.config import RslRlOnPolicyRunnerCfg as RslRlOnPolicyRunnerCfg
|
| 4 |
+
from mjlab.rl.config import RslRlPpoAlgorithmCfg as RslRlPpoAlgorithmCfg
|
| 5 |
+
from mjlab.rl.runner import MjlabOnPolicyRunner as MjlabOnPolicyRunner
|
| 6 |
+
from mjlab.rl.vecenv_wrapper import RslRlVecEnvWrapper as RslRlVecEnvWrapper
|
mjlab/src/mjlab/rl/config.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""RSL-RL configuration."""
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass, field
|
| 4 |
+
from typing import Any, Literal, Tuple
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@dataclass
|
| 8 |
+
class RslRlModelCfg:
|
| 9 |
+
"""Config for a single neural network model (Actor or Critic)."""
|
| 10 |
+
|
| 11 |
+
hidden_dims: Tuple[int, ...] = (128, 128, 128)
|
| 12 |
+
"""The hidden dimensions of the network."""
|
| 13 |
+
activation: str = "elu"
|
| 14 |
+
"""The activation function."""
|
| 15 |
+
obs_normalization: bool = False
|
| 16 |
+
"""Whether to normalize the observations. Default is False."""
|
| 17 |
+
init_noise_std: float = 1.0
|
| 18 |
+
"""The initial noise standard deviation."""
|
| 19 |
+
noise_std_type: Literal["scalar", "log"] = "scalar"
|
| 20 |
+
"""The type of noise standard deviation."""
|
| 21 |
+
stochastic: bool = False
|
| 22 |
+
"""Whether the model output is stochastic."""
|
| 23 |
+
cnn_cfg: dict[str, Any] | None = None
|
| 24 |
+
"""CNN encoder config. When set, class_name should be "CNNModel".
|
| 25 |
+
|
| 26 |
+
Passed to ``rsl_rl.modules.CNN``. Common keys: output_channels,
|
| 27 |
+
kernel_size, stride, padding, activation, global_pool, max_pool.
|
| 28 |
+
"""
|
| 29 |
+
class_name: str = "MLPModel"
|
| 30 |
+
"""Model class name resolved by RSL-RL (MLPModel or CNNModel)."""
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@dataclass
class RslRlPpoAlgorithmCfg:
  """Config for the PPO algorithm.

  Field values are forwarded to the RSL-RL algorithm class named by
  ``class_name``; defaults follow common PPO practice for locomotion tasks.
  """

  num_learning_epochs: int = 5
  """The number of learning epochs per update."""
  num_mini_batches: int = 4
  """The number of mini-batches per update.
  mini batch size = num_envs * num_steps / num_mini_batches
  """
  learning_rate: float = 1e-3
  """The learning rate."""
  schedule: Literal["adaptive", "fixed"] = "adaptive"
  """The learning rate schedule."""
  gamma: float = 0.99
  """The discount factor."""
  lam: float = 0.95
  """The lambda parameter for Generalized Advantage Estimation (GAE)."""
  entropy_coef: float = 0.005
  """The coefficient for the entropy loss."""
  desired_kl: float = 0.01
  """The desired KL divergence between the new and old policies."""
  max_grad_norm: float = 1.0
  """The maximum gradient norm for the policy."""
  value_loss_coef: float = 1.0
  """The coefficient for the value loss."""
  use_clipped_value_loss: bool = True
  """Whether to use clipped value loss."""
  clip_param: float = 0.2
  """The clipping parameter for the policy."""
  normalize_advantage_per_mini_batch: bool = False
  """Whether to normalize the advantage per mini-batch. Default is False. If True, the
  advantage is normalized over the mini-batches only. Otherwise, the advantage is
  normalized over the entire collected trajectories.
  """
  optimizer: Literal["adam", "adamw", "sgd", "rmsprop"] = "adam"
  """The optimizer to use."""
  share_cnn_encoders: bool = False
  """Share CNN encoders between actor and critic."""
  class_name: str = "PPO"
  """Algorithm class name resolved by RSL-RL."""
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
@dataclass
|
| 77 |
+
class RslRlBaseRunnerCfg:
|
| 78 |
+
seed: int = 42
|
| 79 |
+
"""The seed for the experiment. Default is 42."""
|
| 80 |
+
num_steps_per_env: int = 24
|
| 81 |
+
"""The number of steps per environment update."""
|
| 82 |
+
max_iterations: int = 300
|
| 83 |
+
"""The maximum number of iterations."""
|
| 84 |
+
obs_groups: dict[str, tuple[str, ...]] = field(
|
| 85 |
+
default_factory=lambda: {"actor": ("actor",), "critic": ("critic",)},
|
| 86 |
+
)
|
| 87 |
+
save_interval: int = 50
|
| 88 |
+
"""The number of iterations between saves."""
|
| 89 |
+
experiment_name: str = "exp1"
|
| 90 |
+
"""Directory name used to group runs under
|
| 91 |
+
``logs/rsl_rl/{experiment_name}/``."""
|
| 92 |
+
run_name: str = ""
|
| 93 |
+
"""Optional label appended to the timestamped run directory
|
| 94 |
+
(e.g. ``2025-01-27_14-30-00_{run_name}``). Also becomes the
|
| 95 |
+
display name for the run in wandb."""
|
| 96 |
+
logger: Literal["wandb", "tensorboard"] = "wandb"
|
| 97 |
+
"""The logger to use. Default is wandb."""
|
| 98 |
+
wandb_project: str = "mjlab"
|
| 99 |
+
"""The wandb project name."""
|
| 100 |
+
wandb_tags: Tuple[str, ...] = ()
|
| 101 |
+
"""Tags for the wandb run. Default is empty tuple."""
|
| 102 |
+
resume: bool = False
|
| 103 |
+
"""Whether to resume the experiment. Default is False."""
|
| 104 |
+
load_run: str = ".*"
|
| 105 |
+
"""The run directory to load. Default is ".*" which means all runs. If regex
|
| 106 |
+
expression, the latest (alphabetical order) matching run will be loaded.
|
| 107 |
+
"""
|
| 108 |
+
load_checkpoint: str = "model_.*.pt"
|
| 109 |
+
"""The checkpoint file to load. Default is "model_.*.pt" (all). If regex expression,
|
| 110 |
+
the latest (alphabetical order) matching file will be loaded.
|
| 111 |
+
"""
|
| 112 |
+
clip_actions: float | None = None
|
| 113 |
+
"""The clipping range for action values. If None (default), no clipping is applied."""
|
| 114 |
+
upload_model: bool = True
|
| 115 |
+
"""Whether to upload model files (.pt, .onnx) to W&B on save. Set to
|
| 116 |
+
False to keep metric logging but avoid storage usage. Default is True."""
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
@dataclass
class RslRlOnPolicyRunnerCfg(RslRlBaseRunnerCfg):
  """On-policy (PPO) runner configuration.

  Extends the base runner config with actor/critic model configs and the
  PPO algorithm config. The actor defaults to stochastic (it samples
  actions); the critic is deterministic (it outputs a value).
  """

  class_name: str = "OnPolicyRunner"
  """The runner class name. Default is OnPolicyRunner."""
  actor: RslRlModelCfg = field(default_factory=lambda: RslRlModelCfg(stochastic=True))
  """The actor configuration."""
  critic: RslRlModelCfg = field(default_factory=lambda: RslRlModelCfg(stochastic=False))
  """The critic configuration."""
  algorithm: RslRlPpoAlgorithmCfg = field(default_factory=RslRlPpoAlgorithmCfg)
  """The algorithm configuration."""
|
mjlab/src/mjlab/rl/exporter_utils.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Shared utilities for ONNX policy export across RL tasks."""
|
| 2 |
+
|
| 3 |
+
import onnx
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from mjlab.entity import Entity
|
| 7 |
+
from mjlab.envs import ManagerBasedRlEnv
|
| 8 |
+
from mjlab.envs.mdp.actions import JointPositionAction
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def list_to_csv_str(arr, *, decimals: int = 3, delimiter: str = ",") -> str:
  """Render *arr* as a delimited string, rounding numeric entries.

  Ints and floats are formatted with ``decimals`` fractional digits; every
  other element is passed through ``str()`` unchanged.

  Args:
    arr: Iterable of numbers and/or strings.
    decimals: Fractional digits used for numeric entries.
    delimiter: Separator placed between entries.

  Returns:
    The joined string (empty for an empty input).
  """

  def _render(item) -> str:
    # Numbers get fixed-point formatting; anything else is stringified.
    if isinstance(item, (int, float)):
      return f"{item:.{decimals}f}"
    return str(item)

  return delimiter.join(_render(item) for item in arr)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_base_metadata(
  env: ManagerBasedRlEnv, run_path: str
) -> dict[str, list | str | float]:
  """Get base metadata common to all RL policy exports.

  Collects joint ordering, PD gains, default pose, and command/observation
  names from the environment so they can be embedded in an exported ONNX
  policy (see :func:`attach_metadata_to_onnx`).

  Args:
    env: The RL environment.
    run_path: W&B run path or other identifier.

  Returns:
    Dictionary of metadata fields that are common across all tasks.
  """
  robot: Entity = env.scene["robot"]
  joint_action = env.action_manager.get_term("joint_pos")
  assert isinstance(joint_action, JointPositionAction)
  # Build mapping from joint name to actuator ID for natural joint order.
  # Each spec actuator controls exactly one joint (via its target field).
  joint_name_to_ctrl_id: dict[str, int] = {}
  for actuator in robot.spec.actuators:
    # target is a path like "body/joint_name"; keep only the joint name.
    joint_name = actuator.target.split("/")[-1]
    joint_name_to_ctrl_id[joint_name] = actuator.id
  # Get actuator IDs in natural joint order (same order as robot.joint_names).
  ctrl_ids_natural = [
    joint_name_to_ctrl_id[jname]
    for jname in robot.joint_names  # global joint order
    if jname in joint_name_to_ctrl_id  # skip non-actuated joints
  ]
  # NOTE(review): assumes MuJoCo position-actuator convention where
  # gainprm[0] = kp and biasprm[2] = -kd (hence the sign flip) — confirm
  # against the actuator setup used by this robot.
  joint_stiffness = env.sim.mj_model.actuator_gainprm[ctrl_ids_natural, 0]
  joint_damping = -env.sim.mj_model.actuator_biasprm[ctrl_ids_natural, 2]
  return {
    "run_path": run_path,
    "joint_names": list(robot.joint_names),
    "joint_stiffness": joint_stiffness.tolist(),
    "joint_damping": joint_damping.tolist(),
    "default_joint_pos": robot.data.default_joint_pos[0].cpu().tolist(),
    "command_names": list(env.command_manager.active_terms),
    "observation_names": env.observation_manager.active_terms["actor"],
    # _scale may be a scalar or a per-env tensor; export the first env's
    # row when it is a tensor, otherwise pass the scalar through.
    "action_scale": joint_action._scale[0].cpu().tolist()
    if isinstance(joint_action._scale, torch.Tensor)
    else joint_action._scale,
  }
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def attach_metadata_to_onnx(
  onnx_path: str, metadata: dict[str, list | str | float]
) -> None:
  """Attach metadata to an ONNX model file.

  Each entry is stored in the model's ``metadata_props``; list values are
  serialized with :func:`list_to_csv_str`, everything else via ``str()``.
  The file at ``onnx_path`` is overwritten in place.

  Args:
    onnx_path: Path to the ONNX model file.
    metadata: Dictionary of metadata key-value pairs to attach.
  """
  model = onnx.load(onnx_path)

  for key, value in metadata.items():
    serialized = list_to_csv_str(value) if isinstance(value, list) else str(value)
    prop = onnx.StringStringEntryProto()
    prop.key = key
    prop.value = serialized
    model.metadata_props.append(prop)

  onnx.save(model, onnx_path)
|
mjlab/src/mjlab/rl/runner.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from rsl_rl.env import VecEnv
|
| 5 |
+
from rsl_rl.runners import OnPolicyRunner
|
| 6 |
+
|
| 7 |
+
from mjlab.rl.vecenv_wrapper import RslRlVecEnvWrapper
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class MjlabOnPolicyRunner(OnPolicyRunner):
  """Base runner that persists environment state across checkpoints."""

  # Narrowed type for this subclass: save/load below rely on the wrapper's
  # ``unwrapped`` attribute.
  env: RslRlVecEnvWrapper

  def __init__(
    self,
    env: VecEnv,
    train_cfg: dict,
    log_dir: str | None = None,
    device: str = "cpu",
  ) -> None:
    """Initialize the runner.

    Args:
      env: Vectorized environment (expected to be an RslRlVecEnvWrapper).
      train_cfg: Runner/model/algorithm configuration dictionary.
      log_dir: Directory for logs and checkpoints, or None.
      device: Torch device string used for training.
    """
    # Strip None-valued cnn_cfg so MLPModel doesn't receive it.
    for key in ("actor", "critic"):
      if key in train_cfg and train_cfg[key].get("cnn_cfg") is None:
        train_cfg[key].pop("cnn_cfg", None)
    super().__init__(env, train_cfg, log_dir, device)

  def export_policy_to_onnx(
    self, path: str, filename: str = "policy.onnx", verbose: bool = False
  ) -> None:
    """Export policy to ONNX format using legacy export path.

    Overrides the base implementation to set dynamo=False, avoiding warnings about
    dynamic_axes being deprecated with the new TorchDynamo export path
    (torch>=2.9 default).

    Args:
      path: Output directory (created if missing).
      filename: File name written inside ``path``.
      verbose: Forwarded to both the policy conversion and the exporter.
    """
    onnx_model = self.alg.get_policy().as_onnx(verbose=verbose)
    # Export from CPU in eval mode so device placement and train-time
    # behavior don't leak into the exported graph.
    onnx_model.to("cpu")
    onnx_model.eval()
    os.makedirs(path, exist_ok=True)
    torch.onnx.export(
      onnx_model,
      onnx_model.get_dummy_inputs(),  # type: ignore[operator]
      os.path.join(path, filename),
      export_params=True,
      opset_version=18,
      verbose=verbose,
      input_names=onnx_model.input_names,  # type: ignore[arg-type]
      output_names=onnx_model.output_names,  # type: ignore[arg-type]
      dynamic_axes={},
      dynamo=False,
    )

  def save(self, path: str, infos=None) -> None:
    """Save checkpoint.

    Extends the base implementation to persist the environment's
    common_step_counter and to respect the ``upload_model`` config flag.

    Args:
      path: Destination file path for the checkpoint.
      infos: Optional extra data merged into the stored "infos" dict.
    """
    env_state = {"common_step_counter": self.env.unwrapped.common_step_counter}
    infos = {**(infos or {}), "env_state": env_state}
    # Inline base OnPolicyRunner.save() to conditionally gate W&B upload.
    saved_dict = self.alg.save()
    saved_dict["iter"] = self.current_learning_iteration
    saved_dict["infos"] = infos
    torch.save(saved_dict, path)
    if self.cfg["upload_model"]:
      self.logger.save_model(path, self.current_learning_iteration)

  def load(
    self,
    path: str,
    load_cfg: dict | None = None,
    strict: bool = True,
    map_location: str | None = None,
  ) -> dict:
    """Load checkpoint.

    Extends the base implementation to:
    1. Restore common_step_counter to preserve curricula state.
    2. Migrate legacy checkpoints (actor.* -> mlp.*, actor_obs_normalizer.*
       -> obs_normalizer.*) to the current format (rsl-rl>=4.0).

    Args:
      path: Checkpoint file path.
      load_cfg: Optional config forwarded to the algorithm's ``load``.
      strict: Whether state-dict loading is strict.
      map_location: Optional ``torch.load`` map_location.

    Returns:
      The "infos" dict stored in the checkpoint.
    """
    # weights_only=False: the checkpoint stores non-tensor objects (infos).
    loaded_dict = torch.load(path, map_location=map_location, weights_only=False)

    # Legacy format kept one combined state dict; split and re-prefix it
    # into the separate actor/critic dicts the new format expects.
    if "model_state_dict" in loaded_dict:
      print(f"Detected legacy checkpoint at {path}. Migrating to new format...")
      model_state_dict = loaded_dict.pop("model_state_dict")
      actor_state_dict = {}
      critic_state_dict = {}

      for key, value in model_state_dict.items():
        # Migrate actor keys.
        if key.startswith("actor."):
          new_key = key.replace("actor.", "mlp.")
          actor_state_dict[new_key] = value
        elif key.startswith("actor_obs_normalizer."):
          new_key = key.replace("actor_obs_normalizer.", "obs_normalizer.")
          actor_state_dict[new_key] = value
        elif key in ["std", "log_std"]:
          # Noise std parameters belong to the (stochastic) actor.
          actor_state_dict[key] = value

        # Migrate critic keys.
        if key.startswith("critic."):
          new_key = key.replace("critic.", "mlp.")
          critic_state_dict[new_key] = value
        elif key.startswith("critic_obs_normalizer."):
          new_key = key.replace("critic_obs_normalizer.", "obs_normalizer.")
          critic_state_dict[new_key] = value

      loaded_dict["actor_state_dict"] = actor_state_dict
      loaded_dict["critic_state_dict"] = critic_state_dict

    load_iteration = self.alg.load(loaded_dict, load_cfg, strict)
    if load_iteration:
      self.current_learning_iteration = loaded_dict["iter"]

    infos = loaded_dict["infos"]
    # Restore persisted env state (e.g. curricula keyed off the global
    # step counter) so resumed runs continue where they left off.
    if infos and "env_state" in infos:
      self.env.unwrapped.common_step_counter = infos["env_state"]["common_step_counter"]
    return infos
|
mjlab/src/mjlab/rl/spatial_softmax.py
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CNN with a spatial softmax layer."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing import Any
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
from rsl_rl.models.cnn_model import CNNModel
|
| 10 |
+
from rsl_rl.models.mlp_model import MLPModel
|
| 11 |
+
from rsl_rl.modules import CNN
|
| 12 |
+
from tensordict import TensorDict
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class SpatialSoftmax(nn.Module):
  """Spatial soft-argmax over feature maps.

  Given input of shape ``(B, C, H, W)``, softmaxes each channel over its
  spatial locations and returns the expected (x, y) coordinate per channel,
  producing output of shape ``(B, C * 2)``.

  Args:
    height: Height of the input feature maps.
    width: Width of the input feature maps.
    temperature: Softmax temperature; lower values sharpen the distribution.
  """

  def __init__(self, height: int, width: int, temperature: float = 1.0) -> None:
    super().__init__()
    # Normalised coordinate grids spanning [-1, 1] in each dimension.
    grid_x, grid_y = torch.meshgrid(
      torch.linspace(-1.0, 1.0, height),
      torch.linspace(-1.0, 1.0, width),
      indexing="ij",
    )
    # Buffers (not parameters) so the grids follow .to(device) but are
    # never trained.
    self.register_buffer("pos_x", grid_x.reshape(1, 1, -1))
    self.register_buffer("pos_y", grid_y.reshape(1, 1, -1))
    self.temperature = temperature

  def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Compute spatial soft-argmax.

    Args:
      x: Feature maps of shape ``(B, C, H, W)``.

    Returns:
      Keypoint coordinates of shape ``(B, C * 2)``.
    """
    batch, channels, height, width = x.shape
    # Collapse the spatial dimensions: (B, C, H*W).
    flat = x.reshape(batch, channels, height * width)
    attention = torch.softmax(flat / self.temperature, dim=-1)
    # Expected coordinate under the per-channel attention distribution.
    px: torch.Tensor = self.pos_x  # type: ignore[assignment]
    py: torch.Tensor = self.pos_y  # type: ignore[assignment]
    mean_x = (attention * px).sum(dim=-1)
    mean_y = (attention * py).sum(dim=-1)
    # Interleave per-channel (x, y) pairs into a flat (B, C*2) vector.
    return torch.stack([mean_x, mean_y], dim=-1).reshape(batch, channels * 2)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class SpatialSoftmaxCNN(nn.Module):
  """CNN encoder with spatial-softmax pooling.

  Wraps ``rsl_rl.modules.CNN`` (created with ``global_pool="none"``
  and ``flatten=False``) followed by :class:`SpatialSoftmax`. Exposes the same
  interface as ``CNN``: :attr:`output_dim` (int) and :attr:`output_channels`
  (``None``, signalling flattened output).

  Args:
    input_dim: ``(H, W)`` of the input images.
    input_channels: Number of input channels.
    temperature: Temperature for the spatial softmax.
    **cnn_kwargs: Remaining keyword arguments forwarded to ``rsl_rl.modules.CNN``.
  """

  def __init__(
    self,
    input_dim: tuple[int, int],
    input_channels: int,
    temperature: float = 1.0,
    **cnn_kwargs: Any,
  ) -> None:
    super().__init__()
    # Override pooling/flatten — spatial softmax replaces these, so any
    # caller-supplied values are discarded rather than passed twice.
    cnn_kwargs.pop("global_pool", None)
    cnn_kwargs.pop("flatten", None)
    self.cnn = CNN(
      input_dim=input_dim,
      input_channels=input_channels,
      global_pool="none",
      flatten=False,
      **cnn_kwargs,
    )
    # cnn.output_dim is (H, W) when flatten=False.
    out_h, out_w = self.cnn.output_dim  # type: ignore[misc]
    num_channels: int = self.cnn.output_channels  # type: ignore[assignment]
    self.spatial_softmax = SpatialSoftmax(out_h, out_w, temperature)
    # Spatial softmax emits one (x, y) pair per channel.
    self._output_dim = num_channels * 2

  @property
  def output_dim(self) -> int:
    """Total flattened output dimension (C * 2)."""
    return self._output_dim

  @property
  def output_channels(self) -> None:
    """Always ``None`` (output is flattened keypoints)."""
    return None

  def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Encode images to keypoints: CNN features, then spatial soft-argmax."""
    features = self.cnn(x)
    return self.spatial_softmax(features)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class SpatialSoftmaxCNNModel(CNNModel):
  """CNN model that uses spatial-softmax pooling.

  Drop-in replacement for ``rsl_rl.models.CNNModel``. The only difference is that each
  CNN encoder is wrapped with :class:`SpatialSoftmaxCNN` instead of a plain ``CNN``.

  The ``spatial_softmax_temperature`` parameter is extracted from ``cnn_cfg`` before
  the remaining keys are forwarded to ``CNN``.
  """

  def __init__(
    self,
    obs: TensorDict,
    obs_groups: dict[str, list[str]],
    obs_set: str,
    output_dim: int,
    cnn_cfg: dict[str, dict] | dict[str, Any],
    cnns: nn.ModuleDict | None = None,
    hidden_dims: tuple[int] | list[int] = [256, 256, 256],  # noqa: B006
    activation: str = "elu",
    obs_normalization: bool = False,
    stochastic: bool = False,
    init_noise_std: float = 1.0,
    noise_std_type: str = "scalar",
    state_dependent_std: bool = False,
  ) -> None:
    # Separate 1D / 2D observation groups (sets self.obs_groups_2d,
    # obs_dims_2d, obs_channels_2d; returns 1D info for MLPModel).
    self._get_obs_dim(obs, obs_groups, obs_set)

    if cnns is not None:
      # Encoder sharing: reuse the other model's CNNs verbatim.
      if set(cnns.keys()) != set(self.obs_groups_2d):
        raise ValueError(
          "The 2D observations must be identical for all models sharing CNN encoders."
        )
      print(
        "Sharing CNN encoders between models, the CNN "
        "configurations of the receiving model are ignored."
      )
      _cnns = cnns
    else:
      # Expand a single flat config to per-group configs.
      if not all(isinstance(v, dict) for v in cnn_cfg.values()):
        cnn_cfg = {group: cnn_cfg for group in self.obs_groups_2d}
      assert len(cnn_cfg) == len(self.obs_groups_2d), (
        "The number of CNN configurations must match the "
        "number of 2D observation groups."
      )
      _cnns = {}
      for idx, obs_group in enumerate(self.obs_groups_2d):
        # Copy before popping so the caller's config dict is untouched.
        group_cfg = dict(cnn_cfg[obs_group])
        group_cfg.pop("spatial_softmax", None)
        temperature = group_cfg.pop("spatial_softmax_temperature", 1.0)
        _cnns[obs_group] = SpatialSoftmaxCNN(
          input_dim=self.obs_dims_2d[idx],
          input_channels=self.obs_channels_2d[idx],
          temperature=temperature,
          **group_cfg,
        )

    # Total latent size fed to the MLP head; spatial-softmax encoders
    # always emit flattened output (output_channels is None).
    self.cnn_latent_dim = 0
    for cnn in _cnns.values():
      if cnn.output_channels is not None:
        raise ValueError(
          "The output of the CNN must be flattened before passing it to the MLP."
        )
      self.cnn_latent_dim += int(cnn.output_dim)  # type: ignore[arg-type]

    # Call MLPModel.__init__ directly (skipping CNNModel.__init__) so the
    # MLP head is built without CNNModel re-creating plain CNN encoders.
    MLPModel.__init__(
      self,
      obs,
      obs_groups,
      obs_set,
      output_dim,
      hidden_dims,
      activation,
      obs_normalization,
      stochastic,
      init_noise_std,
      noise_std_type,
      state_dependent_std,
    )

    # Register encoders AFTER MLPModel.__init__ so nn.Module is initialized;
    # reuse the shared ModuleDict as-is, otherwise wrap the fresh dict.
    if isinstance(_cnns, nn.ModuleDict):
      self.cnns = _cnns
    else:
      self.cnns = nn.ModuleDict(_cnns)
|
mjlab/src/mjlab/rl/vecenv_wrapper.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from rsl_rl.env import VecEnv
|
| 3 |
+
from tensordict import TensorDict
|
| 4 |
+
|
| 5 |
+
from mjlab.envs import ManagerBasedRlEnv, ManagerBasedRlEnvCfg
|
| 6 |
+
from mjlab.utils.spaces import Space
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class RslRlVecEnvWrapper(VecEnv):
  """Adapter exposing a ManagerBasedRlEnv through the rsl_rl VecEnv API.

  Optionally clamps incoming actions to ``[-clip_actions, clip_actions]``
  and collapses the (terminated, truncated) pair into the single ``dones``
  tensor rsl_rl expects.
  """

  def __init__(
    self,
    env: ManagerBasedRlEnv,
    clip_actions: float | None = None,
  ):
    self.env = env
    # None disables clipping entirely (and leaves the action space alone).
    self.clip_actions = clip_actions

    self.num_envs = self.unwrapped.num_envs
    self.device = torch.device(self.unwrapped.device)
    self.max_episode_length = self.unwrapped.max_episode_length
    self.num_actions = self.unwrapped.action_manager.total_action_dim
    self._modify_action_space()

    # Reset at the start since rsl_rl does not call reset.
    self.env.reset()

  @property
  def cfg(self) -> ManagerBasedRlEnvCfg:
    return self.unwrapped.cfg

  @property
  def render_mode(self) -> str | None:
    return self.env.render_mode

  @property
  def observation_space(self) -> Space:
    return self.env.observation_space

  @property
  def action_space(self) -> Space:
    return self.env.action_space

  @classmethod
  def class_name(cls) -> str:
    return cls.__name__

  @property
  def unwrapped(self) -> ManagerBasedRlEnv:
    return self.env

  # Properties.

  @property
  def episode_length_buf(self) -> torch.Tensor:
    return self.unwrapped.episode_length_buf

  @episode_length_buf.setter
  def episode_length_buf(self, value: torch.Tensor) -> None:  # pyright: ignore[reportIncompatibleVariableOverride]
    self.unwrapped.episode_length_buf = value

  def seed(self, seed: int = -1) -> int:
    return self.unwrapped.seed(seed)

  def get_observations(self) -> TensorDict:
    # Recompute observations without stepping (rsl_rl queries this at the
    # start of a rollout).
    obs_dict = self.unwrapped.observation_manager.compute()
    return TensorDict(obs_dict, batch_size=[self.num_envs])

  def reset(self) -> tuple[TensorDict, dict]:
    obs_dict, extras = self.env.reset()
    return TensorDict(obs_dict, batch_size=[self.num_envs]), extras

  def step(
    self, actions: torch.Tensor
  ) -> tuple[TensorDict, torch.Tensor, torch.Tensor, dict]:
    """Step the wrapped env; returns (obs, reward, dones, extras)."""
    if self.clip_actions is not None:
      actions = torch.clamp(actions, -self.clip_actions, self.clip_actions)
    obs_dict, rew, terminated, truncated, extras = self.env.step(actions)
    # rsl_rl consumes a single done flag per env.
    term_or_trunc = terminated | truncated
    assert isinstance(rew, torch.Tensor)
    assert isinstance(term_or_trunc, torch.Tensor)
    dones = term_or_trunc.to(dtype=torch.long)
    # For infinite-horizon tasks, surface truncations so rsl_rl can
    # bootstrap the value function on time-outs.
    if not self.cfg.is_finite_horizon:
      extras["time_outs"] = truncated
    return (
      TensorDict(obs_dict, batch_size=[self.num_envs]),
      rew,
      dones,
      extras,
    )

  def close(self) -> None:
    return self.env.close()

  # Private methods.

  def _modify_action_space(self) -> None:
    """Shrink the env's action space to the clipping range (no-op if unset)."""
    if self.clip_actions is None:
      return

    # Local import keeps the dependency off the module's hot import path.
    from mjlab.utils.spaces import Box, batch_space

    self.unwrapped.single_action_space = Box(
      shape=(self.num_actions,), low=-self.clip_actions, high=self.clip_actions
    )
    self.unwrapped.action_space = batch_space(
      self.unwrapped.single_action_space, self.num_envs
    )
|
mjlab/src/mjlab/scene/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from mjlab.scene.scene import Scene as Scene
|
| 2 |
+
from mjlab.scene.scene import SceneCfg as SceneCfg
|
mjlab/src/mjlab/scene/scene.py
ADDED
|
@@ -0,0 +1,227 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import warnings
|
| 4 |
+
from dataclasses import dataclass, field
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from typing import Any, Callable
|
| 7 |
+
|
| 8 |
+
import mujoco
|
| 9 |
+
import mujoco_warp as mjwarp
|
| 10 |
+
import numpy as np
|
| 11 |
+
import torch
|
| 12 |
+
|
| 13 |
+
from mjlab.entity import Entity, EntityCfg
|
| 14 |
+
from mjlab.sensor import BuiltinSensor, Sensor, SensorCfg
|
| 15 |
+
from mjlab.sensor.camera_sensor import CameraSensor
|
| 16 |
+
from mjlab.sensor.raycast_sensor import RayCastSensor
|
| 17 |
+
from mjlab.sensor.sensor_context import SensorContext
|
| 18 |
+
from mjlab.terrains.terrain_importer import TerrainImporter, TerrainImporterCfg
|
| 19 |
+
|
| 20 |
+
_SCENE_XML = Path(__file__).parent / "scene.xml"
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@dataclass(kw_only=True)
class SceneCfg:
  """Configuration for building a :class:`Scene`."""

  # Number of parallel environment instances.
  num_envs: int = 1
  # Spacing (meters) between environment origins when no terrain is used.
  env_spacing: float = 2.0
  # Optional terrain configuration; when set, terrain provides env origins.
  terrain: TerrainImporterCfg | None = None
  # Entities to spawn, keyed by name (name becomes the attach prefix).
  entities: dict[str, EntityCfg] = field(default_factory=dict)
  # Sensor configurations added to the scene spec.
  sensors: tuple[SensorCfg, ...] = field(default_factory=tuple)
  # Optional override for the model's statistic extent (mjStatistic.extent).
  extent: float | None = None
  # Optional hook to mutate the assembled MjSpec before compilation.
  spec_fn: Callable[[mujoco.MjSpec], None] | None = None
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class Scene:
  """Assembles entities, terrain, and sensors into a single MuJoCo spec.

  The scene starts from a base XML (visualization defaults), attaches the
  configured terrain and entities, merges per-entity keyframes into one
  scene keyframe, and registers sensors. After the model is compiled
  externally, :meth:`initialize` binds all elements to the simulation.
  """

  def __init__(self, scene_cfg: SceneCfg, device: str) -> None:
    """Build the scene spec from the given configuration.

    Args:
      scene_cfg: Scene configuration (entities, terrain, sensors, ...).
      device: Torch device string used for all scene tensors.
    """
    self._cfg = scene_cfg
    self._device = device
    self._entities: dict[str, Entity] = {}
    self._sensors: dict[str, Sensor] = {}
    self._terrain: TerrainImporter | None = None
    # Populated lazily in initialize(); used when no terrain provides origins.
    self._default_env_origins: torch.Tensor | None = None
    self._sensor_context: SensorContext | None = None

    self._spec = mujoco.MjSpec.from_file(str(_SCENE_XML))
    if self._cfg.extent is not None:
      self._spec.stat.extent = self._cfg.extent
    # Order matters: terrain first, then entities, then sensors that may
    # reference entities; the user hook runs last on the assembled spec.
    self._add_terrain()
    self._add_entities()
    self._add_sensors()
    if self._cfg.spec_fn is not None:
      self._cfg.spec_fn(self._spec)

  def compile(self) -> mujoco.MjModel:
    """Compile the assembled spec into an MjModel."""
    return self._spec.compile()

  def to_zip(self, path: Path) -> None:
    """Export the scene to a zip file.

    Warning: The generated zip may require manual adjustment of asset paths
    to be reloadable. Specifically, you may need to add assetdir="assets"
    to the compiler directive in the XML.

    Args:
      path: Output path for the zip file.

    TODO: Verify if this is fixed in future MuJoCo releases.
    """
    with path.open("wb") as f:
      mujoco.MjSpec.to_zip(self._spec, f)

  # Attributes.

  @property
  def spec(self) -> mujoco.MjSpec:
    """The underlying MjSpec being assembled."""
    return self._spec

  @property
  def env_origins(self) -> torch.Tensor:
    """Per-environment origin positions, shape (num_envs, 3).

    Terrain-provided origins take precedence; otherwise the default
    zero origins created in initialize() are returned.
    """
    if self._terrain is not None:
      assert self._terrain.env_origins is not None
      return self._terrain.env_origins
    assert self._default_env_origins is not None
    return self._default_env_origins

  @property
  def env_spacing(self) -> float:
    """Configured spacing between environment origins."""
    return self._cfg.env_spacing

  @property
  def entities(self) -> dict[str, Entity]:
    """Mapping from entity name to Entity instance."""
    return self._entities

  @property
  def sensors(self) -> dict[str, Sensor]:
    """Mapping from sensor name to Sensor instance."""
    return self._sensors

  @property
  def terrain(self) -> TerrainImporter | None:
    """The terrain importer, or None if no terrain is configured."""
    return self._terrain

  @property
  def num_envs(self) -> int:
    """Number of parallel environments."""
    return self._cfg.num_envs

  @property
  def device(self) -> str:
    """Torch device string for scene tensors."""
    return self._device

  def __getitem__(self, key: str) -> Any:
    """Look up a scene element by name.

    Sensors take precedence over entities on a name collision; the key
    "terrain" resolves to the terrain importer.

    Raises:
      KeyError: If no element with that name exists (message lists what is
        available).
    """
    if key == "terrain":
      if self._terrain is None:
        raise KeyError("No terrain configured in this scene.")
      return self._terrain

    if key in self._sensors:
      return self._sensors[key]
    if key in self._entities:
      return self._entities[key]

    # Not found, raise helpful error.
    available = list(self._entities.keys()) + list(self._sensors.keys())
    if self._terrain is not None:
      available.append("terrain")
    raise KeyError(f"Scene element '{key}' not found. Available: {available}")

  # Methods.

  @property
  def sensor_context(self) -> SensorContext | None:
    """Shared sensing resources, or None if no cameras/raycasts."""
    return self._sensor_context

  def initialize(
    self,
    mj_model: mujoco.MjModel,
    model: mjwarp.Model,
    data: mjwarp.Data,
  ):
    """Bind entities and sensors to the compiled model and warp buffers.

    Args:
      mj_model: Compiled CPU-side MuJoCo model.
      model: mujoco_warp model handle.
      data: mujoco_warp data handle.
    """
    self._default_env_origins = torch.zeros(
      (self._cfg.num_envs, 3), device=self._device, dtype=torch.float32
    )
    for ent in self._entities.values():
      ent.initialize(mj_model, model, data, self._device)
    for sensor in self._sensors.values():
      sensor.initialize(mj_model, model, data, self._device)

    # Create SensorContext if any sensors require it.
    ctx_sensors = [s for s in self._sensors.values() if s.requires_sensor_context]
    if ctx_sensors:
      camera_sensors = [s for s in ctx_sensors if isinstance(s, CameraSensor)]
      raycast_sensors = [s for s in ctx_sensors if isinstance(s, RayCastSensor)]
      self._sensor_context = SensorContext(
        mj_model=mj_model,
        model=model,
        data=data,
        camera_sensors=camera_sensors,
        raycast_sensors=raycast_sensors,
        device=self._device,
      )

  def reset(self, env_ids: torch.Tensor | slice | None = None) -> None:
    """Reset all entities and sensors for the given environments.

    Args:
      env_ids: Environment indices to reset; None resets all.
    """
    for ent in self._entities.values():
      ent.reset(env_ids)
    for sensor in self._sensors.values():
      sensor.reset(env_ids)

  def update(self, dt: float) -> None:
    """Propagate a simulation-time update to all entities and sensors."""
    for ent in self._entities.values():
      ent.update(dt)
    for sensor in self._sensors.values():
      sensor.update(dt)

  def write_data_to_sim(self) -> None:
    """Flush pending entity data (e.g. actuator targets) into the sim."""
    for ent in self._entities.values():
      ent.write_data_to_sim()

  # Private methods.

  def _add_entities(self) -> None:
    """Attach configured entities and merge their keyframes into one."""
    # Collect keyframes from each entity to merge into a single scene keyframe.
    # Order matters: qpos/ctrl are concatenated in entity iteration order.
    key_qpos: list[np.ndarray] = []
    key_ctrl: list[np.ndarray] = []
    for ent_name, ent_cfg in self._cfg.entities.items():
      ent = ent_cfg.build()
      self._entities[ent_name] = ent
      # Extract keyframe before attach (must delete before attach to avoid corruption).
      if ent.spec.keys:
        if len(ent.spec.keys) > 1:
          warnings.warn(
            f"Entity '{ent_name}' has {len(ent.spec.keys)} keyframes; only the "
            "first one will be used.",
            stacklevel=2,
          )
        key_qpos.append(np.array(ent.spec.keys[0].qpos))
        key_ctrl.append(np.array(ent.spec.keys[0].ctrl))
        ent.spec.delete(ent.spec.keys[0])
      frame = self._spec.worldbody.add_frame()
      self._spec.attach(ent.spec, prefix=f"{ent_name}/", frame=frame)
    # Add merged keyframe to scene spec.
    if key_qpos:
      combined_qpos = np.concatenate(key_qpos)
      combined_ctrl = np.concatenate(key_ctrl)
      self._spec.add_key(
        name="init_state",
        qpos=combined_qpos.tolist(),
        ctrl=combined_ctrl.tolist(),
      )

  def _add_terrain(self) -> None:
    """Build and attach the terrain, if one is configured."""
    if self._cfg.terrain is None:
      return
    # Terrain needs env count/spacing to lay out per-env origins.
    self._cfg.terrain.num_envs = self._cfg.num_envs
    self._cfg.terrain.env_spacing = self._cfg.env_spacing
    self._terrain = TerrainImporter(self._cfg.terrain, self._device)
    frame = self._spec.worldbody.add_frame()
    self._spec.attach(self._terrain.spec, prefix="", frame=frame)

  def _add_sensors(self) -> None:
    """Register configured sensors, then wrap any remaining spec sensors.

    Sensors already present in the spec (e.g. added by entities) but not
    configured explicitly are exposed as BuiltinSensor instances.
    """
    for sensor_cfg in self._cfg.sensors:
      sns = sensor_cfg.build()
      sns.edit_spec(self._spec, self._entities)
      self._sensors[sensor_cfg.name] = sns

    for sns in self._spec.sensors:
      if sns.name not in self._sensors:
        self._sensors[sns.name] = BuiltinSensor.from_existing(sns.name)
|
mjlab/src/mjlab/scene/scene.xml
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!-- Visualization defaults for mjlab scenes. Only applies when using native MuJoCo viewer. -->
|
| 2 |
+
<mujoco model="mjlab scene">
|
| 3 |
+
<visual>
|
| 4 |
+
<headlight diffuse="0.6 0.6 0.6" ambient="0.3 0.3 0.3" specular="0 0 0"/>
|
| 5 |
+
<rgba force="1 0 0 1" haze="0.15 0.25 0.35 1"/>
|
| 6 |
+
<global azimuth="135" elevation="-25" offwidth="1920" offheight="1080"/>
|
| 7 |
+
<map force="0.005"/>
|
| 8 |
+
<scale forcewidth="0.25" contactwidth="0.4" contactheight="0.15" framelength="5.0" framewidth="0.3"/>
|
| 9 |
+
<quality shadowsize="8192"/>
|
| 10 |
+
</visual>
|
| 11 |
+
<statistic meansize="0.03"/>
|
| 12 |
+
</mujoco>
|