exptech committed on
Commit
ee93ecd
·
verified ·
1 Parent(s): c17c995

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50) hide show
  1. .claude/plans/CLAUDE.md +7 -0
  2. dance/B_SpiralDance/capture/B_SpiralDance.bvh +0 -0
  3. dance/B_SpiralDance/retarget/B_SpiralDance.csv +0 -0
  4. dance/B_StretchDance/capture/B_StretchDance.bvh +0 -0
  5. dance/B_StretchDance/retarget/B_StretchDance.csv +0 -0
  6. dance/J_Dance12_LushLife/capture/J_Dance12_LushLife.bvh +0 -0
  7. dance/J_Dance12_LushLife/retarget/J_Dance12_LushLife.csv +0 -0
  8. dance/J_Dance18_TikTok/retarget/J_Dance18_TikTok.csv +0 -0
  9. dance/J_Dance19_LetsGO/capture/J_Dance19_LetsGO.bvh +0 -0
  10. dance/J_Dance19_LetsGO/retarget/J_Dance19_LetsGO.csv +0 -0
  11. dance/J_Dance22_Thrilling/capture/J_Dance22_Thrilling.bvh +0 -0
  12. dance/J_Dance22_Thrilling/retarget/J_Dance22_Thrilling.csv +0 -0
  13. dance/J_Dance23_MidnightSun/capture/J_Dance23_MidnightSun.bvh +0 -0
  14. dance/J_Dance23_MidnightSun/retarget/J_Dance23_MidnightSun.csv +0 -0
  15. dance/J_Dance3_Woah/capture/J_Dance3_Woah.bvh +0 -0
  16. dance/J_Dance3_Woah/retarget/J_Dance3_Woah.csv +0 -0
  17. dance/J_Dance4_Broadway/capture/J_Dance4_Broadway.bvh +0 -0
  18. dance/J_Dance7_Party/capture/J_Dance7_Party.bvh +0 -0
  19. dance/J_Dance7_Party/retarget/J_Dance7_Party.csv +0 -0
  20. dance/J_Dance8_WestCoast/retarget/J_Dance8_WestCoast.csv +0 -0
  21. dance/J_ShortDance13_SingleLadies/capture/J_ShortDance13_SingleLadies.bvh +0 -0
  22. dance/J_ShortDance13_SingleLadies/retarget/J_ShortDance13_SingleLadies.csv +0 -0
  23. dance/J_ShortDance16_JazzWalk/capture/J_ShortDance16_JazzWalk.bvh +0 -0
  24. dance/J_ShortDance16_JazzWalk/retarget/J_ShortDance16_JazzWalk.csv +0 -0
  25. mjlab/.claude/settings.json +33 -0
  26. mjlab/docs/_templates/versioning.html +13 -0
  27. mjlab/docs/conf.py +176 -0
  28. mjlab/docs/index.rst +90 -0
  29. mjlab/docs/source/actuators.rst +627 -0
  30. mjlab/docs/source/changelog.rst +116 -0
  31. mjlab/docs/source/distributed_training.rst +118 -0
  32. mjlab/docs/source/faq.rst +458 -0
  33. mjlab/docs/source/installation.rst +296 -0
  34. mjlab/docs/source/migration_isaac_lab.rst +283 -0
  35. mjlab/docs/source/motivation.rst +134 -0
  36. mjlab/docs/source/nan_guard.rst +148 -0
  37. mjlab/docs/source/observation.rst +333 -0
  38. mjlab/docs/source/randomization.rst +223 -0
  39. mjlab/docs/source/raycast_sensor.rst +346 -0
  40. mjlab/docs/source/sensors.rst +334 -0
  41. mjlab/notebooks/create_new_task.ipynb +856 -0
  42. mjlab/notebooks/demo.ipynb +99 -0
  43. mjlab/scripts/fix_mjpython_macos.sh +36 -0
  44. mjlab/scripts/run_docker.sh +37 -0
  45. mjlab/tests/conftest.py +174 -0
  46. mjlab/tests/smoke_test.py +36 -0
  47. mjlab/tests/test_action_manager.py +118 -0
  48. mjlab/tests/test_actions.py +196 -0
  49. mjlab/tests/test_actuator.py +86 -0
  50. mjlab/tests/test_actuator_builtin_group.py +168 -0
.claude/plans/CLAUDE.md ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ <claude-mem-context>
2
+ # Recent Activity
3
+
4
+ <!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
5
+
6
+ *No recent activity*
7
+ </claude-mem-context>
dance/B_SpiralDance/capture/B_SpiralDance.bvh ADDED
The diff for this file is too large to render. See raw diff
 
dance/B_SpiralDance/retarget/B_SpiralDance.csv ADDED
The diff for this file is too large to render. See raw diff
 
dance/B_StretchDance/capture/B_StretchDance.bvh ADDED
The diff for this file is too large to render. See raw diff
 
dance/B_StretchDance/retarget/B_StretchDance.csv ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance12_LushLife/capture/J_Dance12_LushLife.bvh ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance12_LushLife/retarget/J_Dance12_LushLife.csv ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance18_TikTok/retarget/J_Dance18_TikTok.csv ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance19_LetsGO/capture/J_Dance19_LetsGO.bvh ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance19_LetsGO/retarget/J_Dance19_LetsGO.csv ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance22_Thrilling/capture/J_Dance22_Thrilling.bvh ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance22_Thrilling/retarget/J_Dance22_Thrilling.csv ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance23_MidnightSun/capture/J_Dance23_MidnightSun.bvh ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance23_MidnightSun/retarget/J_Dance23_MidnightSun.csv ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance3_Woah/capture/J_Dance3_Woah.bvh ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance3_Woah/retarget/J_Dance3_Woah.csv ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance4_Broadway/capture/J_Dance4_Broadway.bvh ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance7_Party/capture/J_Dance7_Party.bvh ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance7_Party/retarget/J_Dance7_Party.csv ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_Dance8_WestCoast/retarget/J_Dance8_WestCoast.csv ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_ShortDance13_SingleLadies/capture/J_ShortDance13_SingleLadies.bvh ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_ShortDance13_SingleLadies/retarget/J_ShortDance13_SingleLadies.csv ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_ShortDance16_JazzWalk/capture/J_ShortDance16_JazzWalk.bvh ADDED
The diff for this file is too large to render. See raw diff
 
dance/J_ShortDance16_JazzWalk/retarget/J_ShortDance16_JazzWalk.csv ADDED
The diff for this file is too large to render. See raw diff
 
mjlab/.claude/settings.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "permissions": {
3
+ "allow": [
4
+ "Bash(find:*)",
5
+ "Bash(make check:*)",
6
+ "Bash(make test:*)",
7
+ "Bash(make docs:*)",
8
+ "Bash(git fetch:*)",
9
+ "Bash(git commit:*)",
10
+ "Bash(git checkout:*)",
11
+ "Bash(git reset:*)",
12
+ "Bash(gh api:*)",
13
+ "Bash(git ls-tree:*)"
14
+ ]
15
+ },
16
+ "hooks": {
17
+ "PostToolUse": [
18
+ {
19
+ "matcher": "Write|Edit",
20
+ "hooks": [
21
+ {
22
+ "type": "command",
23
+ "command": "uv run ruff format"
24
+ }
25
+ ]
26
+ }
27
+ ]
28
+ },
29
+ "enabledPlugins": {
30
+ "code-simplifier@claude-plugins-official": true,
31
+ "pr-review-toolkit@claude-plugins-official": true
32
+ }
33
+ }
mjlab/docs/_templates/versioning.html ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% if versions %}
2
+ <div class="sidebar-version-switcher">
3
+ <label class="sidebar-version-label" for="version-select">Version</label>
4
+ <select id="version-select" class="sidebar-version-select" onchange="location = this.value;">
5
+ {%- for item in versions.branches %}
6
+ <option value="{{ item.url }}" {% if item == current_version %}selected{% endif %}>{{ item.name }}</option>
7
+ {%- endfor %}
8
+ {%- for item in versions.tags|reverse %}
9
+ <option value="{{ item.url }}" {% if item == current_version %}selected{% endif %}>{{ item.name }}</option>
10
+ {%- endfor %}
11
+ </select>
12
+ </div>
13
+ {% endif %}
mjlab/docs/conf.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+
4
+ import sphinx_book_theme
5
+
6
+ sys.path.insert(0, os.path.abspath("../src"))
7
+ sys.path.insert(0, os.path.abspath("../src/mjlab"))
8
+
9
+
10
+ project = "mjlab"
11
+ copyright = "2025, The mjlab Developers"
12
+ author = "The mjlab Developers"
13
+
14
+ extensions = [
15
+ "sphinx.ext.autodoc",
16
+ "sphinx.ext.autosummary",
17
+ "autodocsumm",
18
+ "myst_parser",
19
+ "sphinx.ext.napoleon",
20
+ "sphinxemoji.sphinxemoji",
21
+ "sphinx.ext.intersphinx",
22
+ "sphinx.ext.mathjax",
23
+ "sphinx.ext.todo",
24
+ "sphinx.ext.viewcode",
25
+ "sphinxcontrib.bibtex",
26
+ "sphinxcontrib.icon",
27
+ "sphinx_copybutton",
28
+ "sphinx_design",
29
+ "sphinx_tabs.tabs",
30
+ "sphinx_multiversion",
31
+ ]
32
+
33
+ mathjax3_config = {
34
+ "tex": {
35
+ "inlineMath": [["\\(", "\\)"]],
36
+ "displayMath": [["\\[", "\\]"]],
37
+ },
38
+ }
39
+
40
+ panels_add_bootstrap_css = False
41
+ panels_add_fontawesome_css = True
42
+
43
+ source_suffix = {
44
+ ".rst": "restructuredtext",
45
+ ".md": "markdown",
46
+ }
47
+
48
+ nitpick_ignore = [
49
+ ("py:obj", "slice(None)"),
50
+ ]
51
+
52
+ nitpick_ignore_regex = [
53
+ (r"py:.*", r"pxr.*"),
54
+ (r"py:.*", r"trimesh.*"),
55
+ ]
56
+
57
+ # emoji style
58
+ sphinxemoji_style = "twemoji"
59
+ autodoc_typehints = "signature"
60
+ autoclass_content = "class"
61
+ autodoc_class_signature = "separated"
62
+ autodoc_member_order = "bysource"
63
+ autodoc_inherit_docstrings = True
64
+ bibtex_bibfiles = ["source/_static/refs.bib"]
65
+ autosummary_generate = True
66
+ autosummary_generate_overwrite = False
67
+ autodoc_default_options = {
68
+ "members": True,
69
+ "undoc-members": True,
70
+ "show-inheritance": True,
71
+ "member-order": "bysource",
72
+ "autosummary": True,
73
+ }
74
+ intersphinx_mapping = {
75
+ "python": ("https://docs.python.org/3", None),
76
+ }
77
+
78
+ exclude_patterns = [
79
+ "_build",
80
+ "_redirect",
81
+ "_templates",
82
+ "Thumbs.db",
83
+ ".DS_Store",
84
+ "README.md",
85
+ "licenses/*",
86
+ ]
87
+
88
+ autodoc_mock_imports = [
89
+ "matplotlib",
90
+ "scipy",
91
+ "carb",
92
+ "warp",
93
+ "pxr",
94
+ "h5py",
95
+ "hid",
96
+ "prettytable",
97
+ "tqdm",
98
+ "tensordict",
99
+ "trimesh",
100
+ "toml",
101
+ "mujoco_warp",
102
+ "gymnasium",
103
+ "rsl_rl",
104
+ "viser",
105
+ "wandb",
106
+ "torchvision",
107
+ ]
108
+
109
+ suppress_warnings = [
110
+ "ref.python",
111
+ "docutils",
112
+ ]
113
+
114
+ language = "en"
115
+
116
+ html_title = "mjlab Documentation"
117
+ html_theme_path = [sphinx_book_theme.get_html_theme_path()]
118
+ html_theme = "sphinx_book_theme"
119
+ html_favicon = "source/_static/favicon.ico"
120
+ html_show_copyright = True
121
+ html_show_sphinx = False
122
+ html_last_updated_fmt = ""
123
+
124
+ html_static_path = ["source/_static"]
125
+ html_css_files = ["css/custom.css"]
126
+
127
+ html_theme_options = {
128
+ "path_to_docs": "docs/",
129
+ "collapse_navigation": True,
130
+ "repository_url": "https://github.com/mujocolab/mjlab",
131
+ "use_repository_button": True,
132
+ "use_issues_button": True,
133
+ "use_edit_page_button": True,
134
+ "show_toc_level": 2,
135
+ "use_sidenotes": True,
136
+ "logo": {
137
+ "text": "The mjlab Documentation",
138
+ },
139
+ "icon_links": [
140
+ {
141
+ "name": "Benchmarks",
142
+ "url": "https://mujocolab.github.io/mjlab/nightly/",
143
+ "icon": "fa-solid fa-chart-line",
144
+ "type": "fontawesome",
145
+ },
146
+ ],
147
+ "icon_links_label": "Quick Links",
148
+ }
149
+
150
+ templates_path = [
151
+ "_templates",
152
+ ]
153
+
154
+ smv_remote_whitelist = r"^.*$"
155
+ smv_branch_whitelist = os.getenv("SMV_BRANCH_WHITELIST", r"^(main|devel)$")
156
+ smv_tag_whitelist = os.getenv("SMV_TAG_WHITELIST", r"^v[1-9]\d*\.\d+\.\d+$")
157
+
158
+ html_sidebars = {
159
+ "**": [
160
+ "navbar-logo.html",
161
+ "search-field.html",
162
+ "versioning.html",
163
+ "sbt-sidebar-nav.html",
164
+ ]
165
+ }
166
+
167
+
168
def skip_member(app, what, name, obj, skip, options):
    """Sphinx ``autodoc-skip-member`` handler for boilerplate members.

    Returns ``True`` to force-skip members whose name matches a known
    dataclass-style helper, or ``None`` to defer to Sphinx's default
    skipping decision for everything else.
    """
    excluded = {
        "from_dict",
        "to_dict",
        "replace",
        "copy",
        "validate",
        "__post_init__",
    }
    # None (not False) lets autodoc apply its own default behavior.
    return True if name in excluded else None
173
+
174
+
175
def setup(app):
    """Sphinx extension entry point.

    Registers :func:`skip_member` on the ``autodoc-skip-member`` event so
    boilerplate members are omitted from the generated API docs.
    """
    app.connect("autodoc-skip-member", skip_member)
mjlab/docs/index.rst ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Welcome to mjlab!
2
+ =================
3
+
4
+ .. figure:: source/_static/mjlab-banner.jpg
5
+ :width: 100%
6
+ :alt: mjlab
7
+
8
+ What is mjlab?
9
+ ==============
10
+
11
+ **mjlab = Isaac Lab's API + MuJoCo's simplicity + GPU acceleration**
12
+
13
+ We took Isaac Lab's proven manager-based architecture and RL abstractions,
14
+ then built them directly on MuJoCo Warp. No translation layers, no Omniverse
15
+ overhead. Just fast, transparent physics.
16
+
17
+ You can try mjlab *without installing anything* by using `uvx`:
18
+
19
+ .. code-block:: bash
20
+
21
+ # Install uv if you haven't already
22
+ curl -LsSf https://astral.sh/uv/install.sh | sh
23
+
24
+ # Run the mjlab demo (no local installation needed)
25
+ uvx --from mjlab demo
26
+
27
+ If this runs, your setup is compatible with mjlab *for evaluation*.
28
+
29
+ License & citation
30
+ ==================
31
+
32
+ mjlab is licensed under the Apache License, Version 2.0.
33
+ Please refer to the `LICENSE file <https://github.com/mujocolab/mjlab/blob/main/LICENSE/>`_ for details.
34
+
35
+ If you use mjlab in your research, we would appreciate a citation:
36
+
37
+ .. code-block:: bibtex
38
+
39
+ @article{Zakka_mjlab_A_Lightweight_2026,
40
+ author = {Zakka, Kevin and Liao, Qiayuan and Yi, Brent and Le Lay, Louis and Sreenath, Koushil and Abbeel, Pieter},
41
+ title = {{mjlab: A Lightweight Framework for GPU-Accelerated Robot Learning}},
42
+ url = {https://arxiv.org/abs/2601.22074},
43
+ year = {2026}
44
+ }
45
+
46
+ Acknowledgments
47
+ ===============
48
+
49
+ mjlab would not exist without the excellent work of the Isaac Lab team, whose API design
50
+ and abstractions mjlab builds upon.
51
+
52
+ Thanks also to the MuJoCo Warp team — especially Erik Frey and Taylor Howell — for
53
+ answering our questions, giving helpful feedback, and implementing features based
54
+ on our requests countless times.
55
+
56
+ Table of Contents
57
+ =================
58
+
59
+ .. toctree::
60
+ :maxdepth: 1
61
+ :caption: Getting Started
62
+
63
+ source/installation
64
+ source/migration_isaac_lab
65
+
66
+ .. toctree::
67
+ :maxdepth: 1
68
+ :caption: About the Project
69
+
70
+ source/motivation
71
+ source/faq
72
+ source/changelog
73
+
74
+ .. toctree::
75
+ :maxdepth: 2
76
+ :caption: API Reference
77
+
78
+ source/api/index
79
+
80
+ .. toctree::
81
+ :maxdepth: 1
82
+ :caption: Core Concepts
83
+
84
+ source/randomization
85
+ source/nan_guard
86
+ source/observation
87
+ source/actuators
88
+ source/sensors
89
+ source/raycast_sensor
90
+ source/distributed_training
mjlab/docs/source/actuators.rst ADDED
@@ -0,0 +1,627 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _actuators:
2
+
3
+ Actuators
4
+ =========
5
+
6
+ Actuators convert high-level commands (position, velocity, effort) into
7
+ low-level efforts that drive joints. Implementations use either
8
+ built-in actuators (physics engine computes torques and integrates damping
9
+ forces implicitly) or explicit actuators (user computes torques explicitly,
10
+ integrator cannot account for their velocity derivatives).
11
+
12
+ Choosing an Actuator Type
13
+ -------------------------
14
+
15
+ **Built-in actuators** (``BuiltinPositionActuator``, ``BuiltinVelocityActuator``): Use
16
+ MuJoCo's native implementations. The physics engine computes torques and
17
+ integrates damping forces implicitly, providing the best numerical stability.
18
+
19
+ **Explicit actuators** (``IdealPdActuator``, ``DcMotorActuator``,
20
+ ``LearnedMlpActuator``): Compute torques explicitly so the simulator cannot
21
+ account for velocity derivatives. Use when you need custom control laws or
22
+ actuator dynamics that can't be expressed with built-in types (e.g.,
23
+ velocity-dependent torque limits, learned actuator networks).
24
+
25
+ **XML actuators** (``XmlPositionActuator``, ``XmlMotorActuator``,
26
+ ``XmlVelocityActuator``): Wrap actuators already defined in your robot's XML
27
+ file.
28
+
29
+ **Delayed actuators** (``DelayedActuator``): Generic wrapper that adds command
30
+ delays to any actuator type. Use for modeling communication latency.
31
+
32
+ TL;DR
33
+ -----
34
+
35
+ **Basic PD control:**
36
+
37
+ .. code-block:: python
38
+
39
+ from mjlab.actuator import BuiltinPositionActuatorCfg
40
+ from mjlab.entity import EntityCfg, EntityArticulationInfoCfg
41
+
42
+ robot_cfg = EntityCfg(
43
+ spec_fn=lambda: load_robot_spec(),
44
+ articulation=EntityArticulationInfoCfg(
45
+ actuators=(
46
+ BuiltinPositionActuatorCfg(
47
+ target_names_expr=(".*_hip_.*", ".*_knee_.*"),
48
+ stiffness=80.0,
49
+ damping=10.0,
50
+ effort_limit=100.0,
51
+ ),
52
+ ),
53
+ ),
54
+ )
55
+
56
+ **Add delays:**
57
+
58
+ .. code-block:: python
59
+
60
+ from mjlab.actuator import DelayedActuatorCfg, BuiltinPositionActuatorCfg
61
+
62
+ DelayedActuatorCfg(
63
+ base_cfg=BuiltinPositionActuatorCfg(
64
+ target_names_expr=(".*",),
65
+ stiffness=80.0,
66
+ damping=10.0,
67
+ ),
68
+ delay_target="position",
69
+ delay_min_lag=2, # Minimum 2 physics steps
70
+ delay_max_lag=5, # Maximum 5 physics steps
71
+ )
72
+
73
+
74
+ Actuator Interface
75
+ ------------------
76
+
77
+ All actuators implement a unified ``compute()`` interface that receives an
78
+ ``ActuatorCmd`` (containing position, velocity, and effort targets) and returns
79
+ control signals for the low-level MuJoCo actuators driving each joint. The
80
+ abstraction provides lifecycle hooks for model modification, initialization,
81
+ reset, and runtime updates.
82
+
83
+ **Core interface:**
84
+
85
+ .. code-block:: python
86
+
87
+ def compute(self, cmd: ActuatorCmd) -> torch.Tensor:
88
+ """Convert high-level commands to control signals.
89
+
90
+ Args:
91
+ cmd: Command containing position_target, velocity_target, effort_target
92
+ (each is a [num_envs, num_targets] tensor or None)
93
+
94
+ Returns:
95
+ Control signals for this actuator ([num_envs, num_targets] tensor)
96
+ """
97
+
98
+ **Lifecycle hooks:**
99
+
100
+ - ``edit_spec``: Modify MjSpec before compilation (add actuators, set gains)
101
+ - ``initialize``: Post-compilation setup (resolve indices, allocate buffers)
102
+ - ``reset``: Per-environment reset logic
103
+ - ``update``: Pre-step updates
104
+ - ``compute``: Convert commands to control signals
105
+
106
+ **Properties:**
107
+
108
+ - ``target_ids``: Tensor of local target indices controlled by this actuator
109
+ - ``target_names``: List of target names controlled by this actuator
110
+ - ``ctrl_ids``: Tensor of global control input indices for this actuator
111
+
112
+ Actuator Types
113
+ --------------
114
+
115
+ Built-in Actuators
116
+ ^^^^^^^^^^^^^^^^^^
117
+
118
+ Built-in actuators use MuJoCo's native actuator types via the MjSpec API. The physics
119
+ engine computes the control law and integrates velocity-dependent damping forces
120
+ implicitly, providing best numerical stability.
121
+
122
+ **BuiltinPositionActuator**: Creates ``<position>`` actuators for PD control.
123
+
124
+ **BuiltinVelocityActuator**: Creates ``<velocity>`` actuators for velocity control.
125
+
126
+ **BuiltinMotorActuator**: Creates ``<motor>`` actuators for direct torque control.
127
+
128
+ .. code-block:: python
129
+
130
+ from mjlab.actuator import BuiltinPositionActuatorCfg, BuiltinVelocityActuatorCfg
131
+
132
+ # Mobile manipulator: PD for arm joints, velocity control for wheels.
133
+ actuators = (
134
+ BuiltinPositionActuatorCfg(
135
+ target_names_expr=(".*_shoulder_.*", ".*_elbow_.*", ".*_wrist_.*"),
136
+ stiffness=100.0,
137
+ damping=10.0,
138
+ effort_limit=150.0,
139
+ ),
140
+ BuiltinVelocityActuatorCfg(
141
+ target_names_expr=(".*_wheel_.*",),
142
+ damping=20.0,
143
+ effort_limit=50.0,
144
+ ),
145
+ )
146
+
147
+
148
+ Explicit Actuators
149
+ ^^^^^^^^^^^^^^^^^^
150
+
151
+ These actuators explicitly compute efforts and forward them to an underlying <motor>
152
+ actuator acting as a passthrough. This enables custom control laws and actuator
153
+ dynamics that can't be expressed with built-in types.
154
+
155
+ .. important::
156
+
157
+ Explicit actuators may be less numerically stable
158
+ than built-in actuators because the integrator cannot account for the
159
+ velocity derivatives of the control forces, especially with high damping
160
+ gains.
161
+
162
+ **IdealPdActuator**: Base class that implements an ideal PD controller.
163
+
164
+ **DcMotorActuator**: Example of a more realistic actuator model built on top
165
+ of ``IdealPdActuator``. Adds velocity-dependent torque saturation to model DC
166
+ motor torque-speed curves (back-EMF effects). It implements a linear
167
+ torque-speed curve: maximum torque at zero velocity, zero torque at maximum
168
+ velocity.
169
+
170
+ .. code-block:: python
171
+
172
+ from mjlab.actuator import IdealPdActuatorCfg, DcMotorActuatorCfg
173
+
174
+ # Ideal PD for hips, DC motor model with torque-speed curve for knees.
175
+ actuators = (
176
+ IdealPdActuatorCfg(
177
+ target_names_expr=(".*_hip_.*",),
178
+ stiffness=80.0,
179
+ damping=10.0,
180
+ effort_limit=100.0,
181
+ ),
182
+ DcMotorActuatorCfg(
183
+ target_names_expr=(".*_knee_.*",),
184
+ stiffness=80.0,
185
+ damping=10.0,
186
+ effort_limit=25.0, # Continuous torque limit
187
+ saturation_effort=50.0, # Peak torque at stall
188
+ velocity_limit=30.0, # No-load speed (rad/s)
189
+ ),
190
+ )
191
+
192
+
193
+ **DcMotorActuator parameters:**
194
+
195
+ - ``saturation_effort``: Peak motor torque at zero velocity (stall torque)
196
+ - ``velocity_limit``: Maximum motor velocity (no-load speed, *rad/s*)
197
+ - ``effort_limit``: Continuous torque limit (from base class)
198
+
199
+ **LearnedMlpActuator**: Neural network-based actuator that uses a trained MLP
200
+ to predict torque outputs from joint state history. Useful when analytical
201
+ models can't capture complex actuator dynamics like delays, nonlinearities, and
202
+ friction effects. Inherits DC motor velocity-based torque limits.
203
+
204
+ .. code-block:: python
205
+
206
+ from mjlab.actuator import LearnedMlpActuatorCfg
207
+
208
+ actuators = (
209
+ LearnedMlpActuatorCfg(
210
+ target_names_expr=(".*_ankle_.*",),
211
+ network_file="models/ankle_actuator.pt", # TorchScript model
212
+ pos_scale=1.0, # Input scaling for position errors
213
+ vel_scale=0.05, # Input scaling for velocities
214
+ torque_scale=10.0, # Output scaling for torques
215
+ input_order="pos_vel",
216
+ history_length=3, # Use current + 2 previous timesteps
217
+ saturation_effort=50.0,
218
+ velocity_limit=30.0,
219
+ effort_limit=25.0,
220
+ ),
221
+ )
222
+
223
+ **LearnedMlpActuator parameters:**
224
+
225
+ - ``network_file``: Path to TorchScript MLP model (``.pt`` file)
226
+ - ``pos_scale``: Scaling factor for position error inputs
227
+ - ``vel_scale``: Scaling factor for velocity inputs
228
+ - ``torque_scale``: Scaling factor for network torque outputs
229
+ - ``input_order``: ``pos_vel`` (position then velocity) or ``vel_pos``
230
+ - ``history_length``: Number of timesteps to use (e.g., 3 = current + 2 past)
231
+ - ``saturation_effort``, ``velocity_limit``, ``effort_limit``: Same as
232
+ DcMotorActuator
233
+
234
+ The network receives scaled inputs
235
+ ``[pos_error[t], pos_error[t-1], ..., vel[t], vel[t-1], ...]`` and outputs torques
236
+ that are scaled and clipped by DC motor limits.
237
+
238
+ XML Actuators
239
+ ^^^^^^^^^^^^^
240
+
241
+ XML actuators wrap actuators already defined in your robot's XML file. The
242
+ config finds existing actuators by matching their ``target`` joint name against
243
+ the ``target_names_expr`` patterns. Each joint must have exactly one matching
244
+ actuator.
245
+
246
+ **XmlPositionActuator**: Wraps existing ``<position>`` actuators
247
+
248
+ **XmlVelocityActuator**: Wraps existing ``<velocity>`` actuators
249
+
250
+ **XmlMotorActuator**: Wraps existing ``<motor>`` actuators
251
+
252
+ .. code-block:: python
253
+
254
+ from mjlab.actuator import XmlPositionActuatorCfg
255
+
256
+ # Robot XML already has:
257
+ # <actuator>
258
+ # <position name="hip_joint" joint="hip_joint" kp="100"/>
259
+ # </actuator>
260
+
261
+ # Wrap existing XML actuators.
262
+ actuators = (
263
+ XmlPositionActuatorCfg(target_names_expr=("hip_joint",)),
264
+ )
265
+
266
+ Delayed Actuator
267
+ ^^^^^^^^^^^^^^^^
268
+
269
+ Generic wrapper that adds command delays to any actuator. Useful for modeling
270
+ actuator latency and communication delays. The delay operates on command
271
+ targets before they reach the actuator's control law.
272
+
273
+ .. code-block:: python
274
+
275
+ from mjlab.actuator import DelayedActuatorCfg, IdealPdActuatorCfg
276
+
277
+ # Add 2-5 step delay to position commands.
278
+ actuators = (
279
+ DelayedActuatorCfg(
280
+ base_cfg=IdealPdActuatorCfg(
281
+ target_names_expr=(".*",),
282
+ stiffness=80.0,
283
+ damping=10.0,
284
+ ),
285
+ delay_target="position", # Delay position commands
286
+ delay_min_lag=2,
287
+ delay_max_lag=5,
288
+ delay_hold_prob=0.3, # 30% chance to keep previous lag
289
+ delay_update_period=10, # Update lag every 10 steps
290
+ ),
291
+ )
292
+
293
+
294
+ **Multi-target delays:**
295
+
296
+ .. code-block:: python
297
+
298
+ DelayedActuatorCfg(
299
+ base_cfg=IdealPdActuatorCfg(...),
300
+ delay_target=("position", "velocity", "effort"),
301
+ delay_min_lag=2,
302
+ delay_max_lag=5,
303
+ )
304
+
305
+ Delays are quantized to physics timesteps. For example, with 500Hz physics
306
+ (2ms/step), ``delay_min_lag=2`` represents a 4ms minimum delay.
307
+
308
+ .. note::
309
+
310
+ Each target gets an independent delay buffer with its own lag
311
+ schedule. This provides maximum flexibility for modeling different latency
312
+ characteristics for position, velocity, and effort commands.
313
+
314
+ PD Control and Integrator Choice
315
+ --------------------------------
316
+
317
+ The distinction between **built-in** and **explicit** PD control only makes sense
318
+ in the context of how MuJoCo integrates velocity-dependent forces. This section
319
+ explains how each actuator style interacts with the integrator, and why
320
+ mjlab uses ``<implicitfast>`` **by default**.
321
+
322
+ Built-in vs Explicit PD Control
323
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
324
+
325
+ **BuiltinPositionActuator** uses MuJoCo's internal PD implementation:
326
+
327
+ - Creates ``<position>`` actuators in the MjSpec
328
+ - Physics engine computes the PD law and integrates velocity-dependent damping
329
+ forces implicitly
330
+
331
+ **IdealPdActuator** implements PD control explicitly:
332
+
333
+ - Creates ``<motor>`` actuators in the MjSpec
334
+ - Computes torques explicitly: ``τ = Kp·pos_error + Kd·vel_error``
335
+ - The integrator cannot account for the velocity derivatives of these forces
336
+
337
+ They match closely in the linear, unconstrained regime and small time steps.
338
+ However, built-in PD is more numerically robust and as such can be used with
339
+ larger gains and larger timesteps.
340
+
341
+ Integrator Behavior in MuJoCo
342
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
343
+
344
+ The choice of integrator in MuJoCo strongly affects stability for
345
+ velocity-dependent forces:
346
+
347
+ - ``euler`` is semi-implicit but treats joint damping implicitly. Other
348
+ forces, including explicit actuator damping, are integrated explicitly.
349
+ - ``implicitfast`` treats *all known velocity-dependent forces implicitly*,
350
+ stabilizing systems with large damping or stiff actuation.
351
+
352
+ mjlab Recommendation
353
+ ^^^^^^^^^^^^^^^^^^^^
354
+
355
+ mjlab actuators apply damping inside the actuator (not in joints). Because of
356
+ this, **Euler** cannot integrate the damping implicitly, making it less stable.
357
+ The ``implicitfast`` integrator, however, handles both proportional and
358
+ damping terms of the actuator implicitly, improving stability without
359
+ additional cost.
360
+
361
+ .. note::
362
+
363
+ mjlab defaults to ``<implicitfast>``, as it is MuJoCo's recommended
364
+ integrator and provides superior stability for actuator-side damping.
365
+
366
+ Authoring Actuator Configs
367
+ --------------------------
368
+
369
+ Since actuator parameters are uniform within each config, use separate actuator
370
+ configs for joints that need different parameters:
371
+
372
+ .. code-block:: python
373
+
374
+ from mjlab.actuator import BuiltinPositionActuatorCfg
375
+
376
+ # G1 humanoid with different gains per joint group.
377
+ G1_ACTUATORS = (
378
+ BuiltinPositionActuatorCfg(
379
+ target_names_expr=(".*_hip_.*", "waist_yaw_joint"),
380
+ stiffness=180.0,
381
+ damping=18.0,
382
+ effort_limit=88.0,
383
+ armature=0.0015,
384
+ ),
385
+ BuiltinPositionActuatorCfg(
386
+ target_names_expr=("left_hip_pitch_joint", "right_hip_pitch_joint"),
387
+ stiffness=200.0,
388
+ damping=20.0,
389
+ effort_limit=88.0,
390
+ armature=0.0015,
391
+ ),
392
+ BuiltinPositionActuatorCfg(
393
+ target_names_expr=(".*_knee_joint",),
394
+ stiffness=150.0,
395
+ damping=15.0,
396
+ effort_limit=139.0,
397
+ armature=0.0025,
398
+ ),
399
+ BuiltinPositionActuatorCfg(
400
+ target_names_expr=(".*_ankle_.*",),
401
+ stiffness=40.0,
402
+ damping=5.0,
403
+ effort_limit=25.0,
404
+ armature=0.0008,
405
+ ),
406
+ )
407
+
408
+ This design choice reflects a deliberate simplification in mjlab: each
409
+ ``ActuatorCfg`` represents a single actuator type (e.g., a specific motor/gearbox
410
+ model) applied uniformly across all joints it drives. Hardware parameters such
411
+ as ``armature`` (reflected rotor inertia) and ``gear`` describe properties of the
412
+ actuator hardware, even though they are implemented in MuJoCo as joint or
413
+ actuator fields. In other frameworks (like Isaac Lab), these fields may accept
414
+ ``float | dict[str, float]`` to support per-joint variation. mjlab instead
415
+ encourages one config per actuator type or per joint group, keeping the hardware
416
+ model physically consistent and explicit. The main trade-off is verbosity in
417
+ special cases, such as parallel linkages, where per-joint overrides could have
418
+ been convenient, but the benefit is clearer semantics and simpler maintenance.
419
+
420
+ Computing Hardware Parameters from Motor Specs
421
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
422
+
423
+ mjlab provides utilities in ``mjlab.utils.actuator`` to compute actuator
424
+ parameters from physical motor specifications. This is particularly useful for
425
+ computing reflected inertia (``armature``) and deriving appropriate control gains
426
+ from hardware datasheets.
427
+
428
+ **Example: Unitree G1 motor configuration**
429
+
430
+ .. code-block:: python
431
+
432
+ from math import pi
433
+
434
+ from mjlab.utils.actuator import (
435
+ reflected_inertia_from_two_stage_planetary,
436
+ ElectricActuator
437
+ )
438
+
439
+ # Motor specs from manufacturer datasheet.
440
+ ROTOR_INERTIAS_7520_14 = (
441
+ 0.489e-4, # Motor rotor inertia (kg·m**2)
442
+ 0.098e-4, # Planet carrier inertia
443
+ 0.533e-4, # Output stage inertia
444
+ )
445
+ GEARS_7520_14 = (
446
+ 1, # First stage (motor to planet)
447
+ 4.5, # Second stage (planet to carrier)
448
+ 1 + (48/22), # Third stage (carrier to output)
449
+ )
450
+
451
+ # Compute reflected inertia at joint output.
452
+ # J_reflected = J_motor*(N₁*N₂)**2 + J_carrier*N₂**2 + J_output.
453
+ ARMATURE_7520_14 = reflected_inertia_from_two_stage_planetary(
454
+ ROTOR_INERTIAS_7520_14, GEARS_7520_14
455
+ )
456
+
457
+ # Create motor spec container.
458
+ ACTUATOR_7520_14 = ElectricActuator(
459
+ reflected_inertia=ARMATURE_7520_14,
460
+ velocity_limit=32.0, # rad/s at joint
461
+ effort_limit=88.0, # N·m continuous torque
462
+ )
463
+
464
+ # Derive PD gains from natural frequency and damping ratio.
465
+ NATURAL_FREQ = 10 * 2*pi # 10 Hz bandwidth.
466
+ DAMPING_RATIO = 2.0 # Overdamped, see note below.
467
+ STIFFNESS = ARMATURE_7520_14 * NATURAL_FREQ**2
468
+ DAMPING = 2 * DAMPING_RATIO * ARMATURE_7520_14 * NATURAL_FREQ
469
+
470
+ # Use in actuator config.
471
+ from mjlab.actuator import BuiltinPositionActuatorCfg
472
+
473
+ actuator = BuiltinPositionActuatorCfg(
474
+ target_names_expr=(".*_hip_pitch_joint",),
475
+ stiffness=STIFFNESS,
476
+ damping=DAMPING,
477
+ effort_limit=ACTUATOR_7520_14.effort_limit,
478
+ armature=ACTUATOR_7520_14.reflected_inertia,
479
+ )
480
+
481
+ .. note::
482
+
483
+ The example uses ``DAMPING_RATIO = 2.0``
484
+ (overdamped) rather than the critically damped value of 1.0. This is because
485
+ the reflected inertia calculation only accounts for the actuator's internal inertias,
486
+ not the apparent inertia of the links being moved. In practice, the total
487
+ effective inertia at the joint is higher than just the reflected motor inertia,
488
+ so using an overdamped ratio provides better stability margins when the true
489
+ system inertia is underestimated.
490
+
491
+ **Parallel linkage approximation:**
492
+
493
+ For joints driven by parallel linkages (like the G1's ankles with dual motors),
494
+ the effective armature in the nominal configuration can be approximated as the
495
+ sum of the individual motor armatures:
496
+
497
+ .. code-block:: python
498
+
499
+ # Two 5020 motors driving ankle through parallel linkage.
500
+ G1_ACTUATOR_ANKLE = BuiltinPositionActuatorCfg(
501
+ target_names_expr=(".*_ankle_pitch_joint", ".*_ankle_roll_joint"),
502
+ stiffness=STIFFNESS_5020 * 2,
503
+ damping=DAMPING_5020 * 2,
504
+ effort_limit=ACTUATOR_5020.effort_limit * 2,
505
+ armature=ACTUATOR_5020.reflected_inertia * 2,
506
+ )
507
+
508
+
509
+ Using Actuators in Environments
510
+ -------------------------------
511
+
512
+ Action Terms
513
+ ^^^^^^^^^^^^
514
+
515
+ Actuators are typically controlled via action terms in the action manager:
516
+
517
+ .. code-block:: python
518
+
519
+ from mjlab.envs.mdp.actions import JointPositionActionCfg
520
+
521
+ JointPositionActionCfg(
522
+ entity_name="robot",
523
+ actuator_names=(".*",), # Regex patterns for joint selection
524
+ scale=1.0,
525
+ use_default_offset=True, # Use robot's default joint positions as offset
526
+ )
527
+
528
+ **Available action terms:**
529
+
530
+ - ``JointPositionAction``: Sets position targets (for PD actuators)
531
+ - ``JointVelocityAction``: Sets velocity targets (for velocity actuators)
532
+ - ``JointEffortAction``: Sets effort/torque targets (for torque actuators)
533
+ - ``DifferentialIKAction``: Task-space control via damped least-squares IK
534
+
535
+ The action manager calls ``entity.set_joint_position_target()``,
536
+ ``set_joint_velocity_target()``, or ``set_joint_effort_target()`` under the hood,
537
+ which populate the ``ActuatorCmd`` passed to each actuator's ``compute()`` method.
538
+
539
+ Differential IK Action
540
+ """"""""""""""""""""""
541
+
542
+ ``DifferentialIKAction`` converts task-space commands (Cartesian position
543
+ and/or orientation) into joint-space targets via damped least-squares (DLS)
544
+ inverse kinematics. It runs one IK step per decimation substep via
545
+ ``apply_actions()``, or can be iterated externally via ``compute_dq()``.
546
+
547
+ The action dimension is determined automatically by the active objectives:
548
+
549
+ - ``orientation_weight == 0`` → **3D** (position only)
550
+ - ``orientation_weight > 0, use_relative_mode=True`` → **6D** (delta pos +
551
+ delta axis-angle)
552
+ - ``orientation_weight > 0, use_relative_mode=False`` → **7D** (absolute
553
+ pos + quaternion)
554
+
555
+ .. code-block:: python
556
+
557
+ from mjlab.envs.mdp.actions import DifferentialIKActionCfg
558
+
559
+ DifferentialIKActionCfg(
560
+ entity_name="robot",
561
+ actuator_names=("joint.*",), # Regex for controlled joints
562
+ frame_name="grasp_site", # End-effector element name
563
+ frame_type="site", # "body", "site", or "geom"
564
+ use_relative_mode=False, # Absolute target mode
565
+ damping=0.05, # DLS damping (lambda)
566
+ max_dq=0.5, # Per-step joint displacement limit
567
+ position_weight=1.0, # Position tracking weight
568
+ orientation_weight=1.0, # Orientation tracking weight
569
+ joint_limit_weight=0.1, # Soft joint-limit avoidance
570
+ posture_weight=0.0, # Null-space posture regularization
571
+ posture_target={".*": 0.0}, # Posture target (regex → value)
572
+ )
573
+
574
+ **Standalone usage (outside RL):**
575
+
576
+ The ``compute_dq()`` method returns joint displacements without writing to
577
+ actuator targets, enabling multi-iteration IK in standalone scripts:
578
+
579
+ .. code-block:: python
580
+
581
+ from mjlab.envs.mdp.actions import DifferentialIKAction
582
+
583
+ action: DifferentialIKAction = cfg.build(env)
584
+ action.process_actions(target_pose)
585
+ for _ in range(20): # Multiple IK iterations
586
+ dq = action.compute_dq()
587
+ q = entity.data.joint_pos[:, action._joint_ids] + dq
588
+ entity.write_joint_position_to_sim(q, joint_ids=action._joint_ids)
589
+ sim.forward()
590
+
591
+ **Weighted objectives:**
592
+
593
+ All objectives (position, orientation, joint limits, posture) are stacked
594
+ into a single DLS system. Setting a weight to zero disables that objective
595
+ with no overhead in the solve. Weights can be changed at runtime (e.g. from
596
+ GUI sliders in the ``scripts/demos/ik_control.py`` demo).
597
+
598
+ Domain Randomization
599
+ ^^^^^^^^^^^^^^^^^^^^
600
+
601
+ .. code-block:: python
602
+
603
+ from mjlab.envs.mdp import events
604
+ from mjlab.managers.event_manager import EventTermCfg
605
+ from mjlab.managers.scene_entity_config import SceneEntityCfg
606
+
607
+ EventTermCfg(
608
+ func=events.randomize_pd_gains,
609
+ mode="reset",
610
+ params={
611
+ "entity_cfg": SceneEntityCfg("robot", actuator_names=(".*",)),
612
+ "kp_range": (0.8, 1.2),
613
+ "kd_range": (0.8, 1.2),
614
+ "distribution": "uniform",
615
+ "operation": "scale", # or "abs" for absolute values
616
+ },
617
+ )
618
+
619
+ EventTermCfg(
620
+ func=events.randomize_effort_limits,
621
+ mode="reset",
622
+ params={
623
+ "entity_cfg": SceneEntityCfg("robot", actuator_names=(".*_leg_.*",)),
624
+ "effort_limit_range": (0.7, 1.0), # Reduce effort by 0-30%
625
+ "operation": "scale",
626
+ },
627
+ )
mjlab/docs/source/changelog.rst ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ =========
2
+ Changelog
3
+ =========
4
+
5
+ Upcoming version (not yet released)
6
+ -----------------------------------
7
+
8
+ Added
9
+ ^^^^^
10
+
11
+ - Added ``upload_model`` option to ``RslRlBaseRunnerCfg`` to control W&B model
12
+ file uploads (``.pt`` and ``.onnx``) while keeping metric logging enabled
13
+ (:gh:`654`).
14
+
15
+ Changed
16
+ ^^^^^^^
17
+
18
+ - Replaced the single ``scale`` parameter in ``DifferentialIKActionCfg`` with
19
+ separate ``delta_pos_scale`` and ``delta_ori_scale`` for independent scaling
20
+ of position and orientation components.
21
+
22
+ Fixed
23
+ ^^^^^
24
+
25
+ - Bundled ``ffmpeg`` for ``mediapy`` via ``imageio-ffmpeg``, removing the
26
+ requirement for a system ``ffmpeg`` install. Thanks to
27
+ `@rdeits-bd <https://github.com/rdeits-bd>`_ for the suggestion.
28
+ - Fixed ``height_scan`` returning ~0 for missed rays; now defaults to
29
+ ``max_distance``. Replaced ``clip=(-1, 1)`` with ``scale`` normalization
30
+ in the velocity task config. Thanks to `@eufrizz <https://github.com/eufrizz>`_
31
+ for reporting and the initial fix (`#642 <https://github.com/mujocolab/mjlab/pull/642>`_).
32
+ - Fixed ghost mesh visualization for fixed-base entities by extending
33
+ ``DebugVisualizer.add_ghost_mesh`` to optionally accept ``mocap_pos`` and
34
+ ``mocap_quat`` (`#645 <https://github.com/mujocolab/mjlab/pull/645>`_).
35
+
36
+ Version 1.1.1 (February 14, 2026)
37
+ ---------------------------------
38
+
39
+ Added
40
+ ^^^^^
41
+
42
+ - Added reward term visualization to the native viewer (toggle with ``P``) (`#629 <https://github.com/mujocolab/mjlab/pull/629>`_).
43
+ - Added ``DifferentialIKAction`` for task-space control via damped
44
+ least-squares IK. Supports weighted position/orientation tracking,
45
+ soft joint-limit avoidance, and null-space posture regularization.
46
+ Includes an interactive viser demo (``scripts/demos/differential_ik.py``) (`#632 <https://github.com/mujocolab/mjlab/pull/632>`_).
47
+
48
+ Fixed
49
+ ^^^^^
50
+
51
+ - Fixed ``play.py`` defaulting to the base rsl-rl ``OnPolicyRunner`` instead
52
+ of ``MjlabOnPolicyRunner``, which caused a ``TypeError`` from an unexpected
53
+ ``cnn_cfg`` keyword argument (`#626 <https://github.com/mujocolab/mjlab/pull/626>`_). Contribution by
54
+ `@griffinaddison <https://github.com/griffinaddison>`_.
55
+
56
+ Changed
57
+ ^^^^^^^
58
+
59
+ - Removed ``body_mass``, ``body_inertia``, ``body_pos``, and ``body_quat``
60
+ from ``FIELD_SPECS`` in domain randomization. These fields have derived
61
+ quantities that require ``set_const`` to recompute; without that call,
62
+ randomizing them silently breaks physics (`#631 <https://github.com/mujocolab/mjlab/pull/631>`_).
63
+ - Replaced ``moviepy`` with ``mediapy`` for video recording. ``mediapy``
64
+ handles cloud storage paths (GCS, S3) natively (`#637 <https://github.com/mujocolab/mjlab/pull/637>`_).
65
+
66
+ .. figure:: _static/changelog/native_reward.png
67
+ :width: 80%
68
+
69
+ Version 1.1.0 (February 12, 2026)
70
+ ---------------------------------
71
+
72
+ Added
73
+ ^^^^^
74
+
75
+ - Added RGB and depth camera sensors and BVH-accelerated raycasting (`#597 <https://github.com/mujocolab/mjlab/pull/597>`_).
76
+ - Added ``MetricsManager`` for logging custom metrics during training (`#596 <https://github.com/mujocolab/mjlab/pull/596>`_).
77
+ - Added terrain visualizer (`#609 <https://github.com/mujocolab/mjlab/pull/609>`_). Contribution by
78
+ `@mktk1117 <https://github.com/mktk1117>`_.
79
+
80
+ .. figure:: _static/changelog/terrain_visualizer.jpg
81
+ :width: 80%
82
+
83
+ - Added many new terrains including ``HfDiscreteObstaclesTerrainCfg``,
84
+ ``HfPerlinNoiseTerrainCfg``, ``BoxSteppingStonesTerrainCfg``,
85
+ ``BoxNarrowBeamsTerrainCfg``, ``BoxRandomStairsTerrainCfg``, and
86
+ more. Added flat patch sampling for heightfield terrains (`#542 <https://github.com/mujocolab/mjlab/pull/542>`_, `#581 <https://github.com/mujocolab/mjlab/pull/581>`_).
87
+ - Added site group visualization to the Viser viewer (Geoms and Sites
88
+ tabs unified into a single Groups tab) (`#551 <https://github.com/mujocolab/mjlab/pull/551>`_).
89
+ - Added ``env_ids`` parameter to ``Entity.write_ctrl_to_sim`` (`#567 <https://github.com/mujocolab/mjlab/pull/567>`_).
90
+
91
+ Changed
92
+ ^^^^^^^
93
+
94
+ - Upgraded ``rsl-rl-lib`` to 4.0.0 and replaced the custom ONNX
95
+ exporter with rsl-rl's built-in ``as_onnx()`` (`#589 <https://github.com/mujocolab/mjlab/pull/589>`_, `#595 <https://github.com/mujocolab/mjlab/pull/595>`_).
96
+ - ``sim.forward()`` is now called unconditionally after the decimation
97
+ loop. See :ref:`faq-sim-forward` for details (`#591 <https://github.com/mujocolab/mjlab/pull/591>`_).
98
+ - Unnamed freejoints are now automatically named to prevent
99
+ ``KeyError`` during entity init (`#545 <https://github.com/mujocolab/mjlab/pull/545>`_).
100
+
101
+ Fixed
102
+ ^^^^^
103
+
104
+ - Fixed ``randomize_pd_gains`` crash with ``num_envs > 1`` (`#564 <https://github.com/mujocolab/mjlab/pull/564>`_).
105
+ - Fixed ``ctrl_ids`` index error with multiple actuated entities (`#573 <https://github.com/mujocolab/mjlab/pull/573>`_).
106
+ Reported by `@bwrooney82 <https://github.com/bwrooney82>`_.
107
+ - Fixed Viser viewer rendering textured robots as gray (`#544 <https://github.com/mujocolab/mjlab/pull/544>`_).
108
+ - Fixed Viser plane rendering ignoring MuJoCo size parameter (`#540 <https://github.com/mujocolab/mjlab/pull/540>`_).
109
+ - Fixed ``HfDiscreteObstaclesTerrainCfg`` spawn height (`#552 <https://github.com/mujocolab/mjlab/pull/552>`_).
110
+ - Fixed ``RaycastSensor`` visualization ignoring the all-envs toggle (`#607 <https://github.com/mujocolab/mjlab/pull/607>`_).
111
+ Contribution by `@oxkitsune <https://github.com/oxkitsune>`_.
112
+
113
+ Version 1.0.0 (January 28, 2026)
114
+ --------------------------------
115
+
116
+ Initial release of mjlab.
mjlab/docs/source/distributed_training.rst ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _distributed-training:
2
+
3
+ Distributed Training
4
+ ====================
5
+
6
+ mjlab supports multi-GPU distributed training using
7
+ `torchrunx <https://github.com/apoorvkh/torchrunx>`_. Distributed training
8
+ parallelizes RL workloads across multiple GPUs by running independent rollouts
9
+ on each device and synchronizing gradients during policy updates. Throughput
10
+ scales nearly linearly with GPU count.
11
+
12
+ TL;DR
13
+ -----
14
+
15
+ **Single GPU (default):**
16
+
17
+ .. code-block:: bash
18
+
19
+ uv run train <task-name> <task-specific CLI args>
20
+ # or explicitly: --gpu-ids 0
21
+
22
+
23
+ **Multi-GPU:**
24
+
25
+ .. code-block:: bash
26
+
27
+ uv run train <task-name> \
28
+ --gpu-ids 0 1 \
29
+ <task-specific CLI args>
30
+
31
+
32
+ **All GPUs:**
33
+
34
+ .. code-block:: bash
35
+
36
+ uv run train <task-name> \
37
+ --gpu-ids all \
38
+ <task-specific CLI args>
39
+
40
+
41
+ **CPU mode:**
42
+
43
+ .. code-block:: bash
44
+
45
+ uv run train <task-name> \
46
+ --gpu-ids None \
47
+ <task-specific CLI args>
48
+ # or: CUDA_VISIBLE_DEVICES="" uv run train <task-name> ...
49
+
50
+
51
+ **Key points:**
52
+
53
+ - ``--gpu-ids`` specifies GPU indices (e.g., ``--gpu-ids 0 1`` for 2 GPUs)
54
+ - GPU indices are relative to ``CUDA_VISIBLE_DEVICES`` if set
55
+ - ``CUDA_VISIBLE_DEVICES=2,3 uv run train ... --gpu-ids 0 1`` uses physical GPUs 2 and 3
56
+ - Each GPU runs the full ``num-envs`` count (e.g., 2 GPUs × 4096 envs = 8192 total)
57
+ - Single-GPU and CPU modes run directly; multi-GPU uses ``torchrunx`` for process
58
+ spawning
59
+
60
+ Configuration
61
+ -------------
62
+
63
+ **torchrunx Logging:**
64
+
65
+ By default, torchrunx process logs are saved to ``{log_dir}/torchrunx/``. You can
66
+ customize this:
67
+
68
+ .. code-block:: bash
69
+
70
+ # Disable torchrunx file logging.
71
+ uv run train <task-name> --gpu-ids 0 1 --torchrunx-log-dir ""
72
+
73
+ # Custom log directory.
74
+ uv run train <task-name> --gpu-ids 0 1 --torchrunx-log-dir /path/to/logs
75
+
76
+ # Or use environment variable (takes precedence over flag).
77
+ TORCHRUNX_LOG_DIR=/tmp/logs uv run train <task-name> --gpu-ids 0 1
78
+
79
+
80
+ The priority order is: the ``TORCHRUNX_LOG_DIR`` env var first, then the
81
+ ``--torchrunx-log-dir`` flag, then the default ``{log_dir}/torchrunx``.
82
+
83
+ **Single-Writer Operations:**
84
+
85
+ Only rank 0 performs file I/O operations (config files, videos, wandb logging)
86
+ to avoid race conditions. All workers participate in training, but logging
87
+ artifacts are written once by the main process.
88
+
89
+ How It Works
90
+ ------------
91
+
92
+ mjlab's role is simple: **isolate mjwarp simulations on each GPU** using
93
+ ``wp.ScopedDevice``. This ensures each process's environments stay on their
94
+ assigned device. ``torchrunx`` handles the rest.
95
+
96
+ **Process spawning.** Multi-GPU training uses ``torchrunx.Launcher(...).run(...)``
97
+ to spawn N independent processes (one per GPU) and sets environment variables
98
+ (``RANK``, ``LOCAL_RANK``, ``WORLD_SIZE``) to coordinate them. Each process executes
99
+ the training function with its assigned GPU.
100
+
101
+ **Independent rollouts.** Each process maintains its own:
102
+
103
+ - Environment instances (with ``num-envs`` parallel environments), isolated on
104
+ its assigned GPU via ``wp.ScopedDevice``
105
+ - Policy network copy
106
+ - Experience buffer (sized ``num_steps_per_env × num-envs``)
107
+
108
+ Each process uses ``seed = cfg.seed + local_rank`` to ensure different random
109
+ experiences across GPUs, increasing sample diversity.
110
+
111
+ **Gradient synchronization.** During the update phase, ``rsl_rl`` synchronizes
112
+ gradients after each mini-batch through its ``reduce_parameters()`` method:
113
+
114
+ 1. Each process computes gradients independently on its local mini-batch
115
+ 2. All policy gradients are flattened into a single tensor
116
+ 3. ``torch.distributed.all_reduce`` averages gradients across all GPUs
117
+ 4. Averaged gradients are copied back to each parameter, keeping policies
118
+ synchronized
mjlab/docs/source/faq.rst ADDED
@@ -0,0 +1,458 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _faq:
2
+
3
+ FAQ & Troubleshooting
4
+ =====================
5
+
6
+ This page collects common questions about **platform support**, **performance**,
7
+ **training stability**, and **visualization**, along with practical debugging
8
+ tips and links to further resources.
9
+
10
+ Platform Support
11
+ ----------------
12
+
13
+ Does it work on macOS?
14
+ ~~~~~~~~~~~~~~~~~~~~~~
15
+
16
+ Yes, but only with limited performance. mjlab runs on macOS
17
+ using **CPU-only** execution through MuJoCo Warp.
18
+
19
+ - **Training is not recommended on macOS**, as it lacks GPU acceleration.
20
+ - **Evaluation works**, but is significantly slower than on Linux with CUDA.
21
+
22
+ For serious training workloads, we strongly recommend **Linux with an NVIDIA GPU**.
23
+
24
+ Does it work on Windows?
25
+ ~~~~~~~~~~~~~~~~~~~~~~~~
26
+
27
+ We have performed preliminary testing on **Windows** and **WSL**, but some
28
+ workflows are not guaranteed to be stable.
29
+
30
+ - Windows support may **lag behind** Linux.
31
+ - Windows will be **tested less frequently**, since Linux is the primary
32
+ development and deployment platform.
33
+ - Community contributions that improve Windows support are very welcome.
34
+
35
+ CUDA Compatibility
36
+ ~~~~~~~~~~~~~~~~~~
37
+
38
+ Not all CUDA versions are supported by MuJoCo Warp.
39
+
40
+ - See `mujoco_warp#101 <https://github.com/google-deepmind/mujoco_warp/issues/101>`_
41
+ for details on CUDA compatibility.
42
+ - **Recommended**: CUDA **12.4+** (for conditional execution support in CUDA
43
+ graphs).
44
+
45
+ Performance
46
+ -----------
47
+
48
+ Is it faster than Isaac Lab?
49
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
50
+
51
+ Based on our experience over the last few months, mjlab is **on par or
52
+ faster** than Isaac Lab.
53
+
54
+ What GPU do you recommend?
55
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
56
+
57
+ - **RTX 40-series GPUs** (or newer)
58
+ - **L40s, H100**
59
+
60
+ Does mjlab support multi-GPU training?
61
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
62
+
63
+ Yes, mjlab supports **multi-GPU distributed training** using
64
+ `torchrunx <https://github.com/apoorvkh/torchrunx>`_.
65
+
66
+ - Use ``--gpu-ids 0 1`` (or ``--gpu-ids all``) when running the ``train``
67
+ command.
68
+ - See the :doc:`distributed_training` for configuration details and examples.
69
+
70
+ Training & Debugging
71
+ --------------------
72
+
73
+ My training crashes with NaN errors
74
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
75
+
76
+ A typical error when using ``rsl_rl`` looks like:
77
+
78
+ .. code-block:: bash
79
+
80
+ RuntimeError: normal expects all elements of std >= 0.0
81
+
82
+ This occurs when NaN/Inf values in the **physics state** propagate to the
83
+ policy network, causing its output standard deviation to become negative or NaN.
84
+
85
+ There are many possible causes, including potential bugs in **MuJoCo Warp**
86
+ (which is still in beta). mjlab offers two complementary mechanisms to help
87
+ you handle this:
88
+
89
+ 1. **For training stability** - NaN termination
90
+
91
+ Add a ``nan_detection`` termination to reset environments that hit NaN:
92
+
93
+ .. code-block:: python
94
+
95
+ from dataclasses import dataclass, field
96
+
97
+ from mjlab.envs.mdp.terminations import nan_detection
98
+ from mjlab.managers.termination_manager import TerminationTermCfg
99
+
100
+ @dataclass
101
+ class TerminationCfg:
102
+ # Your other terminations...
103
+ nan_term: TerminationTermCfg = field(
104
+ default_factory=lambda: TerminationTermCfg(
105
+ func=nan_detection,
106
+ time_out=False,
107
+ )
108
+ )
109
+
110
+ This marks NaN environments as terminated so they can reset while training
111
+ continues. Terminations are logged as
112
+ ``Episode_Termination/nan_term`` in your metrics.
113
+
114
+ .. warning::
115
+
116
+ This is a **band-aid solution**. If NaNs correlate with your task objective
117
+ (for example, NaNs occur exactly when the agent tries to grasp an object),
118
+ the policy will never learn to complete that part of the task. Always
119
+ investigate the **root cause** using ``nan_guard`` in addition to this
120
+ termination.
121
+
122
+ 2. **For debugging** - NaN guard
123
+
124
+ Enable ``nan_guard`` to capture the simulation state when NaNs occur:
125
+
126
+ .. code-block:: bash
127
+
128
+ uv run train.py --enable-nan-guard True
129
+
130
+ See the :doc:`NaN Guard documentation <nan_guard>` for details.
131
+
132
+ The ``nan_guard`` tool makes it easier to:
133
+
134
+ - Inspect the simulation state at the moment NaNs appear.
135
+ - Build a minimal reproducible example (MRE).
136
+ - Report potential framework bugs to the
137
+ `MuJoCo Warp team <https://github.com/google-deepmind/mujoco_warp/issues>`_.
138
+
139
+ Reporting well-isolated issues helps improve the framework for everyone.
140
+
141
+ .. _faq-sim-forward:
142
+
143
+ When do I need to call ``sim.forward()``?
144
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
145
+
146
+ Short answer: you almost certainly don't.
147
+
148
+ ``sim.forward()`` wraps MuJoCo's ``mj_forward``, which runs the full forward
149
+ dynamics pipeline (kinematics, contacts, forces, constraint solving, sensors)
150
+ but skips integration, leaving ``qpos``/``qvel`` unchanged. It brings all
151
+ derived quantities in ``mjData`` (``xpos``, ``xquat``, ``site_xpos``,
152
+ ``cvel``, ``sensordata``, etc.) into a consistent state with the current
153
+ ``qpos``/``qvel``.
154
+ The environment's ``step()`` method calls it once per step, right before
155
+ observation computation, so observations, commands, and interval events
156
+ always see fresh derived quantities. Termination and reward managers run
157
+ *before* this call and therefore see derived quantities that are stale by
158
+ one physics substep, a deliberate tradeoff that avoids a second
159
+ ``forward()`` call while keeping the MDP well-defined (the staleness is
160
+ consistent across all envs and all steps).
161
+
162
+ The one case where this matters is if you write an event or command that
163
+ both writes state and reads derived quantities in the same function. For
164
+ example, if Event A calls ``entity.write_root_velocity_to_sim()`` (which
165
+ modifies ``qvel``) and then immediately reads ``entity.data.root_link_vel_w``
166
+ (which comes from ``cvel``), the read will see stale values from before the
167
+ write.
168
+
169
+ .. warning::
170
+
171
+ Write methods (``write_root_state_to_sim``, ``write_joint_state_to_sim``,
172
+ etc.) modify ``qpos``/``qvel`` directly. Read properties
173
+ (``root_link_pose_w``, ``body_link_vel_w``, etc.) return derived
174
+ quantities that are only current as of the last ``sim.forward()`` call.
175
+ If you need to write then read in the same function, call
176
+ ``env.sim.forward()`` between them.
177
+
178
+ For a deeper explanation, see `Discussion #289
179
+ <https://github.com/mujocolab/mjlab/discussions/289>`_.
180
+
181
+ Why aren't my training runs reproducible even with a fixed seed?
182
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
183
+
184
+ MuJoCo Warp does not yet guarantee determinism, so running the same
185
+ simulation with identical inputs may produce slightly different outputs.
186
+ This is a known limitation being tracked in
187
+ `mujoco_warp#562 <https://github.com/google-deepmind/mujoco_warp/issues/562>`_.
188
+
189
+ Until determinism is implemented upstream, mjlab training runs will not be
190
+ perfectly reproducible even when setting a seed.
191
+
192
+ Rendering & Visualization
193
+ -------------------------
194
+
195
+ What visualization options are available?
196
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
197
+
198
+ mjlab currently supports two visualizers for policy evaluation and
199
+ debugging:
200
+
201
+ - **Native MuJoCo visualizer** - the built-in visualizer that ships with MuJoCo.
202
+ - **Viser** - `Viser <https://github.com/nerfstudio-project/viser>`_,
203
+ a web-based 3D visualization tool.
204
+
205
+ We are exploring **training-time visualization** (e.g., live rollout viewers),
206
+ but this is not yet available.
207
+
208
+ As an alternative, mjlab supports **video logging to Weights & Biases
209
+ (W&B)**, so you can monitor rollout videos directly in the experiment dashboard.
210
+
211
+ What about camera/pixel rendering for vision-based RL?
212
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
213
+
214
+ Camera rendering for **pixel-based agents** is not yet available.
215
+
216
+ The MuJoCo Warp team is actively developing **camera support**. Once mature, it
217
+ will be integrated into mjlab for vision-based RL workflows.
218
+
219
+ How many environments can I visualize at once?
220
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
221
+
222
+ Visualizers are **limited to 32 environments maximum** for performance reasons.
223
+
224
+ - **Offscreen renderer** (for video recording): Hard-capped at 32 envs
225
+ (see ``_MAX_ENVS`` in ``viewer/offscreen_renderer.py:12``)
226
+ - **Native/Viser viewers**: Limited by MuJoCo's geometry buffer
227
+ (default 10,000 geoms, configurable via ``max_geom`` parameter)
228
+
229
+ With thousands of environments, only a subset will be rendered. The viewer
230
+ shows whichever environments fit within the geometry budget.
231
+
232
+ Why are my fixed-base robots all stacked at the origin instead of in a grid?
233
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
234
+
235
+ Fixed-base robots require an **explicit reset event** to position them at
236
+ their ``env_origins``. If your robots appear stacked at (0, 0, 0):
237
+
238
+ **Common causes:**
239
+
240
+ 1. **Missing reset event** - Most common issue.
241
+ 2. **env_spacing is 0 or very small** - Check your ``SceneCfg(env_spacing=...)``.
242
+ Even with proper reset events, if ``env_spacing=0.0``, all robots will
243
+ be at the same position. If ``env_spacing`` is very small (e.g., 0.01),
244
+ they'll be clustered in a tiny area that looks like a line from a distance.
245
+
246
+ **Solution**: Add a reset event that calls ``reset_root_state_uniform``:
247
+
248
+ .. code-block:: python
249
+
250
+ # In your ManagerBasedRlEnvCfg
251
+ events = {
252
+ # For positioning the base of the robot at env_origins.
253
+ "reset_base": EventTermCfg(
254
+ func=mdp.reset_root_state_uniform,
255
+ mode="reset",
256
+ params={
257
+ "pose_range": {}, # Empty = use default pose + env_origins
258
+ "velocity_range": {},
259
+ },
260
+ ),
261
+ # ... other events
262
+ }
263
+
264
+ This pattern is used in the example manipulation task (see ``lift_cube_env_cfg.py:84-93``).
265
+
266
+ **Why this is needed**: Fixed-base robots are automatically wrapped in mocap
267
+ bodies by ``auto_wrap_fixed_base_mocap()``, but mocap positioning only happens
268
+ when you explicitly call a reset event. The ``env_origins`` offset is applied
269
+ inside ``reset_root_state_uniform()`` at line 127 of ``envs/mdp/events.py``.
270
+
271
+ See `issue #560 <https://github.com/mujocolab/mjlab/issues/560>`_ for examples.
272
+
273
+ How does env_origins determine robot layout?
274
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
275
+
276
+ Robot spacing depends on your terrain configuration:
277
+
278
+ **Plane terrain** (``terrain_type="plane"``):
279
+ - Creates an approximately square grid automatically
280
+ - Grid size: ``ceil(sqrt(num_envs))`` rows x cols
281
+ - Spacing controlled by ``env_spacing`` parameter (default: 2.0m)
282
+ - Examples with ``env_spacing=2.0``:
283
+ - 32 envs → 7x5 grid spanning 12m x 8m
284
+ - 4096 envs → 64x64 grid spanning 126m x 126m
285
+ - **Important**: If ``env_spacing=0``, all robots will be at (0, 0, 0)
286
+ - Implementation: ``terrain_importer.py:_compute_env_origins_grid()``
287
+
288
+ **Procedural terrain** (``terrain_type="generator"``):
289
+ - Origins loaded from pre-generated terrain sub-patches
290
+ - Grid size: ``TerrainGeneratorCfg.num_rows x num_cols``
291
+ - Row index = difficulty level (curriculum mode)
292
+ - Column index = terrain type variant
293
+ - **Important allocation behavior**: Columns (terrain types) are evenly distributed
294
+ across environments, but rows (difficulty levels) are randomly sampled. This means
295
+ multiple environments can spawn on the same (row, col) patch, leaving others unoccupied,
296
+ even when ``num_envs > num_patches``.
297
+ - Example: 5x5 grid (25 patches), 100 envs → each column gets exactly 20 envs,
298
+ but those 20 are randomly distributed across 5 rows, so some patches remain empty.
299
+ - Supports ``randomize_env_origins()`` to shuffle positions during training
300
+
301
+ How do I ensure each terrain type gets its own column?
302
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
303
+
304
+ Set ``curriculum=True`` in your ``TerrainGeneratorCfg``. This makes column
305
+ allocation deterministic, with each column getting one terrain type based on
306
+ normalized proportions.
307
+
308
+ Example with 2 terrain types:
309
+
310
+ .. code-block:: python
311
+
312
+ TerrainGeneratorCfg(
313
+ num_rows=3,
314
+ num_cols=2,
315
+ curriculum=True, # Required for deterministic column allocation!
316
+ sub_terrains={
317
+ "flat": BoxFlatTerrainCfg(proportion=0.5), # Gets column 0
318
+ "pillars": HfDiscreteObstaclesTerrainCfg(
319
+ proportion=0.5, # Gets column 1
320
+ ),
321
+ },
322
+ )
323
+
324
+ Without ``curriculum=True``, every patch is randomly sampled and you'll get
325
+ a random mix of both terrain types scattered across all patches.
326
+
327
+ **Note**: When ``num_cols`` equals the number of terrain types, each terrain
328
+ gets exactly one column regardless of proportion values (they're normalized).
329
+ When ``num_cols > num_terrain_types``, proportions determine how many columns
330
+ each terrain type occupies.
331
+
332
+ What is flat patch sampling and how does it affect robot spawning?
333
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
334
+
335
+ Flat patch sampling detects flat regions on heightfield terrains where robots
336
+ can safely spawn. It uses morphological filtering on the heightfield to find
337
+ circular areas where height variation is within a tolerance.
338
+
339
+ Configure it on any sub-terrain via ``flat_patch_sampling``:
340
+
341
+ .. code-block:: python
342
+
343
+ from mjlab.terrains.terrain_generator import FlatPatchSamplingCfg
344
+
345
+ "obstacles": HfDiscreteObstaclesTerrainCfg(
346
+ ...,
347
+ flat_patch_sampling={
348
+ "spawn": FlatPatchSamplingCfg(
349
+ num_patches=10, # patches to sample per sub-terrain
350
+ patch_radius=0.5, # flatness check radius (meters)
351
+ max_height_diff=0.05, # max height variation within radius
352
+ ),
353
+ },
354
+ )
355
+
356
+ Then use ``reset_root_state_from_flat_patches`` as your reset event to spawn
357
+ robots on detected patches instead of at the sub-terrain center.
358
+
359
+ **Key details:**
360
+
361
+ - Only heightfield (``Hf*``) terrains support actual flat patch detection.
362
+ Box terrains (``Box*``) don't have heightfield data to analyze.
363
+ - If any sub-terrain in the grid configures ``flat_patch_sampling``, the
364
+ flat patches array is allocated for **all** cells. Sub-terrains that don't
365
+ produce patches have their slots filled with the sub-terrain's spawn origin,
366
+ so ``reset_root_state_from_flat_patches`` always gets valid positions.
367
+ - Without ``flat_patch_sampling``, use ``reset_root_state_uniform`` which
368
+ spawns at the sub-terrain origin (``env_origins``) plus an optional random
369
+ offset.
370
+
371
+ Development & Extensions
372
+ ------------------------
373
+
374
+ Can I develop custom tasks in my own repository?
375
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
376
+
377
+ Yes, mjlab has a **plugin system** that lets you develop tasks in separate
378
+ repositories while still integrating seamlessly with the core:
379
+
380
+ - Your tasks appear as regular entries for the ``train`` and ``play`` commands.
381
+ - You can version and maintain your task repositories independently.
382
+
383
+ A complete guide will be available in a future release.
384
+
385
+ Assets & Compatibility
386
+ ----------------------
387
+
388
+ What robots are included?
389
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
390
+
391
+ mjlab includes two **reference robots**:
392
+
393
+ - **Unitree Go1** (quadruped).
394
+ - **Unitree G1** (humanoid).
395
+
396
+ These robots serve as:
397
+
398
+ - Minimal examples for **robot integration**.
399
+ - Stable, well-tested baselines for **benchmark tasks**.
400
+
401
+ To keep the core library lean, we do **not** plan to aggressively expand the
402
+ built-in robot library. Additional robots may be provided in separate
403
+ repositories or community-maintained packages.
404
+
405
+ Can I use USD or URDF models?
406
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
407
+
408
+ No, mjlab expects **MJCF (MuJoCo XML)** models.
409
+
410
+ - You will need to **convert** USD or URDF assets to MJCF.
411
+ - For many common robots, you can directly use
412
+ `MuJoCo Menagerie <https://github.com/google-deepmind/mujoco_menagerie>`_,
413
+ which ships high-quality MJCF models and assets.
414
+
415
+ Getting Help
416
+ ------------
417
+
418
+ GitHub Issues
419
+ ~~~~~~~~~~~~~
420
+
421
+ Use GitHub issues for:
422
+
423
+ - **Bug reports**
424
+ - **Performance regressions**
425
+ - **Documentation gaps**
426
+
427
+ When filing a bug, please include:
428
+
429
+ - CUDA driver and runtime versions
430
+ - GPU model
431
+ - A minimal reproduction script
432
+ - Complete error logs and stack traces
433
+ - Appropriate labels (for example: ``bug``, ``performance``, ``docs``)
434
+
435
+ `Open an issue <https://github.com/mujocolab/mjlab/issues>`_
436
+
437
+ Discussions
438
+ ~~~~~~~~~~~
439
+
440
+ Use GitHub Discussions for:
441
+
442
+ - Usage questions (config, debugging, best practices)
443
+ - Performance tuning tips
444
+ - Asset conversion and modeling questions
445
+ - Design discussions and roadmap ideas
446
+
447
+ `Start a discussion <https://github.com/mujocolab/mjlab/discussions>`_
448
+
449
+ Known Limitations
450
+ -----------------
451
+
452
+ We're tracking missing features for the stable release in
453
+ https://github.com/mujocolab/mjlab/issues/100. Check our
454
+ `open issues <https://github.com/mujocolab/mjlab/issues>`_ to see what's actively
455
+ being worked on.
456
+
457
+ If something isn't working or if we've missed something, please
458
+ `file a bug report <https://github.com/mujocolab/mjlab/issues/new>`_.
mjlab/docs/source/installation.rst ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _installation:
2
+
3
+ Installation Guide
4
+ ==================
5
+
6
+ This guide presents different installation paths so you can
7
+ choose the one that best fits your use case.
8
+
9
+ .. contents::
10
+ :local:
11
+ :depth: 1
12
+
13
+ .. note::
14
+
15
+ **System Requirements**
16
+
17
+ - **Operating System**: Linux recommended
18
+ - **Python**: 3.10 or higher
19
+ - **GPU**: NVIDIA GPU
20
+ - **CUDA version**: CUDA 12.4+ recommended
21
+
22
+ See :ref:`faq` for more details on what is exactly supported.
23
+
24
+
25
+ How to choose an installation method?
26
+ -------------------------------------
27
+
28
+ Select the card that best matches how you plan to use ``mjlab``.
29
+
30
+ .. grid:: 2
31
+ :gutter: 2
32
+
33
+ .. grid-item-card:: Method 1 - Use mjlab as a dependency (uv)
34
+ :link: install-uv-dependency
35
+ :link-type: ref
36
+
37
+ You are **using mjlab as a dependency** in your own project managed by ``uv``. **(Recommended for most users)**
38
+
39
+ .. grid-item-card:: Method 2 - Develop / contribute (uv)
40
+ :link: install-uv-develop
41
+ :link-type: ref
42
+
43
+ You are **trying mjlab** or **contributing to mjlab itself** directly from inside the mjlab repository, with ``uv`` managing the environment.
44
+
45
+ .. grid-item-card:: Method 3 - Classic pip / venv / conda
46
+ :link: install-pip
47
+ :link-type: ref
48
+
49
+ You are using **classic tools** (``pip`` / ``venv`` / ``conda``) and **do not use uv**.
50
+
51
+ .. grid-item-card:: Method 4 - Docker / clusters
52
+ :link: install-docker
53
+ :link-type: ref
54
+
55
+ You are **running in containers or on clusters** and prefer a **Docker-based** setup.
56
+
57
+
58
+ .. _install-uv-dependency:
59
+
60
+ Method 1 - Use mjlab as a dependency (uv)
61
+ -----------------------------------------
62
+
63
+ This is our recommended way to use ``mjlab``. You have
64
+ your own project and want to use ``mjlab`` as a dependency
65
+ using ``uv``.
66
+
67
+ 1. Install uv
68
+ ^^^^^^^^^^^^^
69
+
70
+ If you do not have ``uv`` installed, run:
71
+
72
+ .. code-block:: bash
73
+
74
+ curl -LsSf https://astral.sh/uv/install.sh | sh
75
+
76
+ 2. Initialize your project
77
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^
78
+
79
+ Initialize a managed Python project:
80
+
81
+ .. code-block:: bash
82
+
83
+ # Create a new package-based project
84
+ uv init --package my_mjlab_project
85
+ cd my_mjlab_project
86
+
87
+ 3. Add mjlab dependencies
88
+ ^^^^^^^^^^^^^^^^^^^^^^^^^
89
+
90
+ There are different options to add ``mjlab`` as a dependency.
91
+ We recommend using the latest stable version from PyPI. If you need
92
+ the latest features, use the direct GitHub installation. Finally, if you
93
+ need to use a feature you have developed locally, use the local editable
94
+ install. These options are interchangeable: you can switch at any time.
95
+
96
+ .. tab-set::
97
+
98
+ .. tab-item:: PyPI
99
+
100
+ Once in your project, install the latest snapshot from PyPI:
101
+
102
+ .. code:: bash
103
+
104
+ uv add mjlab
105
+
106
+ .. tab-item:: Source
107
+
108
+ Once in your project, install directly from GitHub without cloning:
109
+
110
+ .. code:: bash
111
+
112
+ uv add "mjlab @ git+https://github.com/mujocolab/mjlab"
113
+
114
+ .. tab-item:: Local
115
+
116
+ Clone the repository:
117
+
118
+ .. code:: bash
119
+
120
+ git clone https://github.com/mujocolab/mjlab.git
121
+
122
+ Once in your project, add it as an editable dependency:
123
+
124
+ .. code:: bash
125
+
126
+ uv add --editable /path/to/cloned/mjlab
127
+
128
+ .. tip::
129
+
130
+ For a complete example of how to structure a project that integrates a custom robot
131
+ with an existing ``mjlab`` task, check out the
132
+ `ANYmal C Velocity Tracking <https://github.com/mujocolab/anymal_c_velocity>`_ repository.
133
+
134
+ Verification
135
+ ^^^^^^^^^^^^
136
+
137
+ After installation, verify that ``mjlab`` is working by running the demo:
138
+
139
+ .. code-block:: bash
140
+
141
+ uv run demo
142
+
143
+
144
+ .. _install-uv-develop:
145
+
146
+ Method 2 - Develop / contribute (uv)
147
+ ------------------------------------
148
+
149
+ This method is for developing ``mjlab`` itself or contributing to the project.
150
+
151
+ .. code:: bash
152
+
153
+ git clone https://github.com/mujocolab/mjlab.git
154
+ cd mjlab
155
+ uv sync
156
+
157
+ Verification
158
+ ^^^^^^^^^^^^
159
+
160
+ After installation, verify that ``mjlab`` is working by running the demo:
161
+
162
+ .. code-block:: bash
163
+
164
+ uv run demo
165
+
166
+
167
+ .. _install-pip:
168
+
169
+ Method 3 - Classic pip / venv / conda
170
+ -------------------------------------
171
+
172
+ While ``mjlab`` is designed to work with `uv <https://docs.astral.sh/uv/>`_, you can
173
+ also use it with any pip-based virtual environment (``venv``, ``conda``, ``virtualenv``, etc.).
174
+
175
+ Create and activate your virtual environment
176
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
177
+
178
+ .. tab-set::
179
+
180
+ .. tab-item:: venv
181
+
182
+ Using ``venv`` (standard library):
183
+
184
+ .. code:: bash
185
+
186
+ python -m venv mjlab-env
187
+ source mjlab-env/bin/activate
188
+
189
+ .. tab-item:: conda
190
+
191
+ Using ``conda``:
192
+
193
+ .. code:: bash
194
+
195
+ conda create -n mjlab python=3.13
196
+ conda activate mjlab
197
+
198
+
199
+ Install mjlab and dependencies via pip
200
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
201
+
202
+ .. tab-set::
203
+
204
+ .. tab-item:: PyPI
205
+
206
+ From PyPI:
207
+
208
+ .. code:: bash
209
+
210
+ pip install mjlab
211
+
212
+ .. tab-item:: Source
213
+
214
+ From Source:
215
+
216
+ .. code:: bash
217
+
218
+ git clone https://github.com/mujocolab/mjlab.git
219
+ cd mjlab
220
+ pip install -e .
221
+
222
+
223
+ Verification
224
+ ^^^^^^^^^^^^
225
+
226
+ After installation, verify that ``mjlab`` is working by running the demo:
227
+
228
+ .. code-block:: bash
229
+
230
+ demo
231
+
232
+
233
+ .. _install-docker:
234
+
235
+ Method 4 - Docker / clusters
236
+ ----------------------------
237
+
238
+ This method is recommended if you prefer running ``mjlab`` in containers (for example on
239
+ servers or clusters).
240
+
241
+
242
+ Prerequisites
243
+ ^^^^^^^^^^^^^
244
+
245
+ - Install Docker: `Docker installation guide <https://docs.docker.com/engine/install/>`_.
246
+ - Install an appropriate NVIDIA driver for your system and the
247
+ `NVIDIA Container Toolkit <https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html>`_.
248
+
249
+ - Be sure to register the container runtime with Docker and restart, as described in
250
+ the Docker configuration section of the NVIDIA install guide.
251
+
252
+
253
+ Build the Docker image
254
+ ^^^^^^^^^^^^^^^^^^^^^^
255
+
256
+ From the root of the repository:
257
+
258
+ .. code-block:: bash
259
+
260
+ make docker-build
261
+
262
+
263
+ Run mjlab in Docker
264
+ ^^^^^^^^^^^^^^^^^^^
265
+
266
+ Use the included helper script to run an ``mjlab`` Docker container with useful arguments preconfigured:
267
+
268
+ .. code-block:: bash
269
+
270
+ ./scripts/run_docker.sh
271
+
272
+ Examples:
273
+
274
+ - Demo with viewer:
275
+
276
+ .. code-block:: bash
277
+
278
+ ./scripts/run_docker.sh uv run demo
279
+
280
+ - Training example:
281
+
282
+ .. code-block:: bash
283
+
284
+ ./scripts/run_docker.sh uv run train Mjlab-Velocity-Flat-Unitree-G1 --env.scene.num-envs 4096
285
+
286
+
287
+ Having some troubles?
288
+ ---------------------
289
+
290
+ 1. **Check the FAQ**
291
+
292
+ Consult the mjlab :ref:`faq` for answers to common installation and runtime issues.
293
+
294
+ 2. **Still stuck?**
295
+
296
+ Open an issue on GitHub: https://github.com/mujocolab/mjlab/issues
mjlab/docs/source/migration_isaac_lab.rst ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _migration-isaaclab:
2
+
3
+ Migrating from Isaac Lab
4
+ ========================
5
+
6
+ .. warning::
7
+
8
+ This guide is a work in progress. As more users migrate, we will update this
9
+ page with additional patterns and edge cases. If something is not covered,
10
+ please open an issue on GitHub or start a discussion:
11
+
12
+ - Issues: https://github.com/mujocolab/mjlab/issues
13
+ - Discussions: https://github.com/mujocolab/mjlab/discussions
14
+
15
+ TL;DR
16
+ -----
17
+
18
+ Most Isaac Lab *manager-based* task configs can be ported to ``mjlab`` with
19
+ only small changes:
20
+
21
+ - The overall **MDP structure is the same** (managers for rewards, observations,
22
+ actions, commands, terminations, events, curriculum).
23
+ - The **environment base classes are similar**, but naming is slightly
24
+ different.
25
+ - The biggest change is **configuration style**: Isaac Lab uses nested
26
+ ``@configclass`` definitions; ``mjlab`` uses dictionaries of config objects.
27
+
28
+ If you are familiar with Isaac Lab's manager-based API, migration is mostly
29
+ mechanical.
30
+
31
+ Key Differences
32
+ ---------------
33
+
34
+ 1. Import Paths
35
+ ~~~~~~~~~~~~~~~
36
+
37
+ Isaac Lab:
38
+
39
+ .. code-block:: python
40
+
41
+ from isaaclab.envs import ManagerBasedRLEnv
42
+
43
+ mjlab:
44
+
45
+ .. code-block:: python
46
+
47
+ from mjlab.envs import ManagerBasedRlEnvCfg
48
+
49
+ .. note::
50
+
51
+ ``mjlab`` uses a consistent ``CamelCase`` naming convention (for example,
52
+ ``RlEnv`` instead of ``RLEnv``).
53
+
54
+ 2. Configuration Structure
55
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
56
+
57
+ Isaac Lab uses nested ``@configclass`` blocks for manager terms. ``mjlab``
58
+ instead uses **plain dictionaries** mapping names to config objects, which makes
59
+ it easy to construct variants, merge configs, or generate them programmatically.
60
+
61
+ **Isaac Lab:**
62
+
63
+ .. code-block:: python
64
+
65
+ @configclass
66
+ class RewardsCfg:
67
+ """Reward terms for the MDP."""
68
+
69
+ motion_global_anchor_pos = RewTerm(
70
+ func=mdp.motion_global_anchor_position_error_exp,
71
+ weight=0.5,
72
+ params={"command_name": "motion", "std": 0.3},
73
+ )
74
+ motion_global_anchor_ori = RewTerm(
75
+ func=mdp.motion_global_anchor_orientation_error_exp,
76
+ weight=0.5,
77
+ params={"command_name": "motion", "std": 0.4},
78
+ )
79
+
80
+ **mjlab:**
81
+
82
+ .. code-block:: python
83
+
84
+ rewards = {
85
+ "motion_global_anchor_pos": RewardTermCfg(
86
+ func=mdp.motion_global_anchor_position_error_exp,
87
+ weight=0.5,
88
+ params={"command_name": "motion", "std": 0.3},
89
+ ),
90
+ "motion_global_anchor_ori": RewardTermCfg(
91
+ func=mdp.motion_global_anchor_orientation_error_exp,
92
+ weight=0.5,
93
+ params={"command_name": "motion", "std": 0.4},
94
+ ),
95
+ }
96
+
97
+ cfg = ManagerBasedRlEnvCfg(
98
+ scene=scene,
99
+ rewards=rewards,
100
+ # ... other manager dictionaries:
101
+ # observations=..., actions=..., commands=..., terminations=...,
102
+ # events=..., curriculum=...
103
+ )
104
+
105
+ This pattern applies to all managers:
106
+
107
+ - ``rewards``
108
+ - ``observations``
109
+ - ``actions``
110
+ - ``commands``
111
+ - ``terminations``
112
+ - ``events``
113
+ - ``curriculum``
114
+
115
+ 3. Scene Configuration
116
+ ~~~~~~~~~~~~~~~~~~~~~~
117
+
118
+ Scene setup is **simpler** in ``mjlab``:
119
+
120
+ - No Omniverse / USD scene graph, no ``prim_path`` management.
121
+ - Assets are pure MuJoCo (MJCF) with modifier dataclasses applied to
122
+ ``mujoco.MjSpec``.
123
+ - Lights, materials, textures, and sensors are configured as part of
124
+ ``SceneCfg`` and robot configs.
125
+
126
+ **Isaac Lab:**
127
+
128
+ .. code-block:: python
129
+
130
+ from whole_body_tracking.robots.g1 import G1_ACTION_SCALE, G1_CYLINDER_CFG
131
+ from isaaclab.scene import InteractiveSceneCfg
132
+ from isaaclab.sensors import ContactSensorCfg
133
+ from isaaclab.terrains import TerrainImporterCfg
134
+ import isaaclab.sim as sim_utils
135
+ from isaaclab.assets import ArticulationCfg, AssetBaseCfg
136
+
137
+ @configclass
138
+ class MySceneCfg(InteractiveSceneCfg):
139
+ """Configuration for the terrain scene with a legged robot."""
140
+
141
+ # ground terrain
142
+ terrain = TerrainImporterCfg(
143
+ prim_path="/World/ground",
144
+ terrain_type="plane",
145
+ collision_group=-1,
146
+ physics_material=sim_utils.RigidBodyMaterialCfg(
147
+ friction_combine_mode="multiply",
148
+ restitution_combine_mode="multiply",
149
+ static_friction=1.0,
150
+ dynamic_friction=1.0,
151
+ ),
152
+ visual_material=sim_utils.MdlFileCfg(
153
+ mdl_path="{NVIDIA_NUCLEUS_DIR}/Materials/Base/Architecture/Shingles_01.mdl",
154
+ project_uvw=True,
155
+ ),
156
+ )
157
+ # lights
158
+ light = AssetBaseCfg(
159
+ prim_path="/World/light",
160
+ spawn=sim_utils.DistantLightCfg(
161
+ color=(0.75, 0.75, 0.75), intensity=3000.0
162
+ ),
163
+ )
164
+ sky_light = AssetBaseCfg(
165
+ prim_path="/World/skyLight",
166
+ spawn=sim_utils.DomeLightCfg(
167
+ color=(0.13, 0.13, 0.13), intensity=1000.0
168
+ ),
169
+ )
170
+ robot = G1_CYLINDER_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
171
+
172
+ **mjlab:**
173
+
174
+ .. code-block:: python
175
+
176
+ from dataclasses import replace
177
+
178
+ from mjlab.scene import SceneCfg
179
+ from mjlab.asset_zoo.robots.unitree_g1.g1_constants import get_g1_robot_cfg
180
+ from mjlab.utils.spec_config import ContactSensorCfg
181
+ from mjlab.terrains import TerrainImporterCfg
182
+
183
+ # Configure contact sensor
184
+ self_collision_sensor = ContactSensorCfg(
185
+ name="self_collision",
186
+ subtree1="pelvis",
187
+ subtree2="pelvis",
188
+ data=("found",),
189
+ reduce="netforce",
190
+ num=10, # report up to 10 contacts
191
+ )
192
+
193
+ # Add sensor to robot config
194
+ g1_cfg = replace(get_g1_robot_cfg(), sensors=(self_collision_sensor,))
195
+
196
+ # Create scene
197
+ SCENE_CFG = SceneCfg(
198
+ terrain=TerrainImporterCfg(terrain_type="plane"),
199
+ entities={"robot": g1_cfg},
200
+ )
201
+
202
+ Key changes:
203
+
204
+ - No USD ``prim_path`` or cloning; the scene is described directly in MuJoCo.
205
+ - Materials, lights, and visual properties are applied via
206
+ ``MjSpec``-modifier dataclasses.
207
+ - See ``mjlab.utils.spec_config`` in the repository for helpers that apply
208
+ these changes for you.
209
+ - ``asset_name`` has been unified to ``entity_name`` across all configurations.
210
+
211
+ Complete Example Comparison
212
+ ---------------------------
213
+
214
+ A good way to learn the pattern is to compare concrete tasks that have already
215
+ been ported:
216
+
217
+ - Isaac Lab implementation (Beyond Mimic):
218
+
219
+ - https://github.com/HybridRobotics/whole_body_tracking/blob/main/source/whole_body_tracking/whole_body_tracking/tasks/tracking/tracking_env_cfg.py
220
+
221
+ - mjlab implementation:
222
+
223
+ - https://github.com/mujocolab/mjlab/blob/main/src/mjlab/tasks/tracking/tracking_env_cfg.py
224
+
225
+ You will see that:
226
+
227
+ - Manager dictionaries in ``mjlab`` mirror Isaac Lab's config classes,
228
+ - Reward, observation, command, and termination logic is almost identical,
229
+ - Scene and asset setup are simplified to pure MuJoCo.
230
+
231
+ Migration Checklist
232
+ -------------------
233
+
234
+ Use this as a quick checklist when porting a task:
235
+
236
+ 1. **Base class and imports**
237
+
238
+ - Replace Isaac Lab imports (for example,
239
+ ``from isaaclab.envs import ManagerBasedRLEnv``) with the corresponding
240
+ ``mjlab`` imports (for example,
241
+ ``from mjlab.envs import ManagerBasedRlEnvCfg``).
242
+
243
+ 2. **Manager configuration**
244
+
245
+ - Convert each Isaac Lab ``@configclass`` manager (``RewardsCfg``,
246
+ ``ObservationsCfg``, etc.) into a dictionary of config objects.
247
+ - Pass these dictionaries into ``ManagerBasedRlEnvCfg``.
248
+
249
+ 3. **Scene and assets**
250
+
251
+ - Replace ``InteractiveSceneCfg`` with a ``SceneCfg`` instance.
252
+ - Replace USD / ``prim_path`` logic with MuJoCo asset configs and scene
253
+ entities (for example, a robot from ``asset_zoo``).
254
+
255
+ 4. **Sensors and contact handling**
256
+
257
+ - Convert Isaac Lab ``ContactSensorCfg`` to
258
+ ``mjlab.utils.spec_config.ContactSensorCfg`` and attach it to the robot
259
+ config.
260
+
261
+ 5. **RL entry points**
262
+
263
+ - Make sure your training script or entry point uses the correct task id and
264
+ environment config (for example, via Gymnasium registration or direct
265
+ construction, depending on how your project is structured).
266
+
267
+ Tips and Support
268
+ ----------------
269
+
270
+ 1. Check the examples in the repository under:
271
+
272
+ - ``src/mjlab/tasks/``
273
+
274
+ 2. If you get stuck:
275
+
276
+ - Open an issue: https://github.com/mujocolab/mjlab/issues
277
+ - Start a discussion: https://github.com/mujocolab/mjlab/discussions
278
+
279
+ 3. Keep in mind MuJoCo vs Isaac Sim differences:
280
+
281
+ - Some Omniverse / USD rendering features do not have direct equivalents.
282
+ - Focus first on matching the **physics and observations**, then polish
283
+ visuals if needed.
mjlab/docs/source/motivation.rst ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _motivation:
2
+
3
+ Why mjlab?
4
+ ==========
5
+
6
+ The Problem
7
+ -----------
8
+
9
+ GPU-accelerated robotics simulation has great tools,
10
+ but each has tradeoffs:
11
+
12
+ **Isaac Lab**: Excellent API and RL abstractions, but
13
+ heavy installation, slow startup, and Omniverse overhead
14
+ make rapid iteration painful.
15
+
16
+ **MJX**: Fast and lightweight, but JAX's learning curve
17
+ and poor collision scaling (if using the ``jax``
18
+ `implementation <https://github.com/google-deepmind/mujoco/blob/32e08f9507c9bdc5a1a5411c6fa9f0346542b038/mjx/mujoco/mjx/_src/types.py#L28-L33>`_
19
+ rather than the ``warp`` one) limit adoption.
20
+
21
+ **Newton**: Brand new generic simulator supporting
22
+ multiple solvers (MuJoCo, VBD, etc.) with USD-based
23
+ format instead of MJCF/XML. Doesn't yet have the
24
+ ecosystem and community resources that MuJoCo has built
25
+ over the years.
26
+
27
+ Our Solution
28
+ ------------
29
+
30
+ **mjlab = Isaac Lab's API + MuJoCo's simplicity +
31
+ GPU acceleration**
32
+
33
+ We took Isaac Lab's proven manager-based architecture
34
+ and RL abstractions, then built them directly on MuJoCo
35
+ Warp. No translation layers, no Omniverse overhead.
36
+ Just fast, transparent physics.
37
+
38
+ Why Not Use Isaac Lab with Newton?
39
+ Isaac Lab recently added
40
+ `experimental Newton support <https://github.com/isaac-sim/IsaacLab/tree/dev/newton>`_,
41
+ which is great for existing Isaac users who want to
42
+ try MuJoCo via Newton's backend.
43
+
44
+ If you want a comprehensive platform (RL, imitation
45
+ learning, photorealistic rendering, etc.), use Isaac
46
+ Lab. If you want a focused tool for RL and sim2real
47
+ with MuJoCo, use mjlab.
48
+
49
+
50
+ Why Not Add MuJoCo Warp to Isaac Lab?
51
+ This would be fantastic for the ecosystem!
52
+ NVIDIA's team is exploring this with their
53
+ recent `experimental Newton integration <https://github.com/isaac-sim/IsaacLab/tree/dev/newton>`_,
54
+ which is exciting.
55
+
56
+ But for us, we wanted to start with something
57
+ more focused that we could realistically
58
+ maintain. Isaac Lab is architected around
59
+ Omniverse/Isaac Sim's powerful capabilities,
60
+ which makes sense given everything it supports.
61
+ Integrating MuJoCo Warp there would mean working
62
+ within that broader framework and supporting
63
+ use cases beyond our scope.
64
+
65
+ Maintaining multi-backend compatibility
66
+ naturally involves tradeoffs in complexity
67
+ and dependency management. By starting fresh, we could:
68
+
69
+ - Write a lean codebase optimized specifically for MuJoCo Warp
70
+ - Keep dependencies minimal and installation fast
71
+ - Maintain direct access to native mjModel/mjData structures
72
+ - Iterate quickly without navigating a larger platform's constraints
73
+
74
+ Think of mjlab as a love letter to Isaac
75
+ Lab's brilliant API design. We're bringing
76
+ those manager-based abstractions to researchers
77
+ who want something smaller and MuJoCo-specific.
78
+ It's complementary, not competitive.
79
+
80
+ Philosophy
81
+ ----------
82
+
83
+ **Bare Metal Performance**
84
+
85
+ - Direct MuJoCo Warp integration, no translation layers
86
+ - Native mjModel/mjData structures MuJoCo users know and love
87
+ - GPU-accelerated with minimal overhead
88
+
89
+ **Developer Experience First**
90
+
91
+ - One-line installation: ``uvx --from mjlab demo``
92
+ - Blazing fast startup
93
+ - Standard Python debugging (pdb anywhere!)
94
+ - Fast iteration cycles
95
+
96
+ **Focused Scope**
97
+
98
+ - Rigid-body robotics and RL, not trying to do everything
99
+ - Clean, maintainable codebase over feature bloat
100
+ - MuJoCo-native implementation, not a generic wrapper
101
+
102
+ When to Use mjlab
103
+ -----------------
104
+
105
+ **Use mjlab if you want:**
106
+
107
+ - Fast iteration and debugging
108
+ - Direct MuJoCo physics control
109
+ - Proven RL abstractions (Isaac Lab-style)
110
+ - GPU acceleration without heavyweight dependencies
111
+ - Simple installation and deployment
112
+
113
+ **Use Isaac Lab if you need:**
114
+
115
+ - Photorealistic rendering
116
+ - USD pipeline integration
117
+ - Omniverse ecosystem features
118
+
119
+ **Use Newton if you need:**
120
+
121
+ - Multi-physics solver support (e.g., deformables)
122
+ - Differentiable simulation
123
+
124
+
125
+ The Bottom Line
126
+ ---------------
127
+
128
+ mjlab isn't trying to replace everything. It's
129
+ built for researchers who love MuJoCo's simplicity
130
+ and want Isaac Lab's RL abstractions with GPU acceleration,
131
+ minus the overhead.
132
+
133
+
134
+
mjlab/docs/source/nan_guard.rst ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _nan-guard:
2
+
3
+ NaN Guard
4
+ =========
5
+
6
+ The NaN guard captures simulation states when NaN/Inf is detected, helping debug
7
+ numerical instability issues.
8
+
9
+ TL;DR
10
+ -----
11
+
12
+ **Running into NaN issues during training?** Enable the NaN guard with a single flag:
13
+
14
+ .. code-block:: bash
15
+
16
+ uv run train.py --enable-nan-guard True
17
+
18
+
19
+ This will automatically capture and save simulation states when NaN/Inf is
20
+ detected, making it easy to debug what went wrong.
21
+
22
+ You can also enable it programmatically in your simulation config:
23
+
24
+ .. code-block:: python
25
+
26
+ from mjlab.sim.sim import SimulationCfg
27
+ from mjlab.utils.nan_guard import NanGuardCfg
28
+
29
+ cfg = SimulationCfg(
30
+ nan_guard=NanGuardCfg(
31
+ enabled=True,
32
+ buffer_size=100,
33
+ output_dir="/tmp/mjlab/nan_dumps",
34
+ max_envs_to_dump=5,
35
+ ),
36
+ )
37
+
38
+ Configuration
39
+ -------------
40
+
41
+ ``enabled`` (default: ``False``)
42
+ Enable/disable NaN detection and dumping. When disabled, has minimal overhead.
43
+
44
+ ``buffer_size`` (default: ``100``)
45
+ Number of recent simulation states to keep in rolling buffer.
46
+
47
+ ``output_dir`` (default: ``"/tmp/mjlab/nan_dumps"``)
48
+ Directory where NaN dump files are saved.
49
+
50
+ ``max_envs_to_dump`` (default: ``5``)
51
+ Maximum number of NaN environments to dump to disk. All environments are
52
+ tracked in the buffer, but only the first N NaN environments are saved to
+ reduce dump size.
53
+
54
+ Behavior
55
+ --------
56
+
57
+ - **Captures** simulation state before each step (using ``mj_getState``)
58
+ - **Detects** NaN/Inf in ``qpos``, ``qvel``, ``qacc``, ``qacc_warmstart`` after each step
59
+ - **Dumps** buffer and model to disk on first detection
60
+ - **Stops** after the first dump — only dumps once per training run to avoid spam
61
+
62
+ Output Format
63
+ -------------
64
+
65
+ Each NaN detection creates timestamped files plus latest symlinks:
66
+
67
+ - ``nan_dump_TIMESTAMP.npz`` - Compressed state buffer
68
+ - ``states_step_NNNNNN`` - Captured states for each step (shape: ``[num_envs_dumped, state_size]``)
69
+ - ``_metadata`` - Dict with ``num_envs_total``, ``nan_env_ids``, ``dumped_env_ids``, etc.
70
+ - ``model_TIMESTAMP.mjb`` - MuJoCo model in binary format
71
+ - ``nan_dump_latest.npz`` - Symlink to most recent dump
72
+ - ``model_latest.mjb`` - Symlink to most recent model
73
+
74
+ Visualizing Dumps
75
+ -----------------
76
+
77
+ Use the interactive viewer to scrub through captured states:
78
+
79
+ .. code-block:: bash
80
+
81
+ # View latest dump.
82
+ uv run viz-nan /tmp/mjlab/nan_dumps/nan_dump_latest.npz
83
+
84
+ # Or view a specific dump.
85
+ uv run viz-nan /tmp/mjlab/nan_dumps/nan_dump_20251014_123456.npz
86
+
87
+
88
+ .. figure:: _static/content/nan_debug.gif
89
+ :alt: NaN Debug Viewer
90
+
91
+ NaN debug viewer.
92
+
93
+ The viewer provides:
94
+
95
+ - Step slider to scrub through the buffer
96
+ - Environment slider to compare different environments
97
+ - Info panel showing which environments have NaN/Inf
98
+ - 3D visualization of the robot and terrain at each state
99
+
100
+ This makes it easy to see exactly what went wrong and compare crashed
101
+ environments against clean ones.
102
+
103
+ Performance
104
+ -----------
105
+
106
+ When disabled (``enabled=False``), all operations are no-ops with
107
+ negligible overhead. When enabled, overhead scales with ``buffer_size`` and
108
+ ``max_envs_to_dump``.
109
+
110
+ Related Features
111
+ ----------------
112
+
113
+ NaN Detection Termination
114
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
115
+
116
+ While ``nan_guard`` helps **debug** NaN issues by capturing states, you can also
117
+ **prevent** training crashes using the ``nan_detection`` termination:
118
+
119
+ .. code-block:: python
120
+
121
+ from mjlab.envs.mdp.terminations import nan_detection
122
+ from mjlab.managers.termination_manager import TerminationTermCfg
123
+
124
+ # In your termination config:
125
+ nan_term: TerminationTermCfg = field(
126
+ default_factory=lambda: TerminationTermCfg(
127
+ func=nan_detection,
128
+ time_out=False
129
+ )
130
+ )
131
+
132
+
133
+ This marks NaN environments as terminated, allowing them to reset while training
134
+ continues. Terminations are logged as ``Episode_Termination/nan_term`` in your
135
+ metrics.
136
+
137
+ .. important::
138
+
139
+ ``nan_detection`` is a band-aid, not a cure. If NaNs occur
140
+ during your task objective (e.g., your task is to grasp objects but NaNs
141
+ happen when grasping), the policy will never learn to complete the task since
142
+ it resets before receiving rewards. Monitor your ``Episode_Termination/nan_term``
143
+ metrics carefully.
144
+
145
+ **When to use which:**
146
+
147
+ - ``nan_guard``: Debug and understand why NaNs occur (always do this first)
148
+ - ``nan_detection``: Keep training stable while working on a permanent fix
mjlab/docs/source/observation.rst ADDED
@@ -0,0 +1,333 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _observation:
2
+
3
+ Observation History and Delay
4
+ =============================
5
+
6
+ Observations have two temporal features: history and delay. History stacks past
7
+ frames for temporal context, while delay can be used to model sensor latency.
8
+
9
+ TL;DR
10
+ -----
11
+
12
+ **Add history to stack frames:**
13
+
14
+ .. code-block:: python
15
+
16
+ from mjlab.managers.observation_manager import ObservationTermCfg
17
+
18
+ joint_vel: ObservationTermCfg = ObservationTermCfg(
19
+ func=joint_vel,
20
+ history_length=5, # Keep last 5 frames
21
+ flatten_history_dim=True # Flatten for MLP: (12,) * 5 = (60,)
22
+ )
23
+
24
+
25
+ **Add delay to model sensor latency:**
26
+
27
+ .. code-block:: python
28
+
29
+ # At 50Hz control (20ms/step): lag=2-3 → 40-60ms latency
30
+ camera: ObservationTermCfg = ObservationTermCfg(
31
+ func=camera_obs,
32
+ delay_min_lag=2,
33
+ delay_max_lag=3,
34
+ )
35
+
36
+
37
+ **Combine both:**
38
+
39
+ .. code-block:: python
40
+
41
+ joint_pos: ObservationTermCfg = ObservationTermCfg(
42
+ func=joint_pos,
43
+ delay_min_lag=1,
44
+ delay_max_lag=3, # Delayed observations
45
+ history_length=5, # Stack 5 delayed frames
46
+ flatten_history_dim=True
47
+ )
48
+ # Pipeline: compute → delay → stack → flatten
49
+
50
+
51
+ Observation History
52
+ -------------------
53
+
54
+ History stacks past observations to provide temporal context.
55
+
56
+ Basic Usage
57
+ ^^^^^^^^^^^
58
+
59
+ **Flattened history (for MLPs):**
60
+
61
+ .. code-block:: python
62
+
63
+ joint_vel: ObservationTermCfg = ObservationTermCfg(
64
+ func=joint_vel, # Returns (num_envs, 12)
65
+ history_length=3,
66
+ flatten_history_dim=True # Output: (num_envs, 36)
67
+ )
68
+
69
+
70
+ **Structured history (for RNNs):**
71
+
72
+ .. code-block:: python
73
+
74
+ joint_vel: ObservationTermCfg = ObservationTermCfg(
75
+ func=joint_vel, # Returns (num_envs, 12)
76
+ history_length=3,
77
+ flatten_history_dim=False # Output: (num_envs, 3, 12)
78
+ )
79
+
80
+
81
+ Group-Level Override
82
+ ^^^^^^^^^^^^^^^^^^^^
83
+
84
+ Apply history to all terms in a group:
85
+
86
+
87
+ .. code-block:: python
88
+
89
+ @dataclass
90
+ class PolicyCfg(ObservationGroupCfg):
91
+ concatenate_terms: bool = True
92
+ history_length: int = 5 # Applied to all terms
93
+ flatten_history_dim: bool = True
94
+
95
+ joint_pos: ObservationTermCfg = ObservationTermCfg(func=joint_pos)
96
+ joint_vel: ObservationTermCfg = ObservationTermCfg(func=joint_vel)
97
+ # Both terms get 5-frame history, flattened
98
+
99
+
100
+ Term-level settings override group settings:
101
+
102
+
103
+ .. code-block:: python
104
+
105
+ @dataclass
106
+ class PolicyCfg(ObservationGroupCfg):
107
+ history_length: int = 3 # Default for group
108
+
109
+ joint_pos: ObservationTermCfg = ObservationTermCfg(
110
+ func=joint_pos,
111
+ history_length=5 # Override: use 5 instead of 3
112
+ )
113
+
114
+
115
+
116
+ Reset Behavior
117
+ ^^^^^^^^^^^^^^
118
+
119
+ History buffers are cleared on environment reset. The first observation after
120
+ reset is backfilled across all history slots, ensuring valid data from step 0.
121
+
122
+
123
+ .. code-block:: python
124
+
125
+ # At reset
126
+ buffer = [obs_0, obs_0, obs_0] # Backfilled
127
+
128
+ # After 2 steps
129
+ buffer = [obs_0, obs_1, obs_2] # Normal accumulation
130
+
131
+
132
+ History Flattening Order (Term-Major vs Time-Major)
133
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
134
+
135
+ When ``flatten_history_dim=True`` and ``concatenate_terms=True``, mjlab uses
136
+ **term-major** ordering, where each term's full history is flattened before
137
+ concatenating terms:
138
+
139
+
140
+ .. code-block:: bash
141
+
142
+ Term A: shape (num_envs, obs_dim_A) with history_length=3
143
+ Term B: shape (num_envs, obs_dim_B) with history_length=3
144
+
145
+ mjlab output (TERM-MAJOR):
146
+ [A_t0, A_t1, A_t2, B_t0, B_t1, B_t2, ...]
147
+ └─ all A history ─┘ └─ all B history ─┘
148
+
149
+
150
+ An alternative approach is **time-major** (or frame-major) ordering, where
151
+ complete observation frames are built at each timestep before concatenating
152
+ across time:
153
+
154
+
155
+ .. code-block:: bash
156
+
157
+ TIME-MAJOR (alternative approach):
158
+ [A_t0, B_t0, ..., A_t1, B_t1, ..., A_t2, B_t2, ...]
159
+ └─ frame t0 ──┘ └─ frame t1 ──┘ └─ frame t2 ──┘
160
+
161
+
162
+ **Sim2sim compatibility:** If you need to transfer policies to/from frameworks
163
+ that use time-major ordering, you will need to reorder observations. This
164
+ affects policies trained with history but not those without.
165
+
166
+ Observation Delay
167
+ -----------------
168
+
169
+ Real robots have sensors with communication delays (WiFi, USB). The delay system
170
+ models sensor latency by returning observations from earlier timesteps.
171
+
172
+ Delay Parameters
173
+ ^^^^^^^^^^^^^^^^
174
+
175
+ ``delay_min_lag`` / ``delay_max_lag`` (default: 0) Lag range in steps. Uniformly
176
+ samples an integer lag from ``[min_lag, max_lag]`` (both inclusive).
177
+ ``lag=0`` means current observation, ``lag=2`` means 2 steps ago.
178
+
179
+ ``delay_per_env`` (default: True) If True, each environment gets a different
180
+ lag. If False, all environments share the same lag.
181
+
182
+ ``delay_hold_prob`` (default: 0.0)
183
+ Probability [0, 1] of keeping the previous lag instead of resampling.
184
+
185
+ ``delay_update_period`` (default: 0) How often (in steps) to resample the lag.
186
+ If 0, resample every step. If N > 0, the lag value stays constant for N steps
187
+ before being resampled (creates temporally correlated latency patterns).
188
+
189
+ ``delay_per_env_phase`` (default: True) If True and ``delay_update_period > 0``,
190
+ stagger resample timing across environments with random phase offsets.
191
+
192
+ .. note::
193
+
194
+ ``delay_update_period`` controls how often the *lag value* is resampled, not
195
+ how often observations are refreshed. You still get a new (delayed) observation
196
+ every step - the lag just stays constant for N steps before being resampled.
197
+
198
+ **Visualizing delay (50Hz control = 20ms/step):**
199
+
200
+ .. code-block:: bash
201
+
202
+ Sensor captures: A B C D E F G H
203
+ ↓ ↓ ↓ ↓ ↓ ↓ ↓ ↓
204
+ Control steps: 0 1 2 3 4 5 6 7
205
+ 20ms 40ms 60ms 80ms 100ms 120ms 140ms 160ms
206
+
207
+ No delay (baseline - perfect sensor):
208
+ You receive: A B C D E F G H
209
+ ↑ current observation every step
210
+
211
+ Delay with lag=2:
212
+ You receive: A A A B C D E F
213
+ ↑clamp↑ ↑ ↑ ↑ ↑ ↑ ↑
214
+ Steps 0-1: lag clamped (buffer not full yet)
215
+ Step 2+: 40ms delay, every step gets NEW observation
216
+
217
+
218
+ **Example - Camera with 40-60ms latency at 50Hz control:**
219
+
220
+
221
+ .. code-block:: python
222
+
223
+ camera: ObservationTermCfg = ObservationTermCfg(
224
+ func=camera_obs,
225
+ delay_min_lag=2, # 40ms latency
226
+ delay_max_lag=3, # 60ms latency
227
+ )
228
+
229
+ Computing Delays from Real-World Latency
230
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
231
+
232
+ Convert real-world latency to simulation steps:
233
+
234
+
235
+ delay_steps = latency_ms / (1000 / control_hz)
236
+
237
+
238
+ **Example at 50Hz control (20ms per step):**
239
+ - 40ms latency = 40 / 20 = 2 steps
240
+ - 60ms latency = 60 / 20 = 3 steps
241
+ - 100ms latency = 100 / 20 = 5 steps
242
+
243
+ **Example at 100Hz control (10ms per step):**
244
+ - 40ms latency = 40 / 10 = 4 steps
245
+ - 60ms latency = 60 / 10 = 6 steps
246
+
247
+ .. note::
248
+
249
+ Delays are quantized to control timesteps. At 50Hz control (20ms/step),
250
+ you can only represent 0ms, 20ms, 40ms, 60ms, etc. To approximate a 45ms sensor,
251
+ use ``delay_min_lag=2, delay_max_lag=3`` which uniformly samples lag ∈ {2, 3}
252
+ (both inclusive), giving either 40ms or 60ms delay.
253
+
254
+ Examples
255
+ ^^^^^^^^
256
+
257
+ **Joint encoders (no delay):**
258
+
259
+ .. code-block:: python
260
+
261
+ joint_pos: ObservationTermCfg = ObservationTermCfg(func=joint_pos)
262
+ # delay_min_lag=delay_max_lag=0 by default.
263
+
264
+
265
+ **Camera with 40-60ms latency at 50Hz control:**
266
+
267
+ .. code-block:: python
268
+
269
+ # 40-60ms latency = 2-3 steps at 50Hz (20ms/step)
270
+ camera: ObservationTermCfg = ObservationTermCfg(
271
+ func=camera_obs,
272
+ delay_min_lag=2, # 40ms
273
+ delay_max_lag=3, # 60ms
274
+ )
275
+
276
+
277
+ **Mixed system - fast encoders and slow camera:**
278
+
279
+ .. code-block:: python
280
+
281
+ @dataclass
282
+ class PolicyCfg(ObservationGroupCfg):
283
+ # Fast encoders (no delay)
284
+ joint_pos: ObservationTermCfg = ObservationTermCfg(
285
+ func=joint_pos,
286
+ )
287
+
288
+ # Camera with 40-80ms latency
289
+ camera: ObservationTermCfg = ObservationTermCfg(
290
+ func=camera_obs,
291
+ delay_min_lag=2, # 40ms
292
+ delay_max_lag=4, # 80ms
293
+ )
294
+
295
+
296
+ Processing Pipeline
297
+ -------------------
298
+
299
+ Observations flow through this pipeline:
300
+
301
+
302
+ compute → noise → clip → scale → delay → history → flatten
303
+
304
+
305
+ **Why delay before history?** History stacks delayed observations. This models
306
+ real systems where you buffer old sensor readings, not future ones.
307
+
308
+ Example with both:
309
+
310
+ .. code-block:: python
311
+
312
+ joint_vel: ObservationTermCfg = ObservationTermCfg(
313
+ func=joint_vel,
314
+ scale=0.1, # Scale raw values
315
+ delay_min_lag=1, # 20ms delay at 50Hz
316
+ delay_max_lag=2, # 40ms delay at 50Hz
317
+ history_length=3, # Stack 3 delayed frames
318
+ flatten_history_dim=True
319
+ )
320
+ # Pipeline:
321
+ # 1. compute() returns (num_envs, 12)
322
+ # 2. scale: multiply by 0.1
323
+ # 3. delay: return observation from 1-2 steps ago
324
+ # 4. history: stack last 3 delayed frames → (num_envs, 3, 12)
325
+ # 5. flatten: reshape → (num_envs, 36)
326
+
327
+
328
+ Performance
329
+ -----------
330
+
331
+ Delay buffers are only created when ``delay_max_lag > 0``. Terms with no delay
332
+ (the default) have zero overhead. Similarly, history buffers are only created
333
+ when ``history_length > 0``.
mjlab/docs/source/randomization.rst ADDED
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Domain Randomization
2
+ ====================
3
+
4
+ Domain randomization varies physical parameters during training so that policies
5
+ are robust to modeling errors and real-world variation. This guide shows
6
+ how to attach randomization terms to your environment using ``EventTerm`` and
7
+ ``mdp.randomize_field``.
8
+
9
+ TL;DR
10
+ -----
11
+
12
+ Use an ``EventTerm`` that calls ``mdp.randomize_field`` with a target **field**, a
13
+ **value range** (or per-axis ranges), and an **operation** describing how to
14
+ apply the draw.
15
+
16
+ .. code-block:: python
17
+
18
+ from mjlab.managers.event_manager import EventTermCfg
19
+ from mjlab.managers.scene_entity_config import SceneEntityCfg
20
+ from mjlab.envs import mdp
21
+
22
+ foot_friction: EventTermCfg = EventTermCfg(
23
+ mode="reset", # randomize each episode
24
+ func=mdp.randomize_field,
25
+ domain_randomization=True, # marks this as domain randomization
26
+ params={
27
+ "asset_cfg": SceneEntityCfg("robot", geom_names=[".*_foot.*"]),
28
+ "field": "geom_friction",
29
+ "ranges": (0.3, 1.2),
30
+ "operation": "abs",
31
+ },
32
+ )
33
+
34
+ Domain Randomization Flag
35
+ -------------------------
36
+
37
+ When creating an ``EventTermCfg`` for domain randomization, set ``domain_randomization=True``.
38
+ This allows the environment to track which fields are being randomized:
39
+
40
+ .. code-block:: python
41
+
42
+ EventTermCfg(
43
+ mode="reset",
44
+ func=mdp.randomize_field,
45
+ domain_randomization=True, # required for DR tracking
46
+ params={"field": "geom_friction", ...},
47
+ )
48
+
49
+ This flag is especially useful when using custom class-based event terms instead of
50
+ ``mdp.randomize_field``.
51
+
52
+ Event Modes
53
+ -----------
54
+
55
+ * ``"startup"`` - randomize once at initialization
56
+ * ``"reset"`` - randomize at every episode reset
57
+ * ``"interval"`` - randomize at regular time intervals
58
+
59
+ Available Fields
60
+ ----------------
61
+
62
+ **Joint/DOF:** ``dof_armature``, ``dof_frictionloss``, ``dof_damping``, ``jnt_range``,
63
+ ``jnt_stiffness``, ``qpos0``
64
+
65
+ **Body:** ``body_mass``, ``body_ipos``, ``body_iquat``, ``body_inertia``, ``body_pos``,
66
+ ``body_quat``
67
+
68
+ **Geom:** ``geom_friction``, ``geom_pos``, ``geom_quat``, ``geom_rgba``
69
+
70
+ **Site:** ``site_pos``, ``site_quat``
71
+
72
+ Randomization Parameters
73
+ ------------------------
74
+
75
+ **Distribution:** ``"uniform"`` (default), ``"log_uniform"`` (values must be > 0),
76
+ ``"gaussian"`` (``mean, std``)
77
+
78
+ **Operation:** ``"abs"`` (default, set), ``"scale"`` (multiply), ``"add"`` (offset)
79
+
80
+ Axis selection
81
+ ^^^^^^^^^^^^^^
82
+
83
+ Multi-dimensional fields can be randomized per-axis.
84
+
85
+ **Friction.** Geoms have three coefficients ``[tangential, torsional, rolling]``.
86
+ For ``condim=3`` (standard frictional contact), only **axis 0 (tangential)**
87
+ affects contact behavior:
88
+
89
+ .. code-block:: python
90
+
91
+ # Tangential friction (affects condim=3)
92
+ params={"field": "geom_friction", "ranges": {0: (0.3, 1.2)}}
93
+
94
+ # Tangential + torsional (torsional matters for condim >= 4)
95
+ params={"field": "geom_friction", "ranges": {0: (0.5, 1.0), 1: (0.001, 0.01)}}
96
+
97
+ # X and Y position
98
+ params={"field": "body_pos", "axes": [0, 1], "ranges": (-0.1, 0.1)}
99
+
100
+
101
+ Examples
102
+ --------
103
+
104
+ Friction (reset)
105
+ ^^^^^^^^^^^^^^^^
106
+
107
+ .. code-block:: python
108
+
109
+ foot_friction: EventTermCfg = EventTermCfg(
110
+ mode="reset",
111
+ func=mdp.randomize_field,
112
+ domain_randomization=True,
113
+ params={
114
+ "asset_cfg": SceneEntityCfg("robot", geom_names=[".*_foot.*"]),
115
+ "field": "geom_friction",
116
+ "ranges": (0.3, 1.2),
117
+ "operation": "abs",
118
+ },
119
+ )
120
+
121
+ .. note::
122
+
123
+ Give your robot's collision geoms higher **priority** than terrain
124
+ (geom priority defaults to 0). Then you only need to randomize robot friction.
125
+ MuJoCo will use the higher-priority geom's friction in (robot, terrain)
126
+ contacts.
127
+
128
+ .. code-block:: python
129
+
130
+ from mjlab.utils.spec_config import CollisionCfg
131
+
132
+ robot_collision = CollisionCfg(
133
+ geom_names_expr=[".*_foot.*"],
134
+ priority=1,
135
+ friction=(0.6,),
136
+ condim=3,
137
+ )
138
+
139
+
140
+ Joint Offset (startup)
141
+ ^^^^^^^^^^^^^^^^^^^^^^
142
+
143
+ Randomize default joint positions to simulate joint offset calibration errors:
144
+
145
+ .. code-block:: python
146
+
147
+ joint_offset: EventTermCfg = EventTermCfg(
148
+ mode="startup",
149
+ func=mdp.randomize_field,
150
+ domain_randomization=True,
151
+ params={
152
+ "asset_cfg": SceneEntityCfg("robot", joint_names=[".*"]),
153
+ "field": "qpos0",
154
+ "ranges": (-0.01, 0.01),
155
+ "operation": "add",
156
+ },
157
+ )
158
+
159
+
160
+ Center of Mass (COM) (startup)
161
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
162
+
163
+ .. code-block:: python
164
+
165
+ com: EventTermCfg = EventTermCfg(
166
+ mode="startup",
167
+ func=mdp.randomize_field,
168
+ domain_randomization=True,
169
+ params={
170
+ "asset_cfg": SceneEntityCfg("robot", body_names=["torso"]),
171
+ "field": "body_ipos",
172
+ "ranges": {0: (-0.02, 0.02), 1: (-0.02, 0.02)},
173
+ "operation": "add",
174
+ },
175
+ )
176
+
177
+ Custom Class-Based Event Terms
178
+ ------------------------------
179
+
180
+ You can create custom event terms using classes instead of functions. This is useful
181
+ for event terms that need to maintain state or perform initialization logic:
182
+
183
+ .. code-block:: python
184
+
185
+ class RandomizeTerrainFriction:
186
+ """Custom event term that randomizes terrain friction."""
187
+
188
+ def __init__(self, cfg, env):
189
+ # Find the terrain geom index during initialization
190
+ self._terrain_idx = None
191
+ for idx, geom in enumerate(env.scene.spec.geoms):
192
+ if geom.name == "terrain":
193
+ self._terrain_idx = idx
194
+
195
+ if self._terrain_idx is None:
196
+ raise ValueError("Terrain geom not found in the model.")
197
+
198
+ def __call__(self, env, env_ids, ranges):
199
+ """Called each time the event is triggered."""
200
+ from mjlab.utils.math import sample_uniform
201
+ env.sim.model.geom_friction[env_ids, self._terrain_idx, 0] = sample_uniform(
202
+ ranges[0], ranges[1], len(env_ids), env.device
203
+ )
204
+
205
+
206
+ # Use the custom class in your environment config
207
+ terrain_friction: EventTermCfg = EventTermCfg(
208
+ mode="reset",
209
+ func=RandomizeTerrainFriction,
210
+ domain_randomization=True,
211
+ params={"field": "geom_friction", "ranges": (0.3, 1.2)},
212
+ )
213
+
214
+
215
+ Migrating from Isaac Lab
216
+ ------------------------
217
+
218
+ Isaac Lab exposes explicit friction combination modes (``multiply``, ``average``,
219
+ ``min``, ``max``). MuJoCo instead uses **priority-based selection**: if one
220
+ contacting geom has higher ``priority``, its friction is used; otherwise the
221
+ **element-wise maximum** is used. See the
222
+ `MuJoCo contact documentation <https://mujoco.readthedocs.io/en/stable/computation/index.html#contact>`_
223
+ for details.
mjlab/docs/source/raycast_sensor.rst ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _raycast_sensor:
2
+
3
+ RayCast Sensor
4
+ ==============
5
+
6
+ The RayCast sensor provides GPU-accelerated raycasting for terrain scanning,
7
+ obstacle detection, and depth sensing. It supports flexible ray patterns,
8
+ multiple frame attachment options, and configurable alignment modes.
9
+
10
+ .. raw:: html
11
+
12
+ <video controls style="display: block; margin: 0 auto; max-width: 100%; height: auto;">
13
+ <source src="../_static/raycast_demo.mp4" type="video/mp4">
14
+ </video>
15
+
16
+ Quick Start
17
+ -----------
18
+
19
+ .. code-block:: python
20
+
21
+ from mjlab.sensor import RayCastSensorCfg, GridPatternCfg, ObjRef
22
+
23
+ raycast_cfg = RayCastSensorCfg(
24
+ name="terrain_scan",
25
+ frame=ObjRef(type="body", name="base", entity="robot"),
26
+ pattern=GridPatternCfg(size=(1.0, 1.0), resolution=0.1),
27
+ max_distance=5.0,
28
+ )
29
+
30
+ scene_cfg = SceneCfg(
31
+ entities={"robot": robot_cfg},
32
+ sensors=(raycast_cfg,),
33
+ )
34
+
35
+ # Access at runtime.
36
+ sensor = env.scene["terrain_scan"]
37
+ data = sensor.data
38
+ distances = data.distances # [B, N] distance to hit, -1 if miss
39
+ hit_pos = data.hit_pos_w # [B, N, 3] world-space hit positions
40
+ normals = data.normals_w # [B, N, 3] surface normals
41
+
42
+
43
+ Ray Patterns
44
+ ------------
45
+
46
+ Ray patterns define the spatial distribution and direction of rays emitted
47
+ from the sensor frame. Two pattern types are available for different use cases.
48
+
49
+ Grid Pattern
50
+ ^^^^^^^^^^^^
51
+
52
+ Parallel rays arranged in a 2D grid with fixed spatial resolution.
53
+
54
+ .. image:: _static/pattern_grid.jpg
55
+ :width: 600
56
+ :align: center
57
+ :alt: Parallel grid ray pattern with fixed footprint
58
+
59
+ .. note::
60
+
61
+ The grid pattern produces a *fixed ground footprint* that does not change
62
+ with sensor height. Ray spacing is defined in world units (meters).
63
+
64
+ .. raw:: html
65
+
66
+ <video autoplay loop muted playsinline style="display: block; margin: 0 auto; max-width: 100%; height: auto;">
67
+ <source src="../_static/pattern_grid.mp4" type="video/mp4">
68
+ </video>
69
+
70
+ .. code-block:: python
71
+
72
+ from mjlab.sensor import GridPatternCfg
73
+
74
+ pattern = GridPatternCfg(
75
+ size=(1.0, 1.0), # Grid dimensions in meters
76
+ resolution=0.1, # Spacing between rays
77
+ direction=(0.0, 0.0, -1.0), # Ray direction (down)
78
+ )
79
+
80
+ **Characteristics:**
81
+
82
+ - All rays are parallel
83
+ - Spacing defined in meters
84
+ - Ground footprint is height-invariant
85
+ - Good for: height maps, terrain scanning, spatially uniform sampling
86
+
87
+
88
+ Pinhole Camera Pattern
89
+ ^^^^^^^^^^^^^^^^^^^^^^
90
+
91
+ Diverging rays emitted from a single origin, analogous to a depth camera or
92
+ LiDAR sensor.
93
+
94
+ .. raw:: html
95
+
96
+ <video autoplay loop muted playsinline style="display: block; margin: 0 auto; max-width: 100%; height: auto;">
97
+ <source src="../_static/pattern_pinhole.mp4" type="video/mp4">
98
+ </video>
99
+
100
+ .. note::
101
+
102
+ Unlike the grid pattern, the pinhole pattern has a *fixed angular field of view*.
103
+ As the sensor moves higher, the ground coverage increases.
104
+
105
+ .. code-block:: python
106
+
107
+ from mjlab.sensor import PinholeCameraPatternCfg
108
+
109
+ # Explicit parameters.
110
+ pattern = PinholeCameraPatternCfg(
111
+ width=16,
112
+ height=12,
113
+ fovy=45.0, # Vertical FOV in degrees
114
+ )
115
+
116
+ # From a MuJoCo camera.
117
+ pattern = PinholeCameraPatternCfg.from_mujoco_camera("robot/depth_cam")
118
+
119
+ # From intrinsic matrix.
120
+ pattern = PinholeCameraPatternCfg.from_intrinsic_matrix(
121
+ intrinsic_matrix=[500, 0, 320, 0, 500, 240, 0, 0, 1],
122
+ width=640,
123
+ height=480,
124
+ )
125
+
126
+ **Characteristics:**
127
+
128
+ - Rays diverge from a single point
129
+ - Coverage defined in angular units (degrees)
130
+ - Ground footprint increases with height
131
+ - Good for: depth cameras, LiDAR, perspective sensing
132
+
133
+
134
+ Pattern Comparison
135
+ ^^^^^^^^^^^^^^^^^^
136
+
137
+ .. list-table::
138
+ :header-rows: 1
139
+ :widths: 20 40 40
140
+
141
+ * - Aspect
142
+ - Grid
143
+ - Pinhole
144
+ * - Ray direction
145
+ - Parallel
146
+ - Diverging
147
+ * - Spacing unit
148
+ - Meters
149
+ - Degrees (FOV)
150
+ * - Height affects coverage
151
+ - No
152
+ - Yes
153
+ * - Projection model
154
+ - Orthographic
155
+ - Perspective
156
+
157
+
158
+ Frame Attachment
159
+ ----------------
160
+
161
+ Rays attach to a frame in the scene via ``ObjRef``.
162
+
163
+ .. code-block:: python
164
+
165
+ frame = ObjRef(type="body", name="base", entity="robot")
166
+ frame = ObjRef(type="site", name="scan_site", entity="robot")
167
+ frame = ObjRef(type="geom", name="sensor_mount", entity="robot")
168
+
169
+ The ``exclude_parent_body`` option (default: ``True``) prevents rays from
170
+ hitting the body to which they are attached.
171
+
172
+
173
+ Ray Alignment
174
+ -------------
175
+
176
+ The ``ray_alignment`` setting controls how rays orient relative to the frame
177
+ when the body rotates.
178
+
179
+ .. raw:: html
180
+
181
+ <video autoplay loop muted playsinline
182
+ style="display: block; margin: 0 auto; max-width: 100%; height: auto;">
183
+ <source src="../_static/ray_alignment_comparison.mp4" type="video/mp4">
184
+ </video>
185
+
186
+ .. list-table::
187
+ :header-rows: 1
188
+ :widths: 15 45 40
189
+
190
+ * - Mode
191
+ - Description
192
+ - Use Case
193
+ * - ``"base"``
194
+ - Full position and rotation
195
+ - Body-mounted sensors
196
+ * - ``"yaw"``
197
+ - Ignores pitch and roll
198
+ - Terrain height maps
199
+ * - ``"world"``
200
+ - Fixed world direction
201
+ - Gravity-aligned sensing
202
+
203
+ .. code-block:: python
204
+
205
+ RayCastSensorCfg(
206
+ name="height_scan",
207
+ frame=ObjRef(type="body", name="base", entity="robot"),
208
+ pattern=GridPatternCfg(size=(1.0, 1.0), resolution=0.1),
209
+ ray_alignment="yaw",
210
+ )
211
+
212
+
213
+ Geom Group Filtering
214
+ --------------------
215
+
216
+ MuJoCo geoms can be assigned to groups 0-5. Use ``include_geom_groups`` to
217
+ restrict which geoms rays can hit.
218
+
219
+ .. code-block:: python
220
+
221
+ RayCastSensorCfg(
222
+ name="terrain_only",
223
+ frame=ObjRef(type="body", name="base", entity="robot"),
224
+ pattern=GridPatternCfg(),
225
+ include_geom_groups=(0, 1),
226
+ )
227
+
228
+
229
+ Output Data
230
+ -----------
231
+
232
+ The sensor returns ``RayCastData``:
233
+
234
+ .. code-block:: python
235
+
236
+ @dataclass
237
+ class RayCastData:
238
+ distances: Tensor # [B, N] distance to hit, -1 if miss
239
+ hit_pos_w: Tensor # [B, N, 3] world-space hit positions
240
+ normals_w: Tensor # [B, N, 3] surface normals
241
+ pos_w: Tensor # [B, 3] sensor frame position
242
+ quat_w: Tensor # [B, 4] sensor frame orientation (w, x, y, z)
243
+
244
+ ``B`` is the number of environments and ``N`` is the number of rays.
245
+
246
+
247
+ Debug Visualization
248
+ -------------------
249
+
250
+ Enable visualization with ``debug_vis=True``:
251
+
252
+ .. code-block:: python
253
+
254
+ RayCastSensorCfg(
255
+ name="scan",
256
+ frame=ObjRef(type="body", name="base", entity="robot"),
257
+ pattern=GridPatternCfg(),
258
+ debug_vis=True,
259
+ )
260
+
261
+
262
+ Examples
263
+ --------
264
+
265
+ Height Map for Locomotion
266
+ ^^^^^^^^^^^^^^^^^^^^^^^^^
267
+
268
+ .. code-block:: python
269
+
270
+ # Dense grid for terrain-aware locomotion.
271
+ height_scan = RayCastSensorCfg(
272
+ name="height_scan",
273
+ frame=ObjRef(type="body", name="base", entity="robot"),
274
+ pattern=GridPatternCfg(
275
+ size=(1.6, 1.0),
276
+ resolution=0.1,
277
+ direction=(0.0, 0.0, -1.0),
278
+ ),
279
+ ray_alignment="yaw", # Stay level on slopes
280
+ max_distance=2.0,
281
+ exclude_parent_body=True,
282
+ )
283
+
284
+ # In observation function.
285
+ def height_obs(env: ManagerBasedRlEnv) -> torch.Tensor:
286
+ sensor = env.scene["height_scan"]
287
+ return sensor.data.distances # [B, N]
288
+
289
+
290
+ Depth Camera Simulation
291
+ ^^^^^^^^^^^^^^^^^^^^^^^
292
+
293
+ .. code-block:: python
294
+
295
+ # Simulate a depth camera.
296
+ depth_cam = RayCastSensorCfg(
297
+ name="depth",
298
+ frame=ObjRef(type="site", name="camera_site", entity="robot"),
299
+ pattern=PinholeCameraPatternCfg.from_mujoco_camera("robot/depth_cam"),
300
+ max_distance=10.0,
301
+ )
302
+
303
+ # Reshape to image.
304
+ def depth_image(env: ManagerBasedRlEnv) -> torch.Tensor:
305
+ sensor = env.scene["depth"]
306
+ distances = sensor.data.distances # [B, W*H]
307
+ return distances.view(-1, 12, 16) # [B, H, W]
308
+
309
+
310
+ Obstacle Detection
311
+ ^^^^^^^^^^^^^^^^^^
312
+
313
+ .. code-block:: python
314
+
315
+ # Forward-facing obstacle scan.
316
+ obstacle_scan = RayCastSensorCfg(
317
+ name="obstacle",
318
+ frame=ObjRef(type="body", name="head", entity="robot"),
319
+ pattern=GridPatternCfg(
320
+ size=(0.5, 0.3),
321
+ resolution=0.1,
322
+ direction=(-1.0, 0.0, 0.0), # Forward
323
+ ),
324
+ max_distance=3.0,
325
+ include_geom_groups=(0,), # Filtering to only group 0 geoms
326
+ )
327
+
328
+
329
+ Running the Demo
330
+ ----------------
331
+
332
+ A demo script is included to visualize the sensor on varied terrain:
333
+
334
+ .. code-block:: bash
335
+
336
+ # Grid pattern (default)
337
+ uv run mjpython scripts/demos/raycast_sensor.py --pattern grid
338
+
339
+ # Pinhole camera pattern
340
+ uv run mjpython scripts/demos/raycast_sensor.py --pattern pinhole
341
+
342
+ # With yaw alignment
343
+ uv run mjpython scripts/demos/raycast_sensor.py --alignment yaw
344
+
345
+ # Viser viewer (for remote/headless)
346
+ uv run python scripts/demos/raycast_sensor.py --viewer viser
mjlab/docs/source/sensors.rst ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. _sensors:
2
+
3
+ Sensors
4
+ =======
5
+
6
+ Sensors provide a configurable way to measure physical quantities
7
+ in your simulation. They live at the scene level alongside entities
8
+ and terrain, can reference multiple entities (e.g., detect contact
9
+ between robot and terrain), and return structured data for use in
10
+ rewards, terminations, and observations.
11
+
12
+ Quick Note: Entity Data vs Sensors
13
+ ----------------------------------
14
+
15
+ **Before diving into sensors**, it's helpful to understand that mjlab provides two complementary ways to access simulation data:
16
+
17
+ **Entity Data** (``entity.data.*``)
18
+
19
+ - Common quantities are available out of the box with zero configuration
20
+ - Provides convenient coordinate frame transformations between world and body frames, COM and link frames
21
+ - Offers a familiar API for users coming from Isaac Lab
22
+ - Example: ``robot.data.root_link_lin_vel_b``, ``robot.data.joint_pos``
23
+
24
+ **Sensors** (this system)
25
+
26
+ - Provides reusable, configurable sensor definitions that can be shared across tasks
27
+ - Extensible through subclassing to add custom logic like noise, filtering, or processing
28
+ - Maps directly to real robot sensors like IMUs, force sensors, and cameras
29
+ - Example: ``env.scene["feet_contact"].data``, ``env.scene["robot/imu"].data``
30
+
31
+ **Use them together:**
32
+
33
+ - Use entity data for quick access to state and transforms
34
+ - Use sensors for measurements that span entities or need configuration
35
+ - Mix both approaches freely in your rewards and observations based on your needs
36
+
37
+ .. code-block:: python
38
+
39
+ from mjlab.sensor import BuiltinSensorCfg, ContactSensorCfg, ContactMatch, ObjRef
40
+
41
+ scene_cfg = SceneCfg(
42
+ entities={"robot": robot_cfg},
43
+ sensors=(
44
+ BuiltinSensorCfg(
45
+ name="imu_acc",
46
+ sensor_type="accelerometer",
47
+ obj=ObjRef(type="site", name="imu_site", entity="robot"),
48
+ ),
49
+ ContactSensorCfg(
50
+ name="feet_contact",
51
+ primary=ContactMatch(mode="geom", pattern=r".*_foot$", entity="robot"),
52
+ secondary=ContactMatch(mode="body", pattern="terrain"),
53
+ fields=("found", "force"),
54
+ ),
55
+ ),
56
+ )
57
+
58
+ # Access at runtime.
59
+ imu_acc_data = env.scene["robot/imu_acc"].data # [B, 3] acceleration
60
+ feet_contact = env.scene["feet_contact"].data # ContactData with .found, .force
61
+
62
+
63
+ Sensor Types
64
+ ------------
65
+
66
+ mjlab provides three sensor implementations:
67
+
68
+ BuiltinSensor
69
+ ^^^^^^^^^^^^^
70
+ Wraps MuJoCo's native sensor types (57 total) for measuring forces, positions,
71
+ velocities, and other physical quantities. Returns raw `torch.Tensor` data.
72
+
73
+ ContactSensor
74
+ ^^^^^^^^^^^^^
75
+ Detects contacts between bodies, geoms, or subtrees. Returns structured
76
+ `ContactData` with forces, positions, air time metrics, etc.
77
+
78
+ RayCastSensor
79
+ ^^^^^^^^^^^^^
80
+ GPU-accelerated raycasting for terrain scanning and depth sensing. Supports
81
+ grid and pinhole camera patterns with configurable alignment modes.
82
+ See :ref:`raycast_sensor` for full documentation.
83
+
84
+ BuiltinSensor
85
+ -------------
86
+
87
+ Sensor Types
88
+ ^^^^^^^^^^^^
89
+
90
+ +-----------+----------------------------------------------------------------------------------------------------------------------------------------------------+
91
+ | Category | Available Sensors |
92
+ +===========+====================================================================================================================================================+
93
+ | **Site** | ``accelerometer``, ``velocimeter``, ``gyro``, ``force``, ``torque``, ``magnetometer``, ``rangefinder`` |
94
+ +-----------+----------------------------------------------------------------------------------------------------------------------------------------------------+
95
+ | **Joint** | ``jointpos``, ``jointvel``, ``jointlimitpos``, ``jointlimitvel``, ``jointlimitfrc``, ``jointactuatorfrc`` |
96
+ +-----------+----------------------------------------------------------------------------------------------------------------------------------------------------+
97
+ | **Frame** | ``framepos``, ``framequat``, ``framexaxis``, ``frameyaxis``, ``framezaxis``, ``framelinvel``, ``frameangvel``, ``framelinacc``, ``frameangacc`` |
98
+ +-----------+----------------------------------------------------------------------------------------------------------------------------------------------------+
99
+ | **Other** | ``actuatorpos``, ``actuatorvel``, ``actuatorfrc``, ``subtreecom``, ``subtreelinvel``, ``subtreeangmom``, ``clock``, ``e_potential``, ``e_kinetic`` |
100
+ +-----------+----------------------------------------------------------------------------------------------------------------------------------------------------+
101
+
102
+ Usage
103
+ ^^^^^
104
+
105
+ BuiltinSensor returns a ``torch.Tensor`` with
106
+ shape ``[N_envs, dim]`` where dim depends on the
107
+ sensor type (e.g., 3 for vectors, 4 for quaternions).
108
+ Configure with ``BuiltinSensorCfg``, specifying the
109
+ sensor type, attached object via ``ObjRef``, and
110
+ optional parameters like ``cutoff`` to limit
111
+ output magnitude or ``ref`` for frame sensors.
112
+
113
+ Examples
114
+ ^^^^^^^^
115
+
116
+ .. code-block:: python
117
+
118
+ # Accelerometer.
119
+ imu_acc = BuiltinSensorCfg(
120
+ name="imu_acc",
121
+ sensor_type="accelerometer",
122
+ obj=ObjRef(type="site", name="imu_site", entity="robot"),
123
+ )
124
+
125
+ # Joint limits.
126
+ joint_limit = BuiltinSensorCfg(
127
+ name="knee_limit",
128
+ sensor_type="jointlimitpos",
129
+ obj=ObjRef(type="joint", name="knee_joint", entity="robot"),
130
+ cutoff=0.1,
131
+ )
132
+
133
+ # Frame tracking (relative position).
134
+ ee_pos = BuiltinSensorCfg(
135
+ name="ee_pos",
136
+ sensor_type="framepos",
137
+ obj=ObjRef(type="body", name="end_effector", entity="robot"),
138
+ ref=ObjRef(type="body", name="base", entity="robot"),
139
+ )
140
+
141
+ ContactSensor
142
+ -------------
143
+
144
+ ContactSensor detects and reports contact between
145
+ bodies, geoms, or entire subtrees in your simulation.
146
+ It's particularly useful for foot contact detection,
147
+ self-collision monitoring, and measuring ground reaction
148
+ forces. The sensor tracks contacts between a "primary" set
149
+ of objects (e.g., robot feet) and an optional "secondary"
150
+ set (e.g., terrain), returning structured data including
151
+ forces, positions, and timing information.
152
+
153
+ Pattern Matching
154
+ ^^^^^^^^^^^^^^^^
155
+ Use ``ContactMatch`` to specify what to track:
156
+
157
+ .. code-block:: python
158
+
159
+ ContactMatch(
160
+ mode="geom", # "geom", "body", or "subtree"
161
+ pattern=r".*_foot$", # Regex or list of names
162
+ entity="robot", # Optional entity scope
163
+ exclude=(r".*_heel$",), # Optional exclusions
164
+ )
165
+
166
+
167
+ Patterns can be:
168
+ - **List of exact names:** ``["left_foot", "right_foot"]``
169
+ - **Regex:** ``r".*_collision$"`` (expands to all matches)
170
+ - **With exclusions:** Filter out specific matches
171
+
172
+ Configuration
173
+ ^^^^^^^^^^^^^
174
+
175
+ .. code-block:: python
176
+
177
+ ContactSensorCfg(
178
+ name="feet_ground",
179
+ primary=ContactMatch(...), # What to track
180
+ secondary=ContactMatch(...), # Optional filter
181
+ fields=("found", "force"), # Data to extract
182
+ reduce="maxforce", # Contact selection
183
+ num_slots=1, # Contacts per primary
184
+ track_air_time=False, # Landing/takeoff tracking
185
+ global_frame=False, # Force frame
186
+ )
187
+
188
+
189
+ **Fields:** ``"found"``, ``"force"``, ``"torque"``, ``"dist"``, ``"pos"``, ``"normal"``, ``"tangent"``
190
+
191
+ **Reduction modes:**
192
+ - ``"none"`` - Fast, non-deterministic
193
+ - ``"mindist"`` - Closest contacts
194
+ - ``"maxforce"`` - Strongest contacts
195
+ - ``"netforce"`` - Returns single synthetic contact at force-weighted centroid with net wrench
196
+
197
+ Output: ContactData
198
+ ^^^^^^^^^^^^^^^^^^^
199
+
200
+ .. code-block:: python
201
+
202
+ @dataclass
203
+ class ContactData:
204
+ found: Tensor | None # [B, N] contact count
205
+ force: Tensor | None # [B, N, 3]
206
+ torque: Tensor | None # [B, N, 3]
207
+ dist: Tensor | None # [B, N] penetration
208
+ pos: Tensor | None # [B, N, 3] position
209
+ normal: Tensor | None # [B, N, 3] primary→secondary
210
+ tangent: Tensor | None # [B, N, 3]
211
+
212
+ # With track_air_time=True.
213
+ current_air_time: Tensor | None
214
+ last_air_time: Tensor | None
215
+ current_contact_time: Tensor | None
216
+ last_contact_time: Tensor | None
217
+
218
+
219
+ Shape: ``[B, N * num_slots]`` where N = number of primary matches
220
+
221
+ Understanding num_slots
222
+ ^^^^^^^^^^^^^^^^^^^^^^^
223
+
224
+ - ``num_slots=1`` (most common): Single representative contact per match
225
+ - ``num_slots > 1``: Multiple contact points per geom/body
226
+ - ``reduce="netforce"``: Always returns exactly one contact regardless of num_slots
227
+
228
+ .. code-block:: python
229
+
230
+ # 4 feet, 1 contact each → [B, 4].
231
+ ContactSensorCfg(primary=ContactMatch(pattern=["LF", "RF", "LH", "RH"]), num_slots=1)
232
+
233
+ # 4 feet, 3 contacts each → [B, 12].
234
+ ContactSensorCfg(primary=ContactMatch(pattern=["LF", "RF", "LH", "RH"]), num_slots=3)
235
+
236
+
237
+ Examples
238
+ ^^^^^^^^
239
+
240
+ .. code-block:: python
241
+
242
+ # Foot contacts with forces.
243
+ feet = ContactSensorCfg(
244
+ name="feet_ground",
245
+ primary=ContactMatch(mode="geom", pattern=r".*_foot$", entity="robot"),
246
+ secondary=ContactMatch(mode="body", pattern="terrain"),
247
+ fields=("found", "force", "pos"),
248
+ reduce="maxforce",
249
+ )
250
+
251
+ # Self-collision detection.
252
+ self_collision = ContactSensorCfg(
253
+ name="self_collision",
254
+ primary=ContactMatch(mode="subtree", pattern="pelvis", entity="robot"),
255
+ secondary=ContactMatch(mode="subtree", pattern="pelvis", entity="robot"),
256
+ fields=("found",),
257
+ )
258
+
259
+ # Air time tracking for gait analysis.
260
+ feet_air = ContactSensorCfg(
261
+ name="feet_air",
262
+ primary=ContactMatch(pattern=["LF", "RF", "LH", "RH"], entity="robot"),
263
+ track_air_time=True,
264
+ fields=("found",),
265
+ )
266
+
267
+ # Net ground reaction force.
268
+ grf = ContactSensorCfg(
269
+ name="grf",
270
+ primary=ContactMatch(mode="subtree", pattern=["left_ankle", "right_ankle"], entity="robot"),
271
+ secondary=ContactMatch(mode="body", pattern="terrain"),
272
+ fields=("force",),
273
+ reduce="netforce",
274
+ )
275
+
276
+ Auto-discovery
277
+ --------------
278
+
279
+ Sensors defined in an entity's XML are automatically discovered and prefixed with the entity's name.
280
+
281
+ .. code-block:: xml
282
+
283
+ <!-- In robot.xml -->
284
+ <sensor>
285
+ <accelerometer name="trunk_imu" site="imu_site"/>
286
+ <jointpos name="hip_sensor" joint="hip_joint"/>
287
+ </sensor>
288
+
289
+ .. code-block:: python
290
+
291
+ imu = env.scene["robot/trunk_imu"]
292
+ hip = env.scene["robot/hip_sensor"]
293
+
294
+
295
+ Usage Patterns
296
+ --------------
297
+
298
+ **In observations**
299
+
300
+ .. code-block:: python
301
+
302
+ def imu_acc_obs(env: ManagerBasedRlEnv) -> torch.Tensor:
303
+ sensor = env.scene["robot/imu_acc"]
304
+ return sensor.data # [N_envs, 3]
305
+
306
+
307
+ **In rewards**
308
+
309
+ .. code-block:: python
310
+
311
+ def foot_slip(env: ManagerBasedRlEnv) -> torch.Tensor:
312
+ sensor = env.scene["feet_ground"]
313
+ vel = sensor.data.force[..., :2].norm(dim=-1)
314
+ in_contact = sensor.data.found > 0
315
+ return -torch.where(in_contact, vel, 0.0).mean(dim=1)
316
+
317
+
318
+ **In terminations**
319
+
320
+ .. code-block:: python
321
+
322
+ def illegal_contact(env: ManagerBasedRlEnv) -> torch.Tensor:
323
+ sensor = env.scene["nonfoot_contact"]
324
+ return torch.any(sensor.data.found, dim=-1) # [B]
325
+
326
+
327
+ **Air time helpers**
328
+
329
+ .. code-block:: python
330
+
331
+ sensor = env.scene["feet_air"]
332
+ first_contact = sensor.compute_first_contact(dt=0.01) # Just landed
333
+ first_air = sensor.compute_first_air(dt=0.01) # Just took off
334
+
mjlab/notebooks/create_new_task.ipynb ADDED
@@ -0,0 +1,856 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "colab_type": "text",
7
+ "id": "view-in-github"
8
+ },
9
+ "source": [
10
+ "<a href=\"https://colab.research.google.com/github/mujocolab/mjlab/blob/main/notebooks/create_new_task.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "markdown",
15
+ "metadata": {
16
+ "id": "PO76KS1i-MwA"
17
+ },
18
+ "source": [
19
+ "# **🤖 CartPole Tutorial with mjlab**\n",
20
+ "\n",
21
+ "This notebook demonstrates how to create a custom reinforcement learning task using mjlab. We'll build a CartPole environment from scratch, including:\n",
22
+ "\n",
23
+ "1. **Robot Definition** - Define the CartPole model in MuJoCo XML\n",
24
+ "2. **Task Configuration** - Set up observations, actions, rewards, and terminations\n",
25
+ "3. **Training** - Train a policy using PPO\n",
26
+ "4. **Evaluation** - Visualize the simulation with the trained policy\n",
27
+ "\n",
28
+ "> **Note**: This tutorial demonstrates how to create a new task in mjlab. For more context, see the [official documentation](https://mujocolab.github.io/mjlab/)."
29
+ ]
30
+ },
31
+ {
32
+ "cell_type": "markdown",
33
+ "metadata": {
34
+ "id": "3ywZTgfR3C_w"
35
+ },
36
+ "source": [
37
+ "## **📦 Setup and Installation**"
38
+ ]
39
+ },
40
+ {
41
+ "cell_type": "code",
42
+ "execution_count": null,
43
+ "metadata": {
44
+ "collapsed": true,
45
+ "id": "dtLMJHzy3Nee"
46
+ },
47
+ "outputs": [],
48
+ "source": "# Clone the mjlab repository\n!if [ ! -d \"mjlab\" ]; then git clone -q https://github.com/mujocolab/mjlab.git; fi\n%cd /content/mjlab\n\n# Install mjlab in editable mode\n!pip install -e . -q\n\nprint(\"✓ Installation complete!\")"
49
+ },
50
+ {
51
+ "cell_type": "markdown",
52
+ "metadata": {
53
+ "id": "SSf2943z3b0s"
54
+ },
55
+ "source": [
56
+ "### **🔑 WandB Setup**\n",
57
+ "\n",
58
+ "Configure Weights & Biases for experiment tracking. Add your WandB API key to Colab Secrets:\n",
59
+ "- `WANDB_API_KEY`: from [wandb.ai/authorize](https://wandb.ai/authorize)\n",
60
+ "- `WANDB_ENTITY`: your wandb entity name"
61
+ ]
62
+ },
63
+ {
64
+ "cell_type": "code",
65
+ "execution_count": null,
66
+ "metadata": {
67
+ "id": "KC9ywCnm3dGg"
68
+ },
69
+ "outputs": [],
70
+ "source": [
71
+ "import os\n",
72
+ "\n",
73
+ "from google.colab import userdata\n",
74
+ "\n",
75
+ "try:\n",
76
+ " # Set this to use wandb logger\n",
77
+ " os.environ[\"WANDB_API_KEY\"] = userdata.get(\"WANDB_API_KEY\")\n",
78
+ " os.environ[\"WANDB_ENTITY\"] = userdata.get(\"WANDB_ENTITY\")\n",
79
+ "\n",
80
+ " print(\"✓ WandB configured successfully!\")\n",
81
+ "except (AttributeError, KeyError):\n",
82
+ " # Set this to disable wandb logger\n",
83
+ " os.environ[\"WANDB_MODE\"] = \"disabled\"\n",
84
+ "\n",
85
+ " print(\"⚠ WandB secrets not found. Training will proceed without WandB logging.\")"
86
+ ]
87
+ },
88
+ {
89
+ "cell_type": "markdown",
90
+ "metadata": {
91
+ "id": "mispfmy73lmq"
92
+ },
93
+ "source": [
94
+ "---\n",
95
+ "\n",
96
+ "## **🤖 Step 1: Define the Robot**\n",
97
+ "\n",
98
+ "We'll create a simple CartPole robot with:\n",
99
+ "- A sliding cart (1 DOF)\n",
100
+ "- A hinged pole (1 DOF)\n",
101
+ "- A velocity actuator to control the cart"
102
+ ]
103
+ },
104
+ {
105
+ "cell_type": "markdown",
106
+ "metadata": {
107
+ "id": "-FvJYPWD3scd"
108
+ },
109
+ "source": [
110
+ "### **📁 Structure Directories**"
111
+ ]
112
+ },
113
+ {
114
+ "cell_type": "code",
115
+ "execution_count": null,
116
+ "metadata": {
117
+ "id": "OP-yET-R3ofN"
118
+ },
119
+ "outputs": [],
120
+ "source": [
121
+ "# Create the cartpole robot directory structure\n",
122
+ "!mkdir -p /content/mjlab/src/mjlab/asset_zoo/robots/cartpole/\n",
123
+ "!mkdir -p /content/mjlab/src/mjlab/asset_zoo/robots/cartpole/xmls\n",
124
+ "\n",
125
+ "print(\"✓ Directory structure created\")"
126
+ ]
127
+ },
128
+ {
129
+ "cell_type": "markdown",
130
+ "metadata": {
131
+ "id": "MRyN1Pok3u25"
132
+ },
133
+ "source": [
134
+ "### **📝 Create MuJoCo XML Model**\n",
135
+ "\n",
136
+ "This XML defines the CartPole physics:\n",
137
+ "- **Ground plane** for visualization\n",
138
+ "- **Cart body** with a sliding joint (±2m range)\n",
139
+ "- **Pole body** with a hinge joint (±90° range)\n",
140
+ "- **Velocity actuator** for cart control"
141
+ ]
142
+ },
143
+ {
144
+ "cell_type": "code",
145
+ "execution_count": null,
146
+ "metadata": {
147
+ "id": "gWGyFX5V3yWc"
148
+ },
149
+ "outputs": [],
150
+ "source": [
151
+ "%%writefile /content/mjlab/src/mjlab/asset_zoo/robots/cartpole/xmls/cartpole.xml\n",
152
+ "<mujoco model=\"cartpole\">\n",
153
+ " <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n",
154
+ " <worldbody>\n",
155
+ " <geom name=\"ground\" type=\"plane\" pos=\"0 0 0\" size=\"5 5 0.1\" rgba=\"0.8 0.9 0.8 1\"/>\n",
156
+ " <body name=\"cart\" pos=\"0 0 0.1\">\n",
157
+ " <geom type=\"box\" size=\"0.2 0.1 0.1\" rgba=\"0.2 0.2 0.8 1\" mass=\"1.0\"/>\n",
158
+ " <joint name=\"slide\" type=\"slide\" axis=\"1 0 0\" limited=\"true\" range=\"-2 2\"/>\n",
159
+ " <body name=\"pole\" pos=\"0 0 0.1\">\n",
160
+ " <geom type=\"capsule\" size=\"0.05 0.5\" fromto=\"0 0 0 0 0 1\" rgba=\"0.8 0.2 0.2 1\" mass=\"2.0\"/>\n",
161
+ " <joint name=\"hinge\" type=\"hinge\" axis=\"0 1 0\" range=\"-90 90\"/>\n",
162
+ " </body>\n",
163
+ " </body>\n",
164
+ " </worldbody>\n",
165
+ " <actuator>\n",
166
+ " <velocity name=\"slide_velocity\" joint=\"slide\" ctrlrange=\"-20 20\" kv=\"20\"/>\n",
167
+ " </actuator>\n",
168
+ "</mujoco>"
169
+ ]
170
+ },
171
+ {
172
+ "cell_type": "markdown",
173
+ "metadata": {
174
+ "id": "MpYCG9jI31dZ"
175
+ },
176
+ "source": [
177
+ "### **⚙️ Create Robot Configuration**"
178
+ ]
179
+ },
180
+ {
181
+ "cell_type": "code",
182
+ "execution_count": null,
183
+ "metadata": {
184
+ "id": "HDhiyDTn4AVa"
185
+ },
186
+ "outputs": [],
187
+ "source": [
188
+ "%%writefile /content/mjlab/src/mjlab/asset_zoo/robots/cartpole/cartpole_constants.py\n",
189
+ "from pathlib import Path\n",
190
+ "import mujoco\n",
191
+ "\n",
192
+ "from mjlab import MJLAB_SRC_PATH\n",
193
+ "from mjlab.entity import Entity, EntityCfg, EntityArticulationInfoCfg\n",
194
+ "from mjlab.actuator import XmlVelocityActuatorCfg\n",
195
+ "\n",
196
+ "CARTPOLE_XML: Path = (\n",
197
+ " MJLAB_SRC_PATH / \"asset_zoo\" / \"robots\" / \"cartpole\" / \"xmls\" / \"cartpole.xml\"\n",
198
+ ")\n",
199
+ "assert CARTPOLE_XML.exists(), f\"XML not found: {CARTPOLE_XML}\"\n",
200
+ "\n",
201
+ "def get_spec() -> mujoco.MjSpec:\n",
202
+ " return mujoco.MjSpec.from_file(str(CARTPOLE_XML))\n",
203
+ "\n",
204
+ "def get_cartpole_robot_cfg() -> EntityCfg:\n",
205
+ " \"\"\"Get a fresh CartPole robot configuration instance.\"\"\"\n",
206
+ " actuators = (\n",
207
+ " XmlVelocityActuatorCfg(\n",
208
+ " target_names_expr=(\"slide\",),\n",
209
+ " ),\n",
210
+ " )\n",
211
+ " articulation = EntityArticulationInfoCfg(actuators=actuators)\n",
212
+ " return EntityCfg(\n",
213
+ " spec_fn=get_spec,\n",
214
+ " articulation=articulation\n",
215
+ " )\n",
216
+ "\n",
217
+ "# if __name__ == \"__main__\":\n",
218
+ "# import mujoco.viewer as viewer\n",
219
+ "# robot = Entity(get_cartpole_robot_cfg())\n",
220
+ "# viewer.launch(robot.spec.compile())"
221
+ ]
222
+ },
223
+ {
224
+ "cell_type": "code",
225
+ "execution_count": null,
226
+ "metadata": {
227
+ "id": "-WSaDod04FwN"
228
+ },
229
+ "outputs": [],
230
+ "source": [
231
+ "# Create __init__.py for the cartpole robot package\n",
232
+ "%%writefile /content/mjlab/src/mjlab/asset_zoo/robots/cartpole/__init__.py\n",
233
+ "# Empty __init__.py to mark the directory as a Python package"
234
+ ]
235
+ },
236
+ {
237
+ "cell_type": "code",
238
+ "execution_count": null,
239
+ "metadata": {
240
+ "id": "W1tiBPfp_oVP"
241
+ },
242
+ "outputs": [],
243
+ "source": [
244
+ "import sys\n",
245
+ "\n",
246
+ "# Append src dir to python path\n",
247
+ "mjlab_src = \"/content/mjlab/src\"\n",
248
+ "if mjlab_src not in sys.path:\n",
249
+ " sys.path.insert(0, mjlab_src)\n",
250
+ " print(f\"✓ Added {mjlab_src} to Python path\")"
251
+ ]
252
+ },
253
+ {
254
+ "cell_type": "markdown",
255
+ "metadata": {
256
+ "id": "ToWF84qC4Hfg"
257
+ },
258
+ "source": [
259
+ "### **✅ Verify Robot Setup**\n",
260
+ "\n",
261
+ "Let's test that the robot can be loaded correctly."
262
+ ]
263
+ },
264
+ {
265
+ "cell_type": "code",
266
+ "execution_count": null,
267
+ "metadata": {
268
+ "id": "5tVsvqzQ4J9h"
269
+ },
270
+ "outputs": [],
271
+ "source": [
272
+ "from mjlab.asset_zoo.robots.cartpole.cartpole_constants import get_cartpole_robot_cfg\n",
273
+ "\n",
274
+ "from mjlab.entity import Entity\n",
275
+ "\n",
276
+ "# Load the robot\n",
277
+ "robot = Entity(get_cartpole_robot_cfg())\n",
278
+ "model = robot.spec.compile()\n",
279
+ "\n",
280
+ "# Display robot information\n",
281
+ "print(\"✓ CartPole robot loaded successfully!\")\n",
282
+ "print(f\" • Degrees of Freedom (DOF): {model.nv}\")\n",
283
+ "print(f\" • Number of Actuators: {model.nu}\")\n",
284
+ "print(f\" • Bodies: {model.nbody}\")\n",
285
+ "print(f\" • Joints: {model.njnt}\")"
286
+ ]
287
+ },
288
+ {
289
+ "cell_type": "markdown",
290
+ "metadata": {
291
+ "id": "e2_9dixlHON1"
292
+ },
293
+ "source": [
294
+ "### **📋 Register the Robot**\n",
295
+ "\n",
296
+ "Add the CartPole robot to the asset zoo registry."
297
+ ]
298
+ },
299
+ {
300
+ "cell_type": "code",
301
+ "execution_count": null,
302
+ "metadata": {
303
+ "id": "8qDIF__lHPcb"
304
+ },
305
+ "outputs": [],
306
+ "source": [
307
+ "# Add CartPole import to robots __init__.py\n",
308
+ "with open(\"/content/mjlab/src/mjlab/asset_zoo/robots/__init__.py\", \"a\") as f:\n",
309
+ " f.write(\"\\n# CartPole robot\\n\")\n",
310
+ " f.write(\"from mjlab.asset_zoo.robots.cartpole.cartpole_constants import \")\n",
311
+ " f.write(\"get_cartpole_robot_cfg as get_cartpole_robot_cfg\\n\")\n",
312
+ "\n",
313
+ "print(\"✓ CartPole robot registered in asset zoo\")"
314
+ ]
315
+ },
316
+ {
317
+ "cell_type": "markdown",
318
+ "metadata": {
319
+ "id": "6lVD_L6PHWNm"
320
+ },
321
+ "source": [
322
+ "---\n",
323
+ "\n",
324
+ "## **🎯 Step 2: Define the Task (MDP)**\n",
325
+ "\n",
326
+ "Now we'll define the Markov Decision Process:\n",
327
+ "- **Observations**: pole angle, angular velocity, cart position, cart velocity\n",
328
+ "- **Actions**: cart velocity commands\n",
329
+ "- **Rewards**: upright reward + effort penalty\n",
330
+ "- **Terminations**: pole tips over or timeout\n",
331
+ "- **Events**: random pushes for robustness"
332
+ ]
333
+ },
334
+ {
335
+ "cell_type": "markdown",
336
+ "metadata": {
337
+ "id": "RQxe4TBrHb-I"
338
+ },
339
+ "source": [
340
+ "### **📁 Create Task Directory**"
341
+ ]
342
+ },
343
+ {
344
+ "cell_type": "code",
345
+ "execution_count": null,
346
+ "metadata": {
347
+ "id": "nWBqdkziHc2G"
348
+ },
349
+ "outputs": [],
350
+ "source": [
351
+ "!mkdir -p /content/mjlab/src/mjlab/tasks/cartpole\n",
352
+ "\n",
353
+ "print(\"✓ Task directory created\")"
354
+ ]
355
+ },
356
+ {
357
+ "cell_type": "markdown",
358
+ "metadata": {
359
+ "id": "GJfjPpm0Hhj1"
360
+ },
361
+ "source": [
362
+ "### **📝 Create Environment Configuration**\n",
363
+ "\n",
364
+ "This file contains the MDP (Markov Decision Process) components:\n",
365
+ "1. **Scene Config**: 64 parallel environments\n",
366
+ "2. **Actions**: Joint velocity control with 20.0 scale\n",
367
+ "3. **Observations**: Normalized state variables\n",
368
+ "4. **Rewards**: Upright reward (5.0) + effort penalty (-0.01)\n",
369
+ "5. **Events**: Joint resets + random pushes\n",
370
+ "6. **Terminations**: Pole tipped (>30°) or timeout (10s)"
371
+ ]
372
+ },
373
+ {
374
+ "cell_type": "code",
375
+ "execution_count": null,
376
+ "metadata": {
377
+ "id": "javx9XDIHkFI"
378
+ },
379
+ "outputs": [],
380
+ "source": [
381
+ "%%writefile /content/mjlab/src/mjlab/tasks/cartpole/env_cfg.py\n",
382
+ "\"\"\"CartPole task environment configuration.\"\"\"\n",
383
+ "\n",
384
+ "import math\n",
385
+ "import torch\n",
386
+ "\n",
387
+ "from mjlab.envs import ManagerBasedRlEnvCfg\n",
388
+ "from mjlab.envs.mdp.actions import JointVelocityActionCfg\n",
389
+ "from mjlab.managers.observation_manager import ObservationGroupCfg, ObservationTermCfg\n",
390
+ "from mjlab.managers.reward_manager import RewardTermCfg\n",
391
+ "from mjlab.managers.termination_manager import TerminationTermCfg\n",
392
+ "from mjlab.managers.event_manager import EventTermCfg\n",
393
+ "from mjlab.managers.scene_entity_config import SceneEntityCfg\n",
394
+ "from mjlab.scene import SceneCfg\n",
395
+ "from mjlab.sim import MujocoCfg, SimulationCfg\n",
396
+ "from mjlab.viewer import ViewerConfig\n",
397
+ "from mjlab.asset_zoo.robots.cartpole.cartpole_constants import get_cartpole_robot_cfg\n",
398
+ "from mjlab.envs import mdp\n",
399
+ "\n",
400
+ "\n",
401
+ "def cartpole_env_cfg(play: bool = False) -> ManagerBasedRlEnvCfg:\n",
402
+ " \"\"\"Create CartPole environment configuration.\n",
403
+ "\n",
404
+ " Args:\n",
405
+ " play: If True, disables corruption and extends episode length for evaluation.\n",
406
+ " \"\"\"\n",
407
+ "\n",
408
+ " # ==============================================================================\n",
409
+ " # Scene Configuration\n",
410
+ " # ==============================================================================\n",
411
+ "\n",
412
+ " scene_cfg = SceneCfg(\n",
413
+ " num_envs=64 if not play else 16, # Fewer envs for play mode\n",
414
+ " extent=1.0, # Spacing between environments\n",
415
+ " entities={\"robot\": get_cartpole_robot_cfg()},\n",
416
+ " )\n",
417
+ "\n",
418
+ " viewer_cfg = ViewerConfig(\n",
419
+ " origin_type=ViewerConfig.OriginType.ASSET_BODY,\n",
420
+ " entity_name=\"robot\",\n",
421
+ " body_name=\"pole\",\n",
422
+ " distance=3.0,\n",
423
+ " elevation=10.0,\n",
424
+ " azimuth=90.0,\n",
425
+ " )\n",
426
+ "\n",
427
+ " sim_cfg = SimulationCfg(\n",
428
+ " mujoco=MujocoCfg(\n",
429
+ " timestep=0.02, # 50 Hz control\n",
430
+ " iterations=1,\n",
431
+ " ),\n",
432
+ " )\n",
433
+ "\n",
434
+ " # ==============================================================================\n",
435
+ " # Actions\n",
436
+ " # ==============================================================================\n",
437
+ "\n",
438
+ " actions = {\n",
439
+ " \"joint_pos\": JointVelocityActionCfg(\n",
440
+ " entity_name=\"robot\",\n",
441
+ " actuator_names=(\".*\",),\n",
442
+ " scale=20.0,\n",
443
+ " use_default_offset=False,\n",
444
+ " ),\n",
445
+ " }\n",
446
+ "\n",
447
+ " # ==============================================================================\n",
448
+ " # Observations\n",
449
+ " # ==============================================================================\n",
450
+ "\n",
451
+ " actor_terms = {\n",
452
+ " \"angle\": ObservationTermCfg(\n",
453
+ " func=lambda env: env.sim.data.qpos[:, 1:2] / math.pi\n",
454
+ " ),\n",
455
+ " \"ang_vel\": ObservationTermCfg(\n",
456
+ " func=lambda env: env.sim.data.qvel[:, 1:2] / 5.0\n",
457
+ " ),\n",
458
+ " \"cart_pos\": ObservationTermCfg(\n",
459
+ " func=lambda env: env.sim.data.qpos[:, 0:1] / 2.0\n",
460
+ " ),\n",
461
+ " \"cart_vel\": ObservationTermCfg(\n",
462
+ " func=lambda env: env.sim.data.qvel[:, 0:1] / 20.0\n",
463
+ " ),\n",
464
+ " }\n",
465
+ "\n",
466
+ " observations = {\n",
467
+ " \"actor\": ObservationGroupCfg(\n",
468
+ " terms=actor_terms,\n",
469
+ " concatenate_terms=True,\n",
470
+ " enable_corruption=not play, # Disable corruption in play mode\n",
471
+ " ),\n",
472
+ " \"critic\": ObservationGroupCfg(\n",
473
+ " terms=actor_terms, # Critic uses same observations\n",
474
+ " concatenate_terms=True,\n",
475
+ " enable_corruption=False,\n",
476
+ " ),\n",
477
+ " }\n",
478
+ "\n",
479
+ " # ==============================================================================\n",
480
+ " # Rewards\n",
481
+ " # ==============================================================================\n",
482
+ "\n",
483
+ " def compute_upright_reward(env):\n",
484
+ " \"\"\"Reward for keeping pole upright (cosine of angle).\"\"\"\n",
485
+ " return env.sim.data.qpos[:, 1].cos()\n",
486
+ "\n",
487
+ " def compute_effort_penalty(env):\n",
488
+ " \"\"\"Penalty for control effort.\"\"\"\n",
489
+ " return -0.01 * (env.sim.data.ctrl[:, 0] ** 2)\n",
490
+ "\n",
491
+ " rewards = {\n",
492
+ " \"upright\": RewardTermCfg(func=compute_upright_reward, weight=5.0),\n",
493
+ " \"effort\": RewardTermCfg(func=compute_effort_penalty, weight=1.0),\n",
494
+ " }\n",
495
+ "\n",
496
+ " # ==============================================================================\n",
497
+ " # Events\n",
498
+ " # ==============================================================================\n",
499
+ "\n",
500
+ " def random_push_cart(env, env_ids, force_range=(-5, 5)):\n",
501
+ " \"\"\"Apply random force to cart for robustness training.\"\"\"\n",
502
+ " n = len(env_ids)\n",
503
+ " random_forces = (\n",
504
+ " torch.rand(n, device=env.device) *\n",
505
+ " (force_range[1] - force_range[0]) +\n",
506
+ " force_range[0]\n",
507
+ " )\n",
508
+ " env.sim.data.qfrc_applied[env_ids, 0] = random_forces\n",
509
+ "\n",
510
+ " events = {\n",
511
+ " \"reset_robot_joints\": EventTermCfg(\n",
512
+ " func=mdp.reset_joints_by_offset,\n",
513
+ " mode=\"reset\",\n",
514
+ " params={\n",
515
+ " \"asset_cfg\": SceneEntityCfg(\"robot\"),\n",
516
+ " \"position_range\": (-0.1, 0.1),\n",
517
+ " \"velocity_range\": (-0.1, 0.1),\n",
518
+ " },\n",
519
+ " ),\n",
520
+ " }\n",
521
+ "\n",
522
+ " # Add random pushes only in training mode\n",
523
+ " if not play:\n",
524
+ " events[\"random_push\"] = EventTermCfg(\n",
525
+ " func=random_push_cart,\n",
526
+ " mode=\"interval\",\n",
527
+ " interval_range_s=(1.0, 2.0),\n",
528
+ " params={\"force_range\": (-20.0, 20.0)},\n",
529
+ " )\n",
530
+ "\n",
531
+ " # ==============================================================================\n",
532
+ " # Terminations\n",
533
+ " # ==============================================================================\n",
534
+ "\n",
535
+ " def check_pole_tipped(env):\n",
536
+ " \"\"\"Check if pole has tipped beyond 30 degrees.\"\"\"\n",
537
+ " return env.sim.data.qpos[:, 1].abs() > math.radians(30)\n",
538
+ "\n",
539
+ " terminations = {\n",
540
+ " \"timeout\": TerminationTermCfg(func=mdp.time_out, time_out=True),\n",
541
+ " \"tipped\": TerminationTermCfg(func=check_pole_tipped, time_out=False),\n",
542
+ " }\n",
543
+ "\n",
544
+ " # ==============================================================================\n",
545
+ " # Environment Configuration\n",
546
+ " # ==============================================================================\n",
547
+ "\n",
548
+ " return ManagerBasedRlEnvCfg(\n",
549
+ " scene=scene_cfg,\n",
550
+ " observations=observations,\n",
551
+ " actions=actions,\n",
552
+ " rewards=rewards,\n",
553
+ " events=events,\n",
554
+ " terminations=terminations,\n",
555
+ " sim=sim_cfg,\n",
556
+ " viewer=viewer_cfg,\n",
557
+ " decimation=1, # No action repeat\n",
558
+ " episode_length_s=int(1e9) if play else 10.0, # Infinite for play, 10s for training\n",
559
+ " )"
560
+ ]
561
+ },
562
+ {
563
+ "cell_type": "markdown",
564
+ "metadata": {
565
+ "id": "fC5maMjzSj_X"
566
+ },
567
+ "source": [
568
+ "### **⚙️ Create RL Configuration**\n",
569
+ "\n",
570
+ "This file defines the PPO (Proximal Policy Optimization) training parameters."
571
+ ]
572
+ },
573
+ {
574
+ "cell_type": "code",
575
+ "execution_count": null,
576
+ "metadata": {
577
+ "id": "C81zZm6mSj_X"
578
+ },
579
+ "outputs": [],
580
+ "source": [
581
+ "%%writefile /content/mjlab/src/mjlab/tasks/cartpole/rl_cfg.py\n",
582
+ "\"\"\"RL configuration for CartPole task.\"\"\"\n",
583
+ "\n",
584
+ "from mjlab.rl.config import (\n",
585
+ " RslRlOnPolicyRunnerCfg,\n",
586
+ " RslRlModelCfg,\n",
587
+ " RslRlPpoAlgorithmCfg,\n",
588
+ ")\n",
589
+ "\n",
590
+ "\n",
591
+ "def cartpole_ppo_runner_cfg() -> RslRlOnPolicyRunnerCfg:\n",
592
+ " \"\"\"Create RL runner configuration for CartPole task.\"\"\"\n",
593
+ " return RslRlOnPolicyRunnerCfg(\n",
594
+ " actor=RslRlModelCfg(\n",
595
+ " hidden_dims=(256, 128, 64), # Smaller network for simpler task\n",
596
+ " activation=\"elu\",\n",
597
+ " obs_normalization=True,\n",
598
+ " stochastic=True,\n",
599
+ " init_noise_std=1.0,\n",
600
+ " ),\n",
601
+ " critic=RslRlModelCfg(\n",
602
+ " hidden_dims=(256, 128, 64),\n",
603
+ " activation=\"elu\",\n",
604
+ " obs_normalization=True,\n",
605
+ " stochastic=False,\n",
606
+ " init_noise_std=1.0,\n",
607
+ " ),\n",
608
+ " algorithm=RslRlPpoAlgorithmCfg(\n",
609
+ " value_loss_coef=1.0,\n",
610
+ " use_clipped_value_loss=True,\n",
611
+ " clip_param=0.2,\n",
612
+ " entropy_coef=0.01,\n",
613
+ " num_learning_epochs=5,\n",
614
+ " num_mini_batches=4,\n",
615
+ " learning_rate=1.0e-3,\n",
616
+ " schedule=\"adaptive\",\n",
617
+ " gamma=0.99,\n",
618
+ " lam=0.95,\n",
619
+ " desired_kl=0.01,\n",
620
+ " max_grad_norm=1.0,\n",
621
+ " ),\n",
622
+ " experiment_name=\"cartpole\",\n",
623
+ " save_interval=50,\n",
624
+ " num_steps_per_env=24,\n",
625
+ " max_iterations=5_000, # Fewer iterations for simpler task\n",
626
+ " )"
627
+ ]
628
+ },
629
+ {
630
+ "cell_type": "markdown",
631
+ "metadata": {
632
+ "id": "Oc8-AHGcHt78"
633
+ },
634
+ "source": [
635
+ "### **📋 Register the Task Environment**\n",
636
+ "\n",
637
+ "Register the CartPole task with mjlab registry."
638
+ ]
639
+ },
640
+ {
641
+ "cell_type": "code",
642
+ "execution_count": null,
643
+ "metadata": {
644
+ "id": "YitUGUBRHxD4"
645
+ },
646
+ "outputs": [],
647
+ "source": [
648
+ "%%writefile /content/mjlab/src/mjlab/tasks/cartpole/__init__.py\n",
649
+ "\"\"\"CartPole task registration.\"\"\"\n",
650
+ "\n",
651
+ "from mjlab.tasks.registry import register_mjlab_task\n",
652
+ "from mjlab.rl.runner import MjlabOnPolicyRunner\n",
653
+ "\n",
654
+ "from .env_cfg import cartpole_env_cfg\n",
655
+ "from .rl_cfg import cartpole_ppo_runner_cfg\n",
656
+ "\n",
657
+ "register_mjlab_task(\n",
658
+ " task_id=\"Mjlab-Cartpole\",\n",
659
+ " env_cfg=cartpole_env_cfg(),\n",
660
+ " play_env_cfg=cartpole_env_cfg(play=True),\n",
661
+ " rl_cfg=cartpole_ppo_runner_cfg(),\n",
662
+ " runner_cls=MjlabOnPolicyRunner,\n",
663
+ ")"
664
+ ]
665
+ },
666
+ {
667
+ "cell_type": "markdown",
668
+ "metadata": {
669
+ "id": "K7wqLZR1rnGn"
670
+ },
671
+ "source": [
672
+ "---\n",
673
+ "\n",
674
+ "## **🚀 Step 3: Train the Agent**\n",
675
+ "\n",
676
+ "Now let's train a PPO policy to balance the CartPole!"
677
+ ]
678
+ },
679
+ {
680
+ "cell_type": "code",
681
+ "execution_count": null,
682
+ "metadata": {
683
+ "id": "Hht_hF4trqP2"
684
+ },
685
+ "outputs": [],
686
+ "source": [
687
+ "!python -m mjlab.scripts.train Mjlab-Cartpole --agent.max-iterations 201 --agent.save-interval 20"
688
+ ]
689
+ },
690
+ {
691
+ "cell_type": "markdown",
692
+ "metadata": {
693
+ "id": "xCaqPznGrx8H"
694
+ },
695
+ "source": [
696
+ "### **📁 Locate Training Checkpoints**\n",
697
+ "\n",
698
+ "After training, checkpoints are saved locally."
699
+ ]
700
+ },
701
+ {
702
+ "cell_type": "code",
703
+ "execution_count": null,
704
+ "metadata": {
705
+ "id": "uPnmHYu8r0uY"
706
+ },
707
+ "outputs": [],
708
+ "source": [
709
+ "import os\n",
710
+ "import re\n",
711
+ "from pathlib import Path\n",
712
+ "\n",
713
+ "# Find the most recent training run\n",
714
+ "log_dir = Path(\"/content/mjlab/logs/rsl_rl/cartpole\")\n",
715
+ "if log_dir.exists():\n",
716
+ " runs = sorted(log_dir.glob(\"*\"), key=os.path.getmtime, reverse=True)\n",
717
+ " if runs:\n",
718
+ " latest_run = runs[0]\n",
719
+ " print(f\"✓ Latest training run: {latest_run.name}\\n\")\n",
720
+ "\n",
721
+ " # List checkpoints - sorted by iteration number\n",
722
+ " checkpoints = list(latest_run.glob(\"model_*.pt\"))\n",
723
+ " if checkpoints:\n",
724
+ " # Extract iteration number and sort numerically\n",
725
+ " def get_iteration(ckpt):\n",
726
+ " match = re.search(r\"model_(\\d+)\\.pt\", ckpt.name)\n",
727
+ " return int(match.group(1)) if match else 0\n",
728
+ "\n",
729
+ " checkpoints = sorted(checkpoints, key=get_iteration)\n",
730
+ "\n",
731
+ " print(f\"Found {len(checkpoints)} checkpoints:\")\n",
732
+ " for ckpt in checkpoints[-5:]: # Show last 5\n",
733
+ " size_mb = ckpt.stat().st_size / (1024 * 1024)\n",
734
+ " iteration = get_iteration(ckpt)\n",
735
+ " print(f\" • {ckpt.name} (iteration {iteration}, {size_mb:.2f} MB)\")\n",
736
+ "\n",
737
+ " # Store the last checkpoint path\n",
738
+ " last_checkpoint = str(checkpoints[-1])\n",
739
+ " print(f\"\\n💾 Last checkpoint: {last_checkpoint}\")\n",
740
+ " else:\n",
741
+ " print(\"⚠ No checkpoints found yet\")\n",
742
+ " else:\n",
743
+ " print(\"⚠ No training runs found\")\n",
744
+ "else:\n",
745
+ " print(\"⚠ Log directory not found. Have you run training yet?\")"
746
+ ]
747
+ },
748
+ {
749
+ "cell_type": "markdown",
750
+ "metadata": {
751
+ "id": "eWFS9Pw7r2uH"
752
+ },
753
+ "source": [
754
+ "---\n",
755
+ "\n",
756
+ "## **🎮 Step 4: Visualize the Trained Policy**\n",
757
+ "\n",
758
+ "Let's see the trained policy in action!"
759
+ ]
760
+ },
761
+ {
762
+ "cell_type": "markdown",
763
+ "metadata": {
764
+ "id": "78PgHtpfr5sb"
765
+ },
766
+ "source": [
767
+ "### **🌐 Launch Viser API**"
768
+ ]
769
+ },
770
+ {
771
+ "cell_type": "code",
772
+ "execution_count": null,
773
+ "metadata": {
774
+ "id": "_9tGiFyBr2bW"
775
+ },
776
+ "outputs": [],
777
+ "source": [
778
+ "import subprocess\n",
779
+ "import sys\n",
780
+ "\n",
781
+ "process = subprocess.Popen(\n",
782
+ " [\n",
783
+ " \"python\",\n",
784
+ " \"-m\",\n",
785
+ " \"mjlab.scripts.play\",\n",
786
+ " \"Mjlab-Cartpole\",\n",
787
+ " \"--checkpoint_file\",\n",
788
+ " last_checkpoint,\n",
789
+ " \"--num_envs\",\n",
790
+ " \"4\",\n",
791
+ " ],\n",
792
+ " stdout=subprocess.PIPE,\n",
793
+ " stderr=subprocess.STDOUT,\n",
794
+ " universal_newlines=True,\n",
795
+ " bufsize=1,\n",
796
+ ")\n",
797
+ "\n",
798
+ "detected_port = None\n",
799
+ "\n",
800
+ "for line in process.stdout:\n",
801
+ " print(line, end=\"\")\n",
802
+ " sys.stdout.flush()\n",
803
+ "\n",
804
+ " # Extract port number from viser output\n",
805
+ " port_match = re.search(r\":(\\d{4})\", line)\n",
806
+ " if port_match and \"viser\" in line.lower():\n",
807
+ " detected_port = int(port_match.group(1))\n",
808
+ " print(\"\\n\" + \"=\" * 34)\n",
809
+ " print(f\"✅ Server is running on port {detected_port}!\")\n",
810
+ " print(\"=\" * 34)\n",
811
+ " break"
812
+ ]
813
+ },
814
+ {
815
+ "cell_type": "markdown",
816
+ "metadata": {
817
+ "id": "XgzJXyBXssZS"
818
+ },
819
+ "source": [
820
+ "### **🖥️ Embed Client as iframe**"
821
+ ]
822
+ },
823
+ {
824
+ "cell_type": "code",
825
+ "execution_count": null,
826
+ "metadata": {
827
+ "id": "ll89QnuSuUxx"
828
+ },
829
+ "outputs": [],
830
+ "source": [
831
+ "from google.colab import output\n",
832
+ "\n",
833
+ "port = detected_port if detected_port else 8081\n",
834
+ "output.serve_kernel_port_as_iframe(port=port, height=700)"
835
+ ]
836
+ }
837
+ ],
838
+ "metadata": {
839
+ "accelerator": "GPU",
840
+ "colab": {
841
+ "gpuType": "T4",
842
+ "include_colab_link": true,
843
+ "provenance": [],
844
+ "toc_visible": true
845
+ },
846
+ "kernelspec": {
847
+ "display_name": "Python 3",
848
+ "name": "python3"
849
+ },
850
+ "language_info": {
851
+ "name": "python"
852
+ }
853
+ },
854
+ "nbformat": 4,
855
+ "nbformat_minor": 0
856
+ }
mjlab/notebooks/demo.ipynb ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "## Use W&B offline"
8
+ ]
9
+ },
10
+ {
11
+ "cell_type": "code",
12
+ "execution_count": null,
13
+ "metadata": {},
14
+ "outputs": [],
15
+ "source": [
16
+ "!wandb offline"
17
+ ]
18
+ },
19
+ {
20
+ "cell_type": "markdown",
21
+ "metadata": {},
22
+ "source": [
23
+ "## **Or** login using an API from your W&B account"
24
+ ]
25
+ },
26
+ {
27
+ "cell_type": "code",
28
+ "execution_count": null,
29
+ "metadata": {},
30
+ "outputs": [],
31
+ "source": [
32
+ "!wandb login"
33
+ ]
34
+ },
35
+ {
36
+ "cell_type": "markdown",
37
+ "metadata": {},
38
+ "source": [
39
+ "## Now you are set to run the demo!"
40
+ ]
41
+ },
42
+ {
43
+ "cell_type": "code",
44
+ "execution_count": null,
45
+ "metadata": {
46
+ "colab": {
47
+ "base_uri": "https://localhost:8080/"
48
+ },
49
+ "id": "AqSj5JwRz_Lw",
50
+ "outputId": "4c299831-6279-430e-ca8e-f2a58e8f4720"
51
+ },
52
+ "outputs": [],
53
+ "source": "import subprocess\nimport sys\n\nprocess = subprocess.Popen(\n [\n \"uvx\",\n \"--refresh\",\n \"--from\",\n \"mjlab==1.1.0\",\n \"demo\",\n ],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n bufsize=1,\n)\n\nfor line in process.stdout:\n print(line, end=\"\")\n sys.stdout.flush()\n\n if \"serving\" in line.lower() or \"running on\" in line.lower() or \"8081\" in line:\n print(\"\\n\" + \"=\" * 50)\n print(\"✅ Server is running! Execute the next cell to view.\")\n print(\"=\" * 50)\n break"
54
+ },
55
+ {
56
+ "cell_type": "code",
57
+ "execution_count": null,
58
+ "metadata": {
59
+ "colab": {
60
+ "base_uri": "https://localhost:8080/",
61
+ "height": 421
62
+ },
63
+ "id": "i7xycbkt1MFB",
64
+ "outputId": "7fae85cc-1678-4f59-f700-688fbdc6fb84"
65
+ },
66
+ "outputs": [],
67
+ "source": [
68
+ "from google.colab import output\n",
69
+ "\n",
70
+ "output.serve_kernel_port_as_iframe(8081)"
71
+ ]
72
+ },
73
+ {
74
+ "cell_type": "code",
75
+ "execution_count": null,
76
+ "metadata": {
77
+ "id": "1IhDzu_i1uQt"
78
+ },
79
+ "outputs": [],
80
+ "source": []
81
+ }
82
+ ],
83
+ "metadata": {
84
+ "accelerator": "GPU",
85
+ "colab": {
86
+ "gpuType": "T4",
87
+ "provenance": []
88
+ },
89
+ "kernelspec": {
90
+ "display_name": "Python 3",
91
+ "name": "python3"
92
+ },
93
+ "language_info": {
94
+ "name": "python"
95
+ }
96
+ },
97
+ "nbformat": 4,
98
+ "nbformat_minor": 0
99
+ }
mjlab/scripts/fix_mjpython_macos.sh ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ # Fix mjpython on macOS by creating a symlink to libpython.
3
+ # This is needed because mjpython expects libpython in .venv/lib/
4
+
5
+ set -e
6
+
7
+ VENV_DIR=".venv"
8
+
9
+ if [[ "$(uname)" != "Darwin" ]]; then
10
+ echo "This script is only needed on macOS."
11
+ exit 0
12
+ fi
13
+
14
+ if [[ ! -d "$VENV_DIR" ]]; then
15
+ echo "Error: .venv directory not found. Run 'uv sync' first."
16
+ exit 1
17
+ fi
18
+
19
+ # Get Python version and prefix from the venv.
20
+ PYTHON_VERSION=$("$VENV_DIR/bin/python" -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')")
21
+ PYTHON_PREFIX=$("$VENV_DIR/bin/python" -c "import sys; print(sys.base_prefix)")
22
+ DYLIB_NAME="libpython${PYTHON_VERSION}.dylib"
23
+
24
+ # Find the dylib in the Python installation.
25
+ DYLIB_PATH="$PYTHON_PREFIX/lib/$DYLIB_NAME"
26
+
27
+ if [[ ! -f "$DYLIB_PATH" ]]; then
28
+ echo "Error: Could not find $DYLIB_PATH"
29
+ exit 1
30
+ fi
31
+
32
+ # Create the symlink.
33
+ mkdir -p "$VENV_DIR/lib"
34
+ ln -sf "$DYLIB_PATH" "$VENV_DIR/lib/$DYLIB_NAME"
35
+
36
+ echo "Created symlink: $VENV_DIR/lib/$DYLIB_NAME -> $DYLIB_PATH"
mjlab/scripts/run_docker.sh ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env sh
2
+ #
3
+ # Injects useful arguments for running mjlab in docker.
4
+ # See docs/source/installation.rst for usage.
5
+ #
6
+ # Patterned after the uv-in-docker example:
7
+ # https://github.com/astral-sh/uv-docker-example/blob/5748835918ec293d547bbe0e42df34e140aca1eb/run.sh
8
+ #
9
+ # Key arguments:
10
+ # --rm Remove the container after exiting
11
+ # --runtime=nvidia Use NVIDIA Container runtime to give GPU access
12
+ # --gpus all Expose all GPUs by default
13
+ # -v .:/app Mount current directory to /app (code changes don't require rebuild)
14
+ # -v /app/.venv Mount venv separately (keeps developer's environment out of container)
15
+ # -p 8080:8080 Publish port 8080 for viewing mjlab web interface on the host
16
+ # -it (conditional) Launch in interactive mode if running in a terminal
17
+ # (Note: if running training, there's a blocking wandb prompt before training begins)
18
+ # docker build Build and launch the image (tag matches the Makefile)
19
+ # "$@" Forward all arguments to the docker image
20
+
21
+
22
+ if [ -t 1 ]; then
23
+ INTERACTIVE="-it"
24
+ else
25
+ INTERACTIVE=""
26
+ fi
27
+
28
+ docker run \
29
+ --rm \
30
+ --runtime=nvidia \
31
+ --gpus all \
32
+ --volume .:/app \
33
+ --volume /app/.venv \
34
+ --publish 8080:8080 \
35
+ $INTERACTIVE \
36
+ $(docker build -qt mjlab .) \
37
+ "$@"
mjlab/tests/conftest.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Shared test fixtures and utilities."""
2
+
3
+ import os
4
+ from pathlib import Path
5
+
6
+ import mujoco
7
+ import pytest
8
+ import torch
9
+ import warp as wp
10
+
11
+ from mjlab.entity import Entity, EntityArticulationInfoCfg, EntityCfg
12
+ from mjlab.sim.sim import Simulation, SimulationCfg
13
+
14
+
15
+ @pytest.fixture(scope="session", autouse=True)
16
+ def configure_test_environment():
17
+ """Configure test environment settings automatically for all tests."""
18
+ wp.config.quiet = True
19
+
20
+
21
+ def get_test_device() -> str:
22
+ """Get device for testing, preferring CUDA if available.
23
+
24
+ Can be overridden with FORCE_CPU=1 environment variable to test
25
+ CPU-only behavior on GPU machines.
26
+ """
27
+ if os.environ.get("FORCE_CPU") == "1":
28
+ return "cpu"
29
+ return "cuda" if torch.cuda.is_available() else "cpu"
30
+
31
+
32
+ @pytest.fixture
33
+ def fixtures_dir() -> Path:
34
+ """Path to test fixtures directory."""
35
+ return Path(__file__).parent / "fixtures"
36
+
37
+
38
+ def load_fixture_xml(fixture_name: str) -> str:
39
+ """Load XML content from fixture file.
40
+
41
+ Args:
42
+ fixture_name: Name of the fixture file (without .xml extension) or full path.
43
+
44
+ Returns:
45
+ XML content as string.
46
+ """
47
+ fixtures_path = Path(__file__).parent / "fixtures"
48
+ if not fixture_name.endswith(".xml"):
49
+ fixture_name = f"{fixture_name}.xml"
50
+ fixture_file = fixtures_path / fixture_name
51
+ return fixture_file.read_text()
52
+
53
+
54
+ def create_entity_with_actuator(xml_string: str, actuator_cfg):
55
+ """Create entity with actuator from XML string.
56
+
57
+ Args:
58
+ xml_string: MuJoCo XML model string.
59
+ actuator_cfg: Actuator configuration.
60
+
61
+ Returns:
62
+ Entity instance.
63
+ """
64
+ cfg = EntityCfg(
65
+ spec_fn=lambda: mujoco.MjSpec.from_string(xml_string),
66
+ articulation=EntityArticulationInfoCfg(actuators=(actuator_cfg,)),
67
+ )
68
+ return Entity(cfg)
69
+
70
+
71
+ def create_entity_from_fixture(fixture_name: str, actuator_cfg=None):
72
+ """Create entity from fixture file.
73
+
74
+ Args:
75
+ fixture_name: Name of the fixture file (without .xml extension).
76
+ actuator_cfg: Optional actuator configuration.
77
+
78
+ Returns:
79
+ Entity instance.
80
+ """
81
+ xml_string = load_fixture_xml(fixture_name)
82
+ if actuator_cfg is not None:
83
+ return create_entity_with_actuator(xml_string, actuator_cfg)
84
+ cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(xml_string))
85
+ return Entity(cfg)
86
+
87
+
88
+ def initialize_entity(entity: Entity, device: str, num_envs: int = 1):
89
+ """Initialize entity with simulation.
90
+
91
+ Args:
92
+ entity: Entity to initialize.
93
+ device: Device to use ("cpu" or "cuda").
94
+ num_envs: Number of environments.
95
+
96
+ Returns:
97
+ Tuple of (entity, simulation).
98
+ """
99
+ model = entity.compile()
100
+ sim_cfg = SimulationCfg()
101
+ sim = Simulation(num_envs=num_envs, cfg=sim_cfg, model=model, device=device)
102
+ entity.initialize(model, sim.model, sim.data, device)
103
+ return entity, sim
104
+
105
+
106
+ # =============================================================================
107
+ # XML Fixture Loaders
108
+ # =============================================================================
109
+
110
+
111
+ @pytest.fixture
112
+ def fixed_base_box_xml() -> str:
113
+ """Load fixed base box XML fixture."""
114
+ return load_fixture_xml("fixed_base_box")
115
+
116
+
117
+ @pytest.fixture
118
+ def floating_base_box_xml() -> str:
119
+ """Load floating base box XML fixture."""
120
+ return load_fixture_xml("floating_base_box")
121
+
122
+
123
+ @pytest.fixture
124
+ def fixed_base_articulated_xml() -> str:
125
+ """Load fixed base articulated robot XML fixture."""
126
+ return load_fixture_xml("fixed_base_articulated")
127
+
128
+
129
+ @pytest.fixture
130
+ def floating_base_articulated_xml() -> str:
131
+ """Load floating base articulated robot XML fixture."""
132
+ return load_fixture_xml("floating_base_articulated")
133
+
134
+
135
+ @pytest.fixture
136
+ def biped_xml() -> str:
137
+ """Load biped robot XML fixture with ground plane."""
138
+ return load_fixture_xml("biped")
139
+
140
+
141
+ @pytest.fixture
142
+ def robot_with_floor_xml() -> str:
143
+ """XML for a floating body above a ground plane."""
144
+ return """
145
+ <mujoco>
146
+ <worldbody>
147
+ <geom name="floor" type="plane" size="10 10 0.1" pos="0 0 0"/>
148
+ <body name="base" pos="0 0 2">
149
+ <freejoint name="free_joint"/>
150
+ <geom name="base_geom" type="box" size="0.2 0.2 0.1" mass="5.0"/>
151
+ <site name="base_site" pos="0 0 -0.1"/>
152
+ </body>
153
+ </worldbody>
154
+ </mujoco>
155
+ """
156
+
157
+
158
+ @pytest.fixture
159
+ def falling_box_xml() -> str:
160
+ """XML for a box that can fall onto a ground plane."""
161
+ return """
162
+ <mujoco>
163
+ <worldbody>
164
+ <body name="ground" pos="0 0 0">
165
+ <geom name="ground_geom" type="plane" size="5 5 0.1" rgba="0.5 0.5 0.5 1"/>
166
+ </body>
167
+ <body name="box" pos="0 0 0.5">
168
+ <freejoint name="box_joint"/>
169
+ <geom name="box_geom" type="box" size="0.1 0.1 0.1" rgba="0.8 0.3 0.3 1"
170
+ mass="1.0"/>
171
+ </body>
172
+ </worldbody>
173
+ </mujoco>
174
+ """
mjlab/tests/smoke_test.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Smoke test for mjlab package."""
2
+
3
+ import io
4
+ import sys
5
+ import warnings
6
+ from contextlib import redirect_stderr, redirect_stdout
7
+
8
+ try:
9
+ import pytest
10
+ except ModuleNotFoundError:
11
+ pytest = None # type: ignore[assignment]
12
+
13
+
14
+ @pytest.mark.slow if pytest else lambda f: f
15
+ def test_basic_functionality() -> None:
16
+ """Test that mjlab can create and close an environment."""
17
+ from mjlab.envs.manager_based_rl_env import ManagerBasedRlEnv
18
+ from mjlab.tasks.velocity.config.go1.env_cfgs import unitree_go1_flat_env_cfg
19
+
20
+ # Suppress env spam.
21
+ with warnings.catch_warnings():
22
+ warnings.simplefilter("ignore")
23
+ with redirect_stdout(io.StringIO()), redirect_stderr(io.StringIO()):
24
+ env = ManagerBasedRlEnv(unitree_go1_flat_env_cfg(), device="cpu")
25
+ assert env.sim.data.time == 0.0
26
+ env.close()
27
+
28
+
29
+ if __name__ == "__main__":
30
+ try:
31
+ test_basic_functionality()
32
+ print("✓ Smoke test passed!")
33
+ sys.exit(0)
34
+ except Exception as e:
35
+ print(f"✗ Smoke test failed: {e}")
36
+ sys.exit(1)
mjlab/tests/test_action_manager.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tests for action manager functionality."""
2
+
3
+ from unittest.mock import Mock
4
+
5
+ import pytest
6
+ import torch
7
+ from conftest import get_test_device
8
+
9
+ from mjlab.managers.action_manager import ActionManager
10
+
11
+
12
+ @pytest.fixture
13
+ def device():
14
+ """Test device fixture."""
15
+ return get_test_device()
16
+
17
+
18
+ def _make_mock_action_term(action_dim: int):
19
+ """Create a mock action term factory."""
20
+
21
+ def factory(env):
22
+ term = Mock()
23
+ term.action_dim = action_dim
24
+ term.raw_action = torch.zeros(env.num_envs, action_dim, device=env.device)
25
+ term.process_actions = Mock()
26
+ term.apply_actions = Mock()
27
+ term.reset = Mock()
28
+ return term
29
+
30
+ return factory
31
+
32
+
33
+ @pytest.fixture
34
+ def mock_env(device):
35
+ """Create a mock environment for testing."""
36
+ env = Mock()
37
+ env.num_envs = 4
38
+ env.device = device
39
+ return env
40
+
41
+
42
+ @pytest.fixture
43
+ def action_term_cfg():
44
+ """Create a simple action term config."""
45
+ cfg = Mock()
46
+ cfg.build = _make_mock_action_term(action_dim=3)
47
+ cfg.entity_name = "robot"
48
+ return cfg
49
+
50
+
51
+ def test_action_history_tracking(mock_env, action_term_cfg, device):
52
+ """Test that action, prev_action, and prev_prev_action track history correctly."""
53
+ manager = ActionManager({"action": action_term_cfg}, mock_env)
54
+
55
+ # Initial state: all zeros.
56
+ assert torch.all(manager.action == 0.0)
57
+ assert torch.all(manager.prev_action == 0.0)
58
+ assert torch.all(manager.prev_prev_action == 0.0)
59
+
60
+ # Process actions and verify history shifts correctly.
61
+ actions = [
62
+ torch.tensor([[float(i)] * 3] * mock_env.num_envs, device=device)
63
+ for i in range(1, 5)
64
+ ]
65
+
66
+ manager.process_action(actions[0])
67
+ assert torch.allclose(manager.action, actions[0])
68
+ assert torch.all(manager.prev_action == 0.0)
69
+ assert torch.all(manager.prev_prev_action == 0.0)
70
+
71
+ manager.process_action(actions[1])
72
+ assert torch.allclose(manager.action, actions[1])
73
+ assert torch.allclose(manager.prev_action, actions[0])
74
+ assert torch.all(manager.prev_prev_action == 0.0)
75
+
76
+ manager.process_action(actions[2])
77
+ assert torch.allclose(manager.action, actions[2])
78
+ assert torch.allclose(manager.prev_action, actions[1])
79
+ assert torch.allclose(manager.prev_prev_action, actions[0])
80
+
81
+ manager.process_action(actions[3])
82
+ assert torch.allclose(manager.action, actions[3])
83
+ assert torch.allclose(manager.prev_action, actions[2])
84
+ assert torch.allclose(manager.prev_prev_action, actions[1])
85
+
86
+
87
+ def test_action_history_reset(mock_env, action_term_cfg, device):
88
+ """Test that reset clears action history for all or specific environments."""
89
+ manager = ActionManager({"action": action_term_cfg}, mock_env)
90
+
91
+ # Populate history.
92
+ actions = [
93
+ torch.tensor([[float(i)] * 3] * mock_env.num_envs, device=device)
94
+ for i in range(1, 4)
95
+ ]
96
+ for a in actions:
97
+ manager.process_action(a)
98
+
99
+ # Partial reset: only envs 0 and 2.
100
+ manager.reset(env_ids=torch.tensor([0, 2]))
101
+
102
+ # Reset envs should be zeros.
103
+ for env_id in [0, 2]:
104
+ assert torch.all(manager.action[env_id] == 0.0)
105
+ assert torch.all(manager.prev_action[env_id] == 0.0)
106
+ assert torch.all(manager.prev_prev_action[env_id] == 0.0)
107
+
108
+ # Non-reset envs should retain history.
109
+ for env_id in [1, 3]:
110
+ assert torch.allclose(manager.action[env_id], actions[2][env_id])
111
+ assert torch.allclose(manager.prev_action[env_id], actions[1][env_id])
112
+ assert torch.allclose(manager.prev_prev_action[env_id], actions[0][env_id])
113
+
114
+ # Full reset.
115
+ manager.reset()
116
+ assert torch.all(manager.action == 0.0)
117
+ assert torch.all(manager.prev_action == 0.0)
118
+ assert torch.all(manager.prev_prev_action == 0.0)
mjlab/tests/test_actions.py ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tests for actions."""
2
+
3
+ from pathlib import Path
4
+ from unittest.mock import Mock
5
+
6
+ import mujoco
7
+ import pytest
8
+ import torch
9
+ from conftest import get_test_device, load_fixture_xml
10
+
11
+ from mjlab.actuator.actuator import TransmissionType
12
+ from mjlab.actuator.builtin_actuator import BuiltinMotorActuatorCfg
13
+ from mjlab.entity import Entity, EntityArticulationInfoCfg, EntityCfg
14
+ from mjlab.envs import ManagerBasedRlEnv
15
+ from mjlab.envs.mdp.actions import (
16
+ JointPositionActionCfg,
17
+ SiteEffortActionCfg,
18
+ TendonEffortActionCfg,
19
+ TendonLengthActionCfg,
20
+ TendonVelocityActionCfg,
21
+ )
22
+ from mjlab.sim.sim import Simulation, SimulationCfg
23
+
24
+
25
+ @pytest.fixture
26
+ def device():
27
+ return get_test_device()
28
+
29
+
30
+ @pytest.fixture
31
+ def fixtures_dir():
32
+ return Path(__file__).parent / "fixtures"
33
+
34
+
35
+ def make_entity(xml_or_path, target_expr, transmission_type, device, from_file=False):
36
+ """Create and initialize entity."""
37
+
38
+ def spec_fn():
39
+ if from_file:
40
+ return mujoco.MjSpec.from_file(str(xml_or_path))
41
+ return mujoco.MjSpec.from_string(xml_or_path)
42
+
43
+ cfg = EntityCfg(
44
+ spec_fn=spec_fn,
45
+ articulation=EntityArticulationInfoCfg(
46
+ actuators=(
47
+ BuiltinMotorActuatorCfg(
48
+ target_names_expr=target_expr,
49
+ transmission_type=transmission_type,
50
+ effort_limit=10.0,
51
+ ),
52
+ )
53
+ ),
54
+ )
55
+ entity = Entity(cfg)
56
+ model = entity.compile()
57
+ sim = Simulation(num_envs=4, cfg=SimulationCfg(), model=model, device=device)
58
+ entity.initialize(model, sim.model, sim.data, device)
59
+ return entity
60
+
61
+
62
+ def make_env(entity, name, device):
63
+ """Create mock environment."""
64
+ env = Mock(spec=ManagerBasedRlEnv)
65
+ env.num_envs = 4
66
+ env.device = device
67
+ env.scene = {name: entity}
68
+ return env
69
+
70
+
71
+ def test_base_action_applies_scale_and_offset(fixtures_dir, device):
72
+ """BaseAction: processed = raw * scale + offset."""
73
+ entity = make_entity(
74
+ fixtures_dir / "tendon_finger.xml",
75
+ ("finger_tendon",),
76
+ TransmissionType.TENDON,
77
+ device,
78
+ from_file=True,
79
+ )
80
+ env = make_env(entity, "finger", device)
81
+
82
+ cfg = TendonLengthActionCfg(
83
+ entity_name="finger",
84
+ actuator_names=("finger_tendon",),
85
+ scale=2.0,
86
+ offset=0.5,
87
+ )
88
+ action = cfg.build(env)
89
+
90
+ raw = torch.tensor([[1.0], [2.0], [3.0], [4.0]], device=device)
91
+ action.process_actions(raw)
92
+
93
+ assert torch.allclose(action._processed_actions, raw * 2.0 + 0.5)
94
+
95
+
96
+ def test_base_action_reset_zeros_specific_envs(fixtures_dir, device):
97
+ """BaseAction.reset() zeros raw_action for specified env_ids only."""
98
+ entity = make_entity(
99
+ fixtures_dir / "tendon_finger.xml",
100
+ ("finger_tendon",),
101
+ TransmissionType.TENDON,
102
+ device,
103
+ from_file=True,
104
+ )
105
+ env = make_env(entity, "finger", device)
106
+
107
+ cfg = TendonLengthActionCfg(entity_name="finger", actuator_names=("finger_tendon",))
108
+ action = cfg.build(env)
109
+
110
+ action.process_actions(torch.ones(4, 1, device=device))
111
+ action.reset(env_ids=torch.tensor([0, 2], device=device))
112
+
113
+ assert torch.all(action.raw_action[[0, 2]] == 0.0)
114
+ assert torch.all(action.raw_action[[1, 3]] == 1.0)
115
+
116
+
117
+ @pytest.mark.parametrize(
118
+ "cfg_cls,target_attr,fixture,target_expr,transmission,entity_name",
119
+ [
120
+ # Joints.
121
+ (
122
+ JointPositionActionCfg,
123
+ "joint_pos_target",
124
+ "floating_base_articulated",
125
+ ("joint.*",),
126
+ TransmissionType.JOINT,
127
+ "robot",
128
+ ),
129
+ # Tendons.
130
+ (
131
+ TendonLengthActionCfg,
132
+ "tendon_len_target",
133
+ "tendon_finger.xml",
134
+ ("finger_tendon",),
135
+ TransmissionType.TENDON,
136
+ "finger",
137
+ ),
138
+ (
139
+ TendonVelocityActionCfg,
140
+ "tendon_vel_target",
141
+ "tendon_finger.xml",
142
+ ("finger_tendon",),
143
+ TransmissionType.TENDON,
144
+ "finger",
145
+ ),
146
+ (
147
+ TendonEffortActionCfg,
148
+ "tendon_effort_target",
149
+ "tendon_finger.xml",
150
+ ("finger_tendon",),
151
+ TransmissionType.TENDON,
152
+ "finger",
153
+ ),
154
+ # Sites.
155
+ (
156
+ SiteEffortActionCfg,
157
+ "site_effort_target",
158
+ "quadcopter.xml",
159
+ ("rotor_.*",),
160
+ TransmissionType.SITE,
161
+ "drone",
162
+ ),
163
+ ],
164
+ )
165
+ def test_action_sets_entity_target(
166
+ fixtures_dir,
167
+ device,
168
+ cfg_cls,
169
+ target_attr,
170
+ fixture,
171
+ target_expr,
172
+ transmission,
173
+ entity_name,
174
+ ):
175
+ """Each action type writes to correct entity.data field."""
176
+ if fixture.endswith(".xml"):
177
+ entity = make_entity(
178
+ fixtures_dir / fixture, target_expr, transmission, device, from_file=True
179
+ )
180
+ else:
181
+ entity = make_entity(
182
+ load_fixture_xml(fixture), target_expr, transmission, device, from_file=False
183
+ )
184
+
185
+ env = make_env(entity, entity_name, device)
186
+ cfg = cfg_cls(entity_name=entity_name, actuator_names=target_expr)
187
+ action = cfg.build(env)
188
+
189
+ target = torch.arange(4 * action.action_dim, device=device, dtype=torch.float32)
190
+ target = target.reshape(4, action.action_dim) * 0.1
191
+
192
+ action.process_actions(target)
193
+ action.apply_actions()
194
+
195
+ entity_target = getattr(entity.data, target_attr)
196
+ assert torch.allclose(entity_target, target)
mjlab/tests/test_actuator.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tests for actuator module."""
2
+
3
+ import pytest
4
+ import torch
5
+ from conftest import (
6
+ create_entity_with_actuator,
7
+ get_test_device,
8
+ initialize_entity,
9
+ load_fixture_xml,
10
+ )
11
+
12
+ from mjlab.actuator import (
13
+ BuiltinPositionActuatorCfg,
14
+ IdealPdActuatorCfg,
15
+ )
16
+
17
+
18
@pytest.fixture(scope="module")
def device():
  """Module-scoped torch device shared by all tests in this file."""
  return get_test_device()
21
+
22
+
23
@pytest.fixture(scope="module")
def robot_xml():
  """Floating-base articulated robot XML loaded once per module.

  Tests below drive it with 2-element joint tensors, so the fixture is
  expected to expose two actuated joints matching "joint.*".
  """
  return load_fixture_xml("floating_base_articulated")
26
+
27
+
28
def test_builtin_pd_actuator_compute(device, robot_xml):
  """BuiltinPositionActuator writes position targets to ctrl."""
  cfg = BuiltinPositionActuatorCfg(
    target_names_expr=("joint.*",), stiffness=50.0, damping=5.0
  )
  entity, sim = initialize_entity(create_entity_with_actuator(robot_xml, cfg), device)

  desired = torch.tensor([[0.5, -0.3]], device=device)
  entity.set_joint_position_target(desired)
  entity.write_data_to_sim()

  # Builtin position actuators pass targets straight through to ctrl.
  assert torch.allclose(sim.data.ctrl[0], torch.tensor([0.5, -0.3], device=device))
41
+
42
+
43
def test_ideal_pd_actuator_compute(device, robot_xml):
  """IdealPdActuator computes torques via explicit PD control."""
  cfg = IdealPdActuatorCfg(
    target_names_expr=("joint.*",), effort_limit=100.0, stiffness=50.0, damping=5.0
  )
  entity, sim = initialize_entity(create_entity_with_actuator(robot_xml, cfg), device)

  # Start from a zeroed joint state so the PD error is exactly the target.
  entity.write_joint_state_to_sim(
    position=torch.tensor([[0.0, 0.0]], device=device),
    velocity=torch.tensor([[0.0, 0.0]], device=device),
  )

  entity.set_joint_position_target(torch.tensor([[0.1, -0.1]], device=device))
  entity.set_joint_velocity_target(torch.tensor([[0.0, 0.0]], device=device))
  entity.set_joint_effort_target(torch.tensor([[0.0, 0.0]], device=device))
  entity.write_data_to_sim()

  # tau = kp * (q_des - q) = 50.0 * (+/-0.1) = +/-5.0, within effort_limit.
  assert torch.allclose(sim.data.ctrl[0], torch.tensor([5.0, -5.0], device=device))
63
+
64
+
65
def test_targets_cleared_on_reset(device, robot_xml):
  """Entity.reset() zeros all targets."""
  cfg = BuiltinPositionActuatorCfg(
    target_names_expr=("joint.*",), stiffness=50.0, damping=5.0
  )
  entity, _ = initialize_entity(create_entity_with_actuator(robot_xml, cfg), device)

  entity.set_joint_position_target(torch.tensor([[0.5, -0.3]], device=device))
  entity.write_data_to_sim()

  zeros = torch.zeros(1, 2, device=device)
  # Sanity check: the position target really is nonzero before the reset.
  assert not torch.allclose(entity.data.joint_pos_target, zeros)

  entity.reset()

  # After reset, position, velocity and effort targets are all cleared.
  for attr in ("joint_pos_target", "joint_vel_target", "joint_effort_target"):
    assert torch.allclose(getattr(entity.data, attr), zeros)
mjlab/tests/test_actuator_builtin_group.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tests for BuiltinActuatorGroup."""
2
+
3
+ import mujoco
4
+ import pytest
5
+ import torch
6
+ from conftest import get_test_device, load_fixture_xml
7
+
8
+ from mjlab.actuator import (
9
+ BuiltinMotorActuatorCfg,
10
+ BuiltinPositionActuatorCfg,
11
+ IdealPdActuatorCfg,
12
+ )
13
+ from mjlab.entity import Entity, EntityArticulationInfoCfg, EntityCfg
14
+ from mjlab.sim.sim import Simulation, SimulationCfg
15
+
16
# Default robot for this module, loaded from the shared fixture registry.
# Tests below drive it with 2-element joint tensors, so it is expected to
# expose two actuated joints matching "joint.*" — confirm against fixture.
ROBOT_XML = load_fixture_xml("floating_base_articulated")

# Inline floating-base robot with three hinge joints (joint1..joint3 in
# declaration order); used to exercise actuator/joint index mismatches.
ROBOT_XML_3JOINT = """
<mujoco>
<worldbody>
<body name="base" pos="0 0 1">
<freejoint name="free_joint"/>
<geom name="base_geom" type="box" size="0.2 0.2 0.1" mass="1.0"/>
<body name="link1" pos="0 0 0">
<joint name="joint1" type="hinge" axis="0 0 1" range="-1.57 1.57"/>
<geom name="link1_geom" type="box" size="0.1 0.1 0.1" mass="0.1"/>
</body>
<body name="link2" pos="0 0 0">
<joint name="joint2" type="hinge" axis="0 0 1" range="-1.57 1.57"/>
<geom name="link2_geom" type="box" size="0.1 0.1 0.1" mass="0.1"/>
</body>
<body name="link3" pos="0 0 0">
<joint name="joint3" type="hinge" axis="0 0 1" range="-1.57 1.57"/>
<geom name="link3_geom" type="box" size="0.1 0.1 0.1" mass="0.1"/>
</body>
</body>
</worldbody>
</mujoco>
"""
40
+
41
+
42
@pytest.fixture(scope="module")
def device():
  """Module-scoped torch device shared by all tests in this file."""
  return get_test_device()
45
+
46
+
47
def create_entity(actuator_cfgs, robot_xml=ROBOT_XML):
  """Build an Entity from robot_xml with the given actuator configs."""
  articulation = EntityArticulationInfoCfg(actuators=actuator_cfgs)
  return Entity(
    EntityCfg(
      spec_fn=lambda: mujoco.MjSpec.from_string(robot_xml),
      articulation=articulation,
    )
  )
53
+
54
+
55
def initialize_entity(entity, device, num_envs=1):
  """Compile the entity, wrap it in a Simulation, and bind the two.

  Returns the (entity, sim) pair ready for target-setting and stepping.
  """
  mj_model = entity.compile()
  sim = Simulation(
    num_envs=num_envs, cfg=SimulationCfg(), model=mj_model, device=device
  )
  entity.initialize(mj_model, sim.model, sim.data, device)
  return entity, sim
61
+
62
+
63
def test_position_actuator_batched(device):
  """BuiltinPositionActuator writes position targets via batched path."""
  cfgs = (
    BuiltinPositionActuatorCfg(
      target_names_expr=("joint.*",), stiffness=50.0, damping=5.0
    ),
  )
  entity, sim = initialize_entity(create_entity(cfgs), device)

  desired = torch.tensor([[0.5, -0.3]], device=device)
  entity.set_joint_position_target(desired)
  entity.write_data_to_sim()

  # Position targets appear unchanged in ctrl.
  assert torch.allclose(sim.data.ctrl[0], torch.tensor([0.5, -0.3], device=device))
76
+
77
+
78
def test_motor_actuator_batched(device):
  """BuiltinMotorActuator writes effort targets via batched path."""
  cfgs = (
    BuiltinMotorActuatorCfg(target_names_expr=("joint.*",), effort_limit=100.0),
  )
  entity, sim = initialize_entity(create_entity(cfgs), device)

  efforts = torch.tensor([[10.0, -5.0]], device=device)
  entity.set_joint_effort_target(efforts)
  entity.write_data_to_sim()

  # Effort targets appear unchanged in ctrl (within the effort limit).
  assert torch.allclose(sim.data.ctrl[0], torch.tensor([10.0, -5.0], device=device))
91
+
92
+
93
def test_mixed_builtin_actuators(device):
  """Multiple builtin actuator types can coexist and all use batched path."""
  cfgs = (
    BuiltinPositionActuatorCfg(
      target_names_expr=("joint1",), stiffness=50.0, damping=5.0
    ),
    BuiltinMotorActuatorCfg(target_names_expr=("joint2",), effort_limit=100.0),
  )
  entity, sim = initialize_entity(create_entity(cfgs), device)

  entity.set_joint_position_target(torch.tensor([[0.5, 0.0]], device=device))
  entity.set_joint_effort_target(torch.tensor([[0.0, -3.0]], device=device))
  entity.write_data_to_sim()

  # joint1 takes the position target (0.5); joint2 the effort target (-3.0).
  assert torch.allclose(sim.data.ctrl[0], torch.tensor([0.5, -3.0], device=device))
108
+
109
+
110
def test_builtin_and_custom_actuators(device):
  """Builtin actuators use batched path, custom actuators use compute()."""
  cfgs = (
    BuiltinPositionActuatorCfg(
      target_names_expr=("joint1",), stiffness=50.0, damping=5.0
    ),
    IdealPdActuatorCfg(
      target_names_expr=("joint2",), effort_limit=100.0, stiffness=50.0, damping=5.0
    ),
  )
  entity, sim = initialize_entity(create_entity(cfgs), device)

  # Zero the joint state so the PD torque depends only on the target.
  entity.write_joint_state_to_sim(
    position=torch.tensor([[0.0, 0.0]], device=device),
    velocity=torch.tensor([[0.0, 0.0]], device=device),
  )

  entity.set_joint_position_target(torch.tensor([[0.5, 0.2]], device=device))
  entity.set_joint_velocity_target(torch.tensor([[0.0, 0.0]], device=device))
  entity.set_joint_effort_target(torch.tensor([[0.0, 0.0]], device=device))
  entity.write_data_to_sim()

  # joint1: builtin position -> ctrl = 0.5
  # joint2: ideal pd -> ctrl = kp * (0.2 - 0.0) = 50.0 * 0.2 = 10.0
  assert torch.allclose(sim.data.ctrl[0], torch.tensor([0.5, 10.0], device=device))
135
+
136
+
137
def test_builtin_group_mismatched_indices(device):
  """Test that controls are written correctly when actuators use different joints.

  Regression test for ctrl_ids/joint_ids swap bug in BuiltinActuatorGroup.
  When actuators are added in different order than joints, ctrl_ids != joint_ids.
  This test verifies controls are written to the correct MuJoCo actuator indices.
  """
  # Actuator definition order intentionally differs from natural joint order
  # (joint1, joint2, joint3): actuator 0 drives joint2, actuator 1 drives
  # joint1, actuator 2 drives joint3.
  cfgs = (
    BuiltinPositionActuatorCfg(
      target_names_expr=("joint2",), stiffness=50.0, damping=5.0
    ),
    BuiltinMotorActuatorCfg(
      target_names_expr=("joint1", "joint3"), effort_limit=100.0
    ),
  )
  entity, sim = initialize_entity(
    create_entity(cfgs, robot_xml=ROBOT_XML_3JOINT), device, num_envs=1
  )

  # Targets are indexed by joint_id: joint1=10.0, joint2=20.0, joint3=30.0.
  entity.set_joint_position_target(torch.tensor([[10.0, 20.0, 30.0]], device=device))
  entity.set_joint_effort_target(torch.tensor([[100.0, 200.0, 300.0]], device=device))
  entity.write_data_to_sim()

  # ctrl follows MuJoCo actuator definition order:
  #   ctrl[0] = joint2 position = 20.0
  #   ctrl[1] = joint1 effort   = 100.0
  #   ctrl[2] = joint3 effort   = 300.0
  expected = torch.tensor([20.0, 100.0, 300.0], device=device)
  assert torch.allclose(
    sim.data.ctrl[0], expected
  ), f"Got {sim.data.ctrl[0]}, expected [20.0, 100.0, 300.0]"