Datasets:

ArXiv:
Wendakang commited on
Commit
3e10edb
·
1 Parent(s): 3b0c0b7

update arcade

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .github/workflows/python-publish.yml +70 -0
  2. .github/workflows/python_app.yaml +33 -0
  3. .gitignore +14 -0
  4. LICENSE +21 -0
  5. README.md +212 -0
  6. analysis.py +113 -0
  7. imgs/autoencode_f.gif +3 -0
  8. imgs/autoencode_p.gif +3 -0
  9. imgs/battleship_f.gif +3 -0
  10. imgs/battleship_p.gif +3 -0
  11. imgs/cartpole_f.gif +3 -0
  12. imgs/cartpole_p.gif +3 -0
  13. imgs/countrecall_f.gif +3 -0
  14. imgs/countrecall_p.gif +3 -0
  15. imgs/fps.png +3 -0
  16. imgs/grads_example.jpg +3 -0
  17. imgs/minesweeper_f.gif +3 -0
  18. imgs/minesweeper_p.gif +3 -0
  19. imgs/navigator_f.gif +3 -0
  20. imgs/navigator_p.gif +3 -0
  21. imgs/ncartpole_f.gif +3 -0
  22. imgs/ncartpole_p.gif +3 -0
  23. imgs/wandb.png +3 -0
  24. plotting/Atari_FPS_test.py +58 -0
  25. plotting/MinAtar_popgym_FPS_test.py +51 -0
  26. plotting/PArcade_gymnax_FPS_test.py +78 -0
  27. plotting/analysis_plot.ipynb +0 -0
  28. plotting/churn.py +202 -0
  29. plotting/fps.sh +46 -0
  30. plotting/generate_analysis_data.py +419 -0
  31. plotting/noiseva.py +449 -0
  32. plotting/plot_fps.py +74 -0
  33. plotting/plot_grads.ipynb +0 -0
  34. plotting/plot_separate_returns_curve.py +336 -0
  35. plotting/plot_twins_returns_curve.py +482 -0
  36. plotting/plottable.py +207 -0
  37. plotting/rlcgrad.py +222 -0
  38. plotting/run_multi_seed_analysis.py +119 -0
  39. popgym_arcade/__init__.py +1 -0
  40. popgym_arcade/baselines/__init__.py +0 -0
  41. popgym_arcade/baselines/model/__init__.py +10 -0
  42. popgym_arcade/baselines/model/builder.py +706 -0
  43. popgym_arcade/baselines/model/memorax/__init__.py +4 -0
  44. popgym_arcade/baselines/model/memorax/gras.py +106 -0
  45. popgym_arcade/baselines/model/memorax/groups.py +158 -0
  46. popgym_arcade/baselines/model/memorax/magmas/__init__.py +5 -0
  47. popgym_arcade/baselines/model/memorax/magmas/elman.py +138 -0
  48. popgym_arcade/baselines/model/memorax/magmas/gru.py +120 -0
  49. popgym_arcade/baselines/model/memorax/magmas/lstm.py +134 -0
  50. popgym_arcade/baselines/model/memorax/magmas/mgu.py +111 -0
.github/workflows/python-publish.yml ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This workflow will upload a Python Package to PyPI when a release is created
2
+ # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
3
+
4
+ # This workflow uses actions that are not certified by GitHub.
5
+ # They are provided by a third-party and are governed by
6
+ # separate terms of service, privacy policy, and support
7
+ # documentation.
8
+
9
+ name: Upload Python Package
10
+
11
+ on:
12
+ release:
13
+ types: [published]
14
+
15
+ permissions:
16
+ contents: read
17
+
18
+ jobs:
19
+ release-build:
20
+ runs-on: ubuntu-latest
21
+
22
+ steps:
23
+ - uses: actions/checkout@v4
24
+
25
+ - uses: actions/setup-python@v5
26
+ with:
27
+ python-version: "3.x"
28
+
29
+ - name: Build release distributions
30
+ run: |
31
+ # NOTE: put your own distribution build steps here.
32
+ python -m pip install build
33
+ python -m build
34
+
35
+ - name: Upload distributions
36
+ uses: actions/upload-artifact@v4
37
+ with:
38
+ name: release-dists
39
+ path: dist/
40
+
41
+ pypi-publish:
42
+ runs-on: ubuntu-latest
43
+ needs:
44
+ - release-build
45
+ permissions:
46
+ # IMPORTANT: this permission is mandatory for trusted publishing
47
+ id-token: write
48
+
49
+ # Dedicated environments with protections for publishing are strongly recommended.
50
+ # For more information, see: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment#deployment-protection-rules
51
+ environment:
52
+ name: pypi
53
+ # OPTIONAL: uncomment and update to include your PyPI project URL in the deployment status:
54
+ url: https://pypi.org/p/popgym-arcade
55
+ #
56
+ # ALTERNATIVE: if your GitHub Release name is the PyPI project version string
57
+ # ALTERNATIVE: exactly, uncomment the following line instead:
58
+ # url: https://pypi.org/project/YOURPROJECT/${{ github.event.release.name }}
59
+
60
+ steps:
61
+ - name: Retrieve release distributions
62
+ uses: actions/download-artifact@v4
63
+ with:
64
+ name: release-dists
65
+ path: dist/
66
+
67
+ - name: Publish release distributions to PyPI
68
+ uses: pypa/gh-action-pypi-publish@release/v1
69
+ with:
70
+ packages-dir: dist/
.github/workflows/python_app.yaml ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This workflow will install Python dependencies, run tests and lint with a single version of Python
2
+ # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3
+
4
+ name: Tests
5
+
6
+ on:
7
+ push:
8
+ branches: [ "main" ]
9
+ pull_request:
10
+ branches: [ "main" ]
11
+
12
+ permissions:
13
+ contents: read
14
+
15
+ jobs:
16
+ build:
17
+
18
+ runs-on: ubuntu-latest
19
+
20
+ steps:
21
+ - uses: actions/checkout@v3
22
+ - name: Set up Python 3.13
23
+ uses: actions/setup-python@v3
24
+ with:
25
+ python-version: "3.13"
26
+ - name: Install dependencies
27
+ run: |
28
+ python -m pip install --upgrade pip
29
+ pip install pytest pytest-cov
30
+ pip install -e .
31
+ - name: Test with pytest
32
+ run: |
33
+ pytest tests/test* --doctest-modules --junitxml=junit/test-results.xml --cov=. --cov-report=xml --cov-report=html
.gitignore ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.swp
2
+ *.swo
3
+ *.pyc
4
+ *__pycache__
5
+ *.egg-info
6
+ *.DS_Store
7
+ *.idea
8
+ *.pkl
9
+ *.pdf
10
+ *.csv
11
+ build/
12
+ .vscode/
13
+ plotting/fpsdata/
14
+ plotting/gradientdata/
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Zekang Wang and Zhe He
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # POPGym Arcade - GPU-Accelerated POMDPs
2
+
3
+ [![Tests](https://github.com/bolt-research/popgym-arcade/actions/workflows/python_app.yaml/badge.svg)](https://github.com/bolt-research/popgym-arcade/actions/workflows/python_app.yaml)
4
+
5
+ <div style="display: flex; flex-direction: column; align-items: center; gap: 20px;">
6
+ <div style="display: flex; flex-wrap: wrap; gap: 10px; justify-content: space-between;
7
+ border: 2px solid #3498db; border-radius: 10px;
8
+ padding: 10px;
9
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
10
+ background: linear-gradient(135deg, #ffffff, #ffe4e1);">
11
+ <img src="imgs/minesweeper_f.gif" alt="GIF 1" style="width: 100px; height: 100px; border-radius: 5px;">
12
+ <img src="imgs/countrecall_f.gif" alt="GIF 2" style="width: 100px; height: 100px; border-radius: 5px;">
13
+ <img src="imgs/battleship_f.gif" alt="GIF 3" style="width: 100px; height: 100px; border-radius: 5px;">
14
+ <img src="imgs/cartpole_f.gif" alt="GIF 4" style="width: 100px; height: 100px; border-radius: 5px;">
15
+ <img src="imgs/ncartpole_f.gif" alt="GIF 5" style="width: 100px; height: 100px; border-radius: 5px;">
16
+ <img src="imgs/autoencode_f.gif" alt="GIF 6" style="width: 100px; height: 100px; border-radius: 5px;">
17
+ <img src="imgs/navigator_f.gif" alt="GIF 7" style="width: 100px; height: 100px; border-radius: 5px;">
18
+ </div>
19
+ <div style="display: flex; flex-wrap: wrap; gap: 10px; justify-content: space-between;
20
+ border: 2px solid #e74c3c; border-radius: 10px;
21
+ padding: 10px;
22
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
23
+ background: linear-gradient(135deg, #ffffff, #ffe4e1);">
24
+ <img src="imgs/minesweeper_p.gif" alt="GIF 1" style="width: 100px; height: 100px; border-radius: 5px;">
25
+ <img src="imgs/countrecall_p.gif" alt="GIF 2" style="width: 100px; height: 100px; border-radius: 5px;">
26
+ <img src="imgs/battleship_p.gif" alt="GIF 3" style="width: 100px; height: 100px; border-radius: 5px;">
27
+ <img src="imgs/cartpole_p.gif" alt="GIF 4" style="width: 100px; height: 100px; border-radius: 5px;">
28
+ <img src="imgs/ncartpole_p.gif" alt="GIF 5" style="width: 100px; height: 100px; border-radius: 5px;">
29
+ <img src="imgs/autoencode_p.gif" alt="GIF 6" style="width: 100px; height: 100px; border-radius: 5px;">
30
+ <img src="imgs/navigator_p.gif" alt="GIF 7" style="width: 100px; height: 100px; border-radius: 5px;">
31
+ </div>
32
+ </div>
33
+
34
+
35
+ [//]: # (<p float="left">)
36
+
37
+ [//]: # ( <img src="imgs/minesweeper_f.gif" width="96" height="96" /> )
38
+
39
+ [//]: # ( <img src="imgs/countrecall_f.gif" width="96" height="96" /> )
40
+
41
+ [//]: # ( <img src="imgs/battleship_f.gif" width="96" height="96" /> )
42
+
43
+ [//]: # ( <img src="imgs/cartpole_f.gif" width="96" height="96" /> )
44
+
45
+ [//]: # ( <img src="imgs/ncartpole_f.gif" width="96" height="96" /> )
46
+
47
+ [//]: # ( <img src="imgs/autoencode_f.gif" width="96" height="96" /> )
48
+
49
+ [//]: # ( <img src="imgs/navigator_f.gif" width="96" height="96" /> )
50
+
51
+ [//]: # (</p>)
52
+
53
+ [//]: # ()
54
+ [//]: # (<p float="left">)
55
+
56
+ [//]: # ( <img src="imgs/minesweeper_p.gif" width="96" height="96" /> )
57
+
58
+ [//]: # ( <img src="imgs/countrecall_p.gif" width="96" height="96" /> )
59
+
60
+ [//]: # ( <img src="imgs/battleship_p.gif" width="96" height="96" /> )
61
+
62
+ [//]: # ( <img src="imgs/cartpole_p.gif" width="96" height="96" /> )
63
+
64
+ [//]: # ( <img src="imgs/ncartpole_p.gif" width="96" height="96" /> )
65
+
66
+ [//]: # ( <img src="imgs/autoencode_p.gif" width="96" height="96" /> )
67
+
68
+ [//]: # ( <img src="imgs/navigator_p.gif" width="96" height="96" /> )
69
+
70
+ [//]: # (</p>)
71
+
72
+ POPGym Arcade contains 7 pixel-based POMDPs in the style of the [Arcade Learning Environment](https://github.com/Farama-Foundation/Arcade-Learning-Environment). Each environment provides:
73
+ - 3 Difficulty settings
74
+ - Common observation and action space shared across all envs
75
+ - Fully observable and partially observable configurations
76
+ - Fast and easy GPU vectorization using `jax.vmap` and `jax.jit`
77
+
78
+ ## Gradient Visualization
79
+ We also provide tools to visualize how policies use memory.
80
+ <img src="imgs/grads_example.jpg" height="192" />
81
+
82
+ See [below](#Memory-Introspection-Tools) for further instructions.
83
+
84
+ ## Throughput
85
+ You can expect millions of frames per second on a consumer-grade GPU. With `obs_size=128`, most policies converge within 30-60 minutes of training.
86
+
87
+ <img src="imgs/fps.png" height="192" />
88
+ <img src="imgs/wandb.png" height="192" />
89
+
90
+ ## Getting Started
91
+
92
+
93
+ ### Installation
94
+
95
+ To install the environments, run
96
+
97
+ ```bash
98
+ pip install popgym-arcade
99
+ ```
100
+ If you plan to use our training scripts, install the baselines as well
101
+
102
+ ```bash
103
+ pip install 'popgym-arcade[baselines]'
104
+ ```
105
+
106
+ ### Human Play
107
+ To best understand the environments, you should try and play them yourself. The [play script](popgym_arcade/play.py) lets you play the games yourself using the arrow keys and spacebar.
108
+
109
+ ```bash
110
+ popgym-arcade-play NoisyCartPoleEasy # play MDP 256 pixel version
111
+ popgym-arcade-play BattleShipEasy -p -o 128 # play POMDP 128 pixel version
112
+ ```
113
+
114
+ ### Creating and Stepping Environments
115
+ Our envs are `gymnax` envs, so you can use your wrappers and code designed to work with `gymnax`. The following example demonstrates how to integrate POPGym Arcade into your code.
116
+
117
+ ```python
118
+ import popgym_arcade
119
+ import jax
120
+
121
+ # Create both POMDP and MDP env variants
122
+ pomdp, pomdp_params = popgym_arcade.make("BattleShipEasy", partial_obs=True)
123
+ mdp, mdp_params = popgym_arcade.make("BattleShipEasy", partial_obs=False)
124
+
125
+ # Let's vectorize and compile the envs
126
+ # Note when you are training a policy, it is better to compile your policy_update rather than the env_step
127
+ pomdp_reset = jax.jit(jax.vmap(pomdp.reset, in_axes=(0, None)))
128
+ pomdp_step = jax.jit(jax.vmap(pomdp.step, in_axes=(0, 0, 0, None)))
129
+ mdp_reset = jax.jit(jax.vmap(mdp.reset, in_axes=(0, None)))
130
+ mdp_step = jax.jit(jax.vmap(mdp.step, in_axes=(0, 0, 0, None)))
131
+
132
+ # Initialize four vectorized environments
133
+ n_envs = 4
134
+ # Initialize PRNG keys
135
+ key = jax.random.key(0)
136
+ reset_keys = jax.random.split(key, n_envs)
137
+
138
+ # Reset environments
139
+ observation, env_state = pomdp_reset(reset_keys, pomdp_params)
140
+
141
+ # Step the POMDPs
142
+ for t in range(10):
143
+ # Propagate some randomness
144
+ action_key, step_key = jax.random.split(jax.random.key(t))
145
+ action_keys = jax.random.split(action_key, n_envs)
146
+ step_keys = jax.random.split(step_key, n_envs)
147
+ # Pick actions at random
148
+ actions = jax.vmap(pomdp.action_space(pomdp_params).sample)(action_keys)
149
+ # Step the env to the next state
150
+ # No need to reset, gymnax automatically resets when done
151
+ observation, env_state, reward, done, info = pomdp_step(step_keys, env_state, actions, pomdp_params)
152
+
153
+ # POMDP and MDP variants share states
154
+ # We can plug the POMDP states into the MDP and continue playing
155
+ action_keys = jax.random.split(jax.random.key(t + 1), n_envs)
156
+ step_keys = jax.random.split(jax.random.key(t + 2), n_envs)
157
+ markov_state, env_state, reward, done, info = mdp_step(step_keys, env_state, actions, mdp_params)
158
+ ```
159
+
160
+ ## Memory Introspection Tools
161
+ We implement visualization tools to probe which pixels persist in agent memory, and their
162
+ impact on Q value predictions. Try code below or [vis example](plotting/plot_grads.ipynb) to visualize the memory your agent uses
163
+
164
+ ```python
165
+ from popgym_arcade.baselines.model.builder import QNetworkRNN
166
+ from popgym_arcade.baselines.utils import get_saliency_maps, vis_fn
167
+ import equinox as eqx
168
+ import jax
169
+
170
+ config = {
171
+ # Env string
172
+ "ENV_NAME": "NavigatorEasy",
173
+ # Whether to use full or partial observability
174
+ "PARTIAL": True,
175
+ # Memory model type (see models directory)
176
+ "MEMORY_TYPE": "lru",
177
+ # Evaluation episode seed
178
+ "SEED": 0,
179
+ # Observation size in pixels (128 or 256)
180
+ "OBS_SIZE": 128,
181
+ }
182
+
183
+ # Initialize the random key
184
+ rng = jax.random.PRNGKey(config["SEED"])
185
+
186
+ # Initialize the model
187
+ network = QNetworkRNN(rng, rnn_type=config["MEMORY_TYPE"], obs_size=config["OBS_SIZE"])
188
+ # Load the model
189
+ model = eqx.tree_deserialise_leaves("PATH_TO_YOUR_MODEL_WEIGHTS.pkl", network)
190
+ # Compute the saliency maps
191
+ grads, obs_seq, grad_accumulator = get_saliency_maps(rng, model, config)
192
+ # Visualize the saliency maps
193
+ # If you have latex installed, set use_latex=True
194
+ vis_fn(grads, obs_seq, config, use_latex=False)
195
+ ```
196
+
197
+ ## Other Useful Libraries
198
+ - [`gymnax`](https://github.com/RobertTLange/gymnax) - The (deprecated) `jax`-capable `gymnasium` API
199
+ - [`stable-gymnax`](https://github.com/smorad/stable-gymnax) - A maintained and patched version of `gymnax`
200
+ - [`popgym`](https://github.com/proroklab/popgym) - The original collection of POMDPs, implemented in `numpy`
201
+ - [`popjaxrl`](https://github.com/luchris429/popjaxrl) - A `jax` version of `popgym`
202
+ - [`popjym`](https://github.com/EdanToledo/popjym) - A more readable version of `popjaxrl` environments that served as a basis for our work
203
+
204
+ ## Citation
205
+ ```
206
+ @article{wang2025popgym,
207
+ title={POPGym Arcade: Parallel Pixelated POMDPs},
208
+ author={Wang, Zekang and He, Zhe and Zhang, Borong and Toledo, Edan and Morad, Steven},
209
+ journal={arXiv preprint arXiv:2503.01450},
210
+ year={2025}
211
+ }
212
+ ```
analysis.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import equinox as eqx
2
+ import jax
3
+ import jax.numpy as jnp
4
+
5
+ import popgym_arcade
6
+ from popgym_arcade.baselines.model.builder import QNetworkRNN
7
+ from popgym_arcade.baselines.utils import get_saliency_maps, vis_fn
8
+ from popgym_arcade.wrappers import LogWrapper
9
+
10
+ ##
11
+ ## Simpler approach:
12
+ ## Compute gradients using random initial state
13
+ ##
14
+ config = {
15
+ "ENV_NAME": "MineSweeperEasy",
16
+ "PARTIAL": False,
17
+ "MEMORY_TYPE": "lru",
18
+ "SEED": 0,
19
+ "OBS_SIZE": 128,
20
+ }
21
+ # Path to your model weights
22
+ config["MODEL_PATH"] = (
23
+ f"nips_analysis_128/PQN_RNN_{config['MEMORY_TYPE']}_{config['ENV_NAME']}_model_Partial={config['PARTIAL']}_SEED=0.pkl"
24
+ )
25
+
26
+ # Initialize the random key
27
+ rng = jax.random.PRNGKey(config["SEED"])
28
+
29
+ # Initialize the model
30
+ network = QNetworkRNN(rng, rnn_type=config["MEMORY_TYPE"], obs_size=config["OBS_SIZE"])
31
+ # Load the model
32
+ model = eqx.tree_deserialise_leaves(config["MODEL_PATH"], network)
33
+ # Compute the saliency maps
34
+ grads, obs_seq, grad_accumulator = get_saliency_maps(rng, model, config, max_steps=30)
35
+ # Visualize the saliency maps
36
+ vis_fn(grads, obs_seq, config, use_latex=True)
37
+
38
+
39
+ ##
40
+ ## More complex approach
41
+ ## Generate custom initial state and then compute gradients
42
+ ##
43
+ config = {
44
+ "ENV_NAME": "NavigatorEasy",
45
+ "PARTIAL": True,
46
+ "MEMORY_TYPE": "lru",
47
+ "SEED": 0,
48
+ "OBS_SIZE": 128,
49
+ }
50
+ config["MODEL_PATH"] = (
51
+ f"nips_analysis_128/PQN_RNN_{config['MEMORY_TYPE']}_{config['ENV_NAME']}_model_Partial={config['PARTIAL']}_SEED=0.pkl"
52
+ )
53
+
54
+
55
+ rng = jax.random.PRNGKey(config["SEED"])
56
+ # Initialize the model
57
+ network = QNetworkRNN(rng, rnn_type=config["MEMORY_TYPE"], obs_size=config["OBS_SIZE"])
58
+ # Load the model
59
+ model = eqx.tree_deserialise_leaves(config["MODEL_PATH"], network)
60
+
61
+ # Setup initial state
62
+ seed, _rng = jax.random.split(jax.random.key(config["SEED"]))
63
+ env, env_params = popgym_arcade.make(
64
+ config["ENV_NAME"], partial_obs=config["PARTIAL"], obs_size=config["OBS_SIZE"]
65
+ )
66
+ env = LogWrapper(env)
67
+ n_envs = 1
68
+ vmap_reset = lambda n_envs: lambda rng: jax.vmap(env.reset, in_axes=(0, None))(
69
+ jax.random.split(rng, n_envs), env_params
70
+ )
71
+ vmap_step = lambda n_envs: lambda rng, env_state, action: jax.vmap(
72
+ env.step, in_axes=(0, 0, 0, None)
73
+ )(jax.random.split(rng, n_envs), env_state, action, env_params)
74
+ init_obs, init_state = vmap_reset(n_envs)(_rng)
75
+
76
+ # Replace initial state with custom initial state
77
+ new_init_state = eqx.tree_at(
78
+ lambda x: x.env_state.action_x, init_state, replace=jnp.array([6])
79
+ )
80
+ new_init_state = eqx.tree_at(
81
+ lambda x: x.env_state.action_y, new_init_state, replace=jnp.array([6])
82
+ )
83
+ board = (
84
+ new_init_state.env_state.board.at[jnp.where(new_init_state.env_state.board == 2)]
85
+ .set(0)
86
+ .at[:, 1, 1]
87
+ .set(2)
88
+ )
89
+ # Can also set the entire board manually if needed
90
+ # board = (
91
+ # jnp.zeros_like(new_init_state.env_state.board)
92
+ # # tnt
93
+ # .at[0, 3, 2].set(1)
94
+ # .at[0, 4, 2].set(1)
95
+ # .at[0, 5, 3].set(1)
96
+ # .at[0, 6, 3].set(1)
97
+ # # goal
98
+ # .at[0, 6, 6].set(2)
99
+ # )
100
+ new_init_state = eqx.tree_at(lambda x: x.env_state.board, new_init_state, replace=board)
101
+ new_init_obs = jax.vmap(env.get_obs)(new_init_state.env_state)
102
+
103
+
104
+ # Compute the saliency maps
105
+ grads, obs_seq, grad_accumulator = get_saliency_maps(
106
+ rng,
107
+ model,
108
+ config,
109
+ max_steps=10,
110
+ initial_state_and_obs=(new_init_state, new_init_obs),
111
+ )
112
+ # Visualize the saliency maps
113
+ vis_fn(grads, obs_seq, config, use_latex=True)
imgs/autoencode_f.gif ADDED

Git LFS Details

  • SHA256: 5e7b2e5000c1465e19aa58fa2d4d94d09922b448a47da200bf924dd04bfaa335
  • Pointer size: 130 Bytes
  • Size of remote file: 87.4 kB
imgs/autoencode_p.gif ADDED

Git LFS Details

  • SHA256: e424d373e939e70d288c6ddd890ecd26d8f1c431d2096253089c905a93529387
  • Pointer size: 130 Bytes
  • Size of remote file: 45 kB
imgs/battleship_f.gif ADDED

Git LFS Details

  • SHA256: 49f561ed63277e8d89ae6c92040d3dd9bc97fb42ab52ef47162d90a4529594c0
  • Pointer size: 130 Bytes
  • Size of remote file: 91.8 kB
imgs/battleship_p.gif ADDED

Git LFS Details

  • SHA256: 72b64ced29977839749ef19877f2abeed3205e6139cd483de4c451241993c429
  • Pointer size: 130 Bytes
  • Size of remote file: 87 kB
imgs/cartpole_f.gif ADDED

Git LFS Details

  • SHA256: 92aa1b5365702985c4c9205e8c73b233acf17d7db650f7a84649d9351ad925a3
  • Pointer size: 131 Bytes
  • Size of remote file: 173 kB
imgs/cartpole_p.gif ADDED

Git LFS Details

  • SHA256: 1139ac45f70bf17ccd1d853b38e07951715e3a83508c03e6e19653af2037dc43
  • Pointer size: 131 Bytes
  • Size of remote file: 101 kB
imgs/countrecall_f.gif ADDED

Git LFS Details

  • SHA256: e7fe5748c9e4ffe184485dd493bf591a24a9c010db9010de5485d1d69022f59f
  • Pointer size: 131 Bytes
  • Size of remote file: 120 kB
imgs/countrecall_p.gif ADDED

Git LFS Details

  • SHA256: 601c07367c33ea4e2b9c01523346b72246fc66fcb208373367affdcccd10a192
  • Pointer size: 130 Bytes
  • Size of remote file: 60.4 kB
imgs/fps.png ADDED

Git LFS Details

  • SHA256: e124dab88ef3e565e73011135d5edb4b81e7ad7de8b2b5e77ff9c135598987a1
  • Pointer size: 131 Bytes
  • Size of remote file: 157 kB
imgs/grads_example.jpg ADDED

Git LFS Details

  • SHA256: 7091db492b7961e9aca1a950035dd44bd6660fe6a3cae71f767d4b9bff349f84
  • Pointer size: 131 Bytes
  • Size of remote file: 754 kB
imgs/minesweeper_f.gif ADDED

Git LFS Details

  • SHA256: 3ed804fb492596413c44783961be10d743329517d68ec885499ea0a16d642bfe
  • Pointer size: 131 Bytes
  • Size of remote file: 131 kB
imgs/minesweeper_p.gif ADDED

Git LFS Details

  • SHA256: 135aab587d98e6357ea1e969e8682ff1aae850fc43211dc9500d9d1c458f1014
  • Pointer size: 131 Bytes
  • Size of remote file: 103 kB
imgs/navigator_f.gif ADDED

Git LFS Details

  • SHA256: 48f00c9407e2eb3dfb7e21c7eba1b2da8a96fb785a660da6729d70ccee7aa5c1
  • Pointer size: 131 Bytes
  • Size of remote file: 187 kB
imgs/navigator_p.gif ADDED

Git LFS Details

  • SHA256: a993daead1628cdd0b7a387598106f494379b976a654b9a06b0e1768a612259a
  • Pointer size: 131 Bytes
  • Size of remote file: 213 kB
imgs/ncartpole_f.gif ADDED

Git LFS Details

  • SHA256: 44a7e4d63ef03db38e72330e7aecbe905dd45448a4fd7ff17a6cd047da2822e3
  • Pointer size: 131 Bytes
  • Size of remote file: 420 kB
imgs/ncartpole_p.gif ADDED

Git LFS Details

  • SHA256: 53ec3cd06fe8d0fd1e6f7a8d6bf702d8415c0ea8d570508f3c834f2c1481e39a
  • Pointer size: 131 Bytes
  • Size of remote file: 161 kB
imgs/wandb.png ADDED

Git LFS Details

  • SHA256: d6e7fcdc523e901f6bfcd9f7578cacc180926090d14411000218de227742cf3f
  • Pointer size: 131 Bytes
  • Size of remote file: 763 kB
plotting/Atari_FPS_test.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import os
3
+ import sys
4
+ import time
5
+
6
+ import ale_py
7
+ import gymnasium as gym
8
+ import jax
9
+ import jax.numpy as jnp
10
+
11
+ import sys
12
+
13
+ sys.path.extend(["/home/ubuntu-user/popjym-main"])
14
+
15
+ # import matplotlib.pyplot as plt
16
+
17
+ gym.register_envs(ale_py)
18
+ n_envs = int(os.getenv("NUM_ENVS", 512))
19
+ n_steps = int(os.getenv("NUM_STEPS", 32))
20
+ seed = int(os.getenv("SEED"))
21
+
22
+ env_name = os.getenv("ENV_NAME", "Pong-v4")
23
+ env = gym.make_vec(env_name, num_envs=n_envs, vectorization_mode="sync")
24
+ env_params = None
25
+
26
+
27
+ def test_multi_env_fps(
28
+ env=env, env_params=env_params, seed=seed, n_envs=n_envs, n_steps=n_steps
29
+ ):
30
+ """Test FPS for multiple environments."""
31
+
32
+ obs, infos = env.reset(seed=seed)
33
+
34
+ for _ in range(n_steps):
35
+ _ = env.action_space.seed(seed)
36
+ actions = env.action_space.sample()
37
+ obs, rewards, terminates, truncates, infos = env.step(actions)
38
+ # plt.imshow(obs[0])
39
+ # plt.show()
40
+
41
+ return obs
42
+
43
+
44
+ start = time.time()
45
+ test_multi_env_fps(env, env_params, seed, n_envs, n_steps)
46
+ end = time.time()
47
+
48
+ runtime = end - start
49
+ fps = n_envs * n_steps / runtime
50
+ print(f"time: {end - start}s")
51
+ print(f"{env_name} - Multi Env - Envs: {n_envs}, Steps: {n_steps}, FPS: {fps}")
52
+ csv_file = "atari_fps_results.csv"
53
+ write_header = not os.path.exists(csv_file)
54
+ with open(csv_file, mode="a", newline="") as file:
55
+ writer = csv.writer(file)
56
+ if write_header:
57
+ writer.writerow(["Environment", "Num Envs", "Num Steps", "FPS", "Seed"])
58
+ writer.writerow([env_name, n_envs, n_steps, f"{fps:.0f}", seed])
plotting/MinAtar_popgym_FPS_test.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import multiprocessing
2
+ import time
3
+
4
+ # Test FPS for MinAtar environment
5
+ # Source: https://github.com/kenjyoung/MinAtar/tree/master
6
+ import gymnasium as gym
7
+ import popgym
8
+ from popgym.envs.battleship import Battleship
9
+
10
+ TestEnv = gym.make("MinAtar/Asterix-v1")
11
+
12
+ NUM_STEPS = 512
13
+
14
+ # Test FPS for popgym environment
15
+ # Source: https://github.com/proroklab/popgym
16
+ # TestEnv = Battleship()
17
+
18
+
19
+ def run_sample(e, num_steps):
20
+ env = e
21
+ env.reset()
22
+ start = time.time()
23
+ for i in range(num_steps):
24
+ obs, _, terminated, truncated, _ = env.step(env.action_space.sample())
25
+ print(obs.shape)
26
+ if terminated or truncated:
27
+ env.reset()
28
+ end = time.time()
29
+ elapsed = end - start
30
+ fps = num_steps / elapsed
31
+ return fps
32
+
33
+
34
+ def main():
35
+ print(f"Testing environment: {TestEnv}")
36
+ for n in range(1, 10):
37
+ num_workers = 2**n
38
+
39
+ # Single environment test (for baseline reference)
40
+ fps_single = run_sample(TestEnv, NUM_STEPS)
41
+ print(f"{TestEnv} (1x) FPS: {fps_single:.0f}")
42
+
43
+ with multiprocessing.Pool(processes=num_workers) as p:
44
+ envs = num_workers * [TestEnv]
45
+ steps = num_workers * [int(NUM_STEPS // num_workers)]
46
+ fps_multi = sum(p.starmap(run_sample, zip(envs, steps)))
47
+ print(f"{TestEnv} ({num_workers}x) FPS: {fps_multi:.0f}")
48
+
49
+
50
+ if __name__ == "__main__":
51
+ main()
plotting/PArcade_gymnax_FPS_test.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Compute the average steps per second of the environment.
3
+
4
+ """
5
+
6
+ import sys
7
+
8
+ sys.path.extend(["/home/ubuntu-user/popjym-main"])
9
+
10
+ import csv
11
+ import os
12
+ import sys
13
+ import time
14
+
15
+ import gymnax
16
+ import jax
17
+ import jax.numpy as jnp
18
+ from jaxlib.xla_extension import XlaRuntimeError
19
+
20
+ import popgym_arcade
21
+
22
+ env_name = os.getenv("ENV_NAME", "MineSweeperEasy")
23
+ partial_obs = os.getenv("PARTIAL_OBS", "False") == "True"
24
+
25
+ n_envs = int(os.getenv("NUM_ENVS", 512))
26
+ n_steps = int(os.getenv("NUM_STEPS", 512))
27
+ seed = jax.random.PRNGKey(0)
28
+
29
+ # Test FPS for popgym arcade environments
30
+ # env, env_params = popgym_arcade.make(env_name, partial_obs=partial_obs)
31
+
32
+ # Test FPS for gymnax environments
33
+ # Source: https://github.com/RobertTLange/gymnax
34
+ env, env_params = gymnax.make(env_name)
35
+
36
+ # jax.config.update("jax_enable_x64", True)
37
+
38
+
39
+ def test_multi_env_fps(
40
+ env=env, env_params=env_params, seed=seed, num_envs=n_envs, num_steps=n_steps
41
+ ):
42
+ """Test FPS for multiple environments."""
43
+ vmap_reset = jax.vmap(env.reset, in_axes=(0, None))
44
+ vmap_step = jax.vmap(env.step, in_axes=(0, 0, 0, None))
45
+ vmap_sample = jax.vmap(env.action_space(env_params).sample)
46
+ seeds = jax.random.split(seed, num_envs)
47
+ obs, states = vmap_reset(seeds, env_params)
48
+
49
+ for _ in range(num_steps):
50
+ action = vmap_sample(seeds)
51
+ obs, states, rewards, dones, _ = vmap_step(seeds, states, action, env_params)
52
+
53
+ return obs
54
+
55
+
56
+ fps = jax.jit(test_multi_env_fps)
57
+ carry = fps(seed=jax.random.PRNGKey(1))
58
+ carry.block_until_ready()
59
+
60
+ start = time.time()
61
+ carry = fps(seed=jax.random.PRNGKey(2))
62
+ carry.block_until_ready()
63
+ end = time.time()
64
+
65
+ runtime = end - start
66
+ fps = n_envs * n_steps / runtime
67
+ print(f"env: {env_name}, partial_obs: {partial_obs}")
68
+ print(f"time: {end - start}s")
69
+ print(f"{env_name} - Multi Env - Envs: {n_envs}, Steps: {n_steps}, FPS: {fps}")
70
+ csv_file = "gymnaxfpsdata.csv"
71
+ write_header = not os.path.exists(csv_file)
72
+ with open(csv_file, mode="a", newline="") as file:
73
+ writer = csv.writer(file)
74
+ if write_header:
75
+ writer.writerow(["Environment", "Partial Obs", "Num Envs", "Num Steps", "FPS"])
76
+ writer.writerow([env_name, partial_obs, n_envs, n_steps, f"{fps:.0f}"])
77
+
78
+ print("Testing complete. Results appended to fps_results.csv")
plotting/analysis_plot.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
plotting/churn.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Plotting churn ratio difference between partial and full observability
3
+ """
4
+
5
+ import jax.numpy as jnp
6
+ import matplotlib.pyplot as plt
7
+ import numpy as np
8
+ import pandas as pd
9
+ import seaborn as sns
10
+ from jax import lax
11
+ from scipy.interpolate import interp1d
12
+
13
+ import wandb
14
+
15
+
16
+ def f(name):
17
+ WINDOW_SIZE = 100
18
+ SIGMA = 100
19
+ INTERP_POINTS = 1000
20
+ NORMALIZING_FACTOR = 200
21
+
22
+ ENV_MAX_STEPS = {
23
+ "CountRecallEasy": 2e7,
24
+ "CountRecallMedium": 2e7,
25
+ "CountRecallHard": 2e7,
26
+ "BattleShipEasy": 2e7,
27
+ "BattleShipMedium": 2e7,
28
+ "BattleShipHard": 2e7,
29
+ # other environments with default max steps 1e7
30
+ }
31
+ AXIS_FONT = {"fontsize": 9, "labelpad": 8}
32
+ TICK_FONT = {"labelsize": 8}
33
+
34
+ api = wandb.Api()
35
+ runs = api.runs("bolt-um/Arcade-RLC-Churn")
36
+ filtered_runs = [run for run in runs if run.state == "finished"]
37
+ print(f"Total runs: {len(runs)}, Completed runs: {len(filtered_runs)}")
38
+
39
+ METRIC_MAPPING = {
40
+ "PQN": {"churn_ratio": "churn_ratio", "time_col": "env_step"},
41
+ "PQN_RNN": {"churn_ratio": "churn_ratio", "time_col": "env_step"},
42
+ "default": {"churn_ratio": "churn_ratio", "time_col": "TOTAL_TIMESTEPS"},
43
+ }
44
+
45
+ def process_run(run):
46
+ """Process individual W&B run with dynamic max steps per environment"""
47
+ try:
48
+ config = {k: v for k, v in run.config.items() if not k.startswith("_")}
49
+ env_name = config.get("ENV_NAME", "UnknownEnv")
50
+ partial_status = str(config.get("PARTIAL", False))
51
+
52
+ if env_name in ENV_MAX_STEPS:
53
+ env_max_step = ENV_MAX_STEPS[env_name]
54
+ else:
55
+ env_max_step = 1e7
56
+
57
+ alg_name = config.get("ALG_NAME", "").upper()
58
+ memory_type = "MLP"
59
+ if alg_name == "PQN_RNN":
60
+ memory_type = config.get("MEMORY_TYPE", "Unknown").capitalize()
61
+
62
+ metric_map = METRIC_MAPPING.get(alg_name, METRIC_MAPPING["default"])
63
+ history = list(
64
+ run.scan_history(
65
+ keys=[metric_map["churn_ratio"], metric_map["time_col"]]
66
+ )
67
+ )
68
+ history = pd.DataFrame(
69
+ history, columns=[metric_map["churn_ratio"], metric_map["time_col"]]
70
+ )
71
+
72
+ history["true_steps"] = history[metric_map["time_col"]].clip(
73
+ upper=env_max_step
74
+ )
75
+ history = history.sort_values(metric_map["time_col"]).drop_duplicates(
76
+ subset=["true_steps"]
77
+ )
78
+
79
+ if len(history) < 2:
80
+ print(f"Skipping {run.name} due to insufficient data points")
81
+ return None
82
+
83
+ # Get first and last values for extrapolation
84
+ first_return = history[metric_map["churn_ratio"]].iloc[0]
85
+ last_return = history[metric_map["churn_ratio"]].iloc[-1]
86
+
87
+ # Create unified interpolation grid for this environment
88
+ unified_steps = np.linspace(0, env_max_step, INTERP_POINTS)
89
+ unified_steps = np.round(unified_steps, decimals=5)
90
+ scale_factor = NORMALIZING_FACTOR / env_max_step
91
+
92
+ # Interpolate returns to uniform grid
93
+ interp_func = interp1d(
94
+ history["true_steps"],
95
+ history[metric_map["churn_ratio"]],
96
+ kind="linear",
97
+ bounds_error=False,
98
+ fill_value=(first_return, last_return),
99
+ )
100
+ interpolated_churn_ratio = interp_func(unified_steps)
101
+
102
+ return pd.DataFrame(
103
+ {
104
+ "Algorithm": f"{alg_name} ({memory_type})",
105
+ "churn_ratio": interpolated_churn_ratio,
106
+ # "Smoothed Return": smoothed_returns,
107
+ # "Cummax Return": np.array(cummax_returns), # Convert back to NumPy
108
+ "True Steps": unified_steps,
109
+ "EnvName": env_name,
110
+ "Partial": partial_status,
111
+ "Seed": str(config.get("SEED", 0)),
112
+ "run_id": run.id,
113
+ "StepsNormalized": unified_steps / env_max_step,
114
+ "EnvMaxStep": env_max_step,
115
+ "ScaleFactor": scale_factor,
116
+ }
117
+ )
118
+
119
+ except Exception as e:
120
+ print(f"Error processing {run.name}: {str(e)}")
121
+ return None
122
+
123
+ # Process all runs and combine data
124
+ # all_data = [df for run in filtered_runs if (df := process_run(run)) is not None]
125
+
126
+ # if not all_data:
127
+ # print("No valid data to process")
128
+ # exit()
129
+ # runs_df = pd.concat(all_data, ignore_index=True)
130
+ # runs_df.to_pickle("churnratiodata.pkl")
131
+
132
+ runs_df = pd.read_pickle("churnratiodata.pkl")
133
+ # print(f"Total runs processed: {runs_df}")
134
+
135
+ diff_df = pd.DataFrame()
136
+
137
+ for env_name in runs_df["EnvName"].unique():
138
+ env_data = runs_df[runs_df["EnvName"] == env_name]
139
+
140
+ partial_true = env_data[env_data["Partial"] == "True"]
141
+ partial_false = env_data[env_data["Partial"] == "False"]
142
+
143
+ merged = pd.merge(
144
+ partial_true[["StepsNormalized", "churn_ratio"]],
145
+ partial_false[["StepsNormalized", "churn_ratio"]],
146
+ on="StepsNormalized",
147
+ suffixes=("_true", "_false"),
148
+ how="inner",
149
+ )
150
+
151
+ merged["churn_diff"] = np.abs(
152
+ merged["churn_ratio_true"] - merged["churn_ratio_false"]
153
+ )
154
+ merged["EnvName"] = env_name.replace("Easy", "")
155
+
156
+ # diff_df = pd.concat([diff_df, merged[['EnvName', 'StepsNormalized', 'churn_diff']]], ignore_index=True)
157
+ merged["churn_diff_cummax"] = merged.groupby("EnvName")["churn_diff"].cummax()
158
+ # merged['churn_diff_avg'] = merged.groupby('EnvName')['churn_diff'].transform('mean')
159
+ merged["churn_diff_avg"] = merged.groupby("EnvName")["churn_diff"].transform(
160
+ lambda x: x.rolling(window=20, min_periods=1).mean()
161
+ )
162
+
163
+ diff_df = pd.concat(
164
+ [
165
+ diff_df,
166
+ merged[
167
+ [
168
+ "EnvName",
169
+ "StepsNormalized",
170
+ "churn_diff",
171
+ "churn_diff_cummax",
172
+ "churn_diff_avg",
173
+ ]
174
+ ],
175
+ ],
176
+ ignore_index=True,
177
+ )
178
+
179
+ plt.figure(figsize=(12, 7))
180
+ sns.set()
181
+ sns.lineplot(
182
+ data=diff_df,
183
+ x="StepsNormalized",
184
+ y="churn_diff_avg",
185
+ hue="EnvName",
186
+ palette="Spectral",
187
+ linewidth=2.5,
188
+ )
189
+
190
+ plt.title("Relative Policy Churn", fontsize=35)
191
+ plt.xlabel("Training Progress", fontsize=35)
192
+ plt.ylabel("POMDP/MDP Difference", fontsize=35)
193
+ plt.tick_params(axis="both", which="major", labelsize=35)
194
+ plt.legend(title="", loc="upper left", fontsize=20, ncol=2)
195
+ plt.grid(True, alpha=0.5)
196
+ plt.tight_layout()
197
+ plt.savefig("{}.pdf".format(name), dpi=300, bbox_inches="tight", facecolor="white")
198
+
199
+
200
+ for i in range(1):
201
+ f(f"churn{i}")
202
+ print(f"churn{i} done")
plotting/fps.sh ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ # filepath: ./run_all_experiments.sh
3
+
4
+ # POPGYM Arcade environment names
5
+ # ENV_NAMES=("MineSweeperEasy" "NavigatorEasy" "CartPoleEasy" "AutoEncodeEasy" "BattleShipEasy" "NoisyCartPoleEasy" "CountRecallEasy")
6
+
7
+
8
+ # # Atari environment names
9
+ # ENV_NAMES=("Pong-v4")
10
+
11
+ # gymnax environment names
12
+ # JIT error: "Asterix-MinAtar" "Freeway-MinAtar"
13
+ ENV_NAMES=("Acrobot-v1" "Pendulum-v1" "SpaceInvaders-MinAtar" "Breakout-MinAtar" "CartPole-v1" "MountainCar-v0")
14
+
15
+ # PARTIAL_OBS=("True")
16
+ SEEDS=($(seq 0 9))
17
+ NUM_ENVS_POWERS=($(seq 1 9))
18
+ NUM_STEPS=32
19
+
20
+ # Loop over each setting combination
21
+ for env_name in "${ENV_NAMES[@]}"; do
22
+ for seed in "${SEEDS[@]}"; do
23
+ for power in "${NUM_ENVS_POWERS[@]}"; do
24
+ # Calculate number of environments using 2^power
25
+ n_envs=$(echo "2^$power" | bc)
26
+
27
+ # Export environment variables
28
+ export ENV_NAME="$env_name"
29
+ export SEED="$seed"
30
+ export NUM_ENVS="$n_envs"
31
+ export NUM_STEPS="$NUM_STEPS"
32
+
33
+ echo "Running experiment with ENV_NAME=$ENV_NAME, SEED=$SEED, NUM_ENVS=$NUM_ENVS, NUM_STEPS=$NUM_STEPS"
34
+ python PArcade_gymnax_FPS_test.py
35
+
36
+ echo "Cleaning up leftover GPU processes..."
37
+ pids=$(nvidia-smi | awk '/python/ {print $5}')
38
+ for pid in $pids; do
39
+ echo "Killing process with PID $pid"
40
+ kill -9 $pid 2>/dev/null
41
+ done
42
+ done
43
+ done
44
+ done
45
+
46
+ echo "All experiments complete."
plotting/generate_analysis_data.py ADDED
@@ -0,0 +1,419 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import matplotlib.pyplot as plt
2
+ import pandas as pd
3
+ import seaborn as sns
4
+
5
+ # Import the function from run_multi_seed_analysis.py
6
+ from plotting.run_multi_seed_analysis import run_multiple_seeds_and_save_csv
7
+
8
+
9
+ def analyze_model_saliency(
10
+ config, seeds=[0, 1, 2, 3, 4], max_steps=200, visualize=True
11
+ ):
12
+ """
13
+ Analyze the saliency maps of a model with the given configuration.
14
+
15
+ Args:
16
+ config (dict): Dictionary containing model configuration with keys:
17
+ - ENV_NAME: Environment name
18
+ - PARTIAL: Whether to use partial observations
19
+ - MEMORY_TYPE: Type of memory to use
20
+ - OBS_SIZE: Size of observations
21
+ - MODEL_SEED: Seed used for the model (to locate model file)
22
+ seeds (list): List of seeds to run the analysis with
23
+ max_steps (int): Maximum number of steps per episode
24
+ visualize (bool): Whether to create visualization plots
25
+
26
+ Returns:
27
+ dict: A dictionary containing:
28
+ - csv_path: Path to the CSV file with results
29
+ - avg_plot_path: Path to the average saliency plot
30
+ - individual_plot_path: Path to the individual seeds saliency plot
31
+ """
32
+ output_csv = f'saliency_results_{config["MEMORY_TYPE"]}_{config["ENV_NAME"]}_Partial={config["PARTIAL"]}.csv'
33
+
34
+ # Run the analysis for all seeds
35
+ output_csv = run_multiple_seeds_and_save_csv(
36
+ config, seeds, max_steps=max_steps, output_csv=output_csv
37
+ )
38
+
39
+ result_paths = {"csv_path": output_csv}
40
+
41
+ if visualize:
42
+ # Load the results for visualization
43
+ results_df = pd.read_csv(output_csv)
44
+
45
+ # Create a visualization of the distributions for each seed
46
+ plt.figure(figsize=(12, 8))
47
+ sns.set_style("whitegrid")
48
+
49
+ # Filter columns that represent positions
50
+ pos_columns = [col for col in results_df.columns if col.startswith("pos_")]
51
+
52
+ # Plot each seed's distribution
53
+ for idx, row in results_df.iterrows():
54
+ seed = row["seed"]
55
+ positions = [float(col.split("_")[1]) for col in pos_columns]
56
+ values = [row[col] for col in pos_columns]
57
+ plt.plot(positions, values, marker="o", markersize=3, label=f"Seed {seed}")
58
+
59
+ plt.xlabel("Normalized Episode Position")
60
+ plt.ylabel("Saliency Magnitude")
61
+ plt.title(
62
+ f"Terminal Saliency Distribution by Seed\n{config['MEMORY_TYPE']} on {config['ENV_NAME']}"
63
+ )
64
+ plt.legend()
65
+ plt.tight_layout()
66
+
67
+ individual_plot_path = f"saliency_plot_{config['MEMORY_TYPE']}_{config['ENV_NAME']}_Partial={config['PARTIAL']}.png"
68
+ plt.savefig(individual_plot_path, dpi=300)
69
+ plt.close()
70
+
71
+ # Calculate average distribution across seeds
72
+ avg_values = [results_df[col].mean() for col in pos_columns]
73
+ std_values = [results_df[col].std() for col in pos_columns]
74
+ positions = [float(col.split("_")[1]) for col in pos_columns]
75
+
76
+ plt.figure(figsize=(12, 8))
77
+ plt.plot(positions, avg_values, "b-", linewidth=2, label="Mean Distribution")
78
+ plt.fill_between(
79
+ positions,
80
+ [avg - std for avg, std in zip(avg_values, std_values)],
81
+ [avg + std for avg, std in zip(avg_values, std_values)],
82
+ color="b",
83
+ alpha=0.2,
84
+ label="Standard Deviation",
85
+ )
86
+
87
+ plt.xlabel("Normalized Episode Position")
88
+ plt.ylabel("Average Saliency Magnitude")
89
+ plt.title(
90
+ f"Average Terminal Saliency Distribution Across Seeds\n{config['MEMORY_TYPE']} on {config['ENV_NAME']}"
91
+ )
92
+ plt.legend()
93
+ plt.tight_layout()
94
+
95
+ avg_plot_path = f"avg_saliency_plot_{config['MEMORY_TYPE']}_{config['ENV_NAME']}_Partial={config['PARTIAL']}.png"
96
+ plt.savefig(avg_plot_path, dpi=300)
97
+ plt.close()
98
+
99
+ result_paths["individual_plot_path"] = individual_plot_path
100
+ result_paths["avg_plot_path"] = avg_plot_path
101
+
102
+ print(f"Analysis complete. Results saved to: {output_csv}")
103
+ return result_paths
104
+
105
+
106
+ # Example usage
107
+ if __name__ == "__main__":
108
+ configs = [
109
+ # fart models
110
+ {
111
+ "ENV_NAME": "AutoEncodeEasy",
112
+ "PARTIAL": False,
113
+ "MEMORY_TYPE": "fart",
114
+ "OBS_SIZE": 128,
115
+ "MODEL_SEED": 3,
116
+ },
117
+ {
118
+ "ENV_NAME": "AutoEncodeEasy",
119
+ "PARTIAL": True,
120
+ "MEMORY_TYPE": "fart",
121
+ "OBS_SIZE": 128,
122
+ "MODEL_SEED": 4,
123
+ },
124
+ {
125
+ "ENV_NAME": "BattleShipEasy",
126
+ "PARTIAL": False,
127
+ "MEMORY_TYPE": "fart",
128
+ "OBS_SIZE": 128,
129
+ "MODEL_SEED": 0,
130
+ },
131
+ {
132
+ "ENV_NAME": "BattleShipEasy",
133
+ "PARTIAL": True,
134
+ "MEMORY_TYPE": "fart",
135
+ "OBS_SIZE": 128,
136
+ "MODEL_SEED": 0,
137
+ },
138
+ {
139
+ "ENV_NAME": "CartPoleEasy",
140
+ "PARTIAL": False,
141
+ "MEMORY_TYPE": "fart",
142
+ "OBS_SIZE": 128,
143
+ "MODEL_SEED": 0,
144
+ },
145
+ {
146
+ "ENV_NAME": "CartPoleEasy",
147
+ "PARTIAL": True,
148
+ "MEMORY_TYPE": "fart",
149
+ "OBS_SIZE": 128,
150
+ "MODEL_SEED": 1,
151
+ },
152
+ {
153
+ "ENV_NAME": "CountRecallEasy",
154
+ "PARTIAL": False,
155
+ "MEMORY_TYPE": "fart",
156
+ "OBS_SIZE": 128,
157
+ "MODEL_SEED": 0,
158
+ },
159
+ {
160
+ "ENV_NAME": "CountRecallEasy",
161
+ "PARTIAL": True,
162
+ "MEMORY_TYPE": "fart",
163
+ "OBS_SIZE": 128,
164
+ "MODEL_SEED": 0,
165
+ },
166
+ {
167
+ "ENV_NAME": "MineSweeperEasy",
168
+ "PARTIAL": False,
169
+ "MEMORY_TYPE": "fart",
170
+ "OBS_SIZE": 128,
171
+ "MODEL_SEED": 4,
172
+ },
173
+ {
174
+ "ENV_NAME": "MineSweeperEasy",
175
+ "PARTIAL": True,
176
+ "MEMORY_TYPE": "fart",
177
+ "OBS_SIZE": 128,
178
+ "MODEL_SEED": 3,
179
+ },
180
+ {
181
+ "ENV_NAME": "NavigatorEasy",
182
+ "PARTIAL": False,
183
+ "MEMORY_TYPE": "fart",
184
+ "OBS_SIZE": 128,
185
+ "MODEL_SEED": 2,
186
+ },
187
+ {
188
+ "ENV_NAME": "NavigatorEasy",
189
+ "PARTIAL": True,
190
+ "MEMORY_TYPE": "fart",
191
+ "OBS_SIZE": 128,
192
+ "MODEL_SEED": 1,
193
+ },
194
+ {
195
+ "ENV_NAME": "NoisyCartPoleEasy",
196
+ "PARTIAL": False,
197
+ "MEMORY_TYPE": "fart",
198
+ "OBS_SIZE": 128,
199
+ "MODEL_SEED": 4,
200
+ },
201
+ {
202
+ "ENV_NAME": "NoisyCartPoleEasy",
203
+ "PARTIAL": True,
204
+ "MEMORY_TYPE": "fart",
205
+ "OBS_SIZE": 128,
206
+ "MODEL_SEED": 0,
207
+ },
208
+ # lru models
209
+ {
210
+ "ENV_NAME": "AutoEncodeEasy",
211
+ "PARTIAL": False,
212
+ "MEMORY_TYPE": "lru",
213
+ "OBS_SIZE": 128,
214
+ "MODEL_SEED": 0,
215
+ },
216
+ {
217
+ "ENV_NAME": "AutoEncodeEasy",
218
+ "PARTIAL": True,
219
+ "MEMORY_TYPE": "lru",
220
+ "OBS_SIZE": 128,
221
+ "MODEL_SEED": 0,
222
+ },
223
+ {
224
+ "ENV_NAME": "BattleShipEasy",
225
+ "PARTIAL": False,
226
+ "MEMORY_TYPE": "lru",
227
+ "OBS_SIZE": 128,
228
+ "MODEL_SEED": 1,
229
+ },
230
+ {
231
+ "ENV_NAME": "BattleShipEasy",
232
+ "PARTIAL": True,
233
+ "MEMORY_TYPE": "lru",
234
+ "OBS_SIZE": 128,
235
+ "MODEL_SEED": 2,
236
+ },
237
+ {
238
+ "ENV_NAME": "CartPoleEasy",
239
+ "PARTIAL": False,
240
+ "MEMORY_TYPE": "lru",
241
+ "OBS_SIZE": 128,
242
+ "MODEL_SEED": 3,
243
+ },
244
+ {
245
+ "ENV_NAME": "CartPoleEasy",
246
+ "PARTIAL": True,
247
+ "MEMORY_TYPE": "lru",
248
+ "OBS_SIZE": 128,
249
+ "MODEL_SEED": 3,
250
+ },
251
+ {
252
+ "ENV_NAME": "CountRecallEasy",
253
+ "PARTIAL": False,
254
+ "MEMORY_TYPE": "lru",
255
+ "OBS_SIZE": 128,
256
+ "MODEL_SEED": 0,
257
+ },
258
+ {
259
+ "ENV_NAME": "CountRecallEasy",
260
+ "PARTIAL": True,
261
+ "MEMORY_TYPE": "lru",
262
+ "OBS_SIZE": 128,
263
+ "MODEL_SEED": 1,
264
+ },
265
+ {
266
+ "ENV_NAME": "MineSweeperEasy",
267
+ "PARTIAL": False,
268
+ "MEMORY_TYPE": "lru",
269
+ "OBS_SIZE": 128,
270
+ "MODEL_SEED": 4,
271
+ },
272
+ {
273
+ "ENV_NAME": "MineSweeperEasy",
274
+ "PARTIAL": True,
275
+ "MEMORY_TYPE": "lru",
276
+ "OBS_SIZE": 128,
277
+ "MODEL_SEED": 1,
278
+ },
279
+ {
280
+ "ENV_NAME": "NavigatorEasy",
281
+ "PARTIAL": False,
282
+ "MEMORY_TYPE": "lru",
283
+ "OBS_SIZE": 128,
284
+ "MODEL_SEED": 2,
285
+ },
286
+ {
287
+ "ENV_NAME": "NavigatorEasy",
288
+ "PARTIAL": True,
289
+ "MEMORY_TYPE": "lru",
290
+ "OBS_SIZE": 128,
291
+ "MODEL_SEED": 4,
292
+ },
293
+ {
294
+ "ENV_NAME": "NoisyCartPoleEasy",
295
+ "PARTIAL": False,
296
+ "MEMORY_TYPE": "lru",
297
+ "OBS_SIZE": 128,
298
+ "MODEL_SEED": 2,
299
+ },
300
+ {
301
+ "ENV_NAME": "NoisyCartPoleEasy",
302
+ "PARTIAL": True,
303
+ "MEMORY_TYPE": "lru",
304
+ "OBS_SIZE": 128,
305
+ "MODEL_SEED": 2,
306
+ },
307
+ # mingru models
308
+ {
309
+ "ENV_NAME": "AutoEncodeEasy",
310
+ "PARTIAL": False,
311
+ "MEMORY_TYPE": "mingru",
312
+ "OBS_SIZE": 128,
313
+ "MODEL_SEED": 1,
314
+ },
315
+ {
316
+ "ENV_NAME": "AutoEncodeEasy",
317
+ "PARTIAL": True,
318
+ "MEMORY_TYPE": "mingru",
319
+ "OBS_SIZE": 128,
320
+ "MODEL_SEED": 1,
321
+ },
322
+ {
323
+ "ENV_NAME": "BattleShipEasy",
324
+ "PARTIAL": False,
325
+ "MEMORY_TYPE": "mingru",
326
+ "OBS_SIZE": 128,
327
+ "MODEL_SEED": 2,
328
+ },
329
+ {
330
+ "ENV_NAME": "BattleShipEasy",
331
+ "PARTIAL": True,
332
+ "MEMORY_TYPE": "mingru",
333
+ "OBS_SIZE": 128,
334
+ "MODEL_SEED": 2,
335
+ },
336
+ {
337
+ "ENV_NAME": "CartPoleEasy",
338
+ "PARTIAL": False,
339
+ "MEMORY_TYPE": "mingru",
340
+ "OBS_SIZE": 128,
341
+ "MODEL_SEED": 4,
342
+ },
343
+ {
344
+ "ENV_NAME": "CartPoleEasy",
345
+ "PARTIAL": True,
346
+ "MEMORY_TYPE": "mingru",
347
+ "OBS_SIZE": 128,
348
+ "MODEL_SEED": 4,
349
+ },
350
+ {
351
+ "ENV_NAME": "CountRecallEasy",
352
+ "PARTIAL": False,
353
+ "MEMORY_TYPE": "mingru",
354
+ "OBS_SIZE": 128,
355
+ "MODEL_SEED": 2,
356
+ },
357
+ {
358
+ "ENV_NAME": "CountRecallEasy",
359
+ "PARTIAL": True,
360
+ "MEMORY_TYPE": "mingru",
361
+ "OBS_SIZE": 128,
362
+ "MODEL_SEED": 2,
363
+ },
364
+ {
365
+ "ENV_NAME": "MineSweeperEasy",
366
+ "PARTIAL": False,
367
+ "MEMORY_TYPE": "mingru",
368
+ "OBS_SIZE": 128,
369
+ "MODEL_SEED": 0,
370
+ },
371
+ {
372
+ "ENV_NAME": "MineSweeperEasy",
373
+ "PARTIAL": True,
374
+ "MEMORY_TYPE": "mingru",
375
+ "OBS_SIZE": 128,
376
+ "MODEL_SEED": 2,
377
+ },
378
+ {
379
+ "ENV_NAME": "NavigatorEasy",
380
+ "PARTIAL": False,
381
+ "MEMORY_TYPE": "mingru",
382
+ "OBS_SIZE": 128,
383
+ "MODEL_SEED": 0,
384
+ },
385
+ {
386
+ "ENV_NAME": "NavigatorEasy",
387
+ "PARTIAL": True,
388
+ "MEMORY_TYPE": "mingru",
389
+ "OBS_SIZE": 128,
390
+ "MODEL_SEED": 0,
391
+ },
392
+ {
393
+ "ENV_NAME": "NoisyCartPoleEasy",
394
+ "PARTIAL": False,
395
+ "MEMORY_TYPE": "mingru",
396
+ "OBS_SIZE": 128,
397
+ "MODEL_SEED": 3,
398
+ },
399
+ {
400
+ "ENV_NAME": "NoisyCartPoleEasy",
401
+ "PARTIAL": True,
402
+ "MEMORY_TYPE": "mingru",
403
+ "OBS_SIZE": 128,
404
+ "MODEL_SEED": 4,
405
+ },
406
+ ]
407
+
408
+ # Example usage
409
+ seeds = [0, 1, 2, 3, 4]
410
+
411
+ for config in configs:
412
+ print(
413
+ f"Analyzing {config['MEMORY_TYPE']} on {config['ENV_NAME']} (Partial={config['PARTIAL']}, Seed={config['MODEL_SEED']})"
414
+ )
415
+
416
+ results = analyze_model_saliency(
417
+ config=config, seeds=seeds, max_steps=200, visualize=True
418
+ )
419
+ print(f"Successfully analyzed: {results}")
plotting/noiseva.py ADDED
@@ -0,0 +1,449 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Require pip install moviepy==1.0.3
2
+ import os
3
+ from typing import NamedTuple
4
+
5
+ import chex
6
+ import equinox as eqx
7
+ import jax
8
+ import jax.numpy as jnp
9
+ import matplotlib.pyplot as plt
10
+ import numpy as np
11
+ import optax
12
+ import seaborn as sns
13
+ from jax import lax
14
+
15
+ import popgym_arcade
16
+ import wandb
17
+ from popgym_arcade.baselines.model import QNetworkRNN, add_batch_dim
18
+ from popgym_arcade.baselines.pqn_rnn import debug_shape
19
+ from popgym_arcade.wrappers import LogWrapper
20
+
21
+ plt.rcParams["text.usetex"] = True
22
+ plt.rcParams["font.family"] = "sans-serif"
23
+ plt.rcParams["font.sans-serif"] = ["Arial"]
24
+
25
+ # This is the number of steps or frames to evaluate
26
+ STEPS = 101
27
+
28
+
29
+ def evaluate(model, config):
30
+ seed = jax.random.PRNGKey(11)
31
+ seed, _rng = jax.random.split(seed)
32
+ env, env_params = popgym_arcade.make(
33
+ config["ENV_NAME"], partial_obs=config["PARTIAL"], obs_size=config["OBS_SIZE"]
34
+ )
35
+ vmap_reset = lambda n_envs: lambda rng: jax.vmap(env.reset, in_axes=(0, None))(
36
+ jax.random.split(rng, n_envs), env_params
37
+ )
38
+ vmap_step = lambda n_envs: lambda rng, env_state, action: jax.vmap(
39
+ env.step, in_axes=(0, 0, 0, None)
40
+ )(jax.random.split(rng, n_envs), env_state, action, env_params)
41
+
42
+ def run_evaluation(rng):
43
+ # Reset environment
44
+ obs, state = vmap_reset(2)(rng)
45
+ init_done = jnp.zeros(2, dtype=bool)
46
+ init_action = jnp.zeros(2, dtype=int)
47
+ init_hs = model.initialize_carry(key=rng)
48
+ hs = add_batch_dim(init_hs, 2)
49
+
50
+ frame_shape = obs[0].shape
51
+ frames = jnp.zeros((STEPS, *frame_shape), dtype=jnp.float32)
52
+ # Store initial observation
53
+ frame = jnp.asarray(obs[0])
54
+ frame = (frame * 255).astype(jnp.float32)
55
+ frames = frames.at[0].set(frame)
56
+ normal_qvals = jnp.zeros((STEPS, 2, 5))
57
+ carry = (hs, obs, init_done, init_action, state, frames, rng)
58
+
59
+ def evaluate_step(carry, i):
60
+ hs, obs, done, action, state, frames, _rng = carry
61
+ _rng, rng_step = jax.random.split(_rng, 2)
62
+
63
+ obs_batch = obs[jnp.newaxis, :]
64
+ done_batch = done[jnp.newaxis, :]
65
+ action_batch = action[jnp.newaxis, :]
66
+ # jax.debug.print("hs shape: {}", debug_shape(hs)) # tuple (2, 512) (2,)
67
+ # jax.debug.print("obs_batch shape: {}", obs_batch.shape) # Shape (1, 2, 128, 128, 3)
68
+ # jax.debug.print("done_batch shape: {}", done_batch.shape) # Shape (1, 2)
69
+ # jax.debug.print("action_batch shape: {}", action_batch.shape) # Shape (1, 2)
70
+ hs, q_val = model(hs, obs_batch, done_batch, action_batch)
71
+ q_val = lax.stop_gradient(q_val)
72
+ q_val = q_val.squeeze(axis=0)
73
+ # jax.debug.print("q_val shape: {}", q_val.shape) # Shape (2, n_actions)
74
+
75
+ action = jnp.argmax(q_val, axis=-1)
76
+
77
+ obs, new_state, reward, done, info = vmap_step(2)(rng_step, state, action)
78
+ state = new_state
79
+
80
+ frame = jnp.asarray(obs[0])
81
+ frame = (frame * 255).astype(jnp.float32)
82
+ frames = frames.at[i + 1].set(frame)
83
+
84
+ carry = (hs, obs, done, action, state, frames, _rng)
85
+ return carry, q_val
86
+
87
+ def body_fun(i, val):
88
+ carry, normal_qvals = val
89
+ carry, q_val = evaluate_step(carry, i)
90
+ normal_qvals = normal_qvals.at[i].set(q_val)
91
+
92
+ return (carry, normal_qvals)
93
+
94
+ carry, normal_qvals = lax.fori_loop(0, STEPS, body_fun, (carry, normal_qvals))
95
+ _, _, _, _, _, frames, _rng = carry
96
+ return frames, _rng, normal_qvals
97
+
98
+ # imageio.mimsave('{}_{}_{}_Partial={}_SEED={}.gif'.format(config["TRAIN_TYPE"], config["MEMORY_TYPE"], config["ENV_NAME"], config["PARTIAL"], config["SEED"]), frames)
99
+ # wandb.log({"{}_{}_{}_model_Partial={}_SEED={}".format(config["TRAIN_TYPE"], config["MEMORY_TYPE"], config["ENV_NAME"], config["PARTIAL"], config["SEED"]): wandb.Video(frames, fps=4)})
100
+ wandb.init(
101
+ project=f'{config["PROJECT"]}',
102
+ name=f'{config["TRAIN_TYPE"]}_{config["MEMORY_TYPE"]}_{config["ENV_NAME"]}_Partial={config["PARTIAL"]}_SEED={config["SEED"]}',
103
+ )
104
+
105
+ # Rollout
106
+ noiseless_frames, _rng, normal_qvals = run_evaluation(
107
+ _rng
108
+ ) # Shape (STEPS, 128, 128, 3), normal_qvals shape (STEPS, 2, n_actions)
109
+
110
+ def add_noise(o, _rng):
111
+ noise = jax.random.normal(_rng, o.shape) * 1.0
112
+ return noise + o
113
+
114
+ def qvals_for_frames(frames, rng, init_hs):
115
+ hs = add_batch_dim(init_hs, 2)
116
+ init_done = jnp.zeros(2, dtype=bool)
117
+ init_action = jnp.zeros(2, dtype=int)
118
+ q_vals = jnp.zeros((STEPS, 2, 5))
119
+
120
+ def process_step(carry, frame):
121
+ hs, done, action = carry
122
+ obs = jnp.asarray(frame)
123
+ # jax.debug.print("frame shape: {}", obs.shape) # Shape (128, 128, 3)
124
+ obs = jnp.stack([obs, obs], axis=0) # Simulate 2 environments
125
+
126
+ obs_batch = obs[jnp.newaxis, :] # Shape (1, 2, 128, 128, 3)
127
+ done_batch = done[jnp.newaxis, :] # Shape (1, 2)
128
+ action_batch = action[jnp.newaxis, :] # Shape (1, 2)
129
+
130
+ hs, q_val = model(hs, obs_batch, done_batch, action_batch)
131
+ q_val = lax.stop_gradient(q_val)
132
+ q_val = q_val.squeeze(axis=0) # Shape (2, n_actions)
133
+ # jax.debug.print("=q_val shape: {}", q_val.shape) # Shape (2, n_actions)
134
+ carry = (hs, done, action)
135
+ return carry, q_val
136
+
137
+ def body_fun(i, val):
138
+ carry, q_vals = val
139
+ carry, q_val = process_step(carry, frames[i])
140
+ q_vals = q_vals.at[i].set(q_val)
141
+ return (carry, q_vals)
142
+
143
+ carry = (hs, init_done, init_action)
144
+ _, q_vals = lax.fori_loop(0, STEPS, body_fun, (carry, q_vals))
145
+ return q_vals # Shape (STEPS, 2, n_actions)
146
+
147
+ last_qs = []
148
+ noisy_frames = []
149
+ num_noise = STEPS - 1
150
+
151
+ for noise_idx in range(
152
+ 1, num_noise + 1
153
+ ): # This is how many trajectories we want to generate
154
+ init_hs = model.initialize_carry(key=_rng)
155
+ rng, sub_rng = jax.random.split(_rng)
156
+
157
+ frames = noiseless_frames.copy()
158
+
159
+ noisy_frame = add_noise(frames[noise_idx], sub_rng)
160
+ noisy_frame = (noisy_frame * 255).astype(jnp.float32) # Shape (128, 128, 3)
161
+ frames = frames.at[noise_idx].set(noisy_frame) # Shape (STEPS, 128, 128, 3)
162
+ noisy_frames.append(np.array(noisy_frame, dtype=np.uint8))
163
+
164
+ frames_np = np.array(frames, dtype=np.uint8) # Shape (STEPS, 128, 128, 3)
165
+ frames_np = frames_np.transpose((0, 3, 1, 2))
166
+
167
+ log_key = "{}_{}_{}_model_Partial={}_SEED={}_NoiseIdx={}".format(
168
+ config["TRAIN_TYPE"],
169
+ config["MEMORY_TYPE"],
170
+ config["ENV_NAME"],
171
+ config["PARTIAL"],
172
+ config["SEED"],
173
+ noise_idx,
174
+ )
175
+ wandb.log({log_key: wandb.Video(frames_np, fps=4)})
176
+
177
+ q_vals = qvals_for_frames(frames, sub_rng, init_hs)
178
+ q_vals = jnp.array(q_vals)
179
+ # print(f"{noise_idx}{q_vals}") # Shape (STEPS, 2, n_actions)
180
+ q_vals_np = np.array(q_vals) # shape (STEPS, 2, 5)
181
+ q_vals_plot = q_vals_np[:, 0, :] # shape (STEPS, 5)
182
+ last_qs.append(q_vals_plot[-1])
183
+
184
+ frames = np.arange(q_vals_plot.shape[0])
185
+ n_actions = q_vals_plot.shape[1]
186
+
187
+ _rng = rng
188
+
189
+ plt.rcParams["text.usetex"] = True
190
+ plt.rcParams["text.latex.preamble"] = (
191
+ r"\usepackage{amsmath} \usepackage{amssymb} \usepackage{amsfonts}"
192
+ )
193
+
194
+ def plot(noisy_frames, noiseless_frames, normal_qvals, last_qs):
195
+ sns.set()
196
+ fig, axes = plt.subplots(num_noise + 1, 4, figsize=(10, 3 * (num_noise + 1)))
197
+
198
+ normal_last_qvals = normal_qvals[:, 0, :][-1]
199
+ last_qs = np.array(last_qs) # shape (10, 5)
200
+ all_qvals = [normal_last_qvals] + [last_qs[idx] for idx in range(num_noise)]
201
+ ymin = min(q.min() for q in all_qvals)
202
+ ymax = max(q.max() for q in all_qvals) + 0.1
203
+ action_symbols = ["↑", "↓", "←", "→", "4"]
204
+
205
+ axes[0, 0].imshow(np.array(noiseless_frames[0], dtype=np.uint8))
206
+ axes[0, 0].set_title(r"$O_0$", fontsize=20)
207
+ axes[0, 1].imshow(np.array(noiseless_frames[1], dtype=np.uint8))
208
+ axes[0, 1].set_title(r"$O_1$", fontsize=20)
209
+ axes[0, 2].imshow(np.array(noiseless_frames[-1], dtype=np.uint8))
210
+ axes[0, 2].set_title(rf"$O_{{{num_noise}}}$", fontsize=20)
211
+ max_idx = np.argmax(normal_last_qvals)
212
+ colors = ["#BBBBBB"] * len(normal_last_qvals)
213
+ colors[max_idx] = "lightblue"
214
+
215
+ axes[0, 3].bar(
216
+ np.arange(normal_last_qvals.shape[0]),
217
+ normal_last_qvals,
218
+ color=colors,
219
+ edgecolor="black",
220
+ )
221
+ axes[0, 3].set_title(rf"$Q(s_{{{num_noise}}}, a_{{{num_noise}}})$", fontsize=20)
222
+ axes[0, 3].set_xticks(np.arange(len(normal_last_qvals)))
223
+
224
+ axes[0, 3].set_xticklabels(action_symbols, fontsize=10)
225
+ # axes[0, 3].set_ylim(ymin, ymax)
226
+ # axes[0, 3].set_yticks(np.arange(0.0, ymax + 0.05, 0.1))
227
+ # axes[0, 3].tick_params(axis='y', labelsize=20)
228
+ axes[0, 3].set_yticks([])
229
+ axes[0, 3].yaxis.set_visible(False)
230
+
231
+ for idx in range(num_noise):
232
+
233
+ qvals = last_qs[idx]
234
+ max_idx = np.argmax(qvals)
235
+ colors = ["#BBBBBB"] * len(qvals)
236
+ colors[max_idx] = "#FFB6C1"
237
+ axes[idx + 1, 0].imshow(np.array(noiseless_frames[0], dtype=np.uint8))
238
+ axes[idx + 1, 0].set_title(r"$O_0$", fontsize=20)
239
+ axes[idx + 1, 1].imshow(np.array(noisy_frames[idx], dtype=np.uint8))
240
+ axes[idx + 1, 1].set_title(rf"$O_{{{idx+1}}} + \epsilon$", fontsize=20)
241
+ axes[idx + 1, 2].imshow(np.array(noiseless_frames[-1], dtype=np.uint8))
242
+ axes[idx + 1, 2].set_title(rf"$O_{{{num_noise}}}$", fontsize=20)
243
+ axes[idx + 1, 3].bar(
244
+ np.arange(last_qs[idx].shape[0]),
245
+ last_qs[idx],
246
+ color=colors,
247
+ edgecolor="black",
248
+ )
249
+ axes[idx + 1, 3].set_title(
250
+ rf"$Q(s_{{{num_noise}}}, a_{{{num_noise}}})$", fontsize=20
251
+ )
252
+ # axes[idx , 3].set_ylim(ymin, ymax)
253
+ # axes[idx + 1, 3].set_yticks(np.arange(0.0, ymax + 0.05, 0.1))
254
+
255
+ # axes[idx + 1, 3].tick_params(axis='y', labelsize=20)
256
+ axes[idx + 1, 3].set_yticks([])
257
+ axes[idx + 1, 3].yaxis.set_visible(False)
258
+ axes[idx + 1, 3].set_xticks(np.arange(len(last_qs[idx])))
259
+
260
+ axes[idx + 1, 3].set_xticklabels(action_symbols, fontsize=10)
261
+
262
+ for row in axes:
263
+ for ax in row[:3]:
264
+ ax.axis("off")
265
+
266
+ plt.tight_layout()
267
+ plt.subplots_adjust(wspace=0.1)
268
+
269
+ for row in axes:
270
+ ax2 = row[2]
271
+ ax3 = row[3]
272
+
273
+ pos2 = ax2.get_position()
274
+ pos3 = ax3.get_position()
275
+
276
+ new_spacing = 0.04 # Adjust this value to increase/decrease the space between ax2 and ax3
277
+ new_ax3_x0 = pos2.x1 + new_spacing
278
+
279
+ ax3.set_position([new_ax3_x0, pos3.y0, pos3.width, pos3.height])
280
+
281
+ plt.savefig("summary.png", dpi=300, bbox_inches="tight")
282
+ plt.close()
283
+
284
    def batch_plot(noisy_frames, noiseless_frames, normal_qvals, last_qs):
        """Render the noise-evaluation summary as a series of figures.

        Splits the ``num_noise + 1`` rows (one noise-free baseline row plus one
        row per noise-injected step) into batches of at most 20 rows and saves
        each batch to ``summary_batch_<k>.png``.  Each row shows three
        observation frames and a bar chart of the final Q-values.

        Parameters
        ----------
        noisy_frames : sequence of images; frame at index ``i`` corresponds to
            noise injected at step ``i + 1`` (row ``idx`` reads ``noisy_frames[idx - 1]``).
        noiseless_frames : sequence of images from the clean rollout; indices
            0, 1 and -1 are displayed.
        normal_qvals : array indexed as ``[:, 0, :]`` — assumes a
            (time, batch, action) layout with batch dim 0 selected; TODO confirm.
        last_qs : per-noise-step final Q-value vectors; converted to an array
            of shape (num_noise, n_actions).

        NOTE(review): ``num_noise`` is a free variable captured from the
        enclosing scope, and LaTeX rendering requires a working TeX install.
        """
        sns.set()
        plt.rcParams["text.usetex"] = True
        plt.rcParams["text.latex.preamble"] = (
            r"\usepackage{amsmath} \usepackage{amssymb} \usepackage{amsfonts}"
        )

        BATCH_SIZE = 20  # Number of rows per batch
        total_rows = num_noise + 1  # baseline row + one row per noise step
        # Ceiling division: number of figures needed to cover all rows.
        num_batches = (total_rows + BATCH_SIZE - 1) // BATCH_SIZE

        # Final-step Q-values of the clean rollout (first batch element).
        normal_last_qvals = normal_qvals[:, 0, :][-1]
        last_qs = np.array(last_qs)  # shape (num_noise, 5)
        # Global y-range across all bar charts; currently computed but unused
        # (per-axis y-ticks are hidden below) — kept for parity with plot().
        all_qvals = [normal_last_qvals] + [last_qs[idx] for idx in range(num_noise)]
        ymin = min(q.min() for q in all_qvals)
        ymax = max(q.max() for q in all_qvals) + 0.1
        # x-tick labels for the action axis; "4" presumably a fifth action id.
        action_symbols = ["↑", "↓", "←", "→", "4"]

        for batch_idx in range(num_batches):
            start = batch_idx * BATCH_SIZE
            end = min((batch_idx + 1) * BATCH_SIZE, total_rows)
            batch_rows = end - start

            fig, axes = plt.subplots(batch_rows, 4, figsize=(10, 3 * batch_rows))
            if batch_rows == 1:
                axes = axes[None, :]  # Ensure axes is 2D

            for row_idx in range(batch_rows):
                idx = start + row_idx  # global row index across all batches
                if idx == 0:
                    # First row: noise-free baseline (highlight color lightblue).
                    axes[row_idx, 0].imshow(
                        np.array(noiseless_frames[0], dtype=np.uint8)
                    )
                    axes[row_idx, 0].set_title(r"$O_0$", fontsize=20)
                    axes[row_idx, 1].imshow(
                        np.array(noiseless_frames[1], dtype=np.uint8)
                    )
                    axes[row_idx, 1].set_title(r"$O_1$", fontsize=20)
                    axes[row_idx, 2].imshow(
                        np.array(noiseless_frames[-1], dtype=np.uint8)
                    )
                    axes[row_idx, 2].set_title(rf"$O_{{{num_noise}}}$", fontsize=20)
                    max_idx = np.argmax(normal_last_qvals)
                    colors = ["#BBBBBB"] * len(normal_last_qvals)
                    colors[max_idx] = "lightblue"  # greedy action highlighted
                    axes[row_idx, 3].bar(
                        np.arange(normal_last_qvals.shape[0]),
                        normal_last_qvals,
                        color=colors,
                        edgecolor="black",
                    )
                    axes[row_idx, 3].set_title(
                        rf"$Q(s_{{{num_noise}}}, a_{{{num_noise}}})$", fontsize=20
                    )
                    axes[row_idx, 3].set_xticks(np.arange(len(normal_last_qvals)))
                    axes[row_idx, 3].set_xticklabels(action_symbols, fontsize=10)
                    axes[row_idx, 3].set_yticks([])
                    axes[row_idx, 3].yaxis.set_visible(False)
                else:
                    # Row idx > 0: noise injected at step idx (pink highlight).
                    qvals = last_qs[idx - 1]
                    max_idx = np.argmax(qvals)
                    colors = ["#BBBBBB"] * len(qvals)
                    colors[max_idx] = "#FFB6C1"
                    axes[row_idx, 0].imshow(
                        np.array(noiseless_frames[0], dtype=np.uint8)
                    )
                    axes[row_idx, 0].set_title(r"$O_0$", fontsize=20)
                    axes[row_idx, 1].imshow(
                        np.array(noisy_frames[idx - 1], dtype=np.uint8)
                    )
                    axes[row_idx, 1].set_title(
                        rf"$O_{{{idx}}} + \epsilon$", fontsize=20
                    )
                    axes[row_idx, 2].imshow(
                        np.array(noiseless_frames[-1], dtype=np.uint8)
                    )
                    axes[row_idx, 2].set_title(rf"$O_{{{num_noise}}}$", fontsize=20)
                    axes[row_idx, 3].bar(
                        np.arange(qvals.shape[0]),
                        qvals,
                        color=colors,
                        edgecolor="black",
                    )
                    axes[row_idx, 3].set_title(
                        rf"$Q(s_{{{num_noise}}}, a_{{{num_noise}}})$", fontsize=20
                    )
                    axes[row_idx, 3].set_yticks([])
                    axes[row_idx, 3].yaxis.set_visible(False)
                    axes[row_idx, 3].set_xticks(np.arange(len(qvals)))
                    axes[row_idx, 3].set_xticklabels(action_symbols, fontsize=10)

                # Hide axes decorations on the three image columns.
                for ax in axes[row_idx, :3]:
                    ax.axis("off")

            plt.tight_layout()
            plt.subplots_adjust(wspace=0.1)

            # Widen the gap between the last image column and the bar chart;
            # must run after tight_layout so positions are final.
            for row in axes:
                ax2 = row[2]
                ax3 = row[3]
                pos2 = ax2.get_position()
                pos3 = ax3.get_position()
                new_spacing = 0.04
                new_ax3_x0 = pos2.x1 + new_spacing
                ax3.set_position([new_ax3_x0, pos3.y0, pos3.width, pos3.height])

            plt.savefig(f"summary_batch_{batch_idx}.png", dpi=300, bbox_inches="tight")
            plt.close()
393
+
394
+ # plot(noisy_frames, noiseless_frames, normal_qvals, last_qs)
395
+ batch_plot(noisy_frames, noiseless_frames, normal_qvals, last_qs)
396
+
397
+
398
# Scan saved PQN-RNN checkpoints and run the noise evaluation on each match.
os.environ["WANDB_MODE"] = "disabled"  # keep evaluation runs out of W&B

# Memory architectures to evaluate.
MEMORY_TYPES = {"lru"}
# , "mingru", "fart"
# Environments whose checkpoints should be evaluated.
ENV_NAMES = {
    # "AutoEncodeEasy",
    # "BattleShipEasy",
    "CartPoleEasy",
    # "NoisyCartPoleEasy",
    # "CountRecallEasy",
    # "MineSweeperEasy",
    # "NavigatorEasy",
}
PATH = "./pkls_gradients/"

for filename in os.listdir(PATH):
    # Expected name: PQN_RNN_<MEM>_<ENV>_<...>_Partial=<bool>_SEED=<n>.pkl
    # Guard clauses replace the original if/else-continue and protect the
    # positional indexing below from IndexError on unexpected file names.
    if not filename.startswith("PQN_RNN_"):
        continue
    parts = filename.split("_")
    if len(parts) < 7:
        # Malformed checkpoint name — skip instead of crashing the whole scan.
        continue
    train_type = "_".join(parts[:2])  # "PQN_RNN"
    memory_type = parts[2].lower()
    env_name = parts[3]
    partial_part = parts[5]
    seed_part = parts[6]

    if "=" not in partial_part or "=" not in seed_part:
        # Fields are expected as key=value; skip anything else.
        continue

    # Extract Partial and SEED values
    partial = partial_part.split("=")[1]
    seed = seed_part.split("=")[1].replace(".pkl", "")
    # Check if this file matches our criteria: fully observable runs only,
    # restricted to the selected memory types and environments.
    if (
        train_type == "PQN_RNN"
        and partial.lower() == "false"
        and memory_type in MEMORY_TYPES
        and env_name in ENV_NAMES
    ):
        # Create the evaluation config mirroring the training settings.
        config = {
            "ENV_NAME": env_name,
            "OBS_SIZE": 128,
            "MEMORY_TYPE": memory_type,
            "PARTIAL": False,
            "TRAIN_TYPE": train_type,
            "SEED": int(seed),
            "PROJECT": "noiseva",
        }
        print(f"Evaluating (unknown) with config: {config}")

        # Rebuild the network skeleton, then load the serialized weights into it.
        rng = jax.random.PRNGKey(config["SEED"])
        rng, _rng = jax.random.split(rng)
        network = QNetworkRNN(_rng, config["OBS_SIZE"], config["MEMORY_TYPE"])
        model = eqx.tree_deserialise_leaves(PATH + filename, network)
        evaluate(model, config)
plotting/plot_fps.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Plot FPS-vs-parallel-environments scaling curves as two side-by-side panels."""

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Benchmark results: POPGym-Arcade alone (left panel) and all suites (right).
alldata = pd.read_csv(
    "F:/Desktop/ML/Popjym/popjym/popjym/popjym/plotting/fps_data/all.csv"
)
popgymarcadedata = pd.read_csv(
    "F:/Desktop/ML/Popjym/popjym/popjym/popjym/plotting/fps_data/True_popgymarcadefpsdata.csv"
)

sns.set()

fig, axes = plt.subplots(1, 2, figsize=(24, 8), sharex=True, sharey=True)

# (dataframe, target axis, log base for the y-axis); the two panels differ
# only in their data and y-axis log base.
panels = [
    (popgymarcadedata, axes[0], 10),
    (alldata, axes[1], 2),
]
for frame, panel_ax, y_base in panels:
    sns.lineplot(
        data=frame,
        x="Num Envs",
        y="FPS",
        hue="Environment",
        marker="o",
        markersize=25,
        ax=panel_ax,
    )
    panel_ax.set_xscale("log", base=2)
    panel_ax.set_yscale("log", base=y_base)
    panel_ax.tick_params(axis="both", which="major", labelsize=35)
    panel_ax.legend(title="", title_fontsize="20", fontsize="22", ncol=3)
    panel_ax.set_xlabel("")
    panel_ax.set_ylabel("")

# Shared axis labels placed once at the figure level.
fig.text(0.55, 0.04, "Number of Parallel Environments", ha="center", fontsize=35)
fig.text(0.04, 0.6, "Frames per second", va="center", rotation="vertical", fontsize=35)

# Leave room on the left/bottom for the figure-level labels.
plt.tight_layout(rect=[0.07, 0.07, 1, 1])
plt.show()
plotting/plot_grads.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
plotting/plot_separate_returns_curve.py ADDED
@@ -0,0 +1,336 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file is to plot the MDP and POMDP results separately.
3
+ """
4
+
5
+ import jax.numpy as jnp # Import JAX
6
+ import matplotlib.pyplot as plt
7
+ import numpy as np
8
+ import pandas as pd
9
+ import seaborn as sns
10
+ from jax import lax # Import lax for cummax
11
+ from scipy.interpolate import interp1d
12
+
13
+ import wandb
14
+
15
+
16
def f(name):
    """Fetch finished W&B runs, aggregate their return curves, and save a plot.

    Pulls every finished run from the ``bolt-um/Arcade-RLC`` project,
    interpolates each run's episodic-return curve onto a uniform step grid,
    smooths it, aggregates across seeds per (algorithm, environment, partial)
    group, and writes the comparative cumulative-max curves to ``<name>.pdf``.

    Fix vs. original: the empty-data path used the interactive ``exit()``
    helper, which raises SystemExit through the caller (and is absent under
    ``python -S``); it now returns early instead.  Unused locals
    (WINDOW_SIZE, SIGMA) and dead commented-out code were removed.

    Parameters
    ----------
    name : str
        Basename for the output PDF.
    """
    INTERP_POINTS = 1000       # points on the uniform interpolation grid
    NORMALIZING_FACTOR = 200   # x-axis units after normalizing env steps

    # Environments trained for 2e7 steps; everything else defaults to 1e7.
    ENV_MAX_STEPS = {
        "CountRecallEasy": 2e7,
        "CountRecallMedium": 2e7,
        "CountRecallHard": 2e7,
        "BattleShipEasy": 2e7,
        "BattleShipMedium": 2e7,
        "BattleShipHard": 2e7,
        # other environments with default max steps 1e7
    }
    AXIS_FONT = {"fontsize": 9, "labelpad": 8}
    TICK_FONT = {"labelsize": 8}

    api = wandb.Api()
    runs = api.runs("bolt-um/Arcade-RLC")
    filtered_runs = [run for run in runs if run.state == "finished"]
    print(f"Total runs: {len(runs)}, Completed runs: {len(filtered_runs)}")

    # PQN variants log under different metric/step keys than the other algos.
    METRIC_MAPPING = {
        "PQN": {"return_col": "returned_episode_returns", "time_col": "env_step"},
        "PQN_RNN": {"return_col": "returned_episode_returns", "time_col": "env_step"},
        "default": {"return_col": "episodic return", "time_col": "TOTAL_TIMESTEPS"},
    }

    def process_run(run):
        """Process individual W&B run with dynamic max steps per environment.

        Returns a per-step DataFrame for the run, or None when the run has
        too few points or raises while being fetched.
        """
        try:
            config = {k: v for k, v in run.config.items() if not k.startswith("_")}
            env_name = config.get("ENV_NAME", "UnknownEnv")
            partial_status = str(config.get("PARTIAL", False))

            env_max_step = ENV_MAX_STEPS.get(env_name, 1e7)

            alg_name = config.get("ALG_NAME", "").upper()
            memory_type = "MLP"
            if alg_name == "PQN_RNN":
                memory_type = config.get("MEMORY_TYPE", "Unknown").capitalize()

            metric_map = METRIC_MAPPING.get(alg_name, METRIC_MAPPING["default"])
            # scan_history streams the full (unsampled) metric history.
            history = list(
                run.scan_history(
                    keys=[metric_map["return_col"], metric_map["time_col"]]
                )
            )
            history = pd.DataFrame(
                history, columns=[metric_map["return_col"], metric_map["time_col"]]
            )

            # Clamp steps to the environment budget and deduplicate.
            history["true_steps"] = history[metric_map["time_col"]].clip(
                upper=env_max_step
            )
            history = history.sort_values(metric_map["time_col"]).drop_duplicates(
                subset=["true_steps"]
            )

            if len(history) < 2:
                print(f"Skipping {run.name} due to insufficient data points")
                return None

            # Get first and last values for extrapolation beyond logged range.
            first_return = history[metric_map["return_col"]].iloc[0]
            last_return = history[metric_map["return_col"]].iloc[-1]

            # Create unified interpolation grid for this environment.
            unified_steps = np.linspace(0, env_max_step, INTERP_POINTS)
            unified_steps = np.round(unified_steps, decimals=5)
            scale_factor = NORMALIZING_FACTOR / env_max_step

            # Interpolate returns to the uniform grid.
            interp_func = interp1d(
                history["true_steps"],
                history[metric_map["return_col"]],
                kind="linear",
                bounds_error=False,
                fill_value=(first_return, last_return),
            )
            interpolated_returns = interp_func(unified_steps)

            # Exponentially weighted smoothing of the interpolated curve.
            smoothed_returns = (
                pd.Series(interpolated_returns)
                .ewm(span=100, adjust=False, min_periods=1)
                .mean()
                .values
            )

            # Compute running best-so-far return using JAX.
            cummax_returns = lax.cummax(jnp.array(smoothed_returns))

            return pd.DataFrame(
                {
                    "Algorithm": f"{alg_name} ({memory_type})",
                    "Return": interpolated_returns,
                    "Smoothed Return": smoothed_returns,
                    "Cummax Return": np.array(cummax_returns),  # back to NumPy
                    "True Steps": unified_steps,
                    "EnvName": env_name,
                    "Partial": partial_status,
                    "Seed": str(config.get("SEED", 0)),
                    "run_id": run.id,
                    "StepsNormalized": unified_steps * scale_factor,
                    "EnvMaxStep": env_max_step,
                    "ScaleFactor": scale_factor,
                }
            )

        except Exception as e:
            # Best-effort per-run processing: log and keep going.
            print(f"Error processing {run.name}: {str(e)}")
            return None

    # Process all runs and combine data.
    all_data = [df for run in filtered_runs if (df := process_run(run)) is not None]
    if not all_data:
        print("No valid data to process")
        return  # was exit(): do not kill the caller on an empty fetch
    runs_df = pd.concat(all_data, ignore_index=True)

    # Aggregate seeds per (algorithm, environment, partial-observability) group.
    interpolated_data = []
    for (alg, env, partial), group in runs_df.groupby(
        ["Algorithm", "EnvName", "Partial"]
    ):
        all_steps = group["True Steps"].unique()
        if len(all_steps) != INTERP_POINTS:
            print(
                f"Alignment error in {alg}-{env}-{partial}: {len(all_steps)} vs {INTERP_POINTS}"
            )
            continue

        # One column per seed/run, indexed by the shared step grid.
        pivot_df = group.pivot_table(
            index="True Steps",
            columns="run_id",
            values="Smoothed Return",
            aggfunc="first",
        )

        # Same layout for the per-run cumulative maxima.
        cummax_df = group.pivot_table(
            index="True Steps",
            columns="run_id",
            values="Cummax Return",
            aggfunc="first",
        )

        stats_df = pd.DataFrame(
            {
                "Steps": pivot_df.index,
                "Mean": pivot_df.mean(axis=1),
                "Cummax Mean": cummax_df.mean(axis=1),  # mean of cumulative max
                "Std": pivot_df.std(axis=1),
                "Count": pivot_df.count(axis=1),
            }
        )
        stats_df["Lower"] = stats_df["Mean"] - stats_df["Std"]
        stats_df["Upper"] = stats_df["Mean"] + stats_df["Std"]

        # Make the shaded band monotone to match the cummax center line.
        stats_df["Lower"] = np.array(lax.cummax(jnp.array(stats_df["Lower"])))
        stats_df["Upper"] = np.array(lax.cummax(jnp.array(stats_df["Upper"])))

        stats_df["StepsNormalized"] = stats_df["Steps"] * (
            NORMALIZING_FACTOR / group["EnvMaxStep"].iloc[0]
        )

        interpolated_data.append(
            pd.DataFrame(
                {
                    "Algorithm": alg,
                    "EnvName": env,
                    "Partial": partial,
                    "Steps": stats_df["Steps"],
                    "Smoothed": stats_df["Mean"],
                    "Cummax": stats_df["Cummax Mean"],
                    "Lower": stats_df["Lower"],
                    "Upper": stats_df["Upper"],
                    "StepsNormalized": stats_df["StepsNormalized"],
                    "EnvMaxStep": group["EnvMaxStep"].iloc[0],
                }
            )
        )

    final_df = pd.concat(interpolated_data, ignore_index=True)

    def plot_comparative_curves(df, name):
        """Plot comparative curves for all environments and save as <name>.pdf."""
        algorithms = df["Algorithm"].unique().tolist()
        palette = sns.husl_palette(n_colors=len(algorithms), s=0.7)

        sns.set()
        # One subplot per (environment, partial) pair, sorted by base name
        # then Easy < Medium < Hard.
        envs = df[["EnvName", "Partial", "EnvMaxStep"]].drop_duplicates()
        envs["BaseName"] = envs["EnvName"].apply(
            lambda x: x.replace("Easy", "").replace("Medium", "").replace("Hard", "")
        )
        envs["Difficulty"] = envs["EnvName"].apply(
            lambda x: (
                0 if "Easy" in x else 1 if "Medium" in x else 2 if "Hard" in x else 3
            )
        )
        envs = envs.sort_values(by=["BaseName", "Difficulty"])

        n_plots = len(envs)
        n_cols = min(3, n_plots)
        n_rows = int(np.ceil(n_plots / n_cols))

        fig = plt.figure(figsize=(12, n_rows * 4.5))
        gs = fig.add_gridspec(
            n_rows, n_cols, hspace=0.35, wspace=0.25, bottom=0.12, top=0.92
        )

        for idx, (_, row) in enumerate(envs.iterrows()):
            ax = fig.add_subplot(gs[idx // n_cols, idx % n_cols])
            env, partial, max_step = row[["EnvName", "Partial", "EnvMaxStep"]]
            env_data = df[(df.EnvName == env) & (df.Partial == partial)]

            ax.set_xlim(0, 200)

            env_data_filtered = env_data[np.isfinite(env_data["Smoothed"])]

            y_min = env_data_filtered["Smoothed"].min()
            y_max = env_data_filtered["Smoothed"].max()

            ax.set_ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))

            # Annotate the true step budget (e.g. "1e7") in the corner.
            ax.text(
                1.0,
                -0.1,
                f"{max_step:.0e}".replace("+", "").replace("0", ""),
                transform=ax.transAxes,
                ha="right",
                va="top",
                fontsize=8,
                color="#666666",
                bbox=dict(facecolor="white", alpha=0.8, edgecolor="none", pad=2),
            )

            for alg, color in zip(algorithms, palette):
                alg_data = env_data[env_data.Algorithm == alg]
                if not alg_data.empty:
                    # Plot the cumulative-maximum curve per algorithm.
                    ax.plot(
                        alg_data["StepsNormalized"],
                        alg_data["Cummax"],
                        color=color,
                        linewidth=2.5,
                        alpha=0.9,
                        label=alg,
                        solid_capstyle="round",
                        zorder=5,
                    )

                    # ±1 std band (cummax-smoothed above).
                    ax.fill_between(
                        alg_data["StepsNormalized"],
                        alg_data["Lower"],
                        alg_data["Upper"],
                        color=color,
                        alpha=0.2,
                        linewidth=0,
                        edgecolor=None,
                        zorder=2,
                    )

            ax.xaxis.set_major_locator(plt.MultipleLocator(50))
            ax.yaxis.set_major_locator(plt.AutoLocator())
            ax.set_ylabel("Episodic Return", **AXIS_FONT)
            ax.tick_params(**TICK_FONT)

            ax.set_title(
                f"{env} ({'Partial' if partial=='True' else 'Full'})",
                fontsize=10,
                pad=12,
                fontweight="semibold",
            )

            ax.grid(True, alpha=0.8, linestyle="-", linewidth=0.8)

        # Legend handles come from the last subplot; all subplots share the
        # same algorithm set, so this represents every panel.
        handles, labels = ax.get_legend_handles_labels()
        fig.legend(
            handles,
            labels,
            loc="upper center",
            ncol=min(4, len(algorithms)),
            bbox_to_anchor=(0.5, 1.02),
            frameon=True,
            framealpha=0.95,
            edgecolor="#DDDDDD",
            title="Algorithm Types",
            title_fontsize=9,
            fontsize=8,
        )

        plt.savefig(
            "{}.pdf".format(name), dpi=300, bbox_inches="tight", facecolor="white"
        )
        plt.close()

    plot_comparative_curves(final_df, name)
333
+
334
+
335
# Driver: currently a single plot; bump the range to regenerate variants.
for plot_idx in range(1):
    f(f"plot_{plot_idx}")
plotting/plot_twins_returns_curve.py ADDED
@@ -0,0 +1,482 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file to plot the partial and full curves for all algorithms in the same plot for all environments.
3
+ """
4
+
5
+ import jax.numpy as jnp
6
+ import matplotlib.pyplot as plt
7
+ import numpy as np
8
+ import pandas as pd
9
+ import seaborn as sns
10
+ from jax import lax
11
+ from scipy.interpolate import interp1d
12
+
13
+ import wandb
14
+
15
+
16
+ def f(name):
17
+ WINDOW_SIZE = 100
18
+ SIGMA = 100
19
+ INTERP_POINTS = 1000
20
+ NORMALIZING_FACTOR = 200
21
+
22
+ ENV_MAX_STEPS = {
23
+ "CountRecallEasy": 2e7,
24
+ "CountRecallMedium": 2e7,
25
+ "CountRecallHard": 2e7,
26
+ "BattleShipEasy": 2e7,
27
+ "BattleShipMedium": 2e7,
28
+ "BattleShipHard": 2e7,
29
+ # other environments with default max steps 1e7
30
+ }
31
+ AXIS_FONT = {"fontsize": 9, "labelpad": 8}
32
+ TICK_FONT = {"labelsize": 8}
33
+
34
+ api = wandb.Api()
35
+ runs = api.runs("bolt-um/Arcade-RLC")
36
+ filtered_runs = [run for run in runs if run.state == "finished"]
37
+ print(f"Total runs: {len(runs)}, Completed runs: {len(filtered_runs)}")
38
+
39
+ METRIC_MAPPING = {
40
+ "PQN": {"return_col": "returned_episode_returns", "time_col": "env_step"},
41
+ "PQN_RNN": {"return_col": "returned_episode_returns", "time_col": "env_step"},
42
+ "default": {"return_col": "episodic return", "time_col": "TOTAL_TIMESTEPS"},
43
+ }
44
+
45
+ def process_run(run):
46
+ """Process individual W&B run with dynamic max steps per environment"""
47
+ try:
48
+ config = {k: v for k, v in run.config.items() if not k.startswith("_")}
49
+ env_name = config.get("ENV_NAME", "UnknownEnv")
50
+ partial_status = str(config.get("PARTIAL", False))
51
+
52
+ if env_name in ENV_MAX_STEPS:
53
+ env_max_step = ENV_MAX_STEPS[env_name]
54
+ else:
55
+ env_max_step = 1e7
56
+
57
+ alg_name = config.get("ALG_NAME", "").upper()
58
+ memory_type = "MLP"
59
+ if alg_name == "PQN_RNN":
60
+ memory_type = config.get("MEMORY_TYPE", "Unknown").capitalize()
61
+
62
+ metric_map = METRIC_MAPPING.get(alg_name, METRIC_MAPPING["default"])
63
+ # history = run.history(keys=[metric_map["return_col"], metric_map["time_col"]])
64
+ history = list(
65
+ run.scan_history(
66
+ keys=[metric_map["return_col"], metric_map["time_col"]]
67
+ )
68
+ )
69
+ history = pd.DataFrame(
70
+ history, columns=[metric_map["return_col"], metric_map["time_col"]]
71
+ )
72
+
73
+ history["true_steps"] = history[metric_map["time_col"]].clip(
74
+ upper=env_max_step
75
+ )
76
+ history = history.sort_values(metric_map["time_col"]).drop_duplicates(
77
+ subset=["true_steps"]
78
+ )
79
+
80
+ if len(history) < 2:
81
+ print(f"Skipping {run.name} due to insufficient data points")
82
+ return None
83
+
84
+ # Get first and last values for extrapolation
85
+ first_return = history[metric_map["return_col"]].iloc[0]
86
+ last_return = history[metric_map["return_col"]].iloc[-1]
87
+
88
+ # Create unified interpolation grid for this environment
89
+ unified_steps = np.linspace(0, env_max_step, INTERP_POINTS)
90
+ unified_steps = np.round(unified_steps, decimals=5)
91
+ scale_factor = NORMALIZING_FACTOR / env_max_step
92
+
93
+ # Interpolate returns to uniform grid
94
+ interp_func = interp1d(
95
+ history["true_steps"],
96
+ history[metric_map["return_col"]],
97
+ kind="linear",
98
+ bounds_error=False,
99
+ fill_value=(first_return, last_return),
100
+ )
101
+ interpolated_returns = interp_func(unified_steps)
102
+
103
+ smoothed_returns = (
104
+ pd.Series(interpolated_returns)
105
+ .ewm(span=100, adjust=False, min_periods=1)
106
+ .mean()
107
+ .values
108
+ )
109
+
110
+ # Compute cumulative maximum using JAX
111
+ cummax_returns = lax.cummax(jnp.array(smoothed_returns))
112
+
113
+ return pd.DataFrame(
114
+ {
115
+ "Algorithm": f"{alg_name} ({memory_type})",
116
+ "Return": interpolated_returns,
117
+ "Smoothed Return": smoothed_returns,
118
+ "Cummax Return": np.array(cummax_returns), # Convert back to NumPy
119
+ "True Steps": unified_steps,
120
+ "EnvName": env_name,
121
+ "Partial": partial_status,
122
+ "Seed": str(config.get("SEED", 0)),
123
+ "run_id": run.id,
124
+ "StepsNormalized": unified_steps * scale_factor,
125
+ "EnvMaxStep": env_max_step,
126
+ "ScaleFactor": scale_factor,
127
+ }
128
+ )
129
+
130
+ except Exception as e:
131
+ print(f"Error processing {run.name}: {str(e)}")
132
+ return None
133
+
134
+ # Process all runs and combine data
135
+ # all_data = [df for run in filtered_runs if (df := process_run(run)) is not None]
136
+
137
+ # if not all_data:
138
+ # print("No valid data to process")
139
+ # exit()
140
+ # runs_df = pd.concat(all_data, ignore_index=True)
141
+ # runs_df.to_pickle("newdata.pkl")
142
+
143
+ runs_df = pd.read_pickle("newdata.pkl")
144
+
145
+ # Generate interpolation grid for each environment
146
+ interpolated_data = []
147
+ for (alg, env, partial), group in runs_df.groupby(
148
+ ["Algorithm", "EnvName", "Partial"]
149
+ ):
150
+ all_steps = group["True Steps"].unique()
151
+ if len(all_steps) != INTERP_POINTS:
152
+ print(
153
+ f"Alignment error in {alg}-{env}: {len(all_steps)} vs {INTERP_POINTS}"
154
+ )
155
+ continue
156
+
157
+ pivot_df = group.pivot_table(
158
+ index="True Steps",
159
+ columns=["run_id", "Partial"],
160
+ values="Smoothed Return",
161
+ aggfunc="first",
162
+ )
163
+
164
+ # Calculate cumulative maximum for each run
165
+ cummax_df = group.pivot_table(
166
+ index="True Steps",
167
+ columns=["run_id", "Partial"],
168
+ values="Cummax Return",
169
+ aggfunc="first",
170
+ )
171
+
172
+ # Compute mean and std for smoothed returns
173
+ stats_df = pd.DataFrame(
174
+ {
175
+ "Steps": pivot_df.index,
176
+ "Mean": pivot_df.mean(axis=1),
177
+ "Cummax Mean": cummax_df.mean(axis=1), # Mean of cumulative max
178
+ "Std": pivot_df.std(axis=1),
179
+ "Count": pivot_df.count(axis=1),
180
+ }
181
+ )
182
+
183
+ # Compute lower and upper bounds for confidence interval
184
+ stats_df["Lower"] = stats_df["Mean"] - stats_df["Std"]
185
+ stats_df["Upper"] = stats_df["Mean"] + stats_df["Std"]
186
+
187
+ # Apply cummax to lower and upper bounds
188
+ stats_df["Lower"] = np.array(lax.cummax(jnp.array(stats_df["Lower"])))
189
+ stats_df["Upper"] = np.array(lax.cummax(jnp.array(stats_df["Upper"])))
190
+
191
+ # stats_df['StepsNormalized'] = stats_df['Steps'] * (NORMALIZING_FACTOR / group['EnvMaxStep'].iloc[0])
192
+ stats_df["StepsNormalized"] = stats_df["Steps"] / group["EnvMaxStep"].iloc[0]
193
+
194
+ interpolated_data.append(
195
+ pd.DataFrame(
196
+ {
197
+ "Algorithm": alg,
198
+ "EnvName": env,
199
+ "Partial": partial, # Include 'Partial' column
200
+ "Steps": stats_df["Steps"],
201
+ "Smoothed": stats_df["Mean"],
202
+ "Cummax": stats_df["Cummax Mean"], # Add cumulative max
203
+ "Lower": stats_df["Lower"], # Cumulative max applied to lower bound
204
+ "Upper": stats_df["Upper"], # Cumulative max applied to upper bound
205
+ "StepsNormalized": stats_df["StepsNormalized"],
206
+ "EnvMaxStep": group["EnvMaxStep"].iloc[0],
207
+ }
208
+ )
209
+ )
210
+
211
+ final_df = pd.concat(interpolated_data, ignore_index=True)
212
+ df_battleship = final_df[final_df["EnvName"].str.contains("BattleShip")].copy()
213
+
214
+ def plot_comparative_curves(df, name):
215
+ """Plot comparative curves for all environments"""
216
+ algorithms = df["Algorithm"].unique().tolist()
217
+ palette = sns.color_palette(
218
+ "husl", n_colors=len(algorithms)
219
+ ) # Use a standard palette for algorithms
220
+
221
+ # plt.style.use('seaborn-v0_8')
222
+
223
+ envs = df[["EnvName", "EnvMaxStep"]].drop_duplicates()
224
+ envs["BaseName"] = envs["EnvName"].apply(
225
+ lambda x: x.replace("Easy", "").replace("Medium", "").replace("Hard", "")
226
+ )
227
+ envs["Difficulty"] = envs["EnvName"].apply(
228
+ lambda x: (
229
+ 0 if "Easy" in x else 1 if "Medium" in x else 2 if "Hard" in x else 3
230
+ )
231
+ )
232
+ envs = envs.sort_values(by=["BaseName", "Difficulty"]).head(2)
233
+ sns.set()
234
+ n_plots = len(envs)
235
+ n_cols = min(3, n_plots)
236
+ n_rows = int(np.ceil(n_plots / n_cols))
237
+ fig, axes = plt.subplots(
238
+ n_rows, n_cols, figsize=(16, n_rows * 4.5), sharey=True
239
+ )
240
+ # fig = plt.figure(figsize=(12, n_rows*4.5))
241
+ # gs = fig.add_gridspec(n_rows, n_cols, hspace=0.35, wspace=0.25,
242
+ # bottom=0.12, top=0.92)
243
+ if n_rows * n_cols == 1:
244
+ axes = [axes]
245
+ else:
246
+ axes = axes.flatten()
247
+ # axes[0].set_xticks([0.5, 1.0])
248
+ # axes[1].set_xticks([0.0, 0.5, 1.0])
249
+ for idx, (_, row) in enumerate(envs.iterrows()):
250
+ # ax = fig.add_subplot(gs[idx//n_cols, idx%n_cols])
251
+ ax = axes[idx]
252
+ env, max_step = row[["EnvName", "EnvMaxStep"]]
253
+ env_data = df[df.EnvName == env]
254
+ # ax.set_xlim(0, 200)
255
+ ax.set_xlim(0.0, 1.0)
256
+ env_data_filtered = env_data[np.isfinite(env_data["Smoothed"])]
257
+
258
+ y_min = env_data_filtered["Smoothed"].min()
259
+ y_max = env_data_filtered["Smoothed"].max()
260
+
261
+ # ax.set_ylim(y_min - 0.05*(y_max-y_min),
262
+ # y_max + 0.05*(y_max-y_min))
263
+ ax.set_ylim(0.0, 1.0)
264
+ ax.set_xticks([0.0, 0.5, 1.0])
265
+ ax.set_yticks([0.0, 0.5, 1.0])
266
+ ax.tick_params(axis="x", pad=5)
267
+ ax.tick_params(axis="y", pad=15)
268
+ ax.text(
269
+ 1.07,
270
+ -0.15,
271
+ f"{max_step:.0e}".replace("+", "").replace("0", ""),
272
+ transform=ax.transAxes,
273
+ ha="right",
274
+ va="top",
275
+ fontsize=35,
276
+ color="#666666",
277
+ bbox=dict(facecolor="white", alpha=0.8, edgecolor="none", pad=2),
278
+ )
279
+
280
+ for alg_idx, alg in enumerate(algorithms):
281
+ for partial_status in [
282
+ "True",
283
+ "False",
284
+ ]: # Plot both Partial and Full statuses
285
+ alg_data = env_data[
286
+ (env_data.Algorithm == alg)
287
+ & (env_data.Partial == partial_status)
288
+ ]
289
+ if not alg_data.empty:
290
+ # Use distinct colors for partial and full
291
+ if partial_status == "True":
292
+ color = palette[alg_idx]
293
+ else:
294
+ color = sns.desaturate(palette[alg_idx], 0.5)
295
+
296
+ line_style = (
297
+ "--" if partial_status == "True" else "-"
298
+ ) # Use dashed lines for Partial
299
+ ax.plot(
300
+ alg_data["StepsNormalized"],
301
+ alg_data["Cummax"],
302
+ color=color,
303
+ linewidth=2.5,
304
+ alpha=0.9,
305
+ label=f"{alg} ({'Partial' if partial_status == 'True' else 'Full'})",
306
+ linestyle=line_style,
307
+ solid_capstyle="round",
308
+ zorder=5,
309
+ )
310
+
311
+ ax.fill_between(
312
+ alg_data["StepsNormalized"],
313
+ alg_data["Lower"],
314
+ alg_data["Upper"],
315
+ color=color,
316
+ alpha=0.2,
317
+ linewidth=0,
318
+ edgecolor=None,
319
+ zorder=2,
320
+ )
321
+
322
+ ax.set_xlabel("Env Steps", fontsize=35, labelpad=8)
323
+ # ax.set_ylabel("Episodic Return", fontsize=20)
324
+ ax.tick_params(labelsize=30)
325
+ ax.set_title(f"{env}", fontsize=35, pad=12, fontweight="semibold")
326
+
327
+ ax.grid(True, alpha=0.8, linestyle="-", linewidth=0.8)
328
+ fig.text(
329
+ 0.04, 0.5, "Episodic Return", va="center", rotation="vertical", fontsize=35
330
+ )
331
+ # handles, labels = ax.get_legend_handles_labels()
332
+ # fig.legend(handles, labels,
333
+ # loc='upper center',
334
+ # ncol=min(4, len(algorithms)),
335
+ # bbox_to_anchor=(0.5, 1.02),
336
+ # frameon=True,
337
+ # framealpha=0.95,
338
+ # edgecolor='#DDDDDD',
339
+ # title="Algorithm Types",
340
+ # title_fontsize=9,
341
+ # fontsize=8)
342
+
343
+ plt.savefig(
344
+ "{}.pdf".format(name), dpi=300, bbox_inches="tight", facecolor="white"
345
+ )
346
+ # plt.show()
347
+ plt.close()
348
+
349
+ # plot_comparative_curves(final_df, name)
350
+ def plot_env_curves(df, name):
351
+ """Plot separate curves per environment and save a separate legend PDF."""
352
+ algorithms = df["Algorithm"].unique().tolist()
353
+ palette = sns.color_palette("husl", n_colors=len(algorithms))
354
+ sns.set()
355
+ # Group environments (sorting by base name and difficulty)
356
+ envs = df[["EnvName", "EnvMaxStep"]].drop_duplicates()
357
+ envs["BaseName"] = envs["EnvName"].apply(
358
+ lambda x: x.replace("Easy", "").replace("Medium", "").replace("Hard", "")
359
+ )
360
+ envs["Difficulty"] = envs["EnvName"].apply(
361
+ lambda x: (
362
+ 0 if "Easy" in x else 1 if "Medium" in x else 2 if "Hard" in x else 3
363
+ )
364
+ )
365
+ envs = envs.sort_values(by=["BaseName", "Difficulty"])
366
+
367
+ # Prepare storage for legend handles
368
+ legend_handles = []
369
+ legend_labels = []
370
+
371
+ # Loop over each environment and create a separate figure/pdf
372
+ for idx, row in envs.iterrows():
373
+ env = row["EnvName"]
374
+ max_step = row["EnvMaxStep"]
375
+ fig, ax = plt.subplots(figsize=(6, 4))
376
+ # ax.set_xlim(0, 200)
377
+ ax.set_xlim(0, 1)
378
+
379
+ env_data = df[df.EnvName == env]
380
+ env_data_filtered = env_data[np.isfinite(env_data["Smoothed"])]
381
+ y_min = env_data_filtered["Smoothed"].min()
382
+ y_max = env_data_filtered["Smoothed"].max()
383
+ ax.set_ylim(y_min - 0.1 * (y_max - y_min), y_max + 0.1 * (y_max - y_min))
384
+ # ax.set_ylim(0.0, 1.0)
385
+ ax.text(
386
+ 1.05,
387
+ -0.15,
388
+ f"{max_step:.0e}".replace("+", "").replace("0", ""),
389
+ transform=ax.transAxes,
390
+ ha="right",
391
+ va="top",
392
+ fontsize=20,
393
+ color="#666666",
394
+ bbox=dict(facecolor="white", alpha=0.8, edgecolor="none", pad=2),
395
+ )
396
+
397
+ # Plot curves per algorithm and partial status
398
+ for alg_idx, alg in enumerate(algorithms):
399
+ for partial_status in ["True", "False"]:
400
+ alg_data = env_data[
401
+ (env_data.Algorithm == alg)
402
+ & (env_data.Partial == partial_status)
403
+ ]
404
+ if not alg_data.empty:
405
+ # Choose line color and style based on partial status
406
+ color = (
407
+ palette[alg_idx]
408
+ if partial_status == "True"
409
+ else sns.desaturate(palette[alg_idx], 0.5)
410
+ )
411
+ line_style = "--" if partial_status == "True" else "-"
412
+
413
+ (line,) = ax.plot(
414
+ alg_data["StepsNormalized"],
415
+ alg_data["Cummax"],
416
+ color=color,
417
+ linewidth=2.5,
418
+ alpha=0.9,
419
+ linestyle=line_style,
420
+ solid_capstyle="round",
421
+ zorder=5,
422
+ )
423
+
424
+ label = (
425
+ f"{alg} ({'Partial' if partial_status=='True' else 'Full'})"
426
+ )
427
+ # Add handle & label once (global legend)
428
+ if label not in legend_labels:
429
+ legend_handles.append(line)
430
+ legend_labels.append(label)
431
+
432
+ ax.fill_between(
433
+ alg_data["StepsNormalized"],
434
+ alg_data["Lower"],
435
+ alg_data["Upper"],
436
+ color=color,
437
+ alpha=0.2,
438
+ linewidth=0,
439
+ edgecolor=None,
440
+ zorder=2,
441
+ )
442
+
443
+ ax.set_xlabel("Env Steps", fontsize=20, labelpad=8)
444
+ ax.set_ylabel("Episodic Return", fontsize=20, labelpad=8)
445
+ ax.set_title(env, fontsize=20, pad=12, fontweight="semibold")
446
+ ax.tick_params(labelsize=20)
447
+ ax.grid(True, alpha=0.8, linestyle="-", linewidth=0.8)
448
+
449
+ # Save individual environment figure as its own pdf
450
+ fig.savefig(
451
+ f"{env}_{name}_curve.pdf",
452
+ dpi=300,
453
+ bbox_inches="tight",
454
+ facecolor="white",
455
+ )
456
+ plt.close(fig)
457
+
458
+ # Create separate figure just for the legend and save it
459
+ # fig_legend = plt.figure(figsize=(8, 1))
460
+ # legend = fig_legend.legend(
461
+ # legend_handles, legend_labels,
462
+ # loc='center',
463
+ # ncol=min(4, len(legend_handles)),
464
+ # frameon=True,
465
+ # framealpha=0.95,
466
+ # edgecolor='#ffffff',
467
+ # title="",
468
+ # title_fontsize=9,
469
+ # fontsize=20
470
+ # )
471
+ # # Remove axes for legend figure
472
+ # fig_legend.canvas.draw()
473
+ # fig_legend.savefig(f"legend.pdf", dpi=300, bbox_inches='tight', facecolor='white')
474
+ # plt.close(fig_legend)
475
+
476
+ # plot_env_curves(final_df, name)
477
+ plot_comparative_curves(df_battleship, name)
478
+
479
+
480
+ for i in range(1):
481
+ f(f"plot_PartialCompare{i}")
482
+ print(f"plot_PartialCompare{i} done")
plotting/plottable.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file is to plot the MDP and POMDP results separately.
3
+ """
4
+
5
+ import jax.numpy as jnp # Import JAX
6
+ import matplotlib.pyplot as plt
7
+ import numpy as np
8
+ import pandas as pd
9
+ import seaborn as sns
10
+ from jax import lax # Import lax for cummax
11
+ from scipy.interpolate import interp1d
12
+
13
+ import wandb
14
+
15
+
16
+ def f(name):
17
+ WINDOW_SIZE = 100
18
+ SIGMA = 100
19
+ INTERP_POINTS = 1000
20
+ NORMALIZING_FACTOR = 200
21
+
22
+ ENV_MAX_STEPS = {
23
+ "CountRecallEasy": 2e7,
24
+ "CountRecallMedium": 2e7,
25
+ "CountRecallHard": 2e7,
26
+ "BattleShipEasy": 2e7,
27
+ "BattleShipMedium": 2e7,
28
+ "BattleShipHard": 2e7,
29
+ # other environments with default max steps 1e7
30
+ }
31
+ AXIS_FONT = {"fontsize": 9, "labelpad": 8}
32
+ TICK_FONT = {"labelsize": 8}
33
+
34
+ api = wandb.Api()
35
+ runs = api.runs("bolt-um/Arcade-RLC")
36
+ filtered_runs = [run for run in runs if run.state == "finished"]
37
+ print(f"Total runs: {len(runs)}, Completed runs: {len(filtered_runs)}")
38
+
39
+ METRIC_MAPPING = {
40
+ "PQN": {"return_col": "returned_episode_returns", "time_col": "env_step"},
41
+ "PQN_RNN": {"return_col": "returned_episode_returns", "time_col": "env_step"},
42
+ "default": {"return_col": "episodic return", "time_col": "TOTAL_TIMESTEPS"},
43
+ }
44
+
45
+ def process_run(run):
46
+ """Process individual W&B run with dynamic max steps per environment"""
47
+ try:
48
+ config = {k: v for k, v in run.config.items() if not k.startswith("_")}
49
+ env_name = config.get("ENV_NAME", "UnknownEnv")
50
+ partial_status = str(config.get("PARTIAL", False))
51
+
52
+ if env_name in ENV_MAX_STEPS:
53
+ env_max_step = ENV_MAX_STEPS[env_name]
54
+ else:
55
+ env_max_step = 1e7
56
+
57
+ alg_name = config.get("ALG_NAME", "").upper()
58
+ memory_type = "MLP"
59
+ if alg_name == "PQN_RNN":
60
+ memory_type = config.get("MEMORY_TYPE", "Unknown").capitalize()
61
+
62
+ metric_map = METRIC_MAPPING.get(alg_name, METRIC_MAPPING["default"])
63
+ # history = run.scan_history(keys=[metric_map["return_col"], metric_map["time_col"]])
64
+ history = list(
65
+ run.scan_history(
66
+ keys=[metric_map["return_col"], metric_map["time_col"]]
67
+ )
68
+ )
69
+ history = pd.DataFrame(
70
+ history, columns=[metric_map["return_col"], metric_map["time_col"]]
71
+ )
72
+
73
+ history["true_steps"] = history[metric_map["time_col"]].clip(
74
+ upper=env_max_step
75
+ )
76
+ history = history.sort_values(metric_map["time_col"]).drop_duplicates(
77
+ subset=["true_steps"]
78
+ )
79
+
80
+ if len(history) < 2:
81
+ print(f"Skipping {run.name} due to insufficient data points")
82
+ return None
83
+
84
+ # Get first and last values for extrapolation
85
+ first_return = history[metric_map["return_col"]].iloc[0]
86
+ last_return = history[metric_map["return_col"]].iloc[-1]
87
+
88
+ # Create unified interpolation grid for this environment
89
+ unified_steps = np.linspace(0, env_max_step, INTERP_POINTS)
90
+ unified_steps = np.round(unified_steps, decimals=5)
91
+ scale_factor = NORMALIZING_FACTOR / env_max_step
92
+
93
+ # Interpolate returns to uniform grid
94
+ interp_func = interp1d(
95
+ history["true_steps"],
96
+ history[metric_map["return_col"]],
97
+ kind="linear",
98
+ bounds_error=False,
99
+ fill_value=(first_return, last_return),
100
+ )
101
+ interpolated_returns = interp_func(unified_steps)
102
+
103
+ smoothed_returns = (
104
+ pd.Series(interpolated_returns)
105
+ .ewm(span=100, adjust=False, min_periods=1)
106
+ .mean()
107
+ .values
108
+ )
109
+ # smoothed_returns = pd.Series(interpolated_returns).rolling(window=WINDOW_SIZE, min_periods=1).mean().values
110
+
111
+ # Compute cumulative maximum using JAX
112
+ cummax_returns = lax.cummax(jnp.array(smoothed_returns))
113
+
114
+ return pd.DataFrame(
115
+ {
116
+ "Algorithm": f"{alg_name} ({memory_type})",
117
+ "Return": interpolated_returns,
118
+ "Smoothed Return": smoothed_returns,
119
+ "Cummax Return": np.array(cummax_returns), # Convert back to NumPy
120
+ "True Steps": unified_steps,
121
+ "EnvName": env_name,
122
+ "Partial": partial_status,
123
+ "Seed": str(config.get("SEED", 0)),
124
+ "run_id": run.id,
125
+ "StepsNormalized": unified_steps * scale_factor,
126
+ "EnvMaxStep": env_max_step,
127
+ "ScaleFactor": scale_factor,
128
+ }
129
+ )
130
+
131
+ except Exception as e:
132
+ print(f"Error processing {run.name}: {str(e)}")
133
+ return None
134
+
135
+ # # Process all runs and combine data
136
+ # all_data = [df for run in filtered_runs if (df := process_run(run)) is not None]
137
+ # if not all_data:
138
+ # print("No valid data to process")
139
+ # exit()
140
+ # runs_df = pd.concat(all_data, ignore_index=True)
141
+ # # save the data
142
+ # runs_df.to_csv("data.csv")
143
+
144
+ # load the data
145
+ runs_df = pd.read_csv("F:/desktop/env_group.csv")
146
+
147
+ # runs_df['FinalReturn'] = runs_df['Cummax Return'].astype(float)
148
+
149
+ # # First aggregate across seeds within each environment for each model and Partial status:
150
+ # # For each (Algorithm, Partial, EnvName) group, take the maximum final return across seeds.
151
+ # seedgroup = runs_df.groupby(['Algorithm', 'Partial', 'EnvName', 'run_id', 'Seed'])['FinalReturn'].max().reset_index()
152
+
153
+ # seedgroup.to_csv("seedgroup.csv")
154
+ # env_group = seedgroup.groupby(['Algorithm', 'Partial', 'EnvName'])['FinalReturn'].agg(['mean', 'std']).reset_index()
155
+ # # max for each seed then aggregate
156
+
157
+ # # env_group.to_csv("env_group.csv")
158
+
159
+ # # Now aggregate across the environments and difficults: compute mean and std for each (Algorithm, Partial)
160
+ # model_group = env_group.groupby(['Algorithm', 'Partial']).agg(
161
+ # mean=('mean', 'mean'),
162
+ # std=('std', 'mean')
163
+ # ).reset_index()
164
+ # # model_group.to_csv("model_group.csv")
165
+
166
+ # Pivot the table so that rows = Model and columns for Partial outcomes.
167
+ # This will produce MultiIndex columns; then we rename them.
168
+ pivot = {}
169
+ for algo, group in runs_df.groupby("Algorithm"):
170
+ table = group.pivot(index="EnvName", columns="Partial", values=["mean", "std"])
171
+ pivot[algo] = table
172
+ # Rename columns so that "False" becomes "MDP" and "True" becomes "POMDP".
173
+ table.columns = table.columns.map(
174
+ lambda x: (
175
+ "MDP"
176
+ if (x[0] == "mean" and str(x[1]) == "False")
177
+ else (
178
+ "POMDP"
179
+ if (x[0] == "mean" and str(x[1]) == "True")
180
+ else (
181
+ "MDP_std"
182
+ if (x[0] == "std" and str(x[1]) == "False")
183
+ else (
184
+ "POMDP_std"
185
+ if (x[0] == "std" and str(x[1]) == "True")
186
+ else f"{x[0]}_{x[1]}"
187
+ )
188
+ )
189
+ )
190
+ )
191
+ )
192
+
193
+ # Compute the overall performance (MDP+POMDP) for the mean as the average of the two
194
+ table["MDP+POMDP"] = table[["MDP", "POMDP"]].mean(axis=1)
195
+
196
+ # Optionally, compute a combined variance (average the variances, here approximated via std)
197
+ table["MDP+POMDP_std"] = table[["MDP_std", "POMDP_std"]].mean(axis=1)
198
+ for algo, table in pivot.items():
199
+ print(f"\n{algo}")
200
+ print(table)
201
+ # Print or save the table
202
+ # print(table)
203
+ table.to_csv(f"{algo}.csv", index=True)
204
+
205
+
206
+ for i in range(1):
207
+ f(f"plot_{i}")
plotting/rlcgrad.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file to plot the partial and full curves for all algorithms in the same plot for all environments.
3
+ """
4
+
5
+ import jax.numpy as jnp
6
+ import matplotlib.pyplot as plt
7
+ import numpy as np
8
+ import pandas as pd
9
+ import seaborn as sns
10
+ from jax import lax
11
+ from scipy.interpolate import interp1d
12
+
13
+ import wandb
14
+
15
+
16
+ def f(name):
17
+ WINDOW_SIZE = 100
18
+ SIGMA = 100
19
+ INTERP_POINTS = 1000
20
+ NORMALIZING_FACTOR = 200
21
+
22
+ ENV_MAX_STEPS = {
23
+ "CountRecallEasy": 1e8,
24
+ "CountRecallMedium": 1e8,
25
+ "CountRecallHard": 1e8,
26
+ "BattleShipEasy": 1e8,
27
+ "BattleShipMedium": 1e8,
28
+ "BattleShipHard": 1e8,
29
+ # other environments with default max steps 1e7
30
+ }
31
+ AXIS_FONT = {"fontsize": 9, "labelpad": 8}
32
+ TICK_FONT = {"labelsize": 8}
33
+
34
+ api = wandb.Api()
35
+ runs = api.runs("bolt-um/Arcade-RLC-Grad")
36
+ filtered_runs = [run for run in runs if run.state == "finished"]
37
+ print(f"Total runs: {len(runs)}, Completed runs: {len(filtered_runs)}")
38
+
39
+ METRIC_MAPPING = {
40
+ "PQN": {"return_col": "returned_episode_returns", "time_col": "env_step"},
41
+ "PQN_RNN": {"return_col": "returned_episode_returns", "time_col": "env_step"},
42
+ "default": {"return_col": "episodic return", "time_col": "TOTAL_TIMESTEPS"},
43
+ }
44
+
45
+ def process_run(run):
46
+ """Process individual W&B run with dynamic max steps per environment"""
47
+ try:
48
+ config = {k: v for k, v in run.config.items() if not k.startswith("_")}
49
+ env_name = config.get("ENV_NAME", "UnknownEnv")
50
+ partial_status = str(config.get("PARTIAL", False))
51
+
52
+ if env_name in ENV_MAX_STEPS:
53
+ env_max_step = ENV_MAX_STEPS[env_name]
54
+ else:
55
+ env_max_step = 1e8
56
+
57
+ alg_name = config.get("ALG_NAME", "").upper()
58
+ memory_type = "MLP"
59
+ if alg_name == "PQN_RNN":
60
+ memory_type = config.get("MEMORY_TYPE", "Unknown").capitalize()
61
+
62
+ metric_map = METRIC_MAPPING.get(alg_name, METRIC_MAPPING["default"])
63
+ # history = run.history(keys=[metric_map["return_col"], metric_map["time_col"]])
64
+ history = list(
65
+ run.scan_history(
66
+ keys=[metric_map["return_col"], metric_map["time_col"]]
67
+ )
68
+ )
69
+ history = pd.DataFrame(
70
+ history, columns=[metric_map["return_col"], metric_map["time_col"]]
71
+ )
72
+
73
+ history["true_steps"] = history[metric_map["time_col"]].clip(
74
+ upper=env_max_step
75
+ )
76
+ history = history.sort_values(metric_map["time_col"]).drop_duplicates(
77
+ subset=["true_steps"]
78
+ )
79
+
80
+ if len(history) < 2:
81
+ print(f"Skipping {run.name} due to insufficient data points")
82
+ return None
83
+
84
+ # Get first and last values for extrapolation
85
+ first_return = history[metric_map["return_col"]].iloc[0]
86
+ last_return = history[metric_map["return_col"]].iloc[-1]
87
+
88
+ # Create unified interpolation grid for this environment
89
+ unified_steps = np.linspace(0, env_max_step, INTERP_POINTS)
90
+ unified_steps = np.round(unified_steps, decimals=5)
91
+ scale_factor = NORMALIZING_FACTOR / env_max_step
92
+
93
+ # Interpolate returns to uniform grid
94
+ interp_func = interp1d(
95
+ history["true_steps"],
96
+ history[metric_map["return_col"]],
97
+ kind="linear",
98
+ bounds_error=False,
99
+ fill_value=(first_return, last_return),
100
+ )
101
+ interpolated_returns = interp_func(unified_steps)
102
+
103
+ smoothed_returns = (
104
+ pd.Series(interpolated_returns)
105
+ .ewm(span=100, adjust=False, min_periods=1)
106
+ .mean()
107
+ .values
108
+ )
109
+
110
+ # Compute cumulative maximum using JAX
111
+ cummax_returns = lax.cummax(jnp.array(smoothed_returns))
112
+
113
+ return pd.DataFrame(
114
+ {
115
+ "Algorithm": f"{alg_name} ({memory_type})",
116
+ "Return": interpolated_returns,
117
+ "Smoothed Return": smoothed_returns,
118
+ "Cummax Return": np.array(cummax_returns), # Convert back to NumPy
119
+ "True Steps": unified_steps,
120
+ "EnvName": env_name,
121
+ "Partial": partial_status,
122
+ "Seed": str(config.get("SEED", 0)),
123
+ "run_id": run.id,
124
+ "StepsNormalized": unified_steps / env_max_step,
125
+ "EnvMaxStep": env_max_step,
126
+ "ScaleFactor": scale_factor,
127
+ }
128
+ )
129
+
130
+ except Exception as e:
131
+ print(f"Error processing {run.name}: {str(e)}")
132
+ return None
133
+
134
+ # Process all runs and combine data
135
+ # all_data = [df for run in filtered_runs if (df := process_run(run)) is not None]
136
+
137
+ # if not all_data:
138
+ # print("No valid data to process")
139
+ # exit()
140
+ # runs_df = pd.concat(all_data, ignore_index=True)
141
+ # runs_df.to_pickle("rlcgrad.pkl")
142
+
143
+ runs_df = pd.read_pickle("rlcgrad.pkl")
144
+
145
+ def plot_comparative_curves(runs_df, name):
146
+ """Plot comparative curves for all environments in a single plot"""
147
+ runs_df["EnvBaseName"] = runs_df["EnvName"].apply(
148
+ lambda x: x.replace("Easy", "").replace("Medium", "").replace("Hard", "")
149
+ )
150
+
151
+ envs = runs_df["EnvBaseName"].unique()
152
+
153
+ palette = sns.color_palette("husl", len(envs))
154
+ env_color_map = dict(zip(envs, palette))
155
+
156
+ partial_map = {"True": "POMDP", "False": "MDP"}
157
+ max_step = 1e8
158
+ plt.figure(figsize=(12, 7))
159
+ sns.set()
160
+ plt.text(
161
+ 1,
162
+ -0.15,
163
+ f"{max_step:.0e}".replace("+", "").replace("0", ""),
164
+ transform=plt.gca().transAxes,
165
+ ha="right",
166
+ va="top",
167
+ fontsize=35,
168
+ color="#666666",
169
+ bbox=dict(facecolor="white", alpha=0.8, edgecolor="none", pad=2),
170
+ )
171
+
172
+ pomdp_handles = []
173
+ pomdp_labels = []
174
+ mdp_handles = []
175
+ mdp_labels = []
176
+ for env_base in envs:
177
+ for partial_status in ["False", "True"]:
178
+ data = runs_df[
179
+ (runs_df["EnvBaseName"] == env_base)
180
+ & (runs_df["Partial"] == partial_status)
181
+ ]
182
+ if not data.empty:
183
+ color = env_color_map[env_base]
184
+ line_style = "--" if partial_status == "True" else "-"
185
+ label = f"{env_base} - {partial_map[partial_status]}"
186
+
187
+ line = plt.plot(
188
+ data["StepsNormalized"],
189
+ data["Cummax Return"],
190
+ color=color,
191
+ linewidth=2.5,
192
+ linestyle=line_style,
193
+ label=label,
194
+ )[0]
195
+ if partial_status == "True":
196
+ pomdp_handles.append(line)
197
+ pomdp_labels.append(label)
198
+ else:
199
+ mdp_handles.append(line)
200
+ mdp_labels.append(label)
201
+
202
+ plt.xlabel("Env Steps", fontsize=35)
203
+ plt.ylabel("Episodic Return", fontsize=35)
204
+ plt.tick_params(axis="both", which="major", labelsize=35)
205
+ plt.grid(True, alpha=0.5)
206
+
207
+ handles = mdp_handles + pomdp_handles
208
+ labels = mdp_labels + pomdp_labels
209
+ plt.legend(handles, labels, loc="best", fontsize=22, ncol=2)
210
+
211
+ plt.title("LRU", fontsize=35, pad=12, fontweight="semibold")
212
+ plt.tight_layout()
213
+ plt.savefig(
214
+ "{}.pdf".format(name), dpi=300, bbox_inches="tight", facecolor="white"
215
+ )
216
+ plt.close()
217
+
218
+ plot_comparative_curves(runs_df, name)
219
+
220
+
221
+ for i in range(1):
222
+ f(f"rlcgrad{i}")
plotting/run_multi_seed_analysis.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import argparse
5
+ import os
6
+
7
+ import equinox as eqx
8
+ import jax
9
+ import jax.numpy as jnp
10
+ import numpy as np
11
+ import pandas as pd
12
+
13
+ from popgym_arcade.baselines.model.builder import QNetworkRNN
14
+ from popgym_arcade.baselines.utils import get_terminal_saliency_maps
15
+
16
+
17
+ def run_multiple_seeds_and_save_csv(config, seeds, max_steps=200, output_csv=None):
18
+ """
19
+ Run saliency analysis on multiple seeds and save the results in a CSV file.
20
+
21
+ Args:
22
+ config: Configuration dictionary
23
+ seeds: List of seeds to run
24
+ max_steps: Maximum number of steps for each episode
25
+ output_csv: Path to save the CSV file (default: auto-generated based on config)
26
+
27
+ Returns:
28
+ Path to the saved CSV file
29
+ """
30
+ # Create a default output path if none provided
31
+ if output_csv is None:
32
+ output_csv = f'saliency_results_{config["MEMORY_TYPE"]}_{config["ENV_NAME"]}_Partial={config["PARTIAL"]}.csv'
33
+
34
+ # List to store results
35
+ all_results = []
36
+
37
+ # Store saliency distributions for each seed
38
+ for seed_value in seeds:
39
+ print(f"Processing seed {seed_value}...")
40
+
41
+ # Update config with current seed
42
+ config["SEED"] = seed_value
43
+
44
+ # Create the model path for this seed
45
+ model_path = f"pkls_gradients/PQN_RNN_{config['MEMORY_TYPE']}_{config['ENV_NAME']}_model_Partial={config['PARTIAL']}_SEED={config['MODEL_SEED']}.pkl"
46
+
47
+ # Initialize random key for this seed
48
+ rng = jax.random.PRNGKey(seed_value)
49
+
50
+ # Initialize and load the model
51
+ network = QNetworkRNN(
52
+ rng, rnn_type=config["MEMORY_TYPE"], obs_size=config["OBS_SIZE"]
53
+ )
54
+ # try:
55
+ model = eqx.tree_deserialise_leaves(model_path, network)
56
+
57
+ # Define path for saving the distribution for this seed
58
+ dist_save_path = f'dist_{config["MEMORY_TYPE"]}_{config["ENV_NAME"]}_Partial={config["PARTIAL"]}_SEED={seed_value}.npy'
59
+
60
+ # Run terminal saliency analysis
61
+ grads_obs = get_terminal_saliency_maps(
62
+ rng,
63
+ model,
64
+ config,
65
+ )
66
+
67
+ # print(grads_obs.shape)
68
+ # grads_obs = grads_obs.squeeze(1)
69
+
70
+ grads_obs = jnp.abs(grads_obs).sum(axis=(1, 2, 3))
71
+ dist = grads_obs / grads_obs.sum()
72
+ print(dist.sum())
73
+ # Convert JAX array to numpy for DataFrame
74
+ dist_np = np.array(dist)
75
+
76
+ # Create result dictionary
77
+ result = {
78
+ "seed": seed_value,
79
+ "distribution": dist_np,
80
+ "length": len(dist_np),
81
+ "dist_path": dist_save_path,
82
+ }
83
+
84
+ all_results.append(result)
85
+ print(f"Seed {seed_value} completed. Distribution length: {len(dist_np)}")
86
+
87
+ # except Exception as e:
88
+ # raise e
89
+ # # print(f"Error processing seed {seed_value}: {e}")
90
+
91
+ # Process results for CSV format
92
+ csv_data = []
93
+ max_length = max([r["length"] for r in all_results]) if all_results else 0
94
+
95
+ for result in all_results:
96
+ # Pad distribution to max length if needed
97
+ padded_dist = np.zeros(max_length)
98
+ padded_dist[: result["length"]] = result["distribution"]
99
+
100
+ # Create row data
101
+ row = {
102
+ "seed": result["seed"],
103
+ "length": result["length"],
104
+ "dist_path": result["dist_path"],
105
+ }
106
+
107
+ # Add each position value as a separate column
108
+ for i in range(max_length):
109
+ norm_pos = i / max_length if max_length > 0 else 0
110
+ row[f"pos_{norm_pos:.3f}"] = padded_dist[i]
111
+
112
+ csv_data.append(row)
113
+
114
+ # Create DataFrame and save to CSV
115
+ df = pd.DataFrame(csv_data)
116
+ df.to_csv(output_csv, index=False)
117
+ print(f"Results saved to {output_csv}")
118
+
119
+ return output_csv
popgym_arcade/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from popgym_arcade.registration import make
popgym_arcade/baselines/__init__.py ADDED
File without changes
popgym_arcade/baselines/model/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from popgym_arcade.baselines.model.builder import (
2
+ ActorCritic,
3
+ ActorCriticRNN,
4
+ QNetwork,
5
+ QNetworkRNN,
6
+ )
7
+ from popgym_arcade.baselines.model.memorax import (
8
+ add_batch_dim,
9
+ get_residual_memory_model,
10
+ )
popgym_arcade/baselines/model/builder.py ADDED
@@ -0,0 +1,706 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Tuple
2
+
3
+ import equinox as eqx
4
+ import equinox.nn as nn
5
+ import jax
6
+ import jax.numpy as jnp
7
+ from distreqx import distributions
8
+ from jaxtyping import Array, PRNGKeyArray
9
+
10
+ from popgym_arcade.baselines.model.memorax import get_residual_memory_model
11
+
12
+
13
+ class ActorCritic(eqx.Module):
14
+ action_dim: int = 5
15
+ actor_cnn: nn.Sequential
16
+ actor_trunk: nn.Sequential
17
+ critic_cnn: nn.Sequential
18
+ critic_trunk: nn.Sequential
19
+
20
+ def __init__(self, key: PRNGKeyArray, obs_size: int):
21
+ key_array = jax.random.split(key, 14)
22
+ if obs_size == 256:
23
+ self.actor_cnn = nn.Sequential(
24
+ [
25
+ nn.Conv2d(
26
+ in_channels=3,
27
+ out_channels=64,
28
+ kernel_size=7,
29
+ stride=2,
30
+ key=key_array[0],
31
+ ),
32
+ nn.Lambda(jax.nn.leaky_relu),
33
+ nn.MaxPool2d(kernel_size=2, stride=2),
34
+ nn.Conv2d(
35
+ in_channels=64,
36
+ out_channels=128,
37
+ kernel_size=3,
38
+ stride=2,
39
+ key=key_array[1],
40
+ ),
41
+ nn.Lambda(jax.nn.leaky_relu),
42
+ nn.MaxPool2d(kernel_size=2, stride=2),
43
+ nn.Conv2d(
44
+ in_channels=128,
45
+ out_channels=256,
46
+ kernel_size=3,
47
+ stride=2,
48
+ key=key_array[2],
49
+ ),
50
+ nn.Lambda(jax.nn.leaky_relu),
51
+ nn.MaxPool2d(kernel_size=2, stride=2),
52
+ nn.Conv2d(
53
+ in_channels=256,
54
+ out_channels=512,
55
+ kernel_size=3,
56
+ stride=2,
57
+ key=key_array[3],
58
+ ),
59
+ nn.Lambda(jax.nn.leaky_relu),
60
+ ]
61
+ )
62
+ self.critic_cnn = nn.Sequential(
63
+ [
64
+ nn.Conv2d(
65
+ in_channels=3,
66
+ out_channels=64,
67
+ kernel_size=7,
68
+ stride=2,
69
+ key=key_array[7],
70
+ ),
71
+ nn.Lambda(jax.nn.leaky_relu),
72
+ nn.MaxPool2d(kernel_size=2, stride=2),
73
+ nn.Conv2d(
74
+ in_channels=64,
75
+ out_channels=128,
76
+ kernel_size=3,
77
+ stride=2,
78
+ key=key_array[8],
79
+ ),
80
+ nn.Lambda(jax.nn.leaky_relu),
81
+ nn.MaxPool2d(kernel_size=2, stride=2),
82
+ nn.Conv2d(
83
+ in_channels=128,
84
+ out_channels=256,
85
+ kernel_size=3,
86
+ stride=2,
87
+ key=key_array[9],
88
+ ),
89
+ nn.Lambda(jax.nn.leaky_relu),
90
+ nn.MaxPool2d(kernel_size=2, stride=2),
91
+ nn.Conv2d(
92
+ in_channels=256,
93
+ out_channels=512,
94
+ kernel_size=3,
95
+ stride=2,
96
+ key=key_array[10],
97
+ ),
98
+ nn.Lambda(jax.nn.leaky_relu),
99
+ ]
100
+ )
101
+ else:
102
+ self.actor_cnn = nn.Sequential(
103
+ [
104
+ nn.Conv2d(
105
+ in_channels=3,
106
+ out_channels=64,
107
+ kernel_size=5,
108
+ stride=2,
109
+ key=key_array[0],
110
+ ),
111
+ nn.Lambda(jax.nn.leaky_relu),
112
+ nn.MaxPool2d(kernel_size=2, stride=2),
113
+ nn.Conv2d(
114
+ in_channels=64,
115
+ out_channels=128,
116
+ kernel_size=3,
117
+ stride=2,
118
+ key=key_array[1],
119
+ ),
120
+ nn.Lambda(jax.nn.leaky_relu),
121
+ nn.MaxPool2d(kernel_size=2, stride=2),
122
+ nn.Conv2d(
123
+ in_channels=128,
124
+ out_channels=256,
125
+ kernel_size=3,
126
+ stride=2,
127
+ key=key_array[2],
128
+ ),
129
+ nn.Lambda(jax.nn.leaky_relu),
130
+ nn.MaxPool2d(kernel_size=3, stride=1),
131
+ nn.Conv2d(
132
+ in_channels=256,
133
+ out_channels=512,
134
+ kernel_size=1,
135
+ stride=1,
136
+ key=key_array[3],
137
+ ),
138
+ nn.Lambda(jax.nn.leaky_relu),
139
+ ]
140
+ )
141
+ self.critic_cnn = nn.Sequential(
142
+ [
143
+ nn.Conv2d(
144
+ in_channels=3,
145
+ out_channels=64,
146
+ kernel_size=5,
147
+ stride=2,
148
+ key=key_array[0],
149
+ ),
150
+ nn.Lambda(jax.nn.leaky_relu),
151
+ nn.MaxPool2d(kernel_size=2, stride=2),
152
+ nn.Conv2d(
153
+ in_channels=64,
154
+ out_channels=128,
155
+ kernel_size=3,
156
+ stride=2,
157
+ key=key_array[1],
158
+ ),
159
+ nn.Lambda(jax.nn.leaky_relu),
160
+ nn.MaxPool2d(kernel_size=2, stride=2),
161
+ nn.Conv2d(
162
+ in_channels=128,
163
+ out_channels=256,
164
+ kernel_size=3,
165
+ stride=2,
166
+ key=key_array[2],
167
+ ),
168
+ nn.Lambda(jax.nn.leaky_relu),
169
+ nn.MaxPool2d(kernel_size=3, stride=1),
170
+ nn.Conv2d(
171
+ in_channels=256,
172
+ out_channels=512,
173
+ kernel_size=1,
174
+ stride=1,
175
+ key=key_array[3],
176
+ ),
177
+ nn.Lambda(jax.nn.leaky_relu),
178
+ ]
179
+ )
180
+ self.actor_trunk = nn.Sequential(
181
+ [
182
+ nn.Linear(in_features=512, out_features=256, key=key_array[4]),
183
+ nn.LayerNorm(shape=256),
184
+ nn.Lambda(jax.nn.leaky_relu),
185
+ nn.Linear(in_features=256, out_features=256, key=key_array[5]),
186
+ nn.LayerNorm(shape=256),
187
+ nn.Lambda(jax.nn.leaky_relu),
188
+ nn.Linear(
189
+ in_features=256, out_features=self.action_dim, key=key_array[6]
190
+ ),
191
+ ]
192
+ )
193
+
194
+ self.critic_trunk = nn.Sequential(
195
+ [
196
+ nn.Linear(in_features=512, out_features=256, key=key_array[11]),
197
+ nn.LayerNorm(shape=256),
198
+ nn.Lambda(jax.nn.leaky_relu),
199
+ nn.Linear(in_features=256, out_features=256, key=key_array[12]),
200
+ nn.LayerNorm(shape=256),
201
+ nn.Lambda(jax.nn.leaky_relu),
202
+ nn.Linear(in_features=256, out_features=1, key=key_array[13]),
203
+ ]
204
+ )
205
+
206
+ def __call__(self, x: Array) -> Tuple:
207
+ """Expects image in [0, 255]"""
208
+ x = x.transpose((0, 3, 1, 2)) / 255.0
209
+ actor_embedding = eqx.filter_vmap(self.actor_cnn)(x)
210
+ critic_embedding = eqx.filter_vmap(self.critic_cnn)(x)
211
+
212
+ actor_embedding = actor_embedding.reshape(actor_embedding.shape[0], -1)
213
+ critic_embedding = critic_embedding.reshape(critic_embedding.shape[0], -1)
214
+
215
+ actor_mean = eqx.filter_vmap(self.actor_trunk)(actor_embedding)
216
+ critic = eqx.filter_vmap(self.critic_trunk)(critic_embedding)
217
+ pi = distributions.Categorical(logits=actor_mean)
218
+ return pi, jnp.squeeze(critic, axis=-1)
219
+
220
+
221
class ActorCriticRNN(eqx.Module):
    """Recurrent actor-critic: per-tower CNN encoder -> RNN core -> MLP head.

    The actor and critic each own an independent CNN, recurrent model and
    trunk.  `__call__` consumes a (time, batch, H, W, C) image sequence in
    [0, 255] together with per-step done flags, and returns updated recurrent
    states, a categorical policy and a value estimate.
    """

    action_dim: int = 5
    actor_cnn: nn.Sequential
    actor_rnn: eqx.Module
    actor_trunk: nn.Sequential
    critic_cnn: nn.Sequential
    critic_rnn: eqx.Module
    critic_trunk: nn.Sequential

    @staticmethod
    def _build_cnn(obs_size: int, keys) -> nn.Sequential:
        """Build the image encoder (512-channel output).

        256x256 observations get a deeper 7x7-stem variant; smaller
        observations get a 5x5 stem ending in a 1x1 projection.
        `keys` must provide four PRNG keys, one per conv layer.
        """
        if obs_size == 256:
            return nn.Sequential(
                [
                    nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, key=keys[0]),
                    nn.Lambda(jax.nn.leaky_relu),
                    nn.MaxPool2d(kernel_size=2, stride=2),
                    nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, key=keys[1]),
                    nn.Lambda(jax.nn.leaky_relu),
                    nn.MaxPool2d(kernel_size=2, stride=2),
                    nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, key=keys[2]),
                    nn.Lambda(jax.nn.leaky_relu),
                    nn.MaxPool2d(kernel_size=2, stride=2),
                    nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, key=keys[3]),
                    nn.Lambda(jax.nn.leaky_relu),
                ]
            )
        return nn.Sequential(
            [
                nn.Conv2d(in_channels=3, out_channels=64, kernel_size=5, stride=2, key=keys[0]),
                nn.Lambda(jax.nn.leaky_relu),
                nn.MaxPool2d(kernel_size=2, stride=2),
                nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, key=keys[1]),
                nn.Lambda(jax.nn.leaky_relu),
                nn.MaxPool2d(kernel_size=2, stride=2),
                nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, key=keys[2]),
                nn.Lambda(jax.nn.leaky_relu),
                nn.MaxPool2d(kernel_size=3, stride=1),
                nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1, stride=1, key=keys[3]),
                nn.Lambda(jax.nn.leaky_relu),
            ]
        )

    def __init__(self, key: PRNGKeyArray, obs_size: int, rnn_type: str = "lru"):
        key_array = jax.random.split(key, 14)
        # BUGFIX: give the critic tower keys distinct from the actor's.
        # Previously the small-obs branch reused keys 0-3 for the critic CNN
        # and both trunks shared key 5, so actor and critic were initialised
        # with identical weights; key 11 went unused.
        self.actor_cnn = self._build_cnn(obs_size, key_array[0:4])
        self.critic_cnn = self._build_cnn(obs_size, key_array[6:10])
        self.actor_rnn = get_residual_memory_model(
            input=512,
            hidden=512,
            output=256,
            num_layers=2,
            rnn_type=rnn_type,
            key=key_array[4],
        )
        self.actor_trunk = nn.Sequential(
            [
                nn.Linear(in_features=256, out_features=256, key=key_array[5]),
                nn.LayerNorm(shape=256),
                nn.Lambda(jax.nn.leaky_relu),
                nn.Linear(
                    in_features=256, out_features=self.action_dim, key=key_array[12]
                ),
            ]
        )
        self.critic_rnn = get_residual_memory_model(
            input=512,
            hidden=512,
            output=256,
            num_layers=1,
            rnn_type=rnn_type,
            key=key_array[10],
        )
        self.critic_trunk = nn.Sequential(
            [
                nn.Linear(in_features=256, out_features=256, key=key_array[11]),
                nn.LayerNorm(shape=256),
                nn.Lambda(jax.nn.leaky_relu),
                nn.Linear(in_features=256, out_features=1, key=key_array[13]),
            ]
        )

    def __call__(self, actor_state, critic_state, x):
        """Expects image in [0, 255].

        `x` is a tuple of (observations, dones).  Observations are moved to
        channels-first and scaled to [0, 1] before the CNNs.
        """
        inputs, dones = x
        inputs = inputs.transpose((0, 1, 4, 2, 3)) / 255.0
        # Double vmap: outer over time, inner over batch of (T, B, ...) input.
        actor_embedding = eqx.filter_vmap(eqx.filter_vmap(self.actor_cnn))(inputs)
        critic_embedding = eqx.filter_vmap(eqx.filter_vmap(self.critic_cnn))(inputs)

        actor_embedding = actor_embedding.reshape(
            (actor_embedding.shape[0], actor_embedding.shape[1], -1)
        )
        critic_embedding = critic_embedding.reshape(
            (critic_embedding.shape[0], critic_embedding.shape[1], -1)
        )
        actor_rnn_in = (actor_embedding, dones)
        critic_rnn_in = (critic_embedding, dones)
        actor_state, actor_embedding = eqx.filter_vmap(
            self.actor_rnn, in_axes=(0, 1), out_axes=(0, 1)
        )(actor_state, actor_rnn_in)
        # Carry only the final recurrent state of the sequence forward.
        actor_state = eqx.filter_vmap(self.actor_rnn.latest_recurrent_state, in_axes=0)(
            actor_state
        )
        critic_state, critic_embedding = eqx.filter_vmap(
            self.critic_rnn, in_axes=(0, 1), out_axes=(0, 1)
        )(critic_state, critic_rnn_in)
        critic_state = eqx.filter_vmap(
            self.critic_rnn.latest_recurrent_state, in_axes=0
        )(critic_state)
        actor_mean = eqx.filter_vmap(eqx.filter_vmap(self.actor_trunk))(actor_embedding)
        pi = distributions.Categorical(logits=actor_mean)
        critic = eqx.filter_vmap(eqx.filter_vmap(self.critic_trunk))(critic_embedding)
        return actor_state, critic_state, pi, jnp.squeeze(critic, axis=-1)

    def initialize_carry(self, key: PRNGKeyArray):
        """Create fresh recurrent states for the actor and critic RNNs."""
        key_init = jax.random.split(key, 2)
        actor_state = eqx.filter_jit(self.actor_rnn.initialize_carry)(key=key_init[0])
        critic_state = eqx.filter_jit(self.critic_rnn.initialize_carry)(key=key_init[1])
        return actor_state, critic_state
464
+
465
+
466
class QNetwork(eqx.Module):
    """CNN + MLP Q-network.

    Maps a batch of images in [0, 255] to per-action Q-values of shape
    (batch, action_dim).
    """

    action_dim: int = 5
    cnn: nn.Sequential
    trunk: nn.Sequential

    def __init__(self, key: PRNGKeyArray, obs_size: int):
        keys = jax.random.split(key, 9)
        if obs_size == 256:
            # Deeper 7x7-stem encoder for 256x256 observations.
            self.cnn = nn.Sequential(
                [
                    nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, key=keys[0]),
                    nn.Lambda(jax.nn.leaky_relu),
                    nn.MaxPool2d(kernel_size=2, stride=2),
                    nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, key=keys[1]),
                    nn.Lambda(jax.nn.leaky_relu),
                    nn.MaxPool2d(kernel_size=2, stride=2),
                    nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, key=keys[2]),
                    nn.Lambda(jax.nn.leaky_relu),
                    nn.MaxPool2d(kernel_size=2, stride=2),
                    nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, key=keys[3]),
                    nn.Lambda(jax.nn.leaky_relu),
                ]
            )
        else:
            # 5x5-stem encoder with a final 1x1 projection for smaller inputs.
            self.cnn = nn.Sequential(
                [
                    nn.Conv2d(in_channels=3, out_channels=64, kernel_size=5, stride=2, key=keys[0]),
                    nn.Lambda(jax.nn.leaky_relu),
                    nn.MaxPool2d(kernel_size=2, stride=2),
                    nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, key=keys[1]),
                    nn.Lambda(jax.nn.leaky_relu),
                    nn.MaxPool2d(kernel_size=2, stride=2),
                    nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, key=keys[2]),
                    nn.Lambda(jax.nn.leaky_relu),
                    nn.MaxPool2d(kernel_size=3, stride=1),
                    nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1, stride=1, key=keys[3]),
                    nn.Lambda(jax.nn.leaky_relu),
                ]
            )

        self.trunk = nn.Sequential(
            [
                nn.Linear(in_features=512, out_features=256, key=keys[4]),
                nn.LayerNorm(shape=256),
                nn.Lambda(jax.nn.leaky_relu),
                nn.Linear(in_features=256, out_features=256, key=keys[5]),
                nn.LayerNorm(shape=256),
                nn.Lambda(jax.nn.leaky_relu),
                nn.Linear(in_features=256, out_features=self.action_dim, key=keys[6]),
            ]
        )

    def __call__(self, x: jax.Array):
        """Expects image in [0, 255]"""
        # BUGFIX: removed leftover debug `print(jnp.max(x), jnp.min(x))`
        # calls (under jit they only printed tracers anyway).
        x = x.transpose((0, 3, 1, 2)) / 255.0
        x = eqx.filter_vmap(self.cnn)(x)
        x = x.reshape(x.shape[0], -1)
        x = eqx.filter_vmap(self.trunk)(x)
        return x
577
+
578
+
579
class QNetworkRNN(eqx.Module):
    """CNN + MLP"""

    action_dim: int = 5
    cnn: nn.Sequential
    rnn: eqx.Module
    trunk: nn.Sequential

    def __init__(self, key: PRNGKeyArray, obs_size: int, rnn_type: str = "lru"):
        keys = jax.random.split(key, 8)
        # Pick the conv stack for the observation resolution, then wrap it.
        if obs_size == 256:
            conv_layers = [
                nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, key=keys[0]),
                nn.Lambda(jax.nn.leaky_relu),
                nn.MaxPool2d(kernel_size=2, stride=2),
                nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, key=keys[1]),
                nn.Lambda(jax.nn.leaky_relu),
                nn.MaxPool2d(kernel_size=2, stride=2),
                nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, key=keys[2]),
                nn.Lambda(jax.nn.leaky_relu),
                nn.MaxPool2d(kernel_size=2, stride=2),
                nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, key=keys[3]),
                nn.Lambda(jax.nn.leaky_relu),
            ]
        else:
            conv_layers = [
                nn.Conv2d(in_channels=3, out_channels=64, kernel_size=5, stride=2, key=keys[0]),
                nn.Lambda(jax.nn.leaky_relu),
                nn.MaxPool2d(kernel_size=2, stride=2),
                nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, key=keys[1]),
                nn.Lambda(jax.nn.leaky_relu),
                nn.MaxPool2d(kernel_size=2, stride=2),
                nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, key=keys[2]),
                nn.Lambda(jax.nn.leaky_relu),
                nn.MaxPool2d(kernel_size=3, stride=1),
                nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1, stride=1, key=keys[3]),
                nn.Lambda(jax.nn.leaky_relu),
            ]
        self.cnn = nn.Sequential(conv_layers)
        # RNN input: 512 CNN features + action_dim one-hot of the last action.
        self.rnn = get_residual_memory_model(
            input=517,
            hidden=512,
            output=256,
            num_layers=2,
            rnn_type=rnn_type,
            key=keys[4],
        )
        self.trunk = nn.Sequential(
            [nn.Linear(in_features=256, out_features=self.action_dim, key=keys[7])]
        )

    def __call__(self, hidden_state, x, done, last_action):
        """Expects image in [0, 255]"""
        frames = x.transpose((0, 1, 4, 2, 3)) / 255.0
        # vmap twice: over the time and batch axes of the (T, B, ...) input.
        feats = eqx.filter_vmap(eqx.filter_vmap(self.cnn))(frames)
        feats = feats.reshape((feats.shape[0], feats.shape[1], -1))

        # Condition the recurrent core on the previous action as well.
        prev_action_1h = jax.nn.one_hot(last_action, self.action_dim)
        rnn_in = (jnp.concatenate([feats, prev_action_1h], axis=-1), done)

        hidden_state, feats = eqx.filter_vmap(self.rnn, in_axes=(0, 1), out_axes=(0, 1))(
            hidden_state, rnn_in
        )
        # Keep only the final recurrent state to carry to the next call.
        hidden_state = eqx.filter_vmap(self.rnn.latest_recurrent_state, in_axes=0)(
            hidden_state
        )

        q_vals = eqx.filter_vmap(eqx.filter_vmap(self.trunk))(feats)
        return hidden_state, q_vals

    def initialize_carry(self, key: PRNGKeyArray):
        """Create a fresh recurrent state for the RNN core."""
        key_init = jax.random.split(key, 1)
        return eqx.filter_jit(self.rnn.initialize_carry)(key=key_init[0])
popgym_arcade/baselines/model/memorax/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from popgym_arcade.baselines.model.memorax.train_utils import (
2
+ add_batch_dim,
3
+ get_residual_memory_model,
4
+ )
popgym_arcade/baselines/model/memorax/gras.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Callable, Optional, Tuple
3
+
4
+ import equinox as eqx
5
+ import jax
6
+ from jaxtyping import PRNGKeyArray, Shaped
7
+
8
+ from popgym_arcade.baselines.model.memorax.groups import BinaryAlgebra, Module
9
+ from popgym_arcade.baselines.model.memorax.mtypes import (
10
+ Input,
11
+ OutputEmbedding,
12
+ RecurrentState,
13
+ SingleRecurrentState,
14
+ )
15
+ from popgym_arcade.baselines.model.memorax.scans import semigroup_scan, set_action_scan
16
+
17
+
18
class GRAS(Module):
    r"""A Generalized Recurrent Algebraic Structure (GRAS)

    Given a recurrent state and inputs, returns the corresponding recurrent states and outputs

    A GRAS contains a set action or semigroup :math:`(H, \bullet)` and two maps/functions :math:`f,g`

    For a semigroup, we express a GRAS via

    .. math::

        f: X^n \times \{0, 1\}^n \mapsto H^n

        \bullet: H \times H \mapsto H

        g: H^n \times X^n \times \{0, 1\}^n \mapsto Y^n

    where :math:`\bullet` may be an associative/parallel scan or sequential scan.

    For a set action, the GRAS recurrent update is slightly altered

    .. math::
        f: X^n \times \{0, 1\}^n \mapsto Z^n

        \bullet: H \times Z \mapsto H

        g: H^n \times X^n \times \{0, 1\}^n \mapsto Y^n

    where :math:`\bullet` must be a sequential scan.

    """

    algebra: BinaryAlgebra
    scan: Callable[
        [
            Callable[[RecurrentState, RecurrentState], RecurrentState],
            RecurrentState,
            RecurrentState,
        ],
        RecurrentState,
    ]

    def forward_map(
        self, x: Input, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> RecurrentState:
        """Maps inputs to the monoid space"""
        raise NotImplementedError

    def backward_map(
        self,
        h: RecurrentState,
        x: Input,
        key: Optional[Shaped[PRNGKeyArray, ""]] = None,
    ) -> OutputEmbedding:
        """Maps the monoid space to outputs"""
        raise NotImplementedError

    def __call__(
        self,
        h: SingleRecurrentState,
        x: Input,
        key: Optional[Shaped[PRNGKeyArray, ""]] = None,
    ) -> Tuple[RecurrentState, OutputEmbedding]:
        """Calls the mapping and scan functions.

        You probably do not need to override this."""
        emb, start = x
        T = start.shape[0]
        if key is None:
            # No stochasticity requested: pass None keys through both maps.
            in_key, scan_key, out_key = (None, None, None)
        else:
            in_key, scan_key, out_key = jax.random.split(key, 3)
            in_key = jax.random.split(in_key, T)
            # NOTE: scan_key is reserved for stochastic scans and is
            # currently unused by self.scan.
            out_key = jax.random.split(out_key, T)
        scan_input = eqx.filter_vmap(self.forward_map)(x, in_key)
        next_h = self.scan(self.algebra, h, scan_input)
        y = eqx.filter_vmap(self.backward_map)(next_h, x, out_key)
        return next_h, y

    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> SingleRecurrentState:
        """Initialize the recurrent state for a new sequence."""
        return self.algebra.initialize_carry(key=key)

    def latest_recurrent_state(self, h: RecurrentState) -> RecurrentState:
        """Get the latest state from a sequence of recurrent states."""
        return jax.tree.map(lambda x: x[-1], h)
popgym_arcade/baselines/model/memorax/groups.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Optional, Tuple
3
+
4
+ import equinox as eqx
5
+ import jax
6
+ import jax.numpy as jnp
7
+ from jaxtyping import PRNGKeyArray, Shaped
8
+
9
+ from popgym_arcade.baselines.model.memorax.mtypes import (
10
+ Input,
11
+ RecurrentState,
12
+ ResetRecurrentState,
13
+ StartFlag,
14
+ )
15
+ from popgym_arcade.baselines.model.memorax.utils import debug_shape
16
+
17
+
18
class Module(eqx.Module):
    r"""
    Abstract base class for memory/sequence models.

    A module :math:`f` consumes a recurrent state plus a sequence of inputs
    and produces a sequence of recurrent states plus outputs.  Inputs always
    carry a binary start flag:

    .. math::

        f: H \times X^n \times \{0, 1\}^n \mapsto H^n \times Y^n


    A start flag of 1 marks the first element of a new sequence.  For example,
    .. code::

        [1, 2, 3, 4, 5]
        [0, 0, 1, 0, 1]

    means the inputs at 3 and 5 each begin a fresh sequence.

    """

    def __call__(self, s: RecurrentState, x: Input) -> RecurrentState:
        # Concrete models implement the actual recurrence.
        raise NotImplementedError

    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> RecurrentState:
        # Concrete models return the initial state for a new sequence.
        raise NotImplementedError
47
+
48
+
49
class BinaryAlgebra(Module):
    r"""A binary algebraic structure (e.g., monoid, magma, group, etc) that maps two inputs to an output.

    The inputs and output must belong to the same set

    .. math::

        f: H \times H \mapsto H
    """

    def __call__(self, carry: RecurrentState, input: RecurrentState) -> RecurrentState:
        # BUGFIX: was `pass`, which silently returned None when a subclass
        # forgot to override; raise instead, consistent with Semigroup/Module.
        raise NotImplementedError

    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> RecurrentState:
        raise NotImplementedError
66
+
67
+
68
class SetAction(BinaryAlgebra):
    r"""A magma, as defined in https://en.wikipedia.org/wiki/Magma_(algebra)

    A Magma is a set :math:`H` and an operator :math:`\bullet` that maps two inputs to an output

    .. math::

        \bullet: H \times H \mapsto H

    The operator need not be associative, so magma-based models must be
    scanned sequentially rather than with a parallel scan.
    """

    def __call__(self, carry: RecurrentState, input: RecurrentState) -> RecurrentState:
        # BUGFIX: was `pass`, which silently returned None when a subclass
        # forgot to override; raise instead, consistent with Semigroup/Module.
        raise NotImplementedError

    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> RecurrentState:
        raise NotImplementedError
85
+
86
+
87
class Semigroup(BinaryAlgebra):
    r"""A semigroup, as defined in https://en.wikipedia.org/wiki/Semigroup

    A semigroup is a set :math:`H` with an associative binary operator
    :math:`\bullet` (unlike a monoid, no identity element is required):

    .. math::

        \bullet: H \times H \mapsto H

        (a \bullet b) \bullet c = a \bullet (b \bullet c)

    Associativity is what allows semigroup-based models to be evaluated with
    a parallel (associative) scan.
    """

    def __call__(self, carry: RecurrentState, input: RecurrentState) -> RecurrentState:
        raise NotImplementedError

    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> RecurrentState:
        raise NotImplementedError
111
+
112
+
113
class Resettable(BinaryAlgebra):
    """Wraps an algebra so the recurrent state is re-initialised whenever a
    start flag marks the beginning of a new sequence.

    Applicable to both semigroups and magmas/set actions.
    """

    algebra: BinaryAlgebra

    def __init__(self, algebra: BinaryAlgebra):
        self.algebra = algebra

    def __call__(self, carry: ResetRecurrentState, input: ResetRecurrentState):
        assert jax.tree.structure(carry) == jax.tree.structure(
            input
        ), f"Mismatched structures passed to algebra, {jax.tree.structure(carry)} and {jax.tree.structure(input)}"
        prev_states, prev_carry_reset_flag = carry
        xs, start = input

        def _maybe_reset(
            start: StartFlag,
            current_state: RecurrentState,
            initial_state: RecurrentState,
        ):
            # Arithmetic blend: keep the current state where start == 0,
            # substitute the initial state where start == 1.
            assert initial_state.ndim == current_state.ndim
            return current_state * jnp.logical_not(start) + initial_state * start

        # TODO: Plumb key thru
        fresh_states = self.algebra.initialize_carry(None)
        states = jax.tree.map(partial(_maybe_reset, start), prev_states, fresh_states)
        out = self.algebra(states, xs)
        carry_reset_flag = jnp.logical_or(start, prev_carry_reset_flag)
        to_return = out, carry_reset_flag
        assert jax.tree.structure(carry) == jax.tree.structure(
            to_return
        ), f"Mismatched structures passed from algebra,\n{jax.tree.structure(carry)} and\n{jax.tree.structure(out)}"
        assert all(
            jax.tree.leaves(jax.tree.map(lambda a, b: a.shape == b.shape, states, out))
        ), f"Shapes do not match\n{debug_shape(states)} and\n{debug_shape(out)}"
        return to_return

    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> RecurrentState:
        # Wrapped state is (inner algebra state, boolean reset flag).
        return self.algebra.initialize_carry(key), jnp.zeros((), dtype=bool)
popgym_arcade/baselines/model/memorax/magmas/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from popgym_arcade.baselines.model.memorax.magmas.elman import Elman
2
+ from popgym_arcade.baselines.model.memorax.magmas.gru import GRU
3
+ from popgym_arcade.baselines.model.memorax.magmas.lstm import LSTM
4
+ from popgym_arcade.baselines.model.memorax.magmas.mgu import MGU
5
+ from popgym_arcade.baselines.model.memorax.magmas.spherical import Spherical
popgym_arcade/baselines/model/memorax/magmas/elman.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Callable, Optional, Tuple
2
+
3
+ import jax
4
+ import jax.numpy as jnp
5
+ from beartype import beartype as typechecker
6
+ from equinox import nn
7
+ from jaxtyping import Array, Float, PRNGKeyArray, Shaped, jaxtyped
8
+
9
+ from popgym_arcade.baselines.model.memorax.gras import GRAS
10
+ from popgym_arcade.baselines.model.memorax.groups import (
11
+ BinaryAlgebra,
12
+ Resettable,
13
+ SetAction,
14
+ )
15
+ from popgym_arcade.baselines.model.memorax.mtypes import Input, StartFlag
16
+ from popgym_arcade.baselines.model.memorax.scans import set_action_scan
17
+
18
+ ElmanRecurrentState = Float[Array, "Recurrent"]
19
+ ElmanRecurrentStateWithReset = Tuple[ElmanRecurrentState, StartFlag]
20
+
21
+
22
class ElmanMagma(SetAction):
    """
    The Elman Magma

    Paper: https://onlinelibrary.wiley.com/doi/abs/10.1207/s15516709cog1402_1.
    """

    recurrent_size: int
    U_h: nn.Linear

    def __init__(self, recurrent_size: int, key):
        self.recurrent_size = recurrent_size
        self.U_h = nn.Linear(recurrent_size, recurrent_size, key=key)

    @jaxtyped(typechecker=typechecker)
    def __call__(
        self, carry: ElmanRecurrentState, input: ElmanRecurrentState
    ) -> ElmanRecurrentState:
        # h_t = tanh(U_h h_{t-1} + z_t); the input projection z_t = W_h x_t
        # is applied upstream in Elman.forward_map.
        preactivation = input + self.U_h(carry)
        return jax.nn.tanh(preactivation)

    @jaxtyped(typechecker=typechecker)
    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> ElmanRecurrentState:
        # Sequences start from the zero state.
        return jnp.zeros((self.recurrent_size,))
47
+
48
+
49
class LNElmanMagma(SetAction):
    """
    The Elman Recurrent Network

    Paper: https://onlinelibrary.wiley.com/doi/abs/10.1207/s15516709cog1402_1.

    The tanh is replaced with layernorm.
    """

    recurrent_size: int
    U_h: nn.Linear
    ln: nn.LayerNorm

    def __init__(self, recurrent_size: int, key):
        self.recurrent_size = recurrent_size
        self.U_h = nn.Linear(recurrent_size, recurrent_size, key=key)
        # Parameter-free normalisation takes the place of the tanh squash.
        self.ln = nn.LayerNorm((recurrent_size,), use_bias=False, use_weight=False)

    @jaxtyped(typechecker=typechecker)
    def __call__(
        self, carry: ElmanRecurrentState, input: Float[Array, "Recurrent"]
    ) -> ElmanRecurrentState:
        preactivation = input + self.U_h(carry)
        return self.ln(preactivation)

    @jaxtyped(typechecker=typechecker)
    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> ElmanRecurrentState:
        # Sequences start from the zero state.
        return jnp.zeros((self.recurrent_size,))
78
+
79
+
80
class Elman(GRAS):
    """
    The Elman Recurrent Network

    Paper: https://onlinelibrary.wiley.com/doi/abs/10.1207/s15516709cog1402_1.
    """

    algebra: BinaryAlgebra
    scan: Callable[
        [
            Callable[
                [ElmanRecurrentStateWithReset, ElmanRecurrentStateWithReset],
                ElmanRecurrentStateWithReset,
            ],
            ElmanRecurrentStateWithReset,
            ElmanRecurrentStateWithReset,
        ],
        ElmanRecurrentStateWithReset,
    ]
    recurrent_size: int
    hidden_size: int
    W_h: nn.Linear
    W_y: nn.Linear

    def __init__(self, recurrent_size, hidden_size, ln_variant=False, *, key):
        self.recurrent_size = recurrent_size
        self.hidden_size = hidden_size
        k_magma, k_in, k_out = jax.random.split(key, 3)
        # Choose the layernorm or tanh flavour of the recurrence.
        magma_cls = LNElmanMagma if ln_variant else ElmanMagma
        self.algebra = Resettable(magma_cls(recurrent_size, key=k_magma))
        self.scan = set_action_scan
        self.W_h = nn.Linear(hidden_size, recurrent_size, use_bias=False, key=k_in)
        self.W_y = nn.Linear(recurrent_size, hidden_size, key=k_out)

    @jaxtyped(typechecker=typechecker)
    def forward_map(
        self, x: Input, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> ElmanRecurrentStateWithReset:
        # Project the embedding into recurrent space; pass the flag along.
        embedding, begin = x
        return self.W_h(embedding), begin

    @jaxtyped(typechecker=typechecker)
    def backward_map(
        self,
        h: ElmanRecurrentStateWithReset,
        x: Input,
        key: Optional[Shaped[PRNGKeyArray, ""]] = None,
    ) -> Float[Array, "{self.hidden_size}"]:
        # Only the recurrent state contributes to the output projection.
        state, _reset_flag = h
        _embedding, _begin = x
        return self.W_y(state)

    @jaxtyped(typechecker=typechecker)
    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> ElmanRecurrentStateWithReset:
        return self.algebra.initialize_carry(key)
popgym_arcade/baselines/model/memorax/magmas/gru.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Callable, Optional, Tuple
2
+
3
+ import jax
4
+ import jax.numpy as jnp
5
+ from beartype import beartype as typechecker
6
+ from equinox import nn
7
+ from jaxtyping import Array, Float, PRNGKeyArray, Shaped, jaxtyped
8
+
9
+ from popgym_arcade.baselines.model.memorax.gras import GRAS
10
+ from popgym_arcade.baselines.model.memorax.groups import (
11
+ BinaryAlgebra,
12
+ Resettable,
13
+ SetAction,
14
+ )
15
+ from popgym_arcade.baselines.model.memorax.mtypes import Input, StartFlag
16
+ from popgym_arcade.baselines.model.memorax.scans import set_action_scan
17
+
18
+ GRURecurrentState = Float[Array, "Recurrent"]
19
+ GRURecurrentStateWithReset = Tuple[GRURecurrentState, StartFlag]
20
+
21
+
22
class GRUMagma(SetAction):
    """
    The Gated Recurrent Unit Magma

    Paper: https://arxiv.org/abs/1406.1078
    """

    recurrent_size: int
    U_z: nn.Linear
    U_r: nn.Linear
    U_h: nn.Linear
    W_z: nn.Linear
    W_r: nn.Linear
    W_h: nn.Linear

    def __init__(self, recurrent_size: int, key):
        self.recurrent_size = recurrent_size
        # One key per projection; recurrent (U_*) projections carry no bias.
        k_uz, k_ur, k_uh, k_wz, k_wr, k_wh = jax.random.split(key, 6)
        self.U_z = nn.Linear(recurrent_size, recurrent_size, use_bias=False, key=k_uz)
        self.U_r = nn.Linear(recurrent_size, recurrent_size, use_bias=False, key=k_ur)
        self.U_h = nn.Linear(recurrent_size, recurrent_size, use_bias=False, key=k_uh)
        self.W_z = nn.Linear(recurrent_size, recurrent_size, key=k_wz)
        self.W_r = nn.Linear(recurrent_size, recurrent_size, key=k_wr)
        self.W_h = nn.Linear(recurrent_size, recurrent_size, key=k_wh)

    @jaxtyped(typechecker=typechecker)
    def __call__(
        self, carry: GRURecurrentState, input: Float[Array, "Recurrent"]
    ) -> GRURecurrentState:
        # Standard GRU recurrence: update gate z, reset gate r, candidate h_hat.
        update_gate = jax.nn.sigmoid(self.W_z(input) + self.U_z(carry))
        reset_gate = jax.nn.sigmoid(self.W_r(input) + self.U_r(carry))
        candidate = jax.nn.tanh(self.W_h(input) + self.U_h(reset_gate * carry))
        return (1 - update_gate) * carry + update_gate * candidate

    @jaxtyped(typechecker=typechecker)
    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> GRURecurrentState:
        # Sequences start from the zero state.
        return jnp.zeros((self.recurrent_size,))
69
+
70
+
71
class GRU(GRAS):
    """
    The Gated Recurrent Unit

    Paper: https://arxiv.org/abs/1406.1078
    """

    algebra: BinaryAlgebra
    scan: Callable[
        [
            Callable[
                [GRURecurrentStateWithReset, GRURecurrentStateWithReset],
                GRURecurrentStateWithReset,
            ],
            GRURecurrentStateWithReset,
            GRURecurrentStateWithReset,
        ],
        GRURecurrentStateWithReset,
    ]
    recurrent_size: int

    def __init__(self, recurrent_size, key):
        keys = jax.random.split(key, 3)
        self.recurrent_size = recurrent_size
        # Resettable so the hidden state zeroes out at sequence boundaries;
        # the GRU is non-associative, hence the sequential set-action scan.
        self.algebra = Resettable(GRUMagma(recurrent_size, key=keys[0]))
        self.scan = set_action_scan

    @jaxtyped(typechecker=typechecker)
    def forward_map(
        self, x: Input, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> GRURecurrentStateWithReset:
        # Identity: the magma consumes raw embeddings directly.
        embedding, begin = x
        return embedding, begin

    @jaxtyped(typechecker=typechecker)
    def backward_map(
        self,
        h: GRURecurrentStateWithReset,
        x: Input,
        key: Optional[Shaped[PRNGKeyArray, ""]] = None,
    ) -> Float[Array, "{self.recurrent_size}"]:
        # Output is simply the recurrent state; inputs are unused here.
        state, _reset_flag = h
        _embedding, _begin = x
        return state

    @jaxtyped(typechecker=typechecker)
    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> GRURecurrentStateWithReset:
        return self.algebra.initialize_carry(key)
popgym_arcade/baselines/model/memorax/magmas/lstm.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Callable, Optional, Tuple
2
+
3
+ import jax
4
+ import jax.numpy as jnp
5
+ from beartype import beartype as typechecker
6
+ from equinox import nn
7
+ from jaxtyping import Array, Float, PRNGKeyArray, Shaped, jaxtyped
8
+
9
+ from popgym_arcade.baselines.model.memorax.gras import GRAS
10
+ from popgym_arcade.baselines.model.memorax.groups import (
11
+ BinaryAlgebra,
12
+ Resettable,
13
+ SetAction,
14
+ )
15
+ from popgym_arcade.baselines.model.memorax.mtypes import (
16
+ Input,
17
+ InputEmbedding,
18
+ StartFlag,
19
+ )
20
+ from popgym_arcade.baselines.model.memorax.scans import set_action_scan
21
+
22
+ LSTMRecurrentState = Tuple[Float[Array, "Recurrent"], Float[Array, "Recurrent"]]
23
+ LSTMRecurrentStateWithReset = Tuple[LSTMRecurrentState, StartFlag]
24
+
25
+
26
class LSTMMagma(SetAction):
    """
    The Long Short-Term Memory Magma

    Paper: https://www.bioinf.jku.at/publications/older/2604.pdf
    """

    recurrent_size: int
    # BUGFIX: field declarations must match what __init__ assigns.  The
    # original declared GRU-style names (U_z/U_r/U_h, W_z/W_r/W_h) that were
    # never set, while assigning U_f/U_i/U_o/U_c and W_f/W_i/W_o/W_c —
    # Equinox rejects assignment to undeclared fields at __init__ time.
    U_f: nn.Linear
    U_i: nn.Linear
    U_o: nn.Linear
    U_c: nn.Linear
    W_f: nn.Linear
    W_i: nn.Linear
    W_o: nn.Linear
    W_c: nn.Linear

    def __init__(self, recurrent_size: int, key):
        self.recurrent_size = recurrent_size
        keys = jax.random.split(key, 8)
        # Recurrent projections (U_*) carry no bias; input projections (W_*) do.
        self.U_f = nn.Linear(
            recurrent_size, recurrent_size, use_bias=False, key=keys[0]
        )
        self.U_i = nn.Linear(
            recurrent_size, recurrent_size, use_bias=False, key=keys[1]
        )
        self.U_o = nn.Linear(
            recurrent_size, recurrent_size, use_bias=False, key=keys[2]
        )
        self.U_c = nn.Linear(
            recurrent_size, recurrent_size, use_bias=False, key=keys[3]
        )

        self.W_f = nn.Linear(recurrent_size, recurrent_size, key=keys[4])
        self.W_i = nn.Linear(recurrent_size, recurrent_size, key=keys[5])
        self.W_o = nn.Linear(recurrent_size, recurrent_size, key=keys[6])
        self.W_c = nn.Linear(recurrent_size, recurrent_size, key=keys[7])

    @jaxtyped(typechecker=typechecker)
    def __call__(
        self, carry: LSTMRecurrentState, input: Float[Array, "Recurrent"]
    ) -> LSTMRecurrentState:
        c, h = carry
        f_f = jax.nn.sigmoid(self.W_f(input) + self.U_f(h))  # forget gate
        f_i = jax.nn.sigmoid(self.W_i(input) + self.U_i(h))  # input gate
        f_o = jax.nn.sigmoid(self.W_o(input) + self.U_o(h))  # output gate
        # NOTE(review): the textbook LSTM uses tanh for the candidate and
        # h = o * tanh(c); this variant uses sigmoid and h = o * c.
        # Preserved as-is — confirm it is intentional.
        f_c = jax.nn.sigmoid(self.W_c(input) + self.U_c(h))

        c = f_f * c + f_i * f_c
        h = f_o * c

        return (c, h)

    @jaxtyped(typechecker=typechecker)
    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> LSTMRecurrentState:
        # (cell state, hidden state), both zero-initialised.
        return (
            jnp.zeros((self.recurrent_size,)),
            jnp.zeros((self.recurrent_size,)),
        )
+
86
+
87
class LSTM(GRAS):
    """
    The Long Short-Term Memory

    Paper: https://www.bioinf.jku.at/publications/older/2604.pdf
    """

    algebra: BinaryAlgebra
    scan: Callable[
        [
            Callable[
                [LSTMRecurrentStateWithReset, LSTMRecurrentStateWithReset],
                LSTMRecurrentStateWithReset,
            ],
            LSTMRecurrentStateWithReset,
            LSTMRecurrentStateWithReset,
        ],
        LSTMRecurrentStateWithReset,
    ]

    def __init__(self, recurrent_size, key):
        keys = jax.random.split(key, 3)
        self.algebra = Resettable(LSTMMagma(recurrent_size, key=keys[0]))
        self.scan = set_action_scan

    @jaxtyped(typechecker=typechecker)
    def forward_map(
        self, x: Input, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> LSTMRecurrentStateWithReset:
        # Identity: the magma consumes raw embeddings directly.
        emb, start = x
        return emb, start

    @jaxtyped(typechecker=typechecker)
    def backward_map(
        self,
        h: LSTMRecurrentStateWithReset,
        x: Input,
        # BUGFIX: the annotation was Float[Array, "{self.hidden_size}"], but
        # LSTM has no hidden_size attribute (jaxtyping cannot resolve it) and
        # z is the (cell, hidden) tuple, not a single array.
    ) -> LSTMRecurrentState:
        z, reset_flag = h
        emb, start = x
        return z

    @jaxtyped(typechecker=typechecker)
    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
        # BUGFIX: the algebra is Resettable, so the carry includes the reset
        # flag; annotate accordingly (the original said LSTMRecurrentState,
        # which the runtime typechecker would reject).
    ) -> LSTMRecurrentStateWithReset:
        return self.algebra.initialize_carry(key)
popgym_arcade/baselines/model/memorax/magmas/mgu.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Callable, Optional, Tuple
2
+
3
+ import jax
4
+ import jax.numpy as jnp
5
+ from beartype import beartype as typechecker
6
+ from equinox import nn
7
+ from jaxtyping import Array, Float, PRNGKeyArray, Shaped, jaxtyped
8
+
9
+ from popgym_arcade.baselines.model.memorax.gras import GRAS
10
+ from popgym_arcade.baselines.model.memorax.groups import (
11
+ BinaryAlgebra,
12
+ Resettable,
13
+ SetAction,
14
+ )
15
+ from popgym_arcade.baselines.model.memorax.mtypes import Input, StartFlag
16
+ from popgym_arcade.baselines.model.memorax.scans import set_action_scan
17
+
18
+ MGURecurrentState = Float[Array, "Recurrent"]
19
+ MGURecurrentStateWithReset = Tuple[MGURecurrentState, StartFlag]
20
+
21
+
22
class MGUMagma(SetAction):
    """
    The Minimal Gated Unit Magma: a single-gate recurrent cell update.

    Paper: https://arxiv.org/abs/1701.03452
    """

    recurrent_size: int
    U_h: nn.Linear
    U_f: nn.Linear
    W_h: nn.Linear
    W_f: nn.Linear

    def __init__(self, recurrent_size: int, key):
        self.recurrent_size = recurrent_size
        k_uh, k_uf, k_wh, k_wf = jax.random.split(key, 4)
        # Recurrent (U_*) projections carry no bias; input (W_*) ones do.
        self.U_h = nn.Linear(
            recurrent_size, recurrent_size, use_bias=False, key=k_uh
        )
        self.U_f = nn.Linear(
            recurrent_size, recurrent_size, use_bias=False, key=k_uf
        )
        self.W_h = nn.Linear(recurrent_size, recurrent_size, key=k_wh)
        self.W_f = nn.Linear(recurrent_size, recurrent_size, key=k_wf)

    @jaxtyped(typechecker=typechecker)
    def __call__(
        self, carry: MGURecurrentState, input: Float[Array, "Recurrent"]
    ) -> MGURecurrentState:
        """Advance the recurrent state by one step of the MGU update."""
        # Forget gate: how much of the previous state is overwritten.
        forget = jax.nn.sigmoid(self.W_f(input) + self.U_f(carry))
        # Candidate state, computed from the gate-masked previous state.
        candidate = jax.nn.tanh(self.W_h(input) + self.U_h(forget * carry))
        # Convex combination of old state and candidate.
        return (1 - forget) * carry + forget * candidate

    @jaxtyped(typechecker=typechecker)
    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> MGURecurrentState:
        """Return the all-zero initial state (``key`` is unused)."""
        return jnp.zeros((self.recurrent_size,))
61
+
62
+
63
class MGU(GRAS):
    """The minimal gated unit, scanned over sequences with episode resets.

    Wraps :class:`MGUMagma` in a resettable algebra so start flags zero the
    carried state at episode boundaries.

    Paper: https://arxiv.org/abs/1701.03452
    """

    algebra: BinaryAlgebra
    scan: Callable[
        [
            Callable[
                [MGURecurrentStateWithReset, MGURecurrentStateWithReset],
                MGURecurrentStateWithReset,
            ],
            MGURecurrentStateWithReset,
            MGURecurrentStateWithReset,
        ],
        MGURecurrentStateWithReset,
    ]
    recurrent_size: int

    def __init__(self, recurrent_size, key):
        self.recurrent_size = recurrent_size
        # Split into three streams but consume only the first, keeping the
        # PRNG derivation identical to sibling cells that split the same way.
        magma_key = jax.random.split(key, 3)[0]
        self.algebra = Resettable(MGUMagma(recurrent_size, key=magma_key))
        self.scan = set_action_scan

    @jaxtyped(typechecker=typechecker)
    def forward_map(
        self, x: Input, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> MGURecurrentStateWithReset:
        """Lift an (embedding, start-flag) input into the scan element type."""
        embedding, start_flag = x
        return embedding, start_flag

    @jaxtyped(typechecker=typechecker)
    def backward_map(
        self,
        h: MGURecurrentStateWithReset,
        x: Input,
        key: Optional[Shaped[PRNGKeyArray, ""]] = None,
    ) -> Float[Array, "{self.recurrent_size}"]:
        """Emit the carried state vector, discarding the reset flag."""
        state, _reset_flag = h
        _embedding, _start_flag = x
        return state

    @jaxtyped(typechecker=typechecker)
    def initialize_carry(
        self, key: Optional[Shaped[PRNGKeyArray, ""]] = None
    ) -> MGURecurrentStateWithReset:
        """Delegate to the resettable algebra's initial (state, flag) carry."""
        return self.algebra.initialize_carry(key)