LouisRG committed
Commit aad5902 · verified · 1 Parent(s): 000b970

First upload of the A2C SB3 Panda Reach model
README.md CHANGED
@@ -1,37 +1,40 @@
- ---
- library_name: stable-baselines3
- tags:
- - CartPole-v1
- - deep-reinforcement-learning
- - reinforcement-learning
- - stable-baselines3
- model-index:
- - name: A2C
-   results:
-   - task:
-       type: reinforcement-learning
-       name: reinforcement-learning
-     dataset:
-       name: CartPole-v1
-       type: CartPole-v1
-     metrics:
-     - type: mean_reward
-       value: 500.00 +/- 0.00
-       name: mean_reward
-       verified: false
- ---
-
- # **A2C** Agent playing **CartPole-v1**
- This is a trained model of a **A2C** agent playing **CartPole-v1**
- using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
-
- ## Usage (with Stable-baselines3)
- TODO: Add your code
-
-
- ```python
- from stable_baselines3 import ...
- from huggingface_sb3 import load_from_hub
-
- ...
- ```
+ ---
+ library_name: stable-baselines3
+ tags:
+ - PandaReachJointsDense-v3
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - stable-baselines3
+ model-index:
+ - name: A2C
+   results:
+   - task:
+       type: reinforcement-learning
+       name: reinforcement-learning
+     dataset:
+       name: PandaReachJointsDense-v3
+       type: PandaReachJointsDense-v3
+     metrics:
+     - type: mean_reward
+       value: -0.38 +/- 0.27
+       name: mean_reward
+       verified: false
+ ---
+
+ # **A2C** Agent playing **PandaReachJointsDense-v3**
+
+ This is a trained model of an **A2C** agent playing **PandaReachJointsDense-v3**
+ using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
+
+
+ ## Usage (with Stable-baselines3)
+
+ TODO: Add your code
+
+
+ ```python
+ from stable_baselines3 import ...
+ from huggingface_sb3 import load_from_hub
+
+ ...
+ ```
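The usage section of the card is still a TODO. A minimal loading sketch, assuming the checkpoint is published under the hypothetical repo id `LouisRG/a2c_sb3_panda_reach` (inferred from the uploader and archive name) and that `panda_gym` is installed to provide the environment:

```python
import gymnasium as gym
import panda_gym  # noqa: F401 - importing registers PandaReachJointsDense-v3

from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Hypothetical repo id, inferred from the uploader and the archive name.
checkpoint = load_from_hub(
    repo_id="LouisRG/a2c_sb3_panda_reach",
    filename="a2c_sb3_panda_reach.zip",
)
model = A2C.load(checkpoint)

env = gym.make("PandaReachJointsDense-v3")
obs, _ = env.reset()
for _ in range(100):
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, terminated, truncated, _ = env.step(action)
    if terminated or truncated:
        obs, _ = env.reset()
env.close()
```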
a2c_sb3_panda_reach.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9753b270f8b2262850362e878442576b680cf7269eb7154dd7c227d3b55274cd
+ size 113679
a2c_sb3_panda_reach/_stable_baselines3_version ADDED
@@ -0,0 +1 @@
+ 2.7.1
a2c_sb3_panda_reach/data ADDED
@@ -0,0 +1,107 @@
+ {
+ "policy_class": {
+ ":type:": "<class 'abc.ABCMeta'>",
+ ":serialized:": "gAWVRQAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMG011bHRpSW5wdXRBY3RvckNyaXRpY1BvbGljeZSTlC4=",
+ "__module__": "stable_baselines3.common.policies",
+ "__doc__": "\n MultiInputActorClass policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space (Tuple)\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Uses the CombinedExtractor\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
+ "__init__": "<function MultiInputActorCriticPolicy.__init__ at 0x0000024814FD20C0>",
+ "__abstractmethods__": "frozenset()",
+ "_abc_impl": "<_abc._abc_data object at 0x0000024814FCF280>"
+ },
+ "verbose": 1,
+ "policy_kwargs": {
+ ":type:": "<class 'dict'>",
+ ":serialized:": "gAWVgQAAAAAAAAB9lCiMD29wdGltaXplcl9jbGFzc5SME3RvcmNoLm9wdGltLnJtc3Byb3CUjAdSTVNwcm9wlJOUjBBvcHRpbWl6ZXJfa3dhcmdzlH2UKIwFYWxwaGGURz/vrhR64UeujANlcHOURz7k+LWI42jxjAx3ZWlnaHRfZGVjYXmUSwB1dS4=",
+ "optimizer_class": "<class 'torch.optim.rmsprop.RMSprop'>",
+ "optimizer_kwargs": {
+ "alpha": 0.99,
+ "eps": 1e-05,
+ "weight_decay": 0
+ }
+ },
+ "num_timesteps": 500000,
+ "_total_timesteps": 500000,
+ "_num_timesteps_at_start": 0,
+ "seed": null,
+ "action_noise": null,
+ "start_time": 1773829211581250500,
+ "learning_rate": 0.0007,
+ "tensorboard_log": "runs/p67z2l0u",
+ "_last_obs": {
+ ":type:": "<class 'collections.OrderedDict'>",
+ ":serialized:": "gAWVKwEAAAAAAACMC2NvbGxlY3Rpb25zlIwLT3JkZXJlZERpY3SUk5QpUpQojA1hY2hpZXZlZF9nb2FslIwSbnVtcHkuY29yZS5udW1lcmljlIwLX2Zyb21idWZmZXKUk5QolgwAAAAAAAAAUmRiveVyxD3nPVo+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksBSwOGlIwBQ5R0lFKUjAxkZXNpcmVkX2dvYWyUaAcolgwAAAAAAAAA2SUJvry2wT2qCSg+lGgOSwFLA4aUaBJ0lFKUjAtvYnNlcnZhdGlvbpRoByiWGAAAAAAAAABSZGK95XLEPec9Wj5lECe+V33VvUDzjz2UaA5LAUsGhpRoEnSUUpR1Lg==",
+ "achieved_goal": "[[-0.05527145 0.09592227 0.21312676]]",
+ "desired_goal": "[[-0.13393344 0.09458682 0.16409937]]",
+ "observation": "[[-0.05527145 0.09592227 0.21312676 -0.16314848 -0.10424297 0.07028818]]"
+ },
+ "_last_episode_starts": {
+ ":type:": "<class 'numpy.ndarray'>",
+ ":serialized:": "gAWVdAAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYBAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwGFlIwBQ5R0lFKULg=="
+ },
+ "_last_original_obs": null,
+ "_episode_num": 0,
+ "use_sde": false,
+ "sde_sample_freq": -1,
+ "_current_progress_remaining": 0.0,
+ "_stats_window_size": 100,
+ "ep_info_buffer": {
+ ":type:": "<class 'collections.deque'>",
+ ":serialized:": "gAWV4AsAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHv8uxlg+hXbOMAWyUSwOMAXSUR0CaURvxYq5LdX2UKGgGR7/XML4N7SiNaAdLBGgIR0CaUSQiiZfEdX2UKGgGR7/bl7dBSk0raAdLBGgIR0CaUS9l2/zrdX2UKGgGR7/sRCpm29csaAdLCWgIR0CaUUkB0ZFYdX2UKGgGR7/pcj7hvR7aaAdLCWgIR0CaUWi9Zid8dX2UKGgGR7/ctvXK8tf5aAdLBWgIR0CaUXsr/bTMdX2UKGgGR7/SrO7g88s+aAdLBGgIR0CaUYuNxVABdX2UKGgGR7+5OHnEETxoaAdLAmgIR0CaUZGzru6VdX2UKGgGR7/IZUDMeOn3aAdLA2gIR0CaUZ39JjDsdX2UKGgGR7/JO1v2oNutaAdLA2gIR0CaUaczqKP5dX2UKGgGR7/ON96Tnq3WaAdLA2gIR0CaUbWJrLyMdX2UKGgGR7+7WZqmCROlaAdLAmgIR0CaUbqoqCpWdX2UKGgGR7/VZmZmZmZmaAdLBGgIR0CaUcwRGtp3dX2UKGgGR7+00iyIHkcTaAdLAmgIR0CaUdI2fkFOdX2UKGgGR7/sIQe3hGYsaAdLCWgIR0CaUgLeyiVTdX2UKGgGR7/IXzlLeyiVaAdLA2gIR0CaUhRHPNVzdX2UKGgGR7+Sr1dxAB1caAdLAWgIR0CaUhlmOEM9dX2UKGgGR7/aZr56+nIiaAdLBWgIR0CaUi7mMfihdX2UKGgGR7+7gccU/OdHaAdLAmgIR0CaUjULDye7dX2UKGgGR7/Sd8zAN5MUaAdLBGgIR0CaUkRm9QGfdX2UKGgGR7/lJcHGCI1taAdLB2gIR0CaUmES/TLGdX2UKGgGR7/cRGMGX5WSaAdLBWgIR0CaUnSHdoFndX2UKGgGR7/l3IEKVpsXaAdLB2gIR0CaUo4h2W6cdX2UKGgGR7/QBDXvphWpaAdLBGgIR0CaUp19v0iAdX2UKGgGR7/AL7XQMQVcaAdLA2gIR0CaUqWt2cJ/dX2UKGgGR7/ozJIUahpQaAdLCWgIR0CaUsd4VymzdX2UKGgGR7/FESdvsJIEaAdLA2gIR0CaUtG2CulodX2UKGgGR7/DwfhddE9daAdLA2gIR0CaUtvy9VWCdX2UKGgGR7/k9mpVCHARaAdLB2gIR0CaUv7EYO2BdX2UKGgGR7/Yog3cYZVGaAdLBGgIR0CaUx+IdlundX2UKGgGR7/haJIlMRHxaAdLB2gIR0CaU0JYkmhNdX2UKGgGR7/UiHqNZNfxaAdLBGgIR0CaU1cIqsltdX2UKGgGR7/BCJGe+VTraAdLAmgIR0CaU185S3spdX2UKGgGR7/w5XZGrjo7aAdLCmgIR0CaU4o6CDmKdX2UKGgGR7/kkdmxt52RaAdLBWgIR0CaU561stTUdX2UKGgGR7/PoGpuMuOCaAdLA2gIR0CaU6wEQoTgdX2UKGgGR7/ZkfLcKw6iaAdLBWgIR0CaU796C17ZdX2UKGgGR7/XDQ7cO9WZaAdLBGgIR0CaU9Hn2ZiNdX2UKGgGR7/ad+ocaOxTaAdLBWgIR0CaU+VcUucudX2UKGgGR7+lYwIt16mgaAdLAWgIR0CaU+l0YCQtdX2UKGgGR7/dcinpB5X2aAdLBmgIR0CaVAEBbOeKdX2UKGgGR7/ZWHUMG5c1aAdLBWgIR0CaVBR2KVIJdX2UKGgGR7/J8qnWJ79iaAdLA2gIR0CaVCHFglWwdX2UKGgGR7/H2g3974SIaAdLA2gIR0CaVCr/KhcrdX2UKGgGR7/h32EkB0ZFaAdLBmgIR0CaVEq7yxzJdX2UKGgGR7+1NcnmaH9FaAdLAmgIR0CaVFkSElE7dX2UKGgGR7/HKBd2Pkq+aAdLA2gIR0CaVGJIDoyLdX2UKGgGR7/nDin5zo2XaAdLB2gIR0CaVIJgLJCCdX2UKGgGR7/AdV/+bVjJaAdLAmgIR0CaVIiExqO+dX2UKGgGR7/janJkoWpIaAdLCWgIR0CaVK5p8F6idX2UKGgGR7/gPl+3H7xeaAdLBmgIR0CaVMPo3aSLdX2UKGgGR7/ZXwsoUi6haAdLBGgIR0CaVNRKpT/AdX2UKGgGR7+ocBEKE385aAdLAWgIR0CaVNddE9dNdX2UKGgGR7+2DEm6XjU/aAdLAmgIR0CaVOGbTc7AdX2UKGgGR7/ZXZGrjo6kaAdLBWgIR0CaVPUPQOWjdX2UKGgGR7/XdRzijtXxaAdLBWgIR0CaVQqREF4cdX2UKGgGR7/nzv7WNFSbaAdLBmgIR0CaVSCYkVvddX2UKGgGR7/W2Xb/Ot4iaAdLBGgIR0CaVS/0/W1/dX2UKGgGR7/QNuLrHEMtaAdLA2gIR0CaVTgmqo60dX2UKGgGR7/XY3Ns3yZsaAdLBWgIR0CaVUqU/wAmdX2UKGgGR7++MCLdepn6aAdLAmgIR0CaVVXYDklvdX2UKGgGR7+8f4h2W6bwaAdLAmgIR0CaVVv9LpRodX2UKGgGR7+5qwhW5paiaAdLAmgIR0CaVWEbHZK4dX2UKGgGR7/MF0PpY9xIaAdLA2gIR0CaVW9w3o9tdX2UKGgGR7+78DSw4bS7aAdLAmgIR0CaVXSPU8V6dX2UKGgGR7/WNlyzXz19aAdLBWgIR0CaVYgEEC/5dX2UKGgGR7/FQC0WuX/paAdLA2gIR0CaVZZaV2RrdX2UKGgGR7/fGkep4rz5aAdLBmgIR0CaVavalDWtdX2UKGgGR7/XKifxtpEhaAdLBWgIR0CaVb9Q40djdX2UKGgGR7/YRtP557gLaAdLBGgIR0CaVc2lEZzgdX2UKGgGR7+77l7tzCDVaAdLAmgIR0CaVdLEk0JodX2UKGgGR7++dI5HVf/naAdLAmgIR0CaVd0dBBzFdX2UKGgGR7/mPvrnkkrxaAdLB2gIR0CaVfP6be/IdX2UKGgGR7/gI8hcJMQFaAdLBGgIR0CaVgPcSGrTdX2UKGgGR7/AyxiXpnpTaAdLAmgIR0CaVgj59E1EdX2UKGgGR7/hzURWcSXdaAdLBmgIR0CaViasIVuadX2UKGgGR7/c+JP69CeFaAdLBWgIR0CaVjkadc0MdX2UKGgGR7/e8hLXcxj8aAdLBmgIR0CaVk2VVxS6dX2UKGgGR7/fBRhttQ9BaAdLBGgIR0CaVlzyBkI5dX2UKGgGR7/hq8cuJ1q4aAdLBmgIR0CaVnJx//eddX2UKGgGR7+9bB42S+xoaAdLAmgIR0CaVneQdS2qdX2UKGgGR7/O1JlJ6IFeaAdLA2gIR0CaVoPaL4vfdX2UKGgGR7+8Hu7YkE9uaAdLAmgIR0CaVoj5KvmpdX2UKGgGR7/DTzd1uBMBaAdLAmgIR0CaVpQ8fV7QdX2UKGgGR7+8ypJf6XSjaAdLAmgIR0CaVplbNbC8dX2UKGgGR7+kq4H5aePJaAdLAWgIR0CaVpxtpEhJdX2UKGgGR7/ZHc1wYLssaAdLBWgIR0Ca
VrFr2xptdX2UKGgGR7/dzPKMefZmaAdLBWgIR0CaVskZ75VPdX2UKGgGR7/REUj9n9NvaAdLA2gIR0CaVth2GIsRdX2UKGgGR7/bqmCROk+HaAdLBmgIR0CaVvEJSiuddX2UKGgGR7/xG9+PRzBAaAdLCmgIR0CaVyAjps42dX2UKGgGR7/DPD50r9VFaAdLA2gIR0CaVzCF9KEndX2UKGgGR7/XZWJaaCtjaAdLBGgIR0CaVz7bcoH+dX2UKGgGR7/gJjDsMRYjaAdLBmgIR0CaV1yNXHR1dX2UKGgGR7/FTTfBN21VaAdLA2gIR0CaV2S+QEIPdX2UKGgGR7+6DVYp2ECeaAdLAmgIR0CaV3ABkqc3dX2UKGgGR7/I7tiQT238aAdLA2gIR0CaV3gyuZCwdX2UKGgGR7++OAAhje9BaAdLAmgIR0CaV4N2C/XYdX2UKGgGR7/Q01ZTyauwaAdLBGgIR0CaV4/BWPtEdX2UKGgGR7/e0Q9RrJr+aAdLBmgIR0CaV62v0RODdWUu"
+ },
+ "ep_success_buffer": {
+ ":type:": "<class 'collections.deque'>",
+ ":serialized:": "gAWVhgAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIhlLg=="
+ },
+ "_n_updates": 100000,
+ "observation_space": {
+ ":type:": "<class 'gymnasium.spaces.dict.Dict'>",
+ ":serialized:": "gAWVkQMAAAAAAACMFWd5bW5hc2l1bS5zcGFjZXMuZGljdJSMBERpY3SUk5QpgZR9lCiMBnNwYWNlc5R9lCiMDWFjaGlldmVkX2dvYWyUjBRneW1uYXNpdW0uc3BhY2VzLmJveJSMA0JveJSTlCmBlH2UKIwFZHR5cGWUjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRijAZfc2hhcGWUSwOFlIwDbG93lIwSbnVtcHkuY29yZS5udW1lcmljlIwLX2Zyb21idWZmZXKUk5QolgwAAAAAAAAAAAAgwQAAIMEAACDBlGgTSwOFlIwBQ5R0lFKUjA1ib3VuZGVkX2JlbG93lGgbKJYDAAAAAAAAAAEBAZRoEIwCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksDhZRoHnSUUpSMBGhpZ2iUaBsolgwAAAAAAAAAAAAgQQAAIEEAACBBlGgTSwOFlGgedJRSlIwNYm91bmRlZF9hYm92ZZRoGyiWAwAAAAAAAAABAQGUaCVLA4WUaB50lFKUjAhsb3dfcmVwcpSMBS0xMC4wlIwJaGlnaF9yZXBylIwEMTAuMJSMCl9ucF9yYW5kb22UTnVijAxkZXNpcmVkX2dvYWyUaAopgZR9lChoDWgTaBZLA4WUaBhoGyiWDAAAAAAAAAAAACDBAAAgwQAAIMGUaBNLA4WUaB50lFKUaCFoGyiWAwAAAAAAAAABAQGUaCVLA4WUaB50lFKUaCtoGyiWDAAAAAAAAAAAACBBAAAgQQAAIEGUaBNLA4WUaB50lFKUaDBoGyiWAwAAAAAAAAABAQGUaCVLA4WUaB50lFKUaDWMBS0xMC4wlGg3jAQxMC4wlGg5TnVijAtvYnNlcnZhdGlvbpRoCimBlH2UKGgNaBNoFksGhZRoGGgbKJYYAAAAAAAAAAAAIMEAACDBAAAgwQAAIMEAACDBAAAgwZRoE0sGhZRoHnSUUpRoIWgbKJYGAAAAAAAAAAEBAQEBAZRoJUsGhZRoHnSUUpRoK2gbKJYYAAAAAAAAAAAAIEEAACBBAAAgQQAAIEEAACBBAAAgQZRoE0sGhZRoHnSUUpRoMGgbKJYGAAAAAAAAAAEBAQEBAZRoJUsGhZRoHnSUUpRoNYwFLTEwLjCUaDeMBDEwLjCUaDlOdWJ1aBZOaA1OaDlOdWIu",
+ "spaces": "{'achieved_goal': Box(-10.0, 10.0, (3,), float32), 'desired_goal': Box(-10.0, 10.0, (3,), float32), 'observation': Box(-10.0, 10.0, (6,), float32)}",
+ "_shape": null,
+ "dtype": null,
+ "_np_random": null
+ },
+ "action_space": {
+ ":type:": "<class 'gymnasium.spaces.box.Box'>",
+ ":serialized:": "gAWVxQEAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLB4WUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWHAAAAAAAAAAAAIC/AACAvwAAgL8AAIC/AACAvwAAgL8AAIC/lGgLSweFlIwBQ5R0lFKUjA1ib3VuZGVkX2JlbG93lGgTKJYHAAAAAAAAAAEBAQEBAQGUaAiMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLB4WUaBZ0lFKUjARoaWdolGgTKJYcAAAAAAAAAAAAgD8AAIA/AACAPwAAgD8AAIA/AACAPwAAgD+UaAtLB4WUaBZ0lFKUjA1ib3VuZGVkX2Fib3ZllGgTKJYHAAAAAAAAAAEBAQEBAQGUaB1LB4WUaBZ0lFKUjAhsb3dfcmVwcpSMBC0xLjCUjAloaWdoX3JlcHKUjAMxLjCUjApfbnBfcmFuZG9tlE51Yi4=",
+ "dtype": "float32",
+ "_shape": [
+ 7
+ ],
+ "low": "[-1. -1. -1. -1. -1. -1. -1.]",
+ "bounded_below": "[ True True True True True True True]",
+ "high": "[1. 1. 1. 1. 1. 1. 1.]",
+ "bounded_above": "[ True True True True True True True]",
+ "low_repr": "-1.0",
+ "high_repr": "1.0",
+ "_np_random": null
+ },
+ "n_envs": 1,
+ "n_steps": 5,
+ "gamma": 0.99,
+ "gae_lambda": 1.0,
+ "ent_coef": 0.0,
+ "vf_coef": 0.5,
+ "max_grad_norm": 0.5,
+ "rollout_buffer_class": {
+ ":type:": "<class 'abc.ABCMeta'>",
+ ":serialized:": "gAWVOgAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwRRGljdFJvbGxvdXRCdWZmZXKUk5Qu",
+ "__module__": "stable_baselines3.common.buffers",
+ "__annotations__": "{'observation_space': <class 'gymnasium.spaces.dict.Dict'>, 'obs_shape': dict[str, tuple[int, ...]], 'observations': dict[str, numpy.ndarray]}",
+ "__doc__": "\n Dict Rollout buffer used in on-policy algorithms like A2C/PPO.\n Extends the RolloutBuffer to use dictionary observations\n\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to Monte-Carlo advantage estimate when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n ",
+ "__init__": "<function DictRolloutBuffer.__init__ at 0x0000024814B49C60>",
+ "reset": "<function DictRolloutBuffer.reset at 0x0000024814B49D00>",
+ "add": "<function DictRolloutBuffer.add at 0x0000024814B49E40>",
+ "get": "<function DictRolloutBuffer.get at 0x0000024814B49EE0>",
+ "_get_samples": "<function DictRolloutBuffer._get_samples at 0x0000024814B49F80>",
+ "__abstractmethods__": "frozenset()",
+ "_abc_impl": "<_abc._abc_data object at 0x0000024814B41980>"
+ },
+ "rollout_buffer_kwargs": {},
+ "normalize_advantage": false,
+ "lr_schedule": {
+ ":type:": "<class 'stable_baselines3.common.utils.FloatSchedule'>",
+ ":serialized:": "gAWVeQAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMDUZsb2F0U2NoZWR1bGWUk5QpgZR9lIwOdmFsdWVfc2NoZWR1bGWUaACMEENvbnN0YW50U2NoZWR1bGWUk5QpgZR9lIwDdmFslEc/RvAGjbi6x3Nic2Iu",
+ "value_schedule": "ConstantSchedule(val=0.0007)"
+ }
+ }
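For reference, the `data` file above records the full A2C configuration: MultiInputActorCriticPolicy, learning_rate 0.0007, n_steps 5, gamma 0.99, gae_lambda 1.0, 500,000 timesteps, and SB3's default RMSprop optimizer (alpha=0.99, eps=1e-5). The training script itself is not part of this commit; a minimal sketch of a run matching those recorded hyperparameters, assuming `panda_gym` supplies the environment:

```python
import gymnasium as gym
import panda_gym  # noqa: F401 - importing registers PandaReachJointsDense-v3

from stable_baselines3 import A2C

env = gym.make("PandaReachJointsDense-v3")

# Hyperparameters mirror the values stored in the data file above.
# SB3's A2C uses RMSprop(alpha=0.99, eps=1e-5, weight_decay=0) by default,
# so no policy_kwargs are needed to reproduce the recorded optimizer.
model = A2C(
    "MultiInputPolicy",
    env,
    learning_rate=7e-4,
    n_steps=5,
    gamma=0.99,
    gae_lambda=1.0,
    ent_coef=0.0,
    vf_coef=0.5,
    max_grad_norm=0.5,
    verbose=1,
    tensorboard_log="runs",  # the file records "runs/p67z2l0u"
)
model.learn(total_timesteps=500_000)
model.save("a2c_sb3_panda_reach")
```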
a2c_sb3_panda_reach/policy.optimizer.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:556dbb4d49ea49989578e7eccb6e38063f5f6ecc6157409732c3b509d29318b7
+ size 49685
a2c_sb3_panda_reach/policy.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86890ac6337c40ea7660d11086bc0fe252c217838e88daf056f71eec151cb8cd
+ size 47676
a2c_sb3_panda_reach/pytorch_variables.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98b4d78ebb79a7f668431910a5959abdce137f13e82a02bcc28e585416046b97
+ size 1261
a2c_sb3_panda_reach/system_info.txt ADDED
@@ -0,0 +1,8 @@
+ - OS: Windows-11-10.0.26200-SP0 10.0.26200
+ - Python: 3.12.6
+ - Stable-Baselines3: 2.7.1
+ - PyTorch: 2.10.0+cpu
+ - GPU Enabled: False
+ - Numpy: 1.26.4
+ - Cloudpickle: 3.1.2
+ - Gymnasium: 1.2.3
config.json CHANGED
@@ -1 +1 @@
- {"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x000001F7552489A0>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x000001F755248A40>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x000001F755248AE0>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x000001F755248B80>", "_build": "<function ActorCriticPolicy._build at 0x000001F755248C20>", "forward": "<function ActorCriticPolicy.forward at 0x000001F755248CC0>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x000001F755248D60>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x000001F755248E00>", "_predict": "<function ActorCriticPolicy._predict at 0x000001F755248EA0>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x000001F755248F40>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x000001F755248FE0>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x000001F755249080>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x000001F75523E2C0>"}, "verbose": 1, "policy_kwargs": {":type:": "<class 'dict'>", ":serialized:": "gAWVgQAAAAAAAAB9lCiMD29wdGltaXplcl9jbGFzc5SME3RvcmNoLm9wdGltLnJtc3Byb3CUjAdSTVNwcm9wlJOUjBBvcHRpbWl6ZXJfa3dhcmdzlH2UKIwFYWxwaGGURz/vrhR64UeujANlcHOURz7k+LWI42jxjAx3ZWlnaHRfZGVjYXmUSwB1dS4=", "optimizer_class": "<class 'torch.optim.rmsprop.RMSprop'>", "optimizer_kwargs": {"alpha": 0.99, "eps": 1e-05, "weight_decay": 0}}, "num_timesteps": 100000, "_total_timesteps": 100000, "_num_timesteps_at_start": 0, "seed": null, 
"action_noise": null, "start_time": 1773223101838949400, "learning_rate": 0.0007, "tensorboard_log": null, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVtgAAAAAAAACME251bXB5Ll9jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWQAAAAAAAAAAOw54+iLQzvgF9u7vFtok+21xvPX8PVb0x8NY7wVQzPm0BBz4DJD8+79WyOiaygr43Ktw7wWpfPh/ybDz2LJa+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksESwSGlIwBQ5R0lFKULg=="}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVeAAAAAAAAACME251bXB5Ll9jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWBAAAAAAAAAAAAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksEhZSMAUOUdJRSlC4="}, "_last_original_obs": null, "_episode_num": 0, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": 0.0, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVLgwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQFoAAAAAAACMAWyUS2iMAXSUR0BAHEnssxwidX2UKGgGR0BZQAAAAAAAaAdLZWgIR0BAItE5QxetdX2UKGgGR0BdQAAAAAAAaAdLdWgIR0BALG6PKdQPdX2UKGgGR0BbwAAAAAAAaAdLb2gIR0BALQQlKK51dX2UKGgGR0BTwAAAAAAAaAdLT2gIR0BAMpx3mmtRdX2UKGgGR0BhwAAAAAAAaAdLjmgIR0BATA4n4O+adX2UKGgGR0BcQAAAAAAAaAdLcWgIR0BAS+10DEFXdX2UKGgGR0BZQAAAAAAAaAdLZWgIR0BAUGFzuF6BdX2UKGgGR0BoAAAAAAAAaAdLwGgIR0BAYZPuXu3MdX2UKGgGR0BkoAAAAAAAaAdLpWgIR0BAe3+uNgjRdX2UKGgGR0BlIAAAAAAAaAdLqWgIR0BAe6BZpztDdX2UKGgGR0Bs4AAAAAAAaAdL52gIR0BAj/CqIacadX2UKGgGR0BrQAAAAAAAaAdL2mgIR0BAn3gk1MufdX2UKGgGR0BogAAAAAAAaAdLxGgIR0BAsL74zrNXdX2UKGgGR0BnwAAAAAAAaAdLvmgIR0BAw2q1gH/tdX2UKGgGR0BkoAAAAAAAaAdLpWgIR0BAykI5YHPedX2UKGgGR0B3sAAAAAAAaAdNewFoCEdAQOF5UtI07HV9lChoBkdAaYAAAAAAAGgHS8xoCEdAQOXlEJBw/HV9lChoBkdAaeAAAAAAAGgHS89oCEdAQQHEwWWQfnV9lChoBkdAckAAAAAAAGgHTSQBaAhHQEESRdQfp2V1fZQoaAZHQHAQAAAAAABoB00BAWgIR0BBLBsQ/X5GdX2UKGgGR0B2wAAAAAAAaAdNbAFoCEdAQUQkLQXyiHV9lChoBkdAacAAAAAAAGgHS85oCEdAQUorBj4Ho3V9lChoBkdAcjAAAAAAAGgHTSMBaAhHQEFQwvg3tKJ1fZQoaAZHQG6AAAAAAABoB0v0aAhHQEFwDxsl9jR1fZQoaAZHQGrgAAAAAABoB0vXaAhHQEGDroGIKtx1fZQoaAZHQH9AAAAAAABoB030AWgIR0BBzHKW9lErdX2UKGgGR0B8cAAAAAAAaAdNxwFoCEdAQcxR4yGi6HV9lChoBkdAeAAAAAAAAGgHTYABaAhHQEHs1PWQOnV1fZQoaAZHQH9AAAAAAABoB030AWgIR0BB9hO58Sf2dX2UKGgGR0BwoAAAAAAAaAdNCgFoCEdAQhKSV4X403V9lChoBkdAcQAAAAAAAGgHTRABaAhHQEIUy+pOvdN1fZQoaAZHQDUAAAAAAABoB0sVaAhHQEIb1hb4agp1fZQoaAZHQH9AAAAAAABoB030AWgIR0BCdLcTJyQxdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAQn/49HMEBHV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEKdEjPfKp11fZQoaAZHQH9AAAAAAABoB030AWgIR0BCptz0Yj0MdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAQwD5GjKxLXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEMKdbxEv011fZQoaAZHQH9AAAAAAABoB030AWgIR0BDKk4//vORdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAQzJo0ygwoXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEOKkyDZlFt1fZQoaAZHQH9AAAAAAABoB030AWgIR0BDlMBIWgvldX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAQ7ECih37lHV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEO6kZaV2Rt1fZQoaAZHQH9AAAAAAABoB030AWgIR0BEGxhDw6QvdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdARCRblijL0XV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQERDHvttygh1fZQoaAZHQH9AAAAAAABoB030AWgIR0BES37UG3WndX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdARKPfdhy8z3V9lChoBkdAf0AAAAAAAGgHTfQBaAhHQESr8qnWJ791fZQoaAZHQH9AAAAAAABoB030AWgIR0BEyvxpcophdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdARNQWYWtU43V9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEUvjbSJCSl1fZQoaAZHQH9AAAAAAABoB030AWgIR0BFOyncclw+dX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdARV3225QP7XV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEVnugpSaVl1fZQoaAZHQH9AAAAAAABoB030AWgIR0BFyeVkc0cfdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdARdSJl8PWhHV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEX08U21lXl1fZQoaAZHQH9AAAAAAABoB030AWgIR0BF/zPBzmwJdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdARlrZ8KG+K3V9lChoBkdAf0AAAAAAAGgHTfQBaAhHQ
EZl8F6iTMd1fZQoaAZHQH9AAAAAAABoB030AWgIR0BGhGUwBYFJdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdARov9LpRoAXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEbjzJ6po9N1fZQoaAZHQH9AAAAAAABoB030AWgIR0BG7kroW56MdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdARwv+n62v0XV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEcUpvxYq5N1fZQoaAZHQH9AAAAAAABoB030AWgIR0BHaeAd4mkWdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAR3XJDE3sHHV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEeWp7TlT3t1fZQoaAZHQH9AAAAAAABoB030AWgIR0BHoIl2NedDdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAR/x2yLQ5WHV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEgJy9VWCEp1fZQoaAZHQH9AAAAAAABoB030AWgIR0BIK9pAUtZndX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdASDa5I6KceHV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEiptNSIgvF1fZQoaAZHQH9AAAAAAABoB030AWgIR0BItk0SAYpEdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdASNdN8E3bVXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEjghUzbeuV1fZQoaAZHQH9AAAAAAABoB030AWgIR0BJQ+C04R29dX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdASVEC5mRNh3V9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEl0YVIqbz91fZQoaAZHQH9AAAAAAABoB030AWgIR0BJf4HoouwpdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdASfiNlyzXz3V9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEoQe1a4c3l1fZQoaAZHQH9AAAAAAABoB030AWgIR0BKTsMy8BdVdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdASlrcIqsls3V9lChoBkdAf0AAAAAAAGgHTfQBaAhHQErOTRplBhR1fZQoaAZHQH9AAAAAAABoB030AWgIR0BK3CD28IzFdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdASv0c2itaIXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEsG43m3fAN1fZQoaAZHQH9AAAAAAABoB030AWgIR0BLim3OObRXdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAS5m3F1jiGXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEvGPDHfdh11fZQoaAZHQH9AAAAAAABoB030AWgIR0BL0x3u/k/9dX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdATEZ9oexOcnV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQExQlDWsijd1fZQoaAZHQH9AAAAAAABoB030AWgIR0BMd8nVoYeldWUu"}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 5000, "observation_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWV/wEAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLBIWUjANsb3eUjBNudW1weS5fY29yZS5udW1lcmljlIwLX2Zyb21idWZmZXKUk5QolhAAAAAAAAAAmpmZwAAAgP9Qd9a+AACA/5RoC0sEhZSMAUOUdJRSlIwNYm91bmRlZF9iZWxvd5RoEyiWBAAAAAAAAAABAAEAlGgIjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwSFlGgWdJRSlIwEaGlnaJRoEyiWEAAAAAAAAACamZlAAACAf1B31j4AAIB/lGgLSwSFlGgWdJRSlIwNYm91bmRlZF9hYm92ZZRoEyiWBAAAAAAAAAABAAEAlGgdSwSFlGgWdJRSlIwIbG93X3JlcHKUjDFbLTQuOCAgICAgICAgICAgICAgIC1pbmYgLTAuNDE4ODc5MDMgICAgICAgIC1pbmZdlIwJaGlnaF9yZXBylIwtWzQuOCAgICAgICAgICAgICAgIGluZiAwLjQxODg3OTAzICAgICAgICBpbmZdlIwKX25wX3JhbmRvbZROdWIu", "dtype": "float32", "_shape": [4], "low": "[-4.8 -inf -0.41887903 -inf]", "bounded_below": "[ True False True False]", "high": "[4.8 inf 0.41887903 inf]", "bounded_above": "[ True False True False]", "low_repr": "[-4.8 -inf -0.41887903 -inf]", "high_repr": "[4.8 inf 0.41887903 inf]", "_np_random": null}, "action_space": {":type:": "<class 'gymnasium.spaces.discrete.Discrete'>", ":serialized:": "gAWV3AAAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwFZHR5cGWUjAVudW1weZSMBWR0eXBllJOUjAJpOJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRijAFulIwWbnVtcHkuX2NvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlGgLQwgCAAAAAAAAAJSGlFKUjAVzdGFydJRoEWgLQwgAAAAAAAAAAJSGlFKUjAZfc2hhcGWUKYwKX25wX3JhbmRvbZROdWIu", "dtype": "int64", "n": "2", "start": "0", "_shape": [], "_np_random": null}, "n_envs": 4, "n_steps": 5, "gamma": 0.99, "gae_lambda": 1.0, "ent_coef": 0.0, "vf_coef": 0.5, "max_grad_norm": 0.5, "rollout_buffer_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": 
"gAWVNgAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwNUm9sbG91dEJ1ZmZlcpSTlC4=", "__module__": "stable_baselines3.common.buffers", "__annotations__": "{'observations': <class 'numpy.ndarray'>, 'actions': <class 'numpy.ndarray'>, 'rewards': <class 'numpy.ndarray'>, 'advantages': <class 'numpy.ndarray'>, 'returns': <class 'numpy.ndarray'>, 'episode_starts': <class 'numpy.ndarray'>, 'log_probs': <class 'numpy.ndarray'>, 'values': <class 'numpy.ndarray'>}", "__doc__": "\n Rollout buffer used in on-policy algorithms like A2C/PPO.\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to classic advantage when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n ", "__init__": "<function RolloutBuffer.__init__ at 0x000001F7551A1120>", "reset": "<function RolloutBuffer.reset at 0x000001F7551A11C0>", "compute_returns_and_advantage": "<function RolloutBuffer.compute_returns_and_advantage at 0x000001F7551A1260>", "add": "<function RolloutBuffer.add at 0x000001F7551A13A0>", "get": "<function RolloutBuffer.get at 0x000001F7551A1440>", "_get_samples": "<function RolloutBuffer._get_samples at 0x000001F7551A14E0>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x000001F75519DC80>"}, "rollout_buffer_kwargs": {}, "normalize_advantage": false, "lr_schedule": {":type:": "<class 'stable_baselines3.common.utils.FloatSchedule'>", ":serialized:": "gAWVeQAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMDUZsb2F0U2NoZWR1bGWUk5QpgZR9lIwOdmFsdWVfc2NoZWR1bGWUaACMEENvbnN0YW50U2NoZWR1bGWUk5QpgZR9lIwDdmFslEc/RvAGjbi6x3Nic2Iu", "value_schedule": "ConstantSchedule(val=0.0007)"}, "system_info": {"OS": "Windows-11-10.0.26200-SP0 10.0.26200", "Python": "3.12.6", "Stable-Baselines3": "2.7.1", "PyTorch": "2.10.0+cpu", "GPU Enabled": "False", "Numpy": "2.2.4", "Cloudpickle": "3.1.2", "Gymnasium": "1.2.3", "OpenAI Gym": "0.26.2"}}
+ {"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVRQAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMG011bHRpSW5wdXRBY3RvckNyaXRpY1BvbGljeZSTlC4=", "__module__": "stable_baselines3.common.policies", "__doc__": "\n MultiInputActorClass policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space (Tuple)\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Uses the CombinedExtractor\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function MultiInputActorCriticPolicy.__init__ at 0x0000024814FD20C0>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x0000024814FCF280>"}, "verbose": 1, "policy_kwargs": {":type:": "<class 'dict'>", ":serialized:": "gAWVgQAAAAAAAAB9lCiMD29wdGltaXplcl9jbGFzc5SME3RvcmNoLm9wdGltLnJtc3Byb3CUjAdSTVNwcm9wlJOUjBBvcHRpbWl6ZXJfa3dhcmdzlH2UKIwFYWxwaGGURz/vrhR64UeujANlcHOURz7k+LWI42jxjAx3ZWlnaHRfZGVjYXmUSwB1dS4=", "optimizer_class": "<class 'torch.optim.rmsprop.RMSprop'>", "optimizer_kwargs": {"alpha": 0.99, "eps": 1e-05, "weight_decay": 0}}, "num_timesteps": 500000, "_total_timesteps": 500000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1773829211581250500, "learning_rate": 0.0007, "tensorboard_log": "runs/p67z2l0u", "_last_obs": {":type:": "<class 'collections.OrderedDict'>", ":serialized:": "gAWVKwEAAAAAAACMC2NvbGxlY3Rpb25zlIwLT3JkZXJlZERpY3SUk5QpUpQojA1hY2hpZXZlZF9nb2FslIwSbnVtcHkuY29yZS5udW1lcmljlIwLX2Zyb21idWZmZXKUk5QolgwAAAAAAAAAUmRiveVyxD3nPVo+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksBSwOGlIwBQ5R0lFKUjAxkZXNpcmVkX2dvYWyUaAcolgwAAAAAAAAA2SUJvry2wT2qCSg+lGgOSwFLA4aUaBJ0lFKUjAtvYnNlcnZhdGlvbpRoByiWGAAAAAAAAABSZGK95XLEPec9Wj5lECe+V33VvUDzjz2UaA5LAUsGhpRoEnSUUpR1Lg==", "achieved_goal": "[[-0.05527145 0.09592227 0.21312676]]", "desired_goal": "[[-0.13393344 0.09458682 0.16409937]]", "observation": "[[-0.05527145 0.09592227 0.21312676 -0.16314848 -0.10424297 0.07028818]]"}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": 
"gAWVdAAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYBAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwGFlIwBQ5R0lFKULg=="}, "_last_original_obs": null, "_episode_num": 0, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": 0.0, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWV4AsAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHv8uxlg+hXbOMAWyUSwOMAXSUR0CaURvxYq5LdX2UKGgGR7/XML4N7SiNaAdLBGgIR0CaUSQiiZfEdX2UKGgGR7/bl7dBSk0raAdLBGgIR0CaUS9l2/zrdX2UKGgGR7/sRCpm29csaAdLCWgIR0CaUUkB0ZFYdX2UKGgGR7/pcj7hvR7aaAdLCWgIR0CaUWi9Zid8dX2UKGgGR7/ctvXK8tf5aAdLBWgIR0CaUXsr/bTMdX2UKGgGR7/SrO7g88s+aAdLBGgIR0CaUYuNxVABdX2UKGgGR7+5OHnEETxoaAdLAmgIR0CaUZGzru6VdX2UKGgGR7/IZUDMeOn3aAdLA2gIR0CaUZ39JjDsdX2UKGgGR7/JO1v2oNutaAdLA2gIR0CaUaczqKP5dX2UKGgGR7/ON96Tnq3WaAdLA2gIR0CaUbWJrLyMdX2UKGgGR7+7WZqmCROlaAdLAmgIR0CaUbqoqCpWdX2UKGgGR7/VZmZmZmZmaAdLBGgIR0CaUcwRGtp3dX2UKGgGR7+00iyIHkcTaAdLAmgIR0CaUdI2fkFOdX2UKGgGR7/sIQe3hGYsaAdLCWgIR0CaUgLeyiVTdX2UKGgGR7/IXzlLeyiVaAdLA2gIR0CaUhRHPNVzdX2UKGgGR7+Sr1dxAB1caAdLAWgIR0CaUhlmOEM9dX2UKGgGR7/aZr56+nIiaAdLBWgIR0CaUi7mMfihdX2UKGgGR7+7gccU/OdHaAdLAmgIR0CaUjULDye7dX2UKGgGR7/Sd8zAN5MUaAdLBGgIR0CaUkRm9QGfdX2UKGgGR7/lJcHGCI1taAdLB2gIR0CaUmES/TLGdX2UKGgGR7/cRGMGX5WSaAdLBWgIR0CaUnSHdoFndX2UKGgGR7/l3IEKVpsXaAdLB2gIR0CaUo4h2W6cdX2UKGgGR7/QBDXvphWpaAdLBGgIR0CaUp19v0iAdX2UKGgGR7/AL7XQMQVcaAdLA2gIR0CaUqWt2cJ/dX2UKGgGR7/ozJIUahpQaAdLCWgIR0CaUsd4VymzdX2UKGgGR7/FESdvsJIEaAdLA2gIR0CaUtG2CulodX2UKGgGR7/DwfhddE9daAdLA2gIR0CaUtvy9VWCdX2UKGgGR7/k9mpVCHARaAdLB2gIR0CaUv7EYO2BdX2UKGgGR7/Yog3cYZVGaAdLBGgIR0CaUx+IdlundX2UKGgGR7/haJIlMRHxaAdLB2gIR0CaU0JYkmhNdX2UKGgGR7/UiHqNZNfxaAdLBGgIR0CaU1cIqsltdX2UKGgGR7/BCJGe+VTraAdLAmgIR0CaU185S3spdX2UKGgGR7/w5XZGrjo7aAdLCmgIR0CaU4o6CDmKdX2UKGgGR7/kkdmxt52RaAdLBWgIR0CaU561stTUdX2UKGgGR7/PoGpuMuOCaAdLA2gIR0CaU6wEQoTgdX2UKGgGR7/ZkfLcKw6iaAdLBWgIR0CaU796C17ZdX2UKGgGR7/XDQ7cO9WZaAdLBGgIR0CaU9Hn2ZiNdX2UKGgGR7/ad+ocaOxTaAdLBWgIR0CaU+VcUucudX2UKGgGR7+lYwIt16mgaAdLAWgIR0CaU+l0YCQtdX2UKGgGR7/dcinpB5X2aAdLBmgIR0CaVAEBbOeKdX2UKGgGR7/ZWHUMG5c1aAdLBWgIR0CaVBR2KVIJdX2UKGgGR7/J8qnWJ79iaAdLA2gIR0CaVCHFglWwdX2UKGgGR7/H2g3974SIaAdLA2gIR0CaVCr/KhcrdX2UKGgGR7/h32EkB0ZFaAdLBmgIR0CaVEq7yxzJdX2UKGgGR7+1NcnmaH9FaAdLAmgIR0CaVFkSElE7dX2UKGgGR7/HKBd2Pkq+aAdLA2gIR0CaVGJIDoyLdX2UKGgGR7/nDin5zo2XaAdLB2gIR0CaVIJgLJCCdX2UKGgGR7/AdV/+bVjJaAdLAmgIR0CaVIiExqO+dX2UKGgGR7/janJkoWpIaAdLCWgIR0CaVK5p8F6idX2UKGgGR7/gPl+3H7xeaAdLBmgIR0CaVMPo3aSLdX2UKGgGR7/ZXwsoUi6haAdLBGgIR0CaVNRKpT/AdX2UKGgGR7+ocBEKE385aAdLAWgIR0CaVNddE9dNdX2UKGgGR7+2DEm6XjU/aAdLAmgIR0CaVOGbTc7AdX2UKGgGR7/ZXZGrjo6kaAdLBWgIR0CaVPUPQOWjdX2UKGgGR7/XdRzijtXxaAdLBWgIR0CaVQqREF4cdX2UKGgGR7/nzv7WNFSbaAdLBmgIR0CaVSCYkVvddX2UKGgGR7/W2Xb/Ot4iaAdLBGgIR0CaVS/0/W1/dX2UKGgGR7/QNuLrHEMtaAdLA2gIR0CaVTgmqo60dX2UKGgGR7/XY3Ns3yZsaAdLBWgIR0CaVUqU/wAmdX2UKGgGR7++MCLdepn6aAdLAmgIR0CaVVXYDklvdX2UKGgGR7+8f4h2W6bwaAdLAmgIR0CaVVv9LpRodX2UKGgGR7+5qwhW5paiaAdLAmgIR0CaVWEbHZK4dX2UKGgGR7/MF0PpY9xIaAdLA2gIR0CaVW9w3o9tdX2UKGgGR7+78DSw4bS7aAdLAmgIR0CaVXSPU8V6dX2UKGgGR7/WNlyzXz19aAdLBWgIR0CaVYgEEC/5dX2UKGgGR7/FQC0WuX/paAdLA2gIR0CaVZZaV2RrdX2UKGgGR7/fGkep4rz5aAdLBmgIR0CaVavalDWtdX2UKGgGR7/XKifxtpEhaAdLBWgIR0CaVb9Q40djdX2UKGgGR7/YRtP557gLaAdLBGgIR0CaVc2lEZzgdX2UKGgGR7+77l7tzCDVaAdLAmgIR0CaVdLEk0JodX2UKGgGR7++dI5HVf/naAdLAmgIR0CaVd0dBBzFdX2UKGgGR7/mPvrnkkrxaAdLB2gIR0CaVfP6be/IdX2UKGgGR7/gI8hcJMQFaAdLBGgIR0CaVgPcSGrTdX2UKGgGR7/AyxiXpnpTaAdLAmgIR0CaVgj59E1EdX2UKGgGR7/hzURWcSXdaAdLBmgIR0CaViasIVuadX2UKGgGR7/c+JP69CeFaAdLBWgIR0CaVjkadc0MdX2UKG
gGR7/e8hLXcxj8aAdLBmgIR0CaVk2VVxS6dX2UKGgGR7/fBRhttQ9BaAdLBGgIR0CaVlzyBkI5dX2UKGgGR7/hq8cuJ1q4aAdLBmgIR0CaVnJx//eddX2UKGgGR7+9bB42S+xoaAdLAmgIR0CaVneQdS2qdX2UKGgGR7/O1JlJ6IFeaAdLA2gIR0CaVoPaL4vfdX2UKGgGR7+8Hu7YkE9uaAdLAmgIR0CaVoj5KvmpdX2UKGgGR7/DTzd1uBMBaAdLAmgIR0CaVpQ8fV7QdX2UKGgGR7+8ypJf6XSjaAdLAmgIR0CaVplbNbC8dX2UKGgGR7+kq4H5aePJaAdLAWgIR0CaVpxtpEhJdX2UKGgGR7/ZHc1wYLssaAdLBWgIR0CaVrFr2xptdX2UKGgGR7/dzPKMefZmaAdLBWgIR0CaVskZ75VPdX2UKGgGR7/REUj9n9NvaAdLA2gIR0CaVth2GIsRdX2UKGgGR7/bqmCROk+HaAdLBmgIR0CaVvEJSiuddX2UKGgGR7/xG9+PRzBAaAdLCmgIR0CaVyAjps42dX2UKGgGR7/DPD50r9VFaAdLA2gIR0CaVzCF9KEndX2UKGgGR7/XZWJaaCtjaAdLBGgIR0CaVz7bcoH+dX2UKGgGR7/gJjDsMRYjaAdLBmgIR0CaV1yNXHR1dX2UKGgGR7/FTTfBN21VaAdLA2gIR0CaV2S+QEIPdX2UKGgGR7+6DVYp2ECeaAdLAmgIR0CaV3ABkqc3dX2UKGgGR7/I7tiQT238aAdLA2gIR0CaV3gyuZCwdX2UKGgGR7++OAAhje9BaAdLAmgIR0CaV4N2C/XYdX2UKGgGR7/Q01ZTyauwaAdLBGgIR0CaV4/BWPtEdX2UKGgGR7/e0Q9RrJr+aAdLBmgIR0CaV62v0RODdWUu"}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVhgAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIhlLg=="}, "_n_updates": 100000, "observation_space": {":type:": "<class 'gymnasium.spaces.dict.Dict'>", ":serialized:": "gAWVkQMAAAAAAACMFWd5bW5hc2l1bS5zcGFjZXMuZGljdJSMBERpY3SUk5QpgZR9lCiMBnNwYWNlc5R9lCiMDWFjaGlldmVkX2dvYWyUjBRneW1uYXNpdW0uc3BhY2VzLmJveJSMA0JveJSTlCmBlH2UKIwFZHR5cGWUjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRijAZfc2hhcGWUSwOFlIwDbG93lIwSbnVtcHkuY29yZS5udW1lcmljlIwLX2Zyb21idWZmZXKUk5QolgwAAAAAAAAAAAAgwQAAIMEAACDBlGgTSwOFlIwBQ5R0lFKUjA1ib3VuZGVkX2JlbG93lGgbKJYDAAAAAAAAAAEBAZRoEIwCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksDhZRoHnSUUpSMBGhpZ2iUaBsolgwAAAAAAAAAAAAgQQAAIEEAACBBlGgTSwOFlGgedJRSlIwNYm91bmRlZF9hYm92ZZRoGyiWAwAAAAAAAAABAQGUaCVLA4WUaB50lFKUjAhsb3dfcmVwcpSMBS0xMC4wlIwJaGlnaF9yZXBylIwEMTAuMJSMCl9ucF9yYW5kb22UTnVijAxkZXNpcmVkX2dvYWyUaAopgZR9lChoDWgTaBZLA4WUaBhoGyiWDAAAAAAAAAAAACDBAAAgwQAAIMGUaBNLA4WUaB50lFKUaCFoGyiWAwAAAAAAAAABAQGUaCVLA4WUaB50lFKUaCtoGyiWDAAAAAAAAAAAACBBAAAgQQAAIEGUaBNLA4WUaB50lFKUaDBoGyiWAwAAAAAAAAABAQGUaCVLA4WUaB50lFKUaDWMBS0xMC4wlGg3jAQxMC4wlGg5TnVijAtvYnNlcnZhdGlvbpRoCimBlH2UKGgNaBNoFksGhZRoGGgbKJYYAAAAAAAAAAAAIMEAACDBAAAgwQAAIMEAACDBAAAgwZRoE0sGhZRoHnSUUpRoIWgbKJYGAAAAAAAAAAEBAQEBAZRoJUsGhZRoHnSUUpRoK2gbKJYYAAAAAAAAAAAAIEEAACBBAAAgQQAAIEEAACBBAAAgQZRoE0sGhZRoHnSUUpRoMGgbKJYGAAAAAAAAAAEBAQEBAZRoJUsGhZRoHnSUUpRoNYwFLTEwLjCUaDeMBDEwLjCUaDlOdWJ1aBZOaA1OaDlOdWIu", "spaces": "{'achieved_goal': Box(-10.0, 10.0, (3,), float32), 'desired_goal': Box(-10.0, 10.0, (3,), float32), 'observation': Box(-10.0, 10.0, (6,), float32)}", "_shape": null, "dtype": null, "_np_random": null}, "action_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWVxQEAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLB4WUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWHAAAAAAAAAAAAIC/AACAvwAAgL8AAIC/AACAvwAAgL8AAIC/lGgLSweFlIwBQ5R0lFKUjA1ib3VuZGVkX2JlbG93lGgTKJYHAAAAAAAAAAEBAQEBAQGUaAiMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLB4WUaBZ0lFKUjARoaWdolGgTKJYcAAAAAAAAAAAAgD8AAIA/AACAPwAAgD8AAIA/AACAPwAAgD+UaAtLB4WUaBZ0lFKUjA1ib3VuZGVkX2Fib3ZllGgTKJYHAAAAAAAAAAEBAQEBAQGUaB1LB4WUaBZ0lFKUjAhsb3dfcmVwcpSMBC0xLjCUjAloaWdoX3JlcHKUjAMxLjCUjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "_shape": [7], "low": "[-1. -1. -1. -1. -1. -1. 
-1.]", "bounded_below": "[ True True True True True True True]", "high": "[1. 1. 1. 1. 1. 1. 1.]", "bounded_above": "[ True True True True True True True]", "low_repr": "-1.0", "high_repr": "1.0", "_np_random": null}, "n_envs": 1, "n_steps": 5, "gamma": 0.99, "gae_lambda": 1.0, "ent_coef": 0.0, "vf_coef": 0.5, "max_grad_norm": 0.5, "rollout_buffer_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOgAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwRRGljdFJvbGxvdXRCdWZmZXKUk5Qu", "__module__": "stable_baselines3.common.buffers", "__annotations__": "{'observation_space': <class 'gymnasium.spaces.dict.Dict'>, 'obs_shape': dict[str, tuple[int, ...]], 'observations': dict[str, numpy.ndarray]}", "__doc__": "\n Dict Rollout buffer used in on-policy algorithms like A2C/PPO.\n Extends the RolloutBuffer to use dictionary observations\n\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to Monte-Carlo advantage estimate when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n ", "__init__": "<function DictRolloutBuffer.__init__ at 0x0000024814B49C60>", "reset": "<function DictRolloutBuffer.reset at 0x0000024814B49D00>", "add": "<function DictRolloutBuffer.add at 0x0000024814B49E40>", "get": "<function DictRolloutBuffer.get at 0x0000024814B49EE0>", "_get_samples": "<function DictRolloutBuffer._get_samples at 0x0000024814B49F80>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x0000024814B41980>"}, "rollout_buffer_kwargs": {}, "normalize_advantage": false, "lr_schedule": {":type:": "<class 'stable_baselines3.common.utils.FloatSchedule'>", ":serialized:": "gAWVeQAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMDUZsb2F0U2NoZWR1bGWUk5QpgZR9lIwOdmFsdWVfc2NoZWR1bGWUaACMEENvbnN0YW50U2NoZWR1bGWUk5QpgZR9lIwDdmFslEc/RvAGjbi6x3Nic2Iu", "value_schedule": "ConstantSchedule(val=0.0007)"}, "system_info": {"OS": "Windows-11-10.0.26200-SP0 10.0.26200", "Python": "3.12.6", "Stable-Baselines3": "2.7.1", "PyTorch": "2.10.0+cpu", "GPU Enabled": "False", "Numpy": "1.26.4", "Cloudpickle": "3.1.2", "Gymnasium": "1.2.3"}}
results.json CHANGED
@@ -1 +1 @@
- {"mean_reward": 500.0, "std_reward": 0.0, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2026-03-11T11:27:28.685873"}
+ {"mean_reward": -0.3833261474967003, "std_reward": 0.27460738643820504, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2026-03-18T11:48:22.434605"}