frankcholula committed
Commit a196170 · verified · Parent(s): 34ef46c

Initial commit

README.md CHANGED
@@ -16,7 +16,7 @@ model-index:
  type: MountainCarContinuous-v0
  metrics:
  - type: mean_reward
- value: -66.03 +/- 0.27
+ value: 85.01 +/- 2.53
  name: mean_reward
  verified: false
  ---
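
The headline metric jumps from -66.03 to 85.01, matching the re-evaluation recorded in results.json below (a positive score on MountainCarContinuous-v0 means the car actually reaches the goal). For reference, a minimal sketch of how such a model-card figure is typically computed with stable-baselines3; the episode count and determinism flag come from results.json, while evaluating on a bare env is a simplification, since this repo also ships VecNormalize statistics (see the final sketch in this diff).

```python
# Minimal sketch: reproduce a model-card `mean_reward +/- std_reward` figure.
# Assumes an SB3 v2 / Gymnasium setup; n_eval_episodes=10 and
# deterministic=True match results.json in this commit.
import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

model = PPO.load("ppo-MountainCarContinuous-v0.zip")
env = gym.make("MountainCarContinuous-v0")
mean_reward, std_reward = evaluate_policy(
    model, env, n_eval_episodes=10, deterministic=True
)
print(f"mean_reward: {mean_reward:.2f} +/- {std_reward:.2f}")
```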
args.yml CHANGED
@@ -56,13 +56,13 @@
  - - save_replay_buffer
  - false
  - - seed
- - 3581364082
+ - 122093306
  - - storage
  - null
  - - study_name
  - null
  - - tensorboard_log
- - runs/MountainCarContinuous-v0__ppo__3581364082__1755345830
+ - runs/MountainCarContinuous-v0__ppo__122093306__1755347236
  - - track
  - true
  - - trained_agent
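
The only changes in args.yml are the seed and the tensorboard_log path derived from it; the path follows an env__algo__seed__timestamp pattern. A hypothetical sketch of how such a run name is assembled (the variable names are assumptions, not the training script's actual code):

```python
# Hypothetical reconstruction of the run-name pattern visible above:
# runs/<env_id>__<algo>__<seed>__<unix_timestamp>
import time

env_id, algo, seed = "MountainCarContinuous-v0", "ppo", 122093306
run_name = f"{env_id}__{algo}__{seed}__{int(time.time())}"
tensorboard_log = f"runs/{run_name}"
# e.g. runs/MountainCarContinuous-v0__ppo__122093306__1755347236
```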
ppo-MountainCarContinuous-v0.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3614d4136b8759d1115d7490633d19a0b6f3f25d56111539cbb07cb49673e37
- size 138026
+ oid sha256:b6ab25b906942faf2b9d15bd8a81e7e8f7244d8eb573baf25994546e15790c5f
+ size 138717
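
The checkpoint itself lives in Git LFS, so only the pointer file changes here. Because an LFS oid is the SHA-256 digest of the object's content, a downloaded artifact can be verified against the pointer fields; a minimal sketch, assuming the file sits in the working directory:

```python
# Verify a Git LFS object against its pointer (oid = sha256 of the content).
import hashlib
import os

path = "ppo-MountainCarContinuous-v0.zip"
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == "b6ab25b906942faf2b9d15bd8a81e7e8f7244d8eb573baf25994546e15790c5f"
assert os.path.getsize(path) == 138717
```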
ppo-MountainCarContinuous-v0/data CHANGED
@@ -4,20 +4,20 @@
  ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
  "__module__": "stable_baselines3.common.policies",
  "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
- "__init__": "<function ActorCriticPolicy.__init__ at 0x7ff9e524c3a0>",
- "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7ff9e524c430>",
- "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7ff9e524c4c0>",
- "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7ff9e524c550>",
- "_build": "<function ActorCriticPolicy._build at 0x7ff9e524c5e0>",
- "forward": "<function ActorCriticPolicy.forward at 0x7ff9e524c670>",
- "extract_features": "<function ActorCriticPolicy.extract_features at 0x7ff9e524c700>",
- "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7ff9e524c790>",
- "_predict": "<function ActorCriticPolicy._predict at 0x7ff9e524c820>",
- "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7ff9e524c8b0>",
- "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7ff9e524c940>",
- "predict_values": "<function ActorCriticPolicy.predict_values at 0x7ff9e524c9d0>",
+ "__init__": "<function ActorCriticPolicy.__init__ at 0x7f062a3a43a0>",
+ "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7f062a3a4430>",
+ "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7f062a3a44c0>",
+ "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7f062a3a4550>",
+ "_build": "<function ActorCriticPolicy._build at 0x7f062a3a45e0>",
+ "forward": "<function ActorCriticPolicy.forward at 0x7f062a3a4670>",
+ "extract_features": "<function ActorCriticPolicy.extract_features at 0x7f062a3a4700>",
+ "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7f062a3a4790>",
+ "_predict": "<function ActorCriticPolicy._predict at 0x7f062a3a4820>",
+ "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7f062a3a48b0>",
+ "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7f062a3a4940>",
+ "predict_values": "<function ActorCriticPolicy.predict_values at 0x7f062a3a49d0>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc._abc_data object at 0x7ff9e5242600>"
+ "_abc_impl": "<_abc._abc_data object at 0x7f063b273e80>"
  },
  "verbose": 1,
  "policy_kwargs": {
@@ -29,13 +29,13 @@
  "_num_timesteps_at_start": 0,
  "seed": 0,
  "action_noise": null,
- "start_time": 1755345832430230034,
+ "start_time": 1755347238713099672,
  "learning_rate": {
  ":type:": "<class 'stable_baselines3.common.utils.ConstantSchedule'>",
  ":serialized:": "gAWVTQAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMEENvbnN0YW50U2NoZWR1bGWUk5QpgZR9lIwDdmFslEc/FF5b1emsAXNiLg==",
  "val": 7.77e-05
  },
- "tensorboard_log": "runs/MountainCarContinuous-v0__ppo__3581364082__1755345830/MountainCarContinuous-v0",
+ "tensorboard_log": "runs/MountainCarContinuous-v0__ppo__122093306__1755347236/MountainCarContinuous-v0",
  "_last_obs": null,
  "_last_episode_starts": {
  ":type:": "<class 'numpy.ndarray'>",
@@ -43,7 +43,7 @@
  },
  "_last_original_obs": {
  ":type:": "<class 'numpy.ndarray'>",
- ":serialized:": "gAWVfgAAAAAAAACME251bXB5Ll9jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWCAAAAAAAAAD7Yw6/AAAAAJSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGJLAUsChpSMAUOUdJRSlC4="
+ ":serialized:": "gAWVfgAAAAAAAACME251bXB5Ll9jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWCAAAAAAAAAC5Q/O+AAAAAJSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGJLAUsChpSMAUOUdJRSlC4="
  },
  "_episode_num": 0,
  "use_sde": true,
@@ -52,7 +52,7 @@
  "_stats_window_size": 100,
  "ep_info_buffer": {
  ":type:": "<class 'collections.deque'>",
- ":serialized:": "gAWVCwUAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHwBKCOaOPvKGMAWyUTecDjAF0lEdACtNxlxwQ2HV9lChoBkdAVr3McIZ62WgHTd8DaAhHQBheTq0MPSV1fZQoaAZHQFh5pqynk1doB0v4aAhHQBsAk1Muez51fZQoaAZHQFeYGGmDUVloB01jAWgIR0AfDDpC8e0YdX2UKGgGR0BXgY/NZ/0/aAdLuGgIR0AgiCHymQ8wdX2UKGgGR0BW8LFwT/Q0aAdLtWgIR0AhfsdDIBBBdX2UKGgGR0BW/tD2JzkqaAdLuWgIR0Aif9uxbB42dX2UKGgGR0BW043FUADJaAdNEwFoCEdAJANdzGPxQXV9lChoBkfAO1f4M4LkS2gHTecDaAhHQCoUYfnwG4Z1fZQoaAZHwDo/RgJC0F9oB03nA2gIR0AwAjEvTPSldX2UKGgGR8A6tXrt3OfNaAdN5wNoCEdAMsGh7E5yVHV9lChoBkfANa0vXbuc+mgHTecDaAhHQDWz8UEgW8B1fZQoaAZHQFUQDh99c8loB01YAmgIR0A3ZImPYFq0dX2UKGgGR0BUsp8Sf16FaAdNXwJoCEdAOQ672+PBBXV9lChoBkdAV9uBK+SKWWgHS75oCEdAOZm5hBqsVHV9lChoBkdAV2a/RE4NqmgHS+JoCEdAOkBIBikO7XV9lChoBkfALb7P6be/H2gHTecDaAhHQD1PI3irDIl1fZQoaAZHwCQQ3aSLZSNoB03nA2gIR0BAHFwkxASndX2UKGgGR8Aw8KXv6TGHaAdN5wNoCEdAQXOWldkauXV9lChoBkdAU1y9OARTTGgHTaUDaAhHQELYRdQfp2V1fZQoaAZHQFenEcbR4QloB0vcaAhHQEMqHTI/7i11fZQoaAZHQFY34tYjjaRoB02jAWgIR0BDuGe18b71dX2UKGgGR8BI0t8E3bVSaAdN5wNoCEdARR24qgAZKnV9lChoBkfAShHJ3gUDdWgHTecDaAhHQEainVoYekp1fZQoaAZHQEs1rnDBMzxoB03dAmgIR0BHqlg2Ifr9dX2UKGgGR8BRONGRV6u5aAdN5wNoCEdASQCKHfuTinV9lChoBkdAVZ8VUMoc72gHS81oCEdASVaeyzHCGnV9lChoBkdAVsZOsT37DWgHS4JoCEdASY2ALApKBnV9lChoBkdAVlT2criEQGgHS4doCEdASc+HN5dGAnV9lChoBkdAViFvCMxXXGgHS71oCEdAShebPQfIS3V9lChoBkdAVqA9LYf4h2gHS4loCEdASkUoOQQtjHV9lChoBkdAVqVtHhCMP2gHS4doCEdASngetCAtnXV9lChoBkdAVqQfms/6f2gHS4doCEdASqgL3K0UoXV9lChoBkdAVvlShrWRR2gHS4ZoCEdAStiM1jy4F3V9lChoBkdAVt5NYbKif2gHS4poCEdASw/hZQpF1HV9lChoBkdAVaZRsMy8BmgHS9FoCEdAS1N1+y7f53V9lChoBkdAVuKg5BC2MWgHS41oCEdAS4hamoBJZnV9lChoBkdAVFXOkcjqwGgHTSwBaAhHQEvwtoSL61t1fZQoaAZHQFSJk9lmOENoB00pAWgIR0BMWsR6F/QTdX2UKGgGR0BWXMrqdH2AaAdLoWgIR0BMm+3pfQa8dX2UKGgGR0BWvSpaRp1zaAdLlWgIR0BM2hdt2s7udWUu"
+ ":serialized:": "gAWVEgcAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHwANX/YJ3PiWMAWyUTecDjAF0lEdADc5NoJzDGnV9lChoBke/+engpBomHGgHTecDaAhHQBocKsuFpPB1fZQoaAZHQFeFvkiliz9oB03FA2gIR0Aib+qBEroXdX2UKGgGR0BYf3zcynDSaAdNjwFoCEdAJJzuF6AvtnV9lChoBkdAWF6l3yI552gHTW0BaAhHQCab1mJ3xF11fZQoaAZHQFhRu5jH4oJoB007AWgIR0Aomfq5byH3dX2UKGgGR0BYVqaTfR/maAdNlgFoCEdAKwRUvPC2t3V9lChoBkdAWHIwudwvQGgHTUwBaAhHQC0AuCf6Gg11fZQoaAZHQFew1Tzd1uBoB0v9aAhHQC5/sqrilzl1fZQoaAZHQFfKCvX9R79oB00JAWgIR0AwAbi6xxDLdX2UKGgGR0BWPLQTmGM5aAdNNwFoCEdAMOhIvrWy1XV9lChoBkdAVzpFkQPI4mgHS9doCEdAMXKfnOjZc3V9lChoBkdAV0b9wWFewGgHTZ0BaAhHQDKP0wrUb1h1fZQoaAZHQFZ4vV3EAHVoB01bAWgIR0AzehGpda+wdX2UKGgGR0BWMJTMqz7eaAdLt2gIR0Az+NR3u/lAdX2UKGgGR0BWpP69CeEqaAdLl2gIR0A0Z0Kqn3tbdX2UKGgGR0BWfW0Z3s5XaAdLomgIR0A06FqBVdX1dX2UKGgGR0BWKdTxXnyNaAdL52gIR0A1moHcDbJwdX2UKGgGR0BWkyblRxcWaAdLj2gIR0A2DCe2/i5vdX2UKGgGR0BWNeJxeb/faAdLkWgIR0A2cDneSB9UdX2UKGgGR0BWMFBUrCm/aAdLlGgIR0A21UVzp5eJdX2UKGgGR0BV7DWCmMwUaAdLmGgIR0A3OyNn5BTodX2UKGgGR0BWCXLidat+aAdLnGgIR0A3mkKeCkGidX2UKGgGR0BWBP/zasZHaAdLnWgIR0A4BRBeHBUJdX2UKGgGR0BWQXsTnJT3aAdLk2gIR0A4aV/+bVjJdX2UKGgGR0BWJWy9mHxjaAdLmGgIR0A4yU83dbgTdX2UKGgGR0BWiLdznzQNaAdLmmgIR0A5Lz8gpz91dX2UKGgGR0BWBVNg0CRwaAdLpWgIR0A5nvRZ2ZAqdX2UKGgGR0BWbZkf9xZMaAdLpmgIR0A6HCtihFmWdX2UKGgGR0BTtTcEeQuFaAdNRgFoCEdAOwOdsi0OVnV9lChoBkfASqjyDqW1MWgHTecDaAhHQD3wZ9/jKgZ1fZQoaAZHwC/sGNaQmu1oB03nA2gIR0BAUAl4TsY3dX2UKGgGR0BVOEU9IPK/aAdNSwFoCEdAQMbNr0rbxnV9lChoBkfAEXg6U7jkuGgHTecDaAhHQEIyDh99c8l1fZQoaAZHwCSRvBJqZc9oB03nA2gIR0BDjwpWmxdIdX2UKGgGR0BVPKVhTfixaAdNVwFoCEdARASs6q8143V9lChoBkdAU5skGA08/2gHTagBaAhHQESWkka/ATJ1fZQoaAZHQFXvWkadc0NoB0upaAhHQETQsK9f1Hx1fZQoaAZHQFXmS75Ec81oB0upaAhHQEUM21lXiit1fZQoaAZHQFcPUHpr1uloB0u5aAhHQEVUlyimEXd1fZQoaAZHQFYQESuhbnpoB0vBaAhHQEWhQhwEQoV1fZQoaAZHQFaC6QNkOI9oB0u2aAhHQEXoZKnNxER1fZQoaAZHQFQcQEZBLPFoB003AWgIR0BGZCWmgrYodX2UKGgGR0BVoxoqTbFkaAdNCQFoCEdARsPuqm0mdHV9lChoBkdAVSzSThYNiGgHTT4BaAhHQEc0L876pHZ1fZQoaAZHQFczzhgmZ3NoB0v2aAhHQEeF6DXe3x51fZQoaAZHQFefA7PppvhoB0vraAhHQEfVj2Bas6t1fZQoaAZHQFYlCWNWEK5oB00JAWgIR0BIMXZGrjo7dX2UKGgGR0BWB0LlV94NaAdNNgFoCEdASJsAggX/HnV9lChoBkdAV31u76Hj62gHS/loCEdASPQHu7YkFHV9lChoBkdAV2HPyCnP3WgHTRYBaAhHQElca4tpVS51fZQoaAZHQFc2jBVMmF9oB00JAWgIR0BJxPCuU2UCdX2UKGgGR0BX6EAggX/HaAdNjAFoCEdASlUzdk8RtnV9lChoBkdAV2nSPU8V6GgHS/1oCEdASq/tpmEoOXV9lChoBkdAV7cb0e2d/mgHS4xoCEdASt/jMmnfmHV9lChoBkdAV6THcUM5O2gHS6poCEdASxsdT5wfhnV9lChoBkdAV1e40/GEPGgHTSMBaAhHQEuAaw2VE/l1fZQoaAZHQFXnT1TR6WxoB03IAWgIR0BMIgFotcv/dWUu"
  },
  "ep_success_buffer": {
  ":type:": "<class 'collections.deque'>",
@@ -102,14 +102,14 @@
  "__module__": "stable_baselines3.common.buffers",
  "__annotations__": "{'observations': <class 'numpy.ndarray'>, 'actions': <class 'numpy.ndarray'>, 'rewards': <class 'numpy.ndarray'>, 'advantages': <class 'numpy.ndarray'>, 'returns': <class 'numpy.ndarray'>, 'episode_starts': <class 'numpy.ndarray'>, 'log_probs': <class 'numpy.ndarray'>, 'values': <class 'numpy.ndarray'>}",
  "__doc__": "\n Rollout buffer used in on-policy algorithms like A2C/PPO.\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to classic advantage when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n ",
- "__init__": "<function RolloutBuffer.__init__ at 0x7ff9f478dab0>",
- "reset": "<function RolloutBuffer.reset at 0x7ff9f478db40>",
- "compute_returns_and_advantage": "<function RolloutBuffer.compute_returns_and_advantage at 0x7ff9f478dbd0>",
- "add": "<function RolloutBuffer.add at 0x7ff9f478dc60>",
- "get": "<function RolloutBuffer.get at 0x7ff9f478dcf0>",
- "_get_samples": "<function RolloutBuffer._get_samples at 0x7ff9f478dd80>",
+ "__init__": "<function RolloutBuffer.__init__ at 0x7f0639865ab0>",
+ "reset": "<function RolloutBuffer.reset at 0x7f0639865b40>",
+ "compute_returns_and_advantage": "<function RolloutBuffer.compute_returns_and_advantage at 0x7f0639865bd0>",
+ "add": "<function RolloutBuffer.add at 0x7f0639865c60>",
+ "get": "<function RolloutBuffer.get at 0x7f0639865cf0>",
+ "_get_samples": "<function RolloutBuffer._get_samples at 0x7f0639865d80>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc._abc_data object at 0x7ff9f4783b40>"
+ "_abc_impl": "<_abc._abc_data object at 0x7f0639868280>"
  },
  "rollout_buffer_kwargs": {},
  "batch_size": 256,
ppo-MountainCarContinuous-v0/policy.optimizer.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3e4de05d7dc716ad1d4a4534a0f55e0670ab56503b630f44f02ad199cb07926a
+ oid sha256:e53b45058f494faef27b60e0531499443654b20a4dc53a8e56062b873a0ed56b
  size 82542
ppo-MountainCarContinuous-v0/policy.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:40a8c0bd9e3eaff8ed90b073aa82fc0aabf52d6413db9ac7f22c59d8e28a0a16
+ oid sha256:9821bc26753873e6a15f53a4c9c467f73c30ed9cdb41bbda5bfb7d46f8ff5f22
  size 40764
replay.mp4 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:251b96341bea0c8c25ee5286b3ed5e677a017f7ba28a672a1532291b8e17f599
- size 212950
+ oid sha256:d2e9089aa8788349a8ff43c7090538b0409dfcdd8de5a47e7a214691539f749d
+ size 168768
results.json CHANGED
@@ -1 +1 @@
- {"mean_reward": -66.0250368, "std_reward": 0.26547952823138776, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2025-08-16T13:24:37.249650"}
+ {"mean_reward": 85.005738, "std_reward": 2.525246770575264, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2025-08-16T13:29:53.468107"}
train_eval_metrics.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:898075b6c59e72264a1c4bb4586d0a2cd7728158ad4ac9a67bd0e72175ef4478
- size 1217
+ oid sha256:8467b4d0aee953031ccbc0c6f6d4c9e09e3c6bf20c63be12d97c7c4465df2c3f
+ size 1644
vec_normalize.pkl CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6e4bd24d22006ccade2d9d9b270ee6b2de7778c297eb64adf1e7d4c0bdbb5af0
+ oid sha256:e3476d1d96467e02be16e464352492f3b9eb8a6c30624e8d123a5d96d1264cc1
  size 1869
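
Same size, different oid: the normalization statistics were evidently re-fitted during the new run, changing the pickle's content but not its length. For evaluation numbers to match results.json, these statistics should be re-applied to the eval env; a minimal sketch using VecNormalize.load:

```python
# Wrap the eval env with the saved observation/reward normalization stats.
import gymnasium as gym
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize

venv = DummyVecEnv([lambda: gym.make("MountainCarContinuous-v0")])
venv = VecNormalize.load("vec_normalize.pkl", venv)
venv.training = False     # freeze the running statistics at eval time
venv.norm_reward = False  # report raw (un-normalized) episode rewards
```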