!!python/object/apply:collections.OrderedDict
- - - batch_size
    - 512
  - - clip_range
    - 0.1
  - - ent_coef
    - 0.0010159833764878474
  - - gae_lambda
    - 0.98
  - - gamma
    - 0.995
  - - learning_rate
    - 0.0003904770450788824
  - - max_grad_norm
    - 0.9
  - - n_envs
    - 1
  - - n_epochs
    - 20
  - - n_steps
    - 2048
  - - n_timesteps
    - 1000000.0
  - - normalize
    - gamma: 0.995
      norm_obs: false
      norm_reward: true
  - - policy
    - MlpPolicy
  - - policy_kwargs
    - activation_fn: !!python/name:torch.nn.modules.activation.ReLU ''
      features_extractor_class: !!python/name:imitation.policies.base.NormalizeFeaturesExtractor ''
      net_arch:
      - pi:
        - 64
        - 64
        vf:
        - 64
        - 64
  - - vf_coef
    - 0.20315938606555833