MattStammers committed on
Commit 247e099 · 1 Parent(s): 09ef5eb

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ replay.mp4 filter=lfs diff=lfs merge=lfs -text
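
The added attribute routes replay.mp4 through Git LFS, matching the other large-file patterns above. A line like this is normally produced by `git lfs track` rather than edited by hand; for illustration:
```
git lfs track "replay.mp4"   # appends the filter/diff/merge line to .gitattributes
```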
.summary/0/events.out.tfevents.1694695704.rhmmedcatt-ProLiant-ML350-Gen10 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16269aa4c71f6002c20f65c35fd2a052055180f25988e26e057cb148f87fc787
+ size 1363775
.summary/1/events.out.tfevents.1694695704.rhmmedcatt-ProLiant-ML350-Gen10 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0e51f3ccaf767198a89a88da9b36376136fb3cfb63ec8f55b41da4449ab1eb7
+ size 1283466
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ library_name: sample-factory
+ tags:
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - sample-factory
+ model-index:
+ - name: APPO
+   results:
+   - task:
+       type: reinforcement-learning
+       name: reinforcement-learning
+     dataset:
+       name: doom_battle2
+       type: doom_battle2
+     metrics:
+     - type: mean_reward
+       value: 6.50 +/- 3.01
+       name: mean_reward
+       verified: false
+ ---
+
+ An **APPO** model trained on the **doom_battle2** environment.
+
+ This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
+ Documentation on how to use Sample-Factory can be found at https://www.samplefactory.dev/.
+
+ ## Downloading the model
+
+ After installing Sample-Factory, download the model with:
+ ```
+ python -m sample_factory.huggingface.load_from_hub -r MattStammers/vizdoom_battle_two
+ ```
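+
+ By default the downloaded files (checkpoints, config.json, replay.mp4) are placed under `./train_dir`; the `-d` flag and default destination here are assumptions based on Sample-Factory's Hugging Face integration docs, so adjust as needed:
+ ```
+ python -m sample_factory.huggingface.load_from_hub -r MattStammers/vizdoom_battle_two -d ./train_dir
+ ```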
+
+ ## Using the model
+
+ To run the model after download, use the `enjoy` script corresponding to this environment:
+ ```
+ python -m <path.to.enjoy.module> --algo=APPO --env=doom_battle2 --train_dir=./train_dir --experiment=vizdoom_battle_two
+ ```
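+
+ For ViZDoom environments the enjoy entry point is typically the one shipped with the Sample-Factory examples; the module path below is an assumption, so substitute your own script if you trained from a custom entry point:
+ ```
+ python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_battle2 --train_dir=./train_dir --experiment=vizdoom_battle_two
+ ```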
+
+ You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
+ See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.
+
+ ## Training with this model
+
+ To continue training with this model, use the `train` script corresponding to this environment:
+ ```
+ python -m <path.to.train.module> --algo=APPO --env=doom_battle2 --train_dir=./train_dir --experiment=vizdoom_battle_two --restart_behavior=resume --train_for_env_steps=10000000000
+ ```
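+
+ As above, for ViZDoom environments the training entry point is typically the Sample-Factory examples module (an assumption; replace with your own training script if different):
+ ```
+ python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_battle2 --train_dir=./train_dir --experiment=vizdoom_battle_two --restart_behavior=resume --train_for_env_steps=10000000000
+ ```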
+
+ Note: you may have to set `--train_for_env_steps` to a suitably high number, as the experiment will resume from the step count at which it previously stopped.
checkpoint_p0/best_000002950_12083200_reward_7.781.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5610b15d1a3b8d528cd4a33b13cde6e95d808cf22f8d976c02cfd76a0e6caeb8
+ size 37627795
checkpoint_p0/checkpoint_000002837_11620352.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ac09e539628b66ac379329617a66f0a1c31747caef4b7d26132d7062b3ccebb
+ size 37628387
checkpoint_p0/checkpoint_000002950_12083200.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e30d17e573c3bc76c03705bf02a2c081da62210c52266f10311df381d88d32e6
+ size 37628387
checkpoint_p1/best_000002353_9637888_reward_8.260.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49958a56178e058108a290fd52f16f93d90dca936d45fb6b28e4ff5e616ef330
+ size 37627795
checkpoint_p1/checkpoint_000002372_9715712.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0edcc2bc2a8682ee9c3af1713baa39869f99e0cff2278a399f28dd87fa206294
+ size 37628387
checkpoint_p1/checkpoint_000002442_10002432.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5844303a72dbfb419bf6561a4f5fc96eb9cfd989bb1adccceed25601e505d4e
+ size 37628387
config.json ADDED
@@ -0,0 +1,142 @@
+ {
+     "help": false,
+     "algo": "APPO",
+     "env": "doom_battle2",
+     "experiment": "default_experiment",
+     "train_dir": "/home/cogstack/Documents/optuna/environments/sample_factory/train_dir",
+     "restart_behavior": "restart",
+     "device": "gpu",
+     "seed": null,
+     "num_policies": 2,
+     "async_rl": true,
+     "serial_mode": false,
+     "batched_sampling": false,
+     "num_batches_to_accumulate": 2,
+     "worker_num_splits": 2,
+     "policy_workers_per_policy": 1,
+     "max_policy_lag": 1000,
+     "num_workers": 8,
+     "num_envs_per_worker": 4,
+     "batch_size": 1024,
+     "num_batches_per_epoch": 1,
+     "num_epochs": 1,
+     "rollout": 32,
+     "recurrence": 32,
+     "shuffle_minibatches": false,
+     "gamma": 0.99,
+     "reward_scale": 1.0,
+     "reward_clip": 1000.0,
+     "value_bootstrap": false,
+     "normalize_returns": true,
+     "exploration_loss_coeff": 0.001,
+     "value_loss_coeff": 0.5,
+     "kl_loss_coeff": 0.0,
+     "exploration_loss": "symmetric_kl",
+     "gae_lambda": 0.95,
+     "ppo_clip_ratio": 0.1,
+     "ppo_clip_value": 0.2,
+     "with_vtrace": false,
+     "vtrace_rho": 1.0,
+     "vtrace_c": 1.0,
+     "optimizer": "adam",
+     "adam_eps": 1e-06,
+     "adam_beta1": 0.9,
+     "adam_beta2": 0.999,
+     "max_grad_norm": 4.0,
+     "learning_rate": 0.0001,
+     "lr_schedule": "constant",
+     "lr_schedule_kl_threshold": 0.008,
+     "lr_adaptive_min": 1e-06,
+     "lr_adaptive_max": 0.01,
+     "obs_subtract_mean": 0.0,
+     "obs_scale": 255.0,
+     "normalize_input": true,
+     "normalize_input_keys": null,
+     "decorrelate_experience_max_seconds": 0,
+     "decorrelate_envs_on_one_worker": true,
+     "actor_worker_gpus": [],
+     "set_workers_cpu_affinity": true,
+     "force_envs_single_thread": false,
+     "default_niceness": 0,
+     "log_to_file": true,
+     "experiment_summaries_interval": 10,
+     "flush_summaries_interval": 30,
+     "stats_avg": 100,
+     "summaries_use_frameskip": true,
+     "heartbeat_interval": 20,
+     "heartbeat_reporting_interval": 600,
+     "train_for_env_steps": 10000000,
+     "train_for_seconds": 10000000000,
+     "save_every_sec": 120,
+     "keep_checkpoints": 2,
+     "load_checkpoint_kind": "latest",
+     "save_milestones_sec": -1,
+     "save_best_every_sec": 5,
+     "save_best_metric": "reward",
+     "save_best_after": 100000,
+     "benchmark": false,
+     "encoder_mlp_layers": [
+         512,
+         512
+     ],
+     "encoder_conv_architecture": "convnet_simple",
+     "encoder_conv_mlp_layers": [
+         512
+     ],
+     "use_rnn": true,
+     "rnn_size": 512,
+     "rnn_type": "gru",
+     "rnn_num_layers": 1,
+     "decoder_mlp_layers": [],
+     "nonlinearity": "elu",
+     "policy_initialization": "orthogonal",
+     "policy_init_gain": 1.0,
+     "actor_critic_share_weights": true,
+     "adaptive_stddev": true,
+     "continuous_tanh_scale": 0.0,
+     "initial_stddev": 1.0,
+     "use_env_info_cache": false,
+     "env_gpu_actions": false,
+     "env_gpu_observations": true,
+     "env_frameskip": 4,
+     "env_framestack": 1,
+     "pixel_format": "CHW",
+     "use_record_episode_statistics": false,
+     "with_wandb": false,
+     "wandb_user": null,
+     "wandb_project": "sample_factory",
+     "wandb_group": null,
+     "wandb_job_type": "SF",
+     "wandb_tags": [],
+     "with_pbt": false,
+     "pbt_mix_policies_in_one_env": true,
+     "pbt_period_env_steps": 5000000,
+     "pbt_start_mutation": 20000000,
+     "pbt_replace_fraction": 0.3,
+     "pbt_mutation_rate": 0.15,
+     "pbt_replace_reward_gap": 0.1,
+     "pbt_replace_reward_gap_absolute": 1e-06,
+     "pbt_optimize_gamma": false,
+     "pbt_target_objective": "true_objective",
+     "pbt_perturb_min": 1.1,
+     "pbt_perturb_max": 1.5,
+     "num_agents": -1,
+     "num_humans": 0,
+     "num_bots": -1,
+     "start_bot_difficulty": null,
+     "timelimit": null,
+     "res_w": 128,
+     "res_h": 72,
+     "wide_aspect_ratio": false,
+     "eval_env_frameskip": 1,
+     "fps": 35,
+     "command_line": "--env=doom_dm --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=20000000",
+     "cli_args": {
+         "env": "doom_dm",
+         "num_workers": 8,
+         "num_envs_per_worker": 4,
+         "train_for_env_steps": 20000000
+     },
+     "git_hash": "20b6d44612dad7d171f23e13b1f3b4c5e5631cf9",
+     "git_repo_name": "https://github.com/MattStammers/optuna.git"
+ }
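
Because the training configuration is saved as plain JSON, it can be inspected directly from the command line; a minimal sketch (the file path is illustrative and assumes you run it from the repository root):
```
python -c "import json; cfg = json.load(open('config.json')); print(cfg['algo'], cfg['env'], cfg['learning_rate'])"
```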
git.diff ADDED
The diff for this file is too large to render. See raw diff
 
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1ba30b31506c47d6ebdca647cc817565f595add0958c7fc6af44bb8c9aa1e2e
+ size 29487665
sf_log.txt ADDED
@@ -0,0 +1,947 @@
1
+ [2023-09-14 13:48:28,103][43239] Using GPUs [0] for process 0 (actually maps to GPUs [0])
2
+ [2023-09-14 13:48:28,103][43239] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
3
+ [2023-09-14 13:48:28,144][43239] Num visible devices: 1
4
+ [2023-09-14 13:48:28,195][43239] Starting seed is not provided
5
+ [2023-09-14 13:48:28,196][43239] Using GPUs [0] for process 0 (actually maps to GPUs [0])
6
+ [2023-09-14 13:48:28,196][43239] Initializing actor-critic model on device cuda:0
7
+ [2023-09-14 13:48:28,196][43239] RunningMeanStd input shape: (23,)
8
+ [2023-09-14 13:48:28,197][43239] RunningMeanStd input shape: (3, 72, 128)
9
+ [2023-09-14 13:48:28,197][43239] RunningMeanStd input shape: (1,)
10
+ [2023-09-14 13:48:28,209][43239] ConvEncoder: input_channels=3
11
+ [2023-09-14 13:48:28,370][43239] Conv encoder output size: 512
12
+ [2023-09-14 13:48:28,371][43239] Policy head output size: 640
13
+ [2023-09-14 13:48:28,389][43239] Created Actor Critic model with architecture:
14
+ [2023-09-14 13:48:28,389][43239] ActorCriticSharedWeights(
15
+ (obs_normalizer): ObservationNormalizer(
16
+ (running_mean_std): RunningMeanStdDictInPlace(
17
+ (running_mean_std): ModuleDict(
18
+ (measurements): RunningMeanStdInPlace()
19
+ (obs): RunningMeanStdInPlace()
20
+ )
21
+ )
22
+ )
23
+ (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
24
+ (encoder): VizdoomEncoder(
25
+ (basic_encoder): ConvEncoder(
26
+ (enc): RecursiveScriptModule(
27
+ original_name=ConvEncoderImpl
28
+ (conv_head): RecursiveScriptModule(
29
+ original_name=Sequential
30
+ (0): RecursiveScriptModule(original_name=Conv2d)
31
+ (1): RecursiveScriptModule(original_name=ELU)
32
+ (2): RecursiveScriptModule(original_name=Conv2d)
33
+ (3): RecursiveScriptModule(original_name=ELU)
34
+ (4): RecursiveScriptModule(original_name=Conv2d)
35
+ (5): RecursiveScriptModule(original_name=ELU)
36
+ )
37
+ (mlp_layers): RecursiveScriptModule(
38
+ original_name=Sequential
39
+ (0): RecursiveScriptModule(original_name=Linear)
40
+ (1): RecursiveScriptModule(original_name=ELU)
41
+ )
42
+ )
43
+ )
44
+ (measurements_head): Sequential(
45
+ (0): Linear(in_features=23, out_features=128, bias=True)
46
+ (1): ELU(alpha=1.0)
47
+ (2): Linear(in_features=128, out_features=128, bias=True)
48
+ (3): ELU(alpha=1.0)
49
+ )
50
+ )
51
+ (core): ModelCoreRNN(
52
+ (core): GRU(640, 512)
53
+ )
54
+ (decoder): MlpDecoder(
55
+ (mlp): Identity()
56
+ )
57
+ (critic_linear): Linear(in_features=512, out_features=1, bias=True)
58
+ (action_parameterization): ActionParameterizationDefault(
59
+ (distribution_linear): Linear(in_features=512, out_features=21, bias=True)
60
+ )
61
+ )
62
+ [2023-09-14 13:48:29,272][43239] Using optimizer <class 'torch.optim.adam.Adam'>
63
+ [2023-09-14 13:48:29,273][43239] No checkpoints found
64
+ [2023-09-14 13:48:29,273][43239] Did not load from checkpoint, starting from scratch!
65
+ [2023-09-14 13:48:29,273][43239] Initialized policy 0 weights for model version 0
66
+ [2023-09-14 13:48:29,275][43239] LearnerWorker_p0 finished initialization!
67
+ [2023-09-14 13:48:29,275][43239] Using GPUs [0] for process 0 (actually maps to GPUs [0])
68
+ [2023-09-14 13:48:29,699][43415] Using GPUs [1] for process 1 (actually maps to GPUs [1])
69
+ [2023-09-14 13:48:29,699][43415] Set environment var CUDA_VISIBLE_DEVICES to '1' (GPU indices [1]) for learning process 1
70
+ [2023-09-14 13:48:29,737][43415] Num visible devices: 1
71
+ [2023-09-14 13:48:29,778][43415] Starting seed is not provided
72
+ [2023-09-14 13:48:29,778][43415] Using GPUs [0] for process 1 (actually maps to GPUs [1])
73
+ [2023-09-14 13:48:29,779][43415] Initializing actor-critic model on device cuda:0
74
+ [2023-09-14 13:48:29,779][43415] RunningMeanStd input shape: (23,)
75
+ [2023-09-14 13:48:29,779][43415] RunningMeanStd input shape: (3, 72, 128)
76
+ [2023-09-14 13:48:29,780][43415] RunningMeanStd input shape: (1,)
77
+ [2023-09-14 13:48:29,792][43415] ConvEncoder: input_channels=3
78
+ [2023-09-14 13:48:30,065][43415] Conv encoder output size: 512
79
+ [2023-09-14 13:48:30,066][43415] Policy head output size: 640
80
+ [2023-09-14 13:48:30,095][43415] Created Actor Critic model with architecture:
81
+ [2023-09-14 13:48:30,095][43415] ActorCriticSharedWeights(
82
+ (obs_normalizer): ObservationNormalizer(
83
+ (running_mean_std): RunningMeanStdDictInPlace(
84
+ (running_mean_std): ModuleDict(
85
+ (measurements): RunningMeanStdInPlace()
86
+ (obs): RunningMeanStdInPlace()
87
+ )
88
+ )
89
+ )
90
+ (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
91
+ (encoder): VizdoomEncoder(
92
+ (basic_encoder): ConvEncoder(
93
+ (enc): RecursiveScriptModule(
94
+ original_name=ConvEncoderImpl
95
+ (conv_head): RecursiveScriptModule(
96
+ original_name=Sequential
97
+ (0): RecursiveScriptModule(original_name=Conv2d)
98
+ (1): RecursiveScriptModule(original_name=ELU)
99
+ (2): RecursiveScriptModule(original_name=Conv2d)
100
+ (3): RecursiveScriptModule(original_name=ELU)
101
+ (4): RecursiveScriptModule(original_name=Conv2d)
102
+ (5): RecursiveScriptModule(original_name=ELU)
103
+ )
104
+ (mlp_layers): RecursiveScriptModule(
105
+ original_name=Sequential
106
+ (0): RecursiveScriptModule(original_name=Linear)
107
+ (1): RecursiveScriptModule(original_name=ELU)
108
+ )
109
+ )
110
+ )
111
+ (measurements_head): Sequential(
112
+ (0): Linear(in_features=23, out_features=128, bias=True)
113
+ (1): ELU(alpha=1.0)
114
+ (2): Linear(in_features=128, out_features=128, bias=True)
115
+ (3): ELU(alpha=1.0)
116
+ )
117
+ )
118
+ (core): ModelCoreRNN(
119
+ (core): GRU(640, 512)
120
+ )
121
+ (decoder): MlpDecoder(
122
+ (mlp): Identity()
123
+ )
124
+ (critic_linear): Linear(in_features=512, out_features=1, bias=True)
125
+ (action_parameterization): ActionParameterizationDefault(
126
+ (distribution_linear): Linear(in_features=512, out_features=21, bias=True)
127
+ )
128
+ )
129
+ [2023-09-14 13:48:31,315][43415] Using optimizer <class 'torch.optim.adam.Adam'>
130
+ [2023-09-14 13:48:31,316][43415] No checkpoints found
131
+ [2023-09-14 13:48:31,316][43415] Did not load from checkpoint, starting from scratch!
132
+ [2023-09-14 13:48:31,316][43415] Initialized policy 1 weights for model version 0
133
+ [2023-09-14 13:48:31,318][43415] LearnerWorker_p1 finished initialization!
134
+ [2023-09-14 13:48:31,318][43415] Using GPUs [0] for process 1 (actually maps to GPUs [1])
135
+ [2023-09-14 13:48:31,727][43670] Worker 0 uses CPU cores [0, 1, 2, 3]
136
+ [2023-09-14 13:48:31,819][43675] Worker 5 uses CPU cores [20, 21, 22, 23]
137
+ [2023-09-14 13:48:31,825][43672] Worker 2 uses CPU cores [8, 9, 10, 11]
138
+ [2023-09-14 13:48:31,873][43669] Using GPUs [1] for process 1 (actually maps to GPUs [1])
139
+ [2023-09-14 13:48:31,873][43669] Set environment var CUDA_VISIBLE_DEVICES to '1' (GPU indices [1]) for inference process 1
140
+ [2023-09-14 13:48:31,887][43728] Worker 7 uses CPU cores [28, 29, 30, 31]
141
+ [2023-09-14 13:48:31,891][43669] Num visible devices: 1
142
+ [2023-09-14 13:48:32,011][43671] Worker 1 uses CPU cores [4, 5, 6, 7]
143
+ [2023-09-14 13:48:32,225][43673] Worker 3 uses CPU cores [12, 13, 14, 15]
144
+ [2023-09-14 13:48:32,325][43668] Using GPUs [0] for process 0 (actually maps to GPUs [0])
145
+ [2023-09-14 13:48:32,325][43668] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
146
+ [2023-09-14 13:48:32,342][43674] Worker 4 uses CPU cores [16, 17, 18, 19]
147
+ [2023-09-14 13:48:32,346][43668] Num visible devices: 1
148
+ [2023-09-14 13:48:32,458][43726] Worker 6 uses CPU cores [24, 25, 26, 27]
149
+ [2023-09-14 13:48:32,525][43669] RunningMeanStd input shape: (23,)
150
+ [2023-09-14 13:48:32,526][43669] RunningMeanStd input shape: (3, 72, 128)
151
+ [2023-09-14 13:48:32,526][43669] RunningMeanStd input shape: (1,)
152
+ [2023-09-14 13:48:32,538][43669] ConvEncoder: input_channels=3
153
+ [2023-09-14 13:48:32,642][43669] Conv encoder output size: 512
154
+ [2023-09-14 13:48:32,643][43669] Policy head output size: 640
155
+ [2023-09-14 13:48:32,955][43668] RunningMeanStd input shape: (23,)
156
+ [2023-09-14 13:48:32,956][43668] RunningMeanStd input shape: (3, 72, 128)
157
+ [2023-09-14 13:48:32,956][43668] RunningMeanStd input shape: (1,)
158
+ [2023-09-14 13:48:32,968][43668] ConvEncoder: input_channels=3
159
+ [2023-09-14 13:48:33,071][43668] Conv encoder output size: 512
160
+ [2023-09-14 13:48:33,072][43668] Policy head output size: 640
161
+ [2023-09-14 13:48:33,386][43670] Doom resolution: 160x120, resize resolution: (128, 72)
162
+ [2023-09-14 13:48:33,394][43674] Doom resolution: 160x120, resize resolution: (128, 72)
163
+ [2023-09-14 13:48:33,394][43728] Doom resolution: 160x120, resize resolution: (128, 72)
164
+ [2023-09-14 13:48:33,403][43673] Doom resolution: 160x120, resize resolution: (128, 72)
165
+ [2023-09-14 13:48:33,403][43671] Doom resolution: 160x120, resize resolution: (128, 72)
166
+ [2023-09-14 13:48:33,404][43675] Doom resolution: 160x120, resize resolution: (128, 72)
167
+ [2023-09-14 13:48:33,412][43672] Doom resolution: 160x120, resize resolution: (128, 72)
168
+ [2023-09-14 13:48:33,412][43726] Doom resolution: 160x120, resize resolution: (128, 72)
169
+ [2023-09-14 13:48:33,711][43728] Decorrelating experience for 0 frames...
170
+ [2023-09-14 13:48:33,735][43671] Decorrelating experience for 0 frames...
171
+ [2023-09-14 13:48:33,756][43726] Decorrelating experience for 0 frames...
172
+ [2023-09-14 13:48:33,781][43670] Decorrelating experience for 0 frames...
173
+ [2023-09-14 13:48:33,795][43674] Decorrelating experience for 0 frames...
174
+ [2023-09-14 13:48:34,021][43671] Decorrelating experience for 32 frames...
175
+ [2023-09-14 13:48:34,052][43672] Decorrelating experience for 0 frames...
176
+ [2023-09-14 13:48:34,101][43726] Decorrelating experience for 32 frames...
177
+ [2023-09-14 13:48:34,102][43673] Decorrelating experience for 0 frames...
178
+ [2023-09-14 13:48:34,104][43674] Decorrelating experience for 32 frames...
179
+ [2023-09-14 13:48:34,354][43670] Decorrelating experience for 32 frames...
180
+ [2023-09-14 13:48:34,377][43672] Decorrelating experience for 32 frames...
181
+ [2023-09-14 13:48:34,467][43675] Decorrelating experience for 0 frames...
182
+ [2023-09-14 13:48:34,528][43671] Decorrelating experience for 64 frames...
183
+ [2023-09-14 13:48:34,547][43726] Decorrelating experience for 64 frames...
184
+ [2023-09-14 13:48:34,637][43673] Decorrelating experience for 32 frames...
185
+ [2023-09-14 13:48:34,760][43728] Decorrelating experience for 32 frames...
186
+ [2023-09-14 13:48:34,827][43672] Decorrelating experience for 64 frames...
187
+ [2023-09-14 13:48:34,926][43670] Decorrelating experience for 64 frames...
188
+ [2023-09-14 13:48:34,929][43726] Decorrelating experience for 96 frames...
189
+ [2023-09-14 13:48:34,954][43675] Decorrelating experience for 32 frames...
190
+ [2023-09-14 13:48:35,000][43671] Decorrelating experience for 96 frames...
191
+ [2023-09-14 13:48:35,104][43674] Decorrelating experience for 64 frames...
192
+ [2023-09-14 13:48:35,125][43728] Decorrelating experience for 64 frames...
193
+ [2023-09-14 13:48:35,184][43673] Decorrelating experience for 64 frames...
194
+ [2023-09-14 13:48:35,347][43672] Decorrelating experience for 96 frames...
195
+ [2023-09-14 13:48:35,472][43675] Decorrelating experience for 64 frames...
196
+ [2023-09-14 13:48:35,503][43674] Decorrelating experience for 96 frames...
197
+ [2023-09-14 13:48:35,554][43670] Decorrelating experience for 96 frames...
198
+ [2023-09-14 13:48:35,597][43673] Decorrelating experience for 96 frames...
199
+ [2023-09-14 13:48:35,771][43728] Decorrelating experience for 96 frames...
200
+ [2023-09-14 13:48:35,917][43675] Decorrelating experience for 96 frames...
201
+ [2023-09-14 13:48:36,328][43671] Multiple policies in trajectory buffer: [0 1] (-1 means inactive agent)
202
+ [2023-09-14 13:48:36,356][43726] Multiple policies in trajectory buffer: [0 1] (-1 means inactive agent)
203
+ [2023-09-14 13:48:36,445][43673] Multiple policies in trajectory buffer: [0 1] (-1 means inactive agent)
204
+ [2023-09-14 13:48:36,698][43728] Multiple policies in trajectory buffer: [0 1] (-1 means inactive agent)
205
+ [2023-09-14 13:48:36,862][43415] Signal inference workers to stop experience collection...
206
+ [2023-09-14 13:48:36,868][43668] InferenceWorker_p0-w0: stopping experience collection
207
+ [2023-09-14 13:48:36,871][43669] InferenceWorker_p1-w0: stopping experience collection
208
+ [2023-09-14 13:48:39,985][43415] Signal inference workers to resume experience collection...
209
+ [2023-09-14 13:48:39,986][43669] InferenceWorker_p1-w0: resuming experience collection
210
+ [2023-09-14 13:48:39,986][43668] InferenceWorker_p0-w0: resuming experience collection
211
+ [2023-09-14 13:48:41,396][43670] Multiple policies in trajectory buffer: [0 1] (-1 means inactive agent)
212
+ [2023-09-14 13:48:41,473][43674] Multiple policies in trajectory buffer: [0 1] (-1 means inactive agent)
213
+ [2023-09-14 13:48:41,497][43675] Multiple policies in trajectory buffer: [0 1] (-1 means inactive agent)
214
+ [2023-09-14 13:48:42,205][43239] Signal inference workers to stop experience collection...
215
+ [2023-09-14 13:48:42,770][43239] Signal inference workers to resume experience collection...
216
+ [2023-09-14 13:48:42,869][43672] Multiple policies in trajectory buffer: [0 1] (-1 means inactive agent)
217
+ [2023-09-14 13:48:45,713][43669] Updated weights for policy 1, policy_version 10 (0.0696)
218
+ [2023-09-14 13:48:46,997][43668] Updated weights for policy 0, policy_version 10 (0.0011)
219
+ [2023-09-14 13:48:52,153][43669] Updated weights for policy 1, policy_version 20 (0.0011)
220
+ [2023-09-14 13:48:52,252][43668] Updated weights for policy 0, policy_version 20 (0.0011)
221
+ [2023-09-14 13:48:57,578][43668] Updated weights for policy 0, policy_version 30 (0.0010)
222
+ [2023-09-14 13:48:58,953][43669] Updated weights for policy 1, policy_version 30 (0.0010)
223
+ [2023-09-14 13:48:59,324][43239] Saving new best policy, reward=1.015!
224
+ [2023-09-14 13:48:59,324][43415] Saving new best policy, reward=0.659!
225
+ [2023-09-14 13:49:03,027][43668] Updated weights for policy 0, policy_version 40 (0.0010)
226
+ [2023-09-14 13:49:04,329][43415] Saving new best policy, reward=0.917!
227
+ [2023-09-14 13:49:05,264][43669] Updated weights for policy 1, policy_version 40 (0.0010)
228
+ [2023-09-14 13:49:09,121][43668] Updated weights for policy 0, policy_version 50 (0.0011)
229
+ [2023-09-14 13:49:09,324][43239] Saving new best policy, reward=1.051!
230
+ [2023-09-14 13:49:09,324][43415] Saving new best policy, reward=1.069!
231
+ [2023-09-14 13:49:10,515][43669] Updated weights for policy 1, policy_version 50 (0.0009)
232
+ [2023-09-14 13:49:14,328][43239] Saving new best policy, reward=1.074!
233
+ [2023-09-14 13:49:14,328][43415] Saving new best policy, reward=1.136!
234
+ [2023-09-14 13:49:15,694][43669] Updated weights for policy 1, policy_version 60 (0.0010)
235
+ [2023-09-14 13:49:15,803][43668] Updated weights for policy 0, policy_version 60 (0.0011)
236
+ [2023-09-14 13:49:21,450][43669] Updated weights for policy 1, policy_version 70 (0.0010)
237
+ [2023-09-14 13:49:21,997][43668] Updated weights for policy 0, policy_version 70 (0.0011)
238
+ [2023-09-14 13:49:27,336][43668] Updated weights for policy 0, policy_version 80 (0.0011)
239
+ [2023-09-14 13:49:27,407][43669] Updated weights for policy 1, policy_version 80 (0.0010)
240
+ [2023-09-14 13:49:32,135][43668] Updated weights for policy 0, policy_version 90 (0.0010)
241
+ [2023-09-14 13:49:34,328][43239] Saving new best policy, reward=1.111!
242
+ [2023-09-14 13:49:34,907][43669] Updated weights for policy 1, policy_version 90 (0.0010)
243
+ [2023-09-14 13:49:37,006][43668] Updated weights for policy 0, policy_version 100 (0.0010)
244
+ [2023-09-14 13:49:41,947][43668] Updated weights for policy 0, policy_version 110 (0.0010)
245
+ [2023-09-14 13:49:42,320][43669] Updated weights for policy 1, policy_version 100 (0.0010)
246
+ [2023-09-14 13:49:47,631][43668] Updated weights for policy 0, policy_version 120 (0.0010)
247
+ [2023-09-14 13:49:48,076][43669] Updated weights for policy 1, policy_version 110 (0.0011)
248
+ [2023-09-14 13:49:49,324][43415] Saving new best policy, reward=1.155!
249
+ [2023-09-14 13:49:53,133][43669] Updated weights for policy 1, policy_version 120 (0.0010)
250
+ [2023-09-14 13:49:54,328][43415] Saving new best policy, reward=1.220!
251
+ [2023-09-14 13:49:54,739][43668] Updated weights for policy 0, policy_version 130 (0.0012)
252
+ [2023-09-14 13:49:58,479][43669] Updated weights for policy 1, policy_version 130 (0.0011)
253
+ [2023-09-14 13:49:59,324][43415] Saving new best policy, reward=1.230!
254
+ [2023-09-14 13:49:59,324][43239] Saving new best policy, reward=1.182!
255
+ [2023-09-14 13:50:01,919][43668] Updated weights for policy 0, policy_version 140 (0.0011)
256
+ [2023-09-14 13:50:04,063][43669] Updated weights for policy 1, policy_version 140 (0.0010)
257
+ [2023-09-14 13:50:07,417][43668] Updated weights for policy 0, policy_version 150 (0.0010)
258
+ [2023-09-14 13:50:09,324][43239] Saving new best policy, reward=1.312!
259
+ [2023-09-14 13:50:11,743][43669] Updated weights for policy 1, policy_version 150 (0.0011)
260
+ [2023-09-14 13:50:12,082][43668] Updated weights for policy 0, policy_version 160 (0.0010)
261
+ [2023-09-14 13:50:14,329][43239] Saving new best policy, reward=1.482!
262
+ [2023-09-14 13:50:17,488][43668] Updated weights for policy 0, policy_version 170 (0.0010)
263
+ [2023-09-14 13:50:18,430][43669] Updated weights for policy 1, policy_version 160 (0.0010)
264
+ [2023-09-14 13:50:19,325][43239] Saving new best policy, reward=1.600!
265
+ [2023-09-14 13:50:23,419][43668] Updated weights for policy 0, policy_version 180 (0.0011)
266
+ [2023-09-14 13:50:24,329][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000000181_741376.pth...
267
+ [2023-09-14 13:50:24,331][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000000169_692224.pth...
268
+ [2023-09-14 13:50:24,390][43239] Saving new best policy, reward=1.609!
269
+ [2023-09-14 13:50:24,497][43669] Updated weights for policy 1, policy_version 170 (0.0010)
270
+ [2023-09-14 13:50:28,816][43668] Updated weights for policy 0, policy_version 190 (0.0011)
271
+ [2023-09-14 13:50:31,532][43669] Updated weights for policy 1, policy_version 180 (0.0011)
272
+ [2023-09-14 13:50:33,837][43668] Updated weights for policy 0, policy_version 200 (0.0011)
273
+ [2023-09-14 13:50:37,392][43669] Updated weights for policy 1, policy_version 190 (0.0011)
274
+ [2023-09-14 13:50:39,760][43668] Updated weights for policy 0, policy_version 210 (0.0011)
275
+ [2023-09-14 13:50:43,218][43669] Updated weights for policy 1, policy_version 200 (0.0010)
276
+ [2023-09-14 13:50:45,719][43668] Updated weights for policy 0, policy_version 220 (0.0011)
277
+ [2023-09-14 13:50:48,705][43669] Updated weights for policy 1, policy_version 210 (0.0011)
278
+ [2023-09-14 13:50:52,273][43668] Updated weights for policy 0, policy_version 230 (0.0011)
279
+ [2023-09-14 13:50:54,557][43669] Updated weights for policy 1, policy_version 220 (0.0010)
280
+ [2023-09-14 13:50:57,745][43668] Updated weights for policy 0, policy_version 240 (0.0011)
281
+ [2023-09-14 13:51:00,989][43669] Updated weights for policy 1, policy_version 230 (0.0011)
282
+ [2023-09-14 13:51:03,374][43668] Updated weights for policy 0, policy_version 250 (0.0010)
283
+ [2023-09-14 13:51:07,884][43669] Updated weights for policy 1, policy_version 240 (0.0010)
284
+ [2023-09-14 13:51:08,593][43668] Updated weights for policy 0, policy_version 260 (0.0010)
285
+ [2023-09-14 13:51:13,853][43668] Updated weights for policy 0, policy_version 270 (0.0011)
286
+ [2023-09-14 13:51:14,336][43415] Saving new best policy, reward=1.377!
287
+ [2023-09-14 13:51:15,268][43669] Updated weights for policy 1, policy_version 250 (0.0010)
288
+ [2023-09-14 13:51:18,472][43668] Updated weights for policy 0, policy_version 280 (0.0012)
289
+ [2023-09-14 13:51:19,326][43415] Saving new best policy, reward=1.432!
290
+ [2023-09-14 13:51:23,371][43668] Updated weights for policy 0, policy_version 290 (0.0010)
291
+ [2023-09-14 13:51:23,377][43669] Updated weights for policy 1, policy_version 260 (0.0013)
292
+ [2023-09-14 13:51:28,720][43668] Updated weights for policy 0, policy_version 300 (0.0011)
293
+ [2023-09-14 13:51:30,321][43669] Updated weights for policy 1, policy_version 270 (0.0010)
294
+ [2023-09-14 13:51:34,272][43668] Updated weights for policy 0, policy_version 310 (0.0011)
295
+ [2023-09-14 13:51:34,330][43239] Saving new best policy, reward=1.678!
296
+ [2023-09-14 13:51:36,494][43669] Updated weights for policy 1, policy_version 280 (0.0010)
297
+ [2023-09-14 13:51:39,324][43239] Saving new best policy, reward=1.700!
298
+ [2023-09-14 13:51:40,233][43668] Updated weights for policy 0, policy_version 320 (0.0010)
299
+ [2023-09-14 13:51:42,311][43669] Updated weights for policy 1, policy_version 290 (0.0010)
300
+ [2023-09-14 13:51:44,329][43239] Saving new best policy, reward=1.701!
301
+ [2023-09-14 13:51:46,876][43668] Updated weights for policy 0, policy_version 330 (0.0010)
302
+ [2023-09-14 13:51:47,380][43669] Updated weights for policy 1, policy_version 300 (0.0010)
303
+ [2023-09-14 13:51:52,772][43669] Updated weights for policy 1, policy_version 310 (0.0011)
304
+ [2023-09-14 13:51:53,443][43668] Updated weights for policy 0, policy_version 340 (0.0011)
305
+ [2023-09-14 13:51:54,328][43239] Saving new best policy, reward=1.767!
306
+ [2023-09-14 13:51:57,919][43669] Updated weights for policy 1, policy_version 320 (0.0011)
307
+ [2023-09-14 13:52:00,148][43668] Updated weights for policy 0, policy_version 350 (0.0010)
308
+ [2023-09-14 13:52:03,598][43669] Updated weights for policy 1, policy_version 330 (0.0012)
309
+ [2023-09-14 13:52:06,729][43668] Updated weights for policy 0, policy_version 360 (0.0011)
310
+ [2023-09-14 13:52:09,945][43669] Updated weights for policy 1, policy_version 340 (0.0011)
311
+ [2023-09-14 13:52:12,169][43668] Updated weights for policy 0, policy_version 370 (0.0010)
312
+ [2023-09-14 13:52:16,166][43669] Updated weights for policy 1, policy_version 350 (0.0010)
313
+ [2023-09-14 13:52:18,197][43668] Updated weights for policy 0, policy_version 380 (0.0011)
314
+ [2023-09-14 13:52:21,813][43669] Updated weights for policy 1, policy_version 360 (0.0010)
315
+ [2023-09-14 13:52:24,331][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000000389_1593344.pth...
316
+ [2023-09-14 13:52:24,331][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000000364_1490944.pth...
317
+ [2023-09-14 13:52:24,390][43415] Saving new best policy, reward=1.450!
318
+ [2023-09-14 13:52:24,528][43668] Updated weights for policy 0, policy_version 390 (0.0011)
319
+ [2023-09-14 13:52:27,985][43669] Updated weights for policy 1, policy_version 370 (0.0011)
320
+ [2023-09-14 13:52:30,632][43668] Updated weights for policy 0, policy_version 400 (0.0011)
321
+ [2023-09-14 13:52:33,920][43669] Updated weights for policy 1, policy_version 380 (0.0011)
322
+ [2023-09-14 13:52:36,505][43668] Updated weights for policy 0, policy_version 410 (0.0010)
323
+ [2023-09-14 13:52:39,324][43415] Saving new best policy, reward=1.671!
324
+ [2023-09-14 13:52:40,402][43669] Updated weights for policy 1, policy_version 390 (0.0011)
325
+ [2023-09-14 13:52:41,770][43668] Updated weights for policy 0, policy_version 420 (0.0010)
326
+ [2023-09-14 13:52:44,329][43415] Saving new best policy, reward=1.702!
327
+ [2023-09-14 13:52:46,467][43668] Updated weights for policy 0, policy_version 430 (0.0010)
328
+ [2023-09-14 13:52:47,865][43669] Updated weights for policy 1, policy_version 400 (0.0010)
329
+ [2023-09-14 13:52:49,324][43415] Saving new best policy, reward=1.784!
330
+ [2023-09-14 13:52:51,045][43668] Updated weights for policy 0, policy_version 440 (0.0011)
331
+ [2023-09-14 13:52:54,329][43415] Saving new best policy, reward=1.827!
332
+ [2023-09-14 13:52:55,365][43668] Updated weights for policy 0, policy_version 450 (0.0010)
333
+ [2023-09-14 13:52:57,315][43669] Updated weights for policy 1, policy_version 410 (0.0010)
334
+ [2023-09-14 13:52:59,506][43668] Updated weights for policy 0, policy_version 460 (0.0010)
335
+ [2023-09-14 13:53:04,025][43668] Updated weights for policy 0, policy_version 470 (0.0010)
336
+ [2023-09-14 13:53:05,818][43669] Updated weights for policy 1, policy_version 420 (0.0010)
337
+ [2023-09-14 13:53:09,119][43668] Updated weights for policy 0, policy_version 480 (0.0011)
338
+ [2023-09-14 13:53:12,965][43669] Updated weights for policy 1, policy_version 430 (0.0011)
339
+ [2023-09-14 13:53:14,357][43239] Saving new best policy, reward=1.769!
340
+ [2023-09-14 13:53:14,358][43668] Updated weights for policy 0, policy_version 490 (0.0011)
341
+ [2023-09-14 13:53:18,641][43669] Updated weights for policy 1, policy_version 440 (0.0010)
342
+ [2023-09-14 13:53:19,324][43239] Saving new best policy, reward=1.876!
343
+ [2023-09-14 13:53:20,741][43668] Updated weights for policy 0, policy_version 500 (0.0010)
344
+ [2023-09-14 13:53:23,652][43669] Updated weights for policy 1, policy_version 450 (0.0011)
345
+ [2023-09-14 13:53:28,164][43668] Updated weights for policy 0, policy_version 510 (0.0011)
346
+ [2023-09-14 13:53:28,905][43669] Updated weights for policy 1, policy_version 460 (0.0010)
347
+ [2023-09-14 13:53:33,632][43669] Updated weights for policy 1, policy_version 470 (0.0011)
348
+ [2023-09-14 13:53:35,645][43668] Updated weights for policy 0, policy_version 520 (0.0010)
349
+ [2023-09-14 13:53:39,143][43669] Updated weights for policy 1, policy_version 480 (0.0010)
350
+ [2023-09-14 13:53:41,851][43668] Updated weights for policy 0, policy_version 530 (0.0011)
351
+ [2023-09-14 13:53:44,820][43669] Updated weights for policy 1, policy_version 490 (0.0011)
352
+ [2023-09-14 13:53:48,151][43668] Updated weights for policy 0, policy_version 540 (0.0010)
353
+ [2023-09-14 13:53:50,557][43669] Updated weights for policy 1, policy_version 500 (0.0010)
354
+ [2023-09-14 13:53:54,059][43668] Updated weights for policy 0, policy_version 550 (0.0012)
355
+ [2023-09-14 13:53:54,328][43239] Saving new best policy, reward=1.914!
356
+ [2023-09-14 13:53:56,423][43669] Updated weights for policy 1, policy_version 510 (0.0010)
357
+ [2023-09-14 13:54:00,056][43668] Updated weights for policy 0, policy_version 560 (0.0011)
358
+ [2023-09-14 13:54:02,193][43669] Updated weights for policy 1, policy_version 520 (0.0010)
359
+ [2023-09-14 13:54:05,875][43668] Updated weights for policy 0, policy_version 570 (0.0010)
360
+ [2023-09-14 13:54:08,252][43669] Updated weights for policy 1, policy_version 530 (0.0011)
361
+ [2023-09-14 13:54:11,719][43668] Updated weights for policy 0, policy_version 580 (0.0011)
362
+ [2023-09-14 13:54:14,489][43669] Updated weights for policy 1, policy_version 540 (0.0011)
363
+ [2023-09-14 13:54:17,267][43668] Updated weights for policy 0, policy_version 590 (0.0011)
364
+ [2023-09-14 13:54:21,027][43669] Updated weights for policy 1, policy_version 550 (0.0011)
365
+ [2023-09-14 13:54:22,791][43668] Updated weights for policy 0, policy_version 600 (0.0010)
366
+ [2023-09-14 13:54:24,330][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000000602_2465792.pth...
367
+ [2023-09-14 13:54:24,330][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000000555_2273280.pth...
368
+ [2023-09-14 13:54:24,396][43239] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000000181_741376.pth
369
+ [2023-09-14 13:54:24,397][43415] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000000169_692224.pth
370
+ [2023-09-14 13:54:24,403][43239] Saving new best policy, reward=2.014!
371
+ [2023-09-14 13:54:27,319][43669] Updated weights for policy 1, policy_version 560 (0.0010)
372
+ [2023-09-14 13:54:28,571][43668] Updated weights for policy 0, policy_version 610 (0.0010)
373
+ [2023-09-14 13:54:33,377][43669] Updated weights for policy 1, policy_version 570 (0.0010)
374
+ [2023-09-14 13:54:34,470][43668] Updated weights for policy 0, policy_version 620 (0.0011)
375
+ [2023-09-14 13:54:39,477][43669] Updated weights for policy 1, policy_version 580 (0.0011)
376
+ [2023-09-14 13:54:40,347][43668] Updated weights for policy 0, policy_version 630 (0.0011)
377
+ [2023-09-14 13:54:45,463][43669] Updated weights for policy 1, policy_version 590 (0.0012)
378
+ [2023-09-14 13:54:46,286][43668] Updated weights for policy 0, policy_version 640 (0.0010)
379
+ [2023-09-14 13:54:49,324][43239] Saving new best policy, reward=2.046!
380
+ [2023-09-14 13:54:51,395][43669] Updated weights for policy 1, policy_version 600 (0.0011)
381
+ [2023-09-14 13:54:52,154][43668] Updated weights for policy 0, policy_version 650 (0.0011)
382
+ [2023-09-14 13:54:54,371][43239] Saving new best policy, reward=2.205!
383
+ [2023-09-14 13:54:57,541][43669] Updated weights for policy 1, policy_version 610 (0.0010)
384
+ [2023-09-14 13:54:57,685][43668] Updated weights for policy 0, policy_version 660 (0.0010)
385
+ [2023-09-14 13:54:59,324][43239] Saving new best policy, reward=2.273!
386
+ [2023-09-14 13:55:02,185][43668] Updated weights for policy 0, policy_version 670 (0.0010)
387
+ [2023-09-14 13:55:06,482][43668] Updated weights for policy 0, policy_version 680 (0.0010)
388
+ [2023-09-14 13:55:06,586][43669] Updated weights for policy 1, policy_version 620 (0.0010)
389
+ [2023-09-14 13:55:10,460][43668] Updated weights for policy 0, policy_version 690 (0.0011)
390
+ [2023-09-14 13:55:14,713][43668] Updated weights for policy 0, policy_version 700 (0.0010)
391
+ [2023-09-14 13:55:17,178][43669] Updated weights for policy 1, policy_version 630 (0.0011)
392
+ [2023-09-14 13:55:19,349][43668] Updated weights for policy 0, policy_version 710 (0.0011)
393
+ [2023-09-14 13:55:24,284][43668] Updated weights for policy 0, policy_version 720 (0.0012)
394
+ [2023-09-14 13:55:25,416][43669] Updated weights for policy 1, policy_version 640 (0.0010)
395
+ [2023-09-14 13:55:29,020][43668] Updated weights for policy 0, policy_version 730 (0.0010)
396
+ [2023-09-14 13:55:33,286][43668] Updated weights for policy 0, policy_version 740 (0.0010)
397
+ [2023-09-14 13:55:34,882][43669] Updated weights for policy 1, policy_version 650 (0.0010)
398
+ [2023-09-14 13:55:37,491][43668] Updated weights for policy 0, policy_version 750 (0.0011)
399
+ [2023-09-14 13:55:41,904][43668] Updated weights for policy 0, policy_version 760 (0.0010)
400
+ [2023-09-14 13:55:43,315][43669] Updated weights for policy 1, policy_version 660 (0.0011)
401
+ [2023-09-14 13:55:44,333][43415] Saving new best policy, reward=2.036!
402
+ [2023-09-14 13:55:46,577][43668] Updated weights for policy 0, policy_version 770 (0.0010)
403
+ [2023-09-14 13:55:50,879][43669] Updated weights for policy 1, policy_version 670 (0.0010)
404
+ [2023-09-14 13:55:51,567][43668] Updated weights for policy 0, policy_version 780 (0.0010)
405
+ [2023-09-14 13:55:54,329][43415] Saving new best policy, reward=2.052!
406
+ [2023-09-14 13:55:56,772][43668] Updated weights for policy 0, policy_version 790 (0.0011)
407
+ [2023-09-14 13:55:57,756][43669] Updated weights for policy 1, policy_version 680 (0.0010)
408
+ [2023-09-14 13:56:02,254][43668] Updated weights for policy 0, policy_version 800 (0.0010)
409
+ [2023-09-14 13:56:03,992][43669] Updated weights for policy 1, policy_version 690 (0.0010)
410
+ [2023-09-14 13:56:08,234][43668] Updated weights for policy 0, policy_version 810 (0.0010)
411
+ [2023-09-14 13:56:09,726][43669] Updated weights for policy 1, policy_version 700 (0.0010)
412
+ [2023-09-14 13:56:14,116][43668] Updated weights for policy 0, policy_version 820 (0.0010)
413
+ [2023-09-14 13:56:15,621][43669] Updated weights for policy 1, policy_version 710 (0.0010)
414
+ [2023-09-14 13:56:20,129][43668] Updated weights for policy 0, policy_version 830 (0.0010)
415
+ [2023-09-14 13:56:21,556][43669] Updated weights for policy 1, policy_version 720 (0.0011)
416
+ [2023-09-14 13:56:24,329][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000000837_3428352.pth...
417
+ [2023-09-14 13:56:24,330][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000000724_2965504.pth...
418
+ [2023-09-14 13:56:24,385][43415] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000000364_1490944.pth
419
+ [2023-09-14 13:56:24,401][43239] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000000389_1593344.pth
420
+ [2023-09-14 13:56:25,807][43668] Updated weights for policy 0, policy_version 840 (0.0010)
421
+ [2023-09-14 13:56:28,134][43669] Updated weights for policy 1, policy_version 730 (0.0010)
422
+ [2023-09-14 13:56:31,282][43668] Updated weights for policy 0, policy_version 850 (0.0011)
423
+ [2023-09-14 13:56:34,599][43669] Updated weights for policy 1, policy_version 740 (0.0011)
424
+ [2023-09-14 13:56:36,896][43668] Updated weights for policy 0, policy_version 860 (0.0010)
425
+ [2023-09-14 13:56:41,432][43669] Updated weights for policy 1, policy_version 750 (0.0013)
426
+ [2023-09-14 13:56:41,861][43668] Updated weights for policy 0, policy_version 870 (0.0011)
427
+ [2023-09-14 13:56:47,444][43668] Updated weights for policy 0, policy_version 880 (0.0009)
428
+ [2023-09-14 13:56:47,634][43669] Updated weights for policy 1, policy_version 760 (0.0010)
429
+ [2023-09-14 13:56:53,426][43668] Updated weights for policy 0, policy_version 890 (0.0010)
430
+ [2023-09-14 13:56:53,652][43669] Updated weights for policy 1, policy_version 770 (0.0010)
431
+ [2023-09-14 13:56:59,482][43669] Updated weights for policy 1, policy_version 780 (0.0010)
432
+ [2023-09-14 13:56:59,753][43668] Updated weights for policy 0, policy_version 900 (0.0013)
433
+ [2023-09-14 13:57:04,791][43669] Updated weights for policy 1, policy_version 790 (0.0010)
434
+ [2023-09-14 13:57:06,675][43668] Updated weights for policy 0, policy_version 910 (0.0011)
435
+ [2023-09-14 13:57:10,042][43669] Updated weights for policy 1, policy_version 800 (0.0011)
436
+ [2023-09-14 13:57:13,627][43668] Updated weights for policy 0, policy_version 920 (0.0011)
437
+ [2023-09-14 13:57:15,385][43669] Updated weights for policy 1, policy_version 810 (0.0011)
438
+ [2023-09-14 13:57:20,033][43668] Updated weights for policy 0, policy_version 930 (0.0011)
439
+ [2023-09-14 13:57:21,112][43669] Updated weights for policy 1, policy_version 820 (0.0011)
440
+ [2023-09-14 13:57:26,449][43669] Updated weights for policy 1, policy_version 830 (0.0011)
441
+ [2023-09-14 13:57:26,687][43668] Updated weights for policy 0, policy_version 940 (0.0010)
442
+ [2023-09-14 13:57:32,441][43668] Updated weights for policy 0, policy_version 950 (0.0011)
443
+ [2023-09-14 13:57:32,992][43669] Updated weights for policy 1, policy_version 840 (0.0010)
444
+ [2023-09-14 13:57:37,329][43668] Updated weights for policy 0, policy_version 960 (0.0010)
445
+ [2023-09-14 13:57:41,855][43668] Updated weights for policy 0, policy_version 970 (0.0010)
446
+ [2023-09-14 13:57:42,666][43669] Updated weights for policy 1, policy_version 850 (0.0010)
447
+ [2023-09-14 13:57:46,029][43668] Updated weights for policy 0, policy_version 980 (0.0016)
448
+ [2023-09-14 13:57:50,025][43668] Updated weights for policy 0, policy_version 990 (0.0010)
449
+ [2023-09-14 13:57:54,049][43668] Updated weights for policy 0, policy_version 1000 (0.0010)
450
+ [2023-09-14 13:57:54,603][43669] Updated weights for policy 1, policy_version 860 (0.0011)
451
+ [2023-09-14 13:57:58,510][43668] Updated weights for policy 0, policy_version 1010 (0.0010)
452
+ [2023-09-14 13:58:03,099][43668] Updated weights for policy 0, policy_version 1020 (0.0010)
453
+ [2023-09-14 13:58:03,562][43669] Updated weights for policy 1, policy_version 870 (0.0011)
454
+ [2023-09-14 13:58:07,997][43668] Updated weights for policy 0, policy_version 1030 (0.0010)
455
+ [2023-09-14 13:58:10,963][43669] Updated weights for policy 1, policy_version 880 (0.0011)
456
+ [2023-09-14 13:58:13,520][43668] Updated weights for policy 0, policy_version 1040 (0.0010)
457
+ [2023-09-14 13:58:14,328][43239] Saving new best policy, reward=2.304!
458
+ [2023-09-14 13:58:17,395][43669] Updated weights for policy 1, policy_version 890 (0.0010)
459
+ [2023-09-14 13:58:19,287][43668] Updated weights for policy 0, policy_version 1050 (0.0011)
460
+ [2023-09-14 13:58:19,324][43239] Saving new best policy, reward=2.442!
461
+ [2023-09-14 13:58:23,714][43669] Updated weights for policy 1, policy_version 900 (0.0010)
462
+ [2023-09-14 13:58:24,329][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000001058_4333568.pth...
463
+ [2023-09-14 13:58:24,383][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000000901_3690496.pth...
464
+ [2023-09-14 13:58:24,390][43239] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000000602_2465792.pth
465
+ [2023-09-14 13:58:24,438][43415] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000000555_2273280.pth
466
+ [2023-09-14 13:58:25,099][43668] Updated weights for policy 0, policy_version 1060 (0.0009)
467
+ [2023-09-14 13:58:29,324][43415] Saving new best policy, reward=2.057!
468
+ [2023-09-14 13:58:30,395][43669] Updated weights for policy 1, policy_version 910 (0.0012)
469
+ [2023-09-14 13:58:30,553][43668] Updated weights for policy 0, policy_version 1070 (0.0010)
470
+ [2023-09-14 13:58:34,329][43415] Saving new best policy, reward=2.138!
471
+ [2023-09-14 13:58:35,767][43668] Updated weights for policy 0, policy_version 1080 (0.0011)
472
+ [2023-09-14 13:58:37,081][43669] Updated weights for policy 1, policy_version 920 (0.0010)
473
+ [2023-09-14 13:58:40,984][43668] Updated weights for policy 0, policy_version 1090 (0.0010)
474
+ [2023-09-14 13:58:43,780][43669] Updated weights for policy 1, policy_version 930 (0.0011)
475
+ [2023-09-14 13:58:46,294][43668] Updated weights for policy 0, policy_version 1100 (0.0011)
476
+ [2023-09-14 13:58:50,338][43669] Updated weights for policy 1, policy_version 940 (0.0011)
477
+ [2023-09-14 13:58:51,627][43668] Updated weights for policy 0, policy_version 1110 (0.0011)
478
+ [2023-09-14 13:58:56,854][43669] Updated weights for policy 1, policy_version 950 (0.0011)
479
+ [2023-09-14 13:58:57,001][43668] Updated weights for policy 0, policy_version 1120 (0.0010)
480
+ [2023-09-14 13:59:02,674][43668] Updated weights for policy 0, policy_version 1130 (0.0011)
481
+ [2023-09-14 13:59:02,957][43669] Updated weights for policy 1, policy_version 960 (0.0010)
482
+ [2023-09-14 13:59:08,010][43669] Updated weights for policy 1, policy_version 970 (0.0011)
483
+ [2023-09-14 13:59:09,955][43668] Updated weights for policy 0, policy_version 1140 (0.0011)
484
+ [2023-09-14 13:59:12,757][43669] Updated weights for policy 1, policy_version 980 (0.0010)
485
+ [2023-09-14 13:59:17,169][43668] Updated weights for policy 0, policy_version 1150 (0.0011)
486
+ [2023-09-14 13:59:17,660][43669] Updated weights for policy 1, policy_version 990 (0.0010)
487
+ [2023-09-14 13:59:22,555][43669] Updated weights for policy 1, policy_version 1000 (0.0010)
488
+ [2023-09-14 13:59:24,548][43668] Updated weights for policy 0, policy_version 1160 (0.0009)
489
+ [2023-09-14 13:59:28,072][43669] Updated weights for policy 1, policy_version 1010 (0.0010)
490
+ [2023-09-14 13:59:30,989][43668] Updated weights for policy 0, policy_version 1170 (0.0010)
491
+ [2023-09-14 13:59:33,835][43669] Updated weights for policy 1, policy_version 1020 (0.0010)
492
+ [2023-09-14 13:59:36,903][43668] Updated weights for policy 0, policy_version 1180 (0.0010)
493
+ [2023-09-14 13:59:39,854][43669] Updated weights for policy 1, policy_version 1030 (0.0012)
494
+ [2023-09-14 13:59:42,736][43668] Updated weights for policy 0, policy_version 1190 (0.0011)
495
+ [2023-09-14 13:59:45,654][43669] Updated weights for policy 1, policy_version 1040 (0.0010)
496
+ [2023-09-14 13:59:50,120][43668] Updated weights for policy 0, policy_version 1200 (0.0011)
497
+ [2023-09-14 13:59:51,136][43669] Updated weights for policy 1, policy_version 1050 (0.0011)
498
+ [2023-09-14 13:59:56,838][43669] Updated weights for policy 1, policy_version 1060 (0.0011)
499
+ [2023-09-14 13:59:57,029][43668] Updated weights for policy 0, policy_version 1210 (0.0010)
500
+ [2023-09-14 14:00:02,230][43669] Updated weights for policy 1, policy_version 1070 (0.0010)
501
+ [2023-09-14 14:00:03,727][43668] Updated weights for policy 0, policy_version 1220 (0.0012)
502
+ [2023-09-14 14:00:04,330][43239] Saving new best policy, reward=2.557!
503
+ [2023-09-14 14:00:08,274][43669] Updated weights for policy 1, policy_version 1080 (0.0010)
504
+ [2023-09-14 14:00:09,527][43668] Updated weights for policy 0, policy_version 1230 (0.0011)
505
+ [2023-09-14 14:00:14,329][43239] Saving new best policy, reward=2.746!
506
+ [2023-09-14 14:00:14,574][43668] Updated weights for policy 0, policy_version 1240 (0.0010)
507
+ [2023-09-14 14:00:15,445][43669] Updated weights for policy 1, policy_version 1090 (0.0011)
508
+ [2023-09-14 14:00:19,324][43239] Saving new best policy, reward=2.841!
509
+ [2023-09-14 14:00:19,564][43668] Updated weights for policy 0, policy_version 1250 (0.0011)
510
+ [2023-09-14 14:00:22,935][43669] Updated weights for policy 1, policy_version 1100 (0.0011)
511
+ [2023-09-14 14:00:24,329][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000001259_5156864.pth...
512
+ [2023-09-14 14:00:24,330][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000001101_4509696.pth...
513
+ [2023-09-14 14:00:24,387][43239] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000000837_3428352.pth
514
+ [2023-09-14 14:00:24,395][43239] Saving new best policy, reward=2.896!
515
+ [2023-09-14 14:00:24,395][43415] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000000724_2965504.pth
516
+ [2023-09-14 14:00:24,558][43668] Updated weights for policy 0, policy_version 1260 (0.0009)
517
+ [2023-09-14 14:00:29,812][43668] Updated weights for policy 0, policy_version 1270 (0.0011)
518
+ [2023-09-14 14:00:30,140][43669] Updated weights for policy 1, policy_version 1110 (0.0011)
519
+ [2023-09-14 14:00:34,676][43668] Updated weights for policy 0, policy_version 1280 (0.0010)
520
+ [2023-09-14 14:00:37,667][43669] Updated weights for policy 1, policy_version 1120 (0.0012)
521
+ [2023-09-14 14:00:39,483][43668] Updated weights for policy 0, policy_version 1290 (0.0010)
522
+ [2023-09-14 14:00:44,503][43669] Updated weights for policy 1, policy_version 1130 (0.0011)
523
+ [2023-09-14 14:00:45,071][43668] Updated weights for policy 0, policy_version 1300 (0.0010)
524
+ [2023-09-14 14:00:50,953][43669] Updated weights for policy 1, policy_version 1140 (0.0011)
525
+ [2023-09-14 14:00:50,963][43668] Updated weights for policy 0, policy_version 1310 (0.0011)
526
+ [2023-09-14 14:00:56,331][43668] Updated weights for policy 0, policy_version 1320 (0.0011)
527
+ [2023-09-14 14:00:58,639][43669] Updated weights for policy 1, policy_version 1150 (0.0011)
528
+ [2023-09-14 14:01:01,760][43668] Updated weights for policy 0, policy_version 1330 (0.0010)
529
+ [2023-09-14 14:01:06,424][43669] Updated weights for policy 1, policy_version 1160 (0.0010)
530
+ [2023-09-14 14:01:06,625][43668] Updated weights for policy 0, policy_version 1340 (0.0012)
531
+ [2023-09-14 14:01:12,076][43668] Updated weights for policy 0, policy_version 1350 (0.0010)
532
+ [2023-09-14 14:01:13,251][43669] Updated weights for policy 1, policy_version 1170 (0.0012)
533
+ [2023-09-14 14:01:14,329][43239] Saving new best policy, reward=2.903!
534
+ [2023-09-14 14:01:17,503][43668] Updated weights for policy 0, policy_version 1360 (0.0011)
535
+ [2023-09-14 14:01:20,144][43669] Updated weights for policy 1, policy_version 1180 (0.0011)
536
+ [2023-09-14 14:01:22,468][43668] Updated weights for policy 0, policy_version 1370 (0.0010)
537
+ [2023-09-14 14:01:27,188][43669] Updated weights for policy 1, policy_version 1190 (0.0011)
+ [2023-09-14 14:01:27,696][43668] Updated weights for policy 0, policy_version 1380 (0.0011)
+ [2023-09-14 14:01:32,922][43668] Updated weights for policy 0, policy_version 1390 (0.0012)
+ [2023-09-14 14:01:33,947][43669] Updated weights for policy 1, policy_version 1200 (0.0010)
+ [2023-09-14 14:01:37,989][43668] Updated weights for policy 0, policy_version 1400 (0.0011)
+ [2023-09-14 14:01:41,125][43669] Updated weights for policy 1, policy_version 1210 (0.0010)
+ [2023-09-14 14:01:42,528][43668] Updated weights for policy 0, policy_version 1410 (0.0010)
+ [2023-09-14 14:01:47,083][43668] Updated weights for policy 0, policy_version 1420 (0.0011)
+ [2023-09-14 14:01:48,574][43669] Updated weights for policy 1, policy_version 1220 (0.0010)
+ [2023-09-14 14:01:52,129][43668] Updated weights for policy 0, policy_version 1430 (0.0010)
+ [2023-09-14 14:01:54,329][43239] Saving new best policy, reward=2.907!
+ [2023-09-14 14:01:55,407][43669] Updated weights for policy 1, policy_version 1230 (0.0011)
+ [2023-09-14 14:01:56,956][43668] Updated weights for policy 0, policy_version 1440 (0.0011)
+ [2023-09-14 14:01:59,324][43239] Saving new best policy, reward=3.081!
+ [2023-09-14 14:02:01,872][43668] Updated weights for policy 0, policy_version 1450 (0.0010)
+ [2023-09-14 14:02:02,697][43669] Updated weights for policy 1, policy_version 1240 (0.0010)
+ [2023-09-14 14:02:07,262][43668] Updated weights for policy 0, policy_version 1460 (0.0011)
+ [2023-09-14 14:02:09,010][43669] Updated weights for policy 1, policy_version 1250 (0.0011)
+ [2023-09-14 14:02:15,012][43669] Updated weights for policy 1, policy_version 1260 (0.0010)
+ [2023-09-14 14:02:15,318][43668] Updated weights for policy 0, policy_version 1470 (0.0014)
+ [2023-09-14 14:02:20,855][43669] Updated weights for policy 1, policy_version 1270 (0.0011)
+ [2023-09-14 14:02:21,542][43668] Updated weights for policy 0, policy_version 1480 (0.0012)
+ [2023-09-14 14:02:24,328][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000001484_6078464.pth...
+ [2023-09-14 14:02:24,328][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000001276_5226496.pth...
+ [2023-09-14 14:02:24,380][43239] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000001058_4333568.pth
+ [2023-09-14 14:02:24,384][43415] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000000901_3690496.pth
+ [2023-09-14 14:02:26,247][43669] Updated weights for policy 1, policy_version 1280 (0.0010)
+ [2023-09-14 14:02:28,208][43668] Updated weights for policy 0, policy_version 1490 (0.0009)
+ [2023-09-14 14:02:31,557][43669] Updated weights for policy 1, policy_version 1290 (0.0009)
+ [2023-09-14 14:02:34,390][43668] Updated weights for policy 0, policy_version 1500 (0.0010)
+ [2023-09-14 14:02:37,138][43669] Updated weights for policy 1, policy_version 1300 (0.0010)
+ [2023-09-14 14:02:39,324][43239] Saving new best policy, reward=3.217!
+ [2023-09-14 14:02:40,217][43668] Updated weights for policy 0, policy_version 1510 (0.0010)
+ [2023-09-14 14:02:43,346][43669] Updated weights for policy 1, policy_version 1310 (0.0011)
+ [2023-09-14 14:02:44,381][43239] Saving new best policy, reward=3.237!
+ [2023-09-14 14:02:46,057][43668] Updated weights for policy 0, policy_version 1520 (0.0011)
+ [2023-09-14 14:02:49,324][43239] Saving new best policy, reward=3.414!
+ [2023-09-14 14:02:49,726][43669] Updated weights for policy 1, policy_version 1320 (0.0010)
+ [2023-09-14 14:02:51,366][43668] Updated weights for policy 0, policy_version 1530 (0.0011)
+ [2023-09-14 14:02:54,329][43239] Saving new best policy, reward=3.605!
+ [2023-09-14 14:02:56,241][43668] Updated weights for policy 0, policy_version 1540 (0.0011)
+ [2023-09-14 14:02:57,382][43669] Updated weights for policy 1, policy_version 1330 (0.0010)
+ [2023-09-14 14:03:01,131][43668] Updated weights for policy 0, policy_version 1550 (0.0010)
+ [2023-09-14 14:03:05,395][43669] Updated weights for policy 1, policy_version 1340 (0.0010)
+ [2023-09-14 14:03:05,700][43668] Updated weights for policy 0, policy_version 1560 (0.0011)
+ [2023-09-14 14:03:10,216][43668] Updated weights for policy 0, policy_version 1570 (0.0010)
+ [2023-09-14 14:03:13,447][43669] Updated weights for policy 1, policy_version 1350 (0.0011)
+ [2023-09-14 14:03:15,329][43668] Updated weights for policy 0, policy_version 1580 (0.0011)
+ [2023-09-14 14:03:19,381][43669] Updated weights for policy 1, policy_version 1360 (0.0010)
+ [2023-09-14 14:03:21,257][43668] Updated weights for policy 0, policy_version 1590 (0.0011)
+ [2023-09-14 14:03:24,748][43669] Updated weights for policy 1, policy_version 1370 (0.0010)
+ [2023-09-14 14:03:27,598][43668] Updated weights for policy 0, policy_version 1600 (0.0011)
+ [2023-09-14 14:03:30,602][43669] Updated weights for policy 1, policy_version 1380 (0.0011)
+ [2023-09-14 14:03:33,335][43668] Updated weights for policy 0, policy_version 1610 (0.0011)
+ [2023-09-14 14:03:34,329][43415] Saving new best policy, reward=2.186!
+ [2023-09-14 14:03:36,785][43669] Updated weights for policy 1, policy_version 1390 (0.0012)
+ [2023-09-14 14:03:39,087][43668] Updated weights for policy 0, policy_version 1620 (0.0011)
+ [2023-09-14 14:03:39,324][43239] Saving new best policy, reward=3.657!
+ [2023-09-14 14:03:39,338][43415] Saving new best policy, reward=2.286!
+ [2023-09-14 14:03:43,206][43669] Updated weights for policy 1, policy_version 1400 (0.0011)
+ [2023-09-14 14:03:44,395][43239] Saving new best policy, reward=3.679!
+ [2023-09-14 14:03:44,397][43668] Updated weights for policy 0, policy_version 1630 (0.0010)
+ [2023-09-14 14:03:49,196][43668] Updated weights for policy 0, policy_version 1640 (0.0010)
+ [2023-09-14 14:03:49,324][43239] Saving new best policy, reward=3.955!
+ [2023-09-14 14:03:50,986][43669] Updated weights for policy 1, policy_version 1410 (0.0011)
+ [2023-09-14 14:03:53,609][43668] Updated weights for policy 0, policy_version 1650 (0.0009)
+ [2023-09-14 14:03:54,329][43239] Saving new best policy, reward=4.199!
+ [2023-09-14 14:03:57,984][43668] Updated weights for policy 0, policy_version 1660 (0.0010)
+ [2023-09-14 14:03:59,895][43669] Updated weights for policy 1, policy_version 1420 (0.0011)
+ [2023-09-14 14:04:02,228][43668] Updated weights for policy 0, policy_version 1670 (0.0011)
+ [2023-09-14 14:04:04,328][43239] Saving new best policy, reward=4.518!
+ [2023-09-14 14:04:06,386][43668] Updated weights for policy 0, policy_version 1680 (0.0012)
+ [2023-09-14 14:04:09,040][43669] Updated weights for policy 1, policy_version 1430 (0.0011)
+ [2023-09-14 14:04:09,324][43415] Saving new best policy, reward=2.528!
+ [2023-09-14 14:04:10,738][43668] Updated weights for policy 0, policy_version 1690 (0.0010)
+ [2023-09-14 14:04:15,337][43668] Updated weights for policy 0, policy_version 1700 (0.0011)
+ [2023-09-14 14:04:17,431][43669] Updated weights for policy 1, policy_version 1440 (0.0010)
+ [2023-09-14 14:04:20,480][43668] Updated weights for policy 0, policy_version 1710 (0.0010)
+ [2023-09-14 14:04:23,985][43669] Updated weights for policy 1, policy_version 1450 (0.0010)
+ [2023-09-14 14:04:24,331][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000001717_7032832.pth...
+ [2023-09-14 14:04:24,331][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000001450_5939200.pth...
+ [2023-09-14 14:04:24,412][43239] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000001259_5156864.pth
+ [2023-09-14 14:04:24,412][43415] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000001101_4509696.pth
+ [2023-09-14 14:04:24,421][43415] Saving new best policy, reward=2.671!
+ [2023-09-14 14:04:25,684][43668] Updated weights for policy 0, policy_version 1720 (0.0010)
+ [2023-09-14 14:04:29,324][43415] Saving new best policy, reward=2.826!
+ [2023-09-14 14:04:30,083][43669] Updated weights for policy 1, policy_version 1460 (0.0010)
+ [2023-09-14 14:04:31,478][43668] Updated weights for policy 0, policy_version 1730 (0.0010)
+ [2023-09-14 14:04:35,824][43669] Updated weights for policy 1, policy_version 1470 (0.0011)
+ [2023-09-14 14:04:37,501][43668] Updated weights for policy 0, policy_version 1740 (0.0011)
+ [2023-09-14 14:04:40,997][43669] Updated weights for policy 1, policy_version 1480 (0.0009)
+ [2023-09-14 14:04:44,190][43668] Updated weights for policy 0, policy_version 1750 (0.0010)
+ [2023-09-14 14:04:46,085][43669] Updated weights for policy 1, policy_version 1490 (0.0010)
+ [2023-09-14 14:04:50,911][43669] Updated weights for policy 1, policy_version 1500 (0.0010)
+ [2023-09-14 14:04:51,239][43668] Updated weights for policy 0, policy_version 1760 (0.0010)
+ [2023-09-14 14:04:54,328][43415] Saving new best policy, reward=3.248!
+ [2023-09-14 14:04:56,277][43669] Updated weights for policy 1, policy_version 1510 (0.0011)
+ [2023-09-14 14:04:57,517][43668] Updated weights for policy 0, policy_version 1770 (0.0010)
+ [2023-09-14 14:04:59,324][43415] Saving new best policy, reward=3.498!
+ [2023-09-14 14:05:01,820][43669] Updated weights for policy 1, policy_version 1520 (0.0010)
+ [2023-09-14 14:05:03,636][43668] Updated weights for policy 0, policy_version 1780 (0.0011)
+ [2023-09-14 14:05:04,331][43415] Saving new best policy, reward=3.723!
+ [2023-09-14 14:05:07,829][43669] Updated weights for policy 1, policy_version 1530 (0.0010)
+ [2023-09-14 14:05:09,142][43668] Updated weights for policy 0, policy_version 1790 (0.0012)
+ [2023-09-14 14:05:13,996][43668] Updated weights for policy 0, policy_version 1800 (0.0010)
+ [2023-09-14 14:05:15,774][43669] Updated weights for policy 1, policy_version 1540 (0.0011)
+ [2023-09-14 14:05:18,355][43668] Updated weights for policy 0, policy_version 1810 (0.0010)
+ [2023-09-14 14:05:22,930][43668] Updated weights for policy 0, policy_version 1820 (0.0010)
+ [2023-09-14 14:05:24,329][43239] Saving new best policy, reward=4.656!
+ [2023-09-14 14:05:25,117][43669] Updated weights for policy 1, policy_version 1550 (0.0010)
+ [2023-09-14 14:05:27,325][43668] Updated weights for policy 0, policy_version 1830 (0.0011)
+ [2023-09-14 14:05:29,324][43239] Saving new best policy, reward=5.256!
+ [2023-09-14 14:05:31,523][43668] Updated weights for policy 0, policy_version 1840 (0.0010)
+ [2023-09-14 14:05:34,408][43669] Updated weights for policy 1, policy_version 1560 (0.0011)
+ [2023-09-14 14:05:36,131][43668] Updated weights for policy 0, policy_version 1850 (0.0010)
+ [2023-09-14 14:05:40,763][43668] Updated weights for policy 0, policy_version 1860 (0.0010)
+ [2023-09-14 14:05:42,313][43669] Updated weights for policy 1, policy_version 1570 (0.0010)
+ [2023-09-14 14:05:45,689][43668] Updated weights for policy 0, policy_version 1870 (0.0010)
+ [2023-09-14 14:05:49,395][43669] Updated weights for policy 1, policy_version 1580 (0.0010)
+ [2023-09-14 14:05:50,783][43668] Updated weights for policy 0, policy_version 1880 (0.0011)
+ [2023-09-14 14:05:55,908][43668] Updated weights for policy 0, policy_version 1890 (0.0011)
+ [2023-09-14 14:05:56,312][43669] Updated weights for policy 1, policy_version 1590 (0.0010)
+ [2023-09-14 14:06:00,933][43668] Updated weights for policy 0, policy_version 1900 (0.0010)
+ [2023-09-14 14:06:02,855][43669] Updated weights for policy 1, policy_version 1600 (0.0011)
+ [2023-09-14 14:06:06,464][43668] Updated weights for policy 0, policy_version 1910 (0.0010)
+ [2023-09-14 14:06:08,695][43669] Updated weights for policy 1, policy_version 1610 (0.0010)
+ [2023-09-14 14:06:09,324][43415] Saving new best policy, reward=3.730!
+ [2023-09-14 14:06:12,637][43668] Updated weights for policy 0, policy_version 1920 (0.0009)
+ [2023-09-14 14:06:14,243][43669] Updated weights for policy 1, policy_version 1620 (0.0010)
+ [2023-09-14 14:06:14,330][43415] Saving new best policy, reward=3.903!
+ [2023-09-14 14:06:18,729][43668] Updated weights for policy 0, policy_version 1930 (0.0009)
+ [2023-09-14 14:06:19,324][43415] Saving new best policy, reward=4.155!
+ [2023-09-14 14:06:20,153][43669] Updated weights for policy 1, policy_version 1630 (0.0011)
+ [2023-09-14 14:06:24,329][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000001939_7942144.pth...
+ [2023-09-14 14:06:24,358][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000001637_6705152.pth...
+ [2023-09-14 14:06:24,386][43239] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000001484_6078464.pth
+ [2023-09-14 14:06:24,415][43415] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000001276_5226496.pth
+ [2023-09-14 14:06:24,479][43668] Updated weights for policy 0, policy_version 1940 (0.0010)
+ [2023-09-14 14:06:26,261][43669] Updated weights for policy 1, policy_version 1640 (0.0010)
+ [2023-09-14 14:06:29,326][43415] Saving new best policy, reward=4.249!
+ [2023-09-14 14:06:29,845][43668] Updated weights for policy 0, policy_version 1950 (0.0010)
+ [2023-09-14 14:06:32,623][43669] Updated weights for policy 1, policy_version 1650 (0.0011)
+ [2023-09-14 14:06:34,330][43415] Saving new best policy, reward=4.344!
+ [2023-09-14 14:06:35,480][43668] Updated weights for policy 0, policy_version 1960 (0.0010)
+ [2023-09-14 14:06:38,290][43669] Updated weights for policy 1, policy_version 1660 (0.0011)
+ [2023-09-14 14:06:41,588][43668] Updated weights for policy 0, policy_version 1970 (0.0010)
+ [2023-09-14 14:06:43,979][43669] Updated weights for policy 1, policy_version 1670 (0.0010)
+ [2023-09-14 14:06:44,329][43415] Saving new best policy, reward=4.591!
+ [2023-09-14 14:06:47,329][43668] Updated weights for policy 0, policy_version 1980 (0.0011)
+ [2023-09-14 14:06:49,325][43415] Saving new best policy, reward=4.697!
+ [2023-09-14 14:06:50,349][43669] Updated weights for policy 1, policy_version 1680 (0.0010)
+ [2023-09-14 14:06:52,751][43668] Updated weights for policy 0, policy_version 1990 (0.0010)
+ [2023-09-14 14:06:54,330][43415] Saving new best policy, reward=4.854!
+ [2023-09-14 14:06:57,662][43669] Updated weights for policy 1, policy_version 1690 (0.0010)
+ [2023-09-14 14:06:57,761][43668] Updated weights for policy 0, policy_version 2000 (0.0010)
+ [2023-09-14 14:07:02,336][43668] Updated weights for policy 0, policy_version 2010 (0.0010)
+ [2023-09-14 14:07:04,331][43415] Saving new best policy, reward=4.907!
+ [2023-09-14 14:07:06,607][43668] Updated weights for policy 0, policy_version 2020 (0.0010)
+ [2023-09-14 14:07:07,024][43669] Updated weights for policy 1, policy_version 1700 (0.0012)
+ [2023-09-14 14:07:11,131][43668] Updated weights for policy 0, policy_version 2030 (0.0010)
+ [2023-09-14 14:07:15,677][43669] Updated weights for policy 1, policy_version 1710 (0.0010)
+ [2023-09-14 14:07:15,872][43668] Updated weights for policy 0, policy_version 2040 (0.0010)
+ [2023-09-14 14:07:20,539][43668] Updated weights for policy 0, policy_version 2050 (0.0011)
+ [2023-09-14 14:07:23,590][43669] Updated weights for policy 1, policy_version 1720 (0.0010)
+ [2023-09-14 14:07:25,341][43668] Updated weights for policy 0, policy_version 2060 (0.0010)
+ [2023-09-14 14:07:29,325][43239] Saving new best policy, reward=5.297!
+ [2023-09-14 14:07:30,256][43668] Updated weights for policy 0, policy_version 2070 (0.0010)
+ [2023-09-14 14:07:30,446][43669] Updated weights for policy 1, policy_version 1730 (0.0010)
+ [2023-09-14 14:07:34,328][43239] Saving new best policy, reward=5.849!
+ [2023-09-14 14:07:35,470][43668] Updated weights for policy 0, policy_version 2080 (0.0011)
+ [2023-09-14 14:07:37,088][43669] Updated weights for policy 1, policy_version 1740 (0.0011)
+ [2023-09-14 14:07:39,324][43239] Saving new best policy, reward=5.875!
+ [2023-09-14 14:07:41,059][43668] Updated weights for policy 0, policy_version 2090 (0.0012)
+ [2023-09-14 14:07:43,191][43669] Updated weights for policy 1, policy_version 1750 (0.0011)
+ [2023-09-14 14:07:46,409][43668] Updated weights for policy 0, policy_version 2100 (0.0010)
+ [2023-09-14 14:07:49,377][43239] Saving new best policy, reward=6.033!
+ [2023-09-14 14:07:50,130][43669] Updated weights for policy 1, policy_version 1760 (0.0010)
+ [2023-09-14 14:07:51,362][43668] Updated weights for policy 0, policy_version 2110 (0.0011)
+ [2023-09-14 14:07:54,329][43239] Saving new best policy, reward=6.162!
+ [2023-09-14 14:07:55,955][43668] Updated weights for policy 0, policy_version 2120 (0.0010)
+ [2023-09-14 14:07:57,963][43669] Updated weights for policy 1, policy_version 1770 (0.0011)
+ [2023-09-14 14:08:00,239][43668] Updated weights for policy 0, policy_version 2130 (0.0010)
+ [2023-09-14 14:08:04,504][43668] Updated weights for policy 0, policy_version 2140 (0.0010)
+ [2023-09-14 14:08:06,959][43669] Updated weights for policy 1, policy_version 1780 (0.0010)
+ [2023-09-14 14:08:08,635][43668] Updated weights for policy 0, policy_version 2150 (0.0011)
+ [2023-09-14 14:08:12,606][43668] Updated weights for policy 0, policy_version 2160 (0.0011)
+ [2023-09-14 14:08:16,598][43668] Updated weights for policy 0, policy_version 2170 (0.0011)
+ [2023-09-14 14:08:17,167][43669] Updated weights for policy 1, policy_version 1790 (0.0011)
+ [2023-09-14 14:08:20,408][43668] Updated weights for policy 0, policy_version 2180 (0.0011)
+ [2023-09-14 14:08:24,215][43668] Updated weights for policy 0, policy_version 2190 (0.0011)
+ [2023-09-14 14:08:24,329][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000001796_7356416.pth...
+ [2023-09-14 14:08:24,330][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000002190_8970240.pth...
+ [2023-09-14 14:08:24,389][43415] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000001450_5939200.pth
+ [2023-09-14 14:08:24,391][43239] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000001717_7032832.pth
+ [2023-09-14 14:08:27,510][43669] Updated weights for policy 1, policy_version 1800 (0.0011)
+ [2023-09-14 14:08:28,276][43668] Updated weights for policy 0, policy_version 2200 (0.0010)
+ [2023-09-14 14:08:32,885][43668] Updated weights for policy 0, policy_version 2210 (0.0011)
+ [2023-09-14 14:08:35,856][43669] Updated weights for policy 1, policy_version 1810 (0.0011)
+ [2023-09-14 14:08:37,549][43668] Updated weights for policy 0, policy_version 2220 (0.0010)
+ [2023-09-14 14:08:42,052][43668] Updated weights for policy 0, policy_version 2230 (0.0011)
+ [2023-09-14 14:08:44,197][43669] Updated weights for policy 1, policy_version 1820 (0.0011)
+ [2023-09-14 14:08:46,655][43668] Updated weights for policy 0, policy_version 2240 (0.0012)
+ [2023-09-14 14:08:51,312][43668] Updated weights for policy 0, policy_version 2250 (0.0010)
+ [2023-09-14 14:08:52,659][43669] Updated weights for policy 1, policy_version 1830 (0.0010)
+ [2023-09-14 14:08:56,179][43668] Updated weights for policy 0, policy_version 2260 (0.0011)
+ [2023-09-14 14:08:59,325][43239] Saving new best policy, reward=6.441!
+ [2023-09-14 14:08:59,810][43669] Updated weights for policy 1, policy_version 1840 (0.0010)
+ [2023-09-14 14:09:01,065][43668] Updated weights for policy 0, policy_version 2270 (0.0011)
+ [2023-09-14 14:09:06,255][43669] Updated weights for policy 1, policy_version 1850 (0.0010)
+ [2023-09-14 14:09:06,260][43668] Updated weights for policy 0, policy_version 2280 (0.0010)
+ [2023-09-14 14:09:11,302][43668] Updated weights for policy 0, policy_version 2290 (0.0011)
+ [2023-09-14 14:09:12,812][43669] Updated weights for policy 1, policy_version 1860 (0.0010)
+ [2023-09-14 14:09:16,875][43668] Updated weights for policy 0, policy_version 2300 (0.0011)
+ [2023-09-14 14:09:18,874][43669] Updated weights for policy 1, policy_version 1870 (0.0010)
+ [2023-09-14 14:09:22,917][43668] Updated weights for policy 0, policy_version 2310 (0.0010)
+ [2023-09-14 14:09:24,291][43669] Updated weights for policy 1, policy_version 1880 (0.0010)
+ [2023-09-14 14:09:29,577][43669] Updated weights for policy 1, policy_version 1890 (0.0010)
+ [2023-09-14 14:09:29,583][43668] Updated weights for policy 0, policy_version 2320 (0.0010)
+ [2023-09-14 14:09:35,076][43669] Updated weights for policy 1, policy_version 1900 (0.0011)
+ [2023-09-14 14:09:36,208][43668] Updated weights for policy 0, policy_version 2330 (0.0009)
+ [2023-09-14 14:09:40,581][43669] Updated weights for policy 1, policy_version 1910 (0.0010)
+ [2023-09-14 14:09:43,163][43668] Updated weights for policy 0, policy_version 2340 (0.0010)
+ [2023-09-14 14:09:44,328][43415] Saving new best policy, reward=5.063!
+ [2023-09-14 14:09:45,925][43669] Updated weights for policy 1, policy_version 1920 (0.0011)
+ [2023-09-14 14:09:49,327][43239] Saving new best policy, reward=6.562!
+ [2023-09-14 14:09:49,878][43668] Updated weights for policy 0, policy_version 2350 (0.0011)
+ [2023-09-14 14:09:51,789][43669] Updated weights for policy 1, policy_version 1930 (0.0010)
+ [2023-09-14 14:09:54,329][43239] Saving new best policy, reward=6.657!
+ [2023-09-14 14:09:56,505][43668] Updated weights for policy 0, policy_version 2360 (0.0011)
+ [2023-09-14 14:09:57,622][43669] Updated weights for policy 1, policy_version 1940 (0.0010)
+ [2023-09-14 14:09:59,324][43239] Saving new best policy, reward=6.858!
+ [2023-09-14 14:10:03,057][43668] Updated weights for policy 0, policy_version 2370 (0.0011)
+ [2023-09-14 14:10:03,297][43669] Updated weights for policy 1, policy_version 1950 (0.0011)
+ [2023-09-14 14:10:04,350][43239] Saving new best policy, reward=6.891!
+ [2023-09-14 14:10:09,112][43668] Updated weights for policy 0, policy_version 2380 (0.0010)
+ [2023-09-14 14:10:09,130][43669] Updated weights for policy 1, policy_version 1960 (0.0010)
+ [2023-09-14 14:10:14,832][43669] Updated weights for policy 1, policy_version 1970 (0.0010)
+ [2023-09-14 14:10:15,334][43668] Updated weights for policy 0, policy_version 2390 (0.0010)
+ [2023-09-14 14:10:20,940][43668] Updated weights for policy 0, policy_version 2400 (0.0010)
+ [2023-09-14 14:10:21,091][43669] Updated weights for policy 1, policy_version 1980 (0.0012)
+ [2023-09-14 14:10:24,329][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000002405_9850880.pth...
+ [2023-09-14 14:10:24,329][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000001985_8130560.pth...
+ [2023-09-14 14:10:24,392][43239] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000001939_7942144.pth
+ [2023-09-14 14:10:24,399][43415] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000001637_6705152.pth
+ [2023-09-14 14:10:26,867][43669] Updated weights for policy 1, policy_version 1990 (0.0010)
+ [2023-09-14 14:10:26,962][43668] Updated weights for policy 0, policy_version 2410 (0.0011)
+ [2023-09-14 14:10:32,579][43669] Updated weights for policy 1, policy_version 2000 (0.0011)
+ [2023-09-14 14:10:32,938][43668] Updated weights for policy 0, policy_version 2420 (0.0011)
+ [2023-09-14 14:10:38,630][43669] Updated weights for policy 1, policy_version 2010 (0.0011)
+ [2023-09-14 14:10:38,934][43668] Updated weights for policy 0, policy_version 2430 (0.0011)
+ [2023-09-14 14:10:39,324][43415] Saving new best policy, reward=5.563!
+ [2023-09-14 14:10:44,330][43415] Saving new best policy, reward=5.930!
+ [2023-09-14 14:10:44,479][43669] Updated weights for policy 1, policy_version 2020 (0.0011)
+ [2023-09-14 14:10:44,985][43668] Updated weights for policy 0, policy_version 2440 (0.0010)
+ [2023-09-14 14:10:49,324][43415] Saving new best policy, reward=6.351!
+ [2023-09-14 14:10:50,287][43669] Updated weights for policy 1, policy_version 2030 (0.0009)
+ [2023-09-14 14:10:50,879][43668] Updated weights for policy 0, policy_version 2450 (0.0010)
+ [2023-09-14 14:10:54,330][43415] Saving new best policy, reward=6.748!
+ [2023-09-14 14:10:55,818][43669] Updated weights for policy 1, policy_version 2040 (0.0010)
+ [2023-09-14 14:10:57,371][43668] Updated weights for policy 0, policy_version 2460 (0.0010)
+ [2023-09-14 14:11:01,204][43669] Updated weights for policy 1, policy_version 2050 (0.0011)
+ [2023-09-14 14:11:04,281][43668] Updated weights for policy 0, policy_version 2470 (0.0011)
+ [2023-09-14 14:11:07,091][43669] Updated weights for policy 1, policy_version 2060 (0.0010)
+ [2023-09-14 14:11:09,559][43668] Updated weights for policy 0, policy_version 2480 (0.0010)
+ [2023-09-14 14:11:13,798][43669] Updated weights for policy 1, policy_version 2070 (0.0010)
+ [2023-09-14 14:11:14,800][43668] Updated weights for policy 0, policy_version 2490 (0.0011)
+ [2023-09-14 14:11:19,886][43668] Updated weights for policy 0, policy_version 2500 (0.0011)
+ [2023-09-14 14:11:20,074][43669] Updated weights for policy 1, policy_version 2080 (0.0011)
+ [2023-09-14 14:11:24,573][43668] Updated weights for policy 0, policy_version 2510 (0.0010)
+ [2023-09-14 14:11:27,680][43669] Updated weights for policy 1, policy_version 2090 (0.0011)
+ [2023-09-14 14:11:29,196][43668] Updated weights for policy 0, policy_version 2520 (0.0009)
+ [2023-09-14 14:11:33,781][43668] Updated weights for policy 0, policy_version 2530 (0.0010)
+ [2023-09-14 14:11:35,887][43669] Updated weights for policy 1, policy_version 2100 (0.0010)
+ [2023-09-14 14:11:38,616][43668] Updated weights for policy 0, policy_version 2540 (0.0012)
+ [2023-09-14 14:11:43,631][43668] Updated weights for policy 0, policy_version 2550 (0.0011)
+ [2023-09-14 14:11:43,689][43669] Updated weights for policy 1, policy_version 2110 (0.0010)
+ [2023-09-14 14:11:48,954][43668] Updated weights for policy 0, policy_version 2560 (0.0010)
+ [2023-09-14 14:11:50,154][43669] Updated weights for policy 1, policy_version 2120 (0.0011)
+ [2023-09-14 14:11:54,674][43668] Updated weights for policy 0, policy_version 2570 (0.0010)
+ [2023-09-14 14:11:55,888][43669] Updated weights for policy 1, policy_version 2130 (0.0010)
+ [2023-09-14 14:12:00,328][43668] Updated weights for policy 0, policy_version 2580 (0.0011)
+ [2023-09-14 14:12:01,827][43669] Updated weights for policy 1, policy_version 2140 (0.0010)
+ [2023-09-14 14:12:06,206][43668] Updated weights for policy 0, policy_version 2590 (0.0011)
+ [2023-09-14 14:12:07,757][43669] Updated weights for policy 1, policy_version 2150 (0.0010)
+ [2023-09-14 14:12:12,732][43668] Updated weights for policy 0, policy_version 2600 (0.0011)
+ [2023-09-14 14:12:13,422][43669] Updated weights for policy 1, policy_version 2160 (0.0010)
+ [2023-09-14 14:12:19,072][43669] Updated weights for policy 1, policy_version 2170 (0.0010)
+ [2023-09-14 14:12:19,090][43668] Updated weights for policy 0, policy_version 2610 (0.0010)
+ [2023-09-14 14:12:24,330][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000002618_10723328.pth...
+ [2023-09-14 14:12:24,330][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000002179_8925184.pth...
+ [2023-09-14 14:12:24,392][43415] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000001796_7356416.pth
+ [2023-09-14 14:12:24,392][43239] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000002190_8970240.pth
+ [2023-09-14 14:12:24,399][43415] Saving new best policy, reward=6.862!
+ [2023-09-14 14:12:24,918][43669] Updated weights for policy 1, policy_version 2180 (0.0010)
+ [2023-09-14 14:12:25,140][43668] Updated weights for policy 0, policy_version 2620 (0.0010)
+ [2023-09-14 14:12:29,324][43415] Saving new best policy, reward=7.037!
+ [2023-09-14 14:12:30,769][43669] Updated weights for policy 1, policy_version 2190 (0.0011)
+ [2023-09-14 14:12:31,354][43668] Updated weights for policy 0, policy_version 2630 (0.0011)
+ [2023-09-14 14:12:34,330][43415] Saving new best policy, reward=7.231!
+ [2023-09-14 14:12:36,493][43669] Updated weights for policy 1, policy_version 2200 (0.0011)
+ [2023-09-14 14:12:37,565][43668] Updated weights for policy 0, policy_version 2640 (0.0010)
+ [2023-09-14 14:12:39,328][43415] Saving new best policy, reward=7.554!
+ [2023-09-14 14:12:42,128][43669] Updated weights for policy 1, policy_version 2210 (0.0010)
+ [2023-09-14 14:12:44,185][43668] Updated weights for policy 0, policy_version 2650 (0.0010)
+ [2023-09-14 14:12:47,484][43669] Updated weights for policy 1, policy_version 2220 (0.0011)
+ [2023-09-14 14:12:50,742][43668] Updated weights for policy 0, policy_version 2660 (0.0010)
+ [2023-09-14 14:12:53,386][43669] Updated weights for policy 1, policy_version 2230 (0.0010)
+ [2023-09-14 14:12:56,353][43668] Updated weights for policy 0, policy_version 2670 (0.0010)
+ [2023-09-14 14:12:59,589][43669] Updated weights for policy 1, policy_version 2240 (0.0010)
+ [2023-09-14 14:13:02,150][43668] Updated weights for policy 0, policy_version 2680 (0.0010)
+ [2023-09-14 14:13:05,423][43669] Updated weights for policy 1, policy_version 2250 (0.0010)
+ [2023-09-14 14:13:08,020][43668] Updated weights for policy 0, policy_version 2690 (0.0010)
+ [2023-09-14 14:13:10,908][43669] Updated weights for policy 1, policy_version 2260 (0.0011)
+ [2023-09-14 14:13:13,846][43668] Updated weights for policy 0, policy_version 2700 (0.0011)
+ [2023-09-14 14:13:16,425][43669] Updated weights for policy 1, policy_version 2270 (0.0011)
+ [2023-09-14 14:13:20,104][43668] Updated weights for policy 0, policy_version 2710 (0.0011)
+ [2023-09-14 14:13:21,670][43669] Updated weights for policy 1, policy_version 2280 (0.0010)
+ [2023-09-14 14:13:25,940][43668] Updated weights for policy 0, policy_version 2720 (0.0011)
+ [2023-09-14 14:13:27,244][43669] Updated weights for policy 1, policy_version 2290 (0.0011)
+ [2023-09-14 14:13:31,252][43668] Updated weights for policy 0, policy_version 2730 (0.0010)
+ [2023-09-14 14:13:33,521][43669] Updated weights for policy 1, policy_version 2300 (0.0010)
+ [2023-09-14 14:13:34,330][43415] Saving new best policy, reward=7.608!
+ [2023-09-14 14:13:36,369][43668] Updated weights for policy 0, policy_version 2740 (0.0011)
+ [2023-09-14 14:13:40,088][43669] Updated weights for policy 1, policy_version 2310 (0.0010)
+ [2023-09-14 14:13:41,556][43668] Updated weights for policy 0, policy_version 2750 (0.0010)
+ [2023-09-14 14:13:44,329][43415] Saving new best policy, reward=7.796!
+ [2023-09-14 14:13:46,341][43669] Updated weights for policy 1, policy_version 2320 (0.0010)
+ [2023-09-14 14:13:46,972][43668] Updated weights for policy 0, policy_version 2760 (0.0010)
+ [2023-09-14 14:13:49,324][43415] Saving new best policy, reward=7.836!
+ [2023-09-14 14:13:51,948][43668] Updated weights for policy 0, policy_version 2770 (0.0010)
+ [2023-09-14 14:13:53,199][43669] Updated weights for policy 1, policy_version 2330 (0.0010)
+ [2023-09-14 14:13:54,329][43415] Saving new best policy, reward=7.995!
+ [2023-09-14 14:13:57,247][43668] Updated weights for policy 0, policy_version 2780 (0.0011)
+ [2023-09-14 14:13:59,324][43415] Saving new best policy, reward=8.055!
+ [2023-09-14 14:13:59,620][43669] Updated weights for policy 1, policy_version 2340 (0.0010)
+ [2023-09-14 14:14:02,504][43668] Updated weights for policy 0, policy_version 2790 (0.0010)
+ [2023-09-14 14:14:04,330][43415] Saving new best policy, reward=8.121!
+ [2023-09-14 14:14:06,490][43669] Updated weights for policy 1, policy_version 2350 (0.0010)
+ [2023-09-14 14:14:07,319][43668] Updated weights for policy 0, policy_version 2800 (0.0010)
+ [2023-09-14 14:14:09,325][43415] Saving new best policy, reward=8.260!
+ [2023-09-14 14:14:11,961][43668] Updated weights for policy 0, policy_version 2810 (0.0010)
+ [2023-09-14 14:14:14,081][43669] Updated weights for policy 1, policy_version 2360 (0.0011)
+ [2023-09-14 14:14:16,729][43668] Updated weights for policy 0, policy_version 2820 (0.0010)
+ [2023-09-14 14:14:21,332][43668] Updated weights for policy 0, policy_version 2830 (0.0010)
+ [2023-09-14 14:14:22,188][43669] Updated weights for policy 1, policy_version 2370 (0.0012)
+ [2023-09-14 14:14:24,330][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000002372_9715712.pth...
+ [2023-09-14 14:14:24,330][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000002837_11620352.pth...
+ [2023-09-14 14:14:24,390][43415] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000001985_8130560.pth
+ [2023-09-14 14:14:24,390][43239] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000002405_9850880.pth
+ [2023-09-14 14:14:25,556][43668] Updated weights for policy 0, policy_version 2840 (0.0010)
+ [2023-09-14 14:14:29,687][43668] Updated weights for policy 0, policy_version 2850 (0.0010)
+ [2023-09-14 14:14:32,262][43669] Updated weights for policy 1, policy_version 2380 (0.0010)
+ [2023-09-14 14:14:33,713][43668] Updated weights for policy 0, policy_version 2860 (0.0010)
+ [2023-09-14 14:14:37,914][43668] Updated weights for policy 0, policy_version 2870 (0.0010)
+ [2023-09-14 14:14:41,966][43668] Updated weights for policy 0, policy_version 2880 (0.0010)
+ [2023-09-14 14:14:43,014][43669] Updated weights for policy 1, policy_version 2390 (0.0010)
+ [2023-09-14 14:14:44,329][43239] Saving new best policy, reward=6.896!
+ [2023-09-14 14:14:45,883][43668] Updated weights for policy 0, policy_version 2890 (0.0011)
+ [2023-09-14 14:14:49,919][43668] Updated weights for policy 0, policy_version 2900 (0.0010)
+ [2023-09-14 14:14:52,850][43669] Updated weights for policy 1, policy_version 2400 (0.0011)
+ [2023-09-14 14:14:54,189][43668] Updated weights for policy 0, policy_version 2910 (0.0010)
+ [2023-09-14 14:14:54,328][43239] Saving new best policy, reward=7.266!
+ [2023-09-14 14:14:59,384][43239] Saving new best policy, reward=7.369!
+ [2023-09-14 14:14:59,385][43668] Updated weights for policy 0, policy_version 2920 (0.0010)
+ [2023-09-14 14:14:59,878][43669] Updated weights for policy 1, policy_version 2410 (0.0011)
+ [2023-09-14 14:15:04,329][43239] Saving new best policy, reward=7.444!
+ [2023-09-14 14:15:04,576][43668] Updated weights for policy 0, policy_version 2930 (0.0011)
+ [2023-09-14 14:15:06,151][43669] Updated weights for policy 1, policy_version 2420 (0.0010)
+ [2023-09-14 14:15:09,358][43239] Saving new best policy, reward=7.643!
+ [2023-09-14 14:15:10,687][43668] Updated weights for policy 0, policy_version 2940 (0.0010)
+ [2023-09-14 14:15:11,436][43669] Updated weights for policy 1, policy_version 2430 (0.0011)
+ [2023-09-14 14:15:14,329][43239] Saving new best policy, reward=7.752!
+ [2023-09-14 14:15:16,324][43669] Updated weights for policy 1, policy_version 2440 (0.0010)
+ [2023-09-14 14:15:17,568][43415] Stopping Batcher_1...
+ [2023-09-14 14:15:17,568][43239] Stopping Batcher_0...
+ [2023-09-14 14:15:17,568][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000002950_12083200.pth...
+ [2023-09-14 14:15:17,569][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000002442_10002432.pth...
+ [2023-09-14 14:15:17,569][43668] Updated weights for policy 0, policy_version 2950 (0.0009)
+ [2023-09-14 14:15:17,569][43239] Loop batcher_evt_loop terminating...
+ [2023-09-14 14:15:17,569][43415] Loop batcher_evt_loop terminating...
+ [2023-09-14 14:15:17,583][43671] Stopping RolloutWorker_w1...
+ [2023-09-14 14:15:17,583][43670] Stopping RolloutWorker_w0...
+ [2023-09-14 14:15:17,583][43675] Stopping RolloutWorker_w5...
+ [2023-09-14 14:15:17,584][43728] Stopping RolloutWorker_w7...
+ [2023-09-14 14:15:17,584][43671] Loop rollout_proc1_evt_loop terminating...
+ [2023-09-14 14:15:17,584][43672] Stopping RolloutWorker_w2...
+ [2023-09-14 14:15:17,584][43670] Loop rollout_proc0_evt_loop terminating...
+ [2023-09-14 14:15:17,584][43675] Loop rollout_proc5_evt_loop terminating...
+ [2023-09-14 14:15:17,584][43728] Loop rollout_proc7_evt_loop terminating...
+ [2023-09-14 14:15:17,584][43672] Loop rollout_proc2_evt_loop terminating...
+ [2023-09-14 14:15:17,587][43669] Weights refcount: 2 0
+ [2023-09-14 14:15:17,587][43668] Weights refcount: 2 0
+ [2023-09-14 14:15:17,588][43726] Stopping RolloutWorker_w6...
+ [2023-09-14 14:15:17,588][43726] Loop rollout_proc6_evt_loop terminating...
+ [2023-09-14 14:15:17,588][43673] Stopping RolloutWorker_w3...
+ [2023-09-14 14:15:17,589][43673] Loop rollout_proc3_evt_loop terminating...
+ [2023-09-14 14:15:17,589][43669] Stopping InferenceWorker_p1-w0...
+ [2023-09-14 14:15:17,589][43668] Stopping InferenceWorker_p0-w0...
+ [2023-09-14 14:15:17,589][43669] Loop inference_proc1-0_evt_loop terminating...
+ [2023-09-14 14:15:17,589][43668] Loop inference_proc0-0_evt_loop terminating...
+ [2023-09-14 14:15:17,593][43674] Stopping RolloutWorker_w4...
+ [2023-09-14 14:15:17,594][43674] Loop rollout_proc4_evt_loop terminating...
+ [2023-09-14 14:15:17,639][43415] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000002179_8925184.pth
+ [2023-09-14 14:15:17,643][43239] Removing /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000002618_10723328.pth
+ [2023-09-14 14:15:17,650][43415] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p1/checkpoint_000002442_10002432.pth...
+ [2023-09-14 14:15:17,653][43239] Saving new best policy, reward=7.781!
+ [2023-09-14 14:15:17,741][43239] Saving /home/cogstack/Documents/optuna/environments/sample_factory/train_dir/default_experiment/checkpoint_p0/checkpoint_000002950_12083200.pth...
+ [2023-09-14 14:15:17,745][43415] Stopping LearnerWorker_p1...
+ [2023-09-14 14:15:17,746][43415] Loop learner_proc1_evt_loop terminating...
+ [2023-09-14 14:15:17,837][43239] Stopping LearnerWorker_p0...
+ [2023-09-14 14:15:17,837][43239] Loop learner_proc0_evt_loop terminating...