PranayPalem committed on
Commit 8051b24 · verified · 1 Parent(s): f07af4c

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ replay.mp4 filter=lfs diff=lfs merge=lfs -text
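
The added rule tracks `replay.mp4` with Git LFS; an equivalent rule can be generated with `git lfs track`, e.g.:
```
git lfs track "replay.mp4"
```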
.summary/0/events.out.tfevents.1749692327.pranayROG ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97d83b5ab5d20210cade3a6852c1191b898268672162e5a8e133da7ae92cf8a2
+ size 72185
.summary/0/events.out.tfevents.1749695929.pranayROG ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfce874744192000d3d5f4873171e7870a402347bb657d22e18aa0aadb7862a6
+ size 74189
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ library_name: sample-factory
+ tags:
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - sample-factory
+ model-index:
+ - name: APPO
+   results:
+   - task:
+       type: reinforcement-learning
+       name: reinforcement-learning
+     dataset:
+       name: doom_health_gathering_supreme
+       type: doom_health_gathering_supreme
+     metrics:
+     - type: mean_reward
+       value: 12.28 +/- 5.66
+       name: mean_reward
+       verified: false
+ ---
+
+ An **APPO** model trained on the **doom_health_gathering_supreme** environment.
+
+ This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
+ Documentation on how to use Sample-Factory can be found at https://www.samplefactory.dev/
+
+ ## Downloading the model
+
+ After installing Sample-Factory, download the model with:
+ ```
+ python -m sample_factory.huggingface.load_from_hub -r PranayPalem/vizdoom_laptop_optimized
+ ```
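
The downloaded experiment folder has to end up under the `--train_dir` that the commands below point at (`./train_dir` here). A minimal sketch, assuming `load_from_hub` accepts a `-d`/`--train_dir` destination argument:
```
python -m sample_factory.huggingface.load_from_hub -r PranayPalem/vizdoom_laptop_optimized -d ./train_dir
```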
+
+ ## Using the model
+
+ To run the model after download, use the `enjoy` script corresponding to this environment:
+ ```
+ python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=vizdoom_laptop_optimized
+ ```
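
The module path is left as a placeholder above; for the ViZDoom environments bundled with Sample-Factory 2.0 it is typically `sf_examples.vizdoom.enjoy_vizdoom` (an assumption about the stock layout, not something recorded in this repository), e.g.:
```
python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=vizdoom_laptop_optimized
```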
+
+ You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
+ See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.
+
+ ## Training with this model
+
+ To continue training with this model, use the `train` script corresponding to this environment:
+ ```
+ python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=vizdoom_laptop_optimized --restart_behavior=resume --train_for_env_steps=10000000000
+ ```
+
+ Note that you may need to set `--train_for_env_steps` to a suitably high value, since the experiment resumes from the step count at which it previously stopped.
+
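
Likewise, the train module for the bundled ViZDoom environments is typically `sf_examples.vizdoom.train_vizdoom` (the same assumption about the stock Sample-Factory layout), so resuming could look like:
```
python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=vizdoom_laptop_optimized --restart_behavior=resume --train_for_env_steps=10000000000
```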
checkpoint_p0/best_000000976_3997696_reward_24.448.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:231d20bd5c40b6b84cdee174b106b5cab497d0006d86773aea6ea607f496362d
+ size 34929243
checkpoint_p0/checkpoint_000000490_2007040.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8166503ee1b569cc58545aa39726b73f28c263b6ee2ee902efea68cc5f05182
+ size 34929477
checkpoint_p0/checkpoint_000000978_4005888.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d8a98dc2c3586ab4fbf2be9289829e2b0793afd05b3c0221d44242e0e2473a6
+ size 34929669
config.json ADDED
@@ -0,0 +1,153 @@
1
+ {
2
+ "help": false,
3
+ "algo": "APPO",
4
+ "env": "doom_health_gathering_supreme",
5
+ "experiment": "vizdoom_laptop_optimized",
6
+ "train_dir": "/home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir",
7
+ "restart_behavior": "resume",
8
+ "device": "gpu",
9
+ "seed": 3333,
10
+ "num_policies": 1,
11
+ "async_rl": true,
12
+ "serial_mode": false,
13
+ "batched_sampling": false,
14
+ "num_batches_to_accumulate": 2,
15
+ "worker_num_splits": 2,
16
+ "policy_workers_per_policy": 1,
17
+ "max_policy_lag": 1000,
18
+ "num_workers": 8,
19
+ "num_envs_per_worker": 4,
20
+ "batch_size": 1024,
21
+ "num_batches_per_epoch": 1,
22
+ "num_epochs": 1,
23
+ "rollout": 32,
24
+ "recurrence": 32,
25
+ "shuffle_minibatches": false,
26
+ "gamma": 0.99,
27
+ "reward_scale": 1.0,
28
+ "reward_clip": 1000.0,
29
+ "value_bootstrap": false,
30
+ "normalize_returns": true,
31
+ "exploration_loss_coeff": 0.001,
32
+ "value_loss_coeff": 0.5,
33
+ "kl_loss_coeff": 0.0,
34
+ "exploration_loss": "symmetric_kl",
35
+ "gae_lambda": 0.95,
36
+ "ppo_clip_ratio": 0.1,
37
+ "ppo_clip_value": 0.2,
38
+ "with_vtrace": false,
39
+ "vtrace_rho": 1.0,
40
+ "vtrace_c": 1.0,
41
+ "optimizer": "adam",
42
+ "adam_eps": 1e-06,
43
+ "adam_beta1": 0.9,
44
+ "adam_beta2": 0.999,
45
+ "max_grad_norm": 4.0,
46
+ "learning_rate": 0.0001,
47
+ "lr_schedule": "constant",
48
+ "lr_schedule_kl_threshold": 0.008,
49
+ "lr_adaptive_min": 1e-06,
50
+ "lr_adaptive_max": 0.01,
51
+ "obs_subtract_mean": 0.0,
52
+ "obs_scale": 255.0,
53
+ "normalize_input": true,
54
+ "normalize_input_keys": null,
55
+ "decorrelate_experience_max_seconds": 0,
56
+ "decorrelate_envs_on_one_worker": true,
57
+ "actor_worker_gpus": [],
58
+ "set_workers_cpu_affinity": true,
59
+ "force_envs_single_thread": false,
60
+ "default_niceness": 0,
61
+ "log_to_file": true,
62
+ "experiment_summaries_interval": 10,
63
+ "flush_summaries_interval": 30,
64
+ "stats_avg": 100,
65
+ "summaries_use_frameskip": true,
66
+ "heartbeat_interval": 20,
67
+ "heartbeat_reporting_interval": 600,
68
+ "train_for_env_steps": 4000000,
69
+ "train_for_seconds": 10000000000,
70
+ "save_every_sec": 120,
71
+ "keep_checkpoints": 2,
72
+ "load_checkpoint_kind": "latest",
73
+ "save_milestones_sec": -1,
74
+ "save_best_every_sec": 5,
75
+ "save_best_metric": "reward",
76
+ "save_best_after": 100000,
77
+ "benchmark": false,
78
+ "encoder_mlp_layers": [
79
+ 512,
80
+ 512
81
+ ],
82
+ "encoder_conv_architecture": "convnet_simple",
83
+ "encoder_conv_mlp_layers": [
84
+ 512
85
+ ],
86
+ "use_rnn": true,
87
+ "rnn_size": 512,
88
+ "rnn_type": "gru",
89
+ "rnn_num_layers": 1,
90
+ "decoder_mlp_layers": [],
91
+ "nonlinearity": "elu",
92
+ "policy_initialization": "orthogonal",
93
+ "policy_init_gain": 1.0,
94
+ "actor_critic_share_weights": true,
95
+ "adaptive_stddev": true,
96
+ "continuous_tanh_scale": 0.0,
97
+ "initial_stddev": 1.0,
98
+ "use_env_info_cache": false,
99
+ "env_gpu_actions": false,
100
+ "env_gpu_observations": true,
101
+ "env_frameskip": 4,
102
+ "env_framestack": 1,
103
+ "pixel_format": "CHW",
104
+ "use_record_episode_statistics": false,
105
+ "with_wandb": false,
106
+ "wandb_user": null,
107
+ "wandb_project": "sample_factory",
108
+ "wandb_group": null,
109
+ "wandb_job_type": "SF",
110
+ "wandb_tags": [],
111
+ "with_pbt": false,
112
+ "pbt_mix_policies_in_one_env": true,
113
+ "pbt_period_env_steps": 5000000,
114
+ "pbt_start_mutation": 20000000,
115
+ "pbt_replace_fraction": 0.3,
116
+ "pbt_mutation_rate": 0.15,
117
+ "pbt_replace_reward_gap": 0.1,
118
+ "pbt_replace_reward_gap_absolute": 1e-06,
119
+ "pbt_optimize_gamma": false,
120
+ "pbt_target_objective": "true_objective",
121
+ "pbt_perturb_min": 1.1,
122
+ "pbt_perturb_max": 1.5,
123
+ "num_agents": -1,
124
+ "num_humans": 0,
125
+ "num_bots": -1,
126
+ "start_bot_difficulty": null,
127
+ "timelimit": null,
128
+ "res_w": 128,
129
+ "res_h": 72,
130
+ "wide_aspect_ratio": false,
131
+ "eval_env_frameskip": 1,
132
+ "fps": 35,
133
+ "command_line": "--env=doom_health_gathering_supreme --experiment=vizdoom_laptop_optimized --algo=APPO --device=gpu --use_rnn=True --seed=3333 --num_workers=8 --num_envs_per_worker=4 --batch_size=1024 --rollout=32 --recurrence=32 --num_epochs=1 --learning_rate=0.0001 --train_for_env_steps=2000000 --reward_clip=1000.0",
134
+ "cli_args": {
135
+ "algo": "APPO",
136
+ "env": "doom_health_gathering_supreme",
137
+ "experiment": "vizdoom_laptop_optimized",
138
+ "device": "gpu",
139
+ "seed": 3333,
140
+ "num_workers": 8,
141
+ "num_envs_per_worker": 4,
142
+ "batch_size": 1024,
143
+ "num_epochs": 1,
144
+ "rollout": 32,
145
+ "recurrence": 32,
146
+ "reward_clip": 1000.0,
147
+ "learning_rate": 0.0001,
148
+ "train_for_env_steps": 2000000,
149
+ "use_rnn": true
150
+ },
151
+ "git_hash": "unknown",
152
+ "git_repo_name": "not a git repository"
153
+ }
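
The `command_line` field above records the exact flags used for this run. Under the same assumption about the ViZDoom example module noted in the README, reproducing the original training invocation from scratch might look like:
```
python -m sf_examples.vizdoom.train_vizdoom --env=doom_health_gathering_supreme --experiment=vizdoom_laptop_optimized --algo=APPO --device=gpu --use_rnn=True --seed=3333 --num_workers=8 --num_envs_per_worker=4 --batch_size=1024 --rollout=32 --recurrence=32 --num_epochs=1 --learning_rate=0.0001 --train_for_env_steps=2000000 --reward_clip=1000.0
```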
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5618afae64f674d6049e6a1236c81f5670e8327dabd1e3dea7c6be1645d0b816
+ size 23458751
sf_log.txt ADDED
@@ -0,0 +1,961 @@
1
+ [2025-06-11 18:38:50,934][196496] Saving configuration to /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/config.json...
2
+ [2025-06-11 18:38:50,935][196496] Rollout worker 0 uses device cpu
3
+ [2025-06-11 18:38:50,935][196496] Rollout worker 1 uses device cpu
4
+ [2025-06-11 18:38:50,935][196496] Rollout worker 2 uses device cpu
5
+ [2025-06-11 18:38:50,935][196496] Rollout worker 3 uses device cpu
6
+ [2025-06-11 18:38:50,935][196496] Rollout worker 4 uses device cpu
7
+ [2025-06-11 18:38:50,935][196496] Rollout worker 5 uses device cpu
8
+ [2025-06-11 18:38:50,935][196496] Rollout worker 6 uses device cpu
9
+ [2025-06-11 18:38:50,935][196496] Rollout worker 7 uses device cpu
10
+ [2025-06-11 18:38:51,017][196496] Using GPUs [0] for process 0 (actually maps to GPUs [0])
11
+ [2025-06-11 18:38:51,017][196496] InferenceWorker_p0-w0: min num requests: 2
12
+ [2025-06-11 18:38:51,041][196496] Starting all processes...
13
+ [2025-06-11 18:38:51,041][196496] Starting process learner_proc0
14
+ [2025-06-11 18:38:52,017][196496] Starting all processes...
15
+ [2025-06-11 18:38:52,019][196677] Using GPUs [0] for process 0 (actually maps to GPUs [0])
16
+ [2025-06-11 18:38:52,019][196677] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
17
+ [2025-06-11 18:38:52,021][196496] Starting process inference_proc0-0
18
+ [2025-06-11 18:38:52,022][196496] Starting process rollout_proc0
19
+ [2025-06-11 18:38:52,022][196496] Starting process rollout_proc1
20
+ [2025-06-11 18:38:52,025][196496] Starting process rollout_proc2
21
+ [2025-06-11 18:38:52,032][196677] Num visible devices: 1
22
+ [2025-06-11 18:38:52,037][196677] Setting fixed seed 3333
23
+ [2025-06-11 18:38:52,040][196677] Using GPUs [0] for process 0 (actually maps to GPUs [0])
24
+ [2025-06-11 18:38:52,040][196677] Initializing actor-critic model on device cuda:0
25
+ [2025-06-11 18:38:52,040][196677] RunningMeanStd input shape: (3, 72, 128)
26
+ [2025-06-11 18:38:52,041][196677] RunningMeanStd input shape: (1,)
27
+ [2025-06-11 18:38:52,039][196496] Starting process rollout_proc3
28
+ [2025-06-11 18:38:52,053][196496] Starting process rollout_proc4
29
+ [2025-06-11 18:38:52,053][196496] Starting process rollout_proc5
30
+ [2025-06-11 18:38:52,054][196496] Starting process rollout_proc6
31
+ [2025-06-11 18:38:52,057][196496] Starting process rollout_proc7
32
+ [2025-06-11 18:38:52,113][196677] ConvEncoder: input_channels=3
33
+ [2025-06-11 18:38:52,180][196677] Conv encoder output size: 512
34
+ [2025-06-11 18:38:52,180][196677] Policy head output size: 512
35
+ [2025-06-11 18:38:52,188][196677] Created Actor Critic model with architecture:
36
+ [2025-06-11 18:38:52,189][196677] ActorCriticSharedWeights(
37
+ (obs_normalizer): ObservationNormalizer(
38
+ (running_mean_std): RunningMeanStdDictInPlace(
39
+ (running_mean_std): ModuleDict(
40
+ (obs): RunningMeanStdInPlace()
41
+ )
42
+ )
43
+ )
44
+ (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
45
+ (encoder): VizdoomEncoder(
46
+ (basic_encoder): ConvEncoder(
47
+ (enc): RecursiveScriptModule(
48
+ original_name=ConvEncoderImpl
49
+ (conv_head): RecursiveScriptModule(
50
+ original_name=Sequential
51
+ (0): RecursiveScriptModule(original_name=Conv2d)
52
+ (1): RecursiveScriptModule(original_name=ELU)
53
+ (2): RecursiveScriptModule(original_name=Conv2d)
54
+ (3): RecursiveScriptModule(original_name=ELU)
55
+ (4): RecursiveScriptModule(original_name=Conv2d)
56
+ (5): RecursiveScriptModule(original_name=ELU)
57
+ )
58
+ (mlp_layers): RecursiveScriptModule(
59
+ original_name=Sequential
60
+ (0): RecursiveScriptModule(original_name=Linear)
61
+ (1): RecursiveScriptModule(original_name=ELU)
62
+ )
63
+ )
64
+ )
65
+ )
66
+ (core): ModelCoreRNN(
67
+ (core): GRU(512, 512)
68
+ )
69
+ (decoder): MlpDecoder(
70
+ (mlp): Identity()
71
+ )
72
+ (critic_linear): Linear(in_features=512, out_features=1, bias=True)
73
+ (action_parameterization): ActionParameterizationDefault(
74
+ (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
75
+ )
76
+ )
77
+ [2025-06-11 18:38:52,405][196677] Using optimizer <class 'torch.optim.adam.Adam'>
78
+ [2025-06-11 18:38:53,154][196677] No checkpoints found
79
+ [2025-06-11 18:38:53,154][196677] Did not load from checkpoint, starting from scratch!
80
+ [2025-06-11 18:38:53,155][196677] Initialized policy 0 weights for model version 0
81
+ [2025-06-11 18:38:53,158][196677] LearnerWorker_p0 finished initialization!
82
+ [2025-06-11 18:38:53,158][196677] Using GPUs [0] for process 0 (actually maps to GPUs [0])
83
+ [2025-06-11 18:38:53,393][196727] Worker 4 uses CPU cores [8, 9]
84
+ [2025-06-11 18:38:53,415][196724] Worker 0 uses CPU cores [0, 1]
85
+ [2025-06-11 18:38:53,440][196726] Worker 3 uses CPU cores [6, 7]
86
+ [2025-06-11 18:38:53,467][196721] Worker 1 uses CPU cores [2, 3]
87
+ [2025-06-11 18:38:53,477][196728] Worker 6 uses CPU cores [12, 13]
88
+ [2025-06-11 18:38:53,504][196725] Using GPUs [0] for process 0 (actually maps to GPUs [0])
89
+ [2025-06-11 18:38:53,504][196725] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
90
+ [2025-06-11 18:38:53,516][196725] Num visible devices: 1
91
+ [2025-06-11 18:38:53,585][196733] Worker 7 uses CPU cores [14, 15]
92
+ [2025-06-11 18:38:53,631][196725] RunningMeanStd input shape: (3, 72, 128)
93
+ [2025-06-11 18:38:53,632][196725] RunningMeanStd input shape: (1,)
94
+ [2025-06-11 18:38:53,638][196496] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
95
+ [2025-06-11 18:38:53,638][196731] Worker 5 uses CPU cores [10, 11]
96
+ [2025-06-11 18:38:53,638][196723] Worker 2 uses CPU cores [4, 5]
97
+ [2025-06-11 18:38:53,680][196725] ConvEncoder: input_channels=3
98
+ [2025-06-11 18:38:53,735][196725] Conv encoder output size: 512
99
+ [2025-06-11 18:38:53,736][196725] Policy head output size: 512
100
+ [2025-06-11 18:38:53,763][196496] Inference worker 0-0 is ready!
101
+ [2025-06-11 18:38:53,763][196496] All inference workers are ready! Signal rollout workers to start!
102
+ [2025-06-11 18:38:53,789][196723] Doom resolution: 160x120, resize resolution: (128, 72)
103
+ [2025-06-11 18:38:53,790][196731] Doom resolution: 160x120, resize resolution: (128, 72)
104
+ [2025-06-11 18:38:53,790][196721] Doom resolution: 160x120, resize resolution: (128, 72)
105
+ [2025-06-11 18:38:53,790][196724] Doom resolution: 160x120, resize resolution: (128, 72)
106
+ [2025-06-11 18:38:53,790][196728] Doom resolution: 160x120, resize resolution: (128, 72)
107
+ [2025-06-11 18:38:53,790][196733] Doom resolution: 160x120, resize resolution: (128, 72)
108
+ [2025-06-11 18:38:53,790][196727] Doom resolution: 160x120, resize resolution: (128, 72)
109
+ [2025-06-11 18:38:53,790][196726] Doom resolution: 160x120, resize resolution: (128, 72)
110
+ [2025-06-11 18:38:53,959][196733] Decorrelating experience for 0 frames...
111
+ [2025-06-11 18:38:53,960][196724] Decorrelating experience for 0 frames...
112
+ [2025-06-11 18:38:53,960][196721] Decorrelating experience for 0 frames...
113
+ [2025-06-11 18:38:54,027][196723] Decorrelating experience for 0 frames...
114
+ [2025-06-11 18:38:54,027][196728] Decorrelating experience for 0 frames...
115
+ [2025-06-11 18:38:54,112][196733] Decorrelating experience for 32 frames...
116
+ [2025-06-11 18:38:54,152][196721] Decorrelating experience for 32 frames...
117
+ [2025-06-11 18:38:54,155][196731] Decorrelating experience for 0 frames...
118
+ [2025-06-11 18:38:54,250][196728] Decorrelating experience for 32 frames...
119
+ [2025-06-11 18:38:54,250][196723] Decorrelating experience for 32 frames...
120
+ [2025-06-11 18:38:54,313][196733] Decorrelating experience for 64 frames...
121
+ [2025-06-11 18:38:54,322][196731] Decorrelating experience for 32 frames...
122
+ [2025-06-11 18:38:54,328][196727] Decorrelating experience for 0 frames...
123
+ [2025-06-11 18:38:54,427][196724] Decorrelating experience for 32 frames...
124
+ [2025-06-11 18:38:54,476][196721] Decorrelating experience for 64 frames...
125
+ [2025-06-11 18:38:54,508][196728] Decorrelating experience for 64 frames...
126
+ [2025-06-11 18:38:54,510][196723] Decorrelating experience for 64 frames...
127
+ [2025-06-11 18:38:54,525][196733] Decorrelating experience for 96 frames...
128
+ [2025-06-11 18:38:54,565][196726] Decorrelating experience for 0 frames...
129
+ [2025-06-11 18:38:54,592][196727] Decorrelating experience for 32 frames...
130
+ [2025-06-11 18:38:54,689][196721] Decorrelating experience for 96 frames...
131
+ [2025-06-11 18:38:54,721][196728] Decorrelating experience for 96 frames...
132
+ [2025-06-11 18:38:54,721][196723] Decorrelating experience for 96 frames...
133
+ [2025-06-11 18:38:54,731][196726] Decorrelating experience for 32 frames...
134
+ [2025-06-11 18:38:54,735][196731] Decorrelating experience for 64 frames...
135
+ [2025-06-11 18:38:54,796][196724] Decorrelating experience for 64 frames...
136
+ [2025-06-11 18:38:54,804][196727] Decorrelating experience for 64 frames...
137
+ [2025-06-11 18:38:54,911][196731] Decorrelating experience for 96 frames...
138
+ [2025-06-11 18:38:54,974][196724] Decorrelating experience for 96 frames...
139
+ [2025-06-11 18:38:54,984][196727] Decorrelating experience for 96 frames...
140
+ [2025-06-11 18:38:55,099][196726] Decorrelating experience for 64 frames...
141
+ [2025-06-11 18:38:55,300][196726] Decorrelating experience for 96 frames...
142
+ [2025-06-11 18:38:55,325][196677] Signal inference workers to stop experience collection...
143
+ [2025-06-11 18:38:55,329][196725] InferenceWorker_p0-w0: stopping experience collection
144
+ [2025-06-11 18:38:56,167][196677] Signal inference workers to resume experience collection...
145
+ [2025-06-11 18:38:56,168][196725] InferenceWorker_p0-w0: resuming experience collection
146
+ [2025-06-11 18:38:57,236][196496] Fps is (10 sec: 9108.3, 60 sec: 9108.3, 300 sec: 9108.3). Total num frames: 32768. Throughput: 0: 1618.9. Samples: 5824. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
147
+ [2025-06-11 18:38:57,236][196496] Avg episode reward: [(0, '3.884')]
148
+ [2025-06-11 18:38:57,399][196725] Updated weights for policy 0, policy_version 10 (0.0097)
149
+ [2025-06-11 18:38:58,740][196725] Updated weights for policy 0, policy_version 20 (0.0007)
150
+ [2025-06-11 18:39:00,062][196725] Updated weights for policy 0, policy_version 30 (0.0006)
151
+ [2025-06-11 18:39:01,409][196725] Updated weights for policy 0, policy_version 40 (0.0006)
152
+ [2025-06-11 18:39:02,236][196496] Fps is (10 sec: 21915.1, 60 sec: 21915.1, 300 sec: 21915.1). Total num frames: 188416. Throughput: 0: 3386.1. Samples: 29112. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
153
+ [2025-06-11 18:39:02,236][196496] Avg episode reward: [(0, '4.335')]
154
+ [2025-06-11 18:39:02,236][196677] Saving new best policy, reward=4.335!
155
+ [2025-06-11 18:39:02,801][196725] Updated weights for policy 0, policy_version 50 (0.0006)
156
+ [2025-06-11 18:39:04,171][196725] Updated weights for policy 0, policy_version 60 (0.0006)
157
+ [2025-06-11 18:39:05,523][196725] Updated weights for policy 0, policy_version 70 (0.0006)
158
+ [2025-06-11 18:39:07,005][196725] Updated weights for policy 0, policy_version 80 (0.0008)
159
+ [2025-06-11 18:39:07,236][196496] Fps is (10 sec: 29900.9, 60 sec: 24399.7, 300 sec: 24399.7). Total num frames: 331776. Throughput: 0: 5459.8. Samples: 74240. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
160
+ [2025-06-11 18:39:07,236][196496] Avg episode reward: [(0, '4.753')]
161
+ [2025-06-11 18:39:07,238][196677] Saving new best policy, reward=4.753!
162
+ [2025-06-11 18:39:08,366][196725] Updated weights for policy 0, policy_version 90 (0.0006)
163
+ [2025-06-11 18:39:09,756][196725] Updated weights for policy 0, policy_version 100 (0.0008)
164
+ [2025-06-11 18:39:10,965][196496] Heartbeat connected on Batcher_0
165
+ [2025-06-11 18:39:11,019][196496] Heartbeat connected on LearnerWorker_p0
166
+ [2025-06-11 18:39:11,021][196496] Heartbeat connected on RolloutWorker_w0
167
+ [2025-06-11 18:39:11,021][196496] Heartbeat connected on InferenceWorker_p0-w0
168
+ [2025-06-11 18:39:11,023][196496] Heartbeat connected on RolloutWorker_w1
169
+ [2025-06-11 18:39:11,026][196496] Heartbeat connected on RolloutWorker_w2
170
+ [2025-06-11 18:39:11,027][196496] Heartbeat connected on RolloutWorker_w3
171
+ [2025-06-11 18:39:11,033][196496] Heartbeat connected on RolloutWorker_w4
172
+ [2025-06-11 18:39:11,036][196496] Heartbeat connected on RolloutWorker_w5
173
+ [2025-06-11 18:39:11,038][196496] Heartbeat connected on RolloutWorker_w6
174
+ [2025-06-11 18:39:11,041][196496] Heartbeat connected on RolloutWorker_w7
175
+ [2025-06-11 18:39:11,157][196725] Updated weights for policy 0, policy_version 110 (0.0006)
176
+ [2025-06-11 18:39:12,236][196496] Fps is (10 sec: 29081.5, 60 sec: 25768.5, 300 sec: 25768.5). Total num frames: 479232. Throughput: 0: 6334.7. Samples: 117810. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
177
+ [2025-06-11 18:39:12,236][196496] Avg episode reward: [(0, '4.721')]
178
+ [2025-06-11 18:39:12,547][196725] Updated weights for policy 0, policy_version 120 (0.0007)
179
+ [2025-06-11 18:39:13,920][196725] Updated weights for policy 0, policy_version 130 (0.0007)
180
+ [2025-06-11 18:39:15,286][196725] Updated weights for policy 0, policy_version 140 (0.0006)
181
+ [2025-06-11 18:39:16,655][196725] Updated weights for policy 0, policy_version 150 (0.0006)
182
+ [2025-06-11 18:39:17,236][196496] Fps is (10 sec: 29900.6, 60 sec: 26730.8, 300 sec: 26730.8). Total num frames: 630784. Throughput: 0: 5940.3. Samples: 140178. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
183
+ [2025-06-11 18:39:17,236][196496] Avg episode reward: [(0, '4.775')]
184
+ [2025-06-11 18:39:17,238][196677] Saving new best policy, reward=4.775!
185
+ [2025-06-11 18:39:18,033][196725] Updated weights for policy 0, policy_version 160 (0.0006)
186
+ [2025-06-11 18:39:19,403][196725] Updated weights for policy 0, policy_version 170 (0.0006)
187
+ [2025-06-11 18:39:20,760][196725] Updated weights for policy 0, policy_version 180 (0.0006)
188
+ [2025-06-11 18:39:22,118][196725] Updated weights for policy 0, policy_version 190 (0.0006)
189
+ [2025-06-11 18:39:22,236][196496] Fps is (10 sec: 29900.7, 60 sec: 27213.5, 300 sec: 27213.5). Total num frames: 778240. Throughput: 0: 6471.8. Samples: 185078. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
190
+ [2025-06-11 18:39:22,236][196496] Avg episode reward: [(0, '5.121')]
191
+ [2025-06-11 18:39:22,256][196677] Saving new best policy, reward=5.121!
192
+ [2025-06-11 18:39:23,480][196725] Updated weights for policy 0, policy_version 200 (0.0006)
193
+ [2025-06-11 18:39:24,852][196725] Updated weights for policy 0, policy_version 210 (0.0006)
194
+ [2025-06-11 18:39:26,236][196725] Updated weights for policy 0, policy_version 220 (0.0006)
195
+ [2025-06-11 18:39:27,236][196496] Fps is (10 sec: 29900.9, 60 sec: 27674.3, 300 sec: 27674.3). Total num frames: 929792. Throughput: 0: 6844.8. Samples: 229968. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
196
+ [2025-06-11 18:39:27,236][196496] Avg episode reward: [(0, '5.324')]
197
+ [2025-06-11 18:39:27,238][196677] Saving new best policy, reward=5.324!
198
+ [2025-06-11 18:39:27,612][196725] Updated weights for policy 0, policy_version 230 (0.0006)
199
+ [2025-06-11 18:39:28,991][196725] Updated weights for policy 0, policy_version 240 (0.0006)
200
+ [2025-06-11 18:39:30,359][196725] Updated weights for policy 0, policy_version 250 (0.0006)
201
+ [2025-06-11 18:39:31,749][196725] Updated weights for policy 0, policy_version 260 (0.0006)
202
+ [2025-06-11 18:39:32,236][196496] Fps is (10 sec: 29900.9, 60 sec: 27909.7, 300 sec: 27909.7). Total num frames: 1077248. Throughput: 0: 6537.2. Samples: 252320. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
203
+ [2025-06-11 18:39:32,236][196496] Avg episode reward: [(0, '6.289')]
204
+ [2025-06-11 18:39:32,236][196677] Saving new best policy, reward=6.289!
205
+ [2025-06-11 18:39:33,146][196725] Updated weights for policy 0, policy_version 270 (0.0006)
206
+ [2025-06-11 18:39:34,514][196725] Updated weights for policy 0, policy_version 280 (0.0006)
207
+ [2025-06-11 18:39:35,856][196725] Updated weights for policy 0, policy_version 290 (0.0007)
208
+ [2025-06-11 18:39:37,223][196725] Updated weights for policy 0, policy_version 300 (0.0006)
209
+ [2025-06-11 18:39:37,237][196496] Fps is (10 sec: 29898.7, 60 sec: 28184.6, 300 sec: 28184.6). Total num frames: 1228800. Throughput: 0: 6811.7. Samples: 296978. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
210
+ [2025-06-11 18:39:37,237][196496] Avg episode reward: [(0, '8.575')]
211
+ [2025-06-11 18:39:37,240][196677] Saving new best policy, reward=8.575!
212
+ [2025-06-11 18:39:38,595][196725] Updated weights for policy 0, policy_version 310 (0.0007)
213
+ [2025-06-11 18:39:39,967][196725] Updated weights for policy 0, policy_version 320 (0.0007)
214
+ [2025-06-11 18:39:41,342][196725] Updated weights for policy 0, policy_version 330 (0.0006)
215
+ [2025-06-11 18:39:42,236][196496] Fps is (10 sec: 29900.9, 60 sec: 28319.4, 300 sec: 28319.4). Total num frames: 1376256. Throughput: 0: 7463.8. Samples: 341694. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
216
+ [2025-06-11 18:39:42,236][196496] Avg episode reward: [(0, '10.651')]
217
+ [2025-06-11 18:39:42,236][196677] Saving new best policy, reward=10.651!
218
+ [2025-06-11 18:39:42,752][196725] Updated weights for policy 0, policy_version 340 (0.0006)
219
+ [2025-06-11 18:39:44,136][196725] Updated weights for policy 0, policy_version 350 (0.0006)
220
+ [2025-06-11 18:39:45,518][196725] Updated weights for policy 0, policy_version 360 (0.0006)
221
+ [2025-06-11 18:39:46,869][196725] Updated weights for policy 0, policy_version 370 (0.0007)
222
+ [2025-06-11 18:39:47,236][196496] Fps is (10 sec: 29493.3, 60 sec: 28428.7, 300 sec: 28428.7). Total num frames: 1523712. Throughput: 0: 7440.8. Samples: 363950. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
223
+ [2025-06-11 18:39:47,236][196496] Avg episode reward: [(0, '10.591')]
224
+ [2025-06-11 18:39:48,244][196725] Updated weights for policy 0, policy_version 380 (0.0006)
225
+ [2025-06-11 18:39:49,630][196725] Updated weights for policy 0, policy_version 390 (0.0006)
226
+ [2025-06-11 18:39:51,015][196725] Updated weights for policy 0, policy_version 400 (0.0007)
227
+ [2025-06-11 18:39:52,236][196496] Fps is (10 sec: 29491.1, 60 sec: 28519.4, 300 sec: 28519.4). Total num frames: 1671168. Throughput: 0: 7426.8. Samples: 408446. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
228
+ [2025-06-11 18:39:52,236][196496] Avg episode reward: [(0, '12.264')]
229
+ [2025-06-11 18:39:52,263][196677] Saving new best policy, reward=12.264!
230
+ [2025-06-11 18:39:52,409][196725] Updated weights for policy 0, policy_version 410 (0.0007)
231
+ [2025-06-11 18:39:53,774][196725] Updated weights for policy 0, policy_version 420 (0.0007)
232
+ [2025-06-11 18:39:55,159][196725] Updated weights for policy 0, policy_version 430 (0.0007)
233
+ [2025-06-11 18:39:56,538][196725] Updated weights for policy 0, policy_version 440 (0.0007)
234
+ [2025-06-11 18:39:57,236][196496] Fps is (10 sec: 29900.9, 60 sec: 29832.5, 300 sec: 28660.2). Total num frames: 1822720. Throughput: 0: 7448.3. Samples: 452982. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
235
+ [2025-06-11 18:39:57,236][196496] Avg episode reward: [(0, '12.874')]
236
+ [2025-06-11 18:39:57,238][196677] Saving new best policy, reward=12.874!
237
+ [2025-06-11 18:39:57,923][196725] Updated weights for policy 0, policy_version 450 (0.0006)
238
+ [2025-06-11 18:39:59,340][196725] Updated weights for policy 0, policy_version 460 (0.0007)
239
+ [2025-06-11 18:40:00,725][196725] Updated weights for policy 0, policy_version 470 (0.0006)
240
+ [2025-06-11 18:40:02,120][196725] Updated weights for policy 0, policy_version 480 (0.0006)
241
+ [2025-06-11 18:40:02,236][196496] Fps is (10 sec: 29491.2, 60 sec: 29627.7, 300 sec: 28661.1). Total num frames: 1966080. Throughput: 0: 7434.4. Samples: 474726. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
242
+ [2025-06-11 18:40:02,236][196496] Avg episode reward: [(0, '14.989')]
243
+ [2025-06-11 18:40:02,263][196677] Saving new best policy, reward=14.989!
244
+ [2025-06-11 18:40:03,515][196677] Stopping Batcher_0...
245
+ [2025-06-11 18:40:03,515][196496] Component Batcher_0 stopped!
246
+ [2025-06-11 18:40:03,515][196725] Updated weights for policy 0, policy_version 490 (0.0007)
247
+ [2025-06-11 18:40:03,515][196677] Loop batcher_evt_loop terminating...
248
+ [2025-06-11 18:40:03,515][196677] Saving /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/checkpoint_p0/checkpoint_000000490_2007040.pth...
249
+ [2025-06-11 18:40:03,541][196723] Stopping RolloutWorker_w2...
250
+ [2025-06-11 18:40:03,542][196496] Component RolloutWorker_w2 stopped!
251
+ [2025-06-11 18:40:03,542][196723] Loop rollout_proc2_evt_loop terminating...
252
+ [2025-06-11 18:40:03,542][196496] Component RolloutWorker_w6 stopped!
253
+ [2025-06-11 18:40:03,542][196728] Stopping RolloutWorker_w6...
254
+ [2025-06-11 18:40:03,543][196728] Loop rollout_proc6_evt_loop terminating...
255
+ [2025-06-11 18:40:03,546][196496] Component RolloutWorker_w4 stopped!
256
+ [2025-06-11 18:40:03,546][196727] Stopping RolloutWorker_w4...
257
+ [2025-06-11 18:40:03,547][196727] Loop rollout_proc4_evt_loop terminating...
258
+ [2025-06-11 18:40:03,547][196496] Component RolloutWorker_w0 stopped!
259
+ [2025-06-11 18:40:03,547][196724] Stopping RolloutWorker_w0...
260
+ [2025-06-11 18:40:03,548][196724] Loop rollout_proc0_evt_loop terminating...
261
+ [2025-06-11 18:40:03,548][196496] Component RolloutWorker_w7 stopped!
262
+ [2025-06-11 18:40:03,548][196733] Stopping RolloutWorker_w7...
263
+ [2025-06-11 18:40:03,548][196496] Component RolloutWorker_w5 stopped!
264
+ [2025-06-11 18:40:03,548][196733] Loop rollout_proc7_evt_loop terminating...
265
+ [2025-06-11 18:40:03,548][196731] Stopping RolloutWorker_w5...
266
+ [2025-06-11 18:40:03,549][196731] Loop rollout_proc5_evt_loop terminating...
267
+ [2025-06-11 18:40:03,551][196496] Component RolloutWorker_w3 stopped!
268
+ [2025-06-11 18:40:03,551][196726] Stopping RolloutWorker_w3...
269
+ [2025-06-11 18:40:03,552][196726] Loop rollout_proc3_evt_loop terminating...
270
+ [2025-06-11 18:40:03,552][196496] Component RolloutWorker_w1 stopped!
271
+ [2025-06-11 18:40:03,552][196721] Stopping RolloutWorker_w1...
272
+ [2025-06-11 18:40:03,552][196721] Loop rollout_proc1_evt_loop terminating...
273
+ [2025-06-11 18:40:03,553][196725] Weights refcount: 2 0
274
+ [2025-06-11 18:40:03,554][196725] Stopping InferenceWorker_p0-w0...
275
+ [2025-06-11 18:40:03,555][196496] Component InferenceWorker_p0-w0 stopped!
276
+ [2025-06-11 18:40:03,555][196725] Loop inference_proc0-0_evt_loop terminating...
277
+ [2025-06-11 18:40:03,562][196677] Saving new best policy, reward=16.065!
278
+ [2025-06-11 18:40:03,613][196677] Saving /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/checkpoint_p0/checkpoint_000000490_2007040.pth...
279
+ [2025-06-11 18:40:03,674][196677] Stopping LearnerWorker_p0...
280
+ [2025-06-11 18:40:03,674][196677] Loop learner_proc0_evt_loop terminating...
281
+ [2025-06-11 18:40:03,674][196496] Component LearnerWorker_p0 stopped!
282
+ [2025-06-11 18:40:03,675][196496] Waiting for process learner_proc0 to stop...
283
+ [2025-06-11 18:40:04,416][196496] Waiting for process inference_proc0-0 to join...
284
+ [2025-06-11 18:40:04,416][196496] Waiting for process rollout_proc0 to join...
285
+ [2025-06-11 18:40:04,417][196496] Waiting for process rollout_proc1 to join...
286
+ [2025-06-11 18:40:04,417][196496] Waiting for process rollout_proc2 to join...
287
+ [2025-06-11 18:40:04,417][196496] Waiting for process rollout_proc3 to join...
288
+ [2025-06-11 18:40:04,417][196496] Waiting for process rollout_proc4 to join...
289
+ [2025-06-11 18:40:04,417][196496] Waiting for process rollout_proc5 to join...
290
+ [2025-06-11 18:40:04,418][196496] Waiting for process rollout_proc6 to join...
291
+ [2025-06-11 18:40:04,418][196496] Waiting for process rollout_proc7 to join...
292
+ [2025-06-11 18:40:04,418][196496] Batcher 0 profile tree view:
293
+ batching: 3.7070, releasing_batches: 0.0102
294
+ [2025-06-11 18:40:04,418][196496] InferenceWorker_p0-w0 profile tree view:
295
+ wait_policy: 0.0000
296
+ wait_policy_total: 1.3650
297
+ update_model: 1.0523
298
+ weight_update: 0.0007
299
+ one_step: 0.0024
300
+ handle_policy_step: 63.8188
301
+ deserialize: 2.1940, stack: 0.3450, obs_to_device_normalize: 14.4329, forward: 34.6100, send_messages: 3.1500
302
+ prepare_outputs: 6.9549
303
+ to_cpu: 4.3562
304
+ [2025-06-11 18:40:04,418][196496] Learner 0 profile tree view:
305
+ misc: 0.0022, prepare_batch: 4.7394
306
+ train: 10.4747
307
+ epoch_init: 0.0015, minibatch_init: 0.0017, losses_postprocess: 0.0679, kl_divergence: 0.0776, after_optimizer: 4.2096
308
+ calculate_losses: 4.0789
309
+ losses_init: 0.0008, forward_head: 0.3356, bptt_initial: 2.9485, tail: 0.1706, advantages_returns: 0.0429, losses: 0.2591
310
+ bptt: 0.2752
311
+ bptt_forward_core: 0.2633
312
+ update: 1.9318
313
+ clip: 0.2056
314
+ [2025-06-11 18:40:04,418][196496] RolloutWorker_w0 profile tree view:
315
+ wait_for_trajectories: 0.0366, enqueue_policy_requests: 2.1933, env_step: 29.8171, overhead: 2.4029, complete_rollouts: 0.1670
316
+ save_policy_outputs: 2.1456
317
+ split_output_tensors: 1.0670
318
+ [2025-06-11 18:40:04,418][196496] RolloutWorker_w7 profile tree view:
319
+ wait_for_trajectories: 0.0345, enqueue_policy_requests: 2.1740, env_step: 30.5282, overhead: 2.3833, complete_rollouts: 0.1411
320
+ save_policy_outputs: 2.1498
321
+ split_output_tensors: 1.0813
322
+ [2025-06-11 18:40:04,419][196496] Loop Runner_EvtLoop terminating...
323
+ [2025-06-11 18:40:04,419][196496] Runner profile tree view:
324
+ main_loop: 73.3782
325
+ [2025-06-11 18:40:04,419][196496] Collected {0: 2007040}, FPS: 27352.0
326
+ [2025-06-11 18:40:04,424][196496] Loading existing experiment configuration from /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/config.json
327
+ [2025-06-11 18:40:04,424][196496] Overriding arg 'num_workers' with value 1 passed from command line
328
+ [2025-06-11 18:40:04,424][196496] Adding new argument 'no_render'=True that is not in the saved config file!
329
+ [2025-06-11 18:40:04,424][196496] Adding new argument 'save_video'=True that is not in the saved config file!
330
+ [2025-06-11 18:40:04,424][196496] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
331
+ [2025-06-11 18:40:04,424][196496] Adding new argument 'video_name'=None that is not in the saved config file!
332
+ [2025-06-11 18:40:04,424][196496] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
333
+ [2025-06-11 18:40:04,424][196496] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
334
+ [2025-06-11 18:40:04,424][196496] Adding new argument 'push_to_hub'=True that is not in the saved config file!
335
+ [2025-06-11 18:40:04,424][196496] Adding new argument 'hf_repository'='PranayPalem/vizdoom_laptop_optimized' that is not in the saved config file!
336
+ [2025-06-11 18:40:04,424][196496] Adding new argument 'policy_index'=0 that is not in the saved config file!
337
+ [2025-06-11 18:40:04,424][196496] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
338
+ [2025-06-11 18:40:04,425][196496] Adding new argument 'train_script'=None that is not in the saved config file!
339
+ [2025-06-11 18:40:04,425][196496] Adding new argument 'enjoy_script'=None that is not in the saved config file!
340
+ [2025-06-11 18:40:04,425][196496] Using frameskip 1 and render_action_repeat=4 for evaluation
341
+ [2025-06-11 18:40:04,441][196496] Doom resolution: 160x120, resize resolution: (128, 72)
342
+ [2025-06-11 18:40:04,442][196496] RunningMeanStd input shape: (3, 72, 128)
343
+ [2025-06-11 18:40:04,443][196496] RunningMeanStd input shape: (1,)
344
+ [2025-06-11 18:40:04,452][196496] ConvEncoder: input_channels=3
345
+ [2025-06-11 18:40:04,502][196496] Conv encoder output size: 512
346
+ [2025-06-11 18:40:04,503][196496] Policy head output size: 512
347
+ [2025-06-11 18:40:04,677][196496] Loading state from checkpoint /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/checkpoint_p0/checkpoint_000000490_2007040.pth...
348
+ [2025-06-11 18:40:05,082][196496] Num frames 100...
349
+ [2025-06-11 18:40:05,141][196496] Num frames 200...
350
+ [2025-06-11 18:40:05,206][196496] Num frames 300...
351
+ [2025-06-11 18:40:05,271][196496] Num frames 400...
352
+ [2025-06-11 18:40:05,333][196496] Num frames 500...
353
+ [2025-06-11 18:40:05,395][196496] Num frames 600...
354
+ [2025-06-11 18:40:05,455][196496] Num frames 700...
355
+ [2025-06-11 18:40:05,516][196496] Num frames 800...
356
+ [2025-06-11 18:40:05,577][196496] Num frames 900...
357
+ [2025-06-11 18:40:05,637][196496] Num frames 1000...
358
+ [2025-06-11 18:40:05,699][196496] Num frames 1100...
359
+ [2025-06-11 18:40:05,760][196496] Num frames 1200...
360
+ [2025-06-11 18:40:05,822][196496] Num frames 1300...
361
+ [2025-06-11 18:40:05,883][196496] Num frames 1400...
362
+ [2025-06-11 18:40:05,946][196496] Num frames 1500...
363
+ [2025-06-11 18:40:06,009][196496] Num frames 1600...
364
+ [2025-06-11 18:40:06,071][196496] Num frames 1700...
365
+ [2025-06-11 18:40:06,131][196496] Num frames 1800...
366
+ [2025-06-11 18:40:06,192][196496] Num frames 1900...
367
+ [2025-06-11 18:40:06,257][196496] Avg episode rewards: #0: 54.199, true rewards: #0: 19.200
368
+ [2025-06-11 18:40:06,257][196496] Avg episode reward: 54.199, avg true_objective: 19.200
369
+ [2025-06-11 18:40:06,311][196496] Num frames 2000...
370
+ [2025-06-11 18:40:06,372][196496] Num frames 2100...
371
+ [2025-06-11 18:40:06,434][196496] Num frames 2200...
372
+ [2025-06-11 18:40:06,494][196496] Num frames 2300...
373
+ [2025-06-11 18:40:06,555][196496] Num frames 2400...
374
+ [2025-06-11 18:40:06,622][196496] Num frames 2500...
375
+ [2025-06-11 18:40:06,684][196496] Num frames 2600...
376
+ [2025-06-11 18:40:06,744][196496] Num frames 2700...
377
+ [2025-06-11 18:40:06,806][196496] Num frames 2800...
378
+ [2025-06-11 18:40:06,869][196496] Num frames 2900...
379
+ [2025-06-11 18:40:06,931][196496] Num frames 3000...
380
+ [2025-06-11 18:40:06,997][196496] Num frames 3100...
381
+ [2025-06-11 18:40:07,062][196496] Num frames 3200...
382
+ [2025-06-11 18:40:07,113][196496] Avg episode rewards: #0: 42.000, true rewards: #0: 16.000
383
+ [2025-06-11 18:40:07,113][196496] Avg episode reward: 42.000, avg true_objective: 16.000
384
+ [2025-06-11 18:40:07,175][196496] Num frames 3300...
385
+ [2025-06-11 18:40:07,234][196496] Num frames 3400...
386
+ [2025-06-11 18:40:07,309][196496] Num frames 3500...
387
+ [2025-06-11 18:40:07,368][196496] Num frames 3600...
388
+ [2025-06-11 18:40:07,453][196496] Avg episode rewards: #0: 29.826, true rewards: #0: 12.160
389
+ [2025-06-11 18:40:07,453][196496] Avg episode reward: 29.826, avg true_objective: 12.160
390
+ [2025-06-11 18:40:07,489][196496] Num frames 3700...
391
+ [2025-06-11 18:40:07,548][196496] Num frames 3800...
392
+ [2025-06-11 18:40:07,610][196496] Num frames 3900...
393
+ [2025-06-11 18:40:07,673][196496] Num frames 4000...
394
+ [2025-06-11 18:40:07,734][196496] Num frames 4100...
395
+ [2025-06-11 18:40:07,797][196496] Num frames 4200...
396
+ [2025-06-11 18:40:07,860][196496] Num frames 4300...
397
+ [2025-06-11 18:40:07,924][196496] Num frames 4400...
398
+ [2025-06-11 18:40:07,990][196496] Num frames 4500...
399
+ [2025-06-11 18:40:08,076][196496] Avg episode rewards: #0: 28.367, true rewards: #0: 11.367
400
+ [2025-06-11 18:40:08,077][196496] Avg episode reward: 28.367, avg true_objective: 11.367
401
+ [2025-06-11 18:40:08,111][196496] Num frames 4600...
402
+ [2025-06-11 18:40:08,171][196496] Num frames 4700...
403
+ [2025-06-11 18:40:08,230][196496] Num frames 4800...
404
+ [2025-06-11 18:40:08,291][196496] Num frames 4900...
405
+ [2025-06-11 18:40:08,352][196496] Num frames 5000...
406
+ [2025-06-11 18:40:08,412][196496] Num frames 5100...
407
+ [2025-06-11 18:40:08,482][196496] Avg episode rewards: #0: 25.454, true rewards: #0: 10.254
408
+ [2025-06-11 18:40:08,483][196496] Avg episode reward: 25.454, avg true_objective: 10.254
409
+ [2025-06-11 18:40:08,532][196496] Num frames 5200...
410
+ [2025-06-11 18:40:08,592][196496] Num frames 5300...
411
+ [2025-06-11 18:40:08,655][196496] Num frames 5400...
412
+ [2025-06-11 18:40:08,715][196496] Num frames 5500...
413
+ [2025-06-11 18:40:08,806][196496] Avg episode rewards: #0: 22.252, true rewards: #0: 9.252
414
+ [2025-06-11 18:40:08,806][196496] Avg episode reward: 22.252, avg true_objective: 9.252
415
+ [2025-06-11 18:40:08,841][196496] Num frames 5600...
416
+ [2025-06-11 18:40:08,900][196496] Num frames 5700...
417
+ [2025-06-11 18:40:08,965][196496] Num frames 5800...
418
+ [2025-06-11 18:40:09,028][196496] Num frames 5900...
419
+ [2025-06-11 18:40:09,090][196496] Num frames 6000...
420
+ [2025-06-11 18:40:09,150][196496] Num frames 6100...
421
+ [2025-06-11 18:40:09,212][196496] Num frames 6200...
422
+ [2025-06-11 18:40:09,274][196496] Num frames 6300...
423
+ [2025-06-11 18:40:09,335][196496] Num frames 6400...
424
+ [2025-06-11 18:40:09,396][196496] Num frames 6500...
425
+ [2025-06-11 18:40:09,457][196496] Num frames 6600...
426
+ [2025-06-11 18:40:09,519][196496] Num frames 6700...
427
+ [2025-06-11 18:40:09,632][196496] Avg episode rewards: #0: 22.981, true rewards: #0: 9.696
428
+ [2025-06-11 18:40:09,633][196496] Avg episode reward: 22.981, avg true_objective: 9.696
429
+ [2025-06-11 18:40:09,641][196496] Num frames 6800...
430
+ [2025-06-11 18:40:09,704][196496] Num frames 6900...
431
+ [2025-06-11 18:40:09,764][196496] Num frames 7000...
432
+ [2025-06-11 18:40:09,825][196496] Num frames 7100...
433
+ [2025-06-11 18:40:09,889][196496] Num frames 7200...
434
+ [2025-06-11 18:40:09,954][196496] Num frames 7300...
435
+ [2025-06-11 18:40:10,017][196496] Avg episode rewards: #0: 21.020, true rewards: #0: 9.145
436
+ [2025-06-11 18:40:10,017][196496] Avg episode reward: 21.020, avg true_objective: 9.145
437
+ [2025-06-11 18:40:10,074][196496] Num frames 7400...
438
+ [2025-06-11 18:40:10,137][196496] Num frames 7500...
439
+ [2025-06-11 18:40:10,199][196496] Num frames 7600...
440
+ [2025-06-11 18:40:10,259][196496] Num frames 7700...
441
+ [2025-06-11 18:40:10,321][196496] Num frames 7800...
442
+ [2025-06-11 18:40:10,384][196496] Num frames 7900...
443
+ [2025-06-11 18:40:10,448][196496] Num frames 8000...
444
+ [2025-06-11 18:40:10,510][196496] Num frames 8100...
445
+ [2025-06-11 18:40:10,574][196496] Num frames 8200...
446
+ [2025-06-11 18:40:10,635][196496] Num frames 8300...
447
+ [2025-06-11 18:40:10,697][196496] Num frames 8400...
448
+ [2025-06-11 18:40:10,761][196496] Num frames 8500...
449
+ [2025-06-11 18:40:10,823][196496] Num frames 8600...
450
+ [2025-06-11 18:40:10,885][196496] Num frames 8700...
451
+ [2025-06-11 18:40:10,948][196496] Num frames 8800...
452
+ [2025-06-11 18:40:11,015][196496] Num frames 8900...
453
+ [2025-06-11 18:40:11,081][196496] Num frames 9000...
454
+ [2025-06-11 18:40:11,143][196496] Num frames 9100...
455
+ [2025-06-11 18:40:11,210][196496] Num frames 9200...
456
+ [2025-06-11 18:40:11,274][196496] Num frames 9300...
457
+ [2025-06-11 18:40:11,341][196496] Num frames 9400...
458
+ [2025-06-11 18:40:11,408][196496] Avg episode rewards: #0: 24.795, true rewards: #0: 10.462
459
+ [2025-06-11 18:40:11,408][196496] Avg episode reward: 24.795, avg true_objective: 10.462
460
+ [2025-06-11 18:40:11,463][196496] Num frames 9500...
461
+ [2025-06-11 18:40:11,523][196496] Num frames 9600...
462
+ [2025-06-11 18:40:11,585][196496] Num frames 9700...
463
+ [2025-06-11 18:40:11,646][196496] Num frames 9800...
464
+ [2025-06-11 18:40:11,709][196496] Num frames 9900...
465
+ [2025-06-11 18:40:11,773][196496] Num frames 10000...
466
+ [2025-06-11 18:40:11,842][196496] Num frames 10100...
467
+ [2025-06-11 18:40:11,906][196496] Num frames 10200...
468
+ [2025-06-11 18:40:11,970][196496] Num frames 10300...
469
+ [2025-06-11 18:40:12,031][196496] Avg episode rewards: #0: 24.112, true rewards: #0: 10.312
470
+ [2025-06-11 18:40:12,031][196496] Avg episode reward: 24.112, avg true_objective: 10.312
471
+ [2025-06-11 18:40:22,551][196496] Replay video saved to /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/replay.mp4!
472
+ [2025-06-11 18:41:01,389][196496] The model has been pushed to https://huggingface.co/PranayPalem/vizdoom_laptop_optimized
473
+ [2025-06-11 19:38:53,674][258077] Saving configuration to /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/config.json...
474
+ [2025-06-11 19:38:53,674][258077] Rollout worker 0 uses device cpu
475
+ [2025-06-11 19:38:53,675][258077] Rollout worker 1 uses device cpu
476
+ [2025-06-11 19:38:53,675][258077] Rollout worker 2 uses device cpu
477
+ [2025-06-11 19:38:53,675][258077] Rollout worker 3 uses device cpu
478
+ [2025-06-11 19:38:53,675][258077] Rollout worker 4 uses device cpu
479
+ [2025-06-11 19:38:53,675][258077] Rollout worker 5 uses device cpu
480
+ [2025-06-11 19:38:53,675][258077] Rollout worker 6 uses device cpu
481
+ [2025-06-11 19:38:53,675][258077] Rollout worker 7 uses device cpu
482
+ [2025-06-11 19:38:53,767][258077] Using GPUs [0] for process 0 (actually maps to GPUs [0])
483
+ [2025-06-11 19:38:53,767][258077] InferenceWorker_p0-w0: min num requests: 2
484
+ [2025-06-11 19:38:53,789][258077] Starting all processes...
485
+ [2025-06-11 19:38:53,789][258077] Starting process learner_proc0
486
+ [2025-06-11 19:38:54,917][258077] Starting all processes...
487
+ [2025-06-11 19:38:54,921][258224] Using GPUs [0] for process 0 (actually maps to GPUs [0])
488
+ [2025-06-11 19:38:54,921][258224] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
489
+ [2025-06-11 19:38:54,922][258077] Starting process inference_proc0-0
490
+ [2025-06-11 19:38:54,922][258077] Starting process rollout_proc0
491
+ [2025-06-11 19:38:54,922][258077] Starting process rollout_proc1
492
+ [2025-06-11 19:38:54,922][258077] Starting process rollout_proc2
493
+ [2025-06-11 19:38:54,935][258224] Num visible devices: 1
494
+ [2025-06-11 19:38:54,940][258224] Setting fixed seed 3333
495
+ [2025-06-11 19:38:54,941][258224] Using GPUs [0] for process 0 (actually maps to GPUs [0])
496
+ [2025-06-11 19:38:54,942][258224] Initializing actor-critic model on device cuda:0
497
+ [2025-06-11 19:38:54,942][258224] RunningMeanStd input shape: (3, 72, 128)
498
+ [2025-06-11 19:38:54,942][258224] RunningMeanStd input shape: (1,)
499
+ [2025-06-11 19:38:54,922][258077] Starting process rollout_proc3
500
+ [2025-06-11 19:38:54,923][258077] Starting process rollout_proc4
501
+ [2025-06-11 19:38:54,925][258077] Starting process rollout_proc5
502
+ [2025-06-11 19:38:54,925][258077] Starting process rollout_proc6
503
+ [2025-06-11 19:38:54,925][258077] Starting process rollout_proc7
504
+ [2025-06-11 19:38:55,016][258224] ConvEncoder: input_channels=3
505
+ [2025-06-11 19:38:55,116][258224] Conv encoder output size: 512
506
+ [2025-06-11 19:38:55,117][258224] Policy head output size: 512
507
+ [2025-06-11 19:38:55,132][258224] Created Actor Critic model with architecture:
508
+ [2025-06-11 19:38:55,132][258224] ActorCriticSharedWeights(
509
+ (obs_normalizer): ObservationNormalizer(
510
+ (running_mean_std): RunningMeanStdDictInPlace(
511
+ (running_mean_std): ModuleDict(
512
+ (obs): RunningMeanStdInPlace()
513
+ )
514
+ )
515
+ )
516
+ (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
517
+ (encoder): VizdoomEncoder(
518
+ (basic_encoder): ConvEncoder(
519
+ (enc): RecursiveScriptModule(
520
+ original_name=ConvEncoderImpl
521
+ (conv_head): RecursiveScriptModule(
522
+ original_name=Sequential
523
+ (0): RecursiveScriptModule(original_name=Conv2d)
524
+ (1): RecursiveScriptModule(original_name=ELU)
525
+ (2): RecursiveScriptModule(original_name=Conv2d)
526
+ (3): RecursiveScriptModule(original_name=ELU)
527
+ (4): RecursiveScriptModule(original_name=Conv2d)
528
+ (5): RecursiveScriptModule(original_name=ELU)
529
+ )
530
+ (mlp_layers): RecursiveScriptModule(
531
+ original_name=Sequential
532
+ (0): RecursiveScriptModule(original_name=Linear)
533
+ (1): RecursiveScriptModule(original_name=ELU)
534
+ )
535
+ )
536
+ )
537
+ )
538
+ (core): ModelCoreRNN(
539
+ (core): GRU(512, 512)
540
+ )
541
+ (decoder): MlpDecoder(
542
+ (mlp): Identity()
543
+ )
544
+ (critic_linear): Linear(in_features=512, out_features=1, bias=True)
545
+ (action_parameterization): ActionParameterizationDefault(
546
+ (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
547
+ )
548
+ )
549
+ [2025-06-11 19:38:55,350][258224] Using optimizer <class 'torch.optim.adam.Adam'>
550
+ [2025-06-11 19:38:56,154][258224] Loading state from checkpoint /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/checkpoint_p0/checkpoint_000000490_2007040.pth...
551
+ [2025-06-11 19:38:56,189][258224] Loading model from checkpoint
552
+ [2025-06-11 19:38:56,190][258224] Loaded experiment state at self.train_step=490, self.env_steps=2007040
553
+ [2025-06-11 19:38:56,190][258224] Initialized policy 0 weights for model version 490
554
+ [2025-06-11 19:38:56,193][258224] LearnerWorker_p0 finished initialization!
555
+ [2025-06-11 19:38:56,193][258224] Using GPUs [0] for process 0 (actually maps to GPUs [0])
556
+ [2025-06-11 19:38:56,387][258317] Worker 4 uses CPU cores [8, 9]
557
+ [2025-06-11 19:38:56,392][258314] Worker 0 uses CPU cores [0, 1]
558
+ [2025-06-11 19:38:56,418][258319] Worker 6 uses CPU cores [12, 13]
559
+ [2025-06-11 19:38:56,451][258296] Using GPUs [0] for process 0 (actually maps to GPUs [0])
560
+ [2025-06-11 19:38:56,451][258296] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
561
+ [2025-06-11 19:38:56,464][258322] Worker 7 uses CPU cores [14, 15]
562
+ [2025-06-11 19:38:56,464][258296] Num visible devices: 1
563
+ [2025-06-11 19:38:56,476][258313] Worker 1 uses CPU cores [2, 3]
564
+ [2025-06-11 19:38:56,549][258321] Worker 5 uses CPU cores [10, 11]
565
+ [2025-06-11 19:38:56,597][258296] RunningMeanStd input shape: (3, 72, 128)
566
+ [2025-06-11 19:38:56,598][258296] RunningMeanStd input shape: (1,)
567
+ [2025-06-11 19:38:56,606][258316] Worker 3 uses CPU cores [6, 7]
568
+ [2025-06-11 19:38:56,662][258315] Worker 2 uses CPU cores [4, 5]
569
+ [2025-06-11 19:38:56,666][258296] ConvEncoder: input_channels=3
570
+ [2025-06-11 19:38:56,715][258296] Conv encoder output size: 512
571
+ [2025-06-11 19:38:56,715][258296] Policy head output size: 512
572
+ [2025-06-11 19:38:56,742][258077] Inference worker 0-0 is ready!
573
+ [2025-06-11 19:38:56,743][258077] All inference workers are ready! Signal rollout workers to start!
574
+ [2025-06-11 19:38:56,776][258322] Doom resolution: 160x120, resize resolution: (128, 72)
575
+ [2025-06-11 19:38:56,776][258317] Doom resolution: 160x120, resize resolution: (128, 72)
576
+ [2025-06-11 19:38:56,776][258314] Doom resolution: 160x120, resize resolution: (128, 72)
577
+ [2025-06-11 19:38:56,782][258316] Doom resolution: 160x120, resize resolution: (128, 72)
578
+ [2025-06-11 19:38:56,788][258313] Doom resolution: 160x120, resize resolution: (128, 72)
579
+ [2025-06-11 19:38:56,788][258319] Doom resolution: 160x120, resize resolution: (128, 72)
580
+ [2025-06-11 19:38:56,788][258315] Doom resolution: 160x120, resize resolution: (128, 72)
581
+ [2025-06-11 19:38:56,788][258321] Doom resolution: 160x120, resize resolution: (128, 72)
582
+ [2025-06-11 19:38:56,956][258317] Decorrelating experience for 0 frames...
583
+ [2025-06-11 19:38:57,024][258322] Decorrelating experience for 0 frames...
584
+ [2025-06-11 19:38:57,030][258316] Decorrelating experience for 0 frames...
585
+ [2025-06-11 19:38:57,030][258321] Decorrelating experience for 0 frames...
586
+ [2025-06-11 19:38:57,036][258315] Decorrelating experience for 0 frames...
587
+ [2025-06-11 19:38:57,036][258313] Decorrelating experience for 0 frames...
588
+ [2025-06-11 19:38:57,037][258319] Decorrelating experience for 0 frames...
589
+ [2025-06-11 19:38:57,113][258317] Decorrelating experience for 32 frames...
590
+ [2025-06-11 19:38:57,113][258314] Decorrelating experience for 0 frames...
591
+ [2025-06-11 19:38:57,200][258313] Decorrelating experience for 32 frames...
592
+ [2025-06-11 19:38:57,210][258315] Decorrelating experience for 32 frames...
593
+ [2025-06-11 19:38:57,211][258319] Decorrelating experience for 32 frames...
594
+ [2025-06-11 19:38:57,220][258316] Decorrelating experience for 32 frames...
595
+ [2025-06-11 19:38:57,287][258322] Decorrelating experience for 32 frames...
596
+ [2025-06-11 19:38:57,291][258314] Decorrelating experience for 32 frames...
597
+ [2025-06-11 19:38:57,333][258317] Decorrelating experience for 64 frames...
598
+ [2025-06-11 19:38:57,423][258319] Decorrelating experience for 64 frames...
599
+ [2025-06-11 19:38:57,438][258316] Decorrelating experience for 64 frames...
600
+ [2025-06-11 19:38:57,438][258321] Decorrelating experience for 32 frames...
601
+ [2025-06-11 19:38:57,500][258314] Decorrelating experience for 64 frames...
602
+ [2025-06-11 19:38:57,597][258319] Decorrelating experience for 96 frames...
603
+ [2025-06-11 19:38:57,604][258313] Decorrelating experience for 64 frames...
604
+ [2025-06-11 19:38:57,646][258321] Decorrelating experience for 64 frames...
605
+ [2025-06-11 19:38:57,763][258317] Decorrelating experience for 96 frames...
606
+ [2025-06-11 19:38:57,763][258316] Decorrelating experience for 96 frames...
607
+ [2025-06-11 19:38:57,817][258313] Decorrelating experience for 96 frames...
608
+ [2025-06-11 19:38:57,937][258321] Decorrelating experience for 96 frames...
609
+ [2025-06-11 19:38:57,945][258322] Decorrelating experience for 64 frames...
610
+ [2025-06-11 19:38:58,131][258322] Decorrelating experience for 96 frames...
611
+ [2025-06-11 19:38:58,141][258314] Decorrelating experience for 96 frames...
612
+ [2025-06-11 19:38:58,353][258224] Signal inference workers to stop experience collection...
613
+ [2025-06-11 19:38:58,358][258296] InferenceWorker_p0-w0: stopping experience collection
614
+ [2025-06-11 19:38:58,370][258315] Decorrelating experience for 64 frames...
615
+ [2025-06-11 19:38:58,538][258315] Decorrelating experience for 96 frames...
616
+ [2025-06-11 19:38:59,178][258224] Signal inference workers to resume experience collection...
617
+ [2025-06-11 19:38:59,179][258296] InferenceWorker_p0-w0: resuming experience collection
618
+ [2025-06-11 19:38:59,739][258077] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 2023424. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
619
+ [2025-06-11 19:38:59,739][258077] Avg episode reward: [(0, '5.474')]
620
+ [2025-06-11 19:39:00,587][258296] Updated weights for policy 0, policy_version 500 (0.0099)
621
+ [2025-06-11 19:39:02,085][258296] Updated weights for policy 0, policy_version 510 (0.0009)
622
+ [2025-06-11 19:39:03,566][258296] Updated weights for policy 0, policy_version 520 (0.0007)
623
+ [2025-06-11 19:39:04,739][258077] Fps is (10 sec: 27033.3, 60 sec: 27033.3, 300 sec: 27033.3). Total num frames: 2158592. Throughput: 0: 5101.1. Samples: 25506. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
624
+ [2025-06-11 19:39:04,739][258077] Avg episode reward: [(0, '18.715')]
625
+ [2025-06-11 19:39:04,761][258224] Saving new best policy, reward=18.715!
626
+ [2025-06-11 19:39:05,085][258296] Updated weights for policy 0, policy_version 530 (0.0007)
627
+ [2025-06-11 19:39:06,592][258296] Updated weights for policy 0, policy_version 540 (0.0007)
628
+ [2025-06-11 19:39:08,108][258296] Updated weights for policy 0, policy_version 550 (0.0007)
629
+ [2025-06-11 19:39:09,633][258296] Updated weights for policy 0, policy_version 560 (0.0007)
630
+ [2025-06-11 19:39:09,739][258077] Fps is (10 sec: 27033.4, 60 sec: 27033.4, 300 sec: 27033.4). Total num frames: 2293760. Throughput: 0: 6609.4. Samples: 66094. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
631
+ [2025-06-11 19:39:09,739][258077] Avg episode reward: [(0, '20.614')]
632
+ [2025-06-11 19:39:09,740][258224] Saving new best policy, reward=20.614!
633
+ [2025-06-11 19:39:11,278][258296] Updated weights for policy 0, policy_version 570 (0.0007)
634
+ [2025-06-11 19:39:12,804][258296] Updated weights for policy 0, policy_version 580 (0.0007)
635
+ [2025-06-11 19:39:13,704][258077] Heartbeat connected on Batcher_0
636
+ [2025-06-11 19:39:13,764][258077] Heartbeat connected on LearnerWorker_p0
637
+ [2025-06-11 19:39:13,769][258077] Heartbeat connected on InferenceWorker_p0-w0
638
+ [2025-06-11 19:39:13,770][258077] Heartbeat connected on RolloutWorker_w0
639
+ [2025-06-11 19:39:13,773][258077] Heartbeat connected on RolloutWorker_w1
640
+ [2025-06-11 19:39:13,776][258077] Heartbeat connected on RolloutWorker_w2
641
+ [2025-06-11 19:39:13,779][258077] Heartbeat connected on RolloutWorker_w3
642
+ [2025-06-11 19:39:13,781][258077] Heartbeat connected on RolloutWorker_w4
643
+ [2025-06-11 19:39:13,784][258077] Heartbeat connected on RolloutWorker_w5
644
+ [2025-06-11 19:39:13,787][258077] Heartbeat connected on RolloutWorker_w6
645
+ [2025-06-11 19:39:13,789][258077] Heartbeat connected on RolloutWorker_w7
646
+ [2025-06-11 19:39:14,346][258296] Updated weights for policy 0, policy_version 590 (0.0007)
647
+ [2025-06-11 19:39:14,738][258077] Fps is (10 sec: 26624.3, 60 sec: 26760.6, 300 sec: 26760.6). Total num frames: 2424832. Throughput: 0: 5698.0. Samples: 85470. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
648
+ [2025-06-11 19:39:14,739][258077] Avg episode reward: [(0, '19.710')]
649
+ [2025-06-11 19:39:15,890][258296] Updated weights for policy 0, policy_version 600 (0.0007)
650
+ [2025-06-11 19:39:17,420][258296] Updated weights for policy 0, policy_version 610 (0.0007)
651
+ [2025-06-11 19:39:18,958][258296] Updated weights for policy 0, policy_version 620 (0.0007)
652
+ [2025-06-11 19:39:19,738][258077] Fps is (10 sec: 26624.4, 60 sec: 26828.9, 300 sec: 26828.9). Total num frames: 2560000. Throughput: 0: 6276.0. Samples: 125520. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
653
+ [2025-06-11 19:39:19,739][258077] Avg episode reward: [(0, '21.066')]
654
+ [2025-06-11 19:39:19,741][258224] Saving new best policy, reward=21.066!
655
+ [2025-06-11 19:39:20,514][258296] Updated weights for policy 0, policy_version 630 (0.0007)
656
+ [2025-06-11 19:39:22,075][258296] Updated weights for policy 0, policy_version 640 (0.0007)
657
+ [2025-06-11 19:39:23,618][258296] Updated weights for policy 0, policy_version 650 (0.0007)
658
+ [2025-06-11 19:39:24,739][258077] Fps is (10 sec: 26623.9, 60 sec: 26705.9, 300 sec: 26705.9). Total num frames: 2691072. Throughput: 0: 6608.4. Samples: 165210. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
659
+ [2025-06-11 19:39:24,739][258077] Avg episode reward: [(0, '19.379')]
660
+ [2025-06-11 19:39:25,157][258296] Updated weights for policy 0, policy_version 660 (0.0007)
661
+ [2025-06-11 19:39:26,685][258296] Updated weights for policy 0, policy_version 670 (0.0007)
662
+ [2025-06-11 19:39:28,256][258296] Updated weights for policy 0, policy_version 680 (0.0007)
663
+ [2025-06-11 19:39:29,738][258077] Fps is (10 sec: 26214.3, 60 sec: 26624.0, 300 sec: 26624.0). Total num frames: 2822144. Throughput: 0: 6173.9. Samples: 185216. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
664
+ [2025-06-11 19:39:29,739][258077] Avg episode reward: [(0, '20.191')]
665
+ [2025-06-11 19:39:29,790][258296] Updated weights for policy 0, policy_version 690 (0.0007)
666
+ [2025-06-11 19:39:31,335][258296] Updated weights for policy 0, policy_version 700 (0.0008)
667
+ [2025-06-11 19:39:32,880][258296] Updated weights for policy 0, policy_version 710 (0.0007)
668
+ [2025-06-11 19:39:34,423][258296] Updated weights for policy 0, policy_version 720 (0.0007)
669
+ [2025-06-11 19:39:34,738][258077] Fps is (10 sec: 26624.2, 60 sec: 26682.6, 300 sec: 26682.6). Total num frames: 2957312. Throughput: 0: 6423.7. Samples: 224828. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
670
+ [2025-06-11 19:39:34,739][258077] Avg episode reward: [(0, '22.561')]
671
+ [2025-06-11 19:39:34,739][258224] Saving new best policy, reward=22.561!
672
+ [2025-06-11 19:39:35,992][258296] Updated weights for policy 0, policy_version 730 (0.0007)
673
+ [2025-06-11 19:39:37,558][258296] Updated weights for policy 0, policy_version 740 (0.0007)
674
+ [2025-06-11 19:39:39,111][258296] Updated weights for policy 0, policy_version 750 (0.0007)
675
+ [2025-06-11 19:39:39,739][258077] Fps is (10 sec: 26623.8, 60 sec: 26624.0, 300 sec: 26624.0). Total num frames: 3088384. Throughput: 0: 6609.0. Samples: 264360. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
676
+ [2025-06-11 19:39:39,739][258077] Avg episode reward: [(0, '22.660')]
677
+ [2025-06-11 19:39:39,740][258224] Saving new best policy, reward=22.660!
678
+ [2025-06-11 19:39:40,672][258296] Updated weights for policy 0, policy_version 760 (0.0007)
679
+ [2025-06-11 19:39:42,225][258296] Updated weights for policy 0, policy_version 770 (0.0007)
680
+ [2025-06-11 19:39:43,792][258296] Updated weights for policy 0, policy_version 780 (0.0007)
681
+ [2025-06-11 19:39:44,738][258077] Fps is (10 sec: 26214.4, 60 sec: 26578.5, 300 sec: 26578.5). Total num frames: 3219456. Throughput: 0: 6311.3. Samples: 284008. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
682
+ [2025-06-11 19:39:44,739][258077] Avg episode reward: [(0, '22.866')]
683
+ [2025-06-11 19:39:44,739][258224] Saving new best policy, reward=22.866!
684
+ [2025-06-11 19:39:45,363][258296] Updated weights for policy 0, policy_version 790 (0.0007)
685
+ [2025-06-11 19:39:46,936][258296] Updated weights for policy 0, policy_version 800 (0.0007)
686
+ [2025-06-11 19:39:48,504][258296] Updated weights for policy 0, policy_version 810 (0.0007)
687
+ [2025-06-11 19:39:49,739][258077] Fps is (10 sec: 25805.0, 60 sec: 26460.2, 300 sec: 26460.2). Total num frames: 3346432. Throughput: 0: 6617.6. Samples: 323298. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
688
+ [2025-06-11 19:39:49,739][258077] Avg episode reward: [(0, '22.551')]
689
+ [2025-06-11 19:39:50,056][258296] Updated weights for policy 0, policy_version 820 (0.0007)
690
+ [2025-06-11 19:39:51,605][258296] Updated weights for policy 0, policy_version 830 (0.0007)
691
+ [2025-06-11 19:39:53,176][258296] Updated weights for policy 0, policy_version 840 (0.0007)
692
+ [2025-06-11 19:39:54,738][258077] Fps is (10 sec: 25804.7, 60 sec: 26437.8, 300 sec: 26437.8). Total num frames: 3477504. Throughput: 0: 6589.5. Samples: 362620. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
693
+ [2025-06-11 19:39:54,739][258077] Avg episode reward: [(0, '19.912')]
694
+ [2025-06-11 19:39:54,771][258296] Updated weights for policy 0, policy_version 850 (0.0007)
695
+ [2025-06-11 19:39:56,330][258296] Updated weights for policy 0, policy_version 860 (0.0007)
696
+ [2025-06-11 19:39:57,898][258296] Updated weights for policy 0, policy_version 870 (0.0007)
697
+ [2025-06-11 19:39:59,466][258296] Updated weights for policy 0, policy_version 880 (0.0007)
698
+ [2025-06-11 19:39:59,739][258077] Fps is (10 sec: 26214.4, 60 sec: 26419.2, 300 sec: 26419.2). Total num frames: 3608576. Throughput: 0: 6593.6. Samples: 382184. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
699
+ [2025-06-11 19:39:59,739][258077] Avg episode reward: [(0, '23.099')]
700
+ [2025-06-11 19:39:59,741][258224] Saving new best policy, reward=23.099!
701
+ [2025-06-11 19:40:01,065][258296] Updated weights for policy 0, policy_version 890 (0.0007)
702
+ [2025-06-11 19:40:02,631][258296] Updated weights for policy 0, policy_version 900 (0.0007)
703
+ [2025-06-11 19:40:04,197][258296] Updated weights for policy 0, policy_version 910 (0.0008)
704
+ [2025-06-11 19:40:04,739][258077] Fps is (10 sec: 26214.4, 60 sec: 26351.0, 300 sec: 26403.5). Total num frames: 3739648. Throughput: 0: 6568.7. Samples: 421110. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
705
+ [2025-06-11 19:40:04,739][258077] Avg episode reward: [(0, '19.705')]
706
+ [2025-06-11 19:40:05,764][258296] Updated weights for policy 0, policy_version 920 (0.0007)
707
+ [2025-06-11 19:40:07,364][258296] Updated weights for policy 0, policy_version 930 (0.0007)
708
+ [2025-06-11 19:40:08,967][258296] Updated weights for policy 0, policy_version 940 (0.0007)
709
+ [2025-06-11 19:40:09,739][258077] Fps is (10 sec: 25804.6, 60 sec: 26214.4, 300 sec: 26331.4). Total num frames: 3866624. Throughput: 0: 6551.9. Samples: 460048. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
710
+ [2025-06-11 19:40:09,739][258077] Avg episode reward: [(0, '22.629')]
711
+ [2025-06-11 19:40:10,565][258296] Updated weights for policy 0, policy_version 950 (0.0007)
712
+ [2025-06-11 19:40:12,139][258296] Updated weights for policy 0, policy_version 960 (0.0007)
713
+ [2025-06-11 19:40:13,717][258296] Updated weights for policy 0, policy_version 970 (0.0007)
714
+ [2025-06-11 19:40:14,739][258077] Fps is (10 sec: 25804.8, 60 sec: 26214.4, 300 sec: 26323.6). Total num frames: 3997696. Throughput: 0: 6537.8. Samples: 479416. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
715
+ [2025-06-11 19:40:14,739][258077] Avg episode reward: [(0, '24.448')]
716
+ [2025-06-11 19:40:14,739][258224] Saving new best policy, reward=24.448!
717
+ [2025-06-11 19:40:14,985][258077] Component Batcher_0 stopped!
718
+ [2025-06-11 19:40:14,985][258224] Stopping Batcher_0...
719
+ [2025-06-11 19:40:14,986][258224] Saving /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/checkpoint_p0/checkpoint_000000978_4005888.pth...
720
+ [2025-06-11 19:40:14,986][258224] Loop batcher_evt_loop terminating...
721
+ [2025-06-11 19:40:15,021][258314] Stopping RolloutWorker_w0...
722
+ [2025-06-11 19:40:15,021][258317] Stopping RolloutWorker_w4...
723
+ [2025-06-11 19:40:15,021][258077] Component RolloutWorker_w0 stopped!
724
+ [2025-06-11 19:40:15,021][258321] Stopping RolloutWorker_w5...
725
+ [2025-06-11 19:40:15,021][258316] Stopping RolloutWorker_w3...
726
+ [2025-06-11 19:40:15,021][258322] Stopping RolloutWorker_w7...
727
+ [2025-06-11 19:40:15,021][258077] Component RolloutWorker_w4 stopped!
728
+ [2025-06-11 19:40:15,021][258314] Loop rollout_proc0_evt_loop terminating...
729
+ [2025-06-11 19:40:15,021][258317] Loop rollout_proc4_evt_loop terminating...
730
+ [2025-06-11 19:40:15,022][258316] Loop rollout_proc3_evt_loop terminating...
731
+ [2025-06-11 19:40:15,022][258077] Component RolloutWorker_w3 stopped!
732
+ [2025-06-11 19:40:15,022][258321] Loop rollout_proc5_evt_loop terminating...
733
+ [2025-06-11 19:40:15,022][258322] Loop rollout_proc7_evt_loop terminating...
734
+ [2025-06-11 19:40:15,022][258077] Component RolloutWorker_w5 stopped!
735
+ [2025-06-11 19:40:15,022][258077] Component RolloutWorker_w7 stopped!
736
+ [2025-06-11 19:40:15,022][258296] Weights refcount: 2 0
737
+ [2025-06-11 19:40:15,023][258077] Component RolloutWorker_w6 stopped!
738
+ [2025-06-11 19:40:15,023][258319] Stopping RolloutWorker_w6...
739
+ [2025-06-11 19:40:15,023][258319] Loop rollout_proc6_evt_loop terminating...
740
+ [2025-06-11 19:40:15,023][258296] Stopping InferenceWorker_p0-w0...
741
+ [2025-06-11 19:40:15,024][258077] Component InferenceWorker_p0-w0 stopped!
742
+ [2025-06-11 19:40:15,024][258296] Loop inference_proc0-0_evt_loop terminating...
743
+ [2025-06-11 19:40:15,026][258077] Component RolloutWorker_w1 stopped!
744
+ [2025-06-11 19:40:15,026][258313] Stopping RolloutWorker_w1...
745
+ [2025-06-11 19:40:15,027][258313] Loop rollout_proc1_evt_loop terminating...
746
+ [2025-06-11 19:40:15,027][258077] Component RolloutWorker_w2 stopped!
747
+ [2025-06-11 19:40:15,027][258315] Stopping RolloutWorker_w2...
748
+ [2025-06-11 19:40:15,028][258315] Loop rollout_proc2_evt_loop terminating...
749
+ [2025-06-11 19:40:15,039][258224] Saving /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/checkpoint_p0/checkpoint_000000978_4005888.pth...
750
+ [2025-06-11 19:40:15,115][258224] Stopping LearnerWorker_p0...
751
+ [2025-06-11 19:40:15,115][258224] Loop learner_proc0_evt_loop terminating...
752
+ [2025-06-11 19:40:15,115][258077] Component LearnerWorker_p0 stopped!
753
+ [2025-06-11 19:40:15,116][258077] Waiting for process learner_proc0 to stop...
754
+ [2025-06-11 19:40:15,954][258077] Waiting for process inference_proc0-0 to join...
755
+ [2025-06-11 19:40:15,956][258077] Waiting for process rollout_proc0 to join...
756
+ [2025-06-11 19:40:15,956][258077] Waiting for process rollout_proc1 to join...
757
+ [2025-06-11 19:40:15,956][258077] Waiting for process rollout_proc2 to join...
758
+ [2025-06-11 19:40:15,957][258077] Waiting for process rollout_proc3 to join...
759
+ [2025-06-11 19:40:15,957][258077] Waiting for process rollout_proc4 to join...
760
+ [2025-06-11 19:40:15,957][258077] Waiting for process rollout_proc5 to join...
761
+ [2025-06-11 19:40:15,957][258077] Waiting for process rollout_proc6 to join...
762
+ [2025-06-11 19:40:15,957][258077] Waiting for process rollout_proc7 to join...
763
+ [2025-06-11 19:40:15,957][258077] Batcher 0 profile tree view:
764
+ batching: 3.2483, releasing_batches: 0.0113
765
+ [2025-06-11 19:40:15,957][258077] InferenceWorker_p0-w0 profile tree view:
766
+ wait_policy: 0.0000
767
+ wait_policy_total: 1.4908
768
+ update_model: 1.2357
769
+ weight_update: 0.0008
770
+ one_step: 0.0025
771
+ handle_policy_step: 71.6643
772
+ deserialize: 2.7464, stack: 0.4336, obs_to_device_normalize: 17.1179, forward: 38.1951, send_messages: 3.2479
773
+ prepare_outputs: 7.3213
774
+ to_cpu: 4.4621
775
+ [2025-06-11 19:40:15,957][258077] Learner 0 profile tree view:
776
+ misc: 0.0024, prepare_batch: 4.8330
777
+ train: 11.1145
778
+ epoch_init: 0.0016, minibatch_init: 0.0018, losses_postprocess: 0.0715, kl_divergence: 0.0804, after_optimizer: 0.2072
779
+ calculate_losses: 4.2003
780
+ losses_init: 0.0008, forward_head: 0.3476, bptt_initial: 3.0037, tail: 0.1751, advantages_returns: 0.0468, losses: 0.2745
781
+ bptt: 0.2993
782
+ bptt_forward_core: 0.2866
783
+ update: 6.4357
784
+ clip: 0.2142
785
+ [2025-06-11 19:40:15,958][258077] RolloutWorker_w0 profile tree view:
786
+ wait_for_trajectories: 0.0366, enqueue_policy_requests: 2.3944, env_step: 33.6083, overhead: 2.9035, complete_rollouts: 0.1489
787
+ save_policy_outputs: 2.3112
788
+ split_output_tensors: 1.1337
789
+ [2025-06-11 19:40:15,958][258077] RolloutWorker_w7 profile tree view:
790
+ wait_for_trajectories: 0.0380, enqueue_policy_requests: 2.4850, env_step: 33.2989, overhead: 2.8466, complete_rollouts: 0.1510
791
+ save_policy_outputs: 2.3348
792
+ split_output_tensors: 1.1538
793
+ [2025-06-11 19:40:15,958][258077] Loop Runner_EvtLoop terminating...
794
+ [2025-06-11 19:40:15,958][258077] Runner profile tree view:
795
+ main_loop: 82.1689
796
+ [2025-06-11 19:40:15,958][258077] Collected {0: 4005888}, FPS: 24326.1
797
+ [2025-06-11 19:40:15,963][258077] Loading existing experiment configuration from /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/config.json
798
+ [2025-06-11 19:40:15,963][258077] Overriding arg 'num_workers' with value 1 passed from command line
799
+ [2025-06-11 19:40:15,963][258077] Adding new argument 'no_render'=True that is not in the saved config file!
800
+ [2025-06-11 19:40:15,963][258077] Adding new argument 'save_video'=True that is not in the saved config file!
801
+ [2025-06-11 19:40:15,964][258077] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
802
+ [2025-06-11 19:40:15,964][258077] Adding new argument 'video_name'=None that is not in the saved config file!
803
+ [2025-06-11 19:40:15,964][258077] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
804
+ [2025-06-11 19:40:15,964][258077] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
805
+ [2025-06-11 19:40:15,964][258077] Adding new argument 'push_to_hub'=True that is not in the saved config file!
806
+ [2025-06-11 19:40:15,964][258077] Adding new argument 'hf_repository'='PranayPalem/vizdoom_laptop_optimized' that is not in the saved config file!
807
+ [2025-06-11 19:40:15,964][258077] Adding new argument 'policy_index'=0 that is not in the saved config file!
808
+ [2025-06-11 19:40:15,964][258077] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
809
+ [2025-06-11 19:40:15,964][258077] Adding new argument 'train_script'=None that is not in the saved config file!
810
+ [2025-06-11 19:40:15,964][258077] Adding new argument 'enjoy_script'=None that is not in the saved config file!
811
+ [2025-06-11 19:40:15,964][258077] Using frameskip 1 and render_action_repeat=4 for evaluation
812
+ [2025-06-11 19:40:15,981][258077] Doom resolution: 160x120, resize resolution: (128, 72)
813
+ [2025-06-11 19:40:15,983][258077] RunningMeanStd input shape: (3, 72, 128)
814
+ [2025-06-11 19:40:15,983][258077] RunningMeanStd input shape: (1,)
815
+ [2025-06-11 19:40:15,992][258077] ConvEncoder: input_channels=3
816
+ [2025-06-11 19:40:16,043][258077] Conv encoder output size: 512
817
+ [2025-06-11 19:40:16,043][258077] Policy head output size: 512
818
+ [2025-06-11 19:40:16,228][258077] Loading state from checkpoint /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/checkpoint_p0/checkpoint_000000978_4005888.pth...
819
+ [2025-06-11 19:40:16,648][258077] Num frames 100...
820
+ [2025-06-11 19:40:16,711][258077] Num frames 200...
821
+ [2025-06-11 19:40:16,775][258077] Num frames 300...
822
+ [2025-06-11 19:40:16,842][258077] Num frames 400...
823
+ [2025-06-11 19:40:16,909][258077] Num frames 500...
824
+ [2025-06-11 19:40:16,973][258077] Num frames 600...
825
+ [2025-06-11 19:40:17,048][258077] Num frames 700...
826
+ [2025-06-11 19:40:17,111][258077] Num frames 800...
827
+ [2025-06-11 19:40:17,180][258077] Num frames 900...
828
+ [2025-06-11 19:40:17,246][258077] Num frames 1000...
829
+ [2025-06-11 19:40:17,312][258077] Num frames 1100...
830
+ [2025-06-11 19:40:17,381][258077] Num frames 1200...
831
+ [2025-06-11 19:40:17,450][258077] Num frames 1300...
832
+ [2025-06-11 19:40:17,518][258077] Num frames 1400...
833
+ [2025-06-11 19:40:17,587][258077] Num frames 1500...
834
+ [2025-06-11 19:40:17,688][258077] Avg episode rewards: #0: 34.680, true rewards: #0: 15.680
835
+ [2025-06-11 19:40:17,689][258077] Avg episode reward: 34.680, avg true_objective: 15.680
836
+ [2025-06-11 19:40:17,715][258077] Num frames 1600...
837
+ [2025-06-11 19:40:17,779][258077] Num frames 1700...
838
+ [2025-06-11 19:40:17,840][258077] Num frames 1800...
839
+ [2025-06-11 19:40:17,901][258077] Num frames 1900...
840
+ [2025-06-11 19:40:17,963][258077] Num frames 2000...
841
+ [2025-06-11 19:40:18,027][258077] Num frames 2100...
842
+ [2025-06-11 19:40:18,091][258077] Num frames 2200...
843
+ [2025-06-11 19:40:18,153][258077] Num frames 2300...
844
+ [2025-06-11 19:40:18,215][258077] Num frames 2400...
845
+ [2025-06-11 19:40:18,287][258077] Num frames 2500...
846
+ [2025-06-11 19:40:18,398][258077] Avg episode rewards: #0: 27.960, true rewards: #0: 12.960
847
+ [2025-06-11 19:40:18,398][258077] Avg episode reward: 27.960, avg true_objective: 12.960
848
+ [2025-06-11 19:40:18,404][258077] Num frames 2600...
849
+ [2025-06-11 19:40:18,465][258077] Num frames 2700...
850
+ [2025-06-11 19:40:18,528][258077] Num frames 2800...
851
+ [2025-06-11 19:40:18,591][258077] Num frames 2900...
852
+ [2025-06-11 19:40:18,652][258077] Num frames 3000...
853
+ [2025-06-11 19:40:18,715][258077] Num frames 3100...
854
+ [2025-06-11 19:40:18,779][258077] Num frames 3200...
855
+ [2025-06-11 19:40:18,831][258077] Avg episode rewards: #0: 22.000, true rewards: #0: 10.667
856
+ [2025-06-11 19:40:18,831][258077] Avg episode reward: 22.000, avg true_objective: 10.667
857
+ [2025-06-11 19:40:18,894][258077] Num frames 3300...
858
+ [2025-06-11 19:40:18,956][258077] Num frames 3400...
859
+ [2025-06-11 19:40:19,021][258077] Num frames 3500...
860
+ [2025-06-11 19:40:19,083][258077] Num frames 3600...
861
+ [2025-06-11 19:40:19,147][258077] Num frames 3700...
862
+ [2025-06-11 19:40:19,215][258077] Num frames 3800...
863
+ [2025-06-11 19:40:19,316][258077] Avg episode rewards: #0: 20.180, true rewards: #0: 9.680
864
+ [2025-06-11 19:40:19,316][258077] Avg episode reward: 20.180, avg true_objective: 9.680
865
+ [2025-06-11 19:40:19,334][258077] Num frames 3900...
866
+ [2025-06-11 19:40:19,390][258077] Num frames 4000...
867
+ [2025-06-11 19:40:19,455][258077] Num frames 4100...
868
+ [2025-06-11 19:40:19,526][258077] Num frames 4200...
869
+ [2025-06-11 19:40:19,589][258077] Num frames 4300...
870
+ [2025-06-11 19:40:19,650][258077] Num frames 4400...
871
+ [2025-06-11 19:40:19,734][258077] Avg episode rewards: #0: 18.696, true rewards: #0: 8.896
872
+ [2025-06-11 19:40:19,735][258077] Avg episode reward: 18.696, avg true_objective: 8.896
873
+ [2025-06-11 19:40:19,770][258077] Num frames 4500...
874
+ [2025-06-11 19:40:19,830][258077] Num frames 4600...
875
+ [2025-06-11 19:40:19,897][258077] Num frames 4700...
876
+ [2025-06-11 19:40:19,960][258077] Num frames 4800...
877
+ [2025-06-11 19:40:20,023][258077] Num frames 4900...
878
+ [2025-06-11 19:40:20,085][258077] Num frames 5000...
879
+ [2025-06-11 19:40:20,149][258077] Num frames 5100...
880
+ [2025-06-11 19:40:20,210][258077] Num frames 5200...
881
+ [2025-06-11 19:40:20,273][258077] Num frames 5300...
882
+ [2025-06-11 19:40:20,334][258077] Num frames 5400...
883
+ [2025-06-11 19:40:20,395][258077] Num frames 5500...
884
+ [2025-06-11 19:40:20,469][258077] Num frames 5600...
885
+ [2025-06-11 19:40:20,532][258077] Num frames 5700...
886
+ [2025-06-11 19:40:20,595][258077] Num frames 5800...
887
+ [2025-06-11 19:40:20,657][258077] Num frames 5900...
888
+ [2025-06-11 19:40:20,718][258077] Num frames 6000...
889
+ [2025-06-11 19:40:20,780][258077] Num frames 6100...
890
+ [2025-06-11 19:40:20,844][258077] Num frames 6200...
891
+ [2025-06-11 19:40:20,907][258077] Num frames 6300...
892
+ [2025-06-11 19:40:20,968][258077] Num frames 6400...
893
+ [2025-06-11 19:40:21,029][258077] Num frames 6500...
894
+ [2025-06-11 19:40:21,112][258077] Avg episode rewards: #0: 26.413, true rewards: #0: 10.913
895
+ [2025-06-11 19:40:21,112][258077] Avg episode reward: 26.413, avg true_objective: 10.913
896
+ [2025-06-11 19:40:21,147][258077] Num frames 6600...
897
+ [2025-06-11 19:40:21,209][258077] Num frames 6700...
898
+ [2025-06-11 19:40:21,268][258077] Num frames 6800...
899
+ [2025-06-11 19:40:21,329][258077] Num frames 6900...
900
+ [2025-06-11 19:40:21,391][258077] Num frames 7000...
901
+ [2025-06-11 19:40:21,453][258077] Num frames 7100...
902
+ [2025-06-11 19:40:21,515][258077] Num frames 7200...
903
+ [2025-06-11 19:40:21,583][258077] Num frames 7300...
904
+ [2025-06-11 19:40:21,646][258077] Num frames 7400...
905
+ [2025-06-11 19:40:21,715][258077] Num frames 7500...
906
+ [2025-06-11 19:40:21,780][258077] Num frames 7600...
907
+ [2025-06-11 19:40:21,843][258077] Num frames 7700...
908
+ [2025-06-11 19:40:21,905][258077] Num frames 7800...
909
+ [2025-06-11 19:40:21,968][258077] Num frames 7900...
910
+ [2025-06-11 19:40:22,033][258077] Num frames 8000...
911
+ [2025-06-11 19:40:22,094][258077] Num frames 8100...
912
+ [2025-06-11 19:40:22,159][258077] Num frames 8200...
913
+ [2025-06-11 19:40:22,262][258077] Avg episode rewards: #0: 28.823, true rewards: #0: 11.823
914
+ [2025-06-11 19:40:22,263][258077] Avg episode reward: 28.823, avg true_objective: 11.823
915
+ [2025-06-11 19:40:22,279][258077] Num frames 8300...
916
+ [2025-06-11 19:40:22,339][258077] Num frames 8400...
917
+ [2025-06-11 19:40:22,402][258077] Num frames 8500...
918
+ [2025-06-11 19:40:22,463][258077] Num frames 8600...
919
+ [2025-06-11 19:40:22,524][258077] Num frames 8700...
920
+ [2025-06-11 19:40:22,585][258077] Num frames 8800...
921
+ [2025-06-11 19:40:22,646][258077] Num frames 8900...
922
+ [2025-06-11 19:40:22,709][258077] Num frames 9000...
923
+ [2025-06-11 19:40:22,770][258077] Num frames 9100...
924
+ [2025-06-11 19:40:22,832][258077] Num frames 9200...
925
+ [2025-06-11 19:40:22,936][258077] Avg episode rewards: #0: 28.102, true rewards: #0: 11.602
926
+ [2025-06-11 19:40:22,937][258077] Avg episode reward: 28.102, avg true_objective: 11.602
927
+ [2025-06-11 19:40:22,948][258077] Num frames 9300...
928
+ [2025-06-11 19:40:23,011][258077] Num frames 9400...
929
+ [2025-06-11 19:40:23,074][258077] Num frames 9500...
930
+ [2025-06-11 19:40:23,136][258077] Num frames 9600...
931
+ [2025-06-11 19:40:23,199][258077] Num frames 9700...
932
+ [2025-06-11 19:40:23,262][258077] Num frames 9800...
933
+ [2025-06-11 19:40:23,326][258077] Num frames 9900...
934
+ [2025-06-11 19:40:23,390][258077] Num frames 10000...
935
+ [2025-06-11 19:40:23,454][258077] Num frames 10100...
936
+ [2025-06-11 19:40:23,516][258077] Num frames 10200...
937
+ [2025-06-11 19:40:23,578][258077] Num frames 10300...
938
+ [2025-06-11 19:40:23,642][258077] Num frames 10400...
939
+ [2025-06-11 19:40:23,706][258077] Num frames 10500...
940
+ [2025-06-11 19:40:23,771][258077] Num frames 10600...
941
+ [2025-06-11 19:40:23,843][258077] Num frames 10700...
942
+ [2025-06-11 19:40:23,904][258077] Num frames 10800...
943
+ [2025-06-11 19:40:23,968][258077] Num frames 10900...
944
+ [2025-06-11 19:40:24,033][258077] Num frames 11000...
945
+ [2025-06-11 19:40:24,095][258077] Num frames 11100...
946
+ [2025-06-11 19:40:24,158][258077] Num frames 11200...
947
+ [2025-06-11 19:40:24,219][258077] Num frames 11300...
948
+ [2025-06-11 19:40:24,325][258077] Avg episode rewards: #0: 31.869, true rewards: #0: 12.647
949
+ [2025-06-11 19:40:24,325][258077] Avg episode reward: 31.869, avg true_objective: 12.647
950
+ [2025-06-11 19:40:24,336][258077] Num frames 11400...
951
+ [2025-06-11 19:40:24,400][258077] Num frames 11500...
952
+ [2025-06-11 19:40:24,462][258077] Num frames 11600...
953
+ [2025-06-11 19:40:24,522][258077] Num frames 11700...
954
+ [2025-06-11 19:40:24,583][258077] Num frames 11800...
955
+ [2025-06-11 19:40:24,645][258077] Num frames 11900...
956
+ [2025-06-11 19:40:24,709][258077] Num frames 12000...
957
+ [2025-06-11 19:40:24,771][258077] Num frames 12100...
958
+ [2025-06-11 19:40:24,834][258077] Num frames 12200...
959
+ [2025-06-11 19:40:24,939][258077] Avg episode rewards: #0: 30.584, true rewards: #0: 12.284
960
+ [2025-06-11 19:40:24,940][258077] Avg episode reward: 30.584, avg true_objective: 12.284
961
+ [2025-06-11 19:40:37,835][258077] Replay video saved to /home/pranaypalem/Documents/Reinforcement_Learning/RL_Testing_Pranay/DoomHealth/train_dir/vizdoom_laptop_optimized/replay.mp4!
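
The log above ends once the evaluation replay is written. For quick inspection of a run like this, a minimal stdlib-only sketch such as the one below can pull the periodic throughput and reward figures back out of the raw log. It is illustrative only and not part of the committed files: `sf_log.txt` is a hypothetical filename, and the regular expressions simply mirror the line formats visible above (because it uses `re.search`, it also tolerates the `+` diff markers and gutter numbers shown in this view).

```
import re
from typing import List, Tuple

# Hypothetical path to a log shaped like the lines above; adjust as needed.
LOG_PATH = "sf_log.txt"

# "Fps is (10 sec: X, 60 sec: Y, 300 sec: Z). Total num frames: N ..."
FPS_RE = re.compile(
    r"Fps is \(10 sec: ([\d.]+|nan), 60 sec: (?:[\d.]+|nan), 300 sec: (?:[\d.]+|nan)\)\. "
    r"Total num frames: (\d+)"
)
# Training-time reward lines: "Avg episode reward: [(0, '22.866')]"
REWARD_RE = re.compile(r"Avg episode reward: \[\(0, '([-\d.]+)'\)\]")


def parse_training_log(path: str) -> Tuple[List[Tuple[int, float]], List[float]]:
    """Return (total_frames, fps_10s) pairs and the bracketed training rewards."""
    fps_points: List[Tuple[int, float]] = []
    rewards: List[float] = []
    with open(path) as f:
        for line in f:
            m = FPS_RE.search(line)
            if m and m.group(1) != "nan":
                fps_points.append((int(m.group(2)), float(m.group(1))))
            m = REWARD_RE.search(line)
            if m:
                rewards.append(float(m.group(1)))
    return fps_points, rewards


if __name__ == "__main__":
    fps_points, rewards = parse_training_log(LOG_PATH)
    if fps_points:
        first_frames, _ = fps_points[0]
        last_frames, _ = fps_points[-1]
        mean_fps = sum(f for _, f in fps_points) / len(fps_points)
        print(f"Frames covered by FPS reports: {first_frames} -> {last_frames}")
        print(f"Mean 10-second FPS:            {mean_fps:.1f}")
    if rewards:
        print(f"Best training-time avg reward: {max(rewards):.3f}")
```

Run against this section of the log, the sketch would report the frame range covered by the periodic FPS reports and a best bracketed training reward of 24.448, consistent with the final "Saving new best policy, reward=24.448!" entry above.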