{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.40420493483543396,
"min": 0.39900603890419006,
"max": 0.4830927848815918,
"count": 7
},
"Pyramids.Policy.Entropy.sum": {
"value": 12132.615234375,
"min": 12021.25390625,
"max": 14948.8232421875,
"count": 7
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 392.73417721518985,
"min": 364.92537313432837,
"max": 548.280701754386,
"count": 7
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31026.0,
"min": 24450.0,
"max": 31714.0,
"count": 7
},
"Pyramids.Step.mean": {
"value": 989880.0,
"min": 809998.0,
"max": 989880.0,
"count": 7
},
"Pyramids.Step.sum": {
"value": 989880.0,
"min": 809998.0,
"max": 989880.0,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.44328415393829346,
"min": 0.30201780796051025,
"max": 0.44328415393829346,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 121.01657104492188,
"min": 77.61857604980469,
"max": 121.01657104492188,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.005800439510494471,
"min": -0.011447438970208168,
"max": 0.02458082139492035,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.5835199356079102,
"min": -3.1365983486175537,
"max": 6.464756011962891,
"count": 7
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5313063112995293,
"min": 1.0754447967327874,
"max": 1.5454865443839956,
"count": 7
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 120.97319859266281,
"min": 62.37579821050167,
"max": 120.97319859266281,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5313063112995293,
"min": 1.0754447967327874,
"max": 1.5454865443839956,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 120.97319859266281,
"min": 62.37579821050167,
"max": 120.97319859266281,
"count": 7
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.043718275764391204,
"min": 0.043718275764391204,
"max": 0.06089715182867527,
"count": 7
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.453743785386905,
"min": 3.2254289740812965,
"max": 3.7782387666520663,
"count": 7
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06900275927196785,
"min": 0.06511649959083157,
"max": 0.07208056387311193,
"count": 7
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0350413890795178,
"min": 0.9116309942716421,
"max": 1.0350413890795178,
"count": 7
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015598284871126465,
"min": 0.012546473937358573,
"max": 0.015826248938507332,
"count": 7
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23397427306689697,
"min": 0.17565063512302004,
"max": 0.23397427306689697,
"count": 7
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.495737501453336e-06,
"min": 7.495737501453336e-06,
"max": 6.120948673972143e-05,
"count": 7
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011243606252180004,
"min": 0.00011243606252180004,
"max": 0.0008569328143561002,
"count": 7
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249854666666666,
"min": 0.10249854666666666,
"max": 0.12040313571428572,
"count": 7
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5374782,
"min": 1.4780311,
"max": 1.6856439,
"count": 7
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025960481200000015,
"min": 0.00025960481200000015,
"max": 0.002048273257857144,
"count": 7
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038940721800000025,
"min": 0.0038940721800000025,
"max": 0.02867582561000001,
"count": 7
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01057474222034216,
"min": 0.01057474222034216,
"max": 0.01273829024285078,
"count": 7
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15862113237380981,
"min": 0.1546945869922638,
"max": 0.17833606898784637,
"count": 7
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739039481",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739040012"
},
"total": 531.513850113,
"count": 1,
"self": 0.48228738699936,
"children": {
"run_training.setup": {
"total": 0.055961961000321025,
"count": 1,
"self": 0.055961961000321025
},
"TrainerController.start_learning": {
"total": 530.9756007650003,
"count": 1,
"self": 0.3862626329973864,
"children": {
"TrainerController._reset_env": {
"total": 3.985030438999729,
"count": 1,
"self": 3.985030438999729
},
"TrainerController.advance": {
"total": 526.5141532140037,
"count": 14201,
"self": 0.37557371399998374,
"children": {
"env_step": {
"total": 372.5937489519779,
"count": 14201,
"self": 334.7986020489625,
"children": {
"SubprocessEnvManager._take_step": {
"total": 37.57660660101101,
"count": 14201,
"self": 1.1129458040259124,
"children": {
"TorchPolicy.evaluate": {
"total": 36.463660796985096,
"count": 13789,
"self": 36.463660796985096
}
}
},
"workers": {
"total": 0.218540302004385,
"count": 14201,
"self": 0.0,
"children": {
"worker_root": {
"total": 529.4517286559958,
"count": 14201,
"is_parallel": true,
"self": 222.94533792194898,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024672900003679388,
"count": 1,
"is_parallel": true,
"self": 0.0007143660004658159,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017529239999021229,
"count": 8,
"is_parallel": true,
"self": 0.0017529239999021229
}
}
},
"UnityEnvironment.step": {
"total": 0.049989174000074854,
"count": 1,
"is_parallel": true,
"self": 0.0005364729995562811,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046456600011879345,
"count": 1,
"is_parallel": true,
"self": 0.00046456600011879345
},
"communicator.exchange": {
"total": 0.047418617000403174,
"count": 1,
"is_parallel": true,
"self": 0.047418617000403174
},
"steps_from_proto": {
"total": 0.0015695179999966058,
"count": 1,
"is_parallel": true,
"self": 0.00033694400099193444,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012325739990046714,
"count": 8,
"is_parallel": true,
"self": 0.0012325739990046714
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 306.5063907340468,
"count": 14200,
"is_parallel": true,
"self": 7.282236440089491,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.133130172991969,
"count": 14200,
"is_parallel": true,
"self": 5.133130172991969
},
"communicator.exchange": {
"total": 272.3355497059879,
"count": 14200,
"is_parallel": true,
"self": 272.3355497059879
},
"steps_from_proto": {
"total": 21.75547441497747,
"count": 14200,
"is_parallel": true,
"self": 4.6582729780766385,
"children": {
"_process_rank_one_or_two_observation": {
"total": 17.097201436900832,
"count": 113600,
"is_parallel": true,
"self": 17.097201436900832
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 153.54483054802586,
"count": 14201,
"self": 0.781521444045211,
"children": {
"process_trajectory": {
"total": 29.707514030981656,
"count": 14201,
"self": 29.543071906981822,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1644421239998337,
"count": 1,
"self": 0.1644421239998337
}
}
},
"_update_policy": {
"total": 123.05579507299899,
"count": 103,
"self": 67.80614840899989,
"children": {
"TorchPPOOptimizer.update": {
"total": 55.2496466639991,
"count": 4956,
"self": 55.2496466639991
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1069996617152356e-06,
"count": 1,
"self": 1.1069996617152356e-06
},
"TrainerController._save_models": {
"total": 0.09015337199980422,
"count": 1,
"self": 0.0020727119999719434,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08808065999983228,
"count": 1,
"self": 0.08808065999983228
}
}
}
}
}
}
}