{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.32627230882644653,
"min": 0.310393750667572,
"max": 1.4600708484649658,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9709.8642578125,
"min": 9212.486328125,
"max": 44292.7109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989983.0,
"min": 29952.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989983.0,
"min": 29952.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5712558031082153,
"min": -0.04444899410009384,
"max": 0.6565734148025513,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 160.52288818359375,
"min": -10.712207794189453,
"max": 188.4365692138672,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.04985835775732994,
"min": -0.014220662415027618,
"max": 0.30632174015045166,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 14.010198593139648,
"min": -3.6262688636779785,
"max": 73.51721954345703,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06580059672641153,
"min": 0.06529236195363208,
"max": 0.07207587967457672,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9870089508961729,
"min": 0.4868131455869666,
"max": 1.072660699253902,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017535264942691555,
"min": 0.002190011774054695,
"max": 0.018045665456318402,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.26302897414037335,
"min": 0.020826573471744558,
"max": 0.270684981844776,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.525017491693335e-06,
"min": 7.525017491693335e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011287526237540003,
"min": 0.00011287526237540003,
"max": 0.003757280847573099,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250830666666669,
"min": 0.10250830666666669,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376246000000002,
"min": 1.3886848,
"max": 2.6524269,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026057983600000017,
"min": 0.00026057983600000017,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003908697540000002,
"min": 0.003908697540000002,
"max": 0.12525744731000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010826599784195423,
"min": 0.010051325894892216,
"max": 0.4678378701210022,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16239899396896362,
"min": 0.14071856439113617,
"max": 3.27486515045166,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 293.8229166666667,
"min": 278.71,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28207.0,
"min": 15984.0,
"max": 33136.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6835443167035113,
"min": -1.0000000521540642,
"max": 1.7221088946455776,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 163.3037987202406,
"min": -26.99300181120634,
"max": 173.93299835920334,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6835443167035113,
"min": -1.0000000521540642,
"max": 1.7221088946455776,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 163.3037987202406,
"min": -26.99300181120634,
"max": 173.93299835920334,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03284699922549351,
"min": 0.031820773443271694,
"max": 9.464617356657982,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1861589248728706,
"min": 3.1861589248728706,
"max": 151.4338777065277,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1756804276",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1756806542"
},
"total": 2265.538490375,
"count": 1,
"self": 0.7097213980005108,
"children": {
"run_training.setup": {
"total": 0.02161192299990944,
"count": 1,
"self": 0.02161192299990944
},
"TrainerController.start_learning": {
"total": 2264.8071570539996,
"count": 1,
"self": 1.337865490157128,
"children": {
"TrainerController._reset_env": {
"total": 2.013143256999683,
"count": 1,
"self": 2.013143256999683
},
"TrainerController.advance": {
"total": 2261.342070297843,
"count": 64219,
"self": 1.321271180756412,
"children": {
"env_step": {
"total": 1610.0700564990575,
"count": 64219,
"self": 1463.793569982181,
"children": {
"SubprocessEnvManager._take_step": {
"total": 145.48931249898033,
"count": 64219,
"self": 4.5171162087735865,
"children": {
"TorchPolicy.evaluate": {
"total": 140.97219629020674,
"count": 62559,
"self": 140.97219629020674
}
}
},
"workers": {
"total": 0.7871740178961772,
"count": 64219,
"self": 0.0,
"children": {
"worker_root": {
"total": 2259.979646944008,
"count": 64219,
"is_parallel": true,
"self": 907.2696887760949,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017344709995086305,
"count": 1,
"is_parallel": true,
"self": 0.0005573899989030906,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011770810006055399,
"count": 8,
"is_parallel": true,
"self": 0.0011770810006055399
}
}
},
"UnityEnvironment.step": {
"total": 0.04652428699955635,
"count": 1,
"is_parallel": true,
"self": 0.0005137399984960211,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004665710002882406,
"count": 1,
"is_parallel": true,
"self": 0.0004665710002882406
},
"communicator.exchange": {
"total": 0.043987576000290574,
"count": 1,
"is_parallel": true,
"self": 0.043987576000290574
},
"steps_from_proto": {
"total": 0.001556400000481517,
"count": 1,
"is_parallel": true,
"self": 0.00034161600069637643,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012147839997851406,
"count": 8,
"is_parallel": true,
"self": 0.0012147839997851406
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1352.709958167913,
"count": 64218,
"is_parallel": true,
"self": 31.478607767657195,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.712420443089286,
"count": 64218,
"is_parallel": true,
"self": 22.712420443089286
},
"communicator.exchange": {
"total": 1202.8050685101434,
"count": 64218,
"is_parallel": true,
"self": 1202.8050685101434
},
"steps_from_proto": {
"total": 95.71386144702319,
"count": 64218,
"is_parallel": true,
"self": 18.906401823379383,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.80745962364381,
"count": 513744,
"is_parallel": true,
"self": 76.80745962364381
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 649.9507426180289,
"count": 64219,
"self": 2.6976431360044444,
"children": {
"process_trajectory": {
"total": 122.98024513103064,
"count": 64219,
"self": 122.7603045140313,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2199406169993381,
"count": 2,
"self": 0.2199406169993381
}
}
},
"_update_policy": {
"total": 524.2728543509938,
"count": 457,
"self": 293.6725083270221,
"children": {
"TorchPPOOptimizer.update": {
"total": 230.60034602397172,
"count": 22767,
"self": 230.60034602397172
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2150003385613672e-06,
"count": 1,
"self": 1.2150003385613672e-06
},
"TrainerController._save_models": {
"total": 0.11407679399962944,
"count": 1,
"self": 0.0016552679990127217,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11242152600061672,
"count": 1,
"self": 0.11242152600061672
}
}
}
}
}
}
}