{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8067610859870911,
"min": 0.8067610859870911,
"max": 1.4487583637237549,
"count": 4
},
"Pyramids.Policy.Entropy.sum": {
"value": 24409.36328125,
"min": 24409.36328125,
"max": 43949.53515625,
"count": 4
},
"Pyramids.Step.mean": {
"value": 119932.0,
"min": 29920.0,
"max": 119932.0,
"count": 4
},
"Pyramids.Step.sum": {
"value": 119932.0,
"min": 29920.0,
"max": 119932.0,
"count": 4
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07677312195301056,
"min": -0.16037100553512573,
"max": -0.05409104377031326,
"count": 4
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -18.502323150634766,
"min": -38.16830062866211,
"max": -12.981850624084473,
"count": 4
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.09420567750930786,
"min": 0.09420567750930786,
"max": 0.25200021266937256,
"count": 4
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 22.703567504882812,
"min": 22.703567504882812,
"max": 59.976051330566406,
"count": 4
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06633208006348082,
"min": 0.06633208006348082,
"max": 0.07040727484245689,
"count": 4
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8623170408252506,
"min": 0.5398470783203401,
"max": 0.8623170408252506,
"count": 4
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.002476732693704179,
"min": 0.0012567250570238602,
"max": 0.006855768857662795,
"count": 4
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.03219752501815432,
"min": 0.013823975627262463,
"max": 0.05484615086130236,
"count": 4
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.95589329675641e-05,
"min": 3.95589329675641e-05,
"max": 0.0002564093895302083,
"count": 4
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0005142661285783333,
"min": 0.0005142661285783333,
"max": 0.0020873604042133333,
"count": 4
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.11318628205128206,
"min": 0.11318628205128206,
"max": 0.1854697916666667,
"count": 4
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4714216666666668,
"min": 1.4714216666666668,
"max": 1.795786666666667,
"count": 4
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0013273095769230773,
"min": 0.0013273095769230773,
"max": 0.0085484321875,
"count": 4
},
"Pyramids.Policy.Beta.sum": {
"value": 0.017255024500000004,
"min": 0.017255024500000004,
"max": 0.06961908800000001,
"count": 4
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.07761820405721664,
"min": 0.07761820405721664,
"max": 0.42689937353134155,
"count": 4
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.009036660194397,
"min": 1.009036660194397,
"max": 3.4151949882507324,
"count": 4
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 909.8529411764706,
"min": 909.8529411764706,
"max": 999.0,
"count": 4
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30935.0,
"min": 18253.0,
"max": 31722.0,
"count": 4
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.6164647544569829,
"min": -0.9999226324500576,
"max": -0.6164647544569829,
"count": 4
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -20.95980165153742,
"min": -30.997601605951786,
"max": -12.269001007080078,
"count": 4
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.6164647544569829,
"min": -0.9999226324500576,
"max": -0.6164647544569829,
"count": 4
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -20.95980165153742,
"min": -30.997601605951786,
"max": -12.269001007080078,
"count": 4
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.7570985329940039,
"min": 0.7570985329940039,
"max": 8.402276820478667,
"count": 4
},
"Pyramids.Policy.RndReward.sum": {
"value": 25.74135012179613,
"min": 25.74135012179613,
"max": 159.64325958909467,
"count": 4
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679812995",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679813227"
},
"total": 232.41987493599936,
"count": 1,
"self": 0.4393240589997731,
"children": {
"run_training.setup": {
"total": 0.10581023299982917,
"count": 1,
"self": 0.10581023299982917
},
"TrainerController.start_learning": {
"total": 231.87474064399976,
"count": 1,
"self": 0.14862824799547525,
"children": {
"TrainerController._reset_env": {
"total": 5.866247106000628,
"count": 1,
"self": 5.866247106000628
},
"TrainerController.advance": {
"total": 225.74433102800322,
"count": 7560,
"self": 0.1602690290501414,
"children": {
"env_step": {
"total": 150.43818239299526,
"count": 7560,
"self": 137.5409482530049,
"children": {
"SubprocessEnvManager._take_step": {
"total": 12.802475685978607,
"count": 7560,
"self": 0.5563037989759323,
"children": {
"TorchPolicy.evaluate": {
"total": 12.246171887002674,
"count": 7536,
"self": 12.246171887002674
}
}
},
"workers": {
"total": 0.0947584540117532,
"count": 7560,
"self": 0.0,
"children": {
"worker_root": {
"total": 231.32033019599749,
"count": 7560,
"is_parallel": true,
"self": 107.00859760601907,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020798989999093465,
"count": 1,
"is_parallel": true,
"self": 0.0006237529987629387,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014561460011464078,
"count": 8,
"is_parallel": true,
"self": 0.0014561460011464078
}
}
},
"UnityEnvironment.step": {
"total": 0.04892549599935592,
"count": 1,
"is_parallel": true,
"self": 0.0005757109993282938,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005213349995756289,
"count": 1,
"is_parallel": true,
"self": 0.0005213349995756289
},
"communicator.exchange": {
"total": 0.045852938999814796,
"count": 1,
"is_parallel": true,
"self": 0.045852938999814796
},
"steps_from_proto": {
"total": 0.001975511000637198,
"count": 1,
"is_parallel": true,
"self": 0.00042802699954336276,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001547484001093835,
"count": 8,
"is_parallel": true,
"self": 0.001547484001093835
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 124.31173258997842,
"count": 7559,
"is_parallel": true,
"self": 3.7390943469490594,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.8823625990453365,
"count": 7559,
"is_parallel": true,
"self": 2.8823625990453365
},
"communicator.exchange": {
"total": 106.31924537299165,
"count": 7559,
"is_parallel": true,
"self": 106.31924537299165
},
"steps_from_proto": {
"total": 11.371030270992378,
"count": 7559,
"is_parallel": true,
"self": 2.421572046003348,
"children": {
"_process_rank_one_or_two_observation": {
"total": 8.94945822498903,
"count": 60472,
"is_parallel": true,
"self": 8.94945822498903
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 75.14587960595782,
"count": 7560,
"self": 0.21005456297734781,
"children": {
"process_trajectory": {
"total": 13.944530994982415,
"count": 7560,
"self": 13.944530994982415
},
"_update_policy": {
"total": 60.99129404799805,
"count": 44,
"self": 39.29968475802525,
"children": {
"TorchPPOOptimizer.update": {
"total": 21.691609289972803,
"count": 2715,
"self": 21.691609289972803
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.1600031737471e-07,
"count": 1,
"self": 9.1600031737471e-07
},
"TrainerController._save_models": {
"total": 0.11553334600012022,
"count": 1,
"self": 0.0014134459997876547,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11411990000033256,
"count": 1,
"self": 0.11411990000033256
}
}
}
}
}
}
}