{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4753119945526123,
"min": 0.4753119945526123,
"max": 1.5220084190368652,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14198.51953125,
"min": 14198.51953125,
"max": 46171.6484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989893.0,
"min": 29937.0,
"max": 989893.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989893.0,
"min": 29937.0,
"max": 989893.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.43835026025772095,
"min": -0.24129898846149445,
"max": 0.5278897881507874,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 117.47786712646484,
"min": -57.18785858154297,
"max": 146.2254638671875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.11484821885824203,
"min": -0.10131924599409103,
"max": 0.20836420357227325,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 30.77932357788086,
"min": -28.065431594848633,
"max": 50.21577453613281,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06938125960119114,
"min": 0.06618764120535939,
"max": 0.07324900247161568,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.971337634416676,
"min": 0.4791909821633244,
"max": 1.0603118833775322,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017872745063578506,
"min": 0.00015113684035291273,
"max": 0.017872745063578506,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2502184308900991,
"min": 0.0018136420842349529,
"max": 0.2502184308900991,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.615940318528572e-06,
"min": 7.615940318528572e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010662316445940001,
"min": 0.00010662316445940001,
"max": 0.0032240113253296,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253861428571429,
"min": 0.10253861428571429,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355406,
"min": 1.3886848,
"max": 2.3464632000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026360756714285716,
"min": 0.00026360756714285716,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00369050594,
"min": 0.00369050594,
"max": 0.10747957296000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012073351070284843,
"min": 0.011469056829810143,
"max": 0.37238484621047974,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1690269112586975,
"min": 0.1605667918920517,
"max": 2.606693983078003,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 441.1060606060606,
"min": 363.39506172839504,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29113.0,
"min": 16864.0,
"max": 33052.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3770302777940577,
"min": -0.9999806972280625,
"max": 1.5357438846513993,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 90.8839983344078,
"min": -31.998801663517952,
"max": 125.93099854141474,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3770302777940577,
"min": -0.9999806972280625,
"max": 1.5357438846513993,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 90.8839983344078,
"min": -31.998801663517952,
"max": 125.93099854141474,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05481062834681352,
"min": 0.04805379930142563,
"max": 7.493379457908518,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.6175014708896924,
"min": 3.4257452879974153,
"max": 127.38745078444481,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691800563",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691802952"
},
"total": 2389.097337874,
"count": 1,
"self": 0.47545305299991014,
"children": {
"run_training.setup": {
"total": 0.04024069599995528,
"count": 1,
"self": 0.04024069599995528
},
"TrainerController.start_learning": {
"total": 2388.581644125,
"count": 1,
"self": 1.598986452004283,
"children": {
"TrainerController._reset_env": {
"total": 4.21852517100001,
"count": 1,
"self": 4.21852517100001
},
"TrainerController.advance": {
"total": 2382.6689137779954,
"count": 63570,
"self": 1.6430769799753762,
"children": {
"env_step": {
"total": 1666.2673258249968,
"count": 63570,
"self": 1543.5690723430703,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.7312342589887,
"count": 63570,
"self": 5.176565853921829,
"children": {
"TorchPolicy.evaluate": {
"total": 116.55466840506688,
"count": 62566,
"self": 116.55466840506688
}
}
},
"workers": {
"total": 0.967019222937779,
"count": 63570,
"self": 0.0,
"children": {
"worker_root": {
"total": 2382.7970181939913,
"count": 63570,
"is_parallel": true,
"self": 966.5962653240078,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017950869998912822,
"count": 1,
"is_parallel": true,
"self": 0.0005462480000915093,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012488389997997729,
"count": 8,
"is_parallel": true,
"self": 0.0012488389997997729
}
}
},
"UnityEnvironment.step": {
"total": 0.05114475300001686,
"count": 1,
"is_parallel": true,
"self": 0.000626937000106409,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005510510000021895,
"count": 1,
"is_parallel": true,
"self": 0.0005510510000021895
},
"communicator.exchange": {
"total": 0.047857669999984864,
"count": 1,
"is_parallel": true,
"self": 0.047857669999984864
},
"steps_from_proto": {
"total": 0.0021090949999234,
"count": 1,
"is_parallel": true,
"self": 0.0004359189997558133,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016731760001675866,
"count": 8,
"is_parallel": true,
"self": 0.0016731760001675866
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1416.2007528699835,
"count": 63569,
"is_parallel": true,
"self": 36.691256947886814,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.3596872190235,
"count": 63569,
"is_parallel": true,
"self": 24.3596872190235
},
"communicator.exchange": {
"total": 1240.4667821130604,
"count": 63569,
"is_parallel": true,
"self": 1240.4667821130604
},
"steps_from_proto": {
"total": 114.68302659001279,
"count": 63569,
"is_parallel": true,
"self": 22.693708038008026,
"children": {
"_process_rank_one_or_two_observation": {
"total": 91.98931855200476,
"count": 508552,
"is_parallel": true,
"self": 91.98931855200476
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 714.7585109730232,
"count": 63570,
"self": 2.8526894390022335,
"children": {
"process_trajectory": {
"total": 121.1139916920265,
"count": 63570,
"self": 120.90221003402667,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21178165799983617,
"count": 2,
"self": 0.21178165799983617
}
}
},
"_update_policy": {
"total": 590.7918298419945,
"count": 446,
"self": 384.56982397698516,
"children": {
"TorchPPOOptimizer.update": {
"total": 206.22200586500935,
"count": 22851,
"self": 206.22200586500935
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.780001164472196e-07,
"count": 1,
"self": 9.780001164472196e-07
},
"TrainerController._save_models": {
"total": 0.0952177459998893,
"count": 1,
"self": 0.0015020899995761283,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09371565600031317,
"count": 1,
"self": 0.09371565600031317
}
}
}
}
}
}
}