{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.45258909463882446,
"min": 0.43814757466316223,
"max": 1.4355192184448242,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13541.4658203125,
"min": 13022.3828125,
"max": 43547.91015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989962.0,
"min": 29932.0,
"max": 989962.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989962.0,
"min": 29932.0,
"max": 989962.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.44720879197120667,
"min": -0.06426848471164703,
"max": 0.5496165752410889,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 118.9575424194336,
"min": -15.42443561553955,
"max": 153.343017578125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.007702940609306097,
"min": -0.007702940609306097,
"max": 0.28451427817344666,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.0489821434020996,
"min": -2.0489821434020996,
"max": 68.56793975830078,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0669531777329255,
"min": 0.06418856106263504,
"max": 0.07315886364709254,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9373444882609571,
"min": 0.5799136728916079,
"max": 1.0841321698389947,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014745346955216104,
"min": 0.0009476999942720192,
"max": 0.016067235355795774,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20643485737302544,
"min": 0.013267799919808268,
"max": 0.2410085303369366,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.63610459752143e-06,
"min": 7.63610459752143e-06,
"max": 0.0002948427017191,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001069054643653,
"min": 0.0001069054643653,
"max": 0.0036329905890031995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254533571428572,
"min": 0.10254533571428572,
"max": 0.19828090000000004,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356347,
"min": 1.4356347,
"max": 2.6109967999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026427903785714295,
"min": 0.00026427903785714295,
"max": 0.00982826191,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036999065300000016,
"min": 0.0036999065300000016,
"max": 0.12111858032,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010372297838330269,
"min": 0.010372297838330269,
"max": 0.5342194437980652,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14521217346191406,
"min": 0.14521217346191406,
"max": 4.2737555503845215,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 440.768115942029,
"min": 338.7375,
"max": 991.96875,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30413.0,
"min": 16858.0,
"max": 32427.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4432666468015616,
"min": -0.867806299822405,
"max": 1.584131635631187,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 99.58539862930775,
"min": -27.76980159431696,
"max": 129.66739765554667,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4432666468015616,
"min": -0.867806299822405,
"max": 1.584131635631187,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 99.58539862930775,
"min": -27.76980159431696,
"max": 129.66739765554667,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0474270057817668,
"min": 0.04181055792480636,
"max": 10.579345943251004,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.272463398941909,
"min": 3.272463398941909,
"max": 190.42822697851807,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685313227",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685315495"
},
"total": 2267.802993951,
"count": 1,
"self": 0.48006245299984585,
"children": {
"run_training.setup": {
"total": 0.06018648700000995,
"count": 1,
"self": 0.06018648700000995
},
"TrainerController.start_learning": {
"total": 2267.2627450110003,
"count": 1,
"self": 1.3122973627801002,
"children": {
"TrainerController._reset_env": {
"total": 5.161210052000115,
"count": 1,
"self": 5.161210052000115
},
"TrainerController.advance": {
"total": 2260.700102805219,
"count": 63821,
"self": 1.3514135983091364,
"children": {
"env_step": {
"total": 1608.6989295230878,
"count": 63821,
"self": 1502.0613850281134,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.84057527694586,
"count": 63821,
"self": 4.743688395873505,
"children": {
"TorchPolicy.evaluate": {
"total": 101.09688688107235,
"count": 62547,
"self": 101.09688688107235
}
}
},
"workers": {
"total": 0.7969692180286074,
"count": 63821,
"self": 0.0,
"children": {
"worker_root": {
"total": 2262.1474579489345,
"count": 63821,
"is_parallel": true,
"self": 872.0164611189084,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026017109998974774,
"count": 1,
"is_parallel": true,
"self": 0.0007088199999998324,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001892890999897645,
"count": 8,
"is_parallel": true,
"self": 0.001892890999897645
}
}
},
"UnityEnvironment.step": {
"total": 0.048915236000084406,
"count": 1,
"is_parallel": true,
"self": 0.0005412089994933922,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047054800006662845,
"count": 1,
"is_parallel": true,
"self": 0.00047054800006662845
},
"communicator.exchange": {
"total": 0.04589184800033763,
"count": 1,
"is_parallel": true,
"self": 0.04589184800033763
},
"steps_from_proto": {
"total": 0.0020116310001867532,
"count": 1,
"is_parallel": true,
"self": 0.00039613199942323263,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016154990007635206,
"count": 8,
"is_parallel": true,
"self": 0.0016154990007635206
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1390.130996830026,
"count": 63820,
"is_parallel": true,
"self": 32.78478030020278,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.935648423909697,
"count": 63820,
"is_parallel": true,
"self": 22.935648423909697
},
"communicator.exchange": {
"total": 1234.7858288999519,
"count": 63820,
"is_parallel": true,
"self": 1234.7858288999519
},
"steps_from_proto": {
"total": 99.62473920596176,
"count": 63820,
"is_parallel": true,
"self": 20.074739841846167,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.54999936411559,
"count": 510560,
"is_parallel": true,
"self": 79.54999936411559
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 650.6497596838221,
"count": 63821,
"self": 2.627454681888139,
"children": {
"process_trajectory": {
"total": 109.9296894859267,
"count": 63821,
"self": 109.71693135492615,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2127581310005553,
"count": 2,
"self": 0.2127581310005553
}
}
},
"_update_policy": {
"total": 538.0926155160073,
"count": 459,
"self": 349.06568278599434,
"children": {
"TorchPPOOptimizer.update": {
"total": 189.02693273001296,
"count": 22758,
"self": 189.02693273001296
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.170007615466602e-07,
"count": 1,
"self": 9.170007615466602e-07
},
"TrainerController._save_models": {
"total": 0.08913387400025385,
"count": 1,
"self": 0.001339168999948015,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08779470500030584,
"count": 1,
"self": 0.08779470500030584
}
}
}
}
}
}
}