{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.323770672082901,
"min": 0.323770672082901,
"max": 1.4582098722457886,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9759.7431640625,
"min": 9759.7431640625,
"max": 44236.25390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989895.0,
"min": 29915.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989895.0,
"min": 29915.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.49555015563964844,
"min": -0.13686437904834747,
"max": 0.6415801048278809,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 130.8252410888672,
"min": -32.57372283935547,
"max": 181.56716918945312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.011162635870277882,
"min": -0.011162635870277882,
"max": 0.26784929633140564,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.9469358921051025,
"min": -2.9469358921051025,
"max": 64.55168151855469,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06477152436143821,
"min": 0.06477152436143821,
"max": 0.07273734795738009,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9715728654215732,
"min": 0.5766963413077729,
"max": 1.0643582190021728,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01423398255525778,
"min": 0.0005055835248906022,
"max": 0.015414487217833889,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2135097383288667,
"min": 0.006067002298687226,
"max": 0.21986013003333937,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.521337492920005e-06,
"min": 7.521337492920005e-06,
"max": 0.0002948502392165875,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011282006239380008,
"min": 0.00011282006239380008,
"max": 0.003508413530528899,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250708000000001,
"min": 0.10250708000000001,
"max": 0.1982834125,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376062000000001,
"min": 1.4776548,
"max": 2.5694710999999995,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026045729200000015,
"min": 0.00026045729200000015,
"max": 0.00982851290875,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003906859380000002,
"min": 0.003906859380000002,
"max": 0.11697016289000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01812346652150154,
"min": 0.01812346652150154,
"max": 0.4680212736129761,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2718519866466522,
"min": 0.2669598162174225,
"max": 3.7441701889038086,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 389.17283950617286,
"min": 336.96774193548384,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31523.0,
"min": 16969.0,
"max": 32699.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4132493548555138,
"min": -0.9999500517733395,
"max": 1.571856159172701,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 114.47319774329662,
"min": -31.998401656746864,
"max": 144.65899857878685,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4132493548555138,
"min": -0.9999500517733395,
"max": 1.571856159172701,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 114.47319774329662,
"min": -31.998401656746864,
"max": 144.65899857878685,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07258195938962156,
"min": 0.06830586166168222,
"max": 8.162659615278244,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.879138710559346,
"min": 5.396163071272895,
"max": 146.9278730750084,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1738450332",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1738452704"
},
"total": 2371.3287003059995,
"count": 1,
"self": 0.5264948510002796,
"children": {
"run_training.setup": {
"total": 0.030683930999657605,
"count": 1,
"self": 0.030683930999657605
},
"TrainerController.start_learning": {
"total": 2370.7715215239996,
"count": 1,
"self": 1.481143858023188,
"children": {
"TrainerController._reset_env": {
"total": 2.244592248999652,
"count": 1,
"self": 2.244592248999652
},
"TrainerController.advance": {
"total": 2366.951290260977,
"count": 63824,
"self": 1.5431029529468105,
"children": {
"env_step": {
"total": 1648.2647548129257,
"count": 63824,
"self": 1479.8927231679654,
"children": {
"SubprocessEnvManager._take_step": {
"total": 167.49660390002782,
"count": 63824,
"self": 5.003764071965179,
"children": {
"TorchPolicy.evaluate": {
"total": 162.49283982806264,
"count": 62557,
"self": 162.49283982806264
}
}
},
"workers": {
"total": 0.8754277449324945,
"count": 63824,
"self": 0.0,
"children": {
"worker_root": {
"total": 2365.041919946991,
"count": 63824,
"is_parallel": true,
"self": 1008.47313007404,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002063453999653575,
"count": 1,
"is_parallel": true,
"self": 0.0006883389992253797,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013751150004281953,
"count": 8,
"is_parallel": true,
"self": 0.0013751150004281953
}
}
},
"UnityEnvironment.step": {
"total": 0.052112123999904725,
"count": 1,
"is_parallel": true,
"self": 0.0005389189996094501,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0007047650001368311,
"count": 1,
"is_parallel": true,
"self": 0.0007047650001368311
},
"communicator.exchange": {
"total": 0.04913821300033305,
"count": 1,
"is_parallel": true,
"self": 0.04913821300033305
},
"steps_from_proto": {
"total": 0.0017302269998253905,
"count": 1,
"is_parallel": true,
"self": 0.00035096299961878685,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013792640002066037,
"count": 8,
"is_parallel": true,
"self": 0.0013792640002066037
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1356.5687898729511,
"count": 63823,
"is_parallel": true,
"self": 33.35161664508223,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.426625246936965,
"count": 63823,
"is_parallel": true,
"self": 24.426625246936965
},
"communicator.exchange": {
"total": 1195.464377916011,
"count": 63823,
"is_parallel": true,
"self": 1195.464377916011
},
"steps_from_proto": {
"total": 103.32617006492092,
"count": 63823,
"is_parallel": true,
"self": 20.968925834036327,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.35724423088459,
"count": 510584,
"is_parallel": true,
"self": 82.35724423088459
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 717.1434324951047,
"count": 63824,
"self": 2.8409659511489735,
"children": {
"process_trajectory": {
"total": 139.40505516995745,
"count": 63824,
"self": 139.19528470095793,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20977046899952256,
"count": 2,
"self": 0.20977046899952256
}
}
},
"_update_policy": {
"total": 574.8974113739982,
"count": 456,
"self": 316.4368406340377,
"children": {
"TorchPPOOptimizer.update": {
"total": 258.46057073996053,
"count": 22773,
"self": 258.46057073996053
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1829997674794868e-06,
"count": 1,
"self": 1.1829997674794868e-06
},
"TrainerController._save_models": {
"total": 0.09449397299977136,
"count": 1,
"self": 0.0014072179992581368,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09308675500051322,
"count": 1,
"self": 0.09308675500051322
}
}
}
}
}
}
}