{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4795747399330139,
"min": 0.4795747399330139,
"max": 1.4618937969207764,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14563.7255859375,
"min": 14563.7255859375,
"max": 44348.01171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989891.0,
"min": 29952.0,
"max": 989891.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989891.0,
"min": 29952.0,
"max": 989891.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5876201391220093,
"min": -0.16776585578918457,
"max": 0.5876201391220093,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 162.77078247070312,
"min": -39.7605094909668,
"max": 162.77078247070312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.26975661516189575,
"min": -0.013640335761010647,
"max": 0.3809826076030731,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 74.72257995605469,
"min": -3.601048707962036,
"max": 90.29287719726562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06357700541460266,
"min": 0.06357700541460266,
"max": 0.07324024395895355,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9536550812190399,
"min": 0.5061449119265876,
"max": 1.0696497061957524,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017984554277629485,
"min": 0.00032887019811460023,
"max": 0.017984554277629485,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2697683141644423,
"min": 0.003946442377375203,
"max": 0.2697683141644423,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4614375128866656e-06,
"min": 7.4614375128866656e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011192156269329998,
"min": 0.00011192156269329998,
"max": 0.0036354898881700995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248711333333335,
"min": 0.10248711333333335,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373067000000002,
"min": 1.3886848,
"max": 2.6118299,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000258462622,
"min": 0.000258462622,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038769393300000002,
"min": 0.0038769393300000002,
"max": 0.12120180700999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008419668301939964,
"min": 0.00836948398500681,
"max": 0.46496105194091797,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1262950301170349,
"min": 0.11717277765274048,
"max": 3.254727363586426,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 329.13793103448273,
"min": 329.13793103448273,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28635.0,
"min": 15984.0,
"max": 33059.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6278772479431196,
"min": -1.0000000521540642,
"max": 1.6278772479431196,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 143.25319781899452,
"min": -31.995601668953896,
"max": 151.10519791394472,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6278772479431196,
"min": -1.0000000521540642,
"max": 1.6278772479431196,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 143.25319781899452,
"min": -31.995601668953896,
"max": 151.10519791394472,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02871607588828986,
"min": 0.02871607588828986,
"max": 9.456846978515387,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.5270146781695075,
"min": 2.5270146781695075,
"max": 151.30955165624619,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692597425",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692599689"
},
"total": 2264.182567473,
"count": 1,
"self": 0.487614270999984,
"children": {
"run_training.setup": {
"total": 0.043287183999950685,
"count": 1,
"self": 0.043287183999950685
},
"TrainerController.start_learning": {
"total": 2263.651666018,
"count": 1,
"self": 1.39233449898029,
"children": {
"TrainerController._reset_env": {
"total": 4.53210393400002,
"count": 1,
"self": 4.53210393400002
},
"TrainerController.advance": {
"total": 2257.6228395030193,
"count": 63814,
"self": 1.411756680057806,
"children": {
"env_step": {
"total": 1597.6274045459904,
"count": 63814,
"self": 1486.2651131489697,
"children": {
"SubprocessEnvManager._take_step": {
"total": 110.49027684602186,
"count": 63814,
"self": 4.823317127010455,
"children": {
"TorchPolicy.evaluate": {
"total": 105.6669597190114,
"count": 62562,
"self": 105.6669597190114
}
}
},
"workers": {
"total": 0.8720145509988697,
"count": 63814,
"self": 0.0,
"children": {
"worker_root": {
"total": 2258.643598209958,
"count": 63814,
"is_parallel": true,
"self": 889.9842033819823,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0053855469999462,
"count": 1,
"is_parallel": true,
"self": 0.003914586000064446,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001470960999881754,
"count": 8,
"is_parallel": true,
"self": 0.001470960999881754
}
}
},
"UnityEnvironment.step": {
"total": 0.04880758199999491,
"count": 1,
"is_parallel": true,
"self": 0.000603139999952873,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004745170000433063,
"count": 1,
"is_parallel": true,
"self": 0.0004745170000433063
},
"communicator.exchange": {
"total": 0.04573494999999639,
"count": 1,
"is_parallel": true,
"self": 0.04573494999999639
},
"steps_from_proto": {
"total": 0.0019949750000023414,
"count": 1,
"is_parallel": true,
"self": 0.00037864500018258695,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016163299998197544,
"count": 8,
"is_parallel": true,
"self": 0.0016163299998197544
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1368.6593948279756,
"count": 63813,
"is_parallel": true,
"self": 35.28641713501315,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.38670791600657,
"count": 63813,
"is_parallel": true,
"self": 23.38670791600657
},
"communicator.exchange": {
"total": 1200.7392607100014,
"count": 63813,
"is_parallel": true,
"self": 1200.7392607100014
},
"steps_from_proto": {
"total": 109.24700906695443,
"count": 63813,
"is_parallel": true,
"self": 21.26820899389429,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.97880007306014,
"count": 510504,
"is_parallel": true,
"self": 87.97880007306014
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 658.5836782769711,
"count": 63814,
"self": 2.6859923669653654,
"children": {
"process_trajectory": {
"total": 112.6829998620077,
"count": 63814,
"self": 112.47036019200766,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21263967000004413,
"count": 2,
"self": 0.21263967000004413
}
}
},
"_update_policy": {
"total": 543.214686047998,
"count": 454,
"self": 354.2356010110324,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.97908503696567,
"count": 22749,
"self": 188.97908503696567
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3030003174208105e-06,
"count": 1,
"self": 1.3030003174208105e-06
},
"TrainerController._save_models": {
"total": 0.10438677899992399,
"count": 1,
"self": 0.0013426810000964906,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1030440979998275,
"count": 1,
"self": 0.1030440979998275
}
}
}
}
}
}
}