{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8009202480316162,
"min": 0.7395164370536804,
"max": 1.5082975625991821,
"count": 14
},
"Pyramids.Policy.Entropy.sum": {
"value": 23963.533203125,
"min": 22239.3671875,
"max": 45755.71484375,
"count": 14
},
"Pyramids.Step.mean": {
"value": 419926.0,
"min": 29952.0,
"max": 419926.0,
"count": 14
},
"Pyramids.Step.sum": {
"value": 419926.0,
"min": 29952.0,
"max": 419926.0,
"count": 14
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.007818647660315037,
"min": -0.011158403940498829,
"max": 0.007818647660315037,
"count": 14
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.9233874082565308,
"min": -2.6445417404174805,
"max": 1.9233874082565308,
"count": 14
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.05570051074028015,
"min": 0.04640258848667145,
"max": 0.5477548241615295,
"count": 14
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 13.702325820922852,
"min": 11.275829315185547,
"max": 129.81788635253906,
"count": 14
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.050763980583204624,
"min": 0.04681522059977324,
"max": 0.05453468236254946,
"count": 14
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.7106957281648647,
"min": 0.36650751996369635,
"max": 0.7207024861127138,
"count": 14
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0012189357651907682,
"min": 0.00033678642998324904,
"max": 0.009745490534534875,
"count": 14
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.017065100712670755,
"min": 0.004033944906528372,
"max": 0.06821843374174413,
"count": 14
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0002190358127023571,
"min": 0.0002190358127023571,
"max": 0.00029676708679192377,
"count": 14
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0030665013778329994,
"min": 0.0020382272205909328,
"max": 0.0039150343949885995,
"count": 14
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.17301192857142864,
"min": 0.17301192857142864,
"max": 0.19892236190476192,
"count": 14
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.422167000000001,
"min": 1.3794090666666667,
"max": 2.8050114000000006,
"count": 14
},
"Pyramids.Policy.Beta.mean": {
"value": 0.007303891664285715,
"min": 0.007303891664285715,
"max": 0.009892343954285714,
"count": 14
},
"Pyramids.Policy.Beta.sum": {
"value": 0.1022544833,
"min": 0.06794296576,
"max": 0.13052063885999998,
"count": 14
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.033260494470596313,
"min": 0.033260494470596313,
"max": 0.7636728882789612,
"count": 14
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.4656469225883484,
"min": 0.4656469225883484,
"max": 5.345710277557373,
"count": 14
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 869.9411764705883,
"min": 811.0,
"max": 999.0,
"count": 14
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29578.0,
"min": 15984.0,
"max": 32098.0,
"count": 14
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.16451180989251418,
"min": -1.0000000521540642,
"max": 0.10759996280476854,
"count": 14
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -5.593401536345482,
"min": -32.000001668930054,
"max": 3.981198623776436,
"count": 14
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.16451180989251418,
"min": -1.0000000521540642,
"max": 0.10759996280476854,
"count": 14
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -5.593401536345482,
"min": -32.000001668930054,
"max": 3.981198623776436,
"count": 14
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.30343032596112396,
"min": 0.30343032596112396,
"max": 14.843034541234374,
"count": 14
},
"Pyramids.Policy.RndReward.sum": {
"value": 10.316631082678214,
"min": 10.316631082678214,
"max": 237.48855265974998,
"count": 14
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687952159",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids-Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687953065"
},
"total": 905.8644368479999,
"count": 1,
"self": 0.23412299599976905,
"children": {
"run_training.setup": {
"total": 0.03994181300004129,
"count": 1,
"self": 0.03994181300004129
},
"TrainerController.start_learning": {
"total": 905.590372039,
"count": 1,
"self": 0.5443300249787626,
"children": {
"TrainerController._reset_env": {
"total": 5.302121001000046,
"count": 1,
"self": 5.302121001000046
},
"TrainerController.advance": {
"total": 899.4936765480213,
"count": 26748,
"self": 0.5566726750208773,
"children": {
"env_step": {
"total": 606.7365380339949,
"count": 26748,
"self": 553.2975351919897,
"children": {
"SubprocessEnvManager._take_step": {
"total": 53.12541479800734,
"count": 26748,
"self": 1.9499418160000914,
"children": {
"TorchPolicy.evaluate": {
"total": 51.17547298200725,
"count": 26509,
"self": 51.17547298200725
}
}
},
"workers": {
"total": 0.31358804399781093,
"count": 26747,
"self": 0.0,
"children": {
"worker_root": {
"total": 903.529167836978,
"count": 26747,
"is_parallel": true,
"self": 395.44945059797385,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001877981000006912,
"count": 1,
"is_parallel": true,
"self": 0.0006266419999292339,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001251339000077678,
"count": 8,
"is_parallel": true,
"self": 0.001251339000077678
}
}
},
"UnityEnvironment.step": {
"total": 0.08084031099997446,
"count": 1,
"is_parallel": true,
"self": 0.0005698239998537247,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005142130000876932,
"count": 1,
"is_parallel": true,
"self": 0.0005142130000876932
},
"communicator.exchange": {
"total": 0.07766689699997187,
"count": 1,
"is_parallel": true,
"self": 0.07766689699997187
},
"steps_from_proto": {
"total": 0.0020893770000611767,
"count": 1,
"is_parallel": true,
"self": 0.00040608200015412876,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001683294999907048,
"count": 8,
"is_parallel": true,
"self": 0.001683294999907048
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 508.07971723900414,
"count": 26746,
"is_parallel": true,
"self": 14.402299893028271,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.814417236978898,
"count": 26746,
"is_parallel": true,
"self": 9.814417236978898
},
"communicator.exchange": {
"total": 441.25282318099823,
"count": 26746,
"is_parallel": true,
"self": 441.25282318099823
},
"steps_from_proto": {
"total": 42.61017692799874,
"count": 26746,
"is_parallel": true,
"self": 8.167402191992778,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.44277473600596,
"count": 213968,
"is_parallel": true,
"self": 34.44277473600596
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 292.2004658390056,
"count": 26747,
"self": 0.9772115710293292,
"children": {
"process_trajectory": {
"total": 51.72999506797498,
"count": 26747,
"self": 51.72999506797498
},
"_update_policy": {
"total": 239.49325920000126,
"count": 178,
"self": 173.15960590700115,
"children": {
"TorchPPOOptimizer.update": {
"total": 66.33365329300011,
"count": 6392,
"self": 66.33365329300011
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.354000005449052e-06,
"count": 1,
"self": 1.354000005449052e-06
},
"TrainerController._save_models": {
"total": 0.2502431109999179,
"count": 1,
"self": 0.0011992599997938669,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24904385100012405,
"count": 1,
"self": 0.24904385100012405
}
}
}
}
}
}
}