{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7800024747848511,
"min": 0.7800024747848511,
"max": 1.492328405380249,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 23300.234375,
"min": 23300.234375,
"max": 45271.2734375,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479928.0,
"min": 29952.0,
"max": 479928.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479928.0,
"min": 29952.0,
"max": 479928.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.1669306755065918,
"min": -0.10548119992017746,
"max": 0.1669306755065918,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 42.400390625,
"min": -25.526451110839844,
"max": 42.400390625,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.009646894410252571,
"min": 0.009646894410252571,
"max": 0.2548985183238983,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.4503111839294434,
"min": 2.4503111839294434,
"max": 61.17564392089844,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06815127445472416,
"min": 0.06553023293326997,
"max": 0.07233980885472154,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9541178423661382,
"min": 0.49319122671002485,
"max": 1.036600200171176,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0103505900614862,
"min": 0.0005839634877544695,
"max": 0.0103505900614862,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1449082608608068,
"min": 0.006423598365299164,
"max": 0.1449082608608068,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.0494378882857143e-05,
"min": 2.0494378882857143e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00028692130436,
"min": 0.00028692130436,
"max": 0.0028540851486383995,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10683142857142858,
"min": 0.10683142857142858,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.49564,
"min": 1.3773696000000002,
"max": 2.3389772,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0006924597142857143,
"min": 0.0006924597142857143,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.009694436,
"min": 0.009694436,
"max": 0.09517102384000001,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.02181384339928627,
"min": 0.02181384339928627,
"max": 0.496464341878891,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.3053938150405884,
"min": 0.3053938150405884,
"max": 3.475250482559204,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 637.0652173913044,
"min": 637.0652173913044,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29305.0,
"min": 15984.0,
"max": 32892.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8410564916937248,
"min": -1.0000000521540642,
"max": 0.8410564916937248,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 38.68859861791134,
"min": -29.894001699984074,
"max": 38.68859861791134,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8410564916937248,
"min": -1.0000000521540642,
"max": 0.8410564916937248,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 38.68859861791134,
"min": -29.894001699984074,
"max": 38.68859861791134,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.14235590836893686,
"min": 0.14235590836893686,
"max": 11.02326258085668,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 6.548371784971096,
"min": 6.548371784971096,
"max": 176.3722012937069,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704266120",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704267188"
},
"total": 1067.9648398160002,
"count": 1,
"self": 0.8652804830003333,
"children": {
"run_training.setup": {
"total": 0.045333991999996215,
"count": 1,
"self": 0.045333991999996215
},
"TrainerController.start_learning": {
"total": 1067.054225341,
"count": 1,
"self": 0.6509999029633491,
"children": {
"TrainerController._reset_env": {
"total": 2.088056508999898,
"count": 1,
"self": 2.088056508999898
},
"TrainerController.advance": {
"total": 1064.1860013190367,
"count": 31656,
"self": 0.705398285066849,
"children": {
"env_step": {
"total": 746.373165559979,
"count": 31656,
"self": 680.9882246490295,
"children": {
"SubprocessEnvManager._take_step": {
"total": 64.97696937499086,
"count": 31656,
"self": 2.3078984269579905,
"children": {
"TorchPolicy.evaluate": {
"total": 62.66907094803287,
"count": 31311,
"self": 62.66907094803287
}
}
},
"workers": {
"total": 0.4079715359587226,
"count": 31656,
"self": 0.0,
"children": {
"worker_root": {
"total": 1064.4436975569656,
"count": 31656,
"is_parallel": true,
"self": 441.608159535989,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016721250001410226,
"count": 1,
"is_parallel": true,
"self": 0.0005468800000016927,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00112524500013933,
"count": 8,
"is_parallel": true,
"self": 0.00112524500013933
}
}
},
"UnityEnvironment.step": {
"total": 0.04903191699986564,
"count": 1,
"is_parallel": true,
"self": 0.0006089679995966435,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004832930001157365,
"count": 1,
"is_parallel": true,
"self": 0.0004832930001157365
},
"communicator.exchange": {
"total": 0.04619089100015117,
"count": 1,
"is_parallel": true,
"self": 0.04619089100015117
},
"steps_from_proto": {
"total": 0.001748765000002095,
"count": 1,
"is_parallel": true,
"self": 0.00037014700023974,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013786179997623549,
"count": 8,
"is_parallel": true,
"self": 0.0013786179997623549
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 622.8355380209766,
"count": 31655,
"is_parallel": true,
"self": 17.692013266958384,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.423755271042182,
"count": 31655,
"is_parallel": true,
"self": 12.423755271042182
},
"communicator.exchange": {
"total": 542.9301641639572,
"count": 31655,
"is_parallel": true,
"self": 542.9301641639572
},
"steps_from_proto": {
"total": 49.78960531901885,
"count": 31655,
"is_parallel": true,
"self": 9.850868022121858,
"children": {
"_process_rank_one_or_two_observation": {
"total": 39.93873729689699,
"count": 253240,
"is_parallel": true,
"self": 39.93873729689699
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 317.1074374739908,
"count": 31656,
"self": 1.2384242139783055,
"children": {
"process_trajectory": {
"total": 61.082611301011866,
"count": 31656,
"self": 60.939489579011706,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14312172200015993,
"count": 1,
"self": 0.14312172200015993
}
}
},
"_update_policy": {
"total": 254.7864019590006,
"count": 215,
"self": 151.89530220597157,
"children": {
"TorchPPOOptimizer.update": {
"total": 102.89109975302904,
"count": 11397,
"self": 102.89109975302904
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1940001058974303e-06,
"count": 1,
"self": 1.1940001058974303e-06
},
"TrainerController._save_models": {
"total": 0.12916641599986178,
"count": 1,
"self": 0.0022945889995753532,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12687182700028643,
"count": 1,
"self": 0.12687182700028643
}
}
}
}
}
}
}