ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4578075408935547,
"min": 0.4578075408935547,
"max": 0.4578075408935547,
"count": 1
},
"Pyramids.Policy.Entropy.sum": {
"value": 9602.970703125,
"min": 9602.970703125,
"max": 9602.970703125,
"count": 1
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 326.31481481481484,
"min": 326.31481481481484,
"max": 326.31481481481484,
"count": 1
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 17621.0,
"min": 17621.0,
"max": 17621.0,
"count": 1
},
"Pyramids.Step.mean": {
"value": 1019988.0,
"min": 1019988.0,
"max": 1019988.0,
"count": 1
},
"Pyramids.Step.sum": {
"value": 1019988.0,
"min": 1019988.0,
"max": 1019988.0,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5122177004814148,
"min": 0.5122177004814148,
"max": 0.5122177004814148,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 93.73583984375,
"min": 93.73583984375,
"max": 93.73583984375,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012859730049967766,
"min": 0.012859730049967766,
"max": 0.012859730049967766,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.353330612182617,
"min": 2.353330612182617,
"max": 2.353330612182617,
"count": 1
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5949886622575093,
"min": 1.5949886622575093,
"max": 1.5949886622575093,
"count": 1
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 84.534399099648,
"min": 84.534399099648,
"max": 84.534399099648,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5949886622575093,
"min": 1.5949886622575093,
"max": 1.5949886622575093,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 84.534399099648,
"min": 84.534399099648,
"max": 84.534399099648,
"count": 1
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.020392710874491383,
"min": 0.020392710874491383,
"max": 0.020392710874491383,
"count": 1
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.0808136763480434,
"min": 1.0808136763480434,
"max": 1.0808136763480434,
"count": 1
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06944852790049345,
"min": 0.06944852790049345,
"max": 0.06944852790049345,
"count": 1
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.6250367511044411,
"min": 0.6250367511044411,
"max": 0.6250367511044411,
"count": 1
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012599080450686247,
"min": 0.012599080450686247,
"max": 0.012599080450686247,
"count": 1
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.11339172405617623,
"min": 0.11339172405617623,
"max": 0.11339172405617623,
"count": 1
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.811014095708062e-06,
"min": 2.811014095708062e-06,
"max": 2.811014095708062e-06,
"count": 1
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.5299126861372555e-05,
"min": 2.5299126861372555e-05,
"max": 2.5299126861372555e-05,
"count": 1
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10093697167755991,
"min": 0.10093697167755991,
"max": 0.10093697167755991,
"count": 1
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.9084327450980392,
"min": 0.9084327450980392,
"max": 0.9084327450980392,
"count": 1
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00010360347058823532,
"min": 0.00010360347058823532,
"max": 0.00010360347058823532,
"count": 1
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0009324312352941179,
"min": 0.0009324312352941179,
"max": 0.0009324312352941179,
"count": 1
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005835366901010275,
"min": 0.005835366901010275,
"max": 0.005835366901010275,
"count": 1
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.05251830071210861,
"min": 0.05251830071210861,
"max": 0.05251830071210861,
"count": 1
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718116846",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718116921"
},
"total": 74.91216858000007,
"count": 1,
"self": 0.629856010000367,
"children": {
"run_training.setup": {
"total": 0.10566821300017182,
"count": 1,
"self": 0.10566821300017182
},
"TrainerController.start_learning": {
"total": 74.17664435699953,
"count": 1,
"self": 0.0484158110093631,
"children": {
"TrainerController._reset_env": {
"total": 4.1516817900001115,
"count": 1,
"self": 4.1516817900001115
},
"TrainerController.advance": {
"total": 69.88113111299026,
"count": 1357,
"self": 0.057712297982106975,
"children": {
"env_step": {
"total": 46.601711384003465,
"count": 1357,
"self": 43.37167667501126,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3.2015309969956434,
"count": 1357,
"self": 0.14985691299807513,
"children": {
"TorchPolicy.evaluate": {
"total": 3.0516740839975682,
"count": 1311,
"self": 3.0516740839975682
}
}
},
"workers": {
"total": 0.028503711996563652,
"count": 1357,
"self": 0.0,
"children": {
"worker_root": {
"total": 74.02612100599708,
"count": 1357,
"is_parallel": true,
"self": 34.46452728899294,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003315939999993134,
"count": 1,
"is_parallel": true,
"self": 0.0011167520005983533,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021991879993947805,
"count": 8,
"is_parallel": true,
"self": 0.0021991879993947805
}
}
},
"UnityEnvironment.step": {
"total": 0.06482181900082651,
"count": 1,
"is_parallel": true,
"self": 0.0008052000002862769,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005295939999996335,
"count": 1,
"is_parallel": true,
"self": 0.0005295939999996335
},
"communicator.exchange": {
"total": 0.0613688360008382,
"count": 1,
"is_parallel": true,
"self": 0.0613688360008382
},
"steps_from_proto": {
"total": 0.0021181889997023973,
"count": 1,
"is_parallel": true,
"self": 0.0004406279995237128,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016775610001786845,
"count": 8,
"is_parallel": true,
"self": 0.0016775610001786845
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 39.56159371700414,
"count": 1356,
"is_parallel": true,
"self": 1.0554544790356886,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.6629031569609651,
"count": 1356,
"is_parallel": true,
"self": 0.6629031569609651
},
"communicator.exchange": {
"total": 35.10112359998402,
"count": 1356,
"is_parallel": true,
"self": 35.10112359998402
},
"steps_from_proto": {
"total": 2.7421124810234687,
"count": 1356,
"is_parallel": true,
"self": 0.6471835399688644,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.0949289410546044,
"count": 10848,
"is_parallel": true,
"self": 2.0949289410546044
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 23.22170743100469,
"count": 1357,
"self": 0.0930325040126263,
"children": {
"process_trajectory": {
"total": 3.5267437559905375,
"count": 1357,
"self": 3.3817300949904165,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14501366100012092,
"count": 1,
"self": 0.14501366100012092
}
}
},
"_update_policy": {
"total": 19.601931171001524,
"count": 9,
"self": 8.272779682001783,
"children": {
"TorchPPOOptimizer.update": {
"total": 11.329151488999742,
"count": 432,
"self": 11.329151488999742
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.589994078851305e-07,
"count": 1,
"self": 9.589994078851305e-07
},
"TrainerController._save_models": {
"total": 0.09541468400038866,
"count": 1,
"self": 0.002416738000647456,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0929979459997412,
"count": 1,
"self": 0.0929979459997412
}
}
}
}
}
}
}