{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.40959498286247253,
"min": 0.4047621190547943,
"max": 1.4186512231826782,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12241.974609375,
"min": 12104.0068359375,
"max": 43036.203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989916.0,
"min": 29952.0,
"max": 989916.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989916.0,
"min": 29952.0,
"max": 989916.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3731522560119629,
"min": -0.10391656309366226,
"max": 0.43437299132347107,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 98.51219177246094,
"min": -25.014480590820312,
"max": 117.28070831298828,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00047859514597803354,
"min": -0.019790776073932648,
"max": 0.632742166519165,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.12634912133216858,
"min": -5.185183525085449,
"max": 149.95989990234375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06900106806867932,
"min": 0.06725016271466705,
"max": 0.07384413104381736,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9660149529615106,
"min": 0.47968182567876344,
"max": 1.0625315363092036,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014010273966137207,
"min": 0.0013340517167376251,
"max": 0.019984178108249123,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1961438355259209,
"min": 0.009338362017163376,
"max": 0.2331306774592195,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.649990307178567e-06,
"min": 7.649990307178567e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010709986430049994,
"min": 0.00010709986430049994,
"max": 0.0036333019888993997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025499642857143,
"min": 0.1025499642857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356995000000004,
"min": 1.3691136000000002,
"max": 2.6111006000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026474143214285707,
"min": 0.00026474143214285707,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003706380049999999,
"min": 0.003706380049999999,
"max": 0.12112894994000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012100758031010628,
"min": 0.012071436271071434,
"max": 0.7696196436882019,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1694106161594391,
"min": 0.16900010406970978,
"max": 5.387337684631348,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 489.5625,
"min": 396.2739726027397,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31332.0,
"min": 15984.0,
"max": 34641.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3158222011157445,
"min": -1.0000000521540642,
"max": 1.4667123115226015,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 82.8967986702919,
"min": -32.000001668930054,
"max": 107.0699987411499,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3158222011157445,
"min": -1.0000000521540642,
"max": 1.4667123115226015,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 82.8967986702919,
"min": -32.000001668930054,
"max": 107.0699987411499,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06150035002492442,
"min": 0.05122127555659972,
"max": 17.509382005780935,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8745220515702385,
"min": 3.6469270377856446,
"max": 280.15011209249496,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674441401",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674443360"
},
"total": 1958.5646042980002,
"count": 1,
"self": 0.4242224270001316,
"children": {
"run_training.setup": {
"total": 0.0976511640001263,
"count": 1,
"self": 0.0976511640001263
},
"TrainerController.start_learning": {
"total": 1958.042730707,
"count": 1,
"self": 1.1312351699839382,
"children": {
"TrainerController._reset_env": {
"total": 5.923212057000001,
"count": 1,
"self": 5.923212057000001
},
"TrainerController.advance": {
"total": 1950.9049021810156,
"count": 63692,
"self": 1.2143072010744618,
"children": {
"env_step": {
"total": 1317.7976884798763,
"count": 63692,
"self": 1219.1280200168844,
"children": {
"SubprocessEnvManager._take_step": {
"total": 97.96011978295792,
"count": 63692,
"self": 4.0932523960195795,
"children": {
"TorchPolicy.evaluate": {
"total": 93.86686738693834,
"count": 62561,
"self": 31.47182368395397,
"children": {
"TorchPolicy.sample_actions": {
"total": 62.39504370298437,
"count": 62561,
"self": 62.39504370298437
}
}
}
}
},
"workers": {
"total": 0.7095486800340041,
"count": 63692,
"self": 0.0,
"children": {
"worker_root": {
"total": 1954.071290269996,
"count": 63692,
"is_parallel": true,
"self": 826.7226954809194,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017009720004352857,
"count": 1,
"is_parallel": true,
"self": 0.0005888909995519498,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001112081000883336,
"count": 8,
"is_parallel": true,
"self": 0.001112081000883336
}
}
},
"UnityEnvironment.step": {
"total": 0.04236845200011885,
"count": 1,
"is_parallel": true,
"self": 0.00047539500019411207,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041321500020785606,
"count": 1,
"is_parallel": true,
"self": 0.00041321500020785606
},
"communicator.exchange": {
"total": 0.03998766399990927,
"count": 1,
"is_parallel": true,
"self": 0.03998766399990927
},
"steps_from_proto": {
"total": 0.001492177999807609,
"count": 1,
"is_parallel": true,
"self": 0.0003847919997497229,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011073860000578861,
"count": 8,
"is_parallel": true,
"self": 0.0011073860000578861
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1127.3485947890767,
"count": 63691,
"is_parallel": true,
"self": 26.714069345055123,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.894278822995147,
"count": 63691,
"is_parallel": true,
"self": 21.894278822995147
},
"communicator.exchange": {
"total": 983.0347065870674,
"count": 63691,
"is_parallel": true,
"self": 983.0347065870674
},
"steps_from_proto": {
"total": 95.7055400339591,
"count": 63691,
"is_parallel": true,
"self": 20.60668030789884,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.09885972606025,
"count": 509528,
"is_parallel": true,
"self": 75.09885972606025
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 631.8929065000648,
"count": 63692,
"self": 2.2541177321045325,
"children": {
"process_trajectory": {
"total": 137.99198212095143,
"count": 63692,
"self": 137.81323727895142,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17874484200001461,
"count": 2,
"self": 0.17874484200001461
}
}
},
"_update_policy": {
"total": 491.6468066470088,
"count": 451,
"self": 187.58034924899766,
"children": {
"TorchPPOOptimizer.update": {
"total": 304.06645739801115,
"count": 22815,
"self": 304.06645739801115
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.049999789567664e-07,
"count": 1,
"self": 9.049999789567664e-07
},
"TrainerController._save_models": {
"total": 0.08338039400041453,
"count": 1,
"self": 0.0015334150002672686,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08184697900014726,
"count": 1,
"self": 0.08184697900014726
}
}
}
}
}
}
}