{
  "name": "root",
  "gauges": {
    "Pyramids.Policy.Entropy.mean": {
      "value": 0.8167691230773926,
      "min": 0.8167691230773926,
      "max": 1.452033519744873,
      "count": 3
    },
    "Pyramids.Policy.Entropy.sum": {
      "value": 24463.869140625,
      "min": 24463.869140625,
      "max": 44048.890625,
      "count": 3
    },
    "Pyramids.Step.mean": {
      "value": 89943.0,
      "min": 29952.0,
      "max": 89943.0,
      "count": 3
    },
    "Pyramids.Step.sum": {
      "value": 89943.0,
      "min": 29952.0,
      "max": 89943.0,
      "count": 3
    },
    "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
      "value": -0.09244344383478165,
      "min": -0.10529731214046478,
      "max": -0.09244344383478165,
      "count": 3
    },
    "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
      "value": -22.186426162719727,
      "min": -24.955463409423828,
      "max": -22.186426162719727,
      "count": 3
    },
    "Pyramids.Policy.RndValueEstimate.mean": {
      "value": 0.1690806895494461,
      "min": 0.16874651610851288,
      "max": 0.25449445843696594,
      "count": 3
    },
    "Pyramids.Policy.RndValueEstimate.sum": {
      "value": 40.57936477661133,
      "min": 39.992923736572266,
      "max": 61.33316421508789,
      "count": 3
    },
    "Pyramids.Losses.PolicyLoss.mean": {
      "value": 0.06830564729005183,
      "min": 0.06830564729005183,
      "max": 0.0721332325220352,
      "count": 3
    },
    "Pyramids.Losses.PolicyLoss.sum": {
      "value": 0.6147508256104665,
      "min": 0.5049326276542464,
      "max": 0.6447482374010963,
      "count": 3
    },
    "Pyramids.Losses.ValueLoss.mean": {
      "value": 0.0002378650806171975,
      "min": 0.0002378650806171975,
      "max": 0.006066062268878333,
      "count": 3
    },
    "Pyramids.Losses.ValueLoss.sum": {
      "value": 0.0021407857255547774,
      "min": 0.0021407857255547774,
      "max": 0.04246243588214833,
      "count": 3
    },
    "Pyramids.Policy.LearningRate.mean": {
      "value": 7.475240841588888e-05,
      "min": 7.475240841588888e-05,
      "max": 0.0002515063018788571,
      "count": 3
    },
    "Pyramids.Policy.LearningRate.sum": {
      "value": 0.0006727716757429999,
      "min": 0.0006727716757429999,
      "max": 0.0017605441131519997,
      "count": 3
    },
    "Pyramids.Policy.Epsilon.mean": {
      "value": 0.12491744444444447,
      "min": 0.12491744444444447,
      "max": 0.1838354285714286,
      "count": 3
    },
    "Pyramids.Policy.Epsilon.sum": {
      "value": 1.1242570000000003,
      "min": 1.1242570000000003,
      "max": 1.38635,
      "count": 3
    },
    "Pyramids.Policy.Beta.mean": {
      "value": 0.0024992527000000002,
      "min": 0.0024992527000000002,
      "max": 0.008385159314285713,
      "count": 3
    },
    "Pyramids.Policy.Beta.sum": {
      "value": 0.0224932743,
      "min": 0.0224932743,
      "max": 0.058696115199999996,
      "count": 3
    },
    "Pyramids.Losses.RNDLoss.mean": {
      "value": 0.10023115575313568,
      "min": 0.10023115575313568,
      "max": 0.3753523826599121,
      "count": 3
    },
    "Pyramids.Losses.RNDLoss.sum": {
      "value": 0.9020804166793823,
      "min": 0.9020804166793823,
      "max": 2.6274666786193848,
      "count": 3
    },
    "Pyramids.Environment.EpisodeLength.mean": {
      "value": 999.0,
      "min": 986.8484848484849,
      "max": 999.0,
      "count": 3
    },
    "Pyramids.Environment.EpisodeLength.sum": {
      "value": 31968.0,
      "min": 15984.0,
      "max": 32566.0,
      "count": 3
    },
    "Pyramids.Environment.CumulativeReward.mean": {
      "value": -1.0000000521540642,
      "min": -1.0000000521540642,
      "max": -0.9272121714823174,
      "count": 3
    },
    "Pyramids.Environment.CumulativeReward.sum": {
      "value": -32.000001668930054,
      "min": -32.000001668930054,
      "max": -16.000000834465027,
      "count": 3
    },
    "Pyramids.Policy.ExtrinsicReward.mean": {
      "value": -1.0000000521540642,
      "min": -1.0000000521540642,
      "max": -0.9272121714823174,
      "count": 3
    },
    "Pyramids.Policy.ExtrinsicReward.sum": {
      "value": -32.000001668930054,
      "min": -32.000001668930054,
      "max": -16.000000834465027,
      "count": 3
    },
    "Pyramids.Policy.RndReward.mean": {
      "value": 1.1914042222779244,
      "min": 1.1914042222779244,
      "max": 7.563053228892386,
      "count": 3
    },
    "Pyramids.Policy.RndReward.sum": {
      "value": 38.12493511289358,
      "min": 38.12493511289358,
      "max": 121.00885166227818,
      "count": 3
    },
    "Pyramids.IsTraining.mean": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 3
    },
    "Pyramids.IsTraining.sum": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 3
    }
  },
  "metadata": {
    "timer_format_version": "0.1.0",
    "start_time_seconds": "1679464280",
    "python_version": "3.9.16 (main, Dec  7 2022, 01:11:51) \n[GCC 9.4.0]",
    "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
    "mlagents_version": "0.31.0.dev0",
    "mlagents_envs_version": "0.31.0.dev0",
    "communication_protocol_version": "1.5.0",
    "pytorch_version": "1.11.0+cu102",
    "numpy_version": "1.21.2",
    "end_time_seconds": "1679464475"
  },
  "total": 194.77187541000012,
  "count": 1,
  "self": 0.7457656650001354,
  "children": {
    "run_training.setup": {
      "total": 0.10276488299996345,
      "count": 1,
      "self": 0.10276488299996345
    },
    "TrainerController.start_learning": {
      "total": 193.92334486200002,
      "count": 1,
      "self": 0.1460976430021219,
      "children": {
        "TrainerController._reset_env": {
          "total": 7.096231325999952,
          "count": 1,
          "self": 7.096231325999952
        },
        "TrainerController.advance": {
          "total": 186.5140350829979,
          "count": 6256,
          "self": 0.145697865999864,
          "children": {
            "env_step": {
              "total": 122.86093506799784,
              "count": 6256,
              "self": 111.49307102498847,
              "children": {
                "SubprocessEnvManager._take_step": {
                  "total": 11.28470693700774,
                  "count": 6256,
                  "self": 0.46258730100907997,
                  "children": {
                    "TorchPolicy.evaluate": {
                      "total": 10.82211963599866,
                      "count": 6256,
                      "self": 10.82211963599866
                    }
                  }
                },
                "workers": {
                  "total": 0.08315710600163584,
                  "count": 6256,
                  "self": 0.0,
                  "children": {
                    "worker_root": {
                      "total": 193.0190538669999,
                      "count": 6256,
                      "is_parallel": true,
                      "self": 92.52163129399855,
                      "children": {
                        "run_training.setup": {
                          "total": 0.0,
                          "count": 0,
                          "is_parallel": true,
                          "self": 0.0,
                          "children": {
                            "steps_from_proto": {
                              "total": 0.0017337829999632959,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.00057002599965017,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 0.001163757000313126,
                                  "count": 8,
                                  "is_parallel": true,
                                  "self": 0.001163757000313126
                                }
                              }
                            },
                            "UnityEnvironment.step": {
                              "total": 0.046447873999795775,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.0005278359999465465,
                              "children": {
                                "UnityEnvironment._generate_step_input": {
                                  "total": 0.0004309050000301795,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.0004309050000301795
                                },
                                "communicator.exchange": {
                                  "total": 0.0435790009998982,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.0435790009998982
                                },
                                "steps_from_proto": {
                                  "total": 0.00191013199992085,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.0003737609999916458,
                                  "children": {
                                    "_process_rank_one_or_two_observation": {
                                      "total": 0.0015363709999292041,
                                      "count": 8,
                                      "is_parallel": true,
                                      "self": 0.0015363709999292041
                                    }
                                  }
                                }
                              }
                            }
                          }
                        },
                        "UnityEnvironment.step": {
                          "total": 100.49742257300136,
                          "count": 6255,
                          "is_parallel": true,
                          "self": 3.072848388999546,
                          "children": {
                            "UnityEnvironment._generate_step_input": {
                              "total": 2.257832362995032,
                              "count": 6255,
                              "is_parallel": true,
                              "self": 2.257832362995032
                            },
                            "communicator.exchange": {
                              "total": 86.17355633900547,
                              "count": 6255,
                              "is_parallel": true,
                              "self": 86.17355633900547
                            },
                            "steps_from_proto": {
                              "total": 8.993185482001309,
                              "count": 6255,
                              "is_parallel": true,
                              "self": 1.9547386239839852,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 7.038446858017323,
                                  "count": 50040,
                                  "is_parallel": true,
                                  "self": 7.038446858017323
                                }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            },
            "trainer_advance": {
              "total": 63.5074021490002,
              "count": 6256,
              "self": 0.2029007779967742,
              "children": {
                "process_trajectory": {
                  "total": 11.347182169003645,
                  "count": 6256,
                  "self": 11.347182169003645
                },
                "_update_policy": {
                  "total": 51.95731920199978,
                  "count": 28,
                  "self": 33.21267860700118,
                  "children": {
                    "TorchPPOOptimizer.update": {
                      "total": 18.7446405949986,
                      "count": 2286,
                      "self": 18.7446405949986
                    }
                  }
                }
              }
            }
          }
        },
        "trainer_threads": {
          "total": 1.2230000265844865e-06,
          "count": 1,
          "self": 1.2230000265844865e-06
        },
        "TrainerController._save_models": {
          "total": 0.16697958700001436,
          "count": 1,
          "self": 0.00206316899993908,
          "children": {
            "RLTrainer._checkpoint": {
              "total": 0.16491641800007528,
              "count": 1,
              "self": 0.16491641800007528
            }
          }
        }
      }
    }
  }
}