{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7945910096168518,
"min": 0.7279884219169617,
"max": 1.477802038192749,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 24053.859375,
"min": 21618.34375,
"max": 44830.6015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989888.0,
"min": 29936.0,
"max": 989888.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989888.0,
"min": 29936.0,
"max": 989888.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.177419051527977,
"min": -0.09949477761983871,
"max": 0.31257808208465576,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 44.70960235595703,
"min": -24.077735900878906,
"max": 82.83319091796875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -9.262030601501465,
"min": -9.674857139587402,
"max": 0.3218729794025421,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2334.03173828125,
"min": -2476.763427734375,
"max": 83.68697357177734,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06952561540605358,
"min": 0.06507565919750079,
"max": 0.07300530304327309,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.97335861568475,
"min": 0.48709853741038595,
"max": 1.039012470835587,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 21.92978182948959,
"min": 4.787212734434234e-05,
"max": 21.92978182948959,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 307.0169456128542,
"min": 0.0006223376554764504,
"max": 307.0169456128542,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.768361696292855e-06,
"min": 7.768361696292855e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010875706374809997,
"min": 0.00010875706374809997,
"max": 0.0036090153969948993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10258942142857144,
"min": 0.10258942142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4362519000000002,
"min": 1.3886848,
"max": 2.5690767,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026868320071428566,
"min": 0.00026868320071428566,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003761564809999999,
"min": 0.003761564809999999,
"max": 0.12031020949,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006236549932509661,
"min": 0.006236549932509661,
"max": 0.3460303246974945,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08731169998645782,
"min": 0.08731169998645782,
"max": 2.4222123622894287,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 638.7441860465116,
"min": 481.1774193548387,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27466.0,
"min": 16863.0,
"max": 32337.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8494883401449337,
"min": -0.9999290848931959,
"max": 1.2673694731320364,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 36.52799862623215,
"min": -30.99780163168907,
"max": 77.92439848184586,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8494883401449337,
"min": -0.9999290848931959,
"max": 1.2673694731320364,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 36.52799862623215,
"min": -30.99780163168907,
"max": 77.92439848184586,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.042039520312797785,
"min": 0.03562061621351094,
"max": 6.4404986272839935,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.8076993734503048,
"min": 1.8076993734503048,
"max": 109.4884766638279,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1736262948",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1736266466"
},
"total": 3518.0058308730004,
"count": 1,
"self": 0.6387325229998169,
"children": {
"run_training.setup": {
"total": 0.08458571400001347,
"count": 1,
"self": 0.08458571400001347
},
"TrainerController.start_learning": {
"total": 3517.2825126360003,
"count": 1,
"self": 2.873206658946401,
"children": {
"TrainerController._reset_env": {
"total": 3.084093101999997,
"count": 1,
"self": 3.084093101999997
},
"TrainerController.advance": {
"total": 3511.2329150440532,
"count": 63404,
"self": 2.9549277619958048,
"children": {
"env_step": {
"total": 2294.8175096110344,
"count": 63404,
"self": 2091.9090794200383,
"children": {
"SubprocessEnvManager._take_step": {
"total": 201.22669548900944,
"count": 63404,
"self": 8.240255661963602,
"children": {
"TorchPolicy.evaluate": {
"total": 192.98643982704584,
"count": 62567,
"self": 192.98643982704584
}
}
},
"workers": {
"total": 1.6817347019866702,
"count": 63404,
"self": 0.0,
"children": {
"worker_root": {
"total": 3509.0422792578947,
"count": 63404,
"is_parallel": true,
"self": 1628.9950819799283,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0036418350000531063,
"count": 1,
"is_parallel": true,
"self": 0.0011698420005359367,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024719929995171697,
"count": 8,
"is_parallel": true,
"self": 0.0024719929995171697
}
}
},
"UnityEnvironment.step": {
"total": 0.10704589700003453,
"count": 1,
"is_parallel": true,
"self": 0.0009220219999406254,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006511740000405553,
"count": 1,
"is_parallel": true,
"self": 0.0006511740000405553
},
"communicator.exchange": {
"total": 0.1031088180000097,
"count": 1,
"is_parallel": true,
"self": 0.1031088180000097
},
"steps_from_proto": {
"total": 0.002363883000043643,
"count": 1,
"is_parallel": true,
"self": 0.0005069949997960066,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018568880002476362,
"count": 8,
"is_parallel": true,
"self": 0.0018568880002476362
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1880.0471972779665,
"count": 63403,
"is_parallel": true,
"self": 55.67691683320163,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 33.45732223491541,
"count": 63403,
"is_parallel": true,
"self": 33.45732223491541
},
"communicator.exchange": {
"total": 1649.0304935789025,
"count": 63403,
"is_parallel": true,
"self": 1649.0304935789025
},
"steps_from_proto": {
"total": 141.88246463094697,
"count": 63403,
"is_parallel": true,
"self": 31.425059455740666,
"children": {
"_process_rank_one_or_two_observation": {
"total": 110.4574051752063,
"count": 507224,
"is_parallel": true,
"self": 110.4574051752063
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1213.460477671023,
"count": 63404,
"self": 5.657545483058584,
"children": {
"process_trajectory": {
"total": 195.57201219895865,
"count": 63404,
"self": 195.25629988395917,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31571231499947316,
"count": 2,
"self": 0.31571231499947316
}
}
},
"_update_policy": {
"total": 1012.2309199890058,
"count": 451,
"self": 399.00318520610585,
"children": {
"TorchPPOOptimizer.update": {
"total": 613.2277347828999,
"count": 22758,
"self": 613.2277347828999
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3960006981506012e-06,
"count": 1,
"self": 1.3960006981506012e-06
},
"TrainerController._save_models": {
"total": 0.09229643500020757,
"count": 1,
"self": 0.0037344009997468675,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0885620340004607,
"count": 1,
"self": 0.0885620340004607
}
}
}
}
}
}
}