{
    "name": "root",
    "gauges": {
        "Pyramids.Policy.Entropy.mean": {
            "value": 0.8421513438224792,
            "min": 0.8421513438224792,
            "max": 1.4279040098190308,
            "count": 3
        },
        "Pyramids.Policy.Entropy.sum": {
            "value": 25453.181640625,
            "min": 25453.181640625,
            "max": 43316.89453125,
            "count": 3
        },
        "Pyramids.Step.mean": {
            "value": 89901.0,
            "min": 29952.0,
            "max": 89901.0,
            "count": 3
        },
        "Pyramids.Step.sum": {
            "value": 89901.0,
            "min": 29952.0,
            "max": 89901.0,
            "count": 3
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
            "value": -0.05049468204379082,
            "min": -0.1536472886800766,
            "max": -0.05049468204379082,
            "count": 3
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
            "value": -12.169218063354492,
            "min": -36.414405822753906,
            "max": -12.169218063354492,
            "count": 3
        },
        "Pyramids.Policy.RndValueEstimate.mean": {
            "value": 0.1693786084651947,
            "min": 0.1693786084651947,
            "max": 0.2747419476509094,
            "count": 3
        },
        "Pyramids.Policy.RndValueEstimate.sum": {
            "value": 40.82024383544922,
            "min": 40.82024383544922,
            "max": 66.487548828125,
            "count": 3
        },
        "Pyramids.Losses.PolicyLoss.mean": {
            "value": 0.06846776156642985,
            "min": 0.06846776156642985,
            "max": 0.07196624864031591,
            "count": 3
        },
        "Pyramids.Losses.PolicyLoss.sum": {
            "value": 0.8216131387971582,
            "min": 0.49820701944019213,
            "max": 0.8216131387971582,
            "count": 3
        },
        "Pyramids.Losses.ValueLoss.mean": {
            "value": 0.002095540043834882,
            "min": 0.002095540043834882,
            "max": 0.006562098728055988,
            "count": 3
        },
        "Pyramids.Losses.ValueLoss.sum": {
            "value": 0.02514648052601858,
            "min": 0.0217551727780026,
            "max": 0.045934691096391915,
            "count": 3
        },
        "Pyramids.Policy.LearningRate.mean": {
            "value": 7.848182383941666e-05,
            "min": 7.848182383941666e-05,
            "max": 0.0002515063018788571,
            "count": 3
        },
        "Pyramids.Policy.LearningRate.sum": {
            "value": 0.000941781886073,
            "min": 0.000941781886073,
            "max": 0.0017605441131519997,
            "count": 3
        },
        "Pyramids.Policy.Epsilon.mean": {
            "value": 0.12616058333333333,
            "min": 0.12616058333333333,
            "max": 0.1838354285714286,
            "count": 3
        },
        "Pyramids.Policy.Epsilon.sum": {
            "value": 1.513927,
            "min": 1.2868480000000002,
            "max": 1.513927,
            "count": 3
        },
        "Pyramids.Policy.Beta.mean": {
            "value": 0.0026234422750000003,
            "min": 0.0026234422750000003,
            "max": 0.008385159314285713,
            "count": 3
        },
        "Pyramids.Policy.Beta.sum": {
            "value": 0.031481307300000004,
            "min": 0.031481307300000004,
            "max": 0.058696115199999996,
            "count": 3
        },
        "Pyramids.Losses.RNDLoss.mean": {
            "value": 0.11525529623031616,
            "min": 0.11525529623031616,
            "max": 0.49059638381004333,
            "count": 3
        },
        "Pyramids.Losses.RNDLoss.sum": {
            "value": 1.383063554763794,
            "min": 1.383063554763794,
            "max": 3.4341747760772705,
            "count": 3
        },
        "Pyramids.Environment.EpisodeLength.mean": {
            "value": 971.125,
            "min": 962.0,
            "max": 999.0,
            "count": 3
        },
        "Pyramids.Environment.EpisodeLength.sum": {
            "value": 31076.0,
            "min": 15984.0,
            "max": 33670.0,
            "count": 3
        },
        "Pyramids.Environment.CumulativeReward.mean": {
            "value": -0.8470062969718128,
            "min": -1.0000000521540642,
            "max": -0.7914857677050999,
            "count": 3
        },
        "Pyramids.Environment.CumulativeReward.sum": {
            "value": -27.10420150309801,
            "min": -27.702001869678497,
            "max": -16.000000834465027,
            "count": 3
        },
        "Pyramids.Policy.ExtrinsicReward.mean": {
            "value": -0.8470062969718128,
            "min": -1.0000000521540642,
            "max": -0.7914857677050999,
            "count": 3
        },
        "Pyramids.Policy.ExtrinsicReward.sum": {
            "value": -27.10420150309801,
            "min": -27.702001869678497,
            "max": -16.000000834465027,
            "count": 3
        },
        "Pyramids.Policy.RndReward.mean": {
            "value": 1.2553130452288315,
            "min": 1.2553130452288315,
            "max": 10.210595269687474,
            "count": 3
        },
        "Pyramids.Policy.RndReward.sum": {
            "value": 40.17001744732261,
            "min": 40.17001744732261,
            "max": 163.36952431499958,
            "count": 3
        },
        "Pyramids.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 3
        },
        "Pyramids.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 3
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1713439258",
        "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
        "mlagents_version": "1.1.0.dev0",
        "mlagents_envs_version": "1.1.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "2.2.1+cu121",
        "numpy_version": "1.23.5",
        "end_time_seconds": "1713439556"
    },
    "total": 298.24101804899993,
    "count": 1,
    "self": 0.6387142049999284,
    "children": {
        "run_training.setup": {
            "total": 0.06710730600002535,
            "count": 1,
            "self": 0.06710730600002535
        },
        "TrainerController.start_learning": {
            "total": 297.535196538,
            "count": 1,
            "self": 0.20759824399340232,
            "children": {
                "TrainerController._reset_env": {
                    "total": 3.136738426000022,
                    "count": 1,
                    "self": 3.136738426000022
                },
                "TrainerController.advance": {
                    "total": 294.03977961400653,
                    "count": 6305,
                    "self": 0.23763696300426318,
                    "children": {
                        "env_step": {
                            "total": 190.7710552949979,
                            "count": 6305,
                            "self": 175.57253945799755,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 15.069660322000175,
                                    "count": 6305,
                                    "self": 0.6626234760024658,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 14.407036845997709,
                                            "count": 6297,
                                            "self": 14.407036845997709
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.12885551500016845,
                                    "count": 6305,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 296.8999725839998,
                                            "count": 6305,
                                            "is_parallel": true,
                                            "self": 138.3617462400012,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.007409946999985095,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.00333795099999179,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.004071995999993305,
                                                                    "count": 8,
                                                                    "is_parallel": true,
                                                                    "self": 0.004071995999993305
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.057863819000033345,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0007494500000007065,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0005209889999946427,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0005209889999946427
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.05462544700003491,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.05462544700003491
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.001967933000003086,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0004303680000816712,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0015375649999214147,
                                                                            "count": 8,
                                                                            "is_parallel": true,
                                                                            "self": 0.0015375649999214147
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 158.5382263439986,
                                                    "count": 6304,
                                                    "is_parallel": true,
                                                    "self": 4.9335431810033015,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 2.6564381849967162,
                                                            "count": 6304,
                                                            "is_parallel": true,
                                                            "self": 2.6564381849967162
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 138.00420611499914,
                                                            "count": 6304,
                                                            "is_parallel": true,
                                                            "self": 138.00420611499914
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 12.944038862999435,
                                                            "count": 6304,
                                                            "is_parallel": true,
                                                            "self": 2.7626011990102484,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 10.181437663989186,
                                                                    "count": 50432,
                                                                    "is_parallel": true,
                                                                    "self": 10.181437663989186
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 103.03108735600438,
                            "count": 6305,
                            "self": 0.3195928790049152,
                            "children": {
                                "process_trajectory": {
                                    "total": 15.509347857999614,
                                    "count": 6305,
                                    "self": 15.509347857999614
                                },
                                "_update_policy": {
                                    "total": 87.20214661899985,
                                    "count": 33,
                                    "self": 35.90319550999601,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 51.29895110900384,
                                            "count": 2307,
                                            "self": 51.29895110900384
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 1.0930000371445203e-06,
                    "count": 1,
                    "self": 1.0930000371445203e-06
                },
                "TrainerController._save_models": {
                    "total": 0.1510791609999842,
                    "count": 1,
                    "self": 0.0028678099999979167,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.14821135099998628,
                            "count": 1,
                            "self": 0.14821135099998628
                        }
                    }
                }
            }
        }
    }
}