{
    "name": "root",
    "gauges": {
        "Pyramids.Policy.Entropy.mean": {
            "value": 0.5778467655181885,
            "min": 0.5644880533218384,
            "max": 1.0903167724609375,
            "count": 27
        },
        "Pyramids.Policy.Entropy.sum": {
            "value": 17113.509765625,
            "min": 14867.677734375,
            "max": 32803.0390625,
            "count": 27
        },
        "Pyramids.Step.mean": {
            "value": 989926.0,
            "min": 209991.0,
            "max": 989926.0,
            "count": 27
        },
        "Pyramids.Step.sum": {
            "value": 989926.0,
            "min": 209991.0,
            "max": 989926.0,
            "count": 27
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
            "value": 0.39801111817359924,
            "min": -0.10163868963718414,
            "max": 0.39801111817359924,
            "count": 27
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
            "value": 107.06498718261719,
            "min": -24.5965633392334,
            "max": 107.06498718261719,
            "count": 27
        },
        "Pyramids.Policy.RndValueEstimate.mean": {
            "value": 0.11363773047924042,
            "min": -0.05971188470721245,
            "max": 0.11363773047924042,
            "count": 27
        },
        "Pyramids.Policy.RndValueEstimate.sum": {
            "value": 30.56855010986328,
            "min": -16.002784729003906,
            "max": 30.56855010986328,
            "count": 27
        },
        "Pyramids.Losses.PolicyLoss.mean": {
            "value": 0.06602687373790932,
            "min": 0.06602687373790932,
            "max": 0.0725631415288219,
            "count": 27
        },
        "Pyramids.Losses.PolicyLoss.sum": {
            "value": 0.9243762323307304,
            "min": 0.21228088629626046,
            "max": 1.07178095341078,
            "count": 27
        },
        "Pyramids.Losses.ValueLoss.mean": {
            "value": 0.016520114454025618,
            "min": 0.0006235202636397278,
            "max": 0.016520114454025618,
            "count": 27
        },
        "Pyramids.Losses.ValueLoss.sum": {
            "value": 0.23128160235635867,
            "min": 0.0023454733289251335,
            "max": 0.23128160235635867,
            "count": 27
        },
        "Pyramids.Policy.LearningRate.mean": {
            "value": 7.6023974659e-06,
            "min": 7.6023974659e-06,
            "max": 0.00023887712037429993,
            "count": 27
        },
        "Pyramids.Policy.LearningRate.sum": {
            "value": 0.0001064335645226,
            "min": 0.0001064335645226,
            "max": 0.0027838167720612003,
            "count": 27
        },
        "Pyramids.Policy.Epsilon.mean": {
            "value": 0.1025341,
            "min": 0.1025341,
            "max": 0.17962570000000003,
            "count": 27
        },
        "Pyramids.Policy.Epsilon.sum": {
            "value": 1.4354774000000001,
            "min": 0.5388771000000001,
            "max": 2.2747844,
            "count": 27
        },
        "Pyramids.Policy.Beta.mean": {
            "value": 0.0002631565900000001,
            "min": 0.0002631565900000001,
            "max": 0.00796460743,
            "count": 27
        },
        "Pyramids.Policy.Beta.sum": {
            "value": 0.0036841922600000013,
            "min": 0.0036841922600000013,
            "max": 0.09283108612,
            "count": 27
        },
        "Pyramids.Losses.RNDLoss.mean": {
            "value": 0.008909021504223347,
            "min": 0.008909021504223347,
            "max": 0.03914414346218109,
            "count": 27
        },
        "Pyramids.Losses.RNDLoss.sum": {
            "value": 0.1247262954711914,
            "min": 0.11743243038654327,
            "max": 0.3834875822067261,
            "count": 27
        },
        "Pyramids.Environment.EpisodeLength.mean": {
            "value": 434.23943661971833,
            "min": 434.23943661971833,
            "max": 999.0,
            "count": 27
        },
        "Pyramids.Environment.EpisodeLength.sum": {
            "value": 30831.0,
            "min": 15984.0,
            "max": 33030.0,
            "count": 27
        },
        "Pyramids.Environment.CumulativeReward.mean": {
            "value": 1.368518294687842,
            "min": -1.0000000521540642,
            "max": 1.4634382135289556,
            "count": 27
        },
        "Pyramids.Environment.CumulativeReward.sum": {
            "value": 97.16479892283678,
            "min": -28.924801647663116,
            "max": 99.51379851996899,
            "count": 27
        },
        "Pyramids.Policy.ExtrinsicReward.mean": {
            "value": 1.368518294687842,
            "min": -1.0000000521540642,
            "max": 1.4634382135289556,
            "count": 27
        },
        "Pyramids.Policy.ExtrinsicReward.sum": {
            "value": 97.16479892283678,
            "min": -28.924801647663116,
            "max": 99.51379851996899,
            "count": 27
        },
        "Pyramids.Policy.RndReward.mean": {
            "value": 0.03918409688820145,
            "min": 0.03918409688820145,
            "max": 0.38616177391712414,
            "count": 27
        },
        "Pyramids.Policy.RndReward.sum": {
            "value": 2.782070879062303,
            "min": 0.3267418332397938,
            "max": 12.743338539265096,
            "count": 27
        },
        "Pyramids.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 27
        },
        "Pyramids.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 27
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1689474029",
        "python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
        "mlagents_version": "0.31.0.dev0",
        "mlagents_envs_version": "0.31.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.11.0+cu102",
        "numpy_version": "1.21.2",
        "end_time_seconds": "1689475872"
    },
    "total": 1843.0409032699995,
    "count": 1,
    "self": 1.365671194999777,
    "children": {
        "run_training.setup": {
            "total": 0.04236321200005477,
            "count": 1,
            "self": 0.04236321200005477
        },
        "TrainerController.start_learning": {
            "total": 1841.6328688629997,
            "count": 1,
            "self": 1.1762784020093022,
            "children": {
                "TrainerController._reset_env": {
                    "total": 4.134572570000273,
                    "count": 1,
                    "self": 4.134572570000273
                },
                "TrainerController.advance": {
                    "total": 1836.1390447609897,
                    "count": 51208,
                    "self": 1.229169223957797,
                    "children": {
                        "env_step": {
                            "total": 1297.7533023260148,
                            "count": 51208,
                            "self": 1201.0127104020044,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 96.03883649498175,
                                    "count": 51208,
                                    "self": 3.965214081995782,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 92.07362241298597,
                                            "count": 50348,
                                            "self": 92.07362241298597
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.701755429028708,
                                    "count": 51208,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 1836.9434975340218,
                                            "count": 51208,
                                            "is_parallel": true,
                                            "self": 732.3744857390957,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.0020213419998071913,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0006635869999627175,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0013577549998444738,
                                                                    "count": 8,
                                                                    "is_parallel": true,
                                                                    "self": 0.0013577549998444738
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.051140615999884176,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0005900709998059028,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0004880229998889263,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0004880229998889263
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.04812828800004354,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.04812828800004354
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.001934234000145807,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0003475619996606838,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0015866720004851231,
                                                                            "count": 8,
                                                                            "is_parallel": true,
                                                                            "self": 0.0015866720004851231
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1104.5690117949262,
                                                    "count": 51207,
                                                    "is_parallel": true,
                                                    "self": 28.940049437850575,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 19.14033584301842,
                                                            "count": 51207,
                                                            "is_parallel": true,
                                                            "self": 19.14033584301842
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 967.6035714390009,
                                                            "count": 51207,
                                                            "is_parallel": true,
                                                            "self": 967.6035714390009
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 88.88505507505624,
                                                            "count": 51207,
                                                            "is_parallel": true,
                                                            "self": 17.27394227902687,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 71.61111279602937,
                                                                    "count": 409656,
                                                                    "is_parallel": true,
                                                                    "self": 71.61111279602937
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 537.1565732110171,
                            "count": 51208,
                            "self": 2.3467337829865755,
                            "children": {
                                "process_trajectory": {
                                    "total": 90.33775859803154,
                                    "count": 51208,
                                    "self": 90.05513052603146,
                                    "children": {
                                        "RLTrainer._checkpoint": {
                                            "total": 0.28262807200007956,
                                            "count": 2,
                                            "self": 0.28262807200007956
                                        }
                                    }
                                },
                                "_update_policy": {
                                    "total": 444.47208082999896,
                                    "count": 364,
                                    "self": 285.10498347500425,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 159.3670973549947,
                                            "count": 18294,
                                            "self": 159.3670973549947
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 1.2210002751089633e-06,
                    "count": 1,
                    "self": 1.2210002751089633e-06
                },
                "TrainerController._save_models": {
                    "total": 0.18297190900011628,
                    "count": 1,
                    "self": 0.004209181000078388,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.1787627280000379,
                            "count": 1,
                            "self": 0.1787627280000379
                        }
                    }
                }
            }
        }
    }
}