{
    "name": "root",
    "gauges": {
        "Pyramids.Policy.Entropy.mean": {
            "value": 0.500122606754303,
            "min": 0.4848885238170624,
            "max": 1.4408429861068726,
            "count": 33
        },
        "Pyramids.Policy.Entropy.sum": {
            "value": 14963.66796875,
            "min": 14639.75390625,
            "max": 43709.4140625,
            "count": 33
        },
        "Pyramids.Step.mean": {
            "value": 989994.0,
            "min": 29952.0,
            "max": 989994.0,
            "count": 33
        },
        "Pyramids.Step.sum": {
            "value": 989994.0,
            "min": 29952.0,
            "max": 989994.0,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
            "value": 0.5206454992294312,
            "min": -0.12462763488292694,
            "max": 0.5394735336303711,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
            "value": 143.17750549316406,
            "min": -29.910633087158203,
            "max": 149.97364807128906,
            "count": 33
        },
        "Pyramids.Policy.RndValueEstimate.mean": {
            "value": 0.03783247247338295,
            "min": -0.10501954704523087,
            "max": 0.5302673578262329,
            "count": 33
        },
        "Pyramids.Policy.RndValueEstimate.sum": {
            "value": 10.403929710388184,
            "min": -27.725160598754883,
            "max": 125.6733627319336,
            "count": 33
        },
        "Pyramids.Losses.PolicyLoss.mean": {
            "value": 0.07023699670075895,
            "min": 0.06597248561785717,
            "max": 0.07425791147823158,
            "count": 33
        },
        "Pyramids.Losses.PolicyLoss.sum": {
            "value": 0.9833179538106253,
            "min": 0.519805380347621,
            "max": 1.027950287486116,
            "count": 33
        },
        "Pyramids.Losses.ValueLoss.mean": {
            "value": 0.014703587894891264,
            "min": 0.0004279264805194013,
            "max": 0.014822875262845108,
            "count": 33
        },
        "Pyramids.Losses.ValueLoss.sum": {
            "value": 0.2058502305284777,
            "min": 0.005135117766232816,
            "max": 0.2075202536798315,
            "count": 33
        },
        "Pyramids.Policy.LearningRate.mean": {
            "value": 7.729275995035713e-06,
            "min": 7.729275995035713e-06,
            "max": 0.00029515063018788575,
            "count": 33
        },
        "Pyramids.Policy.LearningRate.sum": {
            "value": 0.00010820986393049997,
            "min": 0.00010820986393049997,
            "max": 0.003507384230872,
            "count": 33
        },
        "Pyramids.Policy.Epsilon.mean": {
            "value": 0.10257639285714289,
            "min": 0.10257639285714289,
            "max": 0.19838354285714285,
            "count": 33
        },
        "Pyramids.Policy.Epsilon.sum": {
            "value": 1.4360695000000003,
            "min": 1.3886848,
            "max": 2.569127999999999,
            "count": 33
        },
        "Pyramids.Policy.Beta.mean": {
            "value": 0.00026738164642857134,
            "min": 0.00026738164642857134,
            "max": 0.00983851593142857,
            "count": 33
        },
        "Pyramids.Policy.Beta.sum": {
            "value": 0.0037433430499999986,
            "min": 0.0037433430499999986,
            "max": 0.1169358872,
            "count": 33
        },
        "Pyramids.Losses.RNDLoss.mean": {
            "value": 0.013480921275913715,
            "min": 0.013480921275913715,
            "max": 0.4635303318500519,
            "count": 33
        },
        "Pyramids.Losses.RNDLoss.sum": {
            "value": 0.18873289227485657,
            "min": 0.18873289227485657,
            "max": 3.2447123527526855,
            "count": 33
        },
        "Pyramids.Environment.EpisodeLength.mean": {
            "value": 381.78205128205127,
            "min": 345.87951807228916,
            "max": 999.0,
            "count": 33
        },
        "Pyramids.Environment.EpisodeLength.sum": {
            "value": 29779.0,
            "min": 15984.0,
            "max": 33329.0,
            "count": 33
        },
        "Pyramids.Environment.CumulativeReward.mean": {
            "value": 1.541266640409445,
            "min": -1.0000000521540642,
            "max": 1.60591082292867,
            "count": 33
        },
        "Pyramids.Environment.CumulativeReward.sum": {
            "value": 120.21879795193672,
            "min": -31.9980016797781,
            "max": 133.57919857650995,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicReward.mean": {
            "value": 1.541266640409445,
            "min": -1.0000000521540642,
            "max": 1.60591082292867,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicReward.sum": {
            "value": 120.21879795193672,
            "min": -31.9980016797781,
            "max": 133.57919857650995,
            "count": 33
        },
        "Pyramids.Policy.RndReward.mean": {
            "value": 0.05351740965274169,
            "min": 0.05351740965274169,
            "max": 9.564930640161037,
            "count": 33
        },
        "Pyramids.Policy.RndReward.sum": {
            "value": 4.1743579529138515,
            "min": 4.1743579529138515,
            "max": 153.0388902425766,
            "count": 33
        },
        "Pyramids.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 33
        },
        "Pyramids.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 33
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1694296347",
        "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
        "mlagents_version": "0.31.0.dev0",
        "mlagents_envs_version": "0.31.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.11.0+cu102",
        "numpy_version": "1.21.2",
        "end_time_seconds": "1694298621"
    },
    "total": 2274.348290792,
    "count": 1,
    "self": 0.49709009000025617,
    "children": {
        "run_training.setup": {
            "total": 0.062165718999949604,
            "count": 1,
            "self": 0.062165718999949604
        },
        "TrainerController.start_learning": {
            "total": 2273.789034983,
            "count": 1,
            "self": 1.5149559179499192,
            "children": {
                "TrainerController._reset_env": {
                    "total": 4.972383053000044,
                    "count": 1,
                    "self": 4.972383053000044
                },
                "TrainerController.advance": {
                    "total": 2267.20618570805,
                    "count": 63639,
                    "self": 1.5485799810394383,
                    "children": {
                        "env_step": {
                            "total": 1605.2746480809813,
                            "count": 63639,
                            "self": 1487.4982249670254,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 116.86747741898046,
                                    "count": 63639,
                                    "self": 4.984044039062724,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 111.88343337991773,
                                            "count": 62570,
                                            "self": 111.88343337991773
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.9089456949753867,
                                    "count": 63639,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 2268.448401233021,
                                            "count": 63639,
                                            "is_parallel": true,
                                            "self": 904.1653022669996,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.011536839999962467,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.004451113999721201,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.007085726000241266,
                                                                    "count": 8,
                                                                    "is_parallel": true,
                                                                    "self": 0.007085726000241266
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.05011343499995746,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0005713739999464451,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0005243509999672824,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0005243509999672824
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.04623542000001635,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.04623542000001635
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0027822900000273876,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.00040830300008565246,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.002373986999941735,
                                                                            "count": 8,
                                                                            "is_parallel": true,
                                                                            "self": 0.002373986999941735
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1364.2830989660213,
                                                    "count": 63638,
                                                    "is_parallel": true,
                                                    "self": 35.5208518650129,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 24.45025613105952,
                                                            "count": 63638,
                                                            "is_parallel": true,
                                                            "self": 24.45025613105952
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 1191.1577530969682,
                                                            "count": 63638,
                                                            "is_parallel": true,
                                                            "self": 1191.1577530969682
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 113.15423787298073,
                                                            "count": 63638,
                                                            "is_parallel": true,
                                                            "self": 22.64952178791293,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 90.5047160850678,
                                                                    "count": 509104,
                                                                    "is_parallel": true,
                                                                    "self": 90.5047160850678
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 660.3829576460292,
                            "count": 63639,
                            "self": 2.6960527350526036,
                            "children": {
                                "process_trajectory": {
                                    "total": 116.18525806197658,
                                    "count": 63639,
                                    "self": 115.97030958397681,
                                    "children": {
                                        "RLTrainer._checkpoint": {
                                            "total": 0.21494847799976924,
                                            "count": 2,
                                            "self": 0.21494847799976924
                                        }
                                    }
                                },
                                "_update_policy": {
                                    "total": 541.501646849,
                                    "count": 450,
                                    "self": 351.28739255399637,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 190.21425429500368,
                                            "count": 22812,
                                            "self": 190.21425429500368
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 8.840002010401804e-07,
                    "count": 1,
                    "self": 8.840002010401804e-07
                },
                "TrainerController._save_models": {
                    "total": 0.09550941999987117,
                    "count": 1,
                    "self": 0.0015049969997562584,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.09400442300011491,
                            "count": 1,
                            "self": 0.09400442300011491
                        }
                    }
                }
            }
        }
    }
}