{
    "name": "root",
    "gauges": {
        "Pyramids.Policy.Entropy.mean": {
            "value": 1.0229613780975342,
            "min": 1.0229613780975342,
            "max": 1.4882289171218872,
            "count": 3
        },
        "Pyramids.Policy.Entropy.sum": {
            "value": 30639.740234375,
            "min": 30639.740234375,
            "max": 45146.9140625,
            "count": 3
        },
        "Pyramids.Step.mean": {
            "value": 89922.0,
            "min": 29952.0,
            "max": 89922.0,
            "count": 3
        },
        "Pyramids.Step.sum": {
            "value": 89922.0,
            "min": 29952.0,
            "max": 89922.0,
            "count": 3
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
            "value": -0.09439827501773834,
            "min": -0.09439827501773834,
            "max": -0.012767951935529709,
            "count": 3
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
            "value": -22.749984741210938,
            "min": -22.749984741210938,
            "max": -3.0260045528411865,
            "count": 3
        },
        "Pyramids.Policy.RndValueEstimate.mean": {
            "value": 0.20313036441802979,
            "min": 0.20313036441802979,
            "max": 0.34145307540893555,
            "count": 3
        },
        "Pyramids.Policy.RndValueEstimate.sum": {
            "value": 48.95441818237305,
            "min": 48.95441818237305,
            "max": 80.92437744140625,
            "count": 3
        },
        "Pyramids.Losses.PolicyLoss.mean": {
            "value": 0.07180172525127432,
            "min": 0.06775372254005396,
            "max": 0.07217720247308011,
            "count": 3
        },
        "Pyramids.Losses.PolicyLoss.sum": {
            "value": 0.6462155272614688,
            "min": 0.47427605778037774,
            "max": 0.6462155272614688,
            "count": 3
        },
        "Pyramids.Losses.ValueLoss.mean": {
            "value": 0.0011397290572731642,
            "min": 0.0006927422234180153,
            "max": 0.006533059602083988,
            "count": 3
        },
        "Pyramids.Losses.ValueLoss.sum": {
            "value": 0.010257561515458478,
            "min": 0.004849195563926107,
            "max": 0.04573141721458791,
            "count": 3
        },
        "Pyramids.Policy.LearningRate.mean": {
            "value": 7.549340816888887e-05,
            "min": 7.549340816888887e-05,
            "max": 0.0002515063018788571,
            "count": 3
        },
        "Pyramids.Policy.LearningRate.sum": {
            "value": 0.0006794406735199999,
            "min": 0.0006794406735199999,
            "max": 0.0017605441131519997,
            "count": 3
        },
        "Pyramids.Policy.Epsilon.mean": {
            "value": 0.12516444444444444,
            "min": 0.12516444444444444,
            "max": 0.1838354285714286,
            "count": 3
        },
        "Pyramids.Policy.Epsilon.sum": {
            "value": 1.12648,
            "min": 1.0911359999999999,
            "max": 1.2868480000000002,
            "count": 3
        },
        "Pyramids.Policy.Beta.mean": {
            "value": 0.002523928,
            "min": 0.002523928,
            "max": 0.008385159314285713,
            "count": 3
        },
        "Pyramids.Policy.Beta.sum": {
            "value": 0.022715352,
            "min": 0.022715352,
            "max": 0.058696115199999996,
            "count": 3
        },
        "Pyramids.Losses.RNDLoss.mean": {
            "value": 0.11212483793497086,
            "min": 0.11212483793497086,
            "max": 0.45865699648857117,
            "count": 3
        },
        "Pyramids.Losses.RNDLoss.sum": {
            "value": 1.0091235637664795,
            "min": 1.0091235637664795,
            "max": 3.210598945617676,
            "count": 3
        },
        "Pyramids.Environment.EpisodeLength.mean": {
            "value": 993.0625,
            "min": 993.0625,
            "max": 999.0,
            "count": 3
        },
        "Pyramids.Environment.EpisodeLength.sum": {
            "value": 31778.0,
            "min": 15984.0,
            "max": 31968.0,
            "count": 3
        },
        "Pyramids.Environment.CumulativeReward.mean": {
            "value": -0.9315313019324094,
            "min": -1.0000000521540642,
            "max": -0.9315313019324094,
            "count": 3
        },
        "Pyramids.Environment.CumulativeReward.sum": {
            "value": -29.8090016618371,
            "min": -32.000001668930054,
            "max": -16.000000834465027,
            "count": 3
        },
        "Pyramids.Policy.ExtrinsicReward.mean": {
            "value": -0.9315313019324094,
            "min": -1.0000000521540642,
            "max": -0.9315313019324094,
            "count": 3
        },
        "Pyramids.Policy.ExtrinsicReward.sum": {
            "value": -29.8090016618371,
            "min": -32.000001668930054,
            "max": -16.000000834465027,
            "count": 3
        },
        "Pyramids.Policy.RndReward.mean": {
            "value": 1.3172022206708789,
            "min": 1.3172022206708789,
            "max": 9.006343544460833,
            "count": 3
        },
        "Pyramids.Policy.RndReward.sum": {
            "value": 42.150471061468124,
            "min": 42.150471061468124,
            "max": 144.10149671137333,
            "count": 3
        },
        "Pyramids.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 3
        },
        "Pyramids.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 3
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1682255296",
        "python_version": "3.9.16 (main, Dec  7 2022, 01:11:51) \n[GCC 9.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTrainingRND --no-graphics --force",
        "mlagents_version": "0.31.0.dev0",
        "mlagents_envs_version": "0.31.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.11.0+cu102",
        "numpy_version": "1.21.2",
        "end_time_seconds": "1682255497"
    },
    "total": 201.13026439700002,
    "count": 1,
    "self": 0.47400341999991724,
    "children": {
        "run_training.setup": {
            "total": 0.11000375300000087,
            "count": 1,
            "self": 0.11000375300000087
        },
        "TrainerController.start_learning": {
            "total": 200.5462572240001,
            "count": 1,
            "self": 0.13975283000468153,
            "children": {
                "TrainerController._reset_env": {
                    "total": 4.0618279699999675,
                    "count": 1,
                    "self": 4.0618279699999675
                },
                "TrainerController.advance": {
                    "total": 196.22450164699558,
                    "count": 6267,
                    "self": 0.15010125499679816,
                    "children": {
                        "env_step": {
                            "total": 138.04552506699724,
                            "count": 6267,
                            "self": 126.0181449070094,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 11.935498925005277,
                                    "count": 6267,
                                    "self": 0.5242217450118005,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 11.411277179993476,
                                            "count": 6265,
                                            "self": 11.411277179993476
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.09188123498256573,
                                    "count": 6267,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 200.05761350400144,
                                            "count": 6267,
                                            "is_parallel": true,
                                            "self": 86.16756258599503,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.0018129279999357095,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0005910349996156583,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0012218930003200512,
                                                                    "count": 8,
                                                                    "is_parallel": true,
                                                                    "self": 0.0012218930003200512
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.08381813799996962,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0005884569998215738,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0005210179999721731,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0005210179999721731
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.08082164200004627,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.08082164200004627
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0018870210001296073,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.00045310300038181595,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0014339179997477913,
                                                                            "count": 8,
                                                                            "is_parallel": true,
                                                                            "self": 0.0014339179997477913
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 113.89005091800641,
                                                    "count": 6266,
                                                    "is_parallel": true,
                                                    "self": 3.3515432050264735,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 2.52273048599136,
                                                            "count": 6266,
                                                            "is_parallel": true,
                                                            "self": 2.52273048599136
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 97.72407457399459,
                                                            "count": 6266,
                                                            "is_parallel": true,
                                                            "self": 97.72407457399459
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 10.29170265299399,
                                                            "count": 6266,
                                                            "is_parallel": true,
                                                            "self": 2.228655433984386,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 8.063047219009604,
                                                                    "count": 50128,
                                                                    "is_parallel": true,
                                                                    "self": 8.063047219009604
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 58.028875325001536,
                            "count": 6267,
                            "self": 0.16840663901984954,
                            "children": {
                                "process_trajectory": {
                                    "total": 10.285660992981775,
                                    "count": 6267,
                                    "self": 10.285660992981775
                                },
                                "_update_policy": {
                                    "total": 47.57480769299991,
                                    "count": 27,
                                    "self": 30.01514391399337,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 17.55966377900654,
                                            "count": 2292,
                                            "self": 17.55966377900654
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 1.0639998890837887e-06,
                    "count": 1,
                    "self": 1.0639998890837887e-06
                },
                "TrainerController._save_models": {
                    "total": 0.12017371299998558,
                    "count": 1,
                    "self": 0.0014467399998920882,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.1187269730000935,
                            "count": 1,
                            "self": 0.1187269730000935
                        }
                    }
                }
            }
        }
    }
}