{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.6544572114944458, "min": 0.5971586108207703, "max": 1.3204686641693115, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 19539.474609375, "min": 17924.3125, "max": 40057.73828125, "count": 33 }, "Pyramids.Step.mean": { "value": 989901.0, "min": 29905.0, "max": 989901.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989901.0, "min": 29905.0, "max": 989901.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.23381727933883667, "min": -0.10308179259300232, "max": 0.23381727933883667, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 60.55867385864258, "min": -24.84271240234375, "max": 60.55867385864258, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.01532779447734356, "min": 0.009356344118714333, "max": 0.6371363401412964, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 3.9698987007141113, "min": 2.2548789978027344, "max": 151.00131225585938, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.07052791839940291, "min": 0.06572190059751828, "max": 0.07555467268336015, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9873908575916408, "min": 0.5288827087835211, "max": 1.0778166331983097, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.012037281072439494, "min": 0.00014839098045144077, "max": 0.013313138820001931, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.1685219350141529, "min": 0.00192908274586873, "max": 0.17698423672449923, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.7808974064e-06, "min": 7.7808974064e-06, "max": 0.00029515063018788575, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.0001089325636896, "min": 0.0001089325636896, "max": 0.0033818189727271002, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.1025936, "min": 0.1025936, "max": 0.19838354285714285, "count": 33 }, 
"Pyramids.Policy.Epsilon.sum": { "value": 1.4363104, "min": 1.3886848, "max": 2.5272729, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.00026910063999999997, "min": 0.00026910063999999997, "max": 0.00983851593142857, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.0037674089599999995, "min": 0.0037674089599999995, "max": 0.11275456271000002, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.010950866155326366, "min": 0.010675571858882904, "max": 0.605381429195404, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.15331213176250458, "min": 0.14945800602436066, "max": 4.237669944763184, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 608.622641509434, "min": 608.622641509434, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 32257.0, "min": 16832.0, "max": 33516.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 0.9097740440456955, "min": -0.9999290844125133, "max": 0.9097740440456955, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 49.12779837846756, "min": -31.997201666235924, "max": 49.12779837846756, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 0.9097740440456955, "min": -0.9999290844125133, "max": 0.9097740440456955, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 49.12779837846756, "min": -31.997201666235924, "max": 49.12779837846756, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.06893867443216516, "min": 0.06893867443216516, "max": 12.381299756029073, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 3.7226884193369187, "min": 3.1808168863062747, "max": 210.48209585249424, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1674808937", "python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": "1674810820" }, "total": 1882.5465722349995, "count": 1, "self": 0.44494050100001914, "children": { "run_training.setup": { "total": 0.09785167699965314, "count": 1, "self": 0.09785167699965314 }, "TrainerController.start_learning": { "total": 1882.0037800569999, "count": 1, "self": 1.098713159001818, "children": { "TrainerController._reset_env": { "total": 5.890943396000239, "count": 1, "self": 5.890943396000239 }, "TrainerController.advance": { "total": 1874.9313149869972, "count": 63247, "self": 1.1610906399732812, "children": { "env_step": { "total": 1249.5489751540126, "count": 63247, "self": 1152.8465367319563, "children": { "SubprocessEnvManager._take_step": { "total": 96.02336331405559, "count": 63247, "self": 3.9720894380857317, "children": { "TorchPolicy.evaluate": { "total": 92.05127387596985, "count": 62572, "self": 31.10022710598105, "children": { "TorchPolicy.sample_actions": { "total": 60.95104676998881, "count": 62572, "self": 60.95104676998881 } } } } }, "workers": { "total": 0.6790751080006885, "count": 63247, "self": 0.0, "children": { "worker_root": { "total": 1879.4027185421164, "count": 63247, "is_parallel": true, "self": 814.4259908040667, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0017525010002827912, "count": 1, "is_parallel": true, "self": 0.0006469799996011716, "children": { "_process_rank_one_or_two_observation": { "total": 0.0011055210006816196, "count": 8, "is_parallel": true, "self": 0.0011055210006816196 }
} }, "UnityEnvironment.step": { "total": 0.041232881999803794, "count": 1, "is_parallel": true, "self": 0.0004557499992188241, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00042342799997641123, "count": 1, "is_parallel": true, "self": 0.00042342799997641123 }, "communicator.exchange": { "total": 0.03886007800019797, "count": 1, "is_parallel": true, "self": 0.03886007800019797 }, "steps_from_proto": { "total": 0.0014936260004105861, "count": 1, "is_parallel": true, "self": 0.00038568099989788607, "children": { "_process_rank_one_or_two_observation": { "total": 0.0011079450005127, "count": 8, "is_parallel": true, "self": 0.0011079450005127 } } } } } } }, "UnityEnvironment.step": { "total": 1064.9767277380497, "count": 63246, "is_parallel": true, "self": 25.90220179908647, "children": { "UnityEnvironment._generate_step_input": { "total": 20.849585505045525, "count": 63246, "is_parallel": true, "self": 20.849585505045525 }, "communicator.exchange": { "total": 933.3841610669201, "count": 63246, "is_parallel": true, "self": 933.3841610669201 }, "steps_from_proto": { "total": 84.8407793669976, "count": 63246, "is_parallel": true, "self": 19.50563916227793, "children": { "_process_rank_one_or_two_observation": { "total": 65.33514020471966, "count": 505968, "is_parallel": true, "self": 65.33514020471966 } } } } } } } } } } }, "trainer_advance": { "total": 624.2212491930113, "count": 63247, "self": 2.0043483190784173, "children": { "process_trajectory": { "total": 136.63881536392864, "count": 63247, "self": 136.4577922489293, "children": { "RLTrainer._checkpoint": { "total": 0.1810231149993342, "count": 2, "self": 0.1810231149993342 } } }, "_update_policy": { "total": 485.57808551000426, "count": 447, "self": 180.1857127430858, "children": { "TorchPPOOptimizer.update": { "total": 305.39237276691847, "count": 22809, "self": 305.39237276691847 } } } } } } }, "trainer_threads": { "total": 9.030000001075678e-07, "count": 1, "self": 9.030000001075678e-07 }, 
"TrainerController._save_models": { "total": 0.08280761200057896, "count": 1, "self": 0.001353973000732367, "children": { "RLTrainer._checkpoint": { "total": 0.08145363899984659, "count": 1, "self": 0.08145363899984659 } } } } } } }