{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.11759261786937714, "min": 0.11302082985639572, "max": 1.4565541744232178, "count": 100 }, "Pyramids.Policy.Entropy.sum": { "value": 3531.54150390625, "min": 3457.533203125, "max": 44186.02734375, "count": 100 }, "Pyramids.Step.mean": { "value": 2999910.0, "min": 29952.0, "max": 2999910.0, "count": 100 }, "Pyramids.Step.sum": { "value": 2999910.0, "min": 29952.0, "max": 2999910.0, "count": 100 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.7589465975761414, "min": -0.09696014970541, "max": 0.8105674982070923, "count": 100 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 219.3355712890625, "min": -23.367395401000977, "max": 237.49627685546875, "count": 100 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": -0.010551673360168934, "min": -0.04731383174657822, "max": 0.3173885643482208, "count": 100 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": -3.049433469772339, "min": -13.342500686645508, "max": 76.80802917480469, "count": 100 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.0675092917919669, "min": 0.06429030490800783, "max": 0.07610823091198105, "count": 100 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9451300850875365, "min": 0.5121683190812909, "max": 1.09012978748811, "count": 100 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.01485941252030898, "min": 0.0006757507655948534, "max": 0.0166872980265734, "count": 100 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.20803177528432573, "min": 0.008522922106913026, "max": 0.2336221723720276, "count": 100 }, "Pyramids.Policy.LearningRate.mean": { "value": 1.4294066664214311e-06, "min": 1.4294066664214311e-06, "max": 0.00029838354339596195, "count": 100 }, "Pyramids.Policy.LearningRate.sum": { "value": 2.0011693329900035e-05, "min": 2.0011693329900035e-05, "max": 0.0038861257046247997, "count": 100 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10047643571428573, "min": 0.10047643571428573, "max": 0.19946118095238097, "count": 100 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4066701000000001, "min": 1.3962282666666668, "max": 2.7825402, "count": 100 }, "Pyramids.Policy.Beta.mean": { "value": 5.759592785714296e-05, "min": 5.759592785714296e-05, "max": 0.009946171977142856, "count": 100 }, "Pyramids.Policy.Beta.sum": { "value": 0.0008063429900000014, "min": 0.0008063429900000014, "max": 0.12954798248000002, "count": 100 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.00744998874142766, "min": 0.007240835577249527, "max": 0.48173803091049194, "count": 100 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.10429984331130981, "min": 0.10137169808149338, "max": 3.372166156768799, "count": 100 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 259.5258620689655, "min": 237.3170731707317, "max": 999.0, "count": 100 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 30105.0, "min": 15984.0, "max": 33204.0, "count": 100 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.7059810173151821, "min": -1.0000000521540642, "max": 1.7460495822429658, "count": 100 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 197.89379800856113, "min": -29.235801719129086, "max": 218.2561977803707, "count": 100 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.7059810173151821, "min": -1.0000000521540642, "max": 1.7460495822429658, "count": 100 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 197.89379800856113, "min": -29.235801719129086, "max": 218.2561977803707, "count": 100 }, "Pyramids.Policy.RndReward.mean": { 
"value": 0.019969008787076494, "min": 0.018811473136552896, "max": 9.43836387526244, "count": 100 }, "Pyramids.Policy.RndReward.sum": { "value": 2.3164050193008734, "min": 2.2003511432849336, "max": 151.01382200419903, "count": 100 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 100 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 100 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1720647128", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.3.0+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1720654003" }, "total": 6874.909274339, "count": 1, "self": 0.5282636589990943, "children": { "run_training.setup": { "total": 0.05061716499994873, "count": 1, "self": 0.05061716499994873 }, "TrainerController.start_learning": { "total": 6874.330393515001, "count": 1, "self": 3.699055911128198, "children": { "TrainerController._reset_env": { "total": 2.251565669999991, "count": 1, "self": 2.251565669999991 }, "TrainerController.advance": { "total": 6868.293637442873, "count": 194044, "self": 4.018873070678637, "children": { "env_step": { "total": 5028.618360397986, "count": 194044, "self": 4651.767854060857, "children": { "SubprocessEnvManager._take_step": { "total": 374.5671864120857, "count": 194044, "self": 13.879027396132642, "children": { "TorchPolicy.evaluate": { "total": 360.68815901595303, "count": 187561, "self": 360.68815901595303 } } }, "workers": { "total": 2.2833199250439975, "count": 194044, "self": 0.0, "children": { "worker_root": { "total": 6860.314598699786, "count": 194044, "is_parallel": true, "self": 2558.200381880767, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.002209683999808476, "count": 1, "is_parallel": true, "self": 0.000641375999975935, "children": { "_process_rank_one_or_two_observation": { "total": 0.001568307999832541, "count": 8, "is_parallel": true, "self": 0.001568307999832541 } } }, "UnityEnvironment.step": { "total": 0.05364345099997081, "count": 1, "is_parallel": true, "self": 0.0006529440001941111, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0005208419997870806, "count": 1, "is_parallel": true, "self": 0.0005208419997870806 }, "communicator.exchange": { "total": 0.05077906100018481, "count": 1, "is_parallel": true, "self": 0.05077906100018481 }, "steps_from_proto": { "total": 0.0016906039998048072, "count": 1, "is_parallel": true, "self": 0.0003580149996196269, "children": { "_process_rank_one_or_two_observation": { "total": 0.0013325890001851803, "count": 8, "is_parallel": true, "self": 0.0013325890001851803 } } } } } } }, "UnityEnvironment.step": { "total": 4302.114216819019, "count": 194043, "is_parallel": true, "self": 98.73687450082161, "children": { "UnityEnvironment._generate_step_input": { "total": 69.20014546312336, "count": 194043, "is_parallel": true, "self": 69.20014546312336 }, "communicator.exchange": { "total": 3840.3856010429663, "count": 194043, "is_parallel": true, "self": 3840.3856010429663 }, "steps_from_proto": { "total": 293.7915958121073, "count": 194043, "is_parallel": true, 
"self": 58.8769616019938, "children": { "_process_rank_one_or_two_observation": { "total": 234.9146342101135, "count": 1552344, "is_parallel": true, "self": 234.9146342101135 } } } } } } } } } } }, "trainer_advance": { "total": 1835.656403974208, "count": 194044, "self": 7.415031948164369, "children": { "process_trajectory": { "total": 378.07971819103636, "count": 194044, "self": 377.504450521036, "children": { "RLTrainer._checkpoint": { "total": 0.5752676700003576, "count": 6, "self": 0.5752676700003576 } } }, "_update_policy": { "total": 1450.1616538350072, "count": 1400, "self": 854.7573371970859, "children": { "TorchPPOOptimizer.update": { "total": 595.4043166379213, "count": 68400, "self": 595.4043166379213 } } } } } } }, "trainer_threads": { "total": 7.810003808117472e-07, "count": 1, "self": 7.810003808117472e-07 }, "TrainerController._save_models": { "total": 0.08613370999955805, "count": 1, "self": 0.0017549300000609946, "children": { "RLTrainer._checkpoint": { "total": 0.08437877999949706, "count": 1, "self": 0.08437877999949706 } } } } } } }