{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.2755347788333893, "min": 0.24827715754508972, "max": 1.416056513786316, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 8199.9150390625, "min": 7432.4248046875, "max": 42957.4921875, "count": 33 }, "Pyramids.Step.mean": { "value": 989924.0, "min": 29952.0, "max": 989924.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989924.0, "min": 29952.0, "max": 989924.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.6539541482925415, "min": -0.08198705315589905, "max": 0.6539541482925415, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 183.7611083984375, "min": -19.840866088867188, "max": 184.78421020507812, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.0027792686596512794, "min": 0.0027792686596512794, "max": 0.42529433965682983, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 0.7809745073318481, "min": 0.7809745073318481, "max": 100.79476165771484, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06642237757928558, "min": 0.0635752631751866, "max": 0.07586022215712417, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9299132861099981, "min": 0.5011392970821665, "max": 1.0653333288227098, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.016811673073486116, "min": 0.0005199830136122751, "max": 0.017390320828348023, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.23536342302880564, "min": 0.006759779176959575, "max": 0.26020822213225375, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.5773903313785715e-06, "min": 7.5773903313785715e-06, "max": 0.00029515063018788575, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.0001060834646393, "min": 0.0001060834646393, "max": 0.0035090660303114, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10252576428571428, "min": 0.10252576428571428, "max": 0.19838354285714285, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4353607, "min": 1.3886848, "max": 2.5696886000000005, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.00026232385214285716, "min": 0.00026232385214285716, "max": 0.00983851593142857, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.00367253393, "min": 0.00367253393, "max": 0.11699189114, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.015187530778348446, "min": 0.015187530778348446, "max": 0.5200672745704651, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.2126254290342331, "min": 0.2126254290342331, "max": 3.6404707431793213, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 285.7821782178218, "min": 281.2232142857143, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 28864.0, "min": 15984.0, "max": 33779.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.6547979995018185, "min": -1.0000000521540642, "max": 1.683667239032488, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 167.13459794968367, "min": -29.994801551103592, "max": 190.25439801067114, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.6547979995018185, "min": -1.0000000521540642, "max": 1.683667239032488, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 167.13459794968367, "min": -29.994801551103592, "max": 190.25439801067114, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 0.04475060600910156, "min": 0.04475060600910156, 
"max": 11.373648265376687, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 4.5198112069192575, "min": 4.5198112069192575, "max": 181.978372246027, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1731079464", "python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.2.0.dev0", "mlagents_envs_version": "1.2.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.5.0+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1731081613" }, "total": 2149.87468979, "count": 1, "self": 0.49002891600048315, "children": { "run_training.setup": { "total": 0.05207508999978927, "count": 1, "self": 0.05207508999978927 }, "TrainerController.start_learning": { "total": 2149.332585784, "count": 1, "self": 1.3008672699779709, "children": { "TrainerController._reset_env": { "total": 2.592487983999945, "count": 1, "self": 2.592487983999945 }, "TrainerController.advance": { "total": 2145.356232687022, "count": 63937, "self": 1.3561657381028454, "children": { "env_step": { "total": 1474.7933231259753, "count": 63937, "self": 1328.5136448400667, "children": { "SubprocessEnvManager._take_step": { "total": 145.4996730619282, "count": 63937, "self": 4.459911663851926, "children": { "TorchPolicy.evaluate": { "total": 141.03976139807628, "count": 62564, "self": 141.03976139807628 } } }, "workers": { "total": 0.7800052239804245, "count": 63937, "self": 0.0, "children": { "worker_root": { "total": 2145.0218759470667, "count": 63937, "is_parallel": true, "self": 926.5758009700621, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0021229210001365573, "count": 1, "is_parallel": true, "self": 0.0006994230000145762, "children": { "_process_rank_one_or_two_observation": { "total": 0.001423498000121981, "count": 8, "is_parallel": true, "self": 0.001423498000121981 } } }, "UnityEnvironment.step": { "total": 0.10249720499996329, "count": 1, "is_parallel": true, "self": 0.0006519719997868378, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0004156449999754841, "count": 1, "is_parallel": true, "self": 0.0004156449999754841 }, "communicator.exchange": { "total": 0.09976565700003448, "count": 1, "is_parallel": true, "self": 0.09976565700003448 }, "steps_from_proto": { "total": 0.0016639310001664853, "count": 1, "is_parallel": true, "self": 0.00035573700029090105, "children": { "_process_rank_one_or_two_observation": { "total": 0.0013081939998755843, "count": 8, "is_parallel": true, "self": 0.0013081939998755843 } } } } } } }, "UnityEnvironment.step": { "total": 1218.4460749770046, "count": 63936, "is_parallel": true, "self": 32.73257503295076, "children": { "UnityEnvironment._generate_step_input": { "total": 23.081975038047403, "count": 63936, "is_parallel": true, "self": 23.081975038047403 }, "communicator.exchange": { "total": 1066.3283353119746, "count": 63936, "is_parallel": true, "self": 1066.3283353119746 }, "steps_from_proto": { "total": 96.30318959403189, "count": 63936, "is_parallel": true, "self": 19.566949659889815, "children": { 
"_process_rank_one_or_two_observation": { "total": 76.73623993414208, "count": 511488, "is_parallel": true, "self": 76.73623993414208 } } } } } } } } } } }, "trainer_advance": { "total": 669.2067438229435, "count": 63937, "self": 2.4583449569793174, "children": { "process_trajectory": { "total": 128.29734714196275, "count": 63937, "self": 128.09948033496312, "children": { "RLTrainer._checkpoint": { "total": 0.19786680699962744, "count": 2, "self": 0.19786680699962744 } } }, "_update_policy": { "total": 538.4510517240014, "count": 456, "self": 301.5205892630436, "children": { "TorchPPOOptimizer.update": { "total": 236.93046246095787, "count": 22824, "self": 236.93046246095787 } } } } } } }, "trainer_threads": { "total": 1.0650001058820635e-06, "count": 1, "self": 1.0650001058820635e-06 }, "TrainerController._save_models": { "total": 0.08299677800005156, "count": 1, "self": 0.001432797999768809, "children": { "RLTrainer._checkpoint": { "total": 0.08156398000028275, "count": 1, "self": 0.08156398000028275 } } } } } } }