{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.9544950723648071, "min": 0.9544950723648071, "max": 1.472453236579895, "count": 3 }, "Pyramids.Policy.Entropy.sum": { "value": 28589.037109375, "min": 28589.037109375, "max": 33870.51953125, "count": 3 }, "Pyramids.Step.mean": { "value": 89918.0, "min": 29952.0, "max": 89918.0, "count": 3 }, "Pyramids.Step.sum": { "value": 89918.0, "min": 29952.0, "max": 89918.0, "count": 3 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": -0.09227543324232101, "min": -0.12166137248277664, "max": -0.04000694304704666, "count": 3 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": -22.14610481262207, "min": -29.320390701293945, "max": -6.281090259552002, "count": 3 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.13011230528354645, "min": 0.13011230528354645, "max": 0.3095104992389679, "count": 3 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 31.226953506469727, "min": 31.226953506469727, "max": 55.984806060791016, "count": 3 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06934064822059739, "min": 0.06934064822059739, "max": 0.07116523094444493, "count": 3 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9014284268677661, "min": 0.28466092377777974, "max": 0.9014284268677661, "count": 3 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.0022016426029052094, "min": 0.0022016426029052094, "max": 0.003035719427161474, "count": 3 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.028621353837767722, "min": 0.011314746314425046, "max": 0.033392913698776214, "count": 3 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.89536121436923e-05, "min": 7.89536121436923e-05, "max": 0.000238848020384, "count": 3 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.001026396957868, "min": 0.000955392081536, "max": 0.0018515464828180001, "count": 3 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.12631784615384617, "min": 0.12631784615384617, "max": 0.17961600000000003, "count": 3 }, 
"Pyramids.Policy.Epsilon.sum": { "value": 1.6421320000000001, "min": 0.7184640000000001, "max": 1.717182, "count": 3 }, "Pyramids.Policy.Beta.mean": { "value": 0.002639152830769231, "min": 0.002639152830769231, "max": 0.0079636384, "count": 3 }, "Pyramids.Policy.Beta.sum": { "value": 0.0343089868, "min": 0.0318545536, "max": 0.0617664818, "count": 3 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.09598085284233093, "min": 0.09598085284233093, "max": 0.280129075050354, "count": 3 }, "Pyramids.Losses.RNDLoss.sum": { "value": 1.2477511167526245, "min": 1.120516300201416, "max": 1.7352863550186157, "count": 3 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 960.8484848484849, "min": 960.8484848484849, "max": 999.0, "count": 3 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 31708.0, "min": 15984.0, "max": 31708.0, "count": 3 }, "Pyramids.Environment.CumulativeReward.mean": { "value": -0.772987550823018, "min": -1.0000000521540642, "max": -0.772987550823018, "count": 3 }, "Pyramids.Environment.CumulativeReward.sum": { "value": -24.735601626336575, "min": -24.88520159572363, "max": -16.000000834465027, "count": 3 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": -0.772987550823018, "min": -1.0000000521540642, "max": -0.772987550823018, "count": 3 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": -24.735601626336575, "min": -24.88520159572363, "max": -16.000000834465027, "count": 3 }, "Pyramids.Policy.RndReward.mean": { "value": 0.9588833225425333, "min": 0.9588833225425333, "max": 3.662998544983566, "count": 3 }, "Pyramids.Policy.RndReward.sum": { "value": 30.684266321361065, "min": 30.684266321361065, "max": 58.60797671973705, "count": 3 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 3 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 3 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1761582032", "python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:40:32) [GCC 12.3.0]",
"command_line_arguments": "/usr/local/envs/py310/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume", "mlagents_version": "1.2.0.dev0", "mlagents_envs_version": "1.2.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.8.0+cu128", "numpy_version": "1.23.5", "end_time_seconds": "1761582213" }, "total": 181.10961878900014, "count": 1, "self": 0.4318455920000588, "children": { "run_training.setup": { "total": 0.019743612000070243, "count": 1, "self": 0.019743612000070243 }, "TrainerController.start_learning": { "total": 180.658029585, "count": 1, "self": 0.11765654099349376, "children": { "TrainerController._reset_env": { "total": 2.2267337709999993, "count": 1, "self": 2.2267337709999993 }, "TrainerController.advance": { "total": 178.1853787860067, "count": 5655, "self": 0.12343048800414635, "children": { "env_step": { "total": 122.07895445300687, "count": 5655, "self": 108.83141511399629, "children": { "SubprocessEnvManager._take_step": { "total": 13.17627923100872, "count": 5655, "self": 0.40538089600352123, "children": { "TorchPolicy.evaluate": { "total": 12.7708983350052, "count": 5640, "self": 12.7708983350052 } } }, "workers": { "total": 0.07126010800186577, "count": 5655, "self": 0.0, "children": { "worker_root": { "total": 179.73132612199913, "count": 5655, "is_parallel": true, "self": 81.30669783701126, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0018750010000303519, "count": 1, "is_parallel": true, "self": 0.0005830870001091171, "children": { "_process_rank_one_or_two_observation": { "total": 0.0012919139999212348, "count": 8, "is_parallel": true, "self": 0.0012919139999212348 } } }, "UnityEnvironment.step": { "total": 0.04719369100007498, "count": 1, "is_parallel": true, "self": 
0.0005735930002401801, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0004998549999299939, "count": 1, "is_parallel": true, "self": 0.0004998549999299939 }, "communicator.exchange": { "total": 0.04452522899998712, "count": 1, "is_parallel": true, "self": 0.04452522899998712 }, "steps_from_proto": { "total": 0.0015950139999176827, "count": 1, "is_parallel": true, "self": 0.00033905200007211533, "children": { "_process_rank_one_or_two_observation": { "total": 0.0012559619998455673, "count": 8, "is_parallel": true, "self": 0.0012559619998455673 } } } } } } }, "UnityEnvironment.step": { "total": 98.42462828498788, "count": 5654, "is_parallel": true, "self": 2.934380579981962, "children": { "UnityEnvironment._generate_step_input": { "total": 1.9746993540051108, "count": 5654, "is_parallel": true, "self": 1.9746993540051108 }, "communicator.exchange": { "total": 84.11720353699991, "count": 5654, "is_parallel": true, "self": 84.11720353699991 }, "steps_from_proto": { "total": 9.398344814000893, "count": 5654, "is_parallel": true, "self": 2.043247950983641, "children": { "_process_rank_one_or_two_observation": { "total": 7.355096863017252, "count": 45232, "is_parallel": true, "self": 7.355096863017252 } } } } } } } } } } }, "trainer_advance": { "total": 55.98299384499569, "count": 5655, "self": 0.16891419898968252, "children": { "process_trajectory": { "total": 10.133876768006303, "count": 5655, "self": 10.133876768006303 }, "_update_policy": { "total": 45.680202877999704, "count": 32, "self": 25.33424093800204, "children": { "TorchPPOOptimizer.update": { "total": 20.345961939997665, "count": 2031, "self": 20.345961939997665 } } } } } } }, "trainer_threads": { "total": 8.47999899633578e-07, "count": 1, "self": 8.47999899633578e-07 }, "TrainerController._save_models": { "total": 0.1282596389999071, "count": 1, "self": 0.0011812769998869044, "children": { "RLTrainer._checkpoint": { "total": 0.1270783620000202, "count": 1, "self": 0.1270783620000202 } } } 
} } } }