{
    "name": "root",
    "gauges": {
        "Pyramids.Policy.Entropy.mean": {
            "value": 1.4402799606323242,
            "min": 1.4402799606323242,
            "max": 1.4402799606323242,
            "count": 1
        },
        "Pyramids.Policy.Entropy.sum": {
            "value": 43692.33203125,
            "min": 43692.33203125,
            "max": 43692.33203125,
            "count": 1
        },
        "Pyramids.Step.mean": {
            "value": 29952.0,
            "min": 29952.0,
            "max": 29952.0,
            "count": 1
        },
        "Pyramids.Step.sum": {
            "value": 29952.0,
            "min": 29952.0,
            "max": 29952.0,
            "count": 1
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
            "value": -0.221921905875206,
            "min": -0.221921905875206,
            "max": -0.221921905875206,
            "count": 1
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
            "value": -52.59549331665039,
            "min": -52.59549331665039,
            "max": -52.59549331665039,
            "count": 1
        },
        "Pyramids.Policy.RndValueEstimate.mean": {
            "value": 0.4863353967666626,
            "min": 0.4863353967666626,
            "max": 0.4863353967666626,
            "count": 1
        },
        "Pyramids.Policy.RndValueEstimate.sum": {
            "value": 115.26148986816406,
            "min": 115.26148986816406,
            "max": 115.26148986816406,
            "count": 1
        },
        "Pyramids.Losses.PolicyLoss.mean": {
            "value": 0.07355356168899187,
            "min": 0.07355356168899187,
            "max": 0.07355356168899187,
            "count": 1
        },
        "Pyramids.Losses.PolicyLoss.sum": {
            "value": 0.5148749318229431,
            "min": 0.5148749318229431,
            "max": 0.5148749318229431,
            "count": 1
        },
        "Pyramids.Losses.ValueLoss.mean": {
            "value": 0.00873875612131816,
            "min": 0.00873875612131816,
            "max": 0.00873875612131816,
            "count": 1
        },
        "Pyramids.Losses.ValueLoss.sum": {
            "value": 0.06117129284922712,
            "min": 0.06117129284922712,
            "max": 0.06117129284922712,
            "count": 1
        },
        "Pyramids.Policy.LearningRate.mean": {
            "value": 0.00029838354339596195,
            "min": 0.00029838354339596195,
            "max": 0.00029838354339596195,
            "count": 1
        },
        "Pyramids.Policy.LearningRate.sum": {
            "value": 0.0020886848037717336,
            "min": 0.0020886848037717336,
            "max": 0.0020886848037717336,
            "count": 1
        },
        "Pyramids.Policy.Epsilon.mean": {
            "value": 0.19946118095238097,
            "min": 0.19946118095238097,
            "max": 0.19946118095238097,
            "count": 1
        },
        "Pyramids.Policy.Epsilon.sum": {
            "value": 1.3962282666666668,
            "min": 1.3962282666666668,
            "max": 1.3962282666666668,
            "count": 1
        },
        "Pyramids.Policy.Beta.mean": {
            "value": 0.009946171977142856,
            "min": 0.009946171977142856,
            "max": 0.009946171977142856,
            "count": 1
        },
        "Pyramids.Policy.Beta.sum": {
            "value": 0.06962320384,
            "min": 0.06962320384,
            "max": 0.06962320384,
            "count": 1
        },
        "Pyramids.Losses.RNDLoss.mean": {
            "value": 0.5651363730430603,
            "min": 0.5651363730430603,
            "max": 0.5651363730430603,
            "count": 1
        },
        "Pyramids.Losses.RNDLoss.sum": {
            "value": 3.9559547901153564,
            "min": 3.9559547901153564,
            "max": 3.9559547901153564,
            "count": 1
        },
        "Pyramids.Environment.EpisodeLength.mean": {
            "value": 999.0,
            "min": 999.0,
            "max": 999.0,
            "count": 1
        },
        "Pyramids.Environment.EpisodeLength.sum": {
            "value": 15984.0,
            "min": 15984.0,
            "max": 15984.0,
            "count": 1
        },
        "Pyramids.Environment.CumulativeReward.mean": {
            "value": -1.0000000521540642,
            "min": -1.0000000521540642,
            "max": -1.0000000521540642,
            "count": 1
        },
        "Pyramids.Environment.CumulativeReward.sum": {
            "value": -16.000000834465027,
            "min": -16.000000834465027,
            "max": -16.000000834465027,
            "count": 1
        },
        "Pyramids.Policy.ExtrinsicReward.mean": {
            "value": -1.0000000521540642,
            "min": -1.0000000521540642,
            "max": -1.0000000521540642,
            "count": 1
        },
        "Pyramids.Policy.ExtrinsicReward.sum": {
            "value": -16.000000834465027,
            "min": -16.000000834465027,
            "max": -16.000000834465027,
            "count": 1
        },
        "Pyramids.Policy.RndReward.mean": {
            "value": 12.426324671134353,
            "min": 12.426324671134353,
            "max": 12.426324671134353,
            "count": 1
        },
        "Pyramids.Policy.RndReward.sum": {
            "value": 198.82119473814964,
            "min": 198.82119473814964,
            "max": 198.82119473814964,
            "count": 1
        },
        "Pyramids.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 1
        },
        "Pyramids.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 1
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1724185621",
        "python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_Training --no-graphics",
        "mlagents_version": "1.1.0.dev0",
        "mlagents_envs_version": "1.1.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "2.3.1+cu121",
        "numpy_version": "1.23.5",
        "end_time_seconds": "1724185774"
    },
    "total": 152.73451457800002,
    "count": 1,
    "self": 0.6564099619999979,
    "children": {
        "run_training.setup": {
            "total": 0.07491729100001976,
            "count": 1,
            "self": 0.07491729100001976
        },
        "TrainerController.start_learning": {
            "total": 152.003187325,
            "count": 1,
            "self": 0.11504668400084483,
            "children": {
                "TrainerController._reset_env": {
                    "total": 3.1649533120000797,
                    "count": 1,
                    "self": 3.1649533120000797
                },
                "TrainerController.advance": {
                    "total": 148.3503989669989,
                    "count": 3000,
                    "self": 0.13388741599374043,
                    "children": {
                        "env_step": {
                            "total": 91.04525362799791,
                            "count": 3000,
                            "self": 81.83397648699247,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 9.137691959003405,
                                    "count": 3000,
                                    "self": 0.3984443300080329,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 8.739247628995372,
                                            "count": 3000,
                                            "self": 8.739247628995372
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.07358518200203434,
                                    "count": 2999,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 151.09217432901232,
                                            "count": 2999,
                                            "is_parallel": true,
                                            "self": 79.17709236801352,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.0040168009998069465,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0013484149999385409,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0026683859998684056,
                                                                    "count": 8,
                                                                    "is_parallel": true,
                                                                    "self": 0.0026683859998684056
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.08099046699999235,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0008831420000205981,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0006552380000357516,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0006552380000357516
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.07697491199996875,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.07697491199996875
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0024771749999672465,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0005402839997259434,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0019368910002413031,
                                                                            "count": 8,
                                                                            "is_parallel": true,
                                                                            "self": 0.0019368910002413031
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 71.9150819609988,
                                                    "count": 2998,
                                                    "is_parallel": true,
                                                    "self": 2.5661998550094722,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 1.6024147429957338,
                                                            "count": 2998,
                                                            "is_parallel": true,
                                                            "self": 1.6024147429957338
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 61.08742125099525,
                                                            "count": 2998,
                                                            "is_parallel": true,
                                                            "self": 61.08742125099525
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 6.65904611199835,
                                                            "count": 2998,
                                                            "is_parallel": true,
                                                            "self": 1.4718150589978904,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 5.18723105300046,
                                                                    "count": 23984,
                                                                    "is_parallel": true,
                                                                    "self": 5.18723105300046
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 57.17125792300726,
                            "count": 2999,
                            "self": 0.16945724301353948,
                            "children": {
                                "process_trajectory": {
                                    "total": 8.388986830993417,
                                    "count": 2999,
                                    "self": 8.388986830993417
                                },
                                "_update_policy": {
                                    "total": 48.612813849000304,
                                    "count": 11,
                                    "self": 19.26792076600441,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 29.344893082995895,
                                            "count": 1038,
                                            "self": 29.344893082995895
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 1.9170001905877143e-06,
                    "count": 1,
                    "self": 1.9170001905877143e-06
                },
                "TrainerController._save_models": {
                    "total": 0.37278644499997426,
                    "count": 1,
                    "self": 0.006381074000046283,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.366405370999928,
                            "count": 1,
                            "self": 0.366405370999928
                        }
                    }
                }
            }
        }
    }
}