{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5703490972518921,
"min": 0.5494472980499268,
"max": 1.4027103185653687,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17101.34765625,
"min": 16562.5390625,
"max": 42552.62109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989997.0,
"min": 29952.0,
"max": 989997.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989997.0,
"min": 29952.0,
"max": 989997.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.09232138842344284,
"min": -0.12184180319309235,
"max": 0.09232138842344284,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 22.988025665283203,
"min": -28.876506805419922,
"max": 22.988025665283203,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.013315600343048573,
"min": 0.007485616020858288,
"max": 0.4585472345352173,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.315584421157837,
"min": 1.8264902830123901,
"max": 108.67569732666016,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06612502732708131,
"min": 0.06429373416306969,
"max": 0.07457556224764238,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9257503825791383,
"min": 0.5012756362031368,
"max": 1.049068538531976,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.006806938070403141,
"min": 0.00013274726171743837,
"max": 0.01624260973897305,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.09529713298564398,
"min": 0.0017257144023266986,
"max": 0.11369826817281137,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.546790341578573e-06,
"min": 7.546790341578573e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010565506478210003,
"min": 0.00010565506478210003,
"max": 0.0035071946309352,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025155642857143,
"min": 0.1025155642857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352179000000003,
"min": 1.3886848,
"max": 2.5690648,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002613048721428573,
"min": 0.0002613048721428573,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003658268210000002,
"min": 0.003658268210000002,
"max": 0.11692957352,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014894582331180573,
"min": 0.014894582331180573,
"max": 0.5332163572311401,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20852415263652802,
"min": 0.20852415263652802,
"max": 3.7325146198272705,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 737.3255813953489,
"min": 737.3255813953489,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31705.0,
"min": 15984.0,
"max": 32528.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.28570693956558096,
"min": -1.0000000521540642,
"max": 0.28570693956558096,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 12.28539840131998,
"min": -31.994401648640633,
"max": 12.28539840131998,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.28570693956558096,
"min": -1.0000000521540642,
"max": 0.28570693956558096,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 12.28539840131998,
"min": -31.994401648640633,
"max": 12.28539840131998,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1159303622553125,
"min": 0.1159303622553125,
"max": 11.660213833674788,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.985005576978438,
"min": 4.483415444614366,
"max": 186.56342133879662,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1759783143",
"python_version": "3.10.12 (main, Aug 15 2025, 14:32:43) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1759785239"
},
"total": 2096.077959533999,
"count": 1,
"self": 0.4292326389977461,
"children": {
"run_training.setup": {
"total": 0.016560984000534518,
"count": 1,
"self": 0.016560984000534518
},
"TrainerController.start_learning": {
"total": 2095.632165911001,
"count": 1,
"self": 1.3077462239789384,
"children": {
"TrainerController._reset_env": {
"total": 1.6186284680006793,
"count": 1,
"self": 1.6186284680006793
},
"TrainerController.advance": {
"total": 2092.6537368150202,
"count": 63283,
"self": 1.3465351380928041,
"children": {
"env_step": {
"total": 1369.5665481859487,
"count": 63283,
"self": 1271.5928540321793,
"children": {
"SubprocessEnvManager._take_step": {
"total": 97.20022819699898,
"count": 63283,
"self": 4.6554433552519185,
"children": {
"TorchPolicy.evaluate": {
"total": 92.54478484174706,
"count": 62552,
"self": 92.54478484174706
}
}
},
"workers": {
"total": 0.7734659567704512,
"count": 63283,
"self": 0.0,
"children": {
"worker_root": {
"total": 2090.506994118112,
"count": 63283,
"is_parallel": true,
"self": 933.0590825298386,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023786649999237852,
"count": 1,
"is_parallel": true,
"self": 0.0007521420011471491,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001626522998776636,
"count": 8,
"is_parallel": true,
"self": 0.001626522998776636
}
}
},
"UnityEnvironment.step": {
"total": 0.04664285899980314,
"count": 1,
"is_parallel": true,
"self": 0.0005312780003805528,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004587119992720545,
"count": 1,
"is_parallel": true,
"self": 0.0004587119992720545
},
"communicator.exchange": {
"total": 0.04399000400007935,
"count": 1,
"is_parallel": true,
"self": 0.04399000400007935
},
"steps_from_proto": {
"total": 0.0016628650000711787,
"count": 1,
"is_parallel": true,
"self": 0.0003587480014175526,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001304116998653626,
"count": 8,
"is_parallel": true,
"self": 0.001304116998653626
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1157.4479115882732,
"count": 63282,
"is_parallel": true,
"self": 32.63660534833434,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.34338906888661,
"count": 63282,
"is_parallel": true,
"self": 23.34338906888661
},
"communicator.exchange": {
"total": 986.3345122918918,
"count": 63282,
"is_parallel": true,
"self": 986.3345122918918
},
"steps_from_proto": {
"total": 115.13340487916048,
"count": 63282,
"is_parallel": true,
"self": 25.490181528660287,
"children": {
"_process_rank_one_or_two_observation": {
"total": 89.6432233505002,
"count": 506256,
"is_parallel": true,
"self": 89.6432233505002
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 721.7406534909787,
"count": 63283,
"self": 2.3948062549025053,
"children": {
"process_trajectory": {
"total": 118.50225092205801,
"count": 63283,
"self": 118.29783259005853,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20441833199947723,
"count": 2,
"self": 0.20441833199947723
}
}
},
"_update_policy": {
"total": 600.8435963140182,
"count": 449,
"self": 282.71514406496317,
"children": {
"TorchPPOOptimizer.update": {
"total": 318.12845224905504,
"count": 22788,
"self": 318.12845224905504
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.739997040014714e-07,
"count": 1,
"self": 9.739997040014714e-07
},
"TrainerController._save_models": {
"total": 0.0520534300012514,
"count": 1,
"self": 0.0010650080021150643,
"children": {
"RLTrainer._checkpoint": {
"total": 0.050988421999136335,
"count": 1,
"self": 0.050988421999136335
}
}
}
}
}
}
}