{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2898741066455841,
"min": 0.2898741066455841,
"max": 1.4340291023254395,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8728.689453125,
"min": 8728.689453125,
"max": 43502.70703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989911.0,
"min": 29952.0,
"max": 989911.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989911.0,
"min": 29952.0,
"max": 989911.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7211350798606873,
"min": -0.11470767110586166,
"max": 0.7211350798606873,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 214.1771240234375,
"min": -27.629966735839844,
"max": 214.1771240234375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0013049918925389647,
"min": -0.0013049918925389647,
"max": 0.5387399196624756,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.38758260011672974,
"min": -0.38758260011672974,
"max": 127.68136596679688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06769554042280196,
"min": 0.06586795225580101,
"max": 0.07389445668446744,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9477375659192274,
"min": 0.48959047148698653,
"max": 1.0471246338953886,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017134472637325433,
"min": 0.001548702897332188,
"max": 0.018003718509320255,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23988261692255605,
"min": 0.020133137665318445,
"max": 0.24485631606269942,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.354511834242853e-06,
"min": 7.354511834242853e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010296316567939994,
"min": 0.00010296316567939994,
"max": 0.0037586437471187994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245147142857143,
"min": 0.10245147142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343206,
"min": 1.3886848,
"max": 2.6528812,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002549019957142856,
"min": 0.0002549019957142856,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003568627939999998,
"min": 0.003568627939999998,
"max": 0.12530283188,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015734326094388962,
"min": 0.015552767552435398,
"max": 0.7007458806037903,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.22028055787086487,
"min": 0.21967358887195587,
"max": 4.905220985412598,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 250.68595041322314,
"min": 250.68595041322314,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30333.0,
"min": 15984.0,
"max": 33866.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7327818056522322,
"min": -1.0000000521540642,
"max": 1.7359629421046487,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 209.6665984839201,
"min": -27.718001902103424,
"max": 209.6665984839201,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7327818056522322,
"min": -1.0000000521540642,
"max": 1.7359629421046487,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 209.6665984839201,
"min": -27.718001902103424,
"max": 209.6665984839201,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04048828251093668,
"min": 0.04048064887582837,
"max": 14.548090299591422,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.899082183823339,
"min": 4.618367761650006,
"max": 232.76944479346275,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1772861855",
"python_version": "3.10.12 (main, Jan 26 2026, 14:55:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1772864221"
},
"total": 2366.2381627879995,
"count": 1,
"self": 0.48534531199948105,
"children": {
"run_training.setup": {
"total": 0.035465578000184905,
"count": 1,
"self": 0.035465578000184905
},
"TrainerController.start_learning": {
"total": 2365.717351898,
"count": 1,
"self": 1.6498473809897405,
"children": {
"TrainerController._reset_env": {
"total": 2.781975977000002,
"count": 1,
"self": 2.781975977000002
},
"TrainerController.advance": {
"total": 2361.20115385801,
"count": 64394,
"self": 1.6074636149573962,
"children": {
"env_step": {
"total": 1670.825222266999,
"count": 64394,
"self": 1508.342397243976,
"children": {
"SubprocessEnvManager._take_step": {
"total": 161.5224315099822,
"count": 64394,
"self": 4.888869679025902,
"children": {
"TorchPolicy.evaluate": {
"total": 156.6335618309563,
"count": 62556,
"self": 156.6335618309563
}
}
},
"workers": {
"total": 0.960393513040799,
"count": 64394,
"self": 0.0,
"children": {
"worker_root": {
"total": 2358.8628637479005,
"count": 64394,
"is_parallel": true,
"self": 974.3007925029174,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00277610300008746,
"count": 1,
"is_parallel": true,
"self": 0.0007818019998921955,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019943010001952644,
"count": 8,
"is_parallel": true,
"self": 0.0019943010001952644
}
}
},
"UnityEnvironment.step": {
"total": 0.05870109699981185,
"count": 1,
"is_parallel": true,
"self": 0.0005332709997674101,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043237399995632586,
"count": 1,
"is_parallel": true,
"self": 0.00043237399995632586
},
"communicator.exchange": {
"total": 0.05585312099992734,
"count": 1,
"is_parallel": true,
"self": 0.05585312099992734
},
"steps_from_proto": {
"total": 0.0018823310001607751,
"count": 1,
"is_parallel": true,
"self": 0.0003789350002989522,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001503395999861823,
"count": 8,
"is_parallel": true,
"self": 0.001503395999861823
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1384.5620712449831,
"count": 64393,
"is_parallel": true,
"self": 34.12191828596906,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.435211848030576,
"count": 64393,
"is_parallel": true,
"self": 22.435211848030576
},
"communicator.exchange": {
"total": 1219.8042228299782,
"count": 64393,
"is_parallel": true,
"self": 1219.8042228299782
},
"steps_from_proto": {
"total": 108.20071828100527,
"count": 64393,
"is_parallel": true,
"self": 23.466313746827836,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.73440453417743,
"count": 515144,
"is_parallel": true,
"self": 84.73440453417743
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 688.7684679760537,
"count": 64394,
"self": 3.21401342111767,
"children": {
"process_trajectory": {
"total": 131.1073588679351,
"count": 64394,
"self": 130.9182494849356,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1891093829995043,
"count": 2,
"self": 0.1891093829995043
}
}
},
"_update_policy": {
"total": 554.4470956870009,
"count": 457,
"self": 303.95629589295504,
"children": {
"TorchPPOOptimizer.update": {
"total": 250.49079979404587,
"count": 22806,
"self": 250.49079979404587
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.529996193828993e-07,
"count": 1,
"self": 8.529996193828993e-07
},
"TrainerController._save_models": {
"total": 0.08437382900046941,
"count": 1,
"self": 0.0013288480004121084,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0830449810000573,
"count": 1,
"self": 0.0830449810000573
}
}
}
}
}
}
}