{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4814678132534027,
"min": 0.4814678132534027,
"max": 1.4757566452026367,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14544.1796875,
"min": 14535.201171875,
"max": 44768.5546875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989989.0,
"min": 29877.0,
"max": 989989.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989989.0,
"min": 29877.0,
"max": 989989.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4016495943069458,
"min": -0.09800539165735245,
"max": 0.45544177293777466,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 108.44538879394531,
"min": -23.717304229736328,
"max": 121.60295104980469,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.016832606866955757,
"min": -0.06303635239601135,
"max": 0.22253024578094482,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.544803619384766,
"min": -16.011234283447266,
"max": 53.852317810058594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0691474534920417,
"min": 0.06603029874815078,
"max": 0.07305688811150331,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0372118023806254,
"min": 0.582033922196243,
"max": 1.0958533216725497,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012539857543500451,
"min": 0.000873317718231119,
"max": 0.014219420939168361,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18809786315250676,
"min": 0.012226448055235666,
"max": 0.19907189314835705,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.449837516753334e-06,
"min": 7.449837516753334e-06,
"max": 0.00029500485166505,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001117475627513,
"min": 0.0001117475627513,
"max": 0.003758442747185799,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248324666666667,
"min": 0.10248324666666667,
"max": 0.19833494999999995,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5372487,
"min": 1.4775653,
"max": 2.6528141999999995,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025807634200000003,
"min": 0.00025807634200000003,
"max": 0.009833661505,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038711451300000002,
"min": 0.0038711451300000002,
"max": 0.12529613858,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005538344383239746,
"min": 0.005341253709048033,
"max": 0.2800745368003845,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08307516574859619,
"min": 0.07477755099534988,
"max": 2.240596294403076,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 404.8285714285714,
"min": 404.8285714285714,
"max": 998.15625,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28338.0,
"min": 16164.0,
"max": 32742.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4247267423591143,
"min": -0.9364250514190644,
"max": 1.4247267423591143,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 101.15559870749712,
"min": -29.96560164541006,
"max": 101.15559870749712,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4247267423591143,
"min": -0.9364250514190644,
"max": 1.4247267423591143,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 101.15559870749712,
"min": -29.96560164541006,
"max": 101.15559870749712,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.023616251905455292,
"min": 0.023616251905455292,
"max": 5.296832508024047,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.6767538852873258,
"min": 1.6767538852873258,
"max": 90.0461526364088,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739184020",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739186222"
},
"total": 2201.792049945,
"count": 1,
"self": 0.48743215300055454,
"children": {
"run_training.setup": {
"total": 0.02108578899992608,
"count": 1,
"self": 0.02108578899992608
},
"TrainerController.start_learning": {
"total": 2201.283532003,
"count": 1,
"self": 1.4439467599950149,
"children": {
"TrainerController._reset_env": {
"total": 2.1165852449998965,
"count": 1,
"self": 2.1165852449998965
},
"TrainerController.advance": {
"total": 2197.6367989890055,
"count": 63628,
"self": 1.4420454469159267,
"children": {
"env_step": {
"total": 1503.4316107250752,
"count": 63628,
"self": 1341.8060046550115,
"children": {
"SubprocessEnvManager._take_step": {
"total": 160.79653164706156,
"count": 63628,
"self": 4.825377938144811,
"children": {
"TorchPolicy.evaluate": {
"total": 155.97115370891675,
"count": 62569,
"self": 155.97115370891675
}
}
},
"workers": {
"total": 0.8290744230021119,
"count": 63628,
"self": 0.0,
"children": {
"worker_root": {
"total": 2196.0852346659703,
"count": 63628,
"is_parallel": true,
"self": 972.5755670519841,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020212090000768512,
"count": 1,
"is_parallel": true,
"self": 0.0006434360004732298,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013777729996036214,
"count": 8,
"is_parallel": true,
"self": 0.0013777729996036214
}
}
},
"UnityEnvironment.step": {
"total": 0.049589081000021906,
"count": 1,
"is_parallel": true,
"self": 0.0005719190000945673,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046004399996490974,
"count": 1,
"is_parallel": true,
"self": 0.00046004399996490974
},
"communicator.exchange": {
"total": 0.04684542299992245,
"count": 1,
"is_parallel": true,
"self": 0.04684542299992245
},
"steps_from_proto": {
"total": 0.001711695000039981,
"count": 1,
"is_parallel": true,
"self": 0.00035319100038577744,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013585039996542037,
"count": 8,
"is_parallel": true,
"self": 0.0013585039996542037
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1223.5096676139863,
"count": 63627,
"is_parallel": true,
"self": 32.050181592014496,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.796937146007167,
"count": 63627,
"is_parallel": true,
"self": 23.796937146007167
},
"communicator.exchange": {
"total": 1068.842170234992,
"count": 63627,
"is_parallel": true,
"self": 1068.842170234992
},
"steps_from_proto": {
"total": 98.82037864097265,
"count": 63627,
"is_parallel": true,
"self": 19.813846435023834,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.00653220594882,
"count": 509016,
"is_parallel": true,
"self": 79.00653220594882
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 692.7631428170143,
"count": 63628,
"self": 2.813987870056735,
"children": {
"process_trajectory": {
"total": 132.4805058139516,
"count": 63628,
"self": 132.26886879195104,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21163702200055923,
"count": 2,
"self": 0.21163702200055923
}
}
},
"_update_policy": {
"total": 557.468649133006,
"count": 457,
"self": 308.3323676199698,
"children": {
"TorchPPOOptimizer.update": {
"total": 249.13628151303624,
"count": 22746,
"self": 249.13628151303624
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.369997885893099e-07,
"count": 1,
"self": 8.369997885893099e-07
},
"TrainerController._save_models": {
"total": 0.08620017199973518,
"count": 1,
"self": 0.0013787209995825833,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08482145100015259,
"count": 1,
"self": 0.08482145100015259
}
}
}
}
}
}
}