{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.486691951751709,
"min": 0.45491981506347656,
"max": 1.4656832218170166,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14577.3974609375,
"min": 13676.708984375,
"max": 44462.96484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989923.0,
"min": 29952.0,
"max": 989923.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989923.0,
"min": 29952.0,
"max": 989923.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4779071807861328,
"min": -0.08047345280647278,
"max": 0.5435807108879089,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 129.99075317382812,
"min": -19.394102096557617,
"max": 151.35604858398438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008904339745640755,
"min": -0.016002794727683067,
"max": 0.3366658091545105,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.421980381011963,
"min": -4.144723892211914,
"max": 79.789794921875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0695441907377071,
"min": 0.06276676603438854,
"max": 0.07262257475078861,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9736186703278994,
"min": 0.4931156157272407,
"max": 1.0678179527361256,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01299270672357774,
"min": 0.0006000278432520842,
"max": 0.014579348827671246,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18189789413008836,
"min": 0.006048870790651228,
"max": 0.20198167596633232,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.476676079235714e-06,
"min": 7.476676079235714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001046734651093,
"min": 0.0001046734651093,
"max": 0.0032556560147814,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249219285714287,
"min": 0.10249219285714287,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348907000000002,
"min": 1.3691136000000002,
"max": 2.4434631,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002589700664285714,
"min": 0.0002589700664285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036255809299999995,
"min": 0.0036255809299999995,
"max": 0.10854333813999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00594465434551239,
"min": 0.005903410725295544,
"max": 0.5019513368606567,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08322516083717346,
"min": 0.08264774829149246,
"max": 3.5136592388153076,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 440.27142857142854,
"min": 356.56962025316454,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30819.0,
"min": 15984.0,
"max": 32764.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4990173579342123,
"min": -1.0000000521540642,
"max": 1.6155538205534985,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 103.43219769746065,
"min": -32.000001668930054,
"max": 136.71519854664803,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4990173579342123,
"min": -1.0000000521540642,
"max": 1.6155538205534985,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 103.43219769746065,
"min": -32.000001668930054,
"max": 136.71519854664803,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.027220264729913797,
"min": 0.02202136175205501,
"max": 12.183438105508685,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.878198266364052,
"min": 1.717666216660291,
"max": 194.93500968813896,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701065334",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/home/dfotland/projects/wsl-reinforce/ml-agents/venv-agents/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701067300"
},
"total": 1965.304671260994,
"count": 1,
"self": 0.2677480579877738,
"children": {
"run_training.setup": {
"total": 0.009749623015522957,
"count": 1,
"self": 0.009749623015522957
},
"TrainerController.start_learning": {
"total": 1965.0271735799906,
"count": 1,
"self": 0.663557604275411,
"children": {
"TrainerController._reset_env": {
"total": 3.5269510479993187,
"count": 1,
"self": 3.5269510479993187
},
"TrainerController.advance": {
"total": 1960.7854194667307,
"count": 63746,
"self": 0.5966975034098141,
"children": {
"env_step": {
"total": 932.9096375313529,
"count": 63746,
"self": 578.2640396325733,
"children": {
"SubprocessEnvManager._take_step": {
"total": 354.20039880264085,
"count": 63746,
"self": 2.264477482822258,
"children": {
"TorchPolicy.evaluate": {
"total": 351.9359213198186,
"count": 62572,
"self": 351.9359213198186
}
}
},
"workers": {
"total": 0.4451990961388219,
"count": 63746,
"self": 0.0,
"children": {
"worker_root": {
"total": 1963.3190328038181,
"count": 63746,
"is_parallel": true,
"self": 1429.6696582139703,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012706799898296595,
"count": 1,
"is_parallel": true,
"self": 0.0004720049910247326,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007986749988049269,
"count": 8,
"is_parallel": true,
"self": 0.0007986749988049269
}
}
},
"UnityEnvironment.step": {
"total": 0.01873781898757443,
"count": 1,
"is_parallel": true,
"self": 0.00014780499623157084,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017723199562169611,
"count": 1,
"is_parallel": true,
"self": 0.00017723199562169611
},
"communicator.exchange": {
"total": 0.018010127008892596,
"count": 1,
"is_parallel": true,
"self": 0.018010127008892596
},
"steps_from_proto": {
"total": 0.0004026549868285656,
"count": 1,
"is_parallel": true,
"self": 0.00010492297587916255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00029773201094940305,
"count": 8,
"is_parallel": true,
"self": 0.00029773201094940305
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 533.6493745898479,
"count": 63745,
"is_parallel": true,
"self": 8.528648701729253,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.793877485208213,
"count": 63745,
"is_parallel": true,
"self": 5.793877485208213
},
"communicator.exchange": {
"total": 496.89784187753685,
"count": 63745,
"is_parallel": true,
"self": 496.89784187753685
},
"steps_from_proto": {
"total": 22.429006525373552,
"count": 63745,
"is_parallel": true,
"self": 5.222279333422193,
"children": {
"_process_rank_one_or_two_observation": {
"total": 17.20672719195136,
"count": 509960,
"is_parallel": true,
"self": 17.20672719195136
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1027.279084431968,
"count": 63746,
"self": 1.2593565461575054,
"children": {
"process_trajectory": {
"total": 129.66172403102973,
"count": 63746,
"self": 129.53123492203304,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13048910899669863,
"count": 2,
"self": 0.13048910899669863
}
}
},
"_update_policy": {
"total": 896.3580038547807,
"count": 443,
"self": 268.65442817352596,
"children": {
"TorchPPOOptimizer.update": {
"total": 627.7035756812547,
"count": 22830,
"self": 627.7035756812547
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.859757609665394e-07,
"count": 1,
"self": 4.859757609665394e-07
},
"TrainerController._save_models": {
"total": 0.05124497500946745,
"count": 1,
"self": 0.000507761025801301,
"children": {
"RLTrainer._checkpoint": {
"total": 0.05073721398366615,
"count": 1,
"self": 0.05073721398366615
}
}
}
}
}
}
}