{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.298994779586792,
"min": 0.298994779586792,
"max": 1.455559492111206,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8922.00390625,
"min": 8922.00390625,
"max": 44155.8515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5951288342475891,
"min": -0.10273827612400055,
"max": 0.5951288342475891,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 167.82632446289062,
"min": -24.759923934936523,
"max": 167.82632446289062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.027363643050193787,
"min": -0.04839562252163887,
"max": 0.270280659198761,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.71654748916626,
"min": -13.453983306884766,
"max": 64.86735534667969,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0709976014512081,
"min": 0.062307845604154664,
"max": 0.0736954469898248,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9939664203169134,
"min": 0.5032895741925372,
"max": 1.0404911754206598,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016221521657869397,
"min": 0.0011163409301227691,
"max": 0.017868304395233285,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22710130321017158,
"min": 0.008696662315742287,
"max": 0.26802456592849927,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3354618405928595e-06,
"min": 7.3354618405928595e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010269646576830003,
"min": 0.00010269646576830003,
"max": 0.0032577102140967,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244512142857144,
"min": 0.10244512142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342317000000002,
"min": 1.3691136000000002,
"max": 2.5274766,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002542676307142858,
"min": 0.0002542676307142858,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003559746830000001,
"min": 0.003559746830000001,
"max": 0.10862173967,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009958148002624512,
"min": 0.009958148002624512,
"max": 0.43389567732810974,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13941407203674316,
"min": 0.13941407203674316,
"max": 3.0372698307037354,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 321.6304347826087,
"min": 316.4639175257732,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29590.0,
"min": 15984.0,
"max": 32402.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6551208641011637,
"min": -1.0000000521540642,
"max": 1.6551208641011637,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 150.6159986332059,
"min": -32.000001668930054,
"max": 160.8313976302743,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6551208641011637,
"min": -1.0000000521540642,
"max": 1.6551208641011637,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 150.6159986332059,
"min": -32.000001668930054,
"max": 160.8313976302743,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0334390595946768,
"min": 0.0334390595946768,
"max": 9.24591356702149,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0429544231155887,
"min": 3.0429544231155887,
"max": 147.93461707234383,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702844952",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702847109"
},
"total": 2157.511885796,
"count": 1,
"self": 0.4768154900002628,
"children": {
"run_training.setup": {
"total": 0.04586315000005925,
"count": 1,
"self": 0.04586315000005925
},
"TrainerController.start_learning": {
"total": 2156.989207156,
"count": 1,
"self": 1.355790103938034,
"children": {
"TrainerController._reset_env": {
"total": 1.9440357630001017,
"count": 1,
"self": 1.9440357630001017
},
"TrainerController.advance": {
"total": 2153.6007212040618,
"count": 63896,
"self": 1.4014022630740328,
"children": {
"env_step": {
"total": 1525.1982764899913,
"count": 63896,
"self": 1394.740882413091,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.61764575391294,
"count": 63896,
"self": 4.686751131872597,
"children": {
"TorchPolicy.evaluate": {
"total": 124.93089462204034,
"count": 62552,
"self": 124.93089462204034
}
}
},
"workers": {
"total": 0.8397483229873615,
"count": 63896,
"self": 0.0,
"children": {
"worker_root": {
"total": 2151.888454610957,
"count": 63896,
"is_parallel": true,
"self": 877.9743223220098,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001664329000050202,
"count": 1,
"is_parallel": true,
"self": 0.0005204060000778554,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011439229999723466,
"count": 8,
"is_parallel": true,
"self": 0.0011439229999723466
}
}
},
"UnityEnvironment.step": {
"total": 0.05296441700011201,
"count": 1,
"is_parallel": true,
"self": 0.0005742160003592289,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005029469998589775,
"count": 1,
"is_parallel": true,
"self": 0.0005029469998589775
},
"communicator.exchange": {
"total": 0.050216353999985586,
"count": 1,
"is_parallel": true,
"self": 0.050216353999985586
},
"steps_from_proto": {
"total": 0.0016708999999082152,
"count": 1,
"is_parallel": true,
"self": 0.00039878699931250594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012721130005957093,
"count": 8,
"is_parallel": true,
"self": 0.0012721130005957093
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1273.9141322889473,
"count": 63895,
"is_parallel": true,
"self": 34.933429909751794,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.83320170505999,
"count": 63895,
"is_parallel": true,
"self": 24.83320170505999
},
"communicator.exchange": {
"total": 1113.516335700075,
"count": 63895,
"is_parallel": true,
"self": 1113.516335700075
},
"steps_from_proto": {
"total": 100.63116497406054,
"count": 63895,
"is_parallel": true,
"self": 20.2012589581434,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.42990601591714,
"count": 511160,
"is_parallel": true,
"self": 80.42990601591714
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 627.0010424509962,
"count": 63896,
"self": 2.655270620972942,
"children": {
"process_trajectory": {
"total": 126.98859662802192,
"count": 63896,
"self": 126.79614209402212,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1924545339998076,
"count": 2,
"self": 0.1924545339998076
}
}
},
"_update_policy": {
"total": 497.3571752020014,
"count": 447,
"self": 297.6529673510213,
"children": {
"TorchPPOOptimizer.update": {
"total": 199.70420785098008,
"count": 22821,
"self": 199.70420785098008
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.71000338520389e-07,
"count": 1,
"self": 8.71000338520389e-07
},
"TrainerController._save_models": {
"total": 0.08865921399956278,
"count": 1,
"self": 0.0018681449992072885,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08679106900035549,
"count": 1,
"self": 0.08679106900035549
}
}
}
}
}
}
}