{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.28850871324539185,
"min": 0.28850871324539185,
"max": 1.481791377067566,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8567.5546875,
"min": 8567.5546875,
"max": 44951.625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989891.0,
"min": 29952.0,
"max": 989891.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989891.0,
"min": 29952.0,
"max": 989891.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5664639472961426,
"min": -0.09751632064580917,
"max": 0.631919801235199,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 155.21112060546875,
"min": -23.598949432373047,
"max": 183.88865661621094,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01418967917561531,
"min": -0.0036025254521518946,
"max": 0.26148971915245056,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.887972116470337,
"min": -0.9510667324066162,
"max": 62.75753402709961,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06711690029450915,
"min": 0.06351086551772564,
"max": 0.07293790684796754,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.939636604123128,
"min": 0.47759051730484314,
"max": 1.0276374082479345,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01679909037747642,
"min": 0.0004550705946399824,
"max": 0.016912353860159896,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2351872652846699,
"min": 0.003185494162479877,
"max": 0.23677295404223853,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.66640458742143e-06,
"min": 7.66640458742143e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010732966422390002,
"min": 0.00010732966422390002,
"max": 0.0032582546139151994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255543571428574,
"min": 0.10255543571428574,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357761000000004,
"min": 1.327104,
"max": 2.4821624,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000265288027857143,
"min": 0.000265288027857143,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003714032390000002,
"min": 0.003714032390000002,
"max": 0.10862987152000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014071226119995117,
"min": 0.014071226119995117,
"max": 0.46310392022132874,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19699716567993164,
"min": 0.19699716567993164,
"max": 3.241727352142334,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 352.32530120481925,
"min": 294.19,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29243.0,
"min": 15984.0,
"max": 33789.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6235662511134723,
"min": -1.0000000521540642,
"max": 1.7065959423780441,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 134.7559988424182,
"min": -32.000001668930054,
"max": 168.95299829542637,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6235662511134723,
"min": -1.0000000521540642,
"max": 1.7065959423780441,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 134.7559988424182,
"min": -32.000001668930054,
"max": 168.95299829542637,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05152049591780236,
"min": 0.04686120382775882,
"max": 8.98849476967007,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.276201161177596,
"min": 4.276201161177596,
"max": 143.8159163147211,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723518303",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1723520901"
},
"total": 2598.839057718,
"count": 1,
"self": 0.694141536999723,
"children": {
"run_training.setup": {
"total": 0.09467989399990984,
"count": 1,
"self": 0.09467989399990984
},
"TrainerController.start_learning": {
"total": 2598.050236287,
"count": 1,
"self": 1.8244219039866039,
"children": {
"TrainerController._reset_env": {
"total": 3.4984390169997823,
"count": 1,
"self": 3.4984390169997823
},
"TrainerController.advance": {
"total": 2592.630405206014,
"count": 63859,
"self": 1.9964963559882563,
"children": {
"env_step": {
"total": 1854.2718894229915,
"count": 63859,
"self": 1690.0204778139505,
"children": {
"SubprocessEnvManager._take_step": {
"total": 163.11515019503804,
"count": 63859,
"self": 5.7586925060090834,
"children": {
"TorchPolicy.evaluate": {
"total": 157.35645768902896,
"count": 62551,
"self": 157.35645768902896
}
}
},
"workers": {
"total": 1.1362614140030018,
"count": 63859,
"self": 0.0,
"children": {
"worker_root": {
"total": 2591.892845802115,
"count": 63859,
"is_parallel": true,
"self": 1053.8497279190533,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0031065100001796964,
"count": 1,
"is_parallel": true,
"self": 0.0008254950007540174,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002281014999425679,
"count": 8,
"is_parallel": true,
"self": 0.002281014999425679
}
}
},
"UnityEnvironment.step": {
"total": 0.052211807999810844,
"count": 1,
"is_parallel": true,
"self": 0.0006785059999856458,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005047999998168962,
"count": 1,
"is_parallel": true,
"self": 0.0005047999998168962
},
"communicator.exchange": {
"total": 0.049128727999686816,
"count": 1,
"is_parallel": true,
"self": 0.049128727999686816
},
"steps_from_proto": {
"total": 0.0018997740003214858,
"count": 1,
"is_parallel": true,
"self": 0.00039312900025834097,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015066450000631448,
"count": 8,
"is_parallel": true,
"self": 0.0015066450000631448
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1538.0431178830618,
"count": 63858,
"is_parallel": true,
"self": 41.35288127308104,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.412912180875537,
"count": 63858,
"is_parallel": true,
"self": 26.412912180875537
},
"communicator.exchange": {
"total": 1354.065577954002,
"count": 63858,
"is_parallel": true,
"self": 1354.065577954002
},
"steps_from_proto": {
"total": 116.21174647510315,
"count": 63858,
"is_parallel": true,
"self": 24.681452273079685,
"children": {
"_process_rank_one_or_two_observation": {
"total": 91.53029420202347,
"count": 510864,
"is_parallel": true,
"self": 91.53029420202347
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 736.3620194270343,
"count": 63859,
"self": 3.4757341020690546,
"children": {
"process_trajectory": {
"total": 144.85352401197406,
"count": 63859,
"self": 144.63813858797357,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21538542400048755,
"count": 2,
"self": 0.21538542400048755
}
}
},
"_update_policy": {
"total": 588.0327613129912,
"count": 437,
"self": 350.96503967503804,
"children": {
"TorchPPOOptimizer.update": {
"total": 237.06772163795313,
"count": 22842,
"self": 237.06772163795313
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.442999746359419e-06,
"count": 1,
"self": 1.442999746359419e-06
},
"TrainerController._save_models": {
"total": 0.09696871699998155,
"count": 1,
"self": 0.0015329709995057783,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09543574600047577,
"count": 1,
"self": 0.09543574600047577
}
}
}
}
}
}
}