{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.24058416485786438,
"min": 0.24058416485786438,
"max": 1.3921812772750854,
"count": 70
},
"Pyramids.Policy.Entropy.sum": {
"value": 7225.2236328125,
"min": 7225.2236328125,
"max": 42233.2109375,
"count": 70
},
"Pyramids.Step.mean": {
"value": 2099984.0,
"min": 29952.0,
"max": 2099984.0,
"count": 70
},
"Pyramids.Step.sum": {
"value": 2099984.0,
"min": 29952.0,
"max": 2099984.0,
"count": 70
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7431915402412415,
"min": -0.09014277905225754,
"max": 0.8283329606056213,
"count": 70
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 218.49832153320312,
"min": -21.724409103393555,
"max": 250.98489379882812,
"count": 70
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007535465527325869,
"min": -0.00922531820833683,
"max": 0.30421677231788635,
"count": 70
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.2154269218444824,
"min": -2.3801321983337402,
"max": 72.09937286376953,
"count": 70
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06804317341367197,
"min": 0.0618924740766589,
"max": 0.07483786047114768,
"count": 70
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0206476012050796,
"min": 0.4682683530082959,
"max": 1.0630749988314863,
"count": 70
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013696637823078263,
"min": 0.0005092636658207011,
"max": 0.016259862353243597,
"count": 70
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20544956734617395,
"min": 0.005166852896763885,
"max": 0.22763807294541039,
"count": 70
},
"Pyramids.Policy.LearningRate.mean": {
"value": 9.149695616770444e-05,
"min": 9.149695616770444e-05,
"max": 0.00029838354339596195,
"count": 70
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0013724543425155666,
"min": 0.0013238581587142664,
"max": 0.003801111532962866,
"count": 70
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.13049896222222226,
"min": 0.13049896222222226,
"max": 0.19946118095238097,
"count": 70
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.957484433333334,
"min": 1.3897045333333333,
"max": 2.737500766666667,
"count": 70
},
"Pyramids.Policy.Beta.mean": {
"value": 0.003056846326,
"min": 0.003056846326,
"max": 0.009946171977142856,
"count": 70
},
"Pyramids.Policy.Beta.sum": {
"value": 0.045852694890000004,
"min": 0.04422444476000001,
"max": 0.12671700962,
"count": 70
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.004412406589835882,
"min": 0.004189548548310995,
"max": 0.3828447163105011,
"count": 70
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.0661861002445221,
"min": 0.058653682470321655,
"max": 2.67991304397583,
"count": 70
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 261.0877192982456,
"min": 219.91176470588235,
"max": 999.0,
"count": 70
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29764.0,
"min": 15984.0,
"max": 33364.0,
"count": 70
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6862631430359263,
"min": -1.0000000521540642,
"max": 1.757076910367379,
"count": 70
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 192.2339983060956,
"min": -32.000001668930054,
"max": 236.32119871675968,
"count": 70
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6862631430359263,
"min": -1.0000000521540642,
"max": 1.757076910367379,
"count": 70
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 192.2339983060956,
"min": -32.000001668930054,
"max": 236.32119871675968,
"count": 70
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.012169307715834165,
"min": 0.0099589949698047,
"max": 7.914530890993774,
"count": 70
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.3873010796050949,
"min": 1.336946286224702,
"max": 126.63249425590038,
"count": 70
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 70
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 70
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677037118",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1677042290"
},
"total": 5171.746010333,
"count": 1,
"self": 0.3395317320000686,
"children": {
"run_training.setup": {
"total": 0.1114660210000693,
"count": 1,
"self": 0.1114660210000693
},
"TrainerController.start_learning": {
"total": 5171.29501258,
"count": 1,
"self": 2.7409496970449254,
"children": {
"TrainerController._reset_env": {
"total": 10.186876602999973,
"count": 1,
"self": 10.186876602999973
},
"TrainerController.advance": {
"total": 5158.226997986954,
"count": 136878,
"self": 3.00958142102445,
"children": {
"env_step": {
"total": 3544.8925625278625,
"count": 136878,
"self": 3302.7738869408863,
"children": {
"SubprocessEnvManager._take_step": {
"total": 240.40450124701624,
"count": 136878,
"self": 9.721710267964568,
"children": {
"TorchPolicy.evaluate": {
"total": 230.68279097905167,
"count": 132540,
"self": 77.88010333010766,
"children": {
"TorchPolicy.sample_actions": {
"total": 152.80268764894402,
"count": 132540,
"self": 152.80268764894402
}
}
}
}
},
"workers": {
"total": 1.7141743399598681,
"count": 136877,
"self": 0.0,
"children": {
"worker_root": {
"total": 5160.678578907937,
"count": 136877,
"is_parallel": true,
"self": 2102.7928116408716,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00643023500003892,
"count": 1,
"is_parallel": true,
"self": 0.0036850420000291706,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002745193000009749,
"count": 8,
"is_parallel": true,
"self": 0.002745193000009749
}
}
},
"UnityEnvironment.step": {
"total": 0.04667099700009203,
"count": 1,
"is_parallel": true,
"self": 0.0005486900001869799,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000498769999921933,
"count": 1,
"is_parallel": true,
"self": 0.000498769999921933
},
"communicator.exchange": {
"total": 0.04404981499999394,
"count": 1,
"is_parallel": true,
"self": 0.04404981499999394
},
"steps_from_proto": {
"total": 0.001573721999989175,
"count": 1,
"is_parallel": true,
"self": 0.0004126979999909963,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011610239999981786,
"count": 8,
"is_parallel": true,
"self": 0.0011610239999981786
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3057.885767267065,
"count": 136876,
"is_parallel": true,
"self": 68.16397103903591,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 48.71939049599314,
"count": 136876,
"is_parallel": true,
"self": 48.71939049599314
},
"communicator.exchange": {
"total": 2738.64240810599,
"count": 136876,
"is_parallel": true,
"self": 2738.64240810599
},
"steps_from_proto": {
"total": 202.35999762604615,
"count": 136876,
"is_parallel": true,
"self": 47.540812733948314,
"children": {
"_process_rank_one_or_two_observation": {
"total": 154.81918489209784,
"count": 1095008,
"is_parallel": true,
"self": 154.81918489209784
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1610.3248540380673,
"count": 136877,
"self": 5.613999056166449,
"children": {
"process_trajectory": {
"total": 352.00929691889417,
"count": 136877,
"self": 351.63072629889336,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3785706200008008,
"count": 4,
"self": 0.3785706200008008
}
}
},
"_update_policy": {
"total": 1252.7015580630066,
"count": 974,
"self": 486.90372908899894,
"children": {
"TorchPPOOptimizer.update": {
"total": 765.7978289740076,
"count": 48363,
"self": 765.7978289740076
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.698999767540954e-06,
"count": 1,
"self": 1.698999767540954e-06
},
"TrainerController._save_models": {
"total": 0.1401865940006246,
"count": 1,
"self": 0.0018829730006473255,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13830362099997728,
"count": 1,
"self": 0.13830362099997728
}
}
}
}
}
}
}