{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5147733688354492,
"min": 0.5147733688354492,
"max": 1.3915784358978271,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15533.8017578125,
"min": 15496.7490234375,
"max": 42214.921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3318574130535126,
"min": -0.1150316521525383,
"max": 0.33768603205680847,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 86.28292846679688,
"min": -27.556777954101562,
"max": 88.8114242553711,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.19503264129161835,
"min": -0.41228023171424866,
"max": 0.5452406406402588,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -50.70848846435547,
"min": -108.42970275878906,
"max": 129.22203063964844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0690451192798622,
"min": 0.06519956953121855,
"max": 0.07267262038112166,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9666316699180708,
"min": 0.49768081643292816,
"max": 1.0502006309300973,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016139083400731308,
"min": 0.0004566777733421893,
"max": 0.026887191072586046,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2259471676102383,
"min": 0.00639348882679065,
"max": 0.37642067501620463,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.507233211907138e-06,
"min": 7.507233211907138e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010510126496669993,
"min": 0.00010510126496669993,
"max": 0.003633186188938,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250237857142856,
"min": 0.10250237857142856,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350333,
"min": 1.3886848,
"max": 2.6110620000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025998761928571417,
"min": 0.00025998761928571417,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036398266699999987,
"min": 0.0036398266699999987,
"max": 0.1211250938,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015248218551278114,
"min": 0.015248218551278114,
"max": 0.6263422966003418,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2134750634431839,
"min": 0.2134750634431839,
"max": 4.384396076202393,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 543.2280701754386,
"min": 516.457627118644,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30964.0,
"min": 15984.0,
"max": 33185.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.176003475199666,
"min": -1.0000000521540642,
"max": 1.2940841834796102,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 67.03219808638096,
"min": -30.178401708602905,
"max": 73.76279845833778,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.176003475199666,
"min": -1.0000000521540642,
"max": 1.2940841834796102,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 67.03219808638096,
"min": -30.178401708602905,
"max": 73.76279845833778,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08676831916091032,
"min": 0.08512498271059789,
"max": 12.522377313114703,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.945794192171888,
"min": 4.630836577533046,
"max": 200.35803700983524,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704144425",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704146714"
},
"total": 2288.9394708699997,
"count": 1,
"self": 0.48836045599909994,
"children": {
"run_training.setup": {
"total": 0.048447914000007586,
"count": 1,
"self": 0.048447914000007586
},
"TrainerController.start_learning": {
"total": 2288.4026625000006,
"count": 1,
"self": 1.9298669879885892,
"children": {
"TrainerController._reset_env": {
"total": 2.0680211459998645,
"count": 1,
"self": 2.0680211459998645
},
"TrainerController.advance": {
"total": 2284.309719242011,
"count": 63378,
"self": 2.0623450781777137,
"children": {
"env_step": {
"total": 1627.9333346039411,
"count": 63378,
"self": 1467.1519761338936,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.61010757702888,
"count": 63378,
"self": 5.67159414404432,
"children": {
"TorchPolicy.evaluate": {
"total": 153.93851343298456,
"count": 62565,
"self": 153.93851343298456
}
}
},
"workers": {
"total": 1.1712508930186232,
"count": 63378,
"self": 0.0,
"children": {
"worker_root": {
"total": 2282.239479110946,
"count": 63378,
"is_parallel": true,
"self": 958.1015344209409,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016421860000264132,
"count": 1,
"is_parallel": true,
"self": 0.0005345989993656985,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011075870006607147,
"count": 8,
"is_parallel": true,
"self": 0.0011075870006607147
}
}
},
"UnityEnvironment.step": {
"total": 0.050174135000361275,
"count": 1,
"is_parallel": true,
"self": 0.0006168850004542037,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00050783999995474,
"count": 1,
"is_parallel": true,
"self": 0.00050783999995474
},
"communicator.exchange": {
"total": 0.04726238400007787,
"count": 1,
"is_parallel": true,
"self": 0.04726238400007787
},
"steps_from_proto": {
"total": 0.0017870259998744586,
"count": 1,
"is_parallel": true,
"self": 0.00039355099897875334,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013934750008957053,
"count": 8,
"is_parallel": true,
"self": 0.0013934750008957053
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1324.137944690005,
"count": 63377,
"is_parallel": true,
"self": 38.34160748796148,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.994674370017947,
"count": 63377,
"is_parallel": true,
"self": 26.994674370017947
},
"communicator.exchange": {
"total": 1147.7730831430135,
"count": 63377,
"is_parallel": true,
"self": 1147.7730831430135
},
"steps_from_proto": {
"total": 111.02857968901208,
"count": 63377,
"is_parallel": true,
"self": 23.82401518008828,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.2045645089238,
"count": 507016,
"is_parallel": true,
"self": 87.2045645089238
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 654.3140395598921,
"count": 63378,
"self": 3.5409865218221057,
"children": {
"process_trajectory": {
"total": 135.4733876830678,
"count": 63378,
"self": 135.21789240206817,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25549528099963936,
"count": 2,
"self": 0.25549528099963936
}
}
},
"_update_policy": {
"total": 515.2996653550022,
"count": 446,
"self": 304.63733262791493,
"children": {
"TorchPPOOptimizer.update": {
"total": 210.66233272708723,
"count": 22818,
"self": 210.66233272708723
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.10000380827114e-07,
"count": 1,
"self": 9.10000380827114e-07
},
"TrainerController._save_models": {
"total": 0.09505421400081104,
"count": 1,
"self": 0.0015733880009065615,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09348082599990448,
"count": 1,
"self": 0.09348082599990448
}
}
}
}
}
}
}