{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.2767457962036133,
"min": 1.2767457962036133,
"max": 1.5445855855941772,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 38241.08984375,
"min": 38241.08984375,
"max": 46856.546875,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89937.0,
"min": 29952.0,
"max": 89937.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89937.0,
"min": 29952.0,
"max": 89937.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.11164543777704239,
"min": -0.11164543777704239,
"max": 0.022019919008016586,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -26.794904708862305,
"min": -26.794904708862305,
"max": 5.21872091293335,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0791650339961052,
"min": -0.04996652528643608,
"max": 0.08993568271398544,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 18.999608993530273,
"min": -11.842066764831543,
"max": 21.67449951171875,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06853616477368611,
"min": 0.06853616477368611,
"max": 0.07202292907404835,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.616825482963175,
"min": 0.49139909251494707,
"max": 0.616825482963175,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00025812777231717015,
"min": 0.00025812777231717015,
"max": 0.0032718631194126957,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.0023231499508545314,
"min": 0.0023231499508545314,
"max": 0.02290304183588887,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.47704084098889e-05,
"min": 7.47704084098889e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0006729336756890001,
"min": 0.0006729336756890001,
"max": 0.0017605441131519997,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12492344444444442,
"min": 0.12492344444444442,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.1243109999999998,
"min": 1.1243109999999998,
"max": 1.2868480000000002,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0024998521,
"min": 0.0024998521,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.022498668899999998,
"min": 0.022498668899999998,
"max": 0.058696115199999996,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.05451652780175209,
"min": 0.05451652780175209,
"max": 0.23053312301635742,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.4906487464904785,
"min": 0.4906487464904785,
"max": 1.613731861114502,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 986.6666666666666,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31968.0,
"min": 15984.0,
"max": 32560.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.999962551984936,
"min": -1.0000000521540642,
"max": -0.9270303556413362,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -31.998801663517952,
"min": -31.998801663517952,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.999962551984936,
"min": -1.0000000521540642,
"max": -0.9270303556413362,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -31.998801663517952,
"min": -31.998801663517952,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.6388505452196114,
"min": 0.6388505452196114,
"max": 4.766432554461062,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 20.443217447027564,
"min": 20.443217447027564,
"max": 76.26292087137699,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714472980",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1714473170"
},
"total": 189.72320730699994,
"count": 1,
"self": 0.7974911149997297,
"children": {
"run_training.setup": {
"total": 0.049424407000060455,
"count": 1,
"self": 0.049424407000060455
},
"TrainerController.start_learning": {
"total": 188.87629178500015,
"count": 1,
"self": 0.12908870599767397,
"children": {
"TrainerController._reset_env": {
"total": 2.5370169899999837,
"count": 1,
"self": 2.5370169899999837
},
"TrainerController.advance": {
"total": 186.0977826740027,
"count": 6259,
"self": 0.13193774801152358,
"children": {
"env_step": {
"total": 123.53175738999334,
"count": 6259,
"self": 110.20533517000308,
"children": {
"SubprocessEnvManager._take_step": {
"total": 13.247306128988384,
"count": 6259,
"self": 0.4603086729855477,
"children": {
"TorchPolicy.evaluate": {
"total": 12.786997456002837,
"count": 6256,
"self": 12.786997456002837
}
}
},
"workers": {
"total": 0.07911609100187889,
"count": 6259,
"self": 0.0,
"children": {
"worker_root": {
"total": 188.19121767800175,
"count": 6259,
"is_parallel": true,
"self": 89.16534186600438,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002008380000006582,
"count": 1,
"is_parallel": true,
"self": 0.0005774979999841889,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014308820000223932,
"count": 8,
"is_parallel": true,
"self": 0.0014308820000223932
}
}
},
"UnityEnvironment.step": {
"total": 0.08148452499995074,
"count": 1,
"is_parallel": true,
"self": 0.0006406700003935839,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004612399998222827,
"count": 1,
"is_parallel": true,
"self": 0.0004612399998222827
},
"communicator.exchange": {
"total": 0.07839408599988928,
"count": 1,
"is_parallel": true,
"self": 0.07839408599988928
},
"steps_from_proto": {
"total": 0.0019885289998455846,
"count": 1,
"is_parallel": true,
"self": 0.00037853500020901265,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001609993999636572,
"count": 8,
"is_parallel": true,
"self": 0.001609993999636572
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 99.02587581199737,
"count": 6258,
"is_parallel": true,
"self": 3.3534848110157327,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.2744243189908957,
"count": 6258,
"is_parallel": true,
"self": 2.2744243189908957
},
"communicator.exchange": {
"total": 83.7679420869963,
"count": 6258,
"is_parallel": true,
"self": 83.7679420869963
},
"steps_from_proto": {
"total": 9.630024594994438,
"count": 6258,
"is_parallel": true,
"self": 1.916804170012938,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7.7132204249815,
"count": 50064,
"is_parallel": true,
"self": 7.7132204249815
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 62.43408753599783,
"count": 6259,
"self": 0.16465694299790812,
"children": {
"process_trajectory": {
"total": 11.984148705000052,
"count": 6259,
"self": 11.984148705000052
},
"_update_policy": {
"total": 50.28528188799987,
"count": 27,
"self": 29.039742412005126,
"children": {
"TorchPPOOptimizer.update": {
"total": 21.245539475994747,
"count": 2289,
"self": 21.245539475994747
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.919999683916103e-07,
"count": 1,
"self": 9.919999683916103e-07
},
"TrainerController._save_models": {
"total": 0.11240242299982128,
"count": 1,
"self": 0.0014641039997513872,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1109383190000699,
"count": 1,
"self": 0.1109383190000699
}
}
}
}
}
}
}