{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.33877208828926086,
"min": 0.33727115392684937,
"max": 1.4872770309448242,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10228.20703125,
"min": 10101.9453125,
"max": 45118.03515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989978.0,
"min": 29952.0,
"max": 989978.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989978.0,
"min": 29952.0,
"max": 989978.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5765272378921509,
"min": -0.08277822285890579,
"max": 0.6691191792488098,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 160.27456665039062,
"min": -19.618438720703125,
"max": 190.02984619140625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.022006597369909286,
"min": -0.02479897439479828,
"max": 0.2924167215824127,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -6.117834091186523,
"min": -6.968511581420898,
"max": 70.18001556396484,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06934189029802315,
"min": 0.0645636961387936,
"max": 0.07473253857217077,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9707864641723242,
"min": 0.5000410186539634,
"max": 1.0449825032264926,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.019753128488889517,
"min": 0.0012307270303317456,
"max": 0.019753128488889517,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.27654379884445324,
"min": 0.00861508921232222,
"max": 0.27654379884445324,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.578354616771425e-06,
"min": 7.578354616771425e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010609696463479994,
"min": 0.00010609696463479994,
"max": 0.0037579675473441996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252608571428572,
"min": 0.10252608571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4353652000000001,
"min": 1.3691136000000002,
"max": 2.6526558,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002623559628571428,
"min": 0.0002623559628571428,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003672983479999999,
"min": 0.003672983479999999,
"max": 0.12528031442,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007311108056455851,
"min": 0.007311108056455851,
"max": 0.41977185010910034,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10235550999641418,
"min": 0.10235550999641418,
"max": 2.9384028911590576,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 337.7529411764706,
"min": 291.3942307692308,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28709.0,
"min": 15984.0,
"max": 33096.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6151670356883723,
"min": -1.0000000521540642,
"max": 1.688786389251936,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 137.28919803351164,
"min": -32.000001668930054,
"max": 173.9449980929494,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6151670356883723,
"min": -1.0000000521540642,
"max": 1.688786389251936,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 137.28919803351164,
"min": -32.000001668930054,
"max": 173.9449980929494,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.025721214450930025,
"min": 0.02461353027418898,
"max": 8.937071155756712,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.186303228329052,
"min": 2.186303228329052,
"max": 142.9931384921074,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692234816",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692237084"
},
"total": 2268.006400372,
"count": 1,
"self": 0.5278051460004463,
"children": {
"run_training.setup": {
"total": 0.04482449399984034,
"count": 1,
"self": 0.04482449399984034
},
"TrainerController.start_learning": {
"total": 2267.4337707319996,
"count": 1,
"self": 1.357420555997578,
"children": {
"TrainerController._reset_env": {
"total": 5.8853299100001095,
"count": 1,
"self": 5.8853299100001095
},
"TrainerController.advance": {
"total": 2260.0954500450025,
"count": 64055,
"self": 1.4199619780042667,
"children": {
"env_step": {
"total": 1611.9479417230202,
"count": 64055,
"self": 1502.0703968258042,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.04844567911869,
"count": 64055,
"self": 4.755208933123413,
"children": {
"TorchPolicy.evaluate": {
"total": 104.29323674599527,
"count": 62564,
"self": 104.29323674599527
}
}
},
"workers": {
"total": 0.8290992180973262,
"count": 64055,
"self": 0.0,
"children": {
"worker_root": {
"total": 2262.014207302008,
"count": 64055,
"is_parallel": true,
"self": 876.8510251279345,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002703125999914846,
"count": 1,
"is_parallel": true,
"self": 0.0007476870000573399,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001955438999857506,
"count": 8,
"is_parallel": true,
"self": 0.001955438999857506
}
}
},
"UnityEnvironment.step": {
"total": 0.10979249999991225,
"count": 1,
"is_parallel": true,
"self": 0.000558400999807418,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005422369999905641,
"count": 1,
"is_parallel": true,
"self": 0.0005422369999905641
},
"communicator.exchange": {
"total": 0.1022418130000915,
"count": 1,
"is_parallel": true,
"self": 0.1022418130000915
},
"steps_from_proto": {
"total": 0.006450049000022773,
"count": 1,
"is_parallel": true,
"self": 0.0004101039999113709,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006039945000111402,
"count": 8,
"is_parallel": true,
"self": 0.006039945000111402
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1385.1631821740734,
"count": 64054,
"is_parallel": true,
"self": 33.7912233169925,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.862670029966466,
"count": 64054,
"is_parallel": true,
"self": 23.862670029966466
},
"communicator.exchange": {
"total": 1219.1033712270878,
"count": 64054,
"is_parallel": true,
"self": 1219.1033712270878
},
"steps_from_proto": {
"total": 108.40591760002667,
"count": 64054,
"is_parallel": true,
"self": 21.32374468803164,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.08217291199503,
"count": 512432,
"is_parallel": true,
"self": 87.08217291199503
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 646.727546343978,
"count": 64055,
"self": 2.6087629029364052,
"children": {
"process_trajectory": {
"total": 115.03947876304665,
"count": 64055,
"self": 114.77165831404636,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2678204490002827,
"count": 2,
"self": 0.2678204490002827
}
}
},
"_update_policy": {
"total": 529.079304677995,
"count": 452,
"self": 345.36385924199703,
"children": {
"TorchPPOOptimizer.update": {
"total": 183.71544543599794,
"count": 22788,
"self": 183.71544543599794
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.870000212686136e-07,
"count": 1,
"self": 9.870000212686136e-07
},
"TrainerController._save_models": {
"total": 0.09556923399941297,
"count": 1,
"self": 0.0014177879993440001,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09415144600006897,
"count": 1,
"self": 0.09415144600006897
}
}
}
}
}
}
}