{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3437712788581848,
"min": 0.33537760376930237,
"max": 1.4333080053329468,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10263.6357421875,
"min": 10093.5244140625,
"max": 43480.83203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989920.0,
"min": 29875.0,
"max": 989920.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989920.0,
"min": 29875.0,
"max": 989920.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7423909306526184,
"min": -0.1503501981496811,
"max": 0.799399733543396,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 216.0357666015625,
"min": -35.63299560546875,
"max": 230.2913818359375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.022701449692249298,
"min": -0.0016920369816944003,
"max": 0.3304099440574646,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.606122016906738,
"min": -0.44162166118621826,
"max": 79.12263488769531,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06559150199089865,
"min": 0.06517917172365668,
"max": 0.07350564055702231,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.918281027872581,
"min": 0.5092075601174635,
"max": 1.0821176995009676,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013930804000535448,
"min": 0.0008228416610031504,
"max": 0.01658729403529183,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19503125600749627,
"min": 0.010696941593040955,
"max": 0.2322221164940856,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.657233161907148e-06,
"min": 7.657233161907148e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010720126426670007,
"min": 0.00010720126426670007,
"max": 0.003634311488562899,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255237857142858,
"min": 0.10255237857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357333,
"min": 1.3886848,
"max": 2.6114371000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002649826192857145,
"min": 0.0002649826192857145,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037097566700000028,
"min": 0.0037097566700000028,
"max": 0.12116256629,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010915050283074379,
"min": 0.010915050283074379,
"max": 0.4682627320289612,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1528107076883316,
"min": 0.1528107076883316,
"max": 3.277839183807373,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 249.864406779661,
"min": 239.20634920634922,
"max": 991.34375,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29484.0,
"min": 16802.0,
"max": 32177.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7325310765945612,
"min": -0.9297875503543764,
"max": 1.7608015713474108,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 206.17119811475277,
"min": -29.753201611340046,
"max": 221.86099798977375,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7325310765945612,
"min": -0.9297875503543764,
"max": 1.7608015713474108,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 206.17119811475277,
"min": -29.753201611340046,
"max": 221.86099798977375,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028428802749681573,
"min": 0.02820228164692087,
"max": 8.735901765087071,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.383027527212107,
"min": 3.383027527212107,
"max": 148.51033000648022,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678201739",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678204127"
},
"total": 2388.10684482,
"count": 1,
"self": 0.4754432909994648,
"children": {
"run_training.setup": {
"total": 0.1146581870002592,
"count": 1,
"self": 0.1146581870002592
},
"TrainerController.start_learning": {
"total": 2387.516743342,
"count": 1,
"self": 1.4391384869982176,
"children": {
"TrainerController._reset_env": {
"total": 6.244204351000008,
"count": 1,
"self": 6.244204351000008
},
"TrainerController.advance": {
"total": 2379.7456068200013,
"count": 64388,
"self": 1.6047111389011661,
"children": {
"env_step": {
"total": 1631.165764272026,
"count": 64388,
"self": 1515.5709647820272,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.733885939007,
"count": 64388,
"self": 4.67662421812247,
"children": {
"TorchPolicy.evaluate": {
"total": 110.05726172088453,
"count": 62561,
"self": 36.954364754810285,
"children": {
"TorchPolicy.sample_actions": {
"total": 73.10289696607424,
"count": 62561,
"self": 73.10289696607424
}
}
}
}
},
"workers": {
"total": 0.8609135509918815,
"count": 64388,
"self": 0.0,
"children": {
"worker_root": {
"total": 2382.0723963959567,
"count": 64388,
"is_parallel": true,
"self": 985.8485068878772,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021285580000949267,
"count": 1,
"is_parallel": true,
"self": 0.0007235270004457561,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014050309996491706,
"count": 8,
"is_parallel": true,
"self": 0.0014050309996491706
}
}
},
"UnityEnvironment.step": {
"total": 0.046003017999737494,
"count": 1,
"is_parallel": true,
"self": 0.0005170179997548985,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004415489997882105,
"count": 1,
"is_parallel": true,
"self": 0.0004415489997882105
},
"communicator.exchange": {
"total": 0.04335261899996112,
"count": 1,
"is_parallel": true,
"self": 0.04335261899996112
},
"steps_from_proto": {
"total": 0.001691832000233262,
"count": 1,
"is_parallel": true,
"self": 0.00045371700025498285,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012381149999782792,
"count": 8,
"is_parallel": true,
"self": 0.0012381149999782792
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1396.2238895080795,
"count": 64387,
"is_parallel": true,
"self": 32.15003696677468,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.19595579812858,
"count": 64387,
"is_parallel": true,
"self": 23.19595579812858
},
"communicator.exchange": {
"total": 1246.9346158521012,
"count": 64387,
"is_parallel": true,
"self": 1246.9346158521012
},
"steps_from_proto": {
"total": 93.94328089107512,
"count": 64387,
"is_parallel": true,
"self": 22.761949703009577,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.18133118806554,
"count": 515096,
"is_parallel": true,
"self": 71.18133118806554
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 746.975131409074,
"count": 64388,
"self": 2.8486742391692133,
"children": {
"process_trajectory": {
"total": 164.23160884891104,
"count": 64388,
"self": 164.00509268991073,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22651615900031175,
"count": 2,
"self": 0.22651615900031175
}
}
},
"_update_policy": {
"total": 579.8948483209938,
"count": 459,
"self": 225.73924463998947,
"children": {
"TorchPPOOptimizer.update": {
"total": 354.1556036810043,
"count": 22767,
"self": 354.1556036810043
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.500001058564521e-07,
"count": 1,
"self": 8.500001058564521e-07
},
"TrainerController._save_models": {
"total": 0.08779283400053828,
"count": 1,
"self": 0.0013612080001621507,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08643162600037613,
"count": 1,
"self": 0.08643162600037613
}
}
}
}
}
}
}