{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8195495009422302,
"min": 0.8195495009422302,
"max": 1.4523032903671265,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 24547.146484375,
"min": 24547.146484375,
"max": 44057.07421875,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89984.0,
"min": 29982.0,
"max": 89984.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89984.0,
"min": 29982.0,
"max": 89984.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.1006389930844307,
"min": -0.1006389930844307,
"max": 0.038734257221221924,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -24.253997802734375,
"min": -24.253997802734375,
"max": 9.21875286102295,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.1561596542596817,
"min": 0.1561596542596817,
"max": 0.334995299577713,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 37.63447570800781,
"min": 37.63447570800781,
"max": 79.7288818359375,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06844947825292931,
"min": 0.06837438367408559,
"max": 0.07299376009678245,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.6844947825292931,
"min": 0.5109563206774771,
"max": 0.6844947825292931,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0007997566469354043,
"min": 0.0007997566469354043,
"max": 0.005236895444029538,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.007997566469354043,
"min": 0.007997566469354043,
"max": 0.036658268108206764,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.57182747606e-05,
"min": 7.57182747606e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0007571827476059999,
"min": 0.0007571827476059999,
"max": 0.0017605441131519997,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12523940000000003,
"min": 0.12523940000000003,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.2523940000000002,
"min": 1.2523940000000002,
"max": 1.5543240000000003,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0025314160600000005,
"min": 0.0025314160600000005,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.025314160600000003,
"min": 0.025314160600000003,
"max": 0.058696115199999996,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.10603547096252441,
"min": 0.10603547096252441,
"max": 0.4401267170906067,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.0603547096252441,
"min": 1.0603547096252441,
"max": 3.0808870792388916,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 997.3125,
"min": 987.1176470588235,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31914.0,
"min": 16781.0,
"max": 31914.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9357563024386764,
"min": -1.0000000521540642,
"max": -0.8704118185183581,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -29.944201678037643,
"min": -31.00000161677599,
"max": -14.797000914812088,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9357563024386764,
"min": -1.0000000521540642,
"max": -0.8704118185183581,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -29.944201678037643,
"min": -31.00000161677599,
"max": -14.797000914812088,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.2415015193400905,
"min": 1.2415015193400905,
"max": 8.320037348305478,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 39.728048618882895,
"min": 39.728048618882895,
"max": 141.44063492119312,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687580809",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687581024"
},
"total": 215.19443564600004,
"count": 1,
"self": 0.4748994630000425,
"children": {
"run_training.setup": {
"total": 0.037180140999964806,
"count": 1,
"self": 0.037180140999964806
},
"TrainerController.start_learning": {
"total": 214.68235604200004,
"count": 1,
"self": 0.150043250999488,
"children": {
"TrainerController._reset_env": {
"total": 4.194030967999993,
"count": 1,
"self": 4.194030967999993
},
"TrainerController.advance": {
"total": 210.2167910130006,
"count": 6313,
"self": 0.16316186700260005,
"children": {
"env_step": {
"total": 143.87305496499852,
"count": 6313,
"self": 130.86981622400225,
"children": {
"SubprocessEnvManager._take_step": {
"total": 12.912490343999082,
"count": 6313,
"self": 0.5456891720008343,
"children": {
"TorchPolicy.evaluate": {
"total": 12.366801171998247,
"count": 6310,
"self": 12.366801171998247
}
}
},
"workers": {
"total": 0.09074839699718495,
"count": 6313,
"self": 0.0,
"children": {
"worker_root": {
"total": 214.04045604899994,
"count": 6313,
"is_parallel": true,
"self": 95.71284903899794,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.012745282000025782,
"count": 1,
"is_parallel": true,
"self": 0.010829926000042178,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019153559999836034,
"count": 8,
"is_parallel": true,
"self": 0.0019153559999836034
}
}
},
"UnityEnvironment.step": {
"total": 0.05177804099997729,
"count": 1,
"is_parallel": true,
"self": 0.0005615599999941878,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000518220999992991,
"count": 1,
"is_parallel": true,
"self": 0.000518220999992991
},
"communicator.exchange": {
"total": 0.048708025999985693,
"count": 1,
"is_parallel": true,
"self": 0.048708025999985693
},
"steps_from_proto": {
"total": 0.0019902340000044205,
"count": 1,
"is_parallel": true,
"self": 0.0003993900000978101,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015908439999066104,
"count": 8,
"is_parallel": true,
"self": 0.0015908439999066104
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 118.327607010002,
"count": 6312,
"is_parallel": true,
"self": 3.5433713479990274,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.519364049998728,
"count": 6312,
"is_parallel": true,
"self": 2.519364049998728
},
"communicator.exchange": {
"total": 100.9817579410045,
"count": 6312,
"is_parallel": true,
"self": 100.9817579410045
},
"steps_from_proto": {
"total": 11.28311367099974,
"count": 6312,
"is_parallel": true,
"self": 2.239674948974482,
"children": {
"_process_rank_one_or_two_observation": {
"total": 9.043438722025257,
"count": 50496,
"is_parallel": true,
"self": 9.043438722025257
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 66.18057418099949,
"count": 6313,
"self": 0.20280767199267302,
"children": {
"process_trajectory": {
"total": 11.796177776006914,
"count": 6313,
"self": 11.796177776006914
},
"_update_policy": {
"total": 54.1815887329999,
"count": 31,
"self": 35.24221509699959,
"children": {
"TorchPPOOptimizer.update": {
"total": 18.93937363600031,
"count": 2274,
"self": 18.93937363600031
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.453999971090525e-06,
"count": 1,
"self": 1.453999971090525e-06
},
"TrainerController._save_models": {
"total": 0.12148935599998367,
"count": 1,
"self": 0.001300324999874647,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12018903100010903,
"count": 1,
"self": 0.12018903100010903
}
}
}
}
}
}
}