{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7511314153671265,
"min": 0.7437450289726257,
"max": 1.4813114404678345,
"count": 10
},
"Pyramids.Policy.Entropy.sum": {
"value": 22798.33984375,
"min": 21872.0546875,
"max": 44937.0625,
"count": 10
},
"Pyramids.Step.mean": {
"value": 299964.0,
"min": 29883.0,
"max": 299964.0,
"count": 10
},
"Pyramids.Step.sum": {
"value": 299964.0,
"min": 29883.0,
"max": 299964.0,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03165360167622566,
"min": -0.09121362119913101,
"max": 0.03165360167622566,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 8.008360862731934,
"min": -21.98248291015625,
"max": 8.008360862731934,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.05212663114070892,
"min": 0.05212663114070892,
"max": 0.32940182089805603,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 13.188037872314453,
"min": 13.188037872314453,
"max": 79.3858413696289,
"count": 10
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06784714669690607,
"min": 0.06709104594528344,
"max": 0.07447572749687571,
"count": 10
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9498600537566849,
"min": 0.52133009247813,
"max": 1.0095707249927446,
"count": 10
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.009227018150519215,
"min": 0.0011398649520432277,
"max": 0.009227018150519215,
"count": 10
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.12917825410726902,
"min": 0.011398649520432277,
"max": 0.12917825410726902,
"count": 10
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.482652362928571e-05,
"min": 1.482652362928571e-05,
"max": 0.0002841195767220476,
"count": 10
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00020757133080999995,
"min": 0.00020757133080999995,
"max": 0.002554399148533666,
"count": 10
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10494214285714287,
"min": 0.10494214285714287,
"max": 0.19470652380952386,
"count": 10
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4691900000000002,
"min": 1.362945666666667,
"max": 2.1704259999999995,
"count": 10
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0005037200714285714,
"min": 0.0005037200714285714,
"max": 0.00947118172857143,
"count": 10
},
"Pyramids.Policy.Beta.sum": {
"value": 0.007052081,
"min": 0.007052081,
"max": 0.08516148670000002,
"count": 10
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.045491062104701996,
"min": 0.045491062104701996,
"max": 0.522769033908844,
"count": 10
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.6368748545646667,
"min": 0.6368748545646667,
"max": 3.6593832969665527,
"count": 10
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 761.6304347826087,
"min": 761.6304347826087,
"max": 989.53125,
"count": 10
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 35035.0,
"min": 16554.0,
"max": 35035.0,
"count": 10
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.32505648145857063,
"min": -0.9279875513166189,
"max": 0.32505648145857063,
"count": 10
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 14.95259814709425,
"min": -29.695601642131805,
"max": 14.95259814709425,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.32505648145857063,
"min": -0.9279875513166189,
"max": 0.32505648145857063,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 14.95259814709425,
"min": -29.695601642131805,
"max": 14.95259814709425,
"count": 10
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.3617216365654831,
"min": 0.3617216365654831,
"max": 9.541951926315532,
"count": 10
},
"Pyramids.Policy.RndReward.sum": {
"value": 16.639195282012224,
"min": 13.375551637262106,
"max": 162.21318274736404,
"count": 10
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698002175",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698002821"
},
"total": 645.9526188479995,
"count": 1,
"self": 0.9879408690003402,
"children": {
"run_training.setup": {
"total": 0.050270141999135376,
"count": 1,
"self": 0.050270141999135376
},
"TrainerController.start_learning": {
"total": 644.914407837,
"count": 1,
"self": 0.4073048929903962,
"children": {
"TrainerController._reset_env": {
"total": 3.6032102939998367,
"count": 1,
"self": 3.6032102939998367
},
"TrainerController.advance": {
"total": 640.7697172650105,
"count": 18959,
"self": 0.43126614696848264,
"children": {
"env_step": {
"total": 440.18817085096,
"count": 18959,
"self": 397.93693827902007,
"children": {
"SubprocessEnvManager._take_step": {
"total": 41.99118895693664,
"count": 18959,
"self": 1.4151202619568721,
"children": {
"TorchPolicy.evaluate": {
"total": 40.57606869497977,
"count": 18814,
"self": 40.57606869497977
}
}
},
"workers": {
"total": 0.2600436150032692,
"count": 18959,
"self": 0.0,
"children": {
"worker_root": {
"total": 643.4193196740189,
"count": 18959,
"is_parallel": true,
"self": 280.13427952196434,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016816420002214727,
"count": 1,
"is_parallel": true,
"self": 0.0005361269986678963,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011455150015535764,
"count": 8,
"is_parallel": true,
"self": 0.0011455150015535764
}
}
},
"UnityEnvironment.step": {
"total": 0.051440174999697774,
"count": 1,
"is_parallel": true,
"self": 0.0005718729998989147,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005056119998698705,
"count": 1,
"is_parallel": true,
"self": 0.0005056119998698705
},
"communicator.exchange": {
"total": 0.04866153100010706,
"count": 1,
"is_parallel": true,
"self": 0.04866153100010706
},
"steps_from_proto": {
"total": 0.0017011589998219279,
"count": 1,
"is_parallel": true,
"self": 0.0003877240005749627,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013134349992469652,
"count": 8,
"is_parallel": true,
"self": 0.0013134349992469652
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 363.28504015205453,
"count": 18958,
"is_parallel": true,
"self": 10.745671602186121,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.37636088792442,
"count": 18958,
"is_parallel": true,
"self": 7.37636088792442
},
"communicator.exchange": {
"total": 315.1661472709302,
"count": 18958,
"is_parallel": true,
"self": 315.1661472709302
},
"steps_from_proto": {
"total": 29.996860391013797,
"count": 18958,
"is_parallel": true,
"self": 5.994754348300376,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.00210604271342,
"count": 151664,
"is_parallel": true,
"self": 24.00210604271342
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 200.15028026708205,
"count": 18959,
"self": 0.7030271330986579,
"children": {
"process_trajectory": {
"total": 36.11061589198198,
"count": 18959,
"self": 36.11061589198198
},
"_update_policy": {
"total": 163.33663724200142,
"count": 124,
"self": 97.92028195296552,
"children": {
"TorchPPOOptimizer.update": {
"total": 65.41635528903589,
"count": 6867,
"self": 65.41635528903589
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3779999790131114e-06,
"count": 1,
"self": 1.3779999790131114e-06
},
"TrainerController._save_models": {
"total": 0.13417400699927384,
"count": 1,
"self": 0.0018966119987453567,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13227739500052849,
"count": 1,
"self": 0.13227739500052849
}
}
}
}
}
}
}