{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 0.42757290601730347,
"min": 0.41054922342300415,
"max": 3.2957963943481445,
"count": 8000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 13011.8984375,
"min": 7466.0107421875,
"max": 567509.6875,
"count": 8000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 35.44927536231884,
"min": 31.05128205128205,
"max": 883.5,
"count": 8000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19568.0,
"min": 10940.0,
"max": 28272.0,
"count": 8000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1672.4888193352194,
"min": 1176.001658067304,
"max": 1702.0033237518546,
"count": 8000
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 461606.9141365206,
"min": 4759.7699943113175,
"max": 511612.92683277687,
"count": 8000
},
"SoccerTwos.Step.mean": {
"value": 79999973.0,
"min": 9786.0,
"max": 79999973.0,
"count": 8000
},
"SoccerTwos.Step.sum": {
"value": 79999973.0,
"min": 9786.0,
"max": 79999973.0,
"count": 8000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.02082935720682144,
"min": -0.13657967746257782,
"max": 0.23203706741333008,
"count": 8000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -5.707243919372559,
"min": -36.603355407714844,
"max": 36.66185760498047,
"count": 8000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.020973999053239822,
"min": -0.13677963614463806,
"max": 0.23210448026657104,
"count": 8000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.746875762939453,
"min": -36.656944274902344,
"max": 36.672508239746094,
"count": 8000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 8000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 8000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.045062771896376225,
"min": -0.40625,
"max": 0.6746017687616095,
"count": 8000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 12.347199499607086,
"min": -88.36600017547607,
"max": 84.6651998758316,
"count": 8000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.045062771896376225,
"min": -0.40625,
"max": 0.6746017687616095,
"count": 8000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 12.347199499607086,
"min": -88.36600017547607,
"max": 84.6651998758316,
"count": 8000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 8000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 8000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017892520788882392,
"min": 0.010355746978893876,
"max": 0.027166249975562096,
"count": 3886
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017892520788882392,
"min": 0.010355746978893876,
"max": 0.027166249975562096,
"count": 3886
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08989812855919202,
"min": 0.002147287126475324,
"max": 0.09608878170450529,
"count": 3886
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08989812855919202,
"min": 0.002147287126475324,
"max": 0.09608878170450529,
"count": 3886
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09013221909602483,
"min": 0.002146880060900003,
"max": 0.09640246530373892,
"count": 3886
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09013221909602483,
"min": 0.002146880060900003,
"max": 0.09640246530373892,
"count": 3886
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 2.4227491957515177e-08,
"min": 2.4227491957515177e-08,
"max": 0.00029990663253112243,
"count": 3886
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 2.4227491957515177e-08,
"min": 2.4227491957515177e-08,
"max": 0.00029990663253112243,
"count": 3886
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.25,
"min": 0.25,
"max": 0.25,
"count": 3886
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.25,
"min": 0.25,
"max": 0.25,
"count": 3886
},
"SoccerTwos.Policy.Beta.mean": {
"value": 1.08034457500005e-05,
"min": 1.08034457500005e-05,
"max": 0.009996890862249997,
"count": 3886
},
"SoccerTwos.Policy.Beta.sum": {
"value": 1.08034457500005e-05,
"min": 1.08034457500005e-05,
"max": 0.009996890862249997,
"count": 3886
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693581791",
"python_version": "3.9.17 (main, Jul 5 2023, 20:41:20) \n[GCC 11.2.0]",
"command_line_arguments": "/home/mgmeskill/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=Downstrike-80M --no-graphics --num-envs=8",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693649001"
},
"total": 67209.49005119223,
"count": 1,
"self": 0.2724677324295044,
"children": {
"run_training.setup": {
"total": 0.030434798914939165,
"count": 1,
"self": 0.030434798914939165
},
"TrainerController.start_learning": {
"total": 67209.18714866089,
"count": 1,
"self": 50.75303233368322,
"children": {
"TrainerController._reset_env": {
"total": 353.8180987960659,
"count": 3974,
"self": 353.8180987960659
},
"TrainerController.advance": {
"total": 66804.30756279826,
"count": 1761378,
"self": 38.17523550847545,
"children": {
"env_step": {
"total": 36444.456804587506,
"count": 1761378,
"self": 11990.753483265638,
"children": {
"SubprocessEnvManager._take_step": {
"total": 24409.10141524719,
"count": 7446067,
"self": 604.9324098099023,
"children": {
"TorchPolicy.evaluate": {
"total": 23804.16900543729,
"count": 13567236,
"self": 23804.16900543729
}
}
},
"workers": {
"total": 44.60190607467666,
"count": 1761378,
"self": 0.0,
"children": {
"worker_root": {
"total": 536307.4673094852,
"count": 7432347,
"is_parallel": true,
"self": 440223.55442031054,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.01738327043130994,
"count": 16,
"is_parallel": true,
"self": 0.004107940476387739,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0132753299549222,
"count": 64,
"is_parallel": true,
"self": 0.0132753299549222
}
}
},
"UnityEnvironment.step": {
"total": 0.18782201362773776,
"count": 8,
"is_parallel": true,
"self": 0.006433061324059963,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0054636732675135136,
"count": 8,
"is_parallel": true,
"self": 0.0054636732675135136
},
"communicator.exchange": {
"total": 0.1595055148936808,
"count": 8,
"is_parallel": true,
"self": 0.1595055148936808
},
"steps_from_proto": {
"total": 0.016419764142483473,
"count": 16,
"is_parallel": true,
"self": 0.002827553078532219,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.013592211063951254,
"count": 64,
"is_parallel": true,
"self": 0.013592211063951254
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 96002.2530998285,
"count": 7432339,
"is_parallel": true,
"self": 6241.116368692368,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3991.6639560568146,
"count": 7432339,
"is_parallel": true,
"self": 3991.6639560568146
},
"communicator.exchange": {
"total": 68832.0445730905,
"count": 7432339,
"is_parallel": true,
"self": 68832.0445730905
},
"steps_from_proto": {
"total": 16937.428201988805,
"count": 14864678,
"is_parallel": true,
"self": 2827.5420445734635,
"children": {
"_process_rank_one_or_two_observation": {
"total": 14109.886157415342,
"count": 59458712,
"is_parallel": true,
"self": 14109.886157415342
}
}
}
}
},
"steps_from_proto": {
"total": 81.6597893461585,
"count": 63568,
"is_parallel": true,
"self": 12.982048768084496,
"children": {
"_process_rank_one_or_two_observation": {
"total": 68.67774057807401,
"count": 254272,
"is_parallel": true,
"self": 68.67774057807401
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 30321.67552270228,
"count": 1761378,
"self": 647.2967732353136,
"children": {
"process_trajectory": {
"total": 12738.812847053166,
"count": 1761378,
"self": 12726.122279091273,
"children": {
"RLTrainer._checkpoint": {
"total": 12.690567961893976,
"count": 40,
"self": 12.690567961893976
}
}
},
"_update_policy": {
"total": 16935.5659024138,
"count": 3886,
"self": 9506.596401475836,
"children": {
"TorchPOCAOptimizer.update": {
"total": 7428.969500937965,
"count": 116586,
"self": 7428.969500937965
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.820766091346741e-07,
"count": 1,
"self": 5.820766091346741e-07
},
"TrainerController._save_models": {
"total": 0.3084541507996619,
"count": 1,
"self": 0.0019163107499480247,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30653784004971385,
"count": 1,
"self": 0.30653784004971385
}
}
}
}
}
}
}