{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 0.7722052335739136,
"min": 0.7508076429367065,
"max": 3.2957513332366943,
"count": 3103
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 20040.26953125,
"min": 421.8561706542969,
"max": 300887.625,
"count": 3103
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 58.50588235294118,
"min": 39.21774193548387,
"max": 999.0,
"count": 3103
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19892.0,
"min": 7992.0,
"max": 38800.0,
"count": 3103
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1655.4551733286996,
"min": 1194.1831918904513,
"max": 1726.8866978130923,
"count": 3091
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 281427.37946587894,
"min": 2392.139449323783,
"max": 397688.18605291774,
"count": 3091
},
"SoccerTwos.Step.mean": {
"value": 31029995.0,
"min": 9816.0,
"max": 31029995.0,
"count": 3103
},
"SoccerTwos.Step.sum": {
"value": 31029995.0,
"min": 9816.0,
"max": 31029995.0,
"count": 3103
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.012561212293803692,
"min": -0.10789082944393158,
"max": 0.14437440037727356,
"count": 3103
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 2.135406017303467,
"min": -21.25676918029785,
"max": 20.037342071533203,
"count": 3103
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.009676625020802021,
"min": -0.10898121446371078,
"max": 0.14241483807563782,
"count": 3103
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.6450262069702148,
"min": -21.707752227783203,
"max": 19.850662231445312,
"count": 3103
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 3103
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 3103
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.04213176544974832,
"min": -0.4895749995484948,
"max": 0.5014400005340576,
"count": 3103
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -7.162400126457214,
"min": -69.17599999904633,
"max": 64.46680080890656,
"count": 3103
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.04213176544974832,
"min": -0.4895749995484948,
"max": 0.5014400005340576,
"count": 3103
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -7.162400126457214,
"min": -69.17599999904633,
"max": 64.46680080890656,
"count": 3103
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3103
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3103
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01780883798146533,
"min": 0.012434850020023683,
"max": 0.024863777283462694,
"count": 754
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01780883798146533,
"min": 0.012434850020023683,
"max": 0.024863777283462694,
"count": 754
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09168587923049927,
"min": 0.0013380789212533272,
"max": 0.1214194405823946,
"count": 754
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09168587923049927,
"min": 0.0013380789212533272,
"max": 0.1214194405823946,
"count": 754
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09295500939091046,
"min": 0.0013492340386922783,
"max": 0.1252348172167937,
"count": 754
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09295500939091046,
"min": 0.0013492340386922783,
"max": 0.1252348172167937,
"count": 754
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 6.754008248666498e-05,
"min": 6.754008248666498e-05,
"max": 0.00029968566010478003,
"count": 754
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 6.754008248666498e-05,
"min": 6.754008248666498e-05,
"max": 0.00029968566010478003,
"count": 754
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.13377000250000004,
"min": 0.13377000250000004,
"max": 0.24984283000000002,
"count": 754
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.13377000250000004,
"min": 0.13377000250000004,
"max": 0.24984283000000002,
"count": 754
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.0011334154165,
"min": 0.0011334154165,
"max": 0.004994771477999998,
"count": 754
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.0011334154165,
"min": 0.0011334154165,
"max": 0.004994771477999998,
"count": 754
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675896938",
"python_version": "3.9.16 (main, Jan 11 2023, 16:16:36) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\hecto\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=Keano2 --no-graphics --num-envs=3",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1675951938"
},
"total": 55000.792964600005,
"count": 1,
"self": 0.5562214000019594,
"children": {
"run_training.setup": {
"total": 0.18205910000000003,
"count": 1,
"self": 0.18205910000000003
},
"TrainerController.start_learning": {
"total": 55000.0546841,
"count": 1,
"self": 44.47707250079111,
"children": {
"TrainerController._reset_env": {
"total": 8.62760900004237,
"count": 156,
"self": 8.62760900004237
},
"TrainerController.advance": {
"total": 54946.81010599917,
"count": 1921362,
"self": 38.6051973021531,
"children": {
"env_step": {
"total": 12445.740336898823,
"count": 1921362,
"self": 4190.723605199239,
"children": {
"SubprocessEnvManager._take_step": {
"total": 8232.318773904692,
"count": 2166682,
"self": 261.4100139086968,
"children": {
"TorchPolicy.evaluate": {
"total": 7970.908759995995,
"count": 3944166,
"self": 7970.908759995995
}
}
},
"workers": {
"total": 22.697957794893757,
"count": 1921362,
"self": 0.0,
"children": {
"worker_root": {
"total": 164782.59743910583,
"count": 2166473,
"is_parallel": true,
"self": 144693.15303580926,
"children": {
"steps_from_proto": {
"total": 0.6477376999521338,
"count": 936,
"is_parallel": true,
"self": 0.13200409975825123,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.5157336001938826,
"count": 3744,
"is_parallel": true,
"self": 0.5157336001938826
}
}
},
"UnityEnvironment.step": {
"total": 20088.79666559662,
"count": 2166473,
"is_parallel": true,
"self": 1055.1520245860447,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 855.6345047999873,
"count": 2166473,
"is_parallel": true,
"self": 855.6345047999873
},
"communicator.exchange": {
"total": 15023.869842703287,
"count": 2166473,
"is_parallel": true,
"self": 15023.869842703287
},
"steps_from_proto": {
"total": 3154.140293507301,
"count": 4332946,
"is_parallel": true,
"self": 643.5659081962363,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2510.574385311065,
"count": 17331784,
"is_parallel": true,
"self": 2510.574385311065
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 42462.46457179819,
"count": 1921362,
"self": 347.17189329535904,
"children": {
"process_trajectory": {
"total": 7602.200574202841,
"count": 1921362,
"self": 7591.637804902831,
"children": {
"RLTrainer._checkpoint": {
"total": 10.562769300010586,
"count": 62,
"self": 10.562769300010586
}
}
},
"_update_policy": {
"total": 34513.09210429999,
"count": 755,
"self": 4113.9223459994755,
"children": {
"TorchPOCAOptimizer.update": {
"total": 30399.169758300515,
"count": 45303,
"self": 30399.169758300515
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1000010999850929e-06,
"count": 1,
"self": 1.1000010999850929e-06
},
"TrainerController._save_models": {
"total": 0.1398955000040587,
"count": 1,
"self": 0.008740500001295004,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1311550000027637,
"count": 1,
"self": 0.1311550000027637
}
}
}
}
}
}
}