{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1374824047088623,
"min": 3.094362258911133,
"max": 3.2957229614257812,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 63552.84375,
"min": 20378.162109375,
"max": 158889.484375,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 899.6,
"min": 394.72727272727275,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 17992.0,
"min": 11060.0,
"max": 30128.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1191.9290993236582,
"min": 1186.0930386295388,
"max": 1197.6814780624672,
"count": 348
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 7151.574595941949,
"min": 2372.1860772590776,
"max": 14370.206371868799,
"count": 348
},
"SoccerTwos.Step.mean": {
"value": 4999882.0,
"min": 9084.0,
"max": 4999882.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999882.0,
"min": 9084.0,
"max": 4999882.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.015382199548184872,
"min": -0.015382199548184872,
"max": 0.09476882964372635,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.16920419037342072,
"min": -0.2145426869392395,
"max": 1.3267344236373901,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.015508255921304226,
"min": -0.015508255921304226,
"max": 0.0947607234120369,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.1705908179283142,
"min": -0.20972783863544464,
"max": 1.326623797416687,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.24687272852117365,
"min": -0.7082285716065339,
"max": 0.32112500071525574,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.71560001373291,
"min": -9.915200002491474,
"max": 5.138000011444092,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.24687272852117365,
"min": -0.7082285716065339,
"max": 0.32112500071525574,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.71560001373291,
"min": -9.915200002491474,
"max": 5.138000011444092,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.009317992958191705,
"min": 0.007854099186025214,
"max": 0.01677937788675384,
"count": 98
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.009317992958191705,
"min": 0.007854099186025214,
"max": 0.01677937788675384,
"count": 98
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.002715616867256661,
"min": 6.014745774033751e-07,
"max": 0.016914054087397677,
"count": 98
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.002715616867256661,
"min": 6.014745774033751e-07,
"max": 0.016914054087397677,
"count": 98
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.002717463889469703,
"min": 8.684975246827283e-07,
"max": 0.008899358282279637,
"count": 98
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.002717463889469703,
"min": 8.684975246827283e-07,
"max": 0.008899358282279637,
"count": 98
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0006999999999999998,
"min": 0.0006999999999999998,
"max": 0.0006999999999999999,
"count": 98
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0006999999999999998,
"min": 0.0006999999999999998,
"max": 0.0006999999999999999,
"count": 98
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 98
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 98
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 98
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 98
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677309010",
"python_version": "3.9.16 (main, Jan 11 2023, 16:16:36) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "D:\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env ./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id SoccerTwos",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1677326469"
},
"total": 17458.895501299998,
"count": 1,
"self": 0.8008393999953114,
"children": {
"run_training.setup": {
"total": 0.1535616000000002,
"count": 1,
"self": 0.1535616000000002
},
"TrainerController.start_learning": {
"total": 17457.941100300002,
"count": 1,
"self": 9.912473699423572,
"children": {
"TrainerController._reset_env": {
"total": 9.265119099995008,
"count": 25,
"self": 9.265119099995008
},
"TrainerController.advance": {
"total": 17438.589616800582,
"count": 325488,
"self": 9.644766200846789,
"children": {
"env_step": {
"total": 7671.779533399882,
"count": 325488,
"self": 5911.1434806003,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1754.055834399649,
"count": 325488,
"self": 57.96416329839394,
"children": {
"TorchPolicy.evaluate": {
"total": 1696.091671101255,
"count": 646206,
"self": 1696.091671101255
}
}
},
"workers": {
"total": 6.580218399933306,
"count": 325488,
"self": 0.0,
"children": {
"worker_root": {
"total": 17437.00375710062,
"count": 325488,
"is_parallel": true,
"self": 12783.22742120004,
"children": {
"steps_from_proto": {
"total": 0.0510327999986524,
"count": 50,
"is_parallel": true,
"self": 0.010393599994758418,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04063920000389398,
"count": 200,
"is_parallel": true,
"self": 0.04063920000389398
}
}
},
"UnityEnvironment.step": {
"total": 4653.725303100584,
"count": 325488,
"is_parallel": true,
"self": 242.3979159001492,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 222.36855450069805,
"count": 325488,
"is_parallel": true,
"self": 222.36855450069805
},
"communicator.exchange": {
"total": 3374.4351308996124,
"count": 325488,
"is_parallel": true,
"self": 3374.4351308996124
},
"steps_from_proto": {
"total": 814.5237018001247,
"count": 650976,
"is_parallel": true,
"self": 161.05546790089534,
"children": {
"_process_rank_one_or_two_observation": {
"total": 653.4682338992294,
"count": 2603904,
"is_parallel": true,
"self": 653.4682338992294
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 9757.165317199851,
"count": 325488,
"self": 70.52170599961391,
"children": {
"process_trajectory": {
"total": 1305.1247541002367,
"count": 325488,
"self": 1303.4285935002363,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6961606000004394,
"count": 10,
"self": 1.6961606000004394
}
}
},
"_update_policy": {
"total": 8381.5188571,
"count": 98,
"self": 904.8173683000068,
"children": {
"TorchPOCAOptimizer.update": {
"total": 7476.701488799993,
"count": 2745,
"self": 7476.701488799993
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1000010999850929e-06,
"count": 1,
"self": 1.1000010999850929e-06
},
"TrainerController._save_models": {
"total": 0.1738895999988017,
"count": 1,
"self": 0.009182999998301966,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16470660000049975,
"count": 1,
"self": 0.16470660000049975
}
}
}
}
}
}
}