{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6177650690078735,
"min": 1.515626311302185,
"max": 3.2957265377044678,
"count": 1345
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 30595.171875,
"min": 11503.9423828125,
"max": 118597.421875,
"count": 1345
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 78.6774193548387,
"min": 41.99145299145299,
"max": 999.0,
"count": 1345
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19512.0,
"min": 2172.0,
"max": 30544.0,
"count": 1345
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1611.2352713654395,
"min": 1195.9618564436194,
"max": 1650.41900545838,
"count": 1338
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 199793.1736493145,
"min": 2391.9237128872387,
"max": 339189.74968322134,
"count": 1338
},
"SoccerTwos.Step.mean": {
"value": 13449734.0,
"min": 9666.0,
"max": 13449734.0,
"count": 1345
},
"SoccerTwos.Step.sum": {
"value": 13449734.0,
"min": 9666.0,
"max": 13449734.0,
"count": 1345
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.023788360878825188,
"min": -0.13462801277637482,
"max": 0.1812068074941635,
"count": 1345
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.9497568607330322,
"min": -20.32883071899414,
"max": 27.91950035095215,
"count": 1345
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.025132087990641594,
"min": -0.13467352092266083,
"max": 0.18890243768692017,
"count": 1345
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.1163790225982666,
"min": -20.33570098876953,
"max": 29.30706024169922,
"count": 1345
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1345
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1345
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.03828870673333445,
"min": -0.6923076923076923,
"max": 0.4743757573041049,
"count": 1345
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 4.747799634933472,
"min": -60.09479999542236,
"max": 66.21499997377396,
"count": 1345
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.03828870673333445,
"min": -0.6923076923076923,
"max": 0.4743757573041049,
"count": 1345
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 4.747799634933472,
"min": -60.09479999542236,
"max": 66.21499997377396,
"count": 1345
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1345
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1345
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01278653058107011,
"min": 0.01065120606411559,
"max": 0.02399001268398327,
"count": 650
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01278653058107011,
"min": 0.01065120606411559,
"max": 0.02399001268398327,
"count": 650
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08569016729791959,
"min": 8.850246619355555e-05,
"max": 0.11445795744657516,
"count": 650
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08569016729791959,
"min": 8.850246619355555e-05,
"max": 0.11445795744657516,
"count": 650
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08679854248960812,
"min": 7.934097933078495e-05,
"max": 0.11678697044650714,
"count": 650
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08679854248960812,
"min": 7.934097933078495e-05,
"max": 0.11678697044650714,
"count": 650
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 650
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 650
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 650
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 650
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 650
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 650
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677687743",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:38:11) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/giovannibonetta/miniforge3/envs/rl_python39/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1677744880"
},
"total": 57137.046552875,
"count": 1,
"self": 0.1423110000032466,
"children": {
"run_training.setup": {
"total": 0.22231874999999945,
"count": 1,
"self": 0.22231874999999945
},
"TrainerController.start_learning": {
"total": 57136.681923125,
"count": 1,
"self": 8.112656235782197,
"children": {
"TrainerController._reset_env": {
"total": 4.501855917026199,
"count": 68,
"self": 4.501855917026199
},
"TrainerController.advance": {
"total": 57123.980660431196,
"count": 922522,
"self": 7.674306478977087,
"children": {
"env_step": {
"total": 47948.36645541004,
"count": 922522,
"self": 46774.37800006344,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1167.2670998154422,
"count": 922522,
"self": 34.97336674206167,
"children": {
"TorchPolicy.evaluate": {
"total": 1132.2937330733805,
"count": 1692000,
"self": 1132.2937330733805
}
}
},
"workers": {
"total": 6.721355531151934,
"count": 922521,
"self": 0.0,
"children": {
"worker_root": {
"total": 57120.07160796171,
"count": 922521,
"is_parallel": true,
"self": 11431.154959092913,
"children": {
"steps_from_proto": {
"total": 0.09950287595317375,
"count": 136,
"is_parallel": true,
"self": 0.011280740994617178,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.08822213495855658,
"count": 544,
"is_parallel": true,
"self": 0.08822213495855658
}
}
},
"UnityEnvironment.step": {
"total": 45688.81714599285,
"count": 922521,
"is_parallel": true,
"self": 102.98286286155053,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 783.4894327746488,
"count": 922521,
"is_parallel": true,
"self": 783.4894327746488
},
"communicator.exchange": {
"total": 43188.64422051075,
"count": 922521,
"is_parallel": true,
"self": 43188.64422051075
},
"steps_from_proto": {
"total": 1613.7006298458919,
"count": 1845042,
"is_parallel": true,
"self": 171.07916981896346,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1442.6214600269284,
"count": 7380168,
"is_parallel": true,
"self": 1442.6214600269284
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 9167.939898542178,
"count": 922521,
"self": 64.92592670319391,
"children": {
"process_trajectory": {
"total": 1846.5396612910154,
"count": 922521,
"self": 1844.5589116670117,
"children": {
"RLTrainer._checkpoint": {
"total": 1.9807496240036926,
"count": 26,
"self": 1.9807496240036926
}
}
},
"_update_policy": {
"total": 7256.474310547968,
"count": 650,
"self": 969.9330537860878,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6286.5412567618805,
"count": 19512,
"self": 6286.5412567618805
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.079943316057324e-07,
"count": 1,
"self": 7.079943316057324e-07
},
"TrainerController._save_models": {
"total": 0.08674983299715677,
"count": 1,
"self": 0.0013924169979873113,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08535741599916946,
"count": 1,
"self": 0.08535741599916946
}
}
}
}
}
}
}