{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2807910442352295,
"min": 3.2807910442352295,
"max": 3.295742988586426,
"count": 5
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 84723.1484375,
"min": 51235.0546875,
"max": 105463.7734375,
"count": 5
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 955.4,
"min": 745.2,
"max": 999.0,
"count": 5
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19108.0,
"min": 14904.0,
"max": 25888.0,
"count": 5
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1198.2960463941354,
"min": 1198.2960463941354,
"max": 1199.4207269131573,
"count": 4
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2396.5920927882707,
"min": 2396.5920927882707,
"max": 7196.524361478944,
"count": 4
},
"SoccerTwos.Step.mean": {
"value": 49974.0,
"min": 9960.0,
"max": 49974.0,
"count": 5
},
"SoccerTwos.Step.sum": {
"value": 49974.0,
"min": 9960.0,
"max": 49974.0,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.06636136025190353,
"min": -0.07005516439676285,
"max": -0.06636136025190353,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.7299749255180359,
"min": -0.9106462597846985,
"max": -0.6965510845184326,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06056693568825722,
"min": -0.07001491636037827,
"max": -0.06056693568825722,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.6662362813949585,
"min": -0.9100764989852905,
"max": -0.6329516172409058,
"count": 5
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.18181818181818182,
"min": -0.3241333334396283,
"max": 0.001360000018030405,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.0,
"min": -3.8896000012755394,
"max": 0.01360000018030405,
"count": 5
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.18181818181818182,
"min": -0.3241333334396283,
"max": 0.001360000018030405,
"count": 5
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.0,
"min": -3.8896000012755394,
"max": 0.01360000018030405,
"count": 5
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01809350042991961,
"min": 0.017385530701075267,
"max": 0.01809350042991961,
"count": 2
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01809350042991961,
"min": 0.017385530701075267,
"max": 0.01809350042991961,
"count": 2
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0007348242984638394,
"min": 0.0007348242984638394,
"max": 0.002954561379738152,
"count": 2
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0007348242984638394,
"min": 0.0007348242984638394,
"max": 0.002954561379738152,
"count": 2
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0007452838452688108,
"min": 0.0007452838452688108,
"max": 0.002745604452987512,
"count": 2
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0007452838452688108,
"min": 0.0007452838452688108,
"max": 0.002745604452987512,
"count": 2
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683590226",
"python_version": "3.9.13 (main, Jul 20 2022, 22:20:50) \n[GCC 11.3.0]",
"command_line_arguments": "/home/thekeymaster/Server/Projects/Hugging-Face-Course/Unit-7-AI-vs-AI/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683590430"
},
"total": 204.44136967899976,
"count": 1,
"self": 1.05795118399692,
"children": {
"run_training.setup": {
"total": 0.10620303400173725,
"count": 1,
"self": 0.10620303400173725
},
"TrainerController.start_learning": {
"total": 203.2772154610011,
"count": 1,
"self": 0.11539166511465737,
"children": {
"TrainerController._reset_env": {
"total": 42.49264526099978,
"count": 1,
"self": 42.49264526099978
},
"TrainerController.advance": {
"total": 153.98561443488506,
"count": 3819,
"self": 0.11292492894426687,
"children": {
"env_step": {
"total": 126.35035360599795,
"count": 3819,
"self": 104.39870043693008,
"children": {
"SubprocessEnvManager._take_step": {
"total": 21.879080556025656,
"count": 3819,
"self": 0.6794282059963734,
"children": {
"TorchPolicy.evaluate": {
"total": 21.199652350029282,
"count": 7614,
"self": 21.199652350029282
}
}
},
"workers": {
"total": 0.07257261304221174,
"count": 3819,
"self": 0.0,
"children": {
"worker_root": {
"total": 196.03046066292882,
"count": 3819,
"is_parallel": true,
"self": 108.00857298397568,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.021005970000260277,
"count": 2,
"is_parallel": true,
"self": 0.015773913997691125,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005232056002569152,
"count": 8,
"is_parallel": true,
"self": 0.005232056002569152
}
}
},
"UnityEnvironment.step": {
"total": 0.03804310700070346,
"count": 1,
"is_parallel": true,
"self": 0.000784120000389521,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0015876180004852358,
"count": 1,
"is_parallel": true,
"self": 0.0015876180004852358
},
"communicator.exchange": {
"total": 0.0332690930008539,
"count": 1,
"is_parallel": true,
"self": 0.0332690930008539
},
"steps_from_proto": {
"total": 0.002402275998974801,
"count": 2,
"is_parallel": true,
"self": 0.0004890790005447343,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019131969984300667,
"count": 8,
"is_parallel": true,
"self": 0.0019131969984300667
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 88.02188767895314,
"count": 3818,
"is_parallel": true,
"self": 5.955326958886872,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.789865059981821,
"count": 3818,
"is_parallel": true,
"self": 4.789865059981821
},
"communicator.exchange": {
"total": 58.98325425498115,
"count": 3818,
"is_parallel": true,
"self": 58.98325425498115
},
"steps_from_proto": {
"total": 18.293441405103295,
"count": 7636,
"is_parallel": true,
"self": 3.5349990402028197,
"children": {
"_process_rank_one_or_two_observation": {
"total": 14.758442364900475,
"count": 30544,
"is_parallel": true,
"self": 14.758442364900475
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 27.522335899942846,
"count": 3819,
"self": 1.270590202879248,
"children": {
"process_trajectory": {
"total": 7.35048717706195,
"count": 3819,
"self": 7.35048717706195
},
"_update_policy": {
"total": 18.901258520001647,
"count": 2,
"self": 14.493343388008725,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4.407915131992922,
"count": 66,
"self": 4.407915131992922
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6730009519960731e-06,
"count": 1,
"self": 1.6730009519960731e-06
},
"TrainerController._save_models": {
"total": 6.683562427000652,
"count": 1,
"self": 0.22492797700033407,
"children": {
"RLTrainer._checkpoint": {
"total": 6.458634450000318,
"count": 1,
"self": 6.458634450000318
}
}
}
}
}
}
}