{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4621256589889526,
"min": 1.280769944190979,
"max": 3.2957427501678467,
"count": 4999
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 25967.3515625,
"min": 18617.486328125,
"max": 155810.875,
"count": 4999
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 73.58461538461539,
"min": 38.80952380952381,
"max": 999.0,
"count": 4999
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19132.0,
"min": 11988.0,
"max": 31968.0,
"count": 4999
},
"SoccerTwos.Step.mean": {
"value": 49999950.0,
"min": 9000.0,
"max": 49999950.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999950.0,
"min": 9000.0,
"max": 49999950.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.008677976205945015,
"min": -0.1454649567604065,
"max": 0.20084896683692932,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.1281368732452393,
"min": -29.965782165527344,
"max": 29.345890045166016,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.010827634483575821,
"min": -0.14905725419521332,
"max": 0.20505879819393158,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.407592535018921,
"min": -30.705793380737305,
"max": 29.85542869567871,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1060953873854417,
"min": -0.8461538461538461,
"max": 0.5513454499569806,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -13.792400360107422,
"min": -70.87159997224808,
"max": 58.947200298309326,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1060953873854417,
"min": -0.8461538461538461,
"max": 0.5513454499569806,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -13.792400360107422,
"min": -70.87159997224808,
"max": 58.947200298309326,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1698.3334663362982,
"min": 1180.150765225715,
"max": 1785.6079146689585,
"count": 4805
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 220783.35062371875,
"min": 2360.30153045143,
"max": 435539.1608671374,
"count": 4805
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.02196764590917155,
"min": 0.009764704160382583,
"max": 0.025433018419425933,
"count": 2413
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.02196764590917155,
"min": 0.009764704160382583,
"max": 0.025433018419425933,
"count": 2413
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09797533204158147,
"min": 3.4607449883594656e-08,
"max": 0.12879207953810692,
"count": 2413
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09797533204158147,
"min": 3.4607449883594656e-08,
"max": 0.12879207953810692,
"count": 2413
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09918076793352763,
"min": 7.535348440039039e-08,
"max": 0.12992177282770476,
"count": 2413
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09918076793352763,
"min": 7.535348440039039e-08,
"max": 0.12992177282770476,
"count": 2413
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2413
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2413
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 2413
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 2413
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2413
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2413
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731240825",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:40:32) [GCC 12.3.0]",
"command_line_arguments": "/media/kemove/17a64fef-179c-4620-a965-511df0584b38/home/yunpeng/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env ./training-envs-executables/linux/SoccerTwos/SoccerTwos --run-id SoccerTwos --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1731285865"
},
"total": 45039.967324682,
"count": 1,
"self": 0.16651835999800824,
"children": {
"run_training.setup": {
"total": 0.008755893999477848,
"count": 1,
"self": 0.008755893999477848
},
"TrainerController.start_learning": {
"total": 45039.792050428005,
"count": 1,
"self": 56.04159170886851,
"children": {
"TrainerController._reset_env": {
"total": 7.723141937996843,
"count": 250,
"self": 7.723141937996843
},
"TrainerController.advance": {
"total": 44975.63950396014,
"count": 3431775,
"self": 57.57684400135622,
"children": {
"env_step": {
"total": 34017.37396647387,
"count": 3431775,
"self": 24313.65534247293,
"children": {
"SubprocessEnvManager._take_step": {
"total": 9669.041964306514,
"count": 3431775,
"self": 285.4312219972089,
"children": {
"TorchPolicy.evaluate": {
"total": 9383.610742309305,
"count": 6299362,
"self": 9383.610742309305
}
}
},
"workers": {
"total": 34.67665969442169,
"count": 3431775,
"self": 0.0,
"children": {
"worker_root": {
"total": 44959.616609847806,
"count": 3431775,
"is_parallel": true,
"self": 25686.76858050111,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.029977671001688577,
"count": 2,
"is_parallel": true,
"self": 0.028926475999469403,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001051195002219174,
"count": 8,
"is_parallel": true,
"self": 0.001051195002219174
}
}
},
"UnityEnvironment.step": {
"total": 0.012011115002678707,
"count": 1,
"is_parallel": true,
"self": 0.00027799800227512605,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021334000120987184,
"count": 1,
"is_parallel": true,
"self": 0.00021334000120987184
},
"communicator.exchange": {
"total": 0.010756171999673825,
"count": 1,
"is_parallel": true,
"self": 0.010756171999673825
},
"steps_from_proto": {
"total": 0.0007636049995198846,
"count": 2,
"is_parallel": true,
"self": 0.00015008800619398244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006135169933259021,
"count": 8,
"is_parallel": true,
"self": 0.0006135169933259021
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 19272.63218522768,
"count": 3431774,
"is_parallel": true,
"self": 997.7300711314856,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 612.1265179692346,
"count": 3431774,
"is_parallel": true,
"self": 612.1265179692346
},
"communicator.exchange": {
"total": 14875.540548649002,
"count": 3431774,
"is_parallel": true,
"self": 14875.540548649002
},
"steps_from_proto": {
"total": 2787.2350474779596,
"count": 6863548,
"is_parallel": true,
"self": 545.6683956978122,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2241.5666517801474,
"count": 27454192,
"is_parallel": true,
"self": 2241.5666517801474
}
}
}
}
},
"steps_from_proto": {
"total": 0.21584411901494605,
"count": 498,
"is_parallel": true,
"self": 0.044062083903554594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.17178203511139145,
"count": 1992,
"is_parallel": true,
"self": 0.17178203511139145
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 10900.68869348492,
"count": 3431775,
"self": 425.6903976532012,
"children": {
"process_trajectory": {
"total": 4838.4257322837075,
"count": 3431775,
"self": 4801.599955648708,
"children": {
"RLTrainer._checkpoint": {
"total": 36.825776634999784,
"count": 100,
"self": 36.825776634999784
}
}
},
"_update_policy": {
"total": 5636.572563548012,
"count": 2413,
"self": 3284.43296428525,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2352.139599262762,
"count": 72402,
"self": 2352.139599262762
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.309986252337694e-07,
"count": 1,
"self": 4.309986252337694e-07
},
"TrainerController._save_models": {
"total": 0.38781238999945344,
"count": 1,
"self": 0.0008202030003303662,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3869921869991231,
"count": 1,
"self": 0.3869921869991231
}
}
}
}
}
}
}