{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.0764594078063965,
"min": 3.0412607192993164,
"max": 3.2957353591918945,
"count": 601
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 81513.8671875,
"min": 18346.26953125,
"max": 132065.5625,
"count": 601
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 617.5,
"min": 418.72727272727275,
"max": 999.0,
"count": 601
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19760.0,
"min": 15984.0,
"max": 23976.0,
"count": 601
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1170.9821872935167,
"min": 1170.3353594710438,
"max": 1208.5667328316927,
"count": 442
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 11709.821872935168,
"min": 2340.6707189420877,
"max": 19095.514784948613,
"count": 442
},
"SoccerTwos.Step.mean": {
"value": 6009111.0,
"min": 9588.0,
"max": 6009111.0,
"count": 601
},
"SoccerTwos.Step.sum": {
"value": 6009111.0,
"min": 9588.0,
"max": 6009111.0,
"count": 601
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.014177175238728523,
"min": -0.07167401909828186,
"max": 0.02866666205227375,
"count": 601
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.22683480381965637,
"min": -1.0751103162765503,
"max": 0.49374422430992126,
"count": 601
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.015274898149073124,
"min": -0.07172051817178726,
"max": 0.03939242288470268,
"count": 601
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.24439837038516998,
"min": -1.075807809829712,
"max": 0.6131031513214111,
"count": 601
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 601
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 601
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.09020000137388706,
"min": -0.5333333333333333,
"max": 0.4209058889571358,
"count": 601
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -1.443200021982193,
"min": -8.0,
"max": 7.155400112271309,
"count": 601
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.09020000137388706,
"min": -0.5333333333333333,
"max": 0.4209058889571358,
"count": 601
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -1.443200021982193,
"min": -8.0,
"max": 7.155400112271309,
"count": 601
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 601
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 601
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015858291126884676,
"min": 0.009458757127382948,
"max": 0.023100173682905734,
"count": 280
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015858291126884676,
"min": 0.009458757127382948,
"max": 0.023100173682905734,
"count": 280
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0012052876234520226,
"min": 9.542278093022104e-07,
"max": 0.0068745495596279705,
"count": 280
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0012052876234520226,
"min": 9.542278093022104e-07,
"max": 0.0068745495596279705,
"count": 280
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0013348444209744534,
"min": 7.722158765470037e-07,
"max": 0.006865249574184417,
"count": 280
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0013348444209744534,
"min": 7.722158765470037e-07,
"max": 0.006865249574184417,
"count": 280
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 280
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 280
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 280
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 280
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 280
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 280
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680439984",
"python_version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]",
"command_line_arguments": "/opt/conda/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1680506990"
},
"total": 67005.98467451206,
"count": 1,
"self": 0.29482002300210297,
"children": {
"run_training.setup": {
"total": 0.028062219033017755,
"count": 1,
"self": 0.028062219033017755
},
"TrainerController.start_learning": {
"total": 67005.66179227002,
"count": 1,
"self": 11.09943679801654,
"children": {
"TrainerController._reset_env": {
"total": 16.256909901858307,
"count": 30,
"self": 16.256909901858307
},
"TrainerController.advance": {
"total": 66977.92828508397,
"count": 390880,
"self": 11.59298508882057,
"children": {
"env_step": {
"total": 64614.79285915708,
"count": 390880,
"self": 62496.70285693731,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2110.817979800515,
"count": 390880,
"self": 70.37861579773016,
"children": {
"TorchPolicy.evaluate": {
"total": 2040.4393640027847,
"count": 775946,
"self": 2040.4393640027847
}
}
},
"workers": {
"total": 7.2720224192598835,
"count": 390880,
"self": 0.0,
"children": {
"worker_root": {
"total": 66973.06515399704,
"count": 390880,
"is_parallel": true,
"self": 6015.309892007383,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004172573913820088,
"count": 2,
"is_parallel": true,
"self": 0.0008880466921254992,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0032845272216945887,
"count": 8,
"is_parallel": true,
"self": 0.0032845272216945887
}
}
},
"UnityEnvironment.step": {
"total": 0.15647262404672801,
"count": 1,
"is_parallel": true,
"self": 0.00046114902943372726,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0027907340554520488,
"count": 1,
"is_parallel": true,
"self": 0.0027907340554520488
},
"communicator.exchange": {
"total": 0.14855224499478936,
"count": 1,
"is_parallel": true,
"self": 0.14855224499478936
},
"steps_from_proto": {
"total": 0.004668495967052877,
"count": 2,
"is_parallel": true,
"self": 0.0008426269050687551,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003825869061984122,
"count": 8,
"is_parallel": true,
"self": 0.003825869061984122
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 60957.62362928677,
"count": 390879,
"is_parallel": true,
"self": 188.746058423887,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 991.8595139458776,
"count": 390879,
"is_parallel": true,
"self": 991.8595139458776
},
"communicator.exchange": {
"total": 58012.941004492226,
"count": 390879,
"is_parallel": true,
"self": 58012.941004492226
},
"steps_from_proto": {
"total": 1764.0770524247782,
"count": 781758,
"is_parallel": true,
"self": 295.1288285444025,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1468.9482238803757,
"count": 3127032,
"is_parallel": true,
"self": 1468.9482238803757
}
}
}
}
},
"steps_from_proto": {
"total": 0.13163270289078355,
"count": 58,
"is_parallel": true,
"self": 0.02210606832522899,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.10952663456555456,
"count": 232,
"is_parallel": true,
"self": 0.10952663456555456
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2351.542440838064,
"count": 390880,
"self": 111.82402387296315,
"children": {
"process_trajectory": {
"total": 728.1229058863828,
"count": 390880,
"self": 723.2153040413978,
"children": {
"RLTrainer._checkpoint": {
"total": 4.907601844985038,
"count": 12,
"self": 4.907601844985038
}
}
},
"_update_policy": {
"total": 1511.5955110787181,
"count": 281,
"self": 1096.1559466058388,
"children": {
"TorchPOCAOptimizer.update": {
"total": 415.4395644728793,
"count": 8418,
"self": 415.4395644728793
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4650868251919746e-06,
"count": 1,
"self": 1.4650868251919746e-06
},
"TrainerController._save_models": {
"total": 0.37715902109630406,
"count": 1,
"self": 0.015391648164950311,
"children": {
"RLTrainer._checkpoint": {
"total": 0.36176737293135375,
"count": 1,
"self": 0.36176737293135375
}
}
}
}
}
}
}