{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.9334923028945923,
"min": 1.8815973997116089,
"max": 3.2957425117492676,
"count": 503
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 38731.71875,
"min": 22497.32421875,
"max": 112247.515625,
"count": 503
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 56.01162790697674,
"min": 42.51304347826087,
"max": 999.0,
"count": 503
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19268.0,
"min": 15984.0,
"max": 26280.0,
"count": 503
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1552.1420684787659,
"min": 1192.1221958572028,
"max": 1552.1420684787659,
"count": 462
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 266968.4357783477,
"min": 2384.2443917144055,
"max": 335867.8023813828,
"count": 462
},
"SoccerTwos.Step.mean": {
"value": 5029956.0,
"min": 9156.0,
"max": 5029956.0,
"count": 503
},
"SoccerTwos.Step.sum": {
"value": 5029956.0,
"min": 9156.0,
"max": 5029956.0,
"count": 503
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.04380891099572182,
"min": -0.08590860664844513,
"max": 0.2105739563703537,
"count": 503
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 7.535132884979248,
"min": -16.236726760864258,
"max": 37.21223449707031,
"count": 503
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.04433209449052811,
"min": -0.08740323036909103,
"max": 0.2108030915260315,
"count": 503
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 7.625120162963867,
"min": -16.519210815429688,
"max": 37.788970947265625,
"count": 503
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 503
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 503
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.03750232416529988,
"min": -0.644200000349277,
"max": 0.565936229367187,
"count": 503
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 6.45039975643158,
"min": -46.87839984893799,
"max": 68.64999973773956,
"count": 503
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.03750232416529988,
"min": -0.644200000349277,
"max": 0.565936229367187,
"count": 503
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 6.45039975643158,
"min": -46.87839984893799,
"max": 68.64999973773956,
"count": 503
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 503
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 503
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015188725812186022,
"min": 0.011012321790137019,
"max": 0.023777029931079597,
"count": 240
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015188725812186022,
"min": 0.011012321790137019,
"max": 0.023777029931079597,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11566716382900874,
"min": 2.190943362923766e-05,
"max": 0.1158981295923392,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11566716382900874,
"min": 2.190943362923766e-05,
"max": 0.1158981295923392,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11794047231475512,
"min": 2.288762464862278e-05,
"max": 0.11796871398886045,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11794047231475512,
"min": 2.288762464862278e-05,
"max": 0.11796871398886045,
"count": 240
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 240
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 240
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1756880791",
"python_version": "3.10.11 (tags/v3.10.11:7d4cc5a, Apr 5 2023, 00:38:17) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "D:\\ml-agents\\venv\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=.\\training-envs-executables\\SoccerTwos\\SoccerTwos.exe --run-id=poca-SoccerTwos-Emperical --no-graphics --seed=42 --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu129",
"numpy_version": "1.23.5",
"end_time_seconds": "1756890924"
},
"total": 10133.404578000016,
"count": 1,
"self": 0.6511795000114944,
"children": {
"run_training.setup": {
"total": 0.07644730000174604,
"count": 1,
"self": 0.07644730000174604
},
"TrainerController.start_learning": {
"total": 10132.676951200003,
"count": 1,
"self": 6.867638498311862,
"children": {
"TrainerController._reset_env": {
"total": 5.424393700028304,
"count": 26,
"self": 5.424393700028304
},
"TrainerController.advance": {
"total": 10120.181617001654,
"count": 338860,
"self": 6.885640405351296,
"children": {
"env_step": {
"total": 7600.638616508688,
"count": 338860,
"self": 4060.9887591350125,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3535.391641784634,
"count": 338860,
"self": 43.85003118251916,
"children": {
"TorchPolicy.evaluate": {
"total": 3491.541610602115,
"count": 638363,
"self": 3491.541610602115
}
}
},
"workers": {
"total": 4.258215589041356,
"count": 338859,
"self": 0.0,
"children": {
"worker_root": {
"total": 10118.943959394877,
"count": 338859,
"is_parallel": true,
"self": 6842.734818988276,
"children": {
"steps_from_proto": {
"total": 0.036670400004368275,
"count": 52,
"is_parallel": true,
"self": 0.0076679998892359436,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.029002400115132332,
"count": 208,
"is_parallel": true,
"self": 0.029002400115132332
}
}
},
"UnityEnvironment.step": {
"total": 3276.172470006597,
"count": 338859,
"is_parallel": true,
"self": 158.17323432533885,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 118.66115308651933,
"count": 338859,
"is_parallel": true,
"self": 118.66115308651933
},
"communicator.exchange": {
"total": 2483.782628597197,
"count": 338859,
"is_parallel": true,
"self": 2483.782628597197
},
"steps_from_proto": {
"total": 515.5554539975419,
"count": 677718,
"is_parallel": true,
"self": 105.16972751313006,
"children": {
"_process_rank_one_or_two_observation": {
"total": 410.38572648441186,
"count": 2710872,
"is_parallel": true,
"self": 410.38572648441186
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2512.6573600876145,
"count": 338859,
"self": 47.446357485547196,
"children": {
"process_trajectory": {
"total": 1261.4484805018874,
"count": 338859,
"self": 1259.4507741019188,
"children": {
"RLTrainer._checkpoint": {
"total": 1.9977063999685925,
"count": 10,
"self": 1.9977063999685925
}
}
},
"_update_policy": {
"total": 1203.76252210018,
"count": 240,
"self": 608.5070467006299,
"children": {
"TorchPOCAOptimizer.update": {
"total": 595.25547539955,
"count": 7203,
"self": 595.25547539955
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.00000761449337e-06,
"count": 1,
"self": 1.00000761449337e-06
},
"TrainerController._save_models": {
"total": 0.20330100000137463,
"count": 1,
"self": 0.009993300016503781,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19330769998487085,
"count": 1,
"self": 0.19330769998487085
}
}
}
}
}
}
}