{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.0193281173706055,
"min": 1.9643131494522095,
"max": 2.0792429447174072,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36832.546875,
"min": 33020.5546875,
"max": 50278.8203125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 87.18181818181819,
"min": 56.833333333333336,
"max": 113.6590909090909,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19180.0,
"min": 18308.0,
"max": 21300.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1410.7601003344073,
"min": 1379.8705983144878,
"max": 1465.220959065712,
"count": 500
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 155183.6110367848,
"min": 123247.55267462911,
"max": 248221.81810924233,
"count": 500
},
"SoccerTwos.Step.mean": {
"value": 14999767.0,
"min": 10009854.0,
"max": 14999767.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 14999767.0,
"min": 10009854.0,
"max": 14999767.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.013330723159015179,
"min": -0.15980811417102814,
"max": 0.0799807533621788,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.4797102212905884,
"min": -19.49658966064453,
"max": 9.91761302947998,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.018607420846819878,
"min": -0.159414604306221,
"max": 0.08092209696769714,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.0654237270355225,
"min": -19.77153205871582,
"max": 10.034339904785156,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.006893694937766135,
"min": -0.4453192295936438,
"max": 0.37280999720096586,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -0.765200138092041,
"min": -63.493199944496155,
"max": 44.737199664115906,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.006893694937766135,
"min": -0.4453192295936438,
"max": 0.37280999720096586,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -0.765200138092041,
"min": -63.493199944496155,
"max": 44.737199664115906,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0260514505886628,
"min": 0.017916234560713444,
"max": 0.02759317404900988,
"count": 121
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0260514505886628,
"min": 0.017916234560713444,
"max": 0.02759317404900988,
"count": 121
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08049535776178042,
"min": 0.07586482713619867,
"max": 0.09767879719535509,
"count": 121
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08049535776178042,
"min": 0.07586482713619867,
"max": 0.09767879719535509,
"count": 121
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08245965912938118,
"min": 0.07864355693260829,
"max": 0.09992946684360504,
"count": 121
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08245965912938118,
"min": 0.07864355693260829,
"max": 0.09992946684360504,
"count": 121
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 121
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 121
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 121
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 121
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 121
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 121
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1761039533",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/ada/work/ai/venv3.10/bin/mlagents-learn ./config/poca/SoccerTwosGo.yaml --resume --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.9.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1761048180"
},
"total": 8646.804316897003,
"count": 1,
"self": 0.5711837930139154,
"children": {
"run_training.setup": {
"total": 0.04321815399453044,
"count": 1,
"self": 0.04321815399453044
},
"TrainerController.start_learning": {
"total": 8646.189914949995,
"count": 1,
"self": 7.507484404166462,
"children": {
"TrainerController._reset_env": {
"total": 2.494118211994646,
"count": 26,
"self": 2.494118211994646
},
"TrainerController.advance": {
"total": 8630.252385538857,
"count": 339454,
"self": 6.080225770798279,
"children": {
"env_step": {
"total": 6664.653628120519,
"count": 339454,
"self": 4947.748722012417,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1711.8997630827362,
"count": 339454,
"self": 42.065065260307165,
"children": {
"TorchPolicy.evaluate": {
"total": 1669.834697822429,
"count": 628038,
"self": 1669.834697822429
}
}
},
"workers": {
"total": 5.005143025366124,
"count": 339454,
"self": 0.0,
"children": {
"worker_root": {
"total": 8624.998652902985,
"count": 339454,
"is_parallel": true,
"self": 4486.705685442663,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0034261269902344793,
"count": 2,
"is_parallel": true,
"self": 0.0007970519363880157,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0026290750538464636,
"count": 8,
"is_parallel": true,
"self": 0.0026290750538464636
}
}
},
"UnityEnvironment.step": {
"total": 0.023506944009568542,
"count": 1,
"is_parallel": true,
"self": 0.0011191890225745738,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003908869985025376,
"count": 1,
"is_parallel": true,
"self": 0.0003908869985025376
},
"communicator.exchange": {
"total": 0.01935509900795296,
"count": 1,
"is_parallel": true,
"self": 0.01935509900795296
},
"steps_from_proto": {
"total": 0.0026417689805384725,
"count": 2,
"is_parallel": true,
"self": 0.0004645489971153438,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021772199834231287,
"count": 8,
"is_parallel": true,
"self": 0.0021772199834231287
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.05029912613099441,
"count": 50,
"is_parallel": true,
"self": 0.00849195200135,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04180717412964441,
"count": 200,
"is_parallel": true,
"self": 0.04180717412964441
}
}
},
"UnityEnvironment.step": {
"total": 4138.242668334191,
"count": 339453,
"is_parallel": true,
"self": 235.49196220262093,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 162.76754256189452,
"count": 339453,
"is_parallel": true,
"self": 162.76754256189452
},
"communicator.exchange": {
"total": 3117.3282692046196,
"count": 339453,
"is_parallel": true,
"self": 3117.3282692046196
},
"steps_from_proto": {
"total": 622.654894365056,
"count": 678906,
"is_parallel": true,
"self": 104.93273744470207,
"children": {
"_process_rank_one_or_two_observation": {
"total": 517.7221569203539,
"count": 2715624,
"is_parallel": true,
"self": 517.7221569203539
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1959.51853164754,
"count": 339454,
"self": 60.41723086338607,
"children": {
"process_trajectory": {
"total": 824.4058585642197,
"count": 339454,
"self": 786.0357252132089,
"children": {
"RLTrainer._checkpoint": {
"total": 38.370133351010736,
"count": 10,
"self": 38.370133351010736
}
}
},
"_update_policy": {
"total": 1074.6954422199342,
"count": 121,
"self": 539.9536242478644,
"children": {
"TorchPOCAOptimizer.update": {
"total": 534.7418179720698,
"count": 3630,
"self": 534.7418179720698
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.809997674077749e-07,
"count": 1,
"self": 5.809997674077749e-07
},
"TrainerController._save_models": {
"total": 5.935926213976927,
"count": 1,
"self": 0.17000244598602876,
"children": {
"RLTrainer._checkpoint": {
"total": 5.765923767990898,
"count": 1,
"self": 5.765923767990898
}
}
}
}
}
}
}