{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6781185865402222,
"min": 1.6529011726379395,
"max": 1.713287115097046,
"count": 5
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35280.765625,
"min": 4429.6650390625,
"max": 36458.75,
"count": 5
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 66.51315789473684,
"min": 38.57142857142857,
"max": 71.1,
"count": 5
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20220.0,
"min": 1080.0,
"max": 20220.0,
"count": 5
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1485.9358217837985,
"min": 1482.809120536105,
"max": 1489.7606724540767,
"count": 5
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 225862.24491113736,
"min": 20856.649414357074,
"max": 260974.40521435445,
"count": 5
},
"SoccerTwos.Step.mean": {
"value": 10579965.0,
"min": 10539985.0,
"max": 10579965.0,
"count": 5
},
"SoccerTwos.Step.sum": {
"value": 10579965.0,
"min": 10539985.0,
"max": 10579965.0,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.05356394499540329,
"min": -0.08563671261072159,
"max": -0.02636147290468216,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -8.141719818115234,
"min": -11.90350341796875,
"max": -1.0835962295532227,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.05095755308866501,
"min": -0.08749444037675858,
"max": -0.02714642882347107,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.745548248291016,
"min": -12.161726951599121,
"max": -1.0566155910491943,
"count": 5
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.12187368383533076,
"min": -0.2654517966208698,
"max": 0.06860555625624126,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -18.524799942970276,
"min": -36.8977997303009,
"max": 9.879200100898743,
"count": 5
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.12187368383533076,
"min": -0.2654517966208698,
"max": 0.06860555625624126,
"count": 5
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -18.524799942970276,
"min": -36.8977997303009,
"max": 9.879200100898743,
"count": 5
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019001084902417156,
"min": 0.019001084902417156,
"max": 0.019001084902417156,
"count": 1
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019001084902417156,
"min": 0.019001084902417156,
"max": 0.019001084902417156,
"count": 1
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09864224642515182,
"min": 0.09864224642515182,
"max": 0.09864224642515182,
"count": 1
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09864224642515182,
"min": 0.09864224642515182,
"max": 0.09864224642515182,
"count": 1
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09997338106234868,
"min": 0.09997338106234868,
"max": 0.09997338106234868,
"count": 1
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09997338106234868,
"min": 0.09997338106234868,
"max": 0.09997338106234868,
"count": 1
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 1
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 1
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 1
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1727923256",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Jason\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1727923307"
},
"total": 50.69039320000002,
"count": 1,
"self": 0.05998539999973218,
"children": {
"run_training.setup": {
"total": 0.06067530000018451,
"count": 1,
"self": 0.06067530000018451
},
"TrainerController.start_learning": {
"total": 50.5697325000001,
"count": 1,
"self": 0.04038660000287564,
"children": {
"TrainerController._reset_env": {
"total": 4.59870839999985,
"count": 2,
"self": 4.59870839999985
},
"TrainerController.advance": {
"total": 45.81762479999725,
"count": 3113,
"self": 0.03845090001414064,
"children": {
"env_step": {
"total": 34.328943299995444,
"count": 3113,
"self": 19.782017400002587,
"children": {
"SubprocessEnvManager._take_step": {
"total": 14.52314989999968,
"count": 3113,
"self": 0.29699180000625347,
"children": {
"TorchPolicy.evaluate": {
"total": 14.226158099993427,
"count": 5677,
"self": 14.226158099993427
}
}
},
"workers": {
"total": 0.02377599999317681,
"count": 3112,
"self": 0.0,
"children": {
"worker_root": {
"total": 46.39741940000613,
"count": 3112,
"is_parallel": true,
"self": 30.55800940002473,
"children": {
"steps_from_proto": {
"total": 0.0017437000001336855,
"count": 4,
"is_parallel": true,
"self": 0.00034060000007229974,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014031000000613858,
"count": 16,
"is_parallel": true,
"self": 0.0014031000000613858
}
}
},
"UnityEnvironment.step": {
"total": 15.837666299981265,
"count": 3112,
"is_parallel": true,
"self": 0.8159926999514937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.5843700999921566,
"count": 3112,
"is_parallel": true,
"self": 0.5843700999921566
},
"communicator.exchange": {
"total": 11.985065100010615,
"count": 3112,
"is_parallel": true,
"self": 11.985065100010615
},
"steps_from_proto": {
"total": 2.4522384000269994,
"count": 6224,
"is_parallel": true,
"self": 0.4723676000198793,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1.9798708000071201,
"count": 24896,
"is_parallel": true,
"self": 1.9798708000071201
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 11.450230599987663,
"count": 3112,
"self": 0.25559949998614684,
"children": {
"process_trajectory": {
"total": 6.208612800001447,
"count": 3112,
"self": 6.208612800001447
},
"_update_policy": {
"total": 4.986018300000069,
"count": 2,
"self": 3.089191299999129,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1.8968270000009397,
"count": 60,
"self": 1.8968270000009397
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.000000212225132e-07,
"count": 1,
"self": 6.000000212225132e-07
},
"TrainerController._save_models": {
"total": 0.11301210000010542,
"count": 1,
"self": 0.004226500000186206,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10878559999991921,
"count": 1,
"self": 0.10878559999991921
}
}
}
}
}
}
}