{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 0.22209034860134125,
"min": 0.20746326446533203,
"max": 3.295814037322998,
"count": 32000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 4917.96875,
"min": 3382.24462890625,
"max": 537455.0,
"count": 32000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 56.8375,
"min": 30.496815286624205,
"max": 999.0,
"count": 31999
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 18188.0,
"min": 12154.0,
"max": 31968.0,
"count": 31999
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1295.1033099340552,
"min": 1198.9833305085954,
"max": 1526.593576735275,
"count": 31998
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 209806.73620931694,
"min": 4799.166576164476,
"max": 459823.9773137702,
"count": 31998
},
"SoccerTwos.Step.mean": {
"value": 319999982.0,
"min": 9814.0,
"max": 319999982.0,
"count": 32000
},
"SoccerTwos.Step.sum": {
"value": 319999982.0,
"min": 9814.0,
"max": 319999982.0,
"count": 32000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.04039477929472923,
"min": -0.14004386961460114,
"max": 0.19318516552448273,
"count": 32000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -7.02869176864624,
"min": -33.47048568725586,
"max": 44.917381286621094,
"count": 32000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.040669914335012436,
"min": -0.14202876389026642,
"max": 0.19254568219184875,
"count": 32000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.076564788818359,
"min": -33.94487380981445,
"max": 43.32111358642578,
"count": 32000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 32000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 32000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.13182988797111073,
"min": -0.4755161693710053,
"max": 0.45742059236063676,
"count": 32000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -22.938400506973267,
"min": -84.72239971160889,
"max": 80.98259991407394,
"count": 32000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.13182988797111073,
"min": -0.4755161693710053,
"max": 0.45742059236063676,
"count": 32000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -22.938400506973267,
"min": -84.72239971160889,
"max": 80.98259991407394,
"count": 32000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 32000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 32000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01372652525678859,
"min": 0.009824401157311513,
"max": 0.02974080921073134,
"count": 15523
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01372652525678859,
"min": 0.009824401157311513,
"max": 0.02974080921073134,
"count": 15523
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0831478034456571,
"min": 0.003702893130815564,
"max": 0.09432442312439283,
"count": 15523
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0831478034456571,
"min": 0.003702893130815564,
"max": 0.09432442312439283,
"count": 15523
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08323371013005575,
"min": 0.003703783314006451,
"max": 0.09451528812448183,
"count": 15523
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08323371013005575,
"min": 0.003703783314006451,
"max": 0.09451528812448183,
"count": 15523
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 9.654059315318175e-09,
"min": 9.654059315318175e-09,
"max": 0.00029997494625835113,
"count": 15523
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 9.654059315318175e-09,
"min": 9.654059315318175e-09,
"max": 0.00029997494625835113,
"count": 15523
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.25,
"min": 0.25,
"max": 0.25,
"count": 15523
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.25,
"min": 0.25,
"max": 0.25,
"count": 15523
},
"SoccerTwos.Policy.Beta.mean": {
"value": 1.0318150281250186e-05,
"min": 1.0318150281250186e-05,
"max": 0.009999165710124998,
"count": 15523
},
"SoccerTwos.Policy.Beta.sum": {
"value": 1.0318150281250186e-05,
"min": 1.0318150281250186e-05,
"max": 0.009999165710124998,
"count": 15523
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693946942",
"python_version": "3.9.17 (main, Jul 5 2023, 20:41:20) \n[GCC 11.2.0]",
"command_line_arguments": "/home/mgmeskill/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=Downstrike-320M --no-graphics --num-envs=8",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1694209087"
},
"total": 262144.4315176448,
"count": 1,
"self": 0.27110950043424964,
"children": {
"run_training.setup": {
"total": 0.02748629217967391,
"count": 1,
"self": 0.02748629217967391
},
"TrainerController.start_learning": {
"total": 262144.1329218522,
"count": 1,
"self": 195.11936002317816,
"children": {
"TrainerController._reset_env": {
"total": 1356.6803504824638,
"count": 15631,
"self": 1356.6803504824638
},
"TrainerController.advance": {
"total": 260592.02951219212,
"count": 5308235,
"self": 61.60288550425321,
"children": {
"env_step": {
"total": 260530.42662668787,
"count": 5308235,
"self": 85563.79976889957,
"children": {
"SubprocessEnvManager._take_step": {
"total": 174811.16231037444,
"count": 29456845,
"self": 2541.197320862673,
"children": {
"TorchPolicy.evaluate": {
"total": 172269.96498951176,
"count": 53900846,
"self": 172269.96498951176
}
}
},
"workers": {
"total": 155.46454741386697,
"count": 5308235,
"self": 0.0,
"children": {
"worker_root": {
"total": 2093413.5211095153,
"count": 29425763,
"is_parallel": true,
"self": 1814096.7023123018,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.019203658681362867,
"count": 16,
"is_parallel": true,
"self": 0.004011679440736771,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.015191979240626097,
"count": 64,
"is_parallel": true,
"self": 0.015191979240626097
}
}
},
"UnityEnvironment.step": {
"total": 0.16575361602008343,
"count": 8,
"is_parallel": true,
"self": 0.005824713036417961,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.004249729681760073,
"count": 8,
"is_parallel": true,
"self": 0.004249729681760073
},
"communicator.exchange": {
"total": 0.13958576088771224,
"count": 8,
"is_parallel": true,
"self": 0.13958576088771224
},
"steps_from_proto": {
"total": 0.016093412414193153,
"count": 16,
"is_parallel": true,
"self": 0.0028776959516108036,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01321571646258235,
"count": 64,
"is_parallel": true,
"self": 0.01321571646258235
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 279010.91643110896,
"count": 29425755,
"is_parallel": true,
"self": 17052.918171677273,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11083.002196655609,
"count": 29425755,
"is_parallel": true,
"self": 11083.002196655609
},
"communicator.exchange": {
"total": 203583.36522475723,
"count": 29425755,
"is_parallel": true,
"self": 203583.36522475723
},
"steps_from_proto": {
"total": 47291.63083801884,
"count": 58851510,
"is_parallel": true,
"self": 7972.866389919538,
"children": {
"_process_rank_one_or_two_observation": {
"total": 39318.764448099304,
"count": 235406040,
"is_parallel": true,
"self": 39318.764448099304
}
}
}
}
},
"steps_from_proto": {
"total": 305.90236610453576,
"count": 250080,
"is_parallel": true,
"self": 49.52144989045337,
"children": {
"_process_rank_one_or_two_observation": {
"total": 256.3809162140824,
"count": 1000320,
"is_parallel": true,
"self": 256.3809162140824
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.0568335205316544e-05,
"count": 1,
"self": 3.0568335205316544e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 260611.0573132881,
"count": 352518542,
"is_parallel": true,
"self": 9217.686727115884,
"children": {
"process_trajectory": {
"total": 177371.24861753872,
"count": 352518542,
"is_parallel": true,
"self": 177307.83409809368,
"children": {
"RLTrainer._checkpoint": {
"total": 63.41451944503933,
"count": 160,
"is_parallel": true,
"self": 63.41451944503933
}
}
},
"_update_policy": {
"total": 74022.1219686335,
"count": 15523,
"is_parallel": true,
"self": 38076.0070996345,
"children": {
"TorchPOCAOptimizer.update": {
"total": 35946.11486899899,
"count": 465699,
"is_parallel": true,
"self": 35946.11486899899
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.30366858607158065,
"count": 1,
"self": 0.001912000123411417,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30175658594816923,
"count": 1,
"self": 0.30175658594816923
}
}
}
}
}
}
}