{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.2184481620788574,
"min": 2.180907726287842,
"max": 3.1490228176116943,
"count": 253
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 46569.6640625,
"min": 20782.927734375,
"max": 103459.953125,
"count": 253
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 47.20192307692308,
"min": 45.074074074074076,
"max": 999.0,
"count": 253
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19636.0,
"min": 6152.0,
"max": 30616.0,
"count": 253
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1425.1396089577818,
"min": 1175.7707974576704,
"max": 1440.0699810201352,
"count": 248
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 296429.0386632186,
"min": 2351.5415949153407,
"max": 313370.8677291428,
"count": 248
},
"SoccerTwos.Step.mean": {
"value": 5049995.0,
"min": 2529169.0,
"max": 5049995.0,
"count": 253
},
"SoccerTwos.Step.sum": {
"value": 5049995.0,
"min": 2529169.0,
"max": 5049995.0,
"count": 253
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.035185787826776505,
"min": -0.06764743477106094,
"max": 0.2115151286125183,
"count": 253
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 7.318643569946289,
"min": -13.326544761657715,
"max": 25.434736251831055,
"count": 253
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.029680820181965828,
"min": -0.07265296578407288,
"max": 0.2047213315963745,
"count": 253
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 6.173610687255859,
"min": -14.312634468078613,
"max": 26.4185791015625,
"count": 253
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 253
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 253
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.01409807801246643,
"min": -0.5,
"max": 0.7250105268076846,
"count": 253
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.9324002265930176,
"min": -38.029199957847595,
"max": 96.42640006542206,
"count": 253
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.01409807801246643,
"min": -0.5,
"max": 0.7250105268076846,
"count": 253
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.9324002265930176,
"min": -38.029199957847595,
"max": 96.42640006542206,
"count": 253
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 253
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 253
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.02039484987423445,
"min": 0.01162280278513208,
"max": 0.022203327886139355,
"count": 120
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.02039484987423445,
"min": 0.01162280278513208,
"max": 0.022203327886139355,
"count": 120
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10660565147797267,
"min": 0.00011941230356266412,
"max": 0.11658073390523592,
"count": 120
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10660565147797267,
"min": 0.00011941230356266412,
"max": 0.11658073390523592,
"count": 120
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10927488754192988,
"min": 0.00011765284604431751,
"max": 0.11751306727528572,
"count": 120
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10927488754192988,
"min": 0.00011765284604431751,
"max": 0.11751306727528572,
"count": 120
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 120
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 120
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 120
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 120
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 120
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 120
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678887565",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\Pakanun\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1678896694"
},
"total": 9128.9087821,
"count": 1,
"self": 0.20200159999876632,
"children": {
"run_training.setup": {
"total": 0.11179719999999982,
"count": 1,
"self": 0.11179719999999982
},
"TrainerController.start_learning": {
"total": 9128.594983300001,
"count": 1,
"self": 4.3544153001294035,
"children": {
"TrainerController._reset_env": {
"total": 4.89162420000041,
"count": 14,
"self": 4.89162420000041
},
"TrainerController.advance": {
"total": 9119.197556199872,
"count": 171145,
"self": 4.48981920037113,
"children": {
"env_step": {
"total": 3028.9736806997385,
"count": 171145,
"self": 2340.1365173996774,
"children": {
"SubprocessEnvManager._take_step": {
"total": 685.9917779002435,
"count": 171145,
"self": 24.994575899840584,
"children": {
"TorchPolicy.evaluate": {
"total": 660.9972020004029,
"count": 319826,
"self": 660.9972020004029
}
}
},
"workers": {
"total": 2.8453853998177774,
"count": 171144,
"self": 0.0,
"children": {
"worker_root": {
"total": 9117.869028600171,
"count": 171144,
"is_parallel": true,
"self": 7258.454243600431,
"children": {
"steps_from_proto": {
"total": 0.02397350000262044,
"count": 28,
"is_parallel": true,
"self": 0.005540500008457627,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.018432999994162813,
"count": 112,
"is_parallel": true,
"self": 0.018432999994162813
}
}
},
"UnityEnvironment.step": {
"total": 1859.3908114997375,
"count": 171144,
"is_parallel": true,
"self": 96.9284139002807,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.05341519989017,
"count": 171144,
"is_parallel": true,
"self": 82.05341519989017
},
"communicator.exchange": {
"total": 1369.1945277999705,
"count": 171144,
"is_parallel": true,
"self": 1369.1945277999705
},
"steps_from_proto": {
"total": 311.214454599596,
"count": 342288,
"is_parallel": true,
"self": 65.7823995000432,
"children": {
"_process_rank_one_or_two_observation": {
"total": 245.43205509955283,
"count": 1369152,
"is_parallel": true,
"self": 245.43205509955283
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 6085.734056299762,
"count": 171144,
"self": 31.451612799762188,
"children": {
"process_trajectory": {
"total": 811.3208516999999,
"count": 171144,
"self": 810.5837393000003,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7371123999996598,
"count": 5,
"self": 0.7371123999996598
}
}
},
"_update_policy": {
"total": 5242.9615918,
"count": 120,
"self": 396.02218710003217,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4846.939404699968,
"count": 3612,
"self": 4846.939404699968
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5000005078036338e-06,
"count": 1,
"self": 1.5000005078036338e-06
},
"TrainerController._save_models": {
"total": 0.15138610000030894,
"count": 1,
"self": 0.010110200000781333,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1412758999995276,
"count": 1,
"self": 0.1412758999995276
}
}
}
}
}
}
}