{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6004561185836792,
"min": 1.5689618587493896,
"max": 3.295738697052002,
"count": 1024
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 30831.1875,
"min": 28175.873046875,
"max": 130293.6328125,
"count": 1024
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 68.30555555555556,
"min": 41.49152542372882,
"max": 999.0,
"count": 1024
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19672.0,
"min": 12472.0,
"max": 26544.0,
"count": 1024
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1652.450874411084,
"min": 1198.639780578417,
"max": 1658.6088839985746,
"count": 1006
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 237952.9259151961,
"min": 2397.279561156834,
"max": 375913.0378456756,
"count": 1006
},
"SoccerTwos.Step.mean": {
"value": 10239970.0,
"min": 9208.0,
"max": 10239970.0,
"count": 1024
},
"SoccerTwos.Step.sum": {
"value": 10239970.0,
"min": 9208.0,
"max": 10239970.0,
"count": 1024
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.016233595088124275,
"min": -0.11867652088403702,
"max": 0.14858490228652954,
"count": 1024
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.3376376628875732,
"min": -25.562240600585938,
"max": 24.890968322753906,
"count": 1024
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.01581796631217003,
"min": -0.11802703142166138,
"max": 0.14724300801753998,
"count": 1024
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.277787208557129,
"min": -25.596546173095703,
"max": 24.887928009033203,
"count": 1024
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1024
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1024
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.032191665636168584,
"min": -0.5714285714285714,
"max": 0.5155400015413761,
"count": 1024
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -4.635599851608276,
"min": -58.455799877643585,
"max": 55.565599858760834,
"count": 1024
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.032191665636168584,
"min": -0.5714285714285714,
"max": 0.5155400015413761,
"count": 1024
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -4.635599851608276,
"min": -58.455799877643585,
"max": 55.565599858760834,
"count": 1024
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1024
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1024
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019603986079649378,
"min": 0.009950978774577379,
"max": 0.025017476819145183,
"count": 494
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019603986079649378,
"min": 0.009950978774577379,
"max": 0.025017476819145183,
"count": 494
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08428130572040876,
"min": 8.73256238567895e-06,
"max": 0.12897534494598706,
"count": 494
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08428130572040876,
"min": 8.73256238567895e-06,
"max": 0.12897534494598706,
"count": 494
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08536892284949621,
"min": 8.60937705814043e-06,
"max": 0.13150523404280345,
"count": 494
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08536892284949621,
"min": 8.60937705814043e-06,
"max": 0.13150523404280345,
"count": 494
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 494
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 494
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 494
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 494
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 494
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 494
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677796003",
"python_version": "3.9.16 (main, Mar 1 2023, 18:30:21) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\Dtoma\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1677842865"
},
"total": 46862.5396648,
"count": 1,
"self": 2.5128371999962837,
"children": {
"run_training.setup": {
"total": 0.15924870000000002,
"count": 1,
"self": 0.15924870000000002
},
"TrainerController.start_learning": {
"total": 46859.867578900004,
"count": 1,
"self": 20.798509800530155,
"children": {
"TrainerController._reset_env": {
"total": 7.358011199997971,
"count": 52,
"self": 7.358011199997971
},
"TrainerController.advance": {
"total": 46831.613211199474,
"count": 704830,
"self": 22.408079000124417,
"children": {
"env_step": {
"total": 17365.559516898127,
"count": 704830,
"self": 13911.356851894923,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3440.487492101329,
"count": 704830,
"self": 128.03230620016757,
"children": {
"TorchPolicy.evaluate": {
"total": 3312.4551859011613,
"count": 1289232,
"self": 3312.4551859011613
}
}
},
"workers": {
"total": 13.715172901875253,
"count": 704830,
"self": 0.0,
"children": {
"worker_root": {
"total": 46813.56796740158,
"count": 704830,
"is_parallel": true,
"self": 35404.89834589947,
"children": {
"steps_from_proto": {
"total": 0.12525900000167578,
"count": 104,
"is_parallel": true,
"self": 0.028831699977910574,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0964273000237652,
"count": 416,
"is_parallel": true,
"self": 0.0964273000237652
}
}
},
"UnityEnvironment.step": {
"total": 11408.544362502105,
"count": 704830,
"is_parallel": true,
"self": 627.7944330064365,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 597.4331059980443,
"count": 704830,
"is_parallel": true,
"self": 597.4331059980443
},
"communicator.exchange": {
"total": 8147.493739900348,
"count": 704830,
"is_parallel": true,
"self": 8147.493739900348
},
"steps_from_proto": {
"total": 2035.8230835972754,
"count": 1409660,
"is_parallel": true,
"self": 407.1948481040845,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1628.628235493191,
"count": 5638640,
"is_parallel": true,
"self": 1628.628235493191
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 29443.645615301222,
"count": 704830,
"self": 140.8990603024613,
"children": {
"process_trajectory": {
"total": 4343.400427598707,
"count": 704830,
"self": 4339.3005863987055,
"children": {
"RLTrainer._checkpoint": {
"total": 4.0998412000021744,
"count": 20,
"self": 4.0998412000021744
}
}
},
"_update_policy": {
"total": 24959.346127400055,
"count": 495,
"self": 1915.2646575000763,
"children": {
"TorchPOCAOptimizer.update": {
"total": 23044.08146989998,
"count": 14830,
"self": 23044.08146989998
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.0978467000022647,
"count": 1,
"self": 4.020000051241368e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09780650000175228,
"count": 1,
"self": 0.09780650000175228
}
}
}
}
}
}
}