{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.4907772541046143,
"min": 1.1685776710510254,
"max": 3.1593832969665527,
"count": 99
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 245570.71875,
"min": 107051.890625,
"max": 366161.34375,
"count": 99
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 984.2692307692307,
"min": 664.921052631579,
"max": 999.0,
"count": 99
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 102364.0,
"min": 63904.0,
"max": 121016.0,
"count": 99
},
"SoccerTwos.Step.mean": {
"value": 9999542.0,
"min": 5099622.0,
"max": 9999542.0,
"count": 99
},
"SoccerTwos.Step.sum": {
"value": 9999542.0,
"min": 5099622.0,
"max": 9999542.0,
"count": 99
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0001826496300054714,
"min": -0.01437555905431509,
"max": 0.017843715846538544,
"count": 99
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.009315131232142448,
"min": -0.8481580018997192,
"max": 0.9278731942176819,
"count": 99
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.00024330221640411764,
"min": -0.013751156628131866,
"max": 0.018771987408399582,
"count": 99
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.012408412992954254,
"min": -0.8792648315429688,
"max": 0.9761433005332947,
"count": 99
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 99
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 99
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.0392156862745098,
"min": -0.2734444447689586,
"max": 0.07507936538211883,
"count": 99
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.0,
"min": -19.68800002336502,
"max": 4.730000019073486,
"count": 99
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.0392156862745098,
"min": -0.2734444447689586,
"max": 0.07507936538211883,
"count": 99
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.0,
"min": -19.68800002336502,
"max": 4.730000019073486,
"count": 99
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 99
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 99
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017561657571786782,
"min": 0.01264058340683126,
"max": 0.019526855821762487,
"count": 98
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.035123315143573565,
"min": 0.013353993123746476,
"max": 0.05705379659581619,
"count": 98
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.00041136964250351184,
"min": 3.481565622218439e-10,
"max": 0.0038090936238101376,
"count": 98
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0008227392850070237,
"min": 6.220758618101661e-10,
"max": 0.010206666128942743,
"count": 98
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.00041130511440758594,
"min": 3.4311148498006936e-10,
"max": 0.0038083288168612246,
"count": 98
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0008226102288151719,
"min": 6.582947282963837e-10,
"max": 0.010377476842647108,
"count": 98
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 98
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0006,
"min": 0.0003,
"max": 0.0009,
"count": 98
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 98
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.40000000000000013,
"min": 0.19999999999999993,
"max": 0.6000000000000002,
"count": 98
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 98
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.010000000000000002,
"min": 0.005,
"max": 0.015000000000000003,
"count": 98
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1223.7534638982459,
"min": 1210.1671495708435,
"max": 1226.8819693882956,
"count": 32
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2447.5069277964917,
"min": 2420.334299141687,
"max": 41242.957665294176,
"count": 32
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1765546652",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:40:32) [GCC 12.3.0]",
"command_line_arguments": "/zhome/b6/d/203017/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1765554821"
},
"total": 8169.391451666976,
"count": 1,
"self": 0.3792129899957217,
"children": {
"run_training.setup": {
"total": 0.05216734399436973,
"count": 1,
"self": 0.05216734399436973
},
"TrainerController.start_learning": {
"total": 8168.9600713329855,
"count": 1,
"self": 6.423729272006312,
"children": {
"TrainerController._reset_env": {
"total": 6.558995136030717,
"count": 26,
"self": 6.558995136030717
},
"TrainerController.advance": {
"total": 8155.0617587179295,
"count": 317123,
"self": 6.362516603490803,
"children": {
"env_step": {
"total": 6535.787821058155,
"count": 317123,
"self": 4537.74635747078,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1994.202184700087,
"count": 317123,
"self": 46.02341712030466,
"children": {
"TorchPolicy.evaluate": {
"total": 1948.1787675797823,
"count": 630866,
"self": 1948.1787675797823
}
}
},
"workers": {
"total": 3.8392788872879464,
"count": 317123,
"self": 0.0,
"children": {
"worker_root": {
"total": 8155.599799093412,
"count": 317123,
"is_parallel": true,
"self": 4466.816637556563,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004833491984754801,
"count": 2,
"is_parallel": true,
"self": 0.0027776629431173205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020558290416374803,
"count": 8,
"is_parallel": true,
"self": 0.0020558290416374803
}
}
},
"UnityEnvironment.step": {
"total": 0.029922885005362332,
"count": 1,
"is_parallel": true,
"self": 0.0007064379460643977,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005496130033861846,
"count": 1,
"is_parallel": true,
"self": 0.0005496130033861846
},
"communicator.exchange": {
"total": 0.026616044022375718,
"count": 1,
"is_parallel": true,
"self": 0.026616044022375718
},
"steps_from_proto": {
"total": 0.002050790033536032,
"count": 2,
"is_parallel": true,
"self": 0.0004255690728314221,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016252209607046098,
"count": 8,
"is_parallel": true,
"self": 0.0016252209607046098
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.04827410803409293,
"count": 50,
"is_parallel": true,
"self": 0.009163186972727999,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.039110921061364934,
"count": 200,
"is_parallel": true,
"self": 0.039110921061364934
}
}
},
"UnityEnvironment.step": {
"total": 3688.7348874288145,
"count": 317122,
"is_parallel": true,
"self": 186.91520221577957,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 129.14363695765496,
"count": 317122,
"is_parallel": true,
"self": 129.14363695765496
},
"communicator.exchange": {
"total": 2813.859523506253,
"count": 317122,
"is_parallel": true,
"self": 2813.859523506253
},
"steps_from_proto": {
"total": 558.816524749127,
"count": 634244,
"is_parallel": true,
"self": 101.99654760936392,
"children": {
"_process_rank_one_or_two_observation": {
"total": 456.81997713976307,
"count": 2536976,
"is_parallel": true,
"self": 456.81997713976307
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1612.9114210562839,
"count": 317123,
"self": 66.02497788210167,
"children": {
"process_trajectory": {
"total": 460.47966692826594,
"count": 317123,
"self": 451.7735055142839,
"children": {
"RLTrainer._checkpoint": {
"total": 8.706161413982045,
"count": 10,
"self": 8.706161413982045
}
}
},
"_update_policy": {
"total": 1086.4067762459163,
"count": 208,
"self": 692.196635984903,
"children": {
"TorchPOCAOptimizer.update": {
"total": 394.2101402610133,
"count": 6801,
"self": 394.2101402610133
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.190116543322802e-07,
"count": 1,
"self": 9.190116543322802e-07
},
"TrainerController._save_models": {
"total": 0.915587288007373,
"count": 1,
"self": 0.03353284898912534,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8820544390182476,
"count": 1,
"self": 0.8820544390182476
}
}
}
}
}
}
}