{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.5227997303009033,
"min": 2.495026111602783,
"max": 3.295741081237793,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 49245.05078125,
"min": 13297.7265625,
"max": 149981.515625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 63.09090909090909,
"min": 48.42424242424242,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19432.0,
"min": 11988.0,
"max": 26588.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1394.8453531361693,
"min": 1179.2109752393976,
"max": 1394.8453531361693,
"count": 380
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 214806.18438297007,
"min": 2362.70982351722,
"max": 274337.56381041656,
"count": 380
},
"SoccerTwos.Step.mean": {
"value": 4999945.0,
"min": 9310.0,
"max": 4999945.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999945.0,
"min": 9310.0,
"max": 4999945.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.05446227267384529,
"min": -0.025410765781998634,
"max": 0.17665481567382812,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 8.441652297973633,
"min": -3.262157440185547,
"max": 29.420944213867188,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.06463473290205002,
"min": -0.02619282342493534,
"max": 0.1755588799715042,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 10.018383979797363,
"min": -3.447781801223755,
"max": 29.843914031982422,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.2105161286169483,
"min": -0.5,
"max": 0.5013902405413185,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 32.629999935626984,
"min": -34.82779997587204,
"max": 57.70359981060028,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.2105161286169483,
"min": -0.5,
"max": 0.5013902405413185,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 32.629999935626984,
"min": -34.82779997587204,
"max": 57.70359981060028,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014238524492490493,
"min": 0.01136601523697512,
"max": 0.022922508319607005,
"count": 235
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014238524492490493,
"min": 0.01136601523697512,
"max": 0.022922508319607005,
"count": 235
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08317695582906405,
"min": 2.1802457676282452e-07,
"max": 0.08748890310525895,
"count": 235
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08317695582906405,
"min": 2.1802457676282452e-07,
"max": 0.08748890310525895,
"count": 235
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0856165091196696,
"min": 2.268967856859187e-07,
"max": 0.09040030663212141,
"count": 235
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0856165091196696,
"min": 2.268967856859187e-07,
"max": 0.09040030663212141,
"count": 235
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 235
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 235
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 235
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 235
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 235
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 235
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1746911009",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Damia\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./training-envs-executables/config.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1746933652"
},
"total": 22642.345460199984,
"count": 1,
"self": 2.3191181001020595,
"children": {
"run_training.setup": {
"total": 0.3814141999464482,
"count": 1,
"self": 0.3814141999464482
},
"TrainerController.start_learning": {
"total": 22639.644927899935,
"count": 1,
"self": 14.197132005356252,
"children": {
"TrainerController._reset_env": {
"total": 16.902062799897976,
"count": 25,
"self": 16.902062799897976
},
"TrainerController.advance": {
"total": 22608.34223879478,
"count": 327990,
"self": 15.136588304303586,
"children": {
"env_step": {
"total": 11338.499855270144,
"count": 327990,
"self": 8979.239047623007,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2350.448049331666,
"count": 327990,
"self": 84.43004472041503,
"children": {
"TorchPolicy.evaluate": {
"total": 2266.018004611251,
"count": 642236,
"self": 2266.018004611251
}
}
},
"workers": {
"total": 8.812758315471001,
"count": 327990,
"self": 0.0,
"children": {
"worker_root": {
"total": 22608.122430981835,
"count": 327990,
"is_parallel": true,
"self": 15464.97406711441,
"children": {
"steps_from_proto": {
"total": 0.08822700043674558,
"count": 50,
"is_parallel": true,
"self": 0.018342798692174256,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.06988420174457133,
"count": 200,
"is_parallel": true,
"self": 0.06988420174457133
}
}
},
"UnityEnvironment.step": {
"total": 7143.060136866989,
"count": 327990,
"is_parallel": true,
"self": 382.1356029380113,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 349.1365725407377,
"count": 327990,
"is_parallel": true,
"self": 349.1365725407377
},
"communicator.exchange": {
"total": 5122.170849552611,
"count": 327990,
"is_parallel": true,
"self": 5122.170849552611
},
"steps_from_proto": {
"total": 1289.6171118356287,
"count": 655980,
"is_parallel": true,
"self": 258.73005398595706,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1030.8870578496717,
"count": 2623920,
"is_parallel": true,
"self": 1030.8870578496717
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 11254.705795220332,
"count": 327990,
"self": 90.2970273118699,
"children": {
"process_trajectory": {
"total": 1659.194473409094,
"count": 327990,
"self": 1656.8160068093566,
"children": {
"RLTrainer._checkpoint": {
"total": 2.378466599737294,
"count": 10,
"self": 2.378466599737294
}
}
},
"_update_policy": {
"total": 9505.214294499368,
"count": 235,
"self": 1130.8358330983901,
"children": {
"TorchPOCAOptimizer.update": {
"total": 8374.378461400978,
"count": 7056,
"self": 8374.378461400978
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.200009137392044e-06,
"count": 1,
"self": 1.200009137392044e-06
},
"TrainerController._save_models": {
"total": 0.20349309989251196,
"count": 1,
"self": 0.017519299872219563,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1859738000202924,
"count": 1,
"self": 0.1859738000202924
}
}
}
}
}
}
}