{}
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.717603087425232,
"min": 1.6788161993026733,
"max": 3.2957513332366943,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34187.171875,
"min": 16052.279296875,
"max": 126327.390625,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 65.91891891891892,
"min": 40.90833333333333,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19512.0,
"min": 16376.0,
"max": 23212.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1519.8814687975048,
"min": 1195.8070367856626,
"max": 1583.006886067003,
"count": 965
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 224942.4573820307,
"min": 2391.614073571325,
"max": 366637.20370691223,
"count": 965
},
"SoccerTwos.Step.mean": {
"value": 9999990.0,
"min": 9866.0,
"max": 9999990.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999990.0,
"min": 9866.0,
"max": 9999990.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.002796276705339551,
"min": -0.12312236428260803,
"max": 0.12712574005126953,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.4138489365577698,
"min": -20.1355037689209,
"max": 17.024127960205078,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.000668844673782587,
"min": -0.12191003561019897,
"max": 0.13009807467460632,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.09898900985717773,
"min": -20.114635467529297,
"max": 16.402507781982422,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.07380270635759509,
"min": -0.7474782622378805,
"max": 0.5537302299987438,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 10.922800540924072,
"min": -52.48040008544922,
"max": 48.76160025596619,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.07380270635759509,
"min": -0.7474782622378805,
"max": 0.5537302299987438,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 10.922800540924072,
"min": -52.48040008544922,
"max": 48.76160025596619,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01895880136289634,
"min": 0.011202606263880928,
"max": 0.024093158706091345,
"count": 482
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01895880136289634,
"min": 0.011202606263880928,
"max": 0.024093158706091345,
"count": 482
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09659117385745049,
"min": 2.6665867721931137e-06,
"max": 0.12355347375075022,
"count": 482
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09659117385745049,
"min": 2.6665867721931137e-06,
"max": 0.12355347375075022,
"count": 482
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09720723554491997,
"min": 2.336154261683987e-06,
"max": 0.12586785554885865,
"count": 482
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09720723554491997,
"min": 2.336154261683987e-06,
"max": 0.12586785554885865,
"count": 482
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 482
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 482
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 482
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 482
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 482
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 482
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695743592",
"python_version": "3.9.18 (main, Sep 11 2023, 08:25:10) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/rodrigo/.miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos-10M --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1695776242"
},
"total": 32650.11854575,
"count": 1,
"self": 0.3439869159992668,
"children": {
"run_training.setup": {
"total": 0.011776958999999976,
"count": 1,
"self": 0.011776958999999976
},
"TrainerController.start_learning": {
"total": 32649.762781875,
"count": 1,
"self": 7.509672714917542,
"children": {
"TrainerController._reset_env": {
"total": 5.307874040000537,
"count": 50,
"self": 5.307874040000537
},
"TrainerController.advance": {
"total": 32636.861919786083,
"count": 679560,
"self": 6.5365623659672565,
"children": {
"env_step": {
"total": 24771.07300220724,
"count": 679560,
"self": 23659.283091195626,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1106.9024087561663,
"count": 679560,
"self": 29.317578511510646,
"children": {
"TorchPolicy.evaluate": {
"total": 1077.5848302446557,
"count": 1260818,
"self": 1077.5848302446557
}
}
},
"workers": {
"total": 4.88750225544522,
"count": 679560,
"self": 0.0,
"children": {
"worker_root": {
"total": 32634.30448375009,
"count": 679560,
"is_parallel": true,
"self": 9931.002246671229,
"children": {
"steps_from_proto": {
"total": 0.059732371998689526,
"count": 100,
"is_parallel": true,
"self": 0.008212115002559806,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.05152025699612972,
"count": 400,
"is_parallel": true,
"self": 0.05152025699612972
}
}
},
"UnityEnvironment.step": {
"total": 22703.24250470686,
"count": 679560,
"is_parallel": true,
"self": 68.39258318754946,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 359.75749916194235,
"count": 679560,
"is_parallel": true,
"self": 359.75749916194235
},
"communicator.exchange": {
"total": 21507.914250131762,
"count": 679560,
"is_parallel": true,
"self": 21507.914250131762
},
"steps_from_proto": {
"total": 767.1781722256057,
"count": 1359120,
"is_parallel": true,
"self": 104.3607484637464,
"children": {
"_process_rank_one_or_two_observation": {
"total": 662.8174237618593,
"count": 5436480,
"is_parallel": true,
"self": 662.8174237618593
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 7859.252355212875,
"count": 679560,
"self": 62.63282610139413,
"children": {
"process_trajectory": {
"total": 1589.012163224458,
"count": 679560,
"self": 1587.0567692204652,
"children": {
"RLTrainer._checkpoint": {
"total": 1.955394003992751,
"count": 20,
"self": 1.955394003992751
}
}
},
"_update_policy": {
"total": 6207.607365887023,
"count": 482,
"self": 767.7746994859108,
"children": {
"TorchPOCAOptimizer.update": {
"total": 5439.832666401112,
"count": 14460,
"self": 5439.832666401112
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.339991963002831e-07,
"count": 1,
"self": 3.339991963002831e-07
},
"TrainerController._save_models": {
"total": 0.08331499999985681,
"count": 1,
"self": 0.0006719160010106862,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08264308399884612,
"count": 1,
"self": 0.08264308399884612
}
}
}
}
}
}
}