{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1925978660583496,
"min": 3.187406301498413,
"max": 3.2008535861968994,
"count": 5
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 67938.484375,
"min": 20216.755859375,
"max": 86243.796875,
"count": 5
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 971.8,
"min": 196.0,
"max": 971.8,
"count": 5
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19436.0,
"min": 784.0,
"max": 25908.0,
"count": 5
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1213.4479915604804,
"min": 1213.4479915604804,
"max": 1214.4592805977913,
"count": 5
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2426.895983120961,
"min": 2426.895983120961,
"max": 9715.67424478233,
"count": 5
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SoccerTwos.Step.mean": {
"value": 539170.0,
"min": 509240.0,
"max": 539170.0,
"count": 4
},
"SoccerTwos.Step.sum": {
"value": 539170.0,
"min": 509240.0,
"max": 539170.0,
"count": 4
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.010469825938344002,
"min": 0.010469825938344002,
"max": 0.011959057301282883,
"count": 4
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.10469825565814972,
"min": 0.10469825565814972,
"max": 0.1828768104314804,
"count": 4
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.010252533480525017,
"min": 0.010252533480525017,
"max": 0.011734726838767529,
"count": 4
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.10252533107995987,
"min": 0.10252533107995987,
"max": 0.17864379286766052,
"count": 4
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 4
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 4
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.02720000147819519,
"min": -0.2857142857142857,
"max": 0.03382500261068344,
"count": 4
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.2720000147819519,
"min": -4.0,
"max": 0.5412000417709351,
"count": 4
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.02720000147819519,
"min": -0.2857142857142857,
"max": 0.03382500261068344,
"count": 4
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.2720000147819519,
"min": -4.0,
"max": 0.5412000417709351,
"count": 4
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019564568462859217,
"min": 0.019564568462859217,
"max": 0.019564568462859217,
"count": 1
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019564568462859217,
"min": 0.019564568462859217,
"max": 0.019564568462859217,
"count": 1
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0042441216297447685,
"min": 0.0042441216297447685,
"max": 0.0042441216297447685,
"count": 1
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0042441216297447685,
"min": 0.0042441216297447685,
"max": 0.0042441216297447685,
"count": 1
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.004248970032980044,
"min": 0.004248970032980044,
"max": 0.004248970032980044,
"count": 1
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.004248970032980044,
"min": 0.004248970032980044,
"max": 0.004248970032980044,
"count": 1
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 1
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 1
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 1
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1770247968",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/poca/SoccerTwos.yaml --env=/content/ml-agents/training-envs-executables/linux/SoccerTwos.x86_64 --run-id=SoccerTwos --resume --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1770248259"
},
"total": 290.4850549319999,
"count": 1,
"self": 0.589065119001134,
"children": {
"run_training.setup": {
"total": 0.03679770799953985,
"count": 1,
"self": 0.03679770799953985
},
"TrainerController.start_learning": {
"total": 289.85919210499924,
"count": 1,
"self": 0.1604777939928681,
"children": {
"TrainerController._reset_env": {
"total": 2.400628872001107,
"count": 2,
"self": 2.400628872001107
},
"TrainerController.advance": {
"total": 287.297913461005,
"count": 3622,
"self": 0.1819868359962129,
"children": {
"env_step": {
"total": 142.40893211202547,
"count": 3622,
"self": 115.82877224097865,
"children": {
"SubprocessEnvManager._take_step": {
"total": 26.48407874102213,
"count": 3622,
"self": 0.9992314409637402,
"children": {
"TorchPolicy.evaluate": {
"total": 25.48484730005839,
"count": 7198,
"self": 25.48484730005839
}
}
},
"workers": {
"total": 0.09608113002468599,
"count": 3621,
"self": 0.0,
"children": {
"worker_root": {
"total": 289.2381557519957,
"count": 3621,
"is_parallel": true,
"self": 192.17278199397242,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003643929000645585,
"count": 2,
"is_parallel": true,
"self": 0.0009266570004911046,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0027172720001544803,
"count": 8,
"is_parallel": true,
"self": 0.0027172720001544803
}
}
},
"UnityEnvironment.step": {
"total": 0.05352011200011475,
"count": 1,
"is_parallel": true,
"self": 0.001417980000951502,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.001037334000102419,
"count": 1,
"is_parallel": true,
"self": 0.001037334000102419
},
"communicator.exchange": {
"total": 0.04684423999970022,
"count": 1,
"is_parallel": true,
"self": 0.04684423999970022
},
"steps_from_proto": {
"total": 0.004220557999360608,
"count": 2,
"is_parallel": true,
"self": 0.0007840279986339738,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003436530000726634,
"count": 8,
"is_parallel": true,
"self": 0.003436530000726634
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.0023906520000309683,
"count": 2,
"is_parallel": true,
"self": 0.0004921380004816456,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018985139995493228,
"count": 8,
"is_parallel": true,
"self": 0.0018985139995493228
}
}
},
"UnityEnvironment.step": {
"total": 97.06298310602324,
"count": 3620,
"is_parallel": true,
"self": 5.7845666900011565,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.8399442199870464,
"count": 3620,
"is_parallel": true,
"self": 3.8399442199870464
},
"communicator.exchange": {
"total": 69.21014601205388,
"count": 3620,
"is_parallel": true,
"self": 69.21014601205388
},
"steps_from_proto": {
"total": 18.228326183981153,
"count": 7240,
"is_parallel": true,
"self": 3.559679079910893,
"children": {
"_process_rank_one_or_two_observation": {
"total": 14.66864710407026,
"count": 28960,
"is_parallel": true,
"self": 14.66864710407026
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 144.70699451298333,
"count": 3621,
"self": 1.1028663920324107,
"children": {
"process_trajectory": {
"total": 21.67039900895088,
"count": 3621,
"self": 21.269432657951256,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4009663509996244,
"count": 1,
"self": 0.4009663509996244
}
}
},
"_update_policy": {
"total": 121.93372911200004,
"count": 2,
"self": 8.84772107099434,
"children": {
"TorchPOCAOptimizer.update": {
"total": 113.0860080410057,
"count": 60,
"self": 113.0860080410057
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.7059995773015544e-06,
"count": 1,
"self": 2.7059995773015544e-06
},
"TrainerController._save_models": {
"total": 0.00016927200067584636,
"count": 1,
"self": 9.028200020111399e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 7.899000047473237e-05,
"count": 1,
"self": 7.899000047473237e-05
}
}
}
}
}
}
}