{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9299023747444153,
"min": 0.9299023747444153,
"max": 2.863887310028076,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8888.9365234375,
"min": 8888.9365234375,
"max": 29329.0703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.885068893432617,
"min": 0.22357647120952606,
"max": 12.885068893432617,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2512.58837890625,
"min": 43.373836517333984,
"max": 2602.298828125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06767243981691232,
"min": 0.0641197801094534,
"max": 0.07360546473030219,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27068975926764927,
"min": 0.2564791204378136,
"max": 0.368027323651511,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20591389753070533,
"min": 0.10107999989379415,
"max": 0.2848902639805102,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8236555901228213,
"min": 0.4043199995751766,
"max": 1.3331485992553187,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.386363636363637,
"min": 2.727272727272727,
"max": 25.386363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1117.0,
"min": 120.0,
"max": 1393.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.386363636363637,
"min": 2.727272727272727,
"max": 25.386363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1117.0,
"min": 120.0,
"max": 1393.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674266452",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674266867"
},
"total": 414.496731017,
"count": 1,
"self": 0.38361999899996135,
"children": {
"run_training.setup": {
"total": 0.1003757810000252,
"count": 1,
"self": 0.1003757810000252
},
"TrainerController.start_learning": {
"total": 414.012735237,
"count": 1,
"self": 0.5157598260078089,
"children": {
"TrainerController._reset_env": {
"total": 9.643348516000003,
"count": 1,
"self": 9.643348516000003
},
"TrainerController.advance": {
"total": 403.7391559869923,
"count": 18202,
"self": 0.2481656039894915,
"children": {
"env_step": {
"total": 403.4909903830028,
"count": 18202,
"self": 263.46619493400556,
"children": {
"SubprocessEnvManager._take_step": {
"total": 139.78136114000063,
"count": 18202,
"self": 1.3397556010125413,
"children": {
"TorchPolicy.evaluate": {
"total": 138.44160553898809,
"count": 18202,
"self": 30.21148550098468,
"children": {
"TorchPolicy.sample_actions": {
"total": 108.2301200380034,
"count": 18202,
"self": 108.2301200380034
}
}
}
}
},
"workers": {
"total": 0.2434343089965978,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 412.85006339200294,
"count": 18202,
"is_parallel": true,
"self": 199.8577089189925,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005952465999996548,
"count": 1,
"is_parallel": true,
"self": 0.0036241619999941577,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023283040000023902,
"count": 10,
"is_parallel": true,
"self": 0.0023283040000023902
}
}
},
"UnityEnvironment.step": {
"total": 0.03297752200001014,
"count": 1,
"is_parallel": true,
"self": 0.0004651909999893178,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039715500003012494,
"count": 1,
"is_parallel": true,
"self": 0.00039715500003012494
},
"communicator.exchange": {
"total": 0.030838921999986724,
"count": 1,
"is_parallel": true,
"self": 0.030838921999986724
},
"steps_from_proto": {
"total": 0.0012762540000039735,
"count": 1,
"is_parallel": true,
"self": 0.0003849100000365979,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008913439999673756,
"count": 10,
"is_parallel": true,
"self": 0.0008913439999673756
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 212.99235447301044,
"count": 18201,
"is_parallel": true,
"self": 8.170473316000937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.696459468001024,
"count": 18201,
"is_parallel": true,
"self": 4.696459468001024
},
"communicator.exchange": {
"total": 171.300057618011,
"count": 18201,
"is_parallel": true,
"self": 171.300057618011
},
"steps_from_proto": {
"total": 28.825364070997466,
"count": 18201,
"is_parallel": true,
"self": 6.135909955009538,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.689454115987928,
"count": 182010,
"is_parallel": true,
"self": 22.689454115987928
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.176799993478198e-05,
"count": 1,
"self": 5.176799993478198e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 400.9303810429818,
"count": 337480,
"is_parallel": true,
"self": 8.645226317946253,
"children": {
"process_trajectory": {
"total": 230.75277458103528,
"count": 337480,
"is_parallel": true,
"self": 230.04781827503518,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7049563060000992,
"count": 4,
"is_parallel": true,
"self": 0.7049563060000992
}
}
},
"_update_policy": {
"total": 161.53238014400029,
"count": 90,
"is_parallel": true,
"self": 43.073043960002224,
"children": {
"TorchPPOOptimizer.update": {
"total": 118.45933618399806,
"count": 4587,
"is_parallel": true,
"self": 118.45933618399806
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11441913999999542,
"count": 1,
"self": 0.0012309629998981109,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11318817700009731,
"count": 1,
"self": 0.11318817700009731
}
}
}
}
}
}
}