{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8616356253623962,
"min": 0.8616356253623962,
"max": 2.86403751373291,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8236.375,
"min": 8236.375,
"max": 29330.607421875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.897601127624512,
"min": 0.3083813190460205,
"max": 12.897601127624512,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2515.0322265625,
"min": 59.82597351074219,
"max": 2626.427490234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0689021427973,
"min": 0.061883377569143236,
"max": 0.07568563430515282,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2756085711892,
"min": 0.2620073739626427,
"max": 0.35519638217916577,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22605982152562515,
"min": 0.10783512277674734,
"max": 0.2796038035266832,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9042392861025006,
"min": 0.43134049110698935,
"max": 1.398019017633416,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.0013852560382480001,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.961752,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.0230914248,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.681818181818183,
"min": 3.0681818181818183,
"max": 25.745454545454546,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1086.0,
"min": 135.0,
"max": 1416.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.681818181818183,
"min": 3.0681818181818183,
"max": 25.745454545454546,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1086.0,
"min": 135.0,
"max": 1416.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679756576",
"python_version": "3.9.9 | packaged by conda-forge | (main, Dec 20 2021, 02:40:17) \n[GCC 9.4.0]",
"command_line_arguments": "/home/nazar/anaconda3/envs/ml-agents/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.20.0",
"end_time_seconds": "1679757443"
},
"total": 866.885315903,
"count": 1,
"self": 0.438024000999917,
"children": {
"run_training.setup": {
"total": 0.024729399999955604,
"count": 1,
"self": 0.024729399999955604
},
"TrainerController.start_learning": {
"total": 866.4225625020001,
"count": 1,
"self": 0.7559212109869122,
"children": {
"TrainerController._reset_env": {
"total": 6.981739578999964,
"count": 1,
"self": 6.981739578999964
},
"TrainerController.advance": {
"total": 858.5370725120131,
"count": 18202,
"self": 0.33610550400646844,
"children": {
"env_step": {
"total": 858.2009670080066,
"count": 18202,
"self": 553.6597745330043,
"children": {
"SubprocessEnvManager._take_step": {
"total": 304.2226872739941,
"count": 18202,
"self": 2.116667810010199,
"children": {
"TorchPolicy.evaluate": {
"total": 302.1060194639839,
"count": 18202,
"self": 68.5517974349774,
"children": {
"TorchPolicy.sample_actions": {
"total": 233.5542220290065,
"count": 18202,
"self": 233.5542220290065
}
}
}
}
},
"workers": {
"total": 0.31850520100817903,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 864.5826329959876,
"count": 18202,
"is_parallel": true,
"self": 406.01135185499913,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004090299999916169,
"count": 1,
"is_parallel": true,
"self": 0.0017994000000953747,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022908999998207946,
"count": 10,
"is_parallel": true,
"self": 0.0022908999998207946
}
}
},
"UnityEnvironment.step": {
"total": 0.0336838999999145,
"count": 1,
"is_parallel": true,
"self": 0.00032589999977972184,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023840000005748152,
"count": 1,
"is_parallel": true,
"self": 0.00023840000005748152
},
"communicator.exchange": {
"total": 0.032102200000053926,
"count": 1,
"is_parallel": true,
"self": 0.032102200000053926
},
"steps_from_proto": {
"total": 0.0010174000000233718,
"count": 1,
"is_parallel": true,
"self": 0.00027639999996154074,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007410000000618311,
"count": 10,
"is_parallel": true,
"self": 0.0007410000000618311
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 458.5712811409885,
"count": 18201,
"is_parallel": true,
"self": 8.493286445009517,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.8733265139919695,
"count": 18201,
"is_parallel": true,
"self": 4.8733265139919695
},
"communicator.exchange": {
"total": 418.0503929209906,
"count": 18201,
"is_parallel": true,
"self": 418.0503929209906
},
"steps_from_proto": {
"total": 27.154275260996428,
"count": 18201,
"is_parallel": true,
"self": 6.616347938979857,
"children": {
"_process_rank_one_or_two_observation": {
"total": 20.53792732201657,
"count": 182010,
"is_parallel": true,
"self": 20.53792732201657
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015389999998660642,
"count": 1,
"self": 0.00015389999998660642,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 848.8109635720039,
"count": 758904,
"is_parallel": true,
"self": 26.260382023964553,
"children": {
"process_trajectory": {
"total": 464.6479051820389,
"count": 758904,
"is_parallel": true,
"self": 463.17262192403916,
"children": {
"RLTrainer._checkpoint": {
"total": 1.475283257999763,
"count": 4,
"is_parallel": true,
"self": 1.475283257999763
}
}
},
"_update_policy": {
"total": 357.9026763660004,
"count": 90,
"is_parallel": true,
"self": 48.933773986998176,
"children": {
"TorchPPOOptimizer.update": {
"total": 308.9689023790022,
"count": 4578,
"is_parallel": true,
"self": 308.9689023790022
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1476753000001736,
"count": 1,
"self": 0.0007767000001877022,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1468985999999859,
"count": 1,
"self": 0.1468985999999859
}
}
}
}
}
}
}