{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4096795320510864,
"min": 1.4096795320510864,
"max": 1.4280272722244263,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71174.71875,
"min": 68537.1640625,
"max": 75916.4765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.43177570093458,
"min": 80.50570962479608,
"max": 396.36507936507934,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49451.0,
"min": 49080.0,
"max": 49952.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999998.0,
"min": 49606.0,
"max": 1999998.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999998.0,
"min": 49606.0,
"max": 1999998.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.412562370300293,
"min": 0.06471562385559082,
"max": 2.462721586227417,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1290.7208251953125,
"min": 8.089452743530273,
"max": 1459.9420166015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6217129893392044,
"min": 1.8744677917957306,
"max": 4.01332095042821,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1937.6164492964745,
"min": 234.30847397446632,
"max": 2350.051864326,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6217129893392044,
"min": 1.8744677917957306,
"max": 4.01332095042821,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1937.6164492964745,
"min": 234.30847397446632,
"max": 2350.051864326,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019160711519200253,
"min": 0.013661849033087493,
"max": 0.021537751171854323,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.057482134557600754,
"min": 0.028892840495730827,
"max": 0.057482134557600754,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05184011852575673,
"min": 0.021477201798309885,
"max": 0.0657663169834349,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15552035557727018,
"min": 0.04295440359661977,
"max": 0.19729895095030467,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.359998880033337e-06,
"min": 3.359998880033337e-06,
"max": 0.00029536080154639994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.007999664010001e-05,
"min": 1.007999664010001e-05,
"max": 0.0008444358185214,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111996666666667,
"min": 0.10111996666666667,
"max": 0.1984536,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3033599,
"min": 0.20738955000000006,
"max": 0.5814786000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.588633666666673e-05,
"min": 6.588633666666673e-05,
"max": 0.00492283464,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001976590100000002,
"min": 0.0001976590100000002,
"max": 0.014075782140000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673622286",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673624510"
},
"total": 2223.7893435200003,
"count": 1,
"self": 0.3964940160003607,
"children": {
"run_training.setup": {
"total": 0.10449049899989404,
"count": 1,
"self": 0.10449049899989404
},
"TrainerController.start_learning": {
"total": 2223.288359005,
"count": 1,
"self": 3.9668242398720395,
"children": {
"TrainerController._reset_env": {
"total": 10.536765569999943,
"count": 1,
"self": 10.536765569999943
},
"TrainerController.advance": {
"total": 2208.6742109261277,
"count": 232290,
"self": 3.994013538188028,
"children": {
"env_step": {
"total": 1736.0733271769086,
"count": 232290,
"self": 1460.2309678209485,
"children": {
"SubprocessEnvManager._take_step": {
"total": 273.2104009349549,
"count": 232290,
"self": 14.494059007027772,
"children": {
"TorchPolicy.evaluate": {
"total": 258.71634192792715,
"count": 223075,
"self": 64.78957556089688,
"children": {
"TorchPolicy.sample_actions": {
"total": 193.92676636703027,
"count": 223075,
"self": 193.92676636703027
}
}
}
}
},
"workers": {
"total": 2.6319584210051516,
"count": 232290,
"self": 0.0,
"children": {
"worker_root": {
"total": 2215.5324514579866,
"count": 232290,
"is_parallel": true,
"self": 1013.7717149149248,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018081460000303196,
"count": 1,
"is_parallel": true,
"self": 0.00032848599994395045,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014796600000863691,
"count": 2,
"is_parallel": true,
"self": 0.0014796600000863691
}
}
},
"UnityEnvironment.step": {
"total": 0.03083297799992124,
"count": 1,
"is_parallel": true,
"self": 0.000290193000068939,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023502699991695408,
"count": 1,
"is_parallel": true,
"self": 0.00023502699991695408
},
"communicator.exchange": {
"total": 0.029386666999926092,
"count": 1,
"is_parallel": true,
"self": 0.029386666999926092
},
"steps_from_proto": {
"total": 0.0009210910000092554,
"count": 1,
"is_parallel": true,
"self": 0.0004150149999304631,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005060760000787923,
"count": 2,
"is_parallel": true,
"self": 0.0005060760000787923
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1201.7607365430617,
"count": 232289,
"is_parallel": true,
"self": 34.434198584176556,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.29520343992726,
"count": 232289,
"is_parallel": true,
"self": 76.29520343992726
},
"communicator.exchange": {
"total": 997.391046831932,
"count": 232289,
"is_parallel": true,
"self": 997.391046831932
},
"steps_from_proto": {
"total": 93.64028768702599,
"count": 232289,
"is_parallel": true,
"self": 38.2578178499665,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.38246983705949,
"count": 464578,
"is_parallel": true,
"self": 55.38246983705949
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 468.6068702110309,
"count": 232290,
"self": 6.380061448953029,
"children": {
"process_trajectory": {
"total": 146.4569310310792,
"count": 232290,
"self": 145.23532665207983,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2216043789993591,
"count": 10,
"self": 1.2216043789993591
}
}
},
"_update_policy": {
"total": 315.7698777309987,
"count": 97,
"self": 261.57210632299086,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.19777140800784,
"count": 2910,
"self": 54.19777140800784
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.239999260695186e-07,
"count": 1,
"self": 8.239999260695186e-07
},
"TrainerController._save_models": {
"total": 0.11055744500026776,
"count": 1,
"self": 0.0019529410001268843,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10860450400014088,
"count": 1,
"self": 0.10860450400014088
}
}
}
}
}
}
}