{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4066106081008911,
"min": 1.4066106081008911,
"max": 1.43050217628479,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70825.65625,
"min": 66886.609375,
"max": 75337.28125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 116.541567695962,
"min": 99.77445109780439,
"max": 391.3125,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49064.0,
"min": 48969.0,
"max": 50222.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999358.0,
"min": 49855.0,
"max": 1999358.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999358.0,
"min": 49855.0,
"max": 1999358.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.2850098609924316,
"min": 0.06485355645418167,
"max": 2.418700933456421,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 961.9891357421875,
"min": 8.236401557922363,
"max": 1157.572021484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.4164793831152473,
"min": 1.6902434131291908,
"max": 3.8080791290998457,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1438.3378202915192,
"min": 214.66091346740723,
"max": 1904.039564549923,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.4164793831152473,
"min": 1.6902434131291908,
"max": 3.8080791290998457,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1438.3378202915192,
"min": 214.66091346740723,
"max": 1904.039564549923,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018645598549725646,
"min": 0.012122762846411205,
"max": 0.019616695427167645,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03729119709945129,
"min": 0.027709915409407888,
"max": 0.058850086281502935,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04764610268175602,
"min": 0.023670885546339882,
"max": 0.056054911017417906,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09529220536351204,
"min": 0.04833814638356368,
"max": 0.1681647330522537,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.499873500074989e-06,
"min": 4.499873500074989e-06,
"max": 0.000295368526543825,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.999747000149977e-06,
"min": 8.999747000149977e-06,
"max": 0.0008439709686763498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10149992500000005,
"min": 0.10149992500000005,
"max": 0.198456175,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2029998500000001,
"min": 0.2029998500000001,
"max": 0.58132365,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.48462574999998e-05,
"min": 8.48462574999998e-05,
"max": 0.0049229631324999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001696925149999996,
"min": 0.0001696925149999996,
"max": 0.014068050135000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686755616",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686757937"
},
"total": 2320.492646253,
"count": 1,
"self": 0.43160662300033437,
"children": {
"run_training.setup": {
"total": 0.04069889499999135,
"count": 1,
"self": 0.04069889499999135
},
"TrainerController.start_learning": {
"total": 2320.020340735,
"count": 1,
"self": 4.374555099071586,
"children": {
"TrainerController._reset_env": {
"total": 4.058861869999987,
"count": 1,
"self": 4.058861869999987
},
"TrainerController.advance": {
"total": 2311.4671741599286,
"count": 231065,
"self": 4.377242479858069,
"children": {
"env_step": {
"total": 1810.1405986330249,
"count": 231065,
"self": 1518.7908529112499,
"children": {
"SubprocessEnvManager._take_step": {
"total": 288.66897491397447,
"count": 231065,
"self": 16.804652510933465,
"children": {
"TorchPolicy.evaluate": {
"total": 271.864322403041,
"count": 222978,
"self": 271.864322403041
}
}
},
"workers": {
"total": 2.6807708078005135,
"count": 231065,
"self": 0.0,
"children": {
"worker_root": {
"total": 2312.552905292045,
"count": 231065,
"is_parallel": true,
"self": 1072.3378064030426,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009169930000325621,
"count": 1,
"is_parallel": true,
"self": 0.0002665729999762334,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006504200000563287,
"count": 2,
"is_parallel": true,
"self": 0.0006504200000563287
}
}
},
"UnityEnvironment.step": {
"total": 0.027862547999916387,
"count": 1,
"is_parallel": true,
"self": 0.0002716389999477542,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021043499998540938,
"count": 1,
"is_parallel": true,
"self": 0.00021043499998540938
},
"communicator.exchange": {
"total": 0.0266961080000101,
"count": 1,
"is_parallel": true,
"self": 0.0266961080000101
},
"steps_from_proto": {
"total": 0.0006843659999731244,
"count": 1,
"is_parallel": true,
"self": 0.0001857989999507481,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004985670000223763,
"count": 2,
"is_parallel": true,
"self": 0.0004985670000223763
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1240.2150988890025,
"count": 231064,
"is_parallel": true,
"self": 37.919234761893904,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.36042298295877,
"count": 231064,
"is_parallel": true,
"self": 76.36042298295877
},
"communicator.exchange": {
"total": 1034.2898844780193,
"count": 231064,
"is_parallel": true,
"self": 1034.2898844780193
},
"steps_from_proto": {
"total": 91.64555666613046,
"count": 231064,
"is_parallel": true,
"self": 32.83280432208255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.81275234404791,
"count": 462128,
"is_parallel": true,
"self": 58.81275234404791
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 496.9493330470457,
"count": 231065,
"self": 6.388920885108064,
"children": {
"process_trajectory": {
"total": 124.90888417993733,
"count": 231065,
"self": 123.5140284059371,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3948557740002343,
"count": 10,
"self": 1.3948557740002343
}
}
},
"_update_policy": {
"total": 365.6515279820003,
"count": 96,
"self": 306.9164355130106,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.735092468989706,
"count": 2880,
"self": 58.735092468989706
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.4600000011269e-07,
"count": 1,
"self": 9.4600000011269e-07
},
"TrainerController._save_models": {
"total": 0.1197486599999138,
"count": 1,
"self": 0.0018837309999071294,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11786492900000667,
"count": 1,
"self": 0.11786492900000667
}
}
}
}
}
}
}