{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4057362079620361,
"min": 1.4057362079620361,
"max": 1.428216814994812,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69039.921875,
"min": 69039.921875,
"max": 76085.3046875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 107.95814977973568,
"min": 91.37383177570094,
"max": 396.9920634920635,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49013.0,
"min": 48885.0,
"max": 50244.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999449.0,
"min": 49872.0,
"max": 1999449.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999449.0,
"min": 49872.0,
"max": 1999449.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.343102216720581,
"min": 0.153630331158638,
"max": 2.425747871398926,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1063.7684326171875,
"min": 19.20379066467285,
"max": 1271.5904541015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5102991600918876,
"min": 1.6973101737499237,
"max": 3.9114186046998713,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1593.675818681717,
"min": 212.16377171874046,
"max": 1995.0619245767593,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5102991600918876,
"min": 1.6973101737499237,
"max": 3.9114186046998713,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1593.675818681717,
"min": 212.16377171874046,
"max": 1995.0619245767593,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016368141851408936,
"min": 0.014175712274239534,
"max": 0.01897379639412975,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04910442555422681,
"min": 0.028351424548479068,
"max": 0.055895948520628735,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04715279166897138,
"min": 0.020476520620286466,
"max": 0.05632154997438192,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14145837500691413,
"min": 0.04095304124057293,
"max": 0.1665416775892178,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.264048912016659e-06,
"min": 3.264048912016659e-06,
"max": 0.0002952683265772249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.792146736049977e-06,
"min": 9.792146736049977e-06,
"max": 0.0008441386686204499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10108798333333331,
"min": 0.10108798333333331,
"max": 0.19842277500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30326394999999995,
"min": 0.20731460000000002,
"max": 0.5813795500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.429036833333324e-05,
"min": 6.429036833333324e-05,
"max": 0.0049212964724999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019287110499999975,
"min": 0.00019287110499999975,
"max": 0.014070839545,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692966065",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692968440"
},
"total": 2375.135819628,
"count": 1,
"self": 0.4411460800001805,
"children": {
"run_training.setup": {
"total": 0.04451812299998892,
"count": 1,
"self": 0.04451812299998892
},
"TrainerController.start_learning": {
"total": 2374.650155425,
"count": 1,
"self": 4.138468593005655,
"children": {
"TrainerController._reset_env": {
"total": 4.770192691000034,
"count": 1,
"self": 4.770192691000034
},
"TrainerController.advance": {
"total": 2365.6166795019944,
"count": 231698,
"self": 4.297361002927573,
"children": {
"env_step": {
"total": 1818.2515231750197,
"count": 231698,
"self": 1528.267998298068,
"children": {
"SubprocessEnvManager._take_step": {
"total": 287.20852984496594,
"count": 231698,
"self": 16.55419502295115,
"children": {
"TorchPolicy.evaluate": {
"total": 270.6543348220148,
"count": 223017,
"self": 270.6543348220148
}
}
},
"workers": {
"total": 2.7749950319855543,
"count": 231698,
"self": 0.0,
"children": {
"worker_root": {
"total": 2367.562380819964,
"count": 231698,
"is_parallel": true,
"self": 1121.7741707899877,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010539369999378323,
"count": 1,
"is_parallel": true,
"self": 0.00033843999995042395,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007154969999874083,
"count": 2,
"is_parallel": true,
"self": 0.0007154969999874083
}
}
},
"UnityEnvironment.step": {
"total": 0.05751756600000135,
"count": 1,
"is_parallel": true,
"self": 0.000332756000034351,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020938399995884538,
"count": 1,
"is_parallel": true,
"self": 0.00020938399995884538
},
"communicator.exchange": {
"total": 0.05618851799999902,
"count": 1,
"is_parallel": true,
"self": 0.05618851799999902
},
"steps_from_proto": {
"total": 0.0007869080000091344,
"count": 1,
"is_parallel": true,
"self": 0.0002317830000038157,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005551250000053187,
"count": 2,
"is_parallel": true,
"self": 0.0005551250000053187
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1245.7882100299762,
"count": 231697,
"is_parallel": true,
"self": 39.02347094810057,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.72028495001018,
"count": 231697,
"is_parallel": true,
"self": 79.72028495001018
},
"communicator.exchange": {
"total": 1030.71417454992,
"count": 231697,
"is_parallel": true,
"self": 1030.71417454992
},
"steps_from_proto": {
"total": 96.33027958194532,
"count": 231697,
"is_parallel": true,
"self": 34.23633828387631,
"children": {
"_process_rank_one_or_two_observation": {
"total": 62.09394129806901,
"count": 463394,
"is_parallel": true,
"self": 62.09394129806901
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 543.0677953240471,
"count": 231698,
"self": 6.453913013124975,
"children": {
"process_trajectory": {
"total": 132.9194103489209,
"count": 231698,
"self": 131.6132297089215,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3061806399994111,
"count": 10,
"self": 1.3061806399994111
}
}
},
"_update_policy": {
"total": 403.6944719620012,
"count": 97,
"self": 343.7570088019927,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.93746316000852,
"count": 2910,
"self": 59.93746316000852
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1639999684120994e-06,
"count": 1,
"self": 1.1639999684120994e-06
},
"TrainerController._save_models": {
"total": 0.1248134749998826,
"count": 1,
"self": 0.002156703999844467,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12265677100003813,
"count": 1,
"self": 0.12265677100003813
}
}
}
}
}
}
}