{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4082837104797363,
"min": 1.4082837104797363,
"max": 1.4253687858581543,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69770.6015625,
"min": 69050.1171875,
"max": 77455.8125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 107.22795698924732,
"min": 82.00332225913621,
"max": 407.7479674796748,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49861.0,
"min": 48833.0,
"max": 50153.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999948.0,
"min": 49815.0,
"max": 1999948.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999948.0,
"min": 49815.0,
"max": 1999948.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3794212341308594,
"min": 0.19028061628341675,
"max": 2.4592249393463135,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1106.430908203125,
"min": 23.214235305786133,
"max": 1438.689453125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6032263132833666,
"min": 1.8746524869662817,
"max": 3.906424340538003,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1675.5002356767654,
"min": 228.70760340988636,
"max": 2232.5012858510017,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6032263132833666,
"min": 1.8746524869662817,
"max": 3.906424340538003,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1675.5002356767654,
"min": 228.70760340988636,
"max": 2232.5012858510017,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016912766880705022,
"min": 0.011608441312030968,
"max": 0.02057671589417522,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05073830064211507,
"min": 0.023216882624061935,
"max": 0.06173014768252566,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.048902254593041206,
"min": 0.021802706054101388,
"max": 0.06027702645709117,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14670676377912362,
"min": 0.043605412108202776,
"max": 0.17254648605982462,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1772989409333245e-06,
"min": 3.1772989409333245e-06,
"max": 0.00029535540154820004,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.531896822799973e-06,
"min": 9.531896822799973e-06,
"max": 0.0008441773686075501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10105906666666666,
"min": 0.10105906666666666,
"max": 0.1984518,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3031772,
"min": 0.20730345,
"max": 0.5813924500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.284742666666653e-05,
"min": 6.284742666666653e-05,
"max": 0.004922744820000002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001885422799999996,
"min": 0.0001885422799999996,
"max": 0.014071483255,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683604933",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683607325"
},
"total": 2392.44774888,
"count": 1,
"self": 0.44014773299977605,
"children": {
"run_training.setup": {
"total": 0.05995731400003024,
"count": 1,
"self": 0.05995731400003024
},
"TrainerController.start_learning": {
"total": 2391.9476438330003,
"count": 1,
"self": 4.324494751970178,
"children": {
"TrainerController._reset_env": {
"total": 4.844133953999972,
"count": 1,
"self": 4.844133953999972
},
"TrainerController.advance": {
"total": 2382.64024947303,
"count": 232041,
"self": 4.715081758043652,
"children": {
"env_step": {
"total": 1853.4043262499765,
"count": 232041,
"self": 1565.5770499290138,
"children": {
"SubprocessEnvManager._take_step": {
"total": 285.01192751598614,
"count": 232041,
"self": 16.330497127096805,
"children": {
"TorchPolicy.evaluate": {
"total": 268.68143038888934,
"count": 222985,
"self": 268.68143038888934
}
}
},
"workers": {
"total": 2.8153488049766793,
"count": 232041,
"self": 0.0,
"children": {
"worker_root": {
"total": 2383.9687180169117,
"count": 232041,
"is_parallel": true,
"self": 1103.5724777930386,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009036669999886726,
"count": 1,
"is_parallel": true,
"self": 0.0002694499999620348,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006342170000266378,
"count": 2,
"is_parallel": true,
"self": 0.0006342170000266378
}
}
},
"UnityEnvironment.step": {
"total": 0.027587562000007892,
"count": 1,
"is_parallel": true,
"self": 0.00032546099998853606,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019306600006530061,
"count": 1,
"is_parallel": true,
"self": 0.00019306600006530061
},
"communicator.exchange": {
"total": 0.026352731999963908,
"count": 1,
"is_parallel": true,
"self": 0.026352731999963908
},
"steps_from_proto": {
"total": 0.0007163029999901482,
"count": 1,
"is_parallel": true,
"self": 0.00020496400009051285,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005113389998996354,
"count": 2,
"is_parallel": true,
"self": 0.0005113389998996354
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1280.396240223873,
"count": 232040,
"is_parallel": true,
"self": 38.54510818998688,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.3139882179288,
"count": 232040,
"is_parallel": true,
"self": 77.3139882179288
},
"communicator.exchange": {
"total": 1072.0241287019173,
"count": 232040,
"is_parallel": true,
"self": 1072.0241287019173
},
"steps_from_proto": {
"total": 92.51301511404006,
"count": 232040,
"is_parallel": true,
"self": 33.189235055123845,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.32378005891621,
"count": 464080,
"is_parallel": true,
"self": 59.32378005891621
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 524.52084146501,
"count": 232041,
"self": 6.902967917952537,
"children": {
"process_trajectory": {
"total": 132.29674014705813,
"count": 232041,
"self": 131.0114489990575,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2852911480006242,
"count": 10,
"self": 1.2852911480006242
}
}
},
"_update_policy": {
"total": 385.3211333999993,
"count": 97,
"self": 325.19532943900947,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.125803960989856,
"count": 2910,
"self": 60.125803960989856
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.970000635599717e-07,
"count": 1,
"self": 8.970000635599717e-07
},
"TrainerController._save_models": {
"total": 0.1387647569999899,
"count": 1,
"self": 0.0021815290001541143,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1365832279998358,
"count": 1,
"self": 0.1365832279998358
}
}
}
}
}
}
}