{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4068331718444824,
"min": 1.4068331718444824,
"max": 1.4276225566864014,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68661.8984375,
"min": 68527.53125,
"max": 77600.9765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 100.63232323232323,
"min": 85.44887348353552,
"max": 386.03053435114504,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49813.0,
"min": 48754.0,
"max": 50570.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999565.0,
"min": 49951.0,
"max": 1999565.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999565.0,
"min": 49951.0,
"max": 1999565.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.386176109313965,
"min": 0.15234392881393433,
"max": 2.4544341564178467,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1181.1572265625,
"min": 19.804710388183594,
"max": 1382.03759765625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5585576354855237,
"min": 1.7728424549102784,
"max": 3.9913609915635955,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1761.4860295653343,
"min": 230.46951913833618,
"max": 2267.0930432081223,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5585576354855237,
"min": 1.7728424549102784,
"max": 3.9913609915635955,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1761.4860295653343,
"min": 230.46951913833618,
"max": 2267.0930432081223,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.0171758140987044,
"min": 0.013002161026654196,
"max": 0.019787421865880284,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051527442296113196,
"min": 0.027383549115620555,
"max": 0.05936226559764085,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.045523084907068155,
"min": 0.02157709194968144,
"max": 0.05500443807492653,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.13656925472120446,
"min": 0.04315418389936288,
"max": 0.1641682431101799,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5322988225999973e-06,
"min": 3.5322988225999973e-06,
"max": 0.00029538450153849996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0596896467799992e-05,
"min": 1.0596896467799992e-05,
"max": 0.0008442832685722499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117740000000003,
"min": 0.10117740000000003,
"max": 0.1984615,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035322000000001,
"min": 0.20748175,
"max": 0.5814277500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.875225999999995e-05,
"min": 6.875225999999995e-05,
"max": 0.004923228850000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020625677999999985,
"min": 0.00020625677999999985,
"max": 0.014073244724999996,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670820540",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670822805"
},
"total": 2264.732630283,
"count": 1,
"self": 0.39402728499999284,
"children": {
"run_training.setup": {
"total": 0.10958331400001953,
"count": 1,
"self": 0.10958331400001953
},
"TrainerController.start_learning": {
"total": 2264.229019684,
"count": 1,
"self": 3.9998783030073355,
"children": {
"TrainerController._reset_env": {
"total": 10.075215138999965,
"count": 1,
"self": 10.075215138999965
},
"TrainerController.advance": {
"total": 2250.031966782992,
"count": 232174,
"self": 4.1808078289459445,
"children": {
"env_step": {
"total": 1781.0824407689806,
"count": 232174,
"self": 1495.904519441951,
"children": {
"SubprocessEnvManager._take_step": {
"total": 282.4643072359157,
"count": 232174,
"self": 14.66445585279996,
"children": {
"TorchPolicy.evaluate": {
"total": 267.79985138311577,
"count": 222964,
"self": 67.70086556621231,
"children": {
"TorchPolicy.sample_actions": {
"total": 200.09898581690345,
"count": 222964,
"self": 200.09898581690345
}
}
}
}
},
"workers": {
"total": 2.713614091113982,
"count": 232174,
"self": 0.0,
"children": {
"worker_root": {
"total": 2256.1161078890323,
"count": 232174,
"is_parallel": true,
"self": 1031.2078333430404,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019626170000037746,
"count": 1,
"is_parallel": true,
"self": 0.00037528500001826615,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015873319999855084,
"count": 2,
"is_parallel": true,
"self": 0.0015873319999855084
}
}
},
"UnityEnvironment.step": {
"total": 0.02677846699998554,
"count": 1,
"is_parallel": true,
"self": 0.00026075699997818447,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017257199999676232,
"count": 1,
"is_parallel": true,
"self": 0.00017257199999676232
},
"communicator.exchange": {
"total": 0.025626418999991074,
"count": 1,
"is_parallel": true,
"self": 0.025626418999991074
},
"steps_from_proto": {
"total": 0.0007187190000195187,
"count": 1,
"is_parallel": true,
"self": 0.00024565600006098975,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00047306299995852896,
"count": 2,
"is_parallel": true,
"self": 0.00047306299995852896
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1224.9082745459918,
"count": 232173,
"is_parallel": true,
"self": 35.13519466085381,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.83095825301632,
"count": 232173,
"is_parallel": true,
"self": 76.83095825301632
},
"communicator.exchange": {
"total": 1016.7226131891096,
"count": 232173,
"is_parallel": true,
"self": 1016.7226131891096
},
"steps_from_proto": {
"total": 96.21950844301227,
"count": 232173,
"is_parallel": true,
"self": 41.43221745116557,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.7872909918467,
"count": 464346,
"is_parallel": true,
"self": 54.7872909918467
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 464.7687181850656,
"count": 232174,
"self": 6.4137646040297795,
"children": {
"process_trajectory": {
"total": 148.36833282103692,
"count": 232174,
"self": 147.90056402003654,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4677688010003749,
"count": 4,
"self": 0.4677688010003749
}
}
},
"_update_policy": {
"total": 309.9866207599989,
"count": 97,
"self": 256.4226545779983,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.56396618200063,
"count": 2910,
"self": 53.56396618200063
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.85000042419415e-07,
"count": 1,
"self": 9.85000042419415e-07
},
"TrainerController._save_models": {
"total": 0.12195847400016646,
"count": 1,
"self": 0.001956142000381078,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12000233199978538,
"count": 1,
"self": 0.12000233199978538
}
}
}
}
}
}
}