{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4007062911987305,
"min": 1.4007062911987305,
"max": 1.4257272481918335,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70910.7578125,
"min": 69100.546875,
"max": 77379.1171875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 74.90273556231003,
"min": 73.54029850746268,
"max": 411.10655737704917,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49286.0,
"min": 48745.0,
"max": 50155.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999965.0,
"min": 49864.0,
"max": 1999965.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999965.0,
"min": 49864.0,
"max": 1999965.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4436609745025635,
"min": 0.03954571485519409,
"max": 2.5263845920562744,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1607.928955078125,
"min": 4.785031318664551,
"max": 1625.54296875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7753986705400298,
"min": 1.733818206166433,
"max": 3.9825124675898174,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2484.2123252153397,
"min": 209.79200294613838,
"max": 2527.74883043766,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7753986705400298,
"min": 1.733818206166433,
"max": 3.9825124675898174,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2484.2123252153397,
"min": 209.79200294613838,
"max": 2527.74883043766,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017124893301257143,
"min": 0.012642591121645333,
"max": 0.021260950651291445,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051374679903771425,
"min": 0.029151586178826013,
"max": 0.057985008695201644,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06033319011330604,
"min": 0.022120706581821047,
"max": 0.06263425548871358,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18099957033991812,
"min": 0.04424141316364209,
"max": 0.18790276646614074,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4501488499833263e-06,
"min": 3.4501488499833263e-06,
"max": 0.00029532232655922496,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0350446549949979e-05,
"min": 1.0350446549949979e-05,
"max": 0.0008437932187355999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115001666666663,
"min": 0.10115001666666663,
"max": 0.19844077499999999,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034500499999999,
"min": 0.20742805000000009,
"max": 0.5812644,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.738583166666656e-05,
"min": 6.738583166666656e-05,
"max": 0.004922194672499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020215749499999966,
"min": 0.00020215749499999966,
"max": 0.01406509356,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694871695",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1694874151"
},
"total": 2456.810339609,
"count": 1,
"self": 0.8545121040001504,
"children": {
"run_training.setup": {
"total": 0.05139960700000756,
"count": 1,
"self": 0.05139960700000756
},
"TrainerController.start_learning": {
"total": 2455.904427898,
"count": 1,
"self": 4.4624072119568154,
"children": {
"TrainerController._reset_env": {
"total": 4.707229319000021,
"count": 1,
"self": 4.707229319000021
},
"TrainerController.advance": {
"total": 2446.5531853550433,
"count": 233155,
"self": 4.544511632060676,
"children": {
"env_step": {
"total": 1884.9660877970227,
"count": 233155,
"self": 1597.3655002670132,
"children": {
"SubprocessEnvManager._take_step": {
"total": 284.6961195189942,
"count": 233155,
"self": 16.800192925008957,
"children": {
"TorchPolicy.evaluate": {
"total": 267.8959265939852,
"count": 223018,
"self": 267.8959265939852
}
}
},
"workers": {
"total": 2.9044680110154104,
"count": 233155,
"self": 0.0,
"children": {
"worker_root": {
"total": 2448.1577148369315,
"count": 233155,
"is_parallel": true,
"self": 1145.5450238730357,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000912034999998923,
"count": 1,
"is_parallel": true,
"self": 0.0002488490000018828,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006631859999970402,
"count": 2,
"is_parallel": true,
"self": 0.0006631859999970402
}
}
},
"UnityEnvironment.step": {
"total": 0.07229030199999897,
"count": 1,
"is_parallel": true,
"self": 0.00034986699995442905,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002155409999886615,
"count": 1,
"is_parallel": true,
"self": 0.0002155409999886615
},
"communicator.exchange": {
"total": 0.07091196200002514,
"count": 1,
"is_parallel": true,
"self": 0.07091196200002514
},
"steps_from_proto": {
"total": 0.0008129320000307416,
"count": 1,
"is_parallel": true,
"self": 0.00021963100004995795,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005933009999807837,
"count": 2,
"is_parallel": true,
"self": 0.0005933009999807837
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1302.6126909638958,
"count": 233154,
"is_parallel": true,
"self": 40.1657098419098,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.01662733194695,
"count": 233154,
"is_parallel": true,
"self": 81.01662733194695
},
"communicator.exchange": {
"total": 1082.426799568036,
"count": 233154,
"is_parallel": true,
"self": 1082.426799568036
},
"steps_from_proto": {
"total": 99.00355422200329,
"count": 233154,
"is_parallel": true,
"self": 35.294450444916606,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.70910377708668,
"count": 466308,
"is_parallel": true,
"self": 63.70910377708668
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 557.0425859259599,
"count": 233155,
"self": 6.78392518185683,
"children": {
"process_trajectory": {
"total": 140.9939229671034,
"count": 233155,
"self": 139.70622313210396,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2876998349994437,
"count": 10,
"self": 1.2876998349994437
}
}
},
"_update_policy": {
"total": 409.2647377769997,
"count": 97,
"self": 348.60188471500095,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.66285306199876,
"count": 2910,
"self": 60.66285306199876
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.612000232853461e-06,
"count": 1,
"self": 1.612000232853461e-06
},
"TrainerController._save_models": {
"total": 0.18160439999974187,
"count": 1,
"self": 0.0026809179998963373,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17892348199984554,
"count": 1,
"self": 0.17892348199984554
}
}
}
}
}
}
}