{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4102596044540405,
"min": 1.4102596044540405,
"max": 1.4287683963775635,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69192.9765625,
"min": 68717.015625,
"max": 77783.71875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 103.4516806722689,
"min": 87.73835125448029,
"max": 385.0153846153846,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49243.0,
"min": 48849.0,
"max": 50083.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999856.0,
"min": 49954.0,
"max": 1999856.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999856.0,
"min": 49954.0,
"max": 1999856.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.338008403778076,
"min": 0.12175464630126953,
"max": 2.4355030059814453,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1112.8919677734375,
"min": 15.70634937286377,
"max": 1366.317138671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.639856197252995,
"min": 1.8349889364353447,
"max": 3.9591879316823397,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1732.5715498924255,
"min": 236.71357280015945,
"max": 2158.1801187992096,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.639856197252995,
"min": 1.8349889364353447,
"max": 3.9591879316823397,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1732.5715498924255,
"min": 236.71357280015945,
"max": 2158.1801187992096,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014997226185247806,
"min": 0.013212664630555083,
"max": 0.019953901386907093,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.02999445237049561,
"min": 0.026425329261110166,
"max": 0.05779540098895571,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.053272785743077596,
"min": 0.02289817202836275,
"max": 0.06266879476606846,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10654557148615519,
"min": 0.0457963440567255,
"max": 0.17527958924571674,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.62392345872501e-06,
"min": 4.62392345872501e-06,
"max": 0.00029530185156604997,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.24784691745002e-06,
"min": 9.24784691745002e-06,
"max": 0.0008437243687585499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101541275,
"min": 0.101541275,
"max": 0.19843395000000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20308255,
"min": 0.20308255,
"max": 0.58124145,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.690962250000015e-05,
"min": 8.690962250000015e-05,
"max": 0.004921854105,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001738192450000003,
"min": 0.0001738192450000003,
"max": 0.014063948355,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698498537",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698501015"
},
"total": 2477.747150252,
"count": 1,
"self": 0.4265396960004182,
"children": {
"run_training.setup": {
"total": 0.04523002399992038,
"count": 1,
"self": 0.04523002399992038
},
"TrainerController.start_learning": {
"total": 2477.275380532,
"count": 1,
"self": 4.652140172177951,
"children": {
"TrainerController._reset_env": {
"total": 8.116869874000031,
"count": 1,
"self": 8.116869874000031
},
"TrainerController.advance": {
"total": 2464.402329101822,
"count": 231863,
"self": 4.787123913765299,
"children": {
"env_step": {
"total": 1964.2591937309815,
"count": 231863,
"self": 1612.4113497640687,
"children": {
"SubprocessEnvManager._take_step": {
"total": 348.955650621922,
"count": 231863,
"self": 16.912641342001734,
"children": {
"TorchPolicy.evaluate": {
"total": 332.0430092799203,
"count": 222878,
"self": 332.0430092799203
}
}
},
"workers": {
"total": 2.8921933449906874,
"count": 231863,
"self": 0.0,
"children": {
"worker_root": {
"total": 2469.5011601310157,
"count": 231863,
"is_parallel": true,
"self": 1155.5242802261614,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007608659999505107,
"count": 1,
"is_parallel": true,
"self": 0.00023219100000915205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005286749999413587,
"count": 2,
"is_parallel": true,
"self": 0.0005286749999413587
}
}
},
"UnityEnvironment.step": {
"total": 0.0368657679999842,
"count": 1,
"is_parallel": true,
"self": 0.000418935999846326,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021425300008104387,
"count": 1,
"is_parallel": true,
"self": 0.00021425300008104387
},
"communicator.exchange": {
"total": 0.03551564500003224,
"count": 1,
"is_parallel": true,
"self": 0.03551564500003224
},
"steps_from_proto": {
"total": 0.0007169340000245938,
"count": 1,
"is_parallel": true,
"self": 0.0002224250000608663,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004945089999637275,
"count": 2,
"is_parallel": true,
"self": 0.0004945089999637275
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1313.9768799048543,
"count": 231862,
"is_parallel": true,
"self": 39.920958218749774,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.01101527809374,
"count": 231862,
"is_parallel": true,
"self": 89.01101527809374
},
"communicator.exchange": {
"total": 1092.9323956299843,
"count": 231862,
"is_parallel": true,
"self": 1092.9323956299843
},
"steps_from_proto": {
"total": 92.11251077802649,
"count": 231862,
"is_parallel": true,
"self": 35.217538611909276,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.89497216611721,
"count": 463724,
"is_parallel": true,
"self": 56.89497216611721
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 495.3560114570754,
"count": 231863,
"self": 6.734997520984734,
"children": {
"process_trajectory": {
"total": 155.86158564608968,
"count": 231863,
"self": 154.61818991708924,
"children": {
"RLTrainer._checkpoint": {
"total": 1.24339572900044,
"count": 10,
"self": 1.24339572900044
}
}
},
"_update_policy": {
"total": 332.759428290001,
"count": 96,
"self": 271.64064264900253,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.11878564099845,
"count": 2880,
"self": 61.11878564099845
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1349998203513678e-06,
"count": 1,
"self": 1.1349998203513678e-06
},
"TrainerController._save_models": {
"total": 0.10404024899980868,
"count": 1,
"self": 0.0018382029998065263,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10220204600000216,
"count": 1,
"self": 0.10220204600000216
}
}
}
}
}
}
}