{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4198113679885864,
"min": 1.4195078611373901,
"max": 1.4246439933776855,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69657.3671875,
"min": 69625.265625,
"max": 77326.2734375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 97.09019607843138,
"min": 79.37299035369774,
"max": 415.6115702479339,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49516.0,
"min": 48742.0,
"max": 50289.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999908.0,
"min": 49829.0,
"max": 1999908.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999908.0,
"min": 49829.0,
"max": 1999908.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.405795097351074,
"min": 0.17598101496696472,
"max": 2.46203875541687,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1226.9554443359375,
"min": 21.117721557617188,
"max": 1504.0067138671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.728003032768474,
"min": 1.9280931438008944,
"max": 3.931875259659507,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1901.2815467119217,
"min": 231.37117725610733,
"max": 2367.610983788967,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.728003032768474,
"min": 1.9280931438008944,
"max": 3.931875259659507,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1901.2815467119217,
"min": 231.37117725610733,
"max": 2367.610983788967,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016226193154216164,
"min": 0.012976081135275309,
"max": 0.019985798568541213,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04867857946264849,
"min": 0.025952162270550617,
"max": 0.055422571033159326,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05888000087191661,
"min": 0.01998415591660887,
"max": 0.0840855292355021,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17664000261574983,
"min": 0.03996831183321774,
"max": 0.2522565877065063,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 1.2106987893999987e-06,
"min": 1.2106987893999987e-06,
"max": 9.843752656247501e-05,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 3.632096368199996e-06,
"min": 3.632096368199996e-06,
"max": 0.000281412018588,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1012106,
"min": 0.1012106,
"max": 0.198437525,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3036318,
"min": 0.2075537,
"max": 0.581412,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.040893999999994e-05,
"min": 7.040893999999994e-05,
"max": 0.004922032497499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021122681999999983,
"min": 0.00021122681999999983,
"max": 0.0140724588,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701169866",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701172369"
},
"total": 2503.554252854,
"count": 1,
"self": 0.44087583500004257,
"children": {
"run_training.setup": {
"total": 0.05638467300002503,
"count": 1,
"self": 0.05638467300002503
},
"TrainerController.start_learning": {
"total": 2503.056992346,
"count": 1,
"self": 4.68011146507024,
"children": {
"TrainerController._reset_env": {
"total": 3.5444378849999794,
"count": 1,
"self": 3.5444378849999794
},
"TrainerController.advance": {
"total": 2494.7279314709294,
"count": 232434,
"self": 4.731960286101639,
"children": {
"env_step": {
"total": 1883.6086727738902,
"count": 232434,
"self": 1561.014907070744,
"children": {
"SubprocessEnvManager._take_step": {
"total": 319.8126660991351,
"count": 232434,
"self": 16.77995295113476,
"children": {
"TorchPolicy.evaluate": {
"total": 303.0327131480003,
"count": 222987,
"self": 303.0327131480003
}
}
},
"workers": {
"total": 2.7810996040111604,
"count": 232434,
"self": 0.0,
"children": {
"worker_root": {
"total": 2495.3721281070048,
"count": 232434,
"is_parallel": true,
"self": 1226.9485265189292,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008170200001131889,
"count": 1,
"is_parallel": true,
"self": 0.00023881400011305232,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005782060000001366,
"count": 2,
"is_parallel": true,
"self": 0.0005782060000001366
}
}
},
"UnityEnvironment.step": {
"total": 0.04833673699999963,
"count": 1,
"is_parallel": true,
"self": 0.0002926580000348622,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002188559999467543,
"count": 1,
"is_parallel": true,
"self": 0.0002188559999467543
},
"communicator.exchange": {
"total": 0.04708947200003877,
"count": 1,
"is_parallel": true,
"self": 0.04708947200003877
},
"steps_from_proto": {
"total": 0.0007357509999792455,
"count": 1,
"is_parallel": true,
"self": 0.00021773200001007353,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000518018999969172,
"count": 2,
"is_parallel": true,
"self": 0.000518018999969172
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1268.4236015880756,
"count": 232433,
"is_parallel": true,
"self": 39.71614799098438,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.12131539199788,
"count": 232433,
"is_parallel": true,
"self": 83.12131539199788
},
"communicator.exchange": {
"total": 1057.5679392729944,
"count": 232433,
"is_parallel": true,
"self": 1057.5679392729944
},
"steps_from_proto": {
"total": 88.01819893209881,
"count": 232433,
"is_parallel": true,
"self": 31.33019942014289,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.68799951195592,
"count": 464866,
"is_parallel": true,
"self": 56.68799951195592
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 606.3872984109374,
"count": 232434,
"self": 6.619664272983755,
"children": {
"process_trajectory": {
"total": 152.7846296509514,
"count": 232434,
"self": 151.58913522295188,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1954944279995061,
"count": 10,
"self": 1.1954944279995061
}
}
},
"_update_policy": {
"total": 446.9830044870023,
"count": 97,
"self": 362.6728856860087,
"children": {
"TorchPPOOptimizer.update": {
"total": 84.31011880099356,
"count": 3880,
"self": 84.31011880099356
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0200001270277426e-06,
"count": 1,
"self": 1.0200001270277426e-06
},
"TrainerController._save_models": {
"total": 0.10451050500023484,
"count": 1,
"self": 0.0026865920003729116,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10182391299986193,
"count": 1,
"self": 0.10182391299986193
}
}
}
}
}
}
}