{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4117660522460938,
"min": 1.4117587804794312,
"max": 1.4337464570999146,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72686.1875,
"min": 68724.3359375,
"max": 78480.3359375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 117.91489361702128,
"min": 94.72466539196941,
"max": 398.5079365079365,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49878.0,
"min": 49286.0,
"max": 50247.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999616.0,
"min": 49677.0,
"max": 1999616.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999616.0,
"min": 49677.0,
"max": 1999616.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3062551021575928,
"min": 0.04291943460702896,
"max": 2.431624412536621,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 975.5458984375,
"min": 5.36492919921875,
"max": 1208.409423828125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5606880630443563,
"min": 1.9023651509284973,
"max": 3.8472433543867535,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1506.1710506677628,
"min": 237.79564386606216,
"max": 1868.071015894413,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5606880630443563,
"min": 1.9023651509284973,
"max": 3.8472433543867535,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1506.1710506677628,
"min": 237.79564386606216,
"max": 1868.071015894413,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016556578953168356,
"min": 0.012387862155931847,
"max": 0.02116212643062075,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03311315790633671,
"min": 0.024775724311863694,
"max": 0.060107084305491304,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04598587844520807,
"min": 0.023324133300532898,
"max": 0.058455938038726646,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09197175689041615,
"min": 0.046648266601065795,
"max": 0.17078211046755315,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.371023543024997e-06,
"min": 4.371023543024997e-06,
"max": 0.00029531985156005006,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.742047086049995e-06,
"min": 8.742047086049995e-06,
"max": 0.0008440596186467999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101456975,
"min": 0.101456975,
"max": 0.19843994999999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20291395,
"min": 0.20291395,
"max": 0.5813532000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.270305249999995e-05,
"min": 8.270305249999995e-05,
"max": 0.0049221535050000004,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001654061049999999,
"min": 0.0001654061049999999,
"max": 0.01406952468,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673863765",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673865932"
},
"total": 2166.4987473909996,
"count": 1,
"self": 0.39301622599941766,
"children": {
"run_training.setup": {
"total": 0.10710030600000664,
"count": 1,
"self": 0.10710030600000664
},
"TrainerController.start_learning": {
"total": 2165.998630859,
"count": 1,
"self": 3.650877015991682,
"children": {
"TrainerController._reset_env": {
"total": 10.674677952000025,
"count": 1,
"self": 10.674677952000025
},
"TrainerController.advance": {
"total": 2151.5516960900086,
"count": 231017,
"self": 3.8730995740515937,
"children": {
"env_step": {
"total": 1689.7352487669348,
"count": 231017,
"self": 1419.7088077389155,
"children": {
"SubprocessEnvManager._take_step": {
"total": 267.5293765179771,
"count": 231017,
"self": 13.855142748961953,
"children": {
"TorchPolicy.evaluate": {
"total": 253.67423376901513,
"count": 223131,
"self": 64.61432083898433,
"children": {
"TorchPolicy.sample_actions": {
"total": 189.0599129300308,
"count": 223131,
"self": 189.0599129300308
}
}
}
}
},
"workers": {
"total": 2.497064510042094,
"count": 231017,
"self": 0.0,
"children": {
"worker_root": {
"total": 2158.7078321558947,
"count": 231017,
"is_parallel": true,
"self": 988.8734368648338,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016980610000132401,
"count": 1,
"is_parallel": true,
"self": 0.0002777120000132527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014203489999999874,
"count": 2,
"is_parallel": true,
"self": 0.0014203489999999874
}
}
},
"UnityEnvironment.step": {
"total": 0.02808868300002132,
"count": 1,
"is_parallel": true,
"self": 0.00029053300005443816,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018022400001882488,
"count": 1,
"is_parallel": true,
"self": 0.00018022400001882488
},
"communicator.exchange": {
"total": 0.026673665999965124,
"count": 1,
"is_parallel": true,
"self": 0.026673665999965124
},
"steps_from_proto": {
"total": 0.0009442599999829326,
"count": 1,
"is_parallel": true,
"self": 0.0002460609999843655,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006981989999985672,
"count": 2,
"is_parallel": true,
"self": 0.0006981989999985672
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1169.834395291061,
"count": 231016,
"is_parallel": true,
"self": 33.68438667610985,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.2007933559916,
"count": 231016,
"is_parallel": true,
"self": 75.2007933559916
},
"communicator.exchange": {
"total": 970.0268946419621,
"count": 231016,
"is_parallel": true,
"self": 970.0268946419621
},
"steps_from_proto": {
"total": 90.9223206169973,
"count": 231016,
"is_parallel": true,
"self": 37.235939507874946,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.68638110912235,
"count": 462032,
"is_parallel": true,
"self": 53.68638110912235
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 457.9433477490223,
"count": 231017,
"self": 5.848949524031866,
"children": {
"process_trajectory": {
"total": 139.12937602198957,
"count": 231017,
"self": 137.95097442798948,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1784015940000927,
"count": 10,
"self": 1.1784015940000927
}
}
},
"_update_policy": {
"total": 312.96502220300084,
"count": 96,
"self": 259.6416024589952,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.32341974400566,
"count": 2880,
"self": 53.32341974400566
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.799999366397969e-07,
"count": 1,
"self": 7.799999366397969e-07
},
"TrainerController._save_models": {
"total": 0.1213790209999388,
"count": 1,
"self": 0.001991031999750703,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1193879890001881,
"count": 1,
"self": 0.1193879890001881
}
}
}
}
}
}
}