{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4063433408737183,
"min": 1.4063433408737183,
"max": 1.4293767213821411,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 73361.8984375,
"min": 67151.640625,
"max": 79436.171875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 136.8857938718663,
"min": 105.30148619957536,
"max": 397.27777777777777,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49142.0,
"min": 49044.0,
"max": 50137.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999937.0,
"min": 49715.0,
"max": 1999937.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999937.0,
"min": 49715.0,
"max": 1999937.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.171731948852539,
"min": 0.19187907874584198,
"max": 2.3562533855438232,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 779.6517944335938,
"min": 23.98488426208496,
"max": 1069.739013671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.267155697252757,
"min": 1.866910888671875,
"max": 3.714902807525496,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1172.9088953137398,
"min": 233.36386108398438,
"max": 1680.2832539081573,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.267155697252757,
"min": 1.866910888671875,
"max": 3.714902807525496,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1172.9088953137398,
"min": 233.36386108398438,
"max": 1680.2832539081573,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017642016370738624,
"min": 0.012879492806193108,
"max": 0.020294346035370836,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03528403274147725,
"min": 0.025758985612386216,
"max": 0.055292674953428406,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.039927794598042965,
"min": 0.020183326303958894,
"max": 0.05625198402752479,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.07985558919608593,
"min": 0.04036665260791779,
"max": 0.15871732508142788,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.958298680599995e-06,
"min": 3.958298680599995e-06,
"max": 0.0002953421265526249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 7.91659736119999e-06,
"min": 7.91659736119999e-06,
"max": 0.0008440510686496499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10131940000000003,
"min": 0.10131940000000003,
"max": 0.19844737499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20263880000000006,
"min": 0.20263880000000006,
"max": 0.58135035,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.583805999999992e-05,
"min": 7.583805999999992e-05,
"max": 0.0049225240124999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00015167611999999985,
"min": 0.00015167611999999985,
"max": 0.014069382464999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688043800",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688046276"
},
"total": 2476.293886446,
"count": 1,
"self": 0.8040362470001128,
"children": {
"run_training.setup": {
"total": 0.04250731700005872,
"count": 1,
"self": 0.04250731700005872
},
"TrainerController.start_learning": {
"total": 2475.447342882,
"count": 1,
"self": 4.472045360912944,
"children": {
"TrainerController._reset_env": {
"total": 4.2011428419999675,
"count": 1,
"self": 4.2011428419999675
},
"TrainerController.advance": {
"total": 2466.590089727087,
"count": 230429,
"self": 4.730363059159117,
"children": {
"env_step": {
"total": 1937.307908661872,
"count": 230429,
"self": 1631.180570936876,
"children": {
"SubprocessEnvManager._take_step": {
"total": 303.16226905005783,
"count": 230429,
"self": 17.442250164113148,
"children": {
"TorchPolicy.evaluate": {
"total": 285.7200188859447,
"count": 223133,
"self": 285.7200188859447
}
}
},
"workers": {
"total": 2.9650686749383794,
"count": 230429,
"self": 0.0,
"children": {
"worker_root": {
"total": 2467.453381736975,
"count": 230429,
"is_parallel": true,
"self": 1133.5088471970694,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009301999999706823,
"count": 1,
"is_parallel": true,
"self": 0.00029486200003248086,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006353379999382014,
"count": 2,
"is_parallel": true,
"self": 0.0006353379999382014
}
}
},
"UnityEnvironment.step": {
"total": 0.0297865229999843,
"count": 1,
"is_parallel": true,
"self": 0.0003720559998328099,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00026573900004223105,
"count": 1,
"is_parallel": true,
"self": 0.00026573900004223105
},
"communicator.exchange": {
"total": 0.028323994000061248,
"count": 1,
"is_parallel": true,
"self": 0.028323994000061248
},
"steps_from_proto": {
"total": 0.0008247340000480108,
"count": 1,
"is_parallel": true,
"self": 0.00022784799989494786,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005968860001530629,
"count": 2,
"is_parallel": true,
"self": 0.0005968860001530629
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1333.9445345399058,
"count": 230428,
"is_parallel": true,
"self": 41.015270930912266,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.14941242198006,
"count": 230428,
"is_parallel": true,
"self": 82.14941242198006
},
"communicator.exchange": {
"total": 1112.208960404936,
"count": 230428,
"is_parallel": true,
"self": 1112.208960404936
},
"steps_from_proto": {
"total": 98.57089078207753,
"count": 230428,
"is_parallel": true,
"self": 34.58349443701945,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.98739634505807,
"count": 460856,
"is_parallel": true,
"self": 63.98739634505807
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 524.5518180060561,
"count": 230429,
"self": 7.105288861088752,
"children": {
"process_trajectory": {
"total": 130.3340456399684,
"count": 230429,
"self": 128.85723034996886,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4768152899995357,
"count": 10,
"self": 1.4768152899995357
}
}
},
"_update_policy": {
"total": 387.112483504999,
"count": 96,
"self": 326.55115251300435,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.56133099199462,
"count": 2880,
"self": 60.56133099199462
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4890001693856902e-06,
"count": 1,
"self": 1.4890001693856902e-06
},
"TrainerController._save_models": {
"total": 0.18406346299980214,
"count": 1,
"self": 0.0028703539996968175,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18119310900010532,
"count": 1,
"self": 0.18119310900010532
}
}
}
}
}
}
}