{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.400404930114746,
"min": 1.400404930114746,
"max": 1.422249436378479,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70992.125,
"min": 68119.4609375,
"max": 79069.71875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.43944636678201,
"min": 85.22586206896551,
"max": 412.37704918032784,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49384.0,
"min": 48856.0,
"max": 50310.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999992.0,
"min": 49720.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999992.0,
"min": 49720.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4564082622528076,
"min": 0.0189647413790226,
"max": 2.4955363273620605,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1419.803955078125,
"min": 2.294733762741089,
"max": 1419.803955078125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.730031937051397,
"min": 1.7410832652129418,
"max": 3.9791513256130426,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2155.9584596157074,
"min": 210.67107509076595,
"max": 2222.011102795601,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.730031937051397,
"min": 1.7410832652129418,
"max": 3.9791513256130426,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2155.9584596157074,
"min": 210.67107509076595,
"max": 2222.011102795601,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017377000415298324,
"min": 0.013468879802288333,
"max": 0.021641676495912383,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.052131001245894974,
"min": 0.026937759604576665,
"max": 0.06492502948773715,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.060294494777917866,
"min": 0.023640562345584235,
"max": 0.06272319853305817,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1808834843337536,
"min": 0.04728112469116847,
"max": 0.1808834843337536,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.556248814616657e-06,
"min": 3.556248814616657e-06,
"max": 0.0002953488015503999,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0668746443849971e-05,
"min": 1.0668746443849971e-05,
"max": 0.00084420946859685,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118538333333332,
"min": 0.10118538333333332,
"max": 0.1984496,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30355614999999997,
"min": 0.20750829999999998,
"max": 0.58140315,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.915062833333321e-05,
"min": 6.915062833333321e-05,
"max": 0.00492263504,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020745188499999962,
"min": 0.00020745188499999962,
"max": 0.014072017184999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672562969",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672565558"
},
"total": 2588.304334293,
"count": 1,
"self": 0.438293445999534,
"children": {
"run_training.setup": {
"total": 0.1132715090000147,
"count": 1,
"self": 0.1132715090000147
},
"TrainerController.start_learning": {
"total": 2587.7527693380002,
"count": 1,
"self": 4.821092278092692,
"children": {
"TrainerController._reset_env": {
"total": 8.004561078000052,
"count": 1,
"self": 8.004561078000052
},
"TrainerController.advance": {
"total": 2574.810977375908,
"count": 232407,
"self": 5.021011575885041,
"children": {
"env_step": {
"total": 2062.663155017025,
"count": 232407,
"self": 1729.7183680859723,
"children": {
"SubprocessEnvManager._take_step": {
"total": 329.68233614403744,
"count": 232407,
"self": 17.182020539012797,
"children": {
"TorchPolicy.evaluate": {
"total": 312.50031560502464,
"count": 222997,
"self": 78.55876835302524,
"children": {
"TorchPolicy.sample_actions": {
"total": 233.9415472519994,
"count": 222997,
"self": 233.9415472519994
}
}
}
}
},
"workers": {
"total": 3.2624507870152684,
"count": 232407,
"self": 0.0,
"children": {
"worker_root": {
"total": 2578.0592396259963,
"count": 232407,
"is_parallel": true,
"self": 1163.2343323629161,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002185113999985333,
"count": 1,
"is_parallel": true,
"self": 0.00037420899991502665,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018109050000703064,
"count": 2,
"is_parallel": true,
"self": 0.0018109050000703064
}
}
},
"UnityEnvironment.step": {
"total": 0.029164834999960476,
"count": 1,
"is_parallel": true,
"self": 0.00033387699988907116,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019233400007578894,
"count": 1,
"is_parallel": true,
"self": 0.00019233400007578894
},
"communicator.exchange": {
"total": 0.02782134500000666,
"count": 1,
"is_parallel": true,
"self": 0.02782134500000666
},
"steps_from_proto": {
"total": 0.0008172789999889574,
"count": 1,
"is_parallel": true,
"self": 0.0003231939999750466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004940850000139108,
"count": 2,
"is_parallel": true,
"self": 0.0004940850000139108
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1414.8249072630801,
"count": 232406,
"is_parallel": true,
"self": 40.043082196157684,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.01458128209151,
"count": 232406,
"is_parallel": true,
"self": 89.01458128209151
},
"communicator.exchange": {
"total": 1176.072500431975,
"count": 232406,
"is_parallel": true,
"self": 1176.072500431975
},
"steps_from_proto": {
"total": 109.69474335285611,
"count": 232406,
"is_parallel": true,
"self": 46.855755276001105,
"children": {
"_process_rank_one_or_two_observation": {
"total": 62.838988076855,
"count": 464812,
"is_parallel": true,
"self": 62.838988076855
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 507.12681078299795,
"count": 232407,
"self": 7.408185094948294,
"children": {
"process_trajectory": {
"total": 168.1141610770493,
"count": 232407,
"self": 166.85732300904908,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2568380680002065,
"count": 10,
"self": 1.2568380680002065
}
}
},
"_update_policy": {
"total": 331.60446461100037,
"count": 97,
"self": 276.35227317099634,
"children": {
"TorchPPOOptimizer.update": {
"total": 55.252191440004026,
"count": 2910,
"self": 55.252191440004026
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.649997991800774e-07,
"count": 1,
"self": 9.649997991800774e-07
},
"TrainerController._save_models": {
"total": 0.116137641000023,
"count": 1,
"self": 0.002158409000003303,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11397923200001969,
"count": 1,
"self": 0.11397923200001969
}
}
}
}
}
}
}