{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4014055728912354,
"min": 1.4014055728912354,
"max": 1.4275743961334229,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69951.15625,
"min": 69595.1484375,
"max": 76132.0390625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 67.31830601092896,
"min": 65.40371845949535,
"max": 420.1764705882353,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49277.0,
"min": 49188.0,
"max": 50001.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999986.0,
"min": 49714.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999986.0,
"min": 49714.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5293543338775635,
"min": 0.06965820491313934,
"max": 2.5385539531707764,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1851.4873046875,
"min": 8.2196683883667,
"max": 1875.7392578125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.988865204359013,
"min": 1.6485948807607262,
"max": 4.097599985150548,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2919.8493295907974,
"min": 194.5341959297657,
"max": 2934.195201218128,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.988865204359013,
"min": 1.6485948807607262,
"max": 4.097599985150548,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2919.8493295907974,
"min": 194.5341959297657,
"max": 2934.195201218128,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01636218381873833,
"min": 0.01324877430694566,
"max": 0.01861843695264219,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04908655145621499,
"min": 0.02844190030785588,
"max": 0.05585531085792657,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.058422054350376125,
"min": 0.023560032000144324,
"max": 0.061696228695412476,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17526616305112838,
"min": 0.04712006400028865,
"max": 0.18440457234779994,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.08624863795e-06,
"min": 4.08624863795e-06,
"max": 0.00029532862655712506,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.2258745913850001e-05,
"min": 1.2258745913850001e-05,
"max": 0.0008441580186139999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10136205000000002,
"min": 0.10136205000000002,
"max": 0.19844287500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30408615000000006,
"min": 0.20785520000000002,
"max": 0.5813860000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.796629499999998e-05,
"min": 7.796629499999998e-05,
"max": 0.0049222994625,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00023389888499999992,
"min": 0.00023389888499999992,
"max": 0.014071161399999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696928107",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1696930492"
},
"total": 2384.235711668,
"count": 1,
"self": 0.4410612240003502,
"children": {
"run_training.setup": {
"total": 0.05550562300004458,
"count": 1,
"self": 0.05550562300004458
},
"TrainerController.start_learning": {
"total": 2383.7391448209996,
"count": 1,
"self": 4.281205919891363,
"children": {
"TrainerController._reset_env": {
"total": 8.069273714000019,
"count": 1,
"self": 8.069273714000019
},
"TrainerController.advance": {
"total": 2371.2821805651083,
"count": 233893,
"self": 4.575359246181506,
"children": {
"env_step": {
"total": 1810.0734511399949,
"count": 233893,
"self": 1500.7284054999336,
"children": {
"SubprocessEnvManager._take_step": {
"total": 306.54765067993645,
"count": 233893,
"self": 16.64446385295247,
"children": {
"TorchPolicy.evaluate": {
"total": 289.903186826984,
"count": 222890,
"self": 289.903186826984
}
}
},
"workers": {
"total": 2.797394960124734,
"count": 233893,
"self": 0.0,
"children": {
"worker_root": {
"total": 2376.6714259529554,
"count": 233893,
"is_parallel": true,
"self": 1153.1377520409262,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008100570000237894,
"count": 1,
"is_parallel": true,
"self": 0.00023624100003871717,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005738159999850723,
"count": 2,
"is_parallel": true,
"self": 0.0005738159999850723
}
}
},
"UnityEnvironment.step": {
"total": 0.02850137600000835,
"count": 1,
"is_parallel": true,
"self": 0.0002959200000418605,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023890699998219134,
"count": 1,
"is_parallel": true,
"self": 0.00023890699998219134
},
"communicator.exchange": {
"total": 0.027236877000007098,
"count": 1,
"is_parallel": true,
"self": 0.027236877000007098
},
"steps_from_proto": {
"total": 0.0007296719999771994,
"count": 1,
"is_parallel": true,
"self": 0.00020913099996278106,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005205410000144184,
"count": 2,
"is_parallel": true,
"self": 0.0005205410000144184
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1223.5336739120291,
"count": 233892,
"is_parallel": true,
"self": 40.23205743817766,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.23030033098343,
"count": 233892,
"is_parallel": true,
"self": 80.23030033098343
},
"communicator.exchange": {
"total": 1007.366557646898,
"count": 233892,
"is_parallel": true,
"self": 1007.366557646898
},
"steps_from_proto": {
"total": 95.70475849597017,
"count": 233892,
"is_parallel": true,
"self": 34.38657334098883,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.318185154981336,
"count": 467784,
"is_parallel": true,
"self": 61.318185154981336
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 556.633370178932,
"count": 233893,
"self": 6.203809100955027,
"children": {
"process_trajectory": {
"total": 151.11309373497653,
"count": 233893,
"self": 149.89643925197691,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2166544829996155,
"count": 10,
"self": 1.2166544829996155
}
}
},
"_update_policy": {
"total": 399.3164673430004,
"count": 97,
"self": 338.53211255100786,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.78435479199254,
"count": 2910,
"self": 60.78435479199254
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.560003491060343e-07,
"count": 1,
"self": 9.560003491060343e-07
},
"TrainerController._save_models": {
"total": 0.10648366599980363,
"count": 1,
"self": 0.0018888390000029176,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10459482699980072,
"count": 1,
"self": 0.10459482699980072
}
}
}
}
}
}
}