{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.406847357749939,
"min": 1.406847357749939,
"max": 1.429370403289795,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71624.0078125,
"min": 68865.5625,
"max": 77090.84375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 98.92015968063872,
"min": 89.22965641952983,
"max": 403.75,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49559.0,
"min": 48975.0,
"max": 50065.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999995.0,
"min": 49690.0,
"max": 1999995.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999995.0,
"min": 49690.0,
"max": 1999995.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.367455244064331,
"min": 0.1243087574839592,
"max": 2.411776065826416,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1186.0950927734375,
"min": 15.289977073669434,
"max": 1313.554443359375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6238032173729704,
"min": 1.8689450512572032,
"max": 3.8749045731840646,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1815.5254119038582,
"min": 229.880241304636,
"max": 2096.323374092579,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6238032173729704,
"min": 1.8689450512572032,
"max": 3.8749045731840646,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1815.5254119038582,
"min": 229.880241304636,
"max": 2096.323374092579,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015813765586547863,
"min": 0.01218735542728003,
"max": 0.019667549103420848,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04744129675964359,
"min": 0.02437471085456006,
"max": 0.05045147167984396,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05449694179826312,
"min": 0.0212100810992221,
"max": 0.05717981612930695,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16349082539478937,
"min": 0.0424201621984442,
"max": 0.16349082539478937,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1326989557999966e-06,
"min": 3.1326989557999966e-06,
"max": 0.00029533185155605,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.39809686739999e-06,
"min": 9.39809686739999e-06,
"max": 0.0008442648185784,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10104419999999997,
"min": 0.10104419999999997,
"max": 0.19844394999999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3031325999999999,
"min": 0.20724400000000004,
"max": 0.5814216,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.210557999999996e-05,
"min": 6.210557999999996e-05,
"max": 0.004922353105,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018631673999999987,
"min": 0.00018631673999999987,
"max": 0.014072937839999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688367188",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688369492"
},
"total": 2304.709378306,
"count": 1,
"self": 0.4398720049998701,
"children": {
"run_training.setup": {
"total": 0.04191014999997833,
"count": 1,
"self": 0.04191014999997833
},
"TrainerController.start_learning": {
"total": 2304.227596151,
"count": 1,
"self": 3.974663892965509,
"children": {
"TrainerController._reset_env": {
"total": 4.065496493000012,
"count": 1,
"self": 4.065496493000012
},
"TrainerController.advance": {
"total": 2296.0641006550345,
"count": 231770,
"self": 4.380166264018953,
"children": {
"env_step": {
"total": 1796.380955805016,
"count": 231770,
"self": 1511.9632273180216,
"children": {
"SubprocessEnvManager._take_step": {
"total": 281.7612127779683,
"count": 231770,
"self": 16.481934431032812,
"children": {
"TorchPolicy.evaluate": {
"total": 265.2792783469355,
"count": 223044,
"self": 265.2792783469355
}
}
},
"workers": {
"total": 2.656515709025996,
"count": 231770,
"self": 0.0,
"children": {
"worker_root": {
"total": 2296.9622771610098,
"count": 231770,
"is_parallel": true,
"self": 1063.7604490899525,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009358249999991131,
"count": 1,
"is_parallel": true,
"self": 0.0002772589999722186,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006585660000268945,
"count": 2,
"is_parallel": true,
"self": 0.0006585660000268945
}
}
},
"UnityEnvironment.step": {
"total": 0.029436861000021963,
"count": 1,
"is_parallel": true,
"self": 0.00036283800000092015,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021798500000613785,
"count": 1,
"is_parallel": true,
"self": 0.00021798500000613785
},
"communicator.exchange": {
"total": 0.028114362999986042,
"count": 1,
"is_parallel": true,
"self": 0.028114362999986042
},
"steps_from_proto": {
"total": 0.000741675000028863,
"count": 1,
"is_parallel": true,
"self": 0.00020481600006405642,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005368589999648066,
"count": 2,
"is_parallel": true,
"self": 0.0005368589999648066
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1233.2018280710572,
"count": 231769,
"is_parallel": true,
"self": 38.403611074038054,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.62987456689922,
"count": 231769,
"is_parallel": true,
"self": 76.62987456689922
},
"communicator.exchange": {
"total": 1026.1215272660393,
"count": 231769,
"is_parallel": true,
"self": 1026.1215272660393
},
"steps_from_proto": {
"total": 92.04681516408061,
"count": 231769,
"is_parallel": true,
"self": 32.42246416111766,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.62435100296295,
"count": 463538,
"is_parallel": true,
"self": 59.62435100296295
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 495.30297858599965,
"count": 231770,
"self": 6.101918758972886,
"children": {
"process_trajectory": {
"total": 126.56733352602475,
"count": 231770,
"self": 125.34012241702465,
"children": {
"RLTrainer._checkpoint": {
"total": 1.227211109000109,
"count": 10,
"self": 1.227211109000109
}
}
},
"_update_policy": {
"total": 362.633726301002,
"count": 97,
"self": 303.8161442010167,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.817582099985316,
"count": 2910,
"self": 58.817582099985316
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.035999957821332e-06,
"count": 1,
"self": 1.035999957821332e-06
},
"TrainerController._save_models": {
"total": 0.12333407400001306,
"count": 1,
"self": 0.001958007999746769,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12137606600026629,
"count": 1,
"self": 0.12137606600026629
}
}
}
}
}
}
}