{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4070945978164673,
"min": 1.4070945978164673,
"max": 1.4284659624099731,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70235.125,
"min": 67883.4375,
"max": 79181.75,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 68.98179271708683,
"min": 65.94109772423026,
"max": 395.12598425196853,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49253.0,
"min": 49225.0,
"max": 50181.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999969.0,
"min": 49558.0,
"max": 1999969.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999969.0,
"min": 49558.0,
"max": 1999969.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.597696542739868,
"min": 0.08001773059368134,
"max": 2.597696542739868,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1854.75537109375,
"min": 10.082234382629395,
"max": 1870.92724609375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 4.086492193429744,
"min": 1.8288161532273368,
"max": 4.086492193429744,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2917.755426108837,
"min": 230.43083530664444,
"max": 2917.755426108837,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 4.086492193429744,
"min": 1.8288161532273368,
"max": 4.086492193429744,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2917.755426108837,
"min": 230.43083530664444,
"max": 2917.755426108837,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014604052693984057,
"min": 0.01415121356791739,
"max": 0.020989246072713286,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04381215808195217,
"min": 0.03057959450331206,
"max": 0.05583624598851505,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05627144662042458,
"min": 0.022199221017460028,
"max": 0.0641590011616548,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16881433986127375,
"min": 0.044398442034920056,
"max": 0.19002082968751588,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7670487443500015e-06,
"min": 3.7670487443500015e-06,
"max": 0.000295331926556025,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1301146233050004e-05,
"min": 1.1301146233050004e-05,
"max": 0.0008437225687591499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10125565000000003,
"min": 0.10125565000000003,
"max": 0.198443975,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037669500000001,
"min": 0.20764034999999995,
"max": 0.5812408500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.265693500000005e-05,
"min": 7.265693500000005e-05,
"max": 0.0049223543525,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021797080500000015,
"min": 0.00021797080500000015,
"max": 0.014063918414999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675204599",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675206902"
},
"total": 2302.520942973,
"count": 1,
"self": 0.44290232599951196,
"children": {
"run_training.setup": {
"total": 0.11140579900006742,
"count": 1,
"self": 0.11140579900006742
},
"TrainerController.start_learning": {
"total": 2301.966634848,
"count": 1,
"self": 4.099578789032876,
"children": {
"TrainerController._reset_env": {
"total": 10.622737477999976,
"count": 1,
"self": 10.622737477999976
},
"TrainerController.advance": {
"total": 2287.1317919159674,
"count": 233463,
"self": 4.205890229828128,
"children": {
"env_step": {
"total": 1809.392509515055,
"count": 233463,
"self": 1518.4900486990873,
"children": {
"SubprocessEnvManager._take_step": {
"total": 288.1035227130387,
"count": 233463,
"self": 15.061733799108424,
"children": {
"TorchPolicy.evaluate": {
"total": 273.0417889139303,
"count": 222853,
"self": 68.16226331992118,
"children": {
"TorchPolicy.sample_actions": {
"total": 204.87952559400912,
"count": 222853,
"self": 204.87952559400912
}
}
}
}
},
"workers": {
"total": 2.798938102929128,
"count": 233463,
"self": 0.0,
"children": {
"worker_root": {
"total": 2293.4995952301047,
"count": 233463,
"is_parallel": true,
"self": 1045.9327829360486,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0037832640000488027,
"count": 1,
"is_parallel": true,
"self": 0.00035941700002695143,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0034238470000218513,
"count": 2,
"is_parallel": true,
"self": 0.0034238470000218513
}
}
},
"UnityEnvironment.step": {
"total": 0.027710622999961743,
"count": 1,
"is_parallel": true,
"self": 0.00031842500004586327,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019417399994381412,
"count": 1,
"is_parallel": true,
"self": 0.00019417399994381412
},
"communicator.exchange": {
"total": 0.026302312999973765,
"count": 1,
"is_parallel": true,
"self": 0.026302312999973765
},
"steps_from_proto": {
"total": 0.0008957109999983004,
"count": 1,
"is_parallel": true,
"self": 0.00041651299989098334,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00047919800010731706,
"count": 2,
"is_parallel": true,
"self": 0.00047919800010731706
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1247.566812294056,
"count": 233462,
"is_parallel": true,
"self": 35.622515135070444,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.41015317199083,
"count": 233462,
"is_parallel": true,
"self": 76.41015317199083
},
"communicator.exchange": {
"total": 1040.3426396488876,
"count": 233462,
"is_parallel": true,
"self": 1040.3426396488876
},
"steps_from_proto": {
"total": 95.19150433810739,
"count": 233462,
"is_parallel": true,
"self": 38.5139535021525,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.67755083595489,
"count": 466924,
"is_parallel": true,
"self": 56.67755083595489
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 473.5333921710843,
"count": 233463,
"self": 7.068640334009729,
"children": {
"process_trajectory": {
"total": 157.57718594707603,
"count": 233463,
"self": 156.42179335607614,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1553925909998952,
"count": 10,
"self": 1.1553925909998952
}
}
},
"_update_policy": {
"total": 308.88756588999854,
"count": 97,
"self": 254.2594349020128,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.62813098798574,
"count": 2910,
"self": 54.62813098798574
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.059999683813658e-07,
"count": 1,
"self": 9.059999683813658e-07
},
"TrainerController._save_models": {
"total": 0.11252575899970907,
"count": 1,
"self": 0.0020740370000567054,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11045172199965236,
"count": 1,
"self": 0.11045172199965236
}
}
}
}
}
}
}