{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4040826559066772,
"min": 1.4040826559066772,
"max": 1.4278944730758667,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69650.921875,
"min": 68412.5078125,
"max": 77878.46875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 122.43703703703704,
"min": 102.11387163561076,
"max": 390.0387596899225,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49587.0,
"min": 48958.0,
"max": 50315.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999993.0,
"min": 49695.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999993.0,
"min": 49695.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.196301221847534,
"min": 0.026159998029470444,
"max": 2.3311574459075928,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 889.501953125,
"min": 3.348479747772217,
"max": 1125.7044677734375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.4863408185817577,
"min": 1.8076969583053142,
"max": 3.7915526054006943,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1411.9680315256119,
"min": 231.38521066308022,
"max": 1789.7524199485779,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.4863408185817577,
"min": 1.8076969583053142,
"max": 3.7915526054006943,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1411.9680315256119,
"min": 231.38521066308022,
"max": 1789.7524199485779,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015750501074702092,
"min": 0.013204676759293458,
"max": 0.020297174263881362,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.031501002149404184,
"min": 0.026409353518586916,
"max": 0.056660141151708865,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04032895208025972,
"min": 0.022484416204194228,
"max": 0.06144029576745299,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.08065790416051943,
"min": 0.044968832408388455,
"max": 0.18432088730235896,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.368698543800005e-06,
"min": 4.368698543800005e-06,
"max": 0.00029527785157405003,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.73739708760001e-06,
"min": 8.73739708760001e-06,
"max": 0.0008438632687122498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10145619999999997,
"min": 0.10145619999999997,
"max": 0.19842595000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20291239999999994,
"min": 0.20291239999999994,
"max": 0.58128775,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.266438000000007e-05,
"min": 8.266438000000007e-05,
"max": 0.004921454905000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016532876000000015,
"min": 0.00016532876000000015,
"max": 0.014066258725,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676885850",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676888521"
},
"total": 2670.511189289,
"count": 1,
"self": 0.8445179620002818,
"children": {
"run_training.setup": {
"total": 0.1847372690000384,
"count": 1,
"self": 0.1847372690000384
},
"TrainerController.start_learning": {
"total": 2669.4819340579998,
"count": 1,
"self": 5.048161579071802,
"children": {
"TrainerController._reset_env": {
"total": 10.927719146000072,
"count": 1,
"self": 10.927719146000072
},
"TrainerController.advance": {
"total": 2653.3018924109274,
"count": 230774,
"self": 5.314520238661316,
"children": {
"env_step": {
"total": 2102.7815659481107,
"count": 230774,
"self": 1750.8270917351115,
"children": {
"SubprocessEnvManager._take_step": {
"total": 348.72163596803193,
"count": 230774,
"self": 17.790076807023183,
"children": {
"TorchPolicy.evaluate": {
"total": 330.93155916100875,
"count": 222964,
"self": 81.69807415889329,
"children": {
"TorchPolicy.sample_actions": {
"total": 249.23348500211546,
"count": 222964,
"self": 249.23348500211546
}
}
}
}
},
"workers": {
"total": 3.232838244967411,
"count": 230774,
"self": 0.0,
"children": {
"worker_root": {
"total": 2659.656498803114,
"count": 230774,
"is_parallel": true,
"self": 1241.5202409381827,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020190369999681934,
"count": 1,
"is_parallel": true,
"self": 0.0004727120000325158,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015463249999356776,
"count": 2,
"is_parallel": true,
"self": 0.0015463249999356776
}
}
},
"UnityEnvironment.step": {
"total": 0.031174063999969803,
"count": 1,
"is_parallel": true,
"self": 0.00032207100014147727,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020732899997710774,
"count": 1,
"is_parallel": true,
"self": 0.00020732899997710774
},
"communicator.exchange": {
"total": 0.029590132999942398,
"count": 1,
"is_parallel": true,
"self": 0.029590132999942398
},
"steps_from_proto": {
"total": 0.0010545309999088204,
"count": 1,
"is_parallel": true,
"self": 0.0005087119999416245,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005458189999671959,
"count": 2,
"is_parallel": true,
"self": 0.0005458189999671959
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1418.1362578649314,
"count": 230773,
"is_parallel": true,
"self": 41.90165329297315,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 91.08801278009605,
"count": 230773,
"is_parallel": true,
"self": 91.08801278009605
},
"communicator.exchange": {
"total": 1181.2134370368972,
"count": 230773,
"is_parallel": true,
"self": 1181.2134370368972
},
"steps_from_proto": {
"total": 103.93315475496513,
"count": 230773,
"is_parallel": true,
"self": 44.548842616932575,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.384312138032556,
"count": 461546,
"is_parallel": true,
"self": 59.384312138032556
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 545.2058062241553,
"count": 230774,
"self": 8.224842512216924,
"children": {
"process_trajectory": {
"total": 175.8152191739398,
"count": 230774,
"self": 174.4596408329396,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3555783410001823,
"count": 10,
"self": 1.3555783410001823
}
}
},
"_update_policy": {
"total": 361.16574453799853,
"count": 96,
"self": 303.0982876540089,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.06745688398962,
"count": 2880,
"self": 58.06745688398962
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.452000105928164e-06,
"count": 1,
"self": 1.452000105928164e-06
},
"TrainerController._save_models": {
"total": 0.20415947000037704,
"count": 1,
"self": 0.0029570320002676453,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2012024380001094,
"count": 1,
"self": 0.2012024380001094
}
}
}
}
}
}
}