{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4032377004623413,
"min": 1.4032377004623413,
"max": 1.4277970790863037,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68800.7421875,
"min": 68509.921875,
"max": 77495.3359375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 98.6792828685259,
"min": 74.87253414264036,
"max": 378.54545454545456,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49537.0,
"min": 49008.0,
"max": 49971.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999943.0,
"min": 49777.0,
"max": 1999943.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999943.0,
"min": 49777.0,
"max": 1999943.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3484694957733154,
"min": 0.1382184773683548,
"max": 2.5188162326812744,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1178.931640625,
"min": 18.10662078857422,
"max": 1617.6368408203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.585334249107961,
"min": 1.9705844965144879,
"max": 4.069950046932043,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1799.8377930521965,
"min": 258.1465690433979,
"max": 2562.7885939478874,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.585334249107961,
"min": 1.9705844965144879,
"max": 4.069950046932043,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1799.8377930521965,
"min": 258.1465690433979,
"max": 2562.7885939478874,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015397018870054227,
"min": 0.01370705122681102,
"max": 0.019623118255306812,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04619105661016268,
"min": 0.02741410245362204,
"max": 0.05877691307008111,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04515591739780373,
"min": 0.020429366640746592,
"max": 0.06010250002145767,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1354677521934112,
"min": 0.040858733281493184,
"max": 0.18012392210463682,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4899488367166724e-06,
"min": 3.4899488367166724e-06,
"max": 0.00029536965154345,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0469846510150018e-05,
"min": 1.0469846510150018e-05,
"max": 0.0008442522185826001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10116328333333335,
"min": 0.10116328333333335,
"max": 0.19845655,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30348985000000006,
"min": 0.20745595,
"max": 0.5814174,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.804783833333344e-05,
"min": 6.804783833333344e-05,
"max": 0.0049229818449999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020414351500000033,
"min": 0.00020414351500000033,
"max": 0.01407272826,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675278293",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675280408"
},
"total": 2115.29083649,
"count": 1,
"self": 0.3874110059996383,
"children": {
"run_training.setup": {
"total": 0.11394404800000757,
"count": 1,
"self": 0.11394404800000757
},
"TrainerController.start_learning": {
"total": 2114.789481436,
"count": 1,
"self": 3.6113296839334907,
"children": {
"TrainerController._reset_env": {
"total": 10.524548685000013,
"count": 1,
"self": 10.524548685000013
},
"TrainerController.advance": {
"total": 2100.5372697710673,
"count": 233027,
"self": 3.8515555050312287,
"children": {
"env_step": {
"total": 1653.4516739340434,
"count": 233027,
"self": 1390.321478221953,
"children": {
"SubprocessEnvManager._take_step": {
"total": 260.6536133610824,
"count": 233027,
"self": 13.939833090056482,
"children": {
"TorchPolicy.evaluate": {
"total": 246.71378027102594,
"count": 223038,
"self": 62.51012888703542,
"children": {
"TorchPolicy.sample_actions": {
"total": 184.20365138399052,
"count": 223038,
"self": 184.20365138399052
}
}
}
}
},
"workers": {
"total": 2.4765823510078917,
"count": 233027,
"self": 0.0,
"children": {
"worker_root": {
"total": 2107.1732869690472,
"count": 233027,
"is_parallel": true,
"self": 962.9618529239535,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022197430000687746,
"count": 1,
"is_parallel": true,
"self": 0.00038700400000379886,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018327390000649757,
"count": 2,
"is_parallel": true,
"self": 0.0018327390000649757
}
}
},
"UnityEnvironment.step": {
"total": 0.026847123000038664,
"count": 1,
"is_parallel": true,
"self": 0.00027855700011514273,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020956600008048554,
"count": 1,
"is_parallel": true,
"self": 0.00020956600008048554
},
"communicator.exchange": {
"total": 0.025671993999935694,
"count": 1,
"is_parallel": true,
"self": 0.025671993999935694
},
"steps_from_proto": {
"total": 0.0006870059999073419,
"count": 1,
"is_parallel": true,
"self": 0.00022645299975465605,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046055300015268585,
"count": 2,
"is_parallel": true,
"self": 0.00046055300015268585
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1144.2114340450937,
"count": 233026,
"is_parallel": true,
"self": 33.15463472501119,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 72.6389716010707,
"count": 233026,
"is_parallel": true,
"self": 72.6389716010707
},
"communicator.exchange": {
"total": 950.4866049899822,
"count": 233026,
"is_parallel": true,
"self": 950.4866049899822
},
"steps_from_proto": {
"total": 87.93122272902963,
"count": 233026,
"is_parallel": true,
"self": 36.34244444198271,
"children": {
"_process_rank_one_or_two_observation": {
"total": 51.58877828704692,
"count": 466052,
"is_parallel": true,
"self": 51.58877828704692
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 443.2340403319928,
"count": 233027,
"self": 5.755336000063039,
"children": {
"process_trajectory": {
"total": 139.4707467759306,
"count": 233027,
"self": 138.36961936793045,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1011274080001385,
"count": 10,
"self": 1.1011274080001385
}
}
},
"_update_policy": {
"total": 298.00795755599916,
"count": 97,
"self": 246.1693539329782,
"children": {
"TorchPPOOptimizer.update": {
"total": 51.83860362302096,
"count": 2910,
"self": 51.83860362302096
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.709998837730382e-07,
"count": 1,
"self": 8.709998837730382e-07
},
"TrainerController._save_models": {
"total": 0.1163324249996549,
"count": 1,
"self": 0.0019621659998847463,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11437025899977016,
"count": 1,
"self": 0.11437025899977016
}
}
}
}
}
}
}