{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.408666968345642,
"min": 1.408666968345642,
"max": 1.4287070035934448,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71160.21875,
"min": 69326.3984375,
"max": 75809.6484375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 74.06231454005935,
"min": 72.00729927007299,
"max": 423.4406779661017,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49918.0,
"min": 48621.0,
"max": 50099.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999877.0,
"min": 49954.0,
"max": 1999877.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999877.0,
"min": 49954.0,
"max": 1999877.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4913418292999268,
"min": 0.1406382918357849,
"max": 2.535187005996704,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1679.1644287109375,
"min": 16.454679489135742,
"max": 1693.8389892578125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.819953240785825,
"min": 1.937120489585094,
"max": 4.020933025808477,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2574.648484289646,
"min": 226.643097281456,
"max": 2589.7497730255127,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.819953240785825,
"min": 1.937120489585094,
"max": 4.020933025808477,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2574.648484289646,
"min": 226.643097281456,
"max": 2589.7497730255127,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.0157707668145627,
"min": 0.013827340263621105,
"max": 0.019958196191388805,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0473123004436881,
"min": 0.02765468052724221,
"max": 0.053339188800115754,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06317716580298212,
"min": 0.02085024602711201,
"max": 0.06593003558615844,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18953149740894634,
"min": 0.04170049205422402,
"max": 0.1977901067584753,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.8374987208666686e-06,
"min": 3.8374987208666686e-06,
"max": 0.0002953767015410999,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1512496162600005e-05,
"min": 1.1512496162600005e-05,
"max": 0.0008441809686063501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10127913333333337,
"min": 0.10127913333333337,
"max": 0.19845890000000008,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3038374000000001,
"min": 0.20773850000000002,
"max": 0.58139365,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.382875333333339e-05,
"min": 7.382875333333339e-05,
"max": 0.004923099109999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022148626000000017,
"min": 0.00022148626000000017,
"max": 0.014071543135,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677296618",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677299105"
},
"total": 2487.218742008,
"count": 1,
"self": 0.4961927719996311,
"children": {
"run_training.setup": {
"total": 0.12141572200005157,
"count": 1,
"self": 0.12141572200005157
},
"TrainerController.start_learning": {
"total": 2486.601133514,
"count": 1,
"self": 4.450549242949364,
"children": {
"TrainerController._reset_env": {
"total": 11.319899936999946,
"count": 1,
"self": 11.319899936999946
},
"TrainerController.advance": {
"total": 2470.7131226960505,
"count": 233365,
"self": 4.949398147045031,
"children": {
"env_step": {
"total": 1907.8630216880633,
"count": 233365,
"self": 1597.004613699986,
"children": {
"SubprocessEnvManager._take_step": {
"total": 307.95893218207675,
"count": 233365,
"self": 16.34268051218578,
"children": {
"TorchPolicy.evaluate": {
"total": 291.61625166989097,
"count": 222988,
"self": 72.7347971560082,
"children": {
"TorchPolicy.sample_actions": {
"total": 218.88145451388277,
"count": 222988,
"self": 218.88145451388277
}
}
}
}
},
"workers": {
"total": 2.8994758060005097,
"count": 233365,
"self": 0.0,
"children": {
"worker_root": {
"total": 2477.847384121067,
"count": 233365,
"is_parallel": true,
"self": 1180.142882722984,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001008303000048727,
"count": 1,
"is_parallel": true,
"self": 0.00035307700011344423,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006552259999352827,
"count": 2,
"is_parallel": true,
"self": 0.0006552259999352827
}
}
},
"UnityEnvironment.step": {
"total": 0.0528041040000744,
"count": 1,
"is_parallel": true,
"self": 0.00031961100012267707,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004566479999539297,
"count": 1,
"is_parallel": true,
"self": 0.0004566479999539297
},
"communicator.exchange": {
"total": 0.05108582699995168,
"count": 1,
"is_parallel": true,
"self": 0.05108582699995168
},
"steps_from_proto": {
"total": 0.0009420180000461187,
"count": 1,
"is_parallel": true,
"self": 0.00025951400004942116,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006825039999966975,
"count": 2,
"is_parallel": true,
"self": 0.0006825039999966975
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1297.704501398083,
"count": 233364,
"is_parallel": true,
"self": 40.48660324901539,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.76610339511467,
"count": 233364,
"is_parallel": true,
"self": 80.76610339511467
},
"communicator.exchange": {
"total": 1082.6341525170587,
"count": 233364,
"is_parallel": true,
"self": 1082.6341525170587
},
"steps_from_proto": {
"total": 93.81764223689424,
"count": 233364,
"is_parallel": true,
"self": 37.83616888199083,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.981473354903414,
"count": 466728,
"is_parallel": true,
"self": 55.981473354903414
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 557.900702860942,
"count": 233365,
"self": 6.742960981798888,
"children": {
"process_trajectory": {
"total": 174.78389380214333,
"count": 233365,
"self": 173.51921540714307,
"children": {
"RLTrainer._checkpoint": {
"total": 1.26467839500026,
"count": 10,
"self": 1.26467839500026
}
}
},
"_update_policy": {
"total": 376.3738480769998,
"count": 97,
"self": 315.8361243240008,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.537723752999,
"count": 2910,
"self": 60.537723752999
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.360002539120615e-07,
"count": 1,
"self": 8.360002539120615e-07
},
"TrainerController._save_models": {
"total": 0.11756080199984353,
"count": 1,
"self": 0.0029101380000611243,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11465066399978241,
"count": 1,
"self": 0.11465066399978241
}
}
}
}
}
}
}