{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4068306684494019,
"min": 1.4068306684494019,
"max": 1.4277244806289673,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68214.40625,
"min": 68214.40625,
"max": 77989.3359375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 94.01897533206831,
"min": 78.55414012738854,
"max": 388.83720930232556,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49548.0,
"min": 49045.0,
"max": 50160.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999999.0,
"min": 49812.0,
"max": 1999999.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999999.0,
"min": 49812.0,
"max": 1999999.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.445918560028076,
"min": 0.12988781929016113,
"max": 2.47190260887146,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1288.9990234375,
"min": 16.625640869140625,
"max": 1512.929931640625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7935507600628675,
"min": 1.8223328054882586,
"max": 4.0264395185166,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1999.201250553131,
"min": 233.2585991024971,
"max": 2458.309578537941,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7935507600628675,
"min": 1.8223328054882586,
"max": 4.0264395185166,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1999.201250553131,
"min": 233.2585991024971,
"max": 2458.309578537941,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018984709714762477,
"min": 0.013416739830331101,
"max": 0.01975389842664299,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.056954129144287435,
"min": 0.028192001887752362,
"max": 0.056954129144287435,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.053046969655487275,
"min": 0.023398726899176835,
"max": 0.06096482916424672,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15914090896646182,
"min": 0.04679745379835367,
"max": 0.17343814720710118,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6611487796499995e-06,
"min": 3.6611487796499995e-06,
"max": 0.0002953460265513249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0983446338949998e-05,
"min": 1.0983446338949998e-05,
"max": 0.0008440626186457999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10122035,
"min": 0.10122035,
"max": 0.19844867500000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30366105,
"min": 0.20757275000000008,
"max": 0.5813541999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.089546499999999e-05,
"min": 7.089546499999999e-05,
"max": 0.0049225888825,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021268639499999998,
"min": 0.00021268639499999998,
"max": 0.014069574579999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675707892",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675710242"
},
"total": 2349.91928749,
"count": 1,
"self": 0.4370559610001692,
"children": {
"run_training.setup": {
"total": 0.12088202199993248,
"count": 1,
"self": 0.12088202199993248
},
"TrainerController.start_learning": {
"total": 2349.361349507,
"count": 1,
"self": 3.9719542380939856,
"children": {
"TrainerController._reset_env": {
"total": 9.452023070999985,
"count": 1,
"self": 9.452023070999985
},
"TrainerController.advance": {
"total": 2335.8277429709055,
"count": 232631,
"self": 4.2758648998933495,
"children": {
"env_step": {
"total": 1869.0015598199693,
"count": 232631,
"self": 1568.7350608471093,
"children": {
"SubprocessEnvManager._take_step": {
"total": 297.51780716188955,
"count": 232631,
"self": 15.276979662961367,
"children": {
"TorchPolicy.evaluate": {
"total": 282.2408274989282,
"count": 222904,
"self": 70.1133772089263,
"children": {
"TorchPolicy.sample_actions": {
"total": 212.1274502900019,
"count": 222904,
"self": 212.1274502900019
}
}
}
}
},
"workers": {
"total": 2.748691810970513,
"count": 232631,
"self": 0.0,
"children": {
"worker_root": {
"total": 2340.639728455047,
"count": 232631,
"is_parallel": true,
"self": 1052.6138179010884,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002029389999961495,
"count": 1,
"is_parallel": true,
"self": 0.0003882539999722212,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016411359999892738,
"count": 2,
"is_parallel": true,
"self": 0.0016411359999892738
}
}
},
"UnityEnvironment.step": {
"total": 0.03558463500007747,
"count": 1,
"is_parallel": true,
"self": 0.0003376780000508006,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023478100001739222,
"count": 1,
"is_parallel": true,
"self": 0.00023478100001739222
},
"communicator.exchange": {
"total": 0.034179279999989376,
"count": 1,
"is_parallel": true,
"self": 0.034179279999989376
},
"steps_from_proto": {
"total": 0.0008328960000199004,
"count": 1,
"is_parallel": true,
"self": 0.0002902129999711178,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005426830000487826,
"count": 2,
"is_parallel": true,
"self": 0.0005426830000487826
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1288.0259105539587,
"count": 232630,
"is_parallel": true,
"self": 39.64703782804054,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.06424498603303,
"count": 232630,
"is_parallel": true,
"self": 89.06424498603303
},
"communicator.exchange": {
"total": 1059.3089913399426,
"count": 232630,
"is_parallel": true,
"self": 1059.3089913399426
},
"steps_from_proto": {
"total": 100.00563639994266,
"count": 232630,
"is_parallel": true,
"self": 43.38228379982229,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.62335260012037,
"count": 465260,
"is_parallel": true,
"self": 56.62335260012037
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 462.5503182510429,
"count": 232631,
"self": 6.122220797009845,
"children": {
"process_trajectory": {
"total": 154.30648858703182,
"count": 232631,
"self": 153.15330797403112,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1531806130006999,
"count": 10,
"self": 1.1531806130006999
}
}
},
"_update_policy": {
"total": 302.1216088670012,
"count": 97,
"self": 248.0924609279832,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.029147939018,
"count": 2910,
"self": 54.029147939018
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.340002750628628e-07,
"count": 1,
"self": 8.340002750628628e-07
},
"TrainerController._save_models": {
"total": 0.10962839300009364,
"count": 1,
"self": 0.0020163210001555854,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10761207199993805,
"count": 1,
"self": 0.10761207199993805
}
}
}
}
}
}
}