{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4033762216567993,
"min": 1.4033762216567993,
"max": 1.4282249212265015,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72610.6875,
"min": 68742.6953125,
"max": 79284.65625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.49462365591398,
"min": 77.54075235109718,
"max": 396.31496062992125,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49380.0,
"min": 49204.0,
"max": 50332.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999913.0,
"min": 49966.0,
"max": 1999913.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999913.0,
"min": 49966.0,
"max": 1999913.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4413139820098877,
"min": -0.052484989166259766,
"max": 2.519134998321533,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1362.253173828125,
"min": -6.6131086349487305,
"max": 1554.815673828125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.689535982078976,
"min": 1.793407975326455,
"max": 4.0159613021187015,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2058.7610780000687,
"min": 225.9694048911333,
"max": 2430.1293600201607,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.689535982078976,
"min": 1.793407975326455,
"max": 4.0159613021187015,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2058.7610780000687,
"min": 225.9694048911333,
"max": 2430.1293600201607,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017675240794778804,
"min": 0.014600623966543935,
"max": 0.0195834295154782,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05302572238433641,
"min": 0.02920124793308787,
"max": 0.058750288546434604,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05153900194499228,
"min": 0.02533322169135014,
"max": 0.06135936689873536,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15461700583497684,
"min": 0.05066644338270028,
"max": 0.17529940629998844,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6918487694166603e-06,
"min": 3.6918487694166603e-06,
"max": 0.000295383976538675,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.107554630824998e-05,
"min": 1.107554630824998e-05,
"max": 0.0008443317185560999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10123058333333335,
"min": 0.10123058333333335,
"max": 0.19846132500000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30369175000000004,
"min": 0.20761655,
"max": 0.5814439,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.140610833333322e-05,
"min": 7.140610833333322e-05,
"max": 0.0049232201175,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021421832499999966,
"min": 0.00021421832499999966,
"max": 0.014074050610000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675390283",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675392474"
},
"total": 2190.9370394999996,
"count": 1,
"self": 0.3891604109999207,
"children": {
"run_training.setup": {
"total": 0.12149511500001609,
"count": 1,
"self": 0.12149511500001609
},
"TrainerController.start_learning": {
"total": 2190.4263839739997,
"count": 1,
"self": 3.6180113309569606,
"children": {
"TrainerController._reset_env": {
"total": 10.027602039000158,
"count": 1,
"self": 10.027602039000158
},
"TrainerController.advance": {
"total": 2176.6729078660424,
"count": 232896,
"self": 4.143455646876191,
"children": {
"env_step": {
"total": 1717.0506470081084,
"count": 232896,
"self": 1440.7636345469907,
"children": {
"SubprocessEnvManager._take_step": {
"total": 273.77357853105855,
"count": 232896,
"self": 14.397637456092752,
"children": {
"TorchPolicy.evaluate": {
"total": 259.3759410749658,
"count": 223070,
"self": 65.42980527301074,
"children": {
"TorchPolicy.sample_actions": {
"total": 193.94613580195505,
"count": 223070,
"self": 193.94613580195505
}
}
}
}
},
"workers": {
"total": 2.5134339300591364,
"count": 232896,
"self": 0.0,
"children": {
"worker_root": {
"total": 2182.7050340929386,
"count": 232896,
"is_parallel": true,
"self": 993.3210403550167,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018701230001170188,
"count": 1,
"is_parallel": true,
"self": 0.00033798800018303155,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015321349999339873,
"count": 2,
"is_parallel": true,
"self": 0.0015321349999339873
}
}
},
"UnityEnvironment.step": {
"total": 0.044425121999893236,
"count": 1,
"is_parallel": true,
"self": 0.00032349599996450706,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001873859998795524,
"count": 1,
"is_parallel": true,
"self": 0.0001873859998795524
},
"communicator.exchange": {
"total": 0.043116607999991174,
"count": 1,
"is_parallel": true,
"self": 0.043116607999991174
},
"steps_from_proto": {
"total": 0.0007976320000580017,
"count": 1,
"is_parallel": true,
"self": 0.00021511700015253155,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005825149999054702,
"count": 2,
"is_parallel": true,
"self": 0.0005825149999054702
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1189.3839937379219,
"count": 232895,
"is_parallel": true,
"self": 33.84827995579417,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.03738711405072,
"count": 232895,
"is_parallel": true,
"self": 75.03738711405072
},
"communicator.exchange": {
"total": 991.6428710260152,
"count": 232895,
"is_parallel": true,
"self": 991.6428710260152
},
"steps_from_proto": {
"total": 88.85545564206177,
"count": 232895,
"is_parallel": true,
"self": 36.378932390102136,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.476523251959634,
"count": 465790,
"is_parallel": true,
"self": 52.476523251959634
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 455.478805211058,
"count": 232896,
"self": 5.913534756088211,
"children": {
"process_trajectory": {
"total": 145.63760845296838,
"count": 232896,
"self": 144.49756342096725,
"children": {
"RLTrainer._checkpoint": {
"total": 1.140045032001126,
"count": 10,
"self": 1.140045032001126
}
}
},
"_update_policy": {
"total": 303.9276620020014,
"count": 97,
"self": 251.21383124801287,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.71383075398853,
"count": 2910,
"self": 52.71383075398853
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.929999578162096e-07,
"count": 1,
"self": 9.929999578162096e-07
},
"TrainerController._save_models": {
"total": 0.10786174500026391,
"count": 1,
"self": 0.001996519999920565,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10586522500034334,
"count": 1,
"self": 0.10586522500034334
}
}
}
}
}
}
}