{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4017918109893799,
"min": 1.4017918109893799,
"max": 1.4302469491958618,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70058.75,
"min": 68417.875,
"max": 78240.6328125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.60861423220973,
"min": 92.60861423220973,
"max": 416.3884297520661,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49453.0,
"min": 48848.0,
"max": 50383.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999942.0,
"min": 49755.0,
"max": 1999942.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999942.0,
"min": 49755.0,
"max": 1999942.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3266587257385254,
"min": 0.11424937099218369,
"max": 2.424604892730713,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1242.435791015625,
"min": 13.709924697875977,
"max": 1245.7998046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5998595727516918,
"min": 1.8224018782377243,
"max": 3.859347196905894,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1922.3250118494034,
"min": 218.68822538852692,
"max": 1980.7402584552765,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5998595727516918,
"min": 1.8224018782377243,
"max": 3.859347196905894,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1922.3250118494034,
"min": 218.68822538852692,
"max": 1980.7402584552765,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01717562000315714,
"min": 0.014704266081874568,
"max": 0.02003659021122278,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03435124000631428,
"min": 0.029408532163749137,
"max": 0.06010977063366833,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.054336488743623096,
"min": 0.020702746137976645,
"max": 0.06557262842026022,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10867297748724619,
"min": 0.04140549227595329,
"max": 0.19671788526078066,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.501673499475008e-06,
"min": 4.501673499475008e-06,
"max": 0.00029529232656922495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.003346998950015e-06,
"min": 9.003346998950015e-06,
"max": 0.0008438334187221999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10150052500000001,
"min": 0.10150052500000001,
"max": 0.19843077499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20300105000000002,
"min": 0.20300105000000002,
"max": 0.5812778,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.487619750000015e-05,
"min": 8.487619750000015e-05,
"max": 0.0049216956725,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001697523950000003,
"min": 0.0001697523950000003,
"max": 0.014065762219999996,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674085349",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674087569"
},
"total": 2219.731091243,
"count": 1,
"self": 0.3896072760003335,
"children": {
"run_training.setup": {
"total": 0.10124113599999873,
"count": 1,
"self": 0.10124113599999873
},
"TrainerController.start_learning": {
"total": 2219.2402428309997,
"count": 1,
"self": 3.663311464913022,
"children": {
"TrainerController._reset_env": {
"total": 10.119553344999986,
"count": 1,
"self": 10.119553344999986
},
"TrainerController.advance": {
"total": 2205.3476379810863,
"count": 231355,
"self": 3.9220389170814087,
"children": {
"env_step": {
"total": 1742.8806288819721,
"count": 231355,
"self": 1469.1991707211046,
"children": {
"SubprocessEnvManager._take_step": {
"total": 271.1428651609116,
"count": 231355,
"self": 14.512956446835176,
"children": {
"TorchPolicy.evaluate": {
"total": 256.6299087140764,
"count": 222954,
"self": 64.93692322805134,
"children": {
"TorchPolicy.sample_actions": {
"total": 191.69298548602507,
"count": 222954,
"self": 191.69298548602507
}
}
}
}
},
"workers": {
"total": 2.538592999955938,
"count": 231355,
"self": 0.0,
"children": {
"worker_root": {
"total": 2211.6403098099645,
"count": 231355,
"is_parallel": true,
"self": 998.4180277279027,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019242710000071384,
"count": 1,
"is_parallel": true,
"self": 0.0003126109999698201,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016116600000373182,
"count": 2,
"is_parallel": true,
"self": 0.0016116600000373182
}
}
},
"UnityEnvironment.step": {
"total": 0.027827334000050996,
"count": 1,
"is_parallel": true,
"self": 0.0003719530000694249,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017831999997497405,
"count": 1,
"is_parallel": true,
"self": 0.00017831999997497405
},
"communicator.exchange": {
"total": 0.0263057500000059,
"count": 1,
"is_parallel": true,
"self": 0.0263057500000059
},
"steps_from_proto": {
"total": 0.0009713110000006964,
"count": 1,
"is_parallel": true,
"self": 0.00024886300002435746,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007224479999763389,
"count": 2,
"is_parallel": true,
"self": 0.0007224479999763389
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1213.2222820820618,
"count": 231354,
"is_parallel": true,
"self": 34.19754273399417,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.6109028540119,
"count": 231354,
"is_parallel": true,
"self": 73.6109028540119
},
"communicator.exchange": {
"total": 979.9456844860565,
"count": 231354,
"is_parallel": true,
"self": 979.9456844860565
},
"steps_from_proto": {
"total": 125.46815200799932,
"count": 231354,
"is_parallel": true,
"self": 38.108921343966415,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.3592306640329,
"count": 462708,
"is_parallel": true,
"self": 87.3592306640329
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 458.5449701820328,
"count": 231355,
"self": 6.057893897090935,
"children": {
"process_trajectory": {
"total": 139.33504314894162,
"count": 231355,
"self": 138.24944320094164,
"children": {
"RLTrainer._checkpoint": {
"total": 1.085599947999981,
"count": 10,
"self": 1.085599947999981
}
}
},
"_update_policy": {
"total": 313.1520331360002,
"count": 96,
"self": 260.8093646029938,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.34266853300642,
"count": 2880,
"self": 52.34266853300642
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.480002750526182e-07,
"count": 1,
"self": 7.480002750526182e-07
},
"TrainerController._save_models": {
"total": 0.1097392920000857,
"count": 1,
"self": 0.0021550400001615344,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10758425199992416,
"count": 1,
"self": 0.10758425199992416
}
}
}
}
}
}
}