{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4061858654022217,
"min": 1.4061858654022217,
"max": 1.4279611110687256,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70538.5,
"min": 68966.265625,
"max": 77369.703125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 98.91616766467065,
"min": 79.44283413848632,
"max": 398.59842519685037,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49557.0,
"min": 49132.0,
"max": 50622.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999961.0,
"min": 49999.0,
"max": 1999961.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999961.0,
"min": 49999.0,
"max": 1999961.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.375443458557129,
"min": -0.020499026402831078,
"max": 2.4846105575561523,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1190.09716796875,
"min": -2.5828773975372314,
"max": 1432.0440673828125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6520565999244265,
"min": 1.7764231535413908,
"max": 3.974800278283523,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1829.6803565621376,
"min": 223.82931734621525,
"max": 2341.310783624649,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6520565999244265,
"min": 1.7764231535413908,
"max": 3.974800278283523,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1829.6803565621376,
"min": 223.82931734621525,
"max": 2341.310783624649,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01613175543209056,
"min": 0.013570316220284439,
"max": 0.020028122731794912,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04839526629627168,
"min": 0.027140632440568878,
"max": 0.05695849452725573,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04948379053837723,
"min": 0.022619897220283746,
"max": 0.0609163548797369,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1484513716151317,
"min": 0.04523979444056749,
"max": 0.16695718082288902,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.24264891915e-06,
"min": 3.24264891915e-06,
"max": 0.000295328476557175,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.72794675745e-06,
"min": 9.72794675745e-06,
"max": 0.0008439111186963,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10108085,
"min": 0.10108085,
"max": 0.19844282500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30324255,
"min": 0.2073448,
"max": 0.5813037,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.393441499999998e-05,
"min": 6.393441499999998e-05,
"max": 0.0049222969675,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019180324499999996,
"min": 0.00019180324499999996,
"max": 0.014067054629999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674422341",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674424614"
},
"total": 2272.9412937009997,
"count": 1,
"self": 0.38913805899983345,
"children": {
"run_training.setup": {
"total": 0.1046579850000171,
"count": 1,
"self": 0.1046579850000171
},
"TrainerController.start_learning": {
"total": 2272.447497657,
"count": 1,
"self": 3.81874942596869,
"children": {
"TrainerController._reset_env": {
"total": 10.656010913999978,
"count": 1,
"self": 10.656010913999978
},
"TrainerController.advance": {
"total": 2257.8627299550317,
"count": 232200,
"self": 4.091445423907317,
"children": {
"env_step": {
"total": 1796.3466084860684,
"count": 232200,
"self": 1510.0846730530448,
"children": {
"SubprocessEnvManager._take_step": {
"total": 283.5975677090179,
"count": 232200,
"self": 15.155042300003629,
"children": {
"TorchPolicy.evaluate": {
"total": 268.44252540901425,
"count": 222993,
"self": 67.15425758302871,
"children": {
"TorchPolicy.sample_actions": {
"total": 201.28826782598554,
"count": 222993,
"self": 201.28826782598554
}
}
}
}
},
"workers": {
"total": 2.6643677240058423,
"count": 232200,
"self": 0.0,
"children": {
"worker_root": {
"total": 2261.66996896196,
"count": 232200,
"is_parallel": true,
"self": 1022.3612652729362,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016361570000071879,
"count": 1,
"is_parallel": true,
"self": 0.0003498919999742611,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012862650000329268,
"count": 2,
"is_parallel": true,
"self": 0.0012862650000329268
}
}
},
"UnityEnvironment.step": {
"total": 0.033157379000044784,
"count": 1,
"is_parallel": true,
"self": 0.0003126670000597187,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020792700001948106,
"count": 1,
"is_parallel": true,
"self": 0.00020792700001948106
},
"communicator.exchange": {
"total": 0.03180553999999347,
"count": 1,
"is_parallel": true,
"self": 0.03180553999999347
},
"steps_from_proto": {
"total": 0.0008312449999721139,
"count": 1,
"is_parallel": true,
"self": 0.0002978289999759909,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000533415999996123,
"count": 2,
"is_parallel": true,
"self": 0.000533415999996123
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1239.3087036890238,
"count": 232199,
"is_parallel": true,
"self": 34.988293657137774,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.13204070098408,
"count": 232199,
"is_parallel": true,
"self": 83.13204070098408
},
"communicator.exchange": {
"total": 1023.159873374989,
"count": 232199,
"is_parallel": true,
"self": 1023.159873374989
},
"steps_from_proto": {
"total": 98.02849595591277,
"count": 232199,
"is_parallel": true,
"self": 42.59683917989963,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.43165677601314,
"count": 464398,
"is_parallel": true,
"self": 55.43165677601314
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 457.42467604505606,
"count": 232200,
"self": 6.232584130029977,
"children": {
"process_trajectory": {
"total": 148.16373418602535,
"count": 232200,
"self": 147.0209494390253,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1427847470000643,
"count": 10,
"self": 1.1427847470000643
}
}
},
"_update_policy": {
"total": 303.02835772900073,
"count": 97,
"self": 250.14486896499068,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.88348876401005,
"count": 2910,
"self": 52.88348876401005
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1289998838037718e-06,
"count": 1,
"self": 1.1289998838037718e-06
},
"TrainerController._save_models": {
"total": 0.11000623299969448,
"count": 1,
"self": 0.001971381999737787,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10803485099995669,
"count": 1,
"self": 0.10803485099995669
}
}
}
}
}
}
}