{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4048821926116943,
"min": 1.4048821926116943,
"max": 1.4275891780853271,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69987.015625,
"min": 69034.640625,
"max": 76077.2265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.00341296928327,
"min": 77.69316375198729,
"max": 394.20472440944883,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49812.0,
"min": 48814.0,
"max": 50109.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999692.0,
"min": 49860.0,
"max": 1999692.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999692.0,
"min": 49860.0,
"max": 1999692.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3997390270233154,
"min": 0.013698962517082691,
"max": 2.4646308422088623,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1406.2470703125,
"min": 1.7260693311691284,
"max": 1511.4488525390625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7899558723176296,
"min": 1.7614710704674796,
"max": 4.001732408359486,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2220.914141178131,
"min": 221.94535487890244,
"max": 2411.2956603765488,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7899558723176296,
"min": 1.7614710704674796,
"max": 4.001732408359486,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2220.914141178131,
"min": 221.94535487890244,
"max": 2411.2956603765488,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018136115106204265,
"min": 0.013708057897990025,
"max": 0.02153686702270837,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0544083453186128,
"min": 0.02741611579598005,
"max": 0.05794074624670126,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05289618861344125,
"min": 0.022494338049242897,
"max": 0.05808531327380075,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15868856584032376,
"min": 0.044988676098485794,
"max": 0.17425593982140225,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3812988729333404e-06,
"min": 3.3812988729333404e-06,
"max": 0.000295329676556775,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0143896618800022e-05,
"min": 1.0143896618800022e-05,
"max": 0.0008439198186933998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10112706666666667,
"min": 0.10112706666666667,
"max": 0.19844322499999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3033812,
"min": 0.2073934,
"max": 0.5813066000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.624062666666676e-05,
"min": 6.624062666666676e-05,
"max": 0.0049223169275,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001987218800000003,
"min": 0.0001987218800000003,
"max": 0.01406719934,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675890640",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675893388"
},
"total": 2747.342304495,
"count": 1,
"self": 0.44348583400005737,
"children": {
"run_training.setup": {
"total": 0.11151448999999047,
"count": 1,
"self": 0.11151448999999047
},
"TrainerController.start_learning": {
"total": 2746.787304171,
"count": 1,
"self": 5.404988927979048,
"children": {
"TrainerController._reset_env": {
"total": 12.226466002000052,
"count": 1,
"self": 12.226466002000052
},
"TrainerController.advance": {
"total": 2729.0454514540206,
"count": 232479,
"self": 5.496047287058445,
"children": {
"env_step": {
"total": 2156.039653610085,
"count": 232479,
"self": 1800.22328433999,
"children": {
"SubprocessEnvManager._take_step": {
"total": 352.2751795320031,
"count": 232479,
"self": 18.772298504970195,
"children": {
"TorchPolicy.evaluate": {
"total": 333.5028810270329,
"count": 222845,
"self": 84.41918286204668,
"children": {
"TorchPolicy.sample_actions": {
"total": 249.08369816498623,
"count": 222845,
"self": 249.08369816498623
}
}
}
}
},
"workers": {
"total": 3.5411897380921573,
"count": 232479,
"self": 0.0,
"children": {
"worker_root": {
"total": 2736.452441298873,
"count": 232479,
"is_parallel": true,
"self": 1275.448415823931,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025911919999543898,
"count": 1,
"is_parallel": true,
"self": 0.00036670899999080575,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002224482999963584,
"count": 2,
"is_parallel": true,
"self": 0.002224482999963584
}
}
},
"UnityEnvironment.step": {
"total": 0.04526589099998546,
"count": 1,
"is_parallel": true,
"self": 0.0003314669999099351,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020110299999487324,
"count": 1,
"is_parallel": true,
"self": 0.00020110299999487324
},
"communicator.exchange": {
"total": 0.04394914100009828,
"count": 1,
"is_parallel": true,
"self": 0.04394914100009828
},
"steps_from_proto": {
"total": 0.0007841799999823706,
"count": 1,
"is_parallel": true,
"self": 0.00025352499983455346,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005306550001478172,
"count": 2,
"is_parallel": true,
"self": 0.0005306550001478172
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1461.004025474942,
"count": 232478,
"is_parallel": true,
"self": 44.369684334916656,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 85.24067781796657,
"count": 232478,
"is_parallel": true,
"self": 85.24067781796657
},
"communicator.exchange": {
"total": 1219.7801331349515,
"count": 232478,
"is_parallel": true,
"self": 1219.7801331349515
},
"steps_from_proto": {
"total": 111.61353018710713,
"count": 232478,
"is_parallel": true,
"self": 42.56623652416181,
"children": {
"_process_rank_one_or_two_observation": {
"total": 69.04729366294532,
"count": 464956,
"is_parallel": true,
"self": 69.04729366294532
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 567.5097505568768,
"count": 232479,
"self": 8.34302412582781,
"children": {
"process_trajectory": {
"total": 180.6696823460495,
"count": 232479,
"self": 179.2536548850495,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4160274609999988,
"count": 10,
"self": 1.4160274609999988
}
}
},
"_update_policy": {
"total": 378.4970440849995,
"count": 97,
"self": 318.11008871799584,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.38695536700368,
"count": 2910,
"self": 60.38695536700368
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.319998414663132e-07,
"count": 1,
"self": 8.319998414663132e-07
},
"TrainerController._save_models": {
"total": 0.1103969550003967,
"count": 1,
"self": 0.0021929940003246884,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10820396100007201,
"count": 1,
"self": 0.10820396100007201
}
}
}
}
}
}
}