{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.407874345779419,
"min": 1.407874345779419,
"max": 1.4306583404541016,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70327.546875,
"min": 67851.2109375,
"max": 78241.375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 80.96065573770491,
"min": 79.67903225806451,
"max": 395.1653543307087,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49386.0,
"min": 49316.0,
"max": 50186.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999987.0,
"min": 49771.0,
"max": 1999987.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999987.0,
"min": 49771.0,
"max": 1999987.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4523632526397705,
"min": 0.1908518522977829,
"max": 2.4729502201080322,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1495.941650390625,
"min": 24.047332763671875,
"max": 1513.16259765625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7517267092329556,
"min": 1.944370639170446,
"max": 3.930188416253339,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2288.553292632103,
"min": 244.9907005354762,
"max": 2391.100107252598,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7517267092329556,
"min": 1.944370639170446,
"max": 3.930188416253339,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2288.553292632103,
"min": 244.9907005354762,
"max": 2391.100107252598,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01689949623816776,
"min": 0.012517240336698402,
"max": 0.02020938422194579,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.050698488714503276,
"min": 0.025034480673396805,
"max": 0.06062815266583736,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05565354033476776,
"min": 0.021084883840133746,
"max": 0.05892308540642262,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16696062100430328,
"min": 0.04216976768026749,
"max": 0.17537310210367044,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.405198864966672e-06,
"min": 3.405198864966672e-06,
"max": 0.00029525542658152495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0215596594900016e-05,
"min": 1.0215596594900016e-05,
"max": 0.0008439082686972498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10113503333333335,
"min": 0.10113503333333335,
"max": 0.198418475,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30340510000000004,
"min": 0.20741824999999997,
"max": 0.58130275,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.663816333333341e-05,
"min": 6.663816333333341e-05,
"max": 0.0049210819025,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019991449000000025,
"min": 0.00019991449000000025,
"max": 0.014067007224999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671435373",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671437626"
},
"total": 2252.9310398360003,
"count": 1,
"self": 0.39284511400001065,
"children": {
"run_training.setup": {
"total": 0.10755883500007712,
"count": 1,
"self": 0.10755883500007712
},
"TrainerController.start_learning": {
"total": 2252.4306358870003,
"count": 1,
"self": 4.109434773087742,
"children": {
"TrainerController._reset_env": {
"total": 8.361870265000107,
"count": 1,
"self": 8.361870265000107
},
"TrainerController.advance": {
"total": 2239.8446845539124,
"count": 232574,
"self": 4.364724040004148,
"children": {
"env_step": {
"total": 1768.4630320170313,
"count": 232574,
"self": 1488.6332122621209,
"children": {
"SubprocessEnvManager._take_step": {
"total": 277.08756515494247,
"count": 232574,
"self": 14.772336743939263,
"children": {
"TorchPolicy.evaluate": {
"total": 262.3152284110032,
"count": 222931,
"self": 65.7411311319978,
"children": {
"TorchPolicy.sample_actions": {
"total": 196.5740972790054,
"count": 222931,
"self": 196.5740972790054
}
}
}
}
},
"workers": {
"total": 2.742254599967964,
"count": 232574,
"self": 0.0,
"children": {
"worker_root": {
"total": 2244.5760330790754,
"count": 232574,
"is_parallel": true,
"self": 1020.9105647280796,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020633000000316315,
"count": 1,
"is_parallel": true,
"self": 0.00031261599997378653,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001750684000057845,
"count": 2,
"is_parallel": true,
"self": 0.001750684000057845
}
}
},
"UnityEnvironment.step": {
"total": 0.03164126900003339,
"count": 1,
"is_parallel": true,
"self": 0.0003085240000473277,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017938199994205206,
"count": 1,
"is_parallel": true,
"self": 0.00017938199994205206
},
"communicator.exchange": {
"total": 0.030166086999997788,
"count": 1,
"is_parallel": true,
"self": 0.030166086999997788
},
"steps_from_proto": {
"total": 0.0009872760000462222,
"count": 1,
"is_parallel": true,
"self": 0.00024881400008780474,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007384619999584174,
"count": 2,
"is_parallel": true,
"self": 0.0007384619999584174
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1223.6654683509958,
"count": 232573,
"is_parallel": true,
"self": 35.26222603789165,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.03467663403626,
"count": 232573,
"is_parallel": true,
"self": 77.03467663403626
},
"communicator.exchange": {
"total": 1017.1590267220727,
"count": 232573,
"is_parallel": true,
"self": 1017.1590267220727
},
"steps_from_proto": {
"total": 94.20953895699517,
"count": 232573,
"is_parallel": true,
"self": 38.363173947148994,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.84636500984618,
"count": 465146,
"is_parallel": true,
"self": 55.84636500984618
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 467.01692849687686,
"count": 232574,
"self": 6.408097216745091,
"children": {
"process_trajectory": {
"total": 149.02752859613054,
"count": 232574,
"self": 147.8318941991305,
"children": {
"RLTrainer._checkpoint": {
"total": 1.195634397000049,
"count": 10,
"self": 1.195634397000049
}
}
},
"_update_policy": {
"total": 311.58130268400123,
"count": 97,
"self": 258.0759313320051,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.50537135199613,
"count": 2910,
"self": 53.50537135199613
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.450001587334555e-07,
"count": 1,
"self": 8.450001587334555e-07
},
"TrainerController._save_models": {
"total": 0.11464544999989812,
"count": 1,
"self": 0.001952420999714377,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11269302900018374,
"count": 1,
"self": 0.11269302900018374
}
}
}
}
}
}
}