{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4014936685562134,
"min": 1.4014936685562134,
"max": 1.4209308624267578,
"count": 20
},
"Huggy.Policy.Entropy.sum": {
"value": 69209.9609375,
"min": 68469.4375,
"max": 78726.71875,
"count": 20
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.65543071161049,
"min": 92.16387337057728,
"max": 423.6386554621849,
"count": 20
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49478.0,
"min": 49166.0,
"max": 50413.0,
"count": 20
},
"Huggy.Step.mean": {
"value": 999997.0,
"min": 49936.0,
"max": 999997.0,
"count": 20
},
"Huggy.Step.sum": {
"value": 999997.0,
"min": 49936.0,
"max": 999997.0,
"count": 20
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.316720485687256,
"min": 0.10337124019861221,
"max": 2.3928511142730713,
"count": 20
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1237.1287841796875,
"min": 12.197806358337402,
"max": 1237.1287841796875,
"count": 20
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.696406583995855,
"min": 1.9392764144024606,
"max": 4.019030564286735,
"count": 20
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1973.8811158537865,
"min": 228.83461689949036,
"max": 2034.4308470487595,
"count": 20
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.696406583995855,
"min": 1.9392764144024606,
"max": 4.019030564286735,
"count": 20
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1973.8811158537865,
"min": 228.83461689949036,
"max": 2034.4308470487595,
"count": 20
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019508854223143617,
"min": 0.013560492175020044,
"max": 0.019508854223143617,
"count": 20
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.058526562669430855,
"min": 0.027120984350040088,
"max": 0.058526562669430855,
"count": 20
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05377725619408819,
"min": 0.021555157378315926,
"max": 0.05377725619408819,
"count": 20
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16133176858226458,
"min": 0.04311031475663185,
"max": 0.16133176858226458,
"count": 20
},
"Huggy.Policy.LearningRate.mean": {
"value": 8.697097100999997e-06,
"min": 8.697097100999997e-06,
"max": 0.00029056095314634997,
"count": 20
},
"Huggy.Policy.LearningRate.sum": {
"value": 2.6091291302999993e-05,
"min": 2.6091291302999993e-05,
"max": 0.0007875732374755999,
"count": 20
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10289900000000003,
"min": 0.10289900000000003,
"max": 0.19685365,
"count": 20
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3086970000000001,
"min": 0.21608970000000008,
"max": 0.5625244,
"count": 20
},
"Huggy.Policy.Beta.mean": {
"value": 0.0001546601,
"min": 0.0001546601,
"max": 0.004842997135,
"count": 20
},
"Huggy.Policy.Beta.sum": {
"value": 0.0004639803,
"min": 0.0004639803,
"max": 0.013129967559999997,
"count": 20
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690712861",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690714270"
},
"total": 1409.180272759,
"count": 1,
"self": 0.8060954229997606,
"children": {
"run_training.setup": {
"total": 0.03422981699998218,
"count": 1,
"self": 0.03422981699998218
},
"TrainerController.start_learning": {
"total": 1408.3399475190001,
"count": 1,
"self": 2.5385607980008444,
"children": {
"TrainerController._reset_env": {
"total": 5.25036774299997,
"count": 1,
"self": 5.25036774299997
},
"TrainerController.advance": {
"total": 1399.6844948989992,
"count": 115315,
"self": 2.603209687017852,
"children": {
"env_step": {
"total": 1064.3650384289958,
"count": 115315,
"self": 888.7020123719685,
"children": {
"SubprocessEnvManager._take_step": {
"total": 174.00334019204223,
"count": 115315,
"self": 9.162774786042974,
"children": {
"TorchPolicy.evaluate": {
"total": 164.84056540599926,
"count": 111517,
"self": 164.84056540599926
}
}
},
"workers": {
"total": 1.6596858649851356,
"count": 115315,
"self": 0.0,
"children": {
"worker_root": {
"total": 1402.507522992987,
"count": 115315,
"is_parallel": true,
"self": 677.7311896820065,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009422530000051665,
"count": 1,
"is_parallel": true,
"self": 0.00028129599996873367,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006609570000364329,
"count": 2,
"is_parallel": true,
"self": 0.0006609570000364329
}
}
},
"UnityEnvironment.step": {
"total": 0.06981709199999386,
"count": 1,
"is_parallel": true,
"self": 0.0004136790000188739,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00025681500000018787,
"count": 1,
"is_parallel": true,
"self": 0.00025681500000018787
},
"communicator.exchange": {
"total": 0.06833572199997207,
"count": 1,
"is_parallel": true,
"self": 0.06833572199997207
},
"steps_from_proto": {
"total": 0.000810876000002736,
"count": 1,
"is_parallel": true,
"self": 0.00026542299997345253,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005454530000292834,
"count": 2,
"is_parallel": true,
"self": 0.0005454530000292834
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 724.7763333109806,
"count": 115314,
"is_parallel": true,
"self": 21.94055533093342,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 46.35788364499206,
"count": 115314,
"is_parallel": true,
"self": 46.35788364499206
},
"communicator.exchange": {
"total": 603.4598228040127,
"count": 115314,
"is_parallel": true,
"self": 603.4598228040127
},
"steps_from_proto": {
"total": 53.01807153104238,
"count": 115314,
"is_parallel": true,
"self": 20.454085545991347,
"children": {
"_process_rank_one_or_two_observation": {
"total": 32.56398598505103,
"count": 230628,
"is_parallel": true,
"self": 32.56398598505103
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 332.7162467829855,
"count": 115315,
"self": 3.6129976739747462,
"children": {
"process_trajectory": {
"total": 75.81877049801085,
"count": 115315,
"self": 74.37130274901085,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4474677489999976,
"count": 2,
"self": 1.4474677489999976
}
}
},
"_update_policy": {
"total": 253.28447861099988,
"count": 48,
"self": 185.84675691799885,
"children": {
"TorchPPOOptimizer.update": {
"total": 67.43772169300104,
"count": 1440,
"self": 67.43772169300104
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4690001535200281e-06,
"count": 1,
"self": 1.4690001535200281e-06
},
"TrainerController._save_models": {
"total": 0.8665226099999472,
"count": 1,
"self": 0.020756390999849827,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8457662190000974,
"count": 1,
"self": 0.8457662190000974
}
}
}
}
}
}
}