{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4088276624679565,
"min": 1.4088276624679565,
"max": 1.4299575090408325,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71530.40625,
"min": 68963.140625,
"max": 75605.7265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 130.89709762532982,
"min": 103.15625,
"max": 406.2016129032258,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49610.0,
"min": 48944.0,
"max": 50369.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999913.0,
"min": 49829.0,
"max": 1999913.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999913.0,
"min": 49829.0,
"max": 1999913.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.1526479721069336,
"min": 0.12128842622041702,
"max": 2.3272593021392822,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 815.8535766601562,
"min": 14.918476104736328,
"max": 1091.484619140625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.18949689352418,
"min": 1.8749444375677806,
"max": 3.737160395383835,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1208.8193226456642,
"min": 230.61816582083702,
"max": 1741.1086474061012,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.18949689352418,
"min": 1.8749444375677806,
"max": 3.737160395383835,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1208.8193226456642,
"min": 230.61816582083702,
"max": 1741.1086474061012,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01707891729893163,
"min": 0.013652824091514049,
"max": 0.020517635555006564,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03415783459786326,
"min": 0.027305648183028098,
"max": 0.056772986789777255,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.040860711596906185,
"min": 0.022977974617646797,
"max": 0.06633031459318267,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.08172142319381237,
"min": 0.04612578780700763,
"max": 0.198990943779548,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.621373459574986e-06,
"min": 4.621373459574986e-06,
"max": 0.00029537872654042494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.242746919149972e-06,
"min": 9.242746919149972e-06,
"max": 0.0008441170686276499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10154042499999998,
"min": 0.10154042499999998,
"max": 0.19845957499999994,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20308084999999995,
"min": 0.20308084999999995,
"max": 0.58137235,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.686720749999979e-05,
"min": 8.686720749999979e-05,
"max": 0.004923132792500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017373441499999958,
"min": 0.00017373441499999958,
"max": 0.014070480265000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677763799",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677766335"
},
"total": 2536.493612169,
"count": 1,
"self": 0.491596157999993,
"children": {
"run_training.setup": {
"total": 0.13660297100000207,
"count": 1,
"self": 0.13660297100000207
},
"TrainerController.start_learning": {
"total": 2535.86541304,
"count": 1,
"self": 4.545073635980316,
"children": {
"TrainerController._reset_env": {
"total": 11.67595758799996,
"count": 1,
"self": 11.67595758799996
},
"TrainerController.advance": {
"total": 2519.5281825220195,
"count": 230846,
"self": 4.712889129049017,
"children": {
"env_step": {
"total": 1969.4157777908795,
"count": 230846,
"self": 1643.8298338738916,
"children": {
"SubprocessEnvManager._take_step": {
"total": 322.64837976701835,
"count": 230846,
"self": 17.168135397160654,
"children": {
"TorchPolicy.evaluate": {
"total": 305.4802443698577,
"count": 223121,
"self": 76.36596679492357,
"children": {
"TorchPolicy.sample_actions": {
"total": 229.11427757493414,
"count": 223121,
"self": 229.11427757493414
}
}
}
}
},
"workers": {
"total": 2.9375641499694893,
"count": 230846,
"self": 0.0,
"children": {
"worker_root": {
"total": 2527.022148837054,
"count": 230846,
"is_parallel": true,
"self": 1194.4208038139104,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010857869999654213,
"count": 1,
"is_parallel": true,
"self": 0.00046801400003460003,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006177729999308212,
"count": 2,
"is_parallel": true,
"self": 0.0006177729999308212
}
}
},
"UnityEnvironment.step": {
"total": 0.030816037000022334,
"count": 1,
"is_parallel": true,
"self": 0.0003476009999303642,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00026830300009805796,
"count": 1,
"is_parallel": true,
"self": 0.00026830300009805796
},
"communicator.exchange": {
"total": 0.0294124769999371,
"count": 1,
"is_parallel": true,
"self": 0.0294124769999371
},
"steps_from_proto": {
"total": 0.0007876560000568134,
"count": 1,
"is_parallel": true,
"self": 0.0002562710000120205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005313850000447928,
"count": 2,
"is_parallel": true,
"self": 0.0005313850000447928
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1332.6013450231435,
"count": 230845,
"is_parallel": true,
"self": 40.451161006324355,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.2582675108647,
"count": 230845,
"is_parallel": true,
"self": 82.2582675108647
},
"communicator.exchange": {
"total": 1113.754818019927,
"count": 230845,
"is_parallel": true,
"self": 1113.754818019927
},
"steps_from_proto": {
"total": 96.13709848602753,
"count": 230845,
"is_parallel": true,
"self": 38.429971521064545,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.70712696496298,
"count": 461690,
"is_parallel": true,
"self": 57.70712696496298
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 545.3995156020912,
"count": 230846,
"self": 7.551864688029241,
"children": {
"process_trajectory": {
"total": 160.93286183206487,
"count": 230846,
"self": 159.61368111406478,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3191807180000978,
"count": 10,
"self": 1.3191807180000978
}
}
},
"_update_policy": {
"total": 376.9147890819971,
"count": 96,
"self": 316.6353021959977,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.279486885999404,
"count": 2880,
"self": 60.279486885999404
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.219999472203199e-07,
"count": 1,
"self": 8.219999472203199e-07
},
"TrainerController._save_models": {
"total": 0.1161984720001783,
"count": 1,
"self": 0.002855256000202644,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11334321599997565,
"count": 1,
"self": 0.11334321599997565
}
}
}
}
}
}
}