{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4061355590820312,
"min": 1.4061355590820312,
"max": 1.4285329580307007,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69187.4921875,
"min": 67387.0703125,
"max": 76563.375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.3875,
"min": 85.9408695652174,
"max": 407.1707317073171,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49497.0,
"min": 48773.0,
"max": 50185.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999990.0,
"min": 49939.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999990.0,
"min": 49939.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.407057046890259,
"min": 0.07158200442790985,
"max": 2.467374563217163,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1347.951904296875,
"min": 8.733004570007324,
"max": 1376.8392333984375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7219791435769625,
"min": 1.908241763955257,
"max": 3.9823168379010507,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2084.308320403099,
"min": 232.80549520254135,
"max": 2226.86316126585,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7219791435769625,
"min": 1.908241763955257,
"max": 3.9823168379010507,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2084.308320403099,
"min": 232.80549520254135,
"max": 2226.86316126585,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01870160797585009,
"min": 0.013021977501557558,
"max": 0.01965528540616409,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05610482392755027,
"min": 0.026043955003115116,
"max": 0.05825011658016592,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.059618108678195204,
"min": 0.021168677664051453,
"max": 0.059768055876096085,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17885432603458562,
"min": 0.042337355328102906,
"max": 0.17885432603458562,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.477548840850005e-06,
"min": 3.477548840850005e-06,
"max": 0.0002953408515530499,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0432646522550016e-05,
"min": 1.0432646522550016e-05,
"max": 0.0008441592186135998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115915000000003,
"min": 0.10115915000000003,
"max": 0.19844695,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034774500000001,
"min": 0.20745870000000002,
"max": 0.5813864,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.784158500000009e-05,
"min": 6.784158500000009e-05,
"max": 0.004922502804999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020352475500000027,
"min": 0.00020352475500000027,
"max": 0.014071181359999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689170466",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689173557"
},
"total": 3090.9957023489997,
"count": 1,
"self": 0.49077285399971515,
"children": {
"run_training.setup": {
"total": 0.04974464399998624,
"count": 1,
"self": 0.04974464399998624
},
"TrainerController.start_learning": {
"total": 3090.455184851,
"count": 1,
"self": 6.488584780922793,
"children": {
"TrainerController._reset_env": {
"total": 5.284394264000014,
"count": 1,
"self": 5.284394264000014
},
"TrainerController.advance": {
"total": 3078.5408005600775,
"count": 232202,
"self": 6.423788610023166,
"children": {
"env_step": {
"total": 2464.356242209136,
"count": 232202,
"self": 2076.8967580311305,
"children": {
"SubprocessEnvManager._take_step": {
"total": 383.2201279629835,
"count": 232202,
"self": 21.527690184844573,
"children": {
"TorchPolicy.evaluate": {
"total": 361.6924377781389,
"count": 222899,
"self": 361.6924377781389
}
}
},
"workers": {
"total": 4.239356215021928,
"count": 232202,
"self": 0.0,
"children": {
"worker_root": {
"total": 3080.0723542290016,
"count": 232202,
"is_parallel": true,
"self": 1391.5169895690094,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009616520000008677,
"count": 1,
"is_parallel": true,
"self": 0.0003014199999711309,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006602320000297368,
"count": 2,
"is_parallel": true,
"self": 0.0006602320000297368
}
}
},
"UnityEnvironment.step": {
"total": 0.0741941489999931,
"count": 1,
"is_parallel": true,
"self": 0.00037124400000720925,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002598959999886574,
"count": 1,
"is_parallel": true,
"self": 0.0002598959999886574
},
"communicator.exchange": {
"total": 0.0727338140000029,
"count": 1,
"is_parallel": true,
"self": 0.0727338140000029
},
"steps_from_proto": {
"total": 0.0008291949999943427,
"count": 1,
"is_parallel": true,
"self": 0.0002166660000284537,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006125289999658889,
"count": 2,
"is_parallel": true,
"self": 0.0006125289999658889
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1688.5553646599922,
"count": 232201,
"is_parallel": true,
"self": 50.99831800880088,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 96.87709583100803,
"count": 232201,
"is_parallel": true,
"self": 96.87709583100803
},
"communicator.exchange": {
"total": 1416.9542149690978,
"count": 232201,
"is_parallel": true,
"self": 1416.9542149690978
},
"steps_from_proto": {
"total": 123.72573585108546,
"count": 232201,
"is_parallel": true,
"self": 42.05009086199408,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.67564498909138,
"count": 464402,
"is_parallel": true,
"self": 81.67564498909138
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 607.7607697409184,
"count": 232202,
"self": 9.899256219003632,
"children": {
"process_trajectory": {
"total": 168.0410431609128,
"count": 232202,
"self": 166.23993465891334,
"children": {
"RLTrainer._checkpoint": {
"total": 1.8011085019994653,
"count": 10,
"self": 1.8011085019994653
}
}
},
"_update_policy": {
"total": 429.82047036100187,
"count": 97,
"self": 365.33716582100504,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.48330453999682,
"count": 2910,
"self": 64.48330453999682
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3909998415329028e-06,
"count": 1,
"self": 1.3909998415329028e-06
},
"TrainerController._save_models": {
"total": 0.14140385499968033,
"count": 1,
"self": 0.002158720999432262,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13924513400024807,
"count": 1,
"self": 0.13924513400024807
}
}
}
}
}
}
}