{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4023696184158325,
"min": 1.4023696184158325,
"max": 1.4271174669265747,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72593.6640625,
"min": 67202.7421875,
"max": 78490.9375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 99.51587301587301,
"min": 88.51526032315978,
"max": 402.6774193548387,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50156.0,
"min": 48888.0,
"max": 50196.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999916.0,
"min": 49313.0,
"max": 1999916.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999916.0,
"min": 49313.0,
"max": 1999916.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.377883195877075,
"min": 0.10597196966409683,
"max": 2.4152166843414307,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1198.453125,
"min": 13.034552574157715,
"max": 1314.3079833984375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7219183472177337,
"min": 1.7013426791361677,
"max": 4.089268566313244,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1875.8468469977379,
"min": 209.26514953374863,
"max": 2065.937722146511,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7219183472177337,
"min": 1.7013426791361677,
"max": 4.089268566313244,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1875.8468469977379,
"min": 209.26514953374863,
"max": 2065.937722146511,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016060174222770406,
"min": 0.012775510655774269,
"max": 0.01964326767483726,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04818052266831122,
"min": 0.025551021311548537,
"max": 0.057190803533255036,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05415187511179182,
"min": 0.021645391825586557,
"max": 0.05568818358911407,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16245562533537547,
"min": 0.043290783651173115,
"max": 0.16706455076734222,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.524798825100003e-06,
"min": 3.524798825100003e-06,
"max": 0.00029537595154135,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0574396475300008e-05,
"min": 1.0574396475300008e-05,
"max": 0.0008440188186603999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1011749,
"min": 0.1011749,
"max": 0.19845865000000001,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035247,
"min": 0.20753375000000002,
"max": 0.5813396000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.862751000000004e-05,
"min": 6.862751000000004e-05,
"max": 0.004923086635000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020588253000000014,
"min": 0.00020588253000000014,
"max": 0.014068846040000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687567885",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687570276"
},
"total": 2391.195481339,
"count": 1,
"self": 0.4393375939998805,
"children": {
"run_training.setup": {
"total": 0.05064222600003632,
"count": 1,
"self": 0.05064222600003632
},
"TrainerController.start_learning": {
"total": 2390.705501519,
"count": 1,
"self": 4.186984650130853,
"children": {
"TrainerController._reset_env": {
"total": 5.186214213999847,
"count": 1,
"self": 5.186214213999847
},
"TrainerController.advance": {
"total": 2381.205632270869,
"count": 231575,
"self": 4.379482236924105,
"children": {
"env_step": {
"total": 1853.4907503240158,
"count": 231575,
"self": 1559.6570460370706,
"children": {
"SubprocessEnvManager._take_step": {
"total": 291.06763011099315,
"count": 231575,
"self": 16.45463063404145,
"children": {
"TorchPolicy.evaluate": {
"total": 274.6129994769517,
"count": 223048,
"self": 274.6129994769517
}
}
},
"workers": {
"total": 2.766074175952099,
"count": 231575,
"self": 0.0,
"children": {
"worker_root": {
"total": 2383.1872333861497,
"count": 231575,
"is_parallel": true,
"self": 1109.3117913431831,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000933577999830959,
"count": 1,
"is_parallel": true,
"self": 0.0002628889999414241,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006706889998895349,
"count": 2,
"is_parallel": true,
"self": 0.0006706889998895349
}
}
},
"UnityEnvironment.step": {
"total": 0.07512873199993919,
"count": 1,
"is_parallel": true,
"self": 0.00032383200004915125,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002438629999232944,
"count": 1,
"is_parallel": true,
"self": 0.0002438629999232944
},
"communicator.exchange": {
"total": 0.07371106599998711,
"count": 1,
"is_parallel": true,
"self": 0.07371106599998711
},
"steps_from_proto": {
"total": 0.0008499709999796323,
"count": 1,
"is_parallel": true,
"self": 0.0002035760001035669,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006463949998760654,
"count": 2,
"is_parallel": true,
"self": 0.0006463949998760654
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1273.8754420429666,
"count": 231574,
"is_parallel": true,
"self": 39.04417436025892,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.28636380588046,
"count": 231574,
"is_parallel": true,
"self": 78.28636380588046
},
"communicator.exchange": {
"total": 1062.6362000099678,
"count": 231574,
"is_parallel": true,
"self": 1062.6362000099678
},
"steps_from_proto": {
"total": 93.90870386685947,
"count": 231574,
"is_parallel": true,
"self": 33.42471172090359,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.48399214595588,
"count": 463148,
"is_parallel": true,
"self": 60.48399214595588
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 523.3353997099293,
"count": 231575,
"self": 6.646302096852196,
"children": {
"process_trajectory": {
"total": 132.91736514907484,
"count": 231575,
"self": 131.66153771407517,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2558274349996736,
"count": 10,
"self": 1.2558274349996736
}
}
},
"_update_policy": {
"total": 383.7717324640023,
"count": 97,
"self": 322.7767503260004,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.99498213800189,
"count": 2910,
"self": 60.99498213800189
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0779999684018549e-06,
"count": 1,
"self": 1.0779999684018549e-06
},
"TrainerController._save_models": {
"total": 0.12666930600016713,
"count": 1,
"self": 0.0020431030002328043,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12462620299993432,
"count": 1,
"self": 0.12462620299993432
}
}
}
}
}
}
}