{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3998901844024658,
"min": 1.3998901844024658,
"max": 1.4287067651748657,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69713.1328125,
"min": 68445.1875,
"max": 78140.3515625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 74.28313253012048,
"min": 68.52433936022253,
"max": 381.55725190839695,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49324.0,
"min": 49258.0,
"max": 49984.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999846.0,
"min": 49978.0,
"max": 1999846.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999846.0,
"min": 49978.0,
"max": 1999846.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5153849124908447,
"min": 0.12736664712429047,
"max": 2.5335588455200195,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1670.215576171875,
"min": 16.55766487121582,
"max": 1764.59716796875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 4.01243085396218,
"min": 1.734875643482575,
"max": 4.091868463785025,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2664.2540870308876,
"min": 225.53383365273476,
"max": 2762.1322083473206,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 4.01243085396218,
"min": 1.734875643482575,
"max": 4.091868463785025,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2664.2540870308876,
"min": 225.53383365273476,
"max": 2762.1322083473206,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01599631953043475,
"min": 0.013417540630972427,
"max": 0.02014944220023204,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.047988958591304254,
"min": 0.029630805421038534,
"max": 0.060448326600696115,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.056284126184052895,
"min": 0.02113232286646962,
"max": 0.0619375686471661,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16885237855215868,
"min": 0.04226464573293924,
"max": 0.17654209608832994,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.020998659699994e-06,
"min": 4.020998659699994e-06,
"max": 0.00029528287657237497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.2062995979099982e-05,
"min": 1.2062995979099982e-05,
"max": 0.00084401206866265,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1013403,
"min": 0.1013403,
"max": 0.198427625,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3040209,
"min": 0.20780985,
"max": 0.5813373500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.68809699999999e-05,
"min": 7.68809699999999e-05,
"max": 0.0049215384875,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002306429099999997,
"min": 0.0002306429099999997,
"max": 0.014068733764999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676559995",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676562377"
},
"total": 2381.785822795,
"count": 1,
"self": 0.4387311350001255,
"children": {
"run_training.setup": {
"total": 0.11778803899983359,
"count": 1,
"self": 0.11778803899983359
},
"TrainerController.start_learning": {
"total": 2381.2293036210003,
"count": 1,
"self": 4.123217922978256,
"children": {
"TrainerController._reset_env": {
"total": 10.123141165000106,
"count": 1,
"self": 10.123141165000106
},
"TrainerController.advance": {
"total": 2366.863461569022,
"count": 233526,
"self": 4.484354636882017,
"children": {
"env_step": {
"total": 1845.1681779400772,
"count": 233526,
"self": 1544.2883803120683,
"children": {
"SubprocessEnvManager._take_step": {
"total": 298.1341406220208,
"count": 233526,
"self": 15.303997972955813,
"children": {
"TorchPolicy.evaluate": {
"total": 282.830142649065,
"count": 222902,
"self": 70.83652970313119,
"children": {
"TorchPolicy.sample_actions": {
"total": 211.99361294593382,
"count": 222902,
"self": 211.99361294593382
}
}
}
}
},
"workers": {
"total": 2.7456570059880505,
"count": 233526,
"self": 0.0,
"children": {
"worker_root": {
"total": 2373.4046847380373,
"count": 233526,
"is_parallel": true,
"self": 1114.4509416089302,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022199649999947724,
"count": 1,
"is_parallel": true,
"self": 0.000345831000004182,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018741339999905904,
"count": 2,
"is_parallel": true,
"self": 0.0018741339999905904
}
}
},
"UnityEnvironment.step": {
"total": 0.04455828300001485,
"count": 1,
"is_parallel": true,
"self": 0.0002981760003422096,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019339299979037605,
"count": 1,
"is_parallel": true,
"self": 0.00019339299979037605
},
"communicator.exchange": {
"total": 0.043339724999896134,
"count": 1,
"is_parallel": true,
"self": 0.043339724999896134
},
"steps_from_proto": {
"total": 0.0007269889999861334,
"count": 1,
"is_parallel": true,
"self": 0.0002551630000198202,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00047182599996631325,
"count": 2,
"is_parallel": true,
"self": 0.00047182599996631325
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1258.9537431291071,
"count": 233525,
"is_parallel": true,
"self": 38.0015809380302,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.85388828902273,
"count": 233525,
"is_parallel": true,
"self": 76.85388828902273
},
"communicator.exchange": {
"total": 1038.8551135641094,
"count": 233525,
"is_parallel": true,
"self": 1038.8551135641094
},
"steps_from_proto": {
"total": 105.2431603379448,
"count": 233525,
"is_parallel": true,
"self": 37.847484832783266,
"children": {
"_process_rank_one_or_two_observation": {
"total": 67.39567550516153,
"count": 467050,
"is_parallel": true,
"self": 67.39567550516153
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 517.2109289920629,
"count": 233526,
"self": 6.361679629132823,
"children": {
"process_trajectory": {
"total": 165.9670474549314,
"count": 233526,
"self": 164.779882973932,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1871644809993995,
"count": 10,
"self": 1.1871644809993995
}
}
},
"_update_policy": {
"total": 344.8822019079987,
"count": 97,
"self": 288.7258334509886,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.1563684570101,
"count": 2910,
"self": 56.1563684570101
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.959997674333863e-07,
"count": 1,
"self": 7.959997674333863e-07
},
"TrainerController._save_models": {
"total": 0.11948216800010414,
"count": 1,
"self": 0.0020683220000137226,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11741384600009042,
"count": 1,
"self": 0.11741384600009042
}
}
}
}
}
}
}