{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4055209159851074,
"min": 1.4055209159851074,
"max": 1.4288486242294312,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71935.96875,
"min": 68562.8046875,
"max": 76198.1171875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 89.54792043399638,
"min": 81.07389162561576,
"max": 410.3360655737705,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49520.0,
"min": 49068.0,
"max": 50087.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999977.0,
"min": 49433.0,
"max": 1999977.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999977.0,
"min": 49433.0,
"max": 1999977.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.426661729812622,
"min": 0.04345109313726425,
"max": 2.502946615219116,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1339.5172119140625,
"min": 5.257582187652588,
"max": 1497.084228515625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7648431005469267,
"min": 1.7701677645533538,
"max": 4.004328120178378,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2078.1933915019035,
"min": 214.1902995109558,
"max": 2342.1956793665886,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7648431005469267,
"min": 1.7701677645533538,
"max": 4.004328120178378,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2078.1933915019035,
"min": 214.1902995109558,
"max": 2342.1956793665886,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015280086654679004,
"min": 0.013839008273983685,
"max": 0.02028355528503501,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04584025996403701,
"min": 0.028741859273577572,
"max": 0.05461815497255884,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05509945642617014,
"min": 0.02123293935631712,
"max": 0.06135733903696139,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1652983692785104,
"min": 0.04246587871263424,
"max": 0.17310076418022316,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.466098844666669e-06,
"min": 3.466098844666669e-06,
"max": 0.0002952771015743,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0398296534000006e-05,
"min": 1.0398296534000006e-05,
"max": 0.000843798018734,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115533333333332,
"min": 0.10115533333333332,
"max": 0.19842570000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30346599999999996,
"min": 0.20748175000000002,
"max": 0.581266,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.765113333333335e-05,
"min": 6.765113333333335e-05,
"max": 0.004921442429999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020295340000000008,
"min": 0.00020295340000000008,
"max": 0.0140651734,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670751583",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670753759"
},
"total": 2175.366189989,
"count": 1,
"self": 0.3895132949996878,
"children": {
"run_training.setup": {
"total": 0.11121493899997859,
"count": 1,
"self": 0.11121493899997859
},
"TrainerController.start_learning": {
"total": 2174.865461755,
"count": 1,
"self": 3.697392490048969,
"children": {
"TrainerController._reset_env": {
"total": 10.733989538000003,
"count": 1,
"self": 10.733989538000003
},
"TrainerController.advance": {
"total": 2160.3108753159513,
"count": 232926,
"self": 4.073069743968972,
"children": {
"env_step": {
"total": 1692.1576044480116,
"count": 232926,
"self": 1423.2416245101165,
"children": {
"SubprocessEnvManager._take_step": {
"total": 266.3706896169777,
"count": 232926,
"self": 14.221143254026856,
"children": {
"TorchPolicy.evaluate": {
"total": 252.14954636295084,
"count": 223054,
"self": 63.43881573894424,
"children": {
"TorchPolicy.sample_actions": {
"total": 188.7107306240066,
"count": 223054,
"self": 188.7107306240066
}
}
}
}
},
"workers": {
"total": 2.545290320917445,
"count": 232926,
"self": 0.0,
"children": {
"worker_root": {
"total": 2167.251368904988,
"count": 232926,
"is_parallel": true,
"self": 996.454990561982,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007093409999981759,
"count": 1,
"is_parallel": true,
"self": 0.0003118659999472584,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006781544000034501,
"count": 2,
"is_parallel": true,
"self": 0.006781544000034501
}
}
},
"UnityEnvironment.step": {
"total": 0.027689856999984386,
"count": 1,
"is_parallel": true,
"self": 0.0002814809999449608,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021308600003067113,
"count": 1,
"is_parallel": true,
"self": 0.00021308600003067113
},
"communicator.exchange": {
"total": 0.026374567000004845,
"count": 1,
"is_parallel": true,
"self": 0.026374567000004845
},
"steps_from_proto": {
"total": 0.000820723000003909,
"count": 1,
"is_parallel": true,
"self": 0.00025351199997203366,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005672110000318753,
"count": 2,
"is_parallel": true,
"self": 0.0005672110000318753
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1170.796378343006,
"count": 232925,
"is_parallel": true,
"self": 34.38525092408668,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.90894701600985,
"count": 232925,
"is_parallel": true,
"self": 75.90894701600985
},
"communicator.exchange": {
"total": 970.0321800389365,
"count": 232925,
"is_parallel": true,
"self": 970.0321800389365
},
"steps_from_proto": {
"total": 90.47000036397299,
"count": 232925,
"is_parallel": true,
"self": 37.07128966990439,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.3987106940686,
"count": 465850,
"is_parallel": true,
"self": 53.3987106940686
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 464.0802011239709,
"count": 232926,
"self": 5.912864999890246,
"children": {
"process_trajectory": {
"total": 146.19122921107981,
"count": 232926,
"self": 145.70969582907975,
"children": {
"RLTrainer._checkpoint": {
"total": 0.481533382000066,
"count": 4,
"self": 0.481533382000066
}
}
},
"_update_policy": {
"total": 311.9761069130008,
"count": 97,
"self": 258.6218000450118,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.35430686798901,
"count": 2910,
"self": 53.35430686798901
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.219997991749551e-07,
"count": 1,
"self": 9.219997991749551e-07
},
"TrainerController._save_models": {
"total": 0.12320348899993405,
"count": 1,
"self": 0.002088541999910376,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12111494700002368,
"count": 1,
"self": 0.12111494700002368
}
}
}
}
}
}
}