{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4028072357177734,
"min": 1.4028072357177734,
"max": 1.4292473793029785,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70162.8046875,
"min": 69177.984375,
"max": 76888.171875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 93.45488721804512,
"min": 80.11669367909238,
"max": 409.827868852459,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49718.0,
"min": 49139.0,
"max": 50376.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999597.0,
"min": 49735.0,
"max": 1999597.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999597.0,
"min": 49735.0,
"max": 1999597.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.414720296859741,
"min": -0.03747570142149925,
"max": 2.4828925132751465,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1284.6312255859375,
"min": -4.534559726715088,
"max": 1494.487060546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7410479630518676,
"min": 1.9013965090444265,
"max": 3.9143728897882544,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1990.2375163435936,
"min": 230.0689775943756,
"max": 2342.9108209609985,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7410479630518676,
"min": 1.9013965090444265,
"max": 3.9143728897882544,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1990.2375163435936,
"min": 230.0689775943756,
"max": 2342.9108209609985,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015622296019703046,
"min": 0.01383979463395614,
"max": 0.020295062579680233,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04686688805910914,
"min": 0.02767958926791228,
"max": 0.05667665842246,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04993167047699293,
"min": 0.023476659196118514,
"max": 0.06157389904061954,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1497950114309788,
"min": 0.04695331839223703,
"max": 0.1773750178515911,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.649948783383332e-06,
"min": 3.649948783383332e-06,
"max": 0.000295324276558575,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0949846350149996e-05,
"min": 1.0949846350149996e-05,
"max": 0.0008441619186127,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10121661666666669,
"min": 0.10121661666666669,
"max": 0.19844142499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30364985000000005,
"min": 0.20760650000000008,
"max": 0.5813873000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.070917166666667e-05,
"min": 7.070917166666667e-05,
"max": 0.0049222271075,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021212751500000003,
"min": 0.00021212751500000003,
"max": 0.014071226269999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679334108",
"python_version": "3.9.9 | packaged by conda-forge | (main, Dec 20 2021, 02:40:17) \n[GCC 9.4.0]",
"command_line_arguments": "/home/nazar/anaconda3/envs/ml-agents/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.20.0",
"end_time_seconds": "1679337275"
},
"total": 3167.6730038,
"count": 1,
"self": 0.4265076999995472,
"children": {
"run_training.setup": {
"total": 0.014015900000003967,
"count": 1,
"self": 0.014015900000003967
},
"TrainerController.start_learning": {
"total": 3167.2324802000003,
"count": 1,
"self": 5.597242599867968,
"children": {
"TrainerController._reset_env": {
"total": 5.480540799999972,
"count": 1,
"self": 5.480540799999972
},
"TrainerController.advance": {
"total": 3156.0108269001325,
"count": 232420,
"self": 5.521681299996999,
"children": {
"env_step": {
"total": 2570.0961196001067,
"count": 232420,
"self": 1943.2064301002818,
"children": {
"SubprocessEnvManager._take_step": {
"total": 623.4971370999049,
"count": 232420,
"self": 17.57893809994016,
"children": {
"TorchPolicy.evaluate": {
"total": 605.9181989999647,
"count": 223019,
"self": 247.99598859999287,
"children": {
"TorchPolicy.sample_actions": {
"total": 357.92221039997185,
"count": 223019,
"self": 357.92221039997185
}
}
}
}
},
"workers": {
"total": 3.3925523999200777,
"count": 232420,
"self": 0.0,
"children": {
"worker_root": {
"total": 3154.2428143000575,
"count": 232420,
"is_parallel": true,
"self": 1496.130846600184,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007427000000461703,
"count": 1,
"is_parallel": true,
"self": 0.000277399999959016,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046530000008715433,
"count": 2,
"is_parallel": true,
"self": 0.00046530000008715433
}
}
},
"UnityEnvironment.step": {
"total": 0.02100560000008045,
"count": 1,
"is_parallel": true,
"self": 0.00016980000032162934,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00014769999984309834,
"count": 1,
"is_parallel": true,
"self": 0.00014769999984309834
},
"communicator.exchange": {
"total": 0.02025509999998576,
"count": 1,
"is_parallel": true,
"self": 0.02025509999998576
},
"steps_from_proto": {
"total": 0.00043299999992996163,
"count": 1,
"is_parallel": true,
"self": 0.00014979999969000346,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00028320000023995817,
"count": 2,
"is_parallel": true,
"self": 0.00028320000023995817
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1658.1119676998735,
"count": 232419,
"is_parallel": true,
"self": 29.985049699682577,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 49.60361620004733,
"count": 232419,
"is_parallel": true,
"self": 49.60361620004733
},
"communicator.exchange": {
"total": 1504.987080300104,
"count": 232419,
"is_parallel": true,
"self": 1504.987080300104
},
"steps_from_proto": {
"total": 73.53622150003957,
"count": 232419,
"is_parallel": true,
"self": 30.21230839973282,
"children": {
"_process_rank_one_or_two_observation": {
"total": 43.323913100306754,
"count": 464838,
"is_parallel": true,
"self": 43.323913100306754
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 580.3930260000288,
"count": 232420,
"self": 7.389916399886488,
"children": {
"process_trajectory": {
"total": 179.7947605001425,
"count": 232420,
"self": 178.49525140014248,
"children": {
"RLTrainer._checkpoint": {
"total": 1.299509100000023,
"count": 10,
"self": 1.299509100000023
}
}
},
"_update_policy": {
"total": 393.20834909999985,
"count": 97,
"self": 256.0572289000047,
"children": {
"TorchPPOOptimizer.update": {
"total": 137.15112019999515,
"count": 2910,
"self": 137.15112019999515
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.100000190490391e-06,
"count": 1,
"self": 1.100000190490391e-06
},
"TrainerController._save_models": {
"total": 0.14386879999983648,
"count": 1,
"self": 0.027938999999605585,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1159298000002309,
"count": 1,
"self": 0.1159298000002309
}
}
}
}
}
}
}