{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4036035537719727,
"min": 1.4036035537719727,
"max": 1.4270000457763672,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69868.578125,
"min": 68991.34375,
"max": 77886.6875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 112.28473804100227,
"min": 82.79899497487438,
"max": 388.8139534883721,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49293.0,
"min": 49027.0,
"max": 50157.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999995.0,
"min": 49959.0,
"max": 1999995.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999995.0,
"min": 49959.0,
"max": 1999995.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.312316417694092,
"min": 0.20509867370128632,
"max": 2.4191551208496094,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1015.1068725585938,
"min": 26.25263023376465,
"max": 1399.408203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.471151205428912,
"min": 1.838357611646643,
"max": 3.890292937992609,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1523.8353791832924,
"min": 235.3097742907703,
"max": 2186.920107781887,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.471151205428912,
"min": 1.838357611646643,
"max": 3.890292937992609,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1523.8353791832924,
"min": 235.3097742907703,
"max": 2186.920107781887,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014595517161392813,
"min": 0.012302168877552807,
"max": 0.01924153425473681,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04378655148417844,
"min": 0.024604337755105614,
"max": 0.05730619498838981,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.046745044696662165,
"min": 0.020302615594118834,
"max": 0.05958426383634409,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1402351340899865,
"min": 0.04060523118823767,
"max": 0.1691768191754818,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.393698868799997e-06,
"min": 3.393698868799997e-06,
"max": 0.000295308976563675,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0181096606399991e-05,
"min": 1.0181096606399991e-05,
"max": 0.0008440671186442999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10113119999999999,
"min": 0.10113119999999999,
"max": 0.19843632499999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3033936,
"min": 0.20744025,
"max": 0.5813556999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.644687999999995e-05,
"min": 6.644687999999995e-05,
"max": 0.004921972617500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019934063999999985,
"min": 0.00019934063999999985,
"max": 0.01406964943,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670793933",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670796102"
},
"total": 2168.670300396,
"count": 1,
"self": 0.38453834099982487,
"children": {
"run_training.setup": {
"total": 0.10602859100004025,
"count": 1,
"self": 0.10602859100004025
},
"TrainerController.start_learning": {
"total": 2168.179733464,
"count": 1,
"self": 3.7773962719420524,
"children": {
"TrainerController._reset_env": {
"total": 9.296338531000004,
"count": 1,
"self": 9.296338531000004
},
"TrainerController.advance": {
"total": 2154.9943098700583,
"count": 232061,
"self": 3.981401118992835,
"children": {
"env_step": {
"total": 1688.8927048600247,
"count": 232061,
"self": 1419.8360696589311,
"children": {
"SubprocessEnvManager._take_step": {
"total": 266.4632668580572,
"count": 232061,
"self": 14.159029884015126,
"children": {
"TorchPolicy.evaluate": {
"total": 252.30423697404206,
"count": 223013,
"self": 63.14683654204316,
"children": {
"TorchPolicy.sample_actions": {
"total": 189.1574004319989,
"count": 223013,
"self": 189.1574004319989
}
}
}
}
},
"workers": {
"total": 2.5933683430363317,
"count": 232061,
"self": 0.0,
"children": {
"worker_root": {
"total": 2160.5869151449688,
"count": 232061,
"is_parallel": true,
"self": 994.7031146410229,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019990810000081183,
"count": 1,
"is_parallel": true,
"self": 0.0003405660000339594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016585149999741589,
"count": 2,
"is_parallel": true,
"self": 0.0016585149999741589
}
}
},
"UnityEnvironment.step": {
"total": 0.03809210399998619,
"count": 1,
"is_parallel": true,
"self": 0.0003476809999369834,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018370399999412257,
"count": 1,
"is_parallel": true,
"self": 0.00018370399999412257
},
"communicator.exchange": {
"total": 0.03685773300003348,
"count": 1,
"is_parallel": true,
"self": 0.03685773300003348
},
"steps_from_proto": {
"total": 0.0007029860000216104,
"count": 1,
"is_parallel": true,
"self": 0.00023191499997210485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00047107100004950553,
"count": 2,
"is_parallel": true,
"self": 0.00047107100004950553
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1165.8838005039458,
"count": 232060,
"is_parallel": true,
"self": 34.37506789002555,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.21281026905865,
"count": 232060,
"is_parallel": true,
"self": 74.21281026905865
},
"communicator.exchange": {
"total": 966.3394162629079,
"count": 232060,
"is_parallel": true,
"self": 966.3394162629079
},
"steps_from_proto": {
"total": 90.95650608195393,
"count": 232060,
"is_parallel": true,
"self": 37.06291720984319,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.89358887211074,
"count": 464120,
"is_parallel": true,
"self": 53.89358887211074
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 462.1202038910406,
"count": 232061,
"self": 6.531912432091872,
"children": {
"process_trajectory": {
"total": 142.61665343894907,
"count": 232061,
"self": 142.14311326194849,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4735401770005865,
"count": 4,
"self": 0.4735401770005865
}
}
},
"_update_policy": {
"total": 312.97163801999966,
"count": 97,
"self": 259.1188763809937,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.85276163900596,
"count": 2910,
"self": 53.85276163900596
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.72999714576872e-07,
"count": 1,
"self": 9.72999714576872e-07
},
"TrainerController._save_models": {
"total": 0.111687818000064,
"count": 1,
"self": 0.0019724620001397852,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10971535599992421,
"count": 1,
"self": 0.10971535599992421
}
}
}
}
}
}
}