{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4065587520599365,
"min": 1.4065587520599365,
"max": 1.4257826805114746,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68896.0625,
"min": 68896.0625,
"max": 76890.4140625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 81.787728026534,
"min": 78.77083333333333,
"max": 390.1875,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49318.0,
"min": 48963.0,
"max": 49984.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999961.0,
"min": 49800.0,
"max": 1999961.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999961.0,
"min": 49800.0,
"max": 1999961.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4620895385742188,
"min": 0.18055452406406403,
"max": 2.5086965560913086,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1484.6400146484375,
"min": 22.930423736572266,
"max": 1514.918701171875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8081939134431715,
"min": 1.7677200705282332,
"max": 3.96518067942291,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2296.3409298062325,
"min": 224.5004489570856,
"max": 2402.1327601075172,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8081939134431715,
"min": 1.7677200705282332,
"max": 3.96518067942291,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2296.3409298062325,
"min": 224.5004489570856,
"max": 2402.1327601075172,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015124656795847437,
"min": 0.013692865565826651,
"max": 0.01837433325450143,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04537397038754231,
"min": 0.027385731131653303,
"max": 0.05512299976350429,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05758011299702857,
"min": 0.02127292072400451,
"max": 0.07368604820221662,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1727403389910857,
"min": 0.04254584144800902,
"max": 0.1847958614428838,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.817848727416665e-06,
"min": 3.817848727416665e-06,
"max": 0.000295299976566675,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1453546182249995e-05,
"min": 1.1453546182249995e-05,
"max": 0.0008440471686509499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10127258333333333,
"min": 0.10127258333333333,
"max": 0.198433325,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30381775,
"min": 0.20767924999999998,
"max": 0.5813490499999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.35019083333333e-05,
"min": 7.35019083333333e-05,
"max": 0.004921822917500002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022050572499999989,
"min": 0.00022050572499999989,
"max": 0.014069317594999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673266008",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673268154"
},
"total": 2145.726278623,
"count": 1,
"self": 0.3918402429994785,
"children": {
"run_training.setup": {
"total": 0.11492893399997683,
"count": 1,
"self": 0.11492893399997683
},
"TrainerController.start_learning": {
"total": 2145.2195094460003,
"count": 1,
"self": 3.6885689780610846,
"children": {
"TrainerController._reset_env": {
"total": 8.121562856000082,
"count": 1,
"self": 8.121562856000082
},
"TrainerController.advance": {
"total": 2133.2951694419394,
"count": 232355,
"self": 3.912702603071466,
"children": {
"env_step": {
"total": 1673.1930759839427,
"count": 232355,
"self": 1406.8703419370477,
"children": {
"SubprocessEnvManager._take_step": {
"total": 263.8538025799463,
"count": 232355,
"self": 13.726510639055732,
"children": {
"TorchPolicy.evaluate": {
"total": 250.1272919408906,
"count": 222838,
"self": 63.686481508889415,
"children": {
"TorchPolicy.sample_actions": {
"total": 186.44081043200117,
"count": 222838,
"self": 186.44081043200117
}
}
}
}
},
"workers": {
"total": 2.468931466948675,
"count": 232355,
"self": 0.0,
"children": {
"worker_root": {
"total": 2137.625603779073,
"count": 232355,
"is_parallel": true,
"self": 978.8055820651093,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022196009999788657,
"count": 1,
"is_parallel": true,
"self": 0.00037000500003614434,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018495959999427214,
"count": 2,
"is_parallel": true,
"self": 0.0018495959999427214
}
}
},
"UnityEnvironment.step": {
"total": 0.026973915000098714,
"count": 1,
"is_parallel": true,
"self": 0.00026985200008766697,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017317100002856023,
"count": 1,
"is_parallel": true,
"self": 0.00017317100002856023
},
"communicator.exchange": {
"total": 0.025818344000072102,
"count": 1,
"is_parallel": true,
"self": 0.025818344000072102
},
"steps_from_proto": {
"total": 0.0007125479999103845,
"count": 1,
"is_parallel": true,
"self": 0.0002319159998478426,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004806320000625419,
"count": 2,
"is_parallel": true,
"self": 0.0004806320000625419
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1158.8200217139638,
"count": 232354,
"is_parallel": true,
"self": 33.45929799892019,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.98770870797841,
"count": 232354,
"is_parallel": true,
"self": 73.98770870797841
},
"communicator.exchange": {
"total": 960.432817168,
"count": 232354,
"is_parallel": true,
"self": 960.432817168
},
"steps_from_proto": {
"total": 90.94019783906526,
"count": 232354,
"is_parallel": true,
"self": 37.43152566311494,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.50867217595032,
"count": 464708,
"is_parallel": true,
"self": 53.50867217595032
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 456.18939085492525,
"count": 232355,
"self": 5.710659463892512,
"children": {
"process_trajectory": {
"total": 141.6314789950327,
"count": 232355,
"self": 140.46727224303288,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1642067519998136,
"count": 10,
"self": 1.1642067519998136
}
}
},
"_update_policy": {
"total": 308.84725239600004,
"count": 97,
"self": 256.6214610380008,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.22579135799924,
"count": 2910,
"self": 52.22579135799924
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.749999895168003e-07,
"count": 1,
"self": 7.749999895168003e-07
},
"TrainerController._save_models": {
"total": 0.114207394999994,
"count": 1,
"self": 0.002002470000206813,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11220492499978718,
"count": 1,
"self": 0.11220492499978718
}
}
}
}
}
}
}