{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4133000373840332,
"min": 1.4133000373840332,
"max": 1.427808403968811,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72201.2578125,
"min": 69538.671875,
"max": 77514.6328125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.86744966442953,
"min": 77.15962441314554,
"max": 391.92248062015506,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49389.0,
"min": 49254.0,
"max": 50558.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999999.0,
"min": 49992.0,
"max": 1999999.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999999.0,
"min": 49992.0,
"max": 1999999.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4440696239471436,
"min": 0.09178345650434494,
"max": 2.478652238845825,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1456.66552734375,
"min": 11.748282432556152,
"max": 1549.5286865234375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8148487151269146,
"min": 1.8560462966561317,
"max": 3.977709216014467,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2273.649834215641,
"min": 237.57392597198486,
"max": 2458.2242954969406,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8148487151269146,
"min": 1.8560462966561317,
"max": 3.977709216014467,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2273.649834215641,
"min": 237.57392597198486,
"max": 2458.2242954969406,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018457855042005474,
"min": 0.013179420534591675,
"max": 0.020577270264038813,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.055373565126016426,
"min": 0.02635884106918335,
"max": 0.05873283601055543,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06277555525302887,
"min": 0.022409161946011915,
"max": 0.06891588183740774,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18832666575908663,
"min": 0.04639791200558345,
"max": 0.1984800023337205,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7979487340500034e-06,
"min": 3.7979487340500034e-06,
"max": 0.000295281826572725,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.139384620215001e-05,
"min": 1.139384620215001e-05,
"max": 0.0008441061186312999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10126595000000005,
"min": 0.10126595000000005,
"max": 0.19842727499999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30379785000000015,
"min": 0.2076639,
"max": 0.5813687,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.317090500000006e-05,
"min": 7.317090500000006e-05,
"max": 0.0049215210225,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002195127150000002,
"min": 0.0002195127150000002,
"max": 0.014070298129999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677655325",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677657726"
},
"total": 2401.525034678,
"count": 1,
"self": 0.7423698839997996,
"children": {
"run_training.setup": {
"total": 0.11168849899996758,
"count": 1,
"self": 0.11168849899996758
},
"TrainerController.start_learning": {
"total": 2400.670976295,
"count": 1,
"self": 4.244031312923653,
"children": {
"TrainerController._reset_env": {
"total": 10.996580513000026,
"count": 1,
"self": 10.996580513000026
},
"TrainerController.advance": {
"total": 2385.2515189000765,
"count": 232956,
"self": 4.442957055012812,
"children": {
"env_step": {
"total": 1851.2518482460941,
"count": 232956,
"self": 1546.740952154989,
"children": {
"SubprocessEnvManager._take_step": {
"total": 301.7766086950941,
"count": 232956,
"self": 15.867737275112404,
"children": {
"TorchPolicy.evaluate": {
"total": 285.9088714199817,
"count": 223018,
"self": 72.32093074087601,
"children": {
"TorchPolicy.sample_actions": {
"total": 213.5879406791057,
"count": 223018,
"self": 213.5879406791057
}
}
}
}
},
"workers": {
"total": 2.734287396010984,
"count": 232956,
"self": 0.0,
"children": {
"worker_root": {
"total": 2391.7811076389758,
"count": 232956,
"is_parallel": true,
"self": 1135.4072243459082,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010168800000087685,
"count": 1,
"is_parallel": true,
"self": 0.0003545550000012554,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006623250000075132,
"count": 2,
"is_parallel": true,
"self": 0.0006623250000075132
}
}
},
"UnityEnvironment.step": {
"total": 0.08122858600000882,
"count": 1,
"is_parallel": true,
"self": 0.0003184060000194222,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022158199999466888,
"count": 1,
"is_parallel": true,
"self": 0.00022158199999466888
},
"communicator.exchange": {
"total": 0.07988267899997936,
"count": 1,
"is_parallel": true,
"self": 0.07988267899997936
},
"steps_from_proto": {
"total": 0.0008059190000153649,
"count": 1,
"is_parallel": true,
"self": 0.0002616139999531697,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005443050000621952,
"count": 2,
"is_parallel": true,
"self": 0.0005443050000621952
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1256.3738832930676,
"count": 232955,
"is_parallel": true,
"self": 38.775963504027004,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.39500133293006,
"count": 232955,
"is_parallel": true,
"self": 79.39500133293006
},
"communicator.exchange": {
"total": 1045.8001081300574,
"count": 232955,
"is_parallel": true,
"self": 1045.8001081300574
},
"steps_from_proto": {
"total": 92.40281032605304,
"count": 232955,
"is_parallel": true,
"self": 37.51797328810068,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.884837037952366,
"count": 465910,
"is_parallel": true,
"self": 54.884837037952366
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 529.5567135989694,
"count": 232956,
"self": 6.504563210001379,
"children": {
"process_trajectory": {
"total": 168.9447761659689,
"count": 232956,
"self": 167.7437735089688,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2010026570001173,
"count": 10,
"self": 1.2010026570001173
}
}
},
"_update_policy": {
"total": 354.1073742229991,
"count": 97,
"self": 296.27072535300096,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.83664886999816,
"count": 2910,
"self": 57.83664886999816
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3029998626734596e-06,
"count": 1,
"self": 1.3029998626734596e-06
},
"TrainerController._save_models": {
"total": 0.17884426599994185,
"count": 1,
"self": 0.0030325789998641994,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17581168700007765,
"count": 1,
"self": 0.17581168700007765
}
}
}
}
}
}
}