{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4105809926986694,
"min": 1.4105809926986694,
"max": 1.4268718957901,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70265.2734375,
"min": 68710.75,
"max": 76344.5546875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 112.0762331838565,
"min": 82.4457429048414,
"max": 414.22314049586777,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49986.0,
"min": 49000.0,
"max": 50121.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999766.0,
"min": 49518.0,
"max": 1999766.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999766.0,
"min": 49518.0,
"max": 1999766.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.37058687210083,
"min": 0.1224227249622345,
"max": 2.4726107120513916,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1057.28173828125,
"min": 14.690727233886719,
"max": 1440.4541015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6168342919627645,
"min": 1.7851632945239544,
"max": 3.971958694019387,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1613.108094215393,
"min": 214.21959534287453,
"max": 2320.478026509285,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6168342919627645,
"min": 1.7851632945239544,
"max": 3.971958694019387,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1613.108094215393,
"min": 214.21959534287453,
"max": 2320.478026509285,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018159864795032062,
"min": 0.013828484701298294,
"max": 0.020607337766947845,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05447959438509618,
"min": 0.027656969402596588,
"max": 0.06182201330084353,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.046707010020812345,
"min": 0.021205093804746868,
"max": 0.06356543408085902,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14012103006243704,
"min": 0.042410187609493735,
"max": 0.1803464533140262,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.308398897233341e-06,
"min": 3.308398897233341e-06,
"max": 0.00029534070155309996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.925196691700024e-06,
"min": 9.925196691700024e-06,
"max": 0.0008443513685495501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10110276666666666,
"min": 0.10110276666666666,
"max": 0.1984469,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3033083,
"min": 0.20733959999999996,
"max": 0.5814504500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.50280566666668e-05,
"min": 6.50280566666668e-05,
"max": 0.0049225003099999986,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019508417000000042,
"min": 0.00019508417000000042,
"max": 0.014074377455,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673623299",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673625548"
},
"total": 2248.6852283539997,
"count": 1,
"self": 0.43812222499946074,
"children": {
"run_training.setup": {
"total": 0.1344024690000083,
"count": 1,
"self": 0.1344024690000083
},
"TrainerController.start_learning": {
"total": 2248.11270366,
"count": 1,
"self": 3.8678690820015618,
"children": {
"TrainerController._reset_env": {
"total": 11.869063753999995,
"count": 1,
"self": 11.869063753999995
},
"TrainerController.advance": {
"total": 2232.254988806999,
"count": 232249,
"self": 4.137215702872254,
"children": {
"env_step": {
"total": 1756.8495006330356,
"count": 232249,
"self": 1479.050635693037,
"children": {
"SubprocessEnvManager._take_step": {
"total": 275.15364117603264,
"count": 232249,
"self": 14.497695585090298,
"children": {
"TorchPolicy.evaluate": {
"total": 260.65594559094234,
"count": 223008,
"self": 65.01782854896211,
"children": {
"TorchPolicy.sample_actions": {
"total": 195.63811704198022,
"count": 223008,
"self": 195.63811704198022
}
}
}
}
},
"workers": {
"total": 2.6452237639659586,
"count": 232249,
"self": 0.0,
"children": {
"worker_root": {
"total": 2240.3523095739456,
"count": 232249,
"is_parallel": true,
"self": 1020.3227922787987,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002322553999988486,
"count": 1,
"is_parallel": true,
"self": 0.0003735760000154187,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019489779999730672,
"count": 2,
"is_parallel": true,
"self": 0.0019489779999730672
}
}
},
"UnityEnvironment.step": {
"total": 0.043242585000001554,
"count": 1,
"is_parallel": true,
"self": 0.00029443499997228173,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018303400000263537,
"count": 1,
"is_parallel": true,
"self": 0.00018303400000263537
},
"communicator.exchange": {
"total": 0.042036750000022494,
"count": 1,
"is_parallel": true,
"self": 0.042036750000022494
},
"steps_from_proto": {
"total": 0.0007283660000041436,
"count": 1,
"is_parallel": true,
"self": 0.00022454800000559771,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005038179999985459,
"count": 2,
"is_parallel": true,
"self": 0.0005038179999985459
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1220.029517295147,
"count": 232248,
"is_parallel": true,
"self": 35.32418382117294,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.21845402503249,
"count": 232248,
"is_parallel": true,
"self": 78.21845402503249
},
"communicator.exchange": {
"total": 1012.6549586310119,
"count": 232248,
"is_parallel": true,
"self": 1012.6549586310119
},
"steps_from_proto": {
"total": 93.83192081792947,
"count": 232248,
"is_parallel": true,
"self": 38.81933829794616,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.01258251998331,
"count": 464496,
"is_parallel": true,
"self": 55.01258251998331
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 471.2682724710911,
"count": 232249,
"self": 5.892199654109845,
"children": {
"process_trajectory": {
"total": 147.41714297398025,
"count": 232249,
"self": 146.19218271998025,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2249602539999955,
"count": 10,
"self": 1.2249602539999955
}
}
},
"_update_policy": {
"total": 317.958929843001,
"count": 97,
"self": 263.4400705100014,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.51885933299957,
"count": 2910,
"self": 54.51885933299957
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.204999989568023e-06,
"count": 1,
"self": 1.204999989568023e-06
},
"TrainerController._save_models": {
"total": 0.12078081199979351,
"count": 1,
"self": 0.0022092529998190003,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11857155899997451,
"count": 1,
"self": 0.11857155899997451
}
}
}
}
}
}
}