{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4045618772506714,
"min": 1.4045618772506714,
"max": 1.4291884899139404,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70799.75,
"min": 68890.4609375,
"max": 78007.71875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.5749063670412,
"min": 81.53553719008265,
"max": 400.584,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49435.0,
"min": 49149.0,
"max": 50122.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999961.0,
"min": 49776.0,
"max": 1999961.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999961.0,
"min": 49776.0,
"max": 1999961.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3829185962677,
"min": 0.036440931260585785,
"max": 2.442204236984253,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1272.478515625,
"min": 4.518675327301025,
"max": 1466.570556640625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6581810424836836,
"min": 1.92474329447554,
"max": 3.9658922989619896,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1953.468676686287,
"min": 238.66816851496696,
"max": 2318.376360118389,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6581810424836836,
"min": 1.92474329447554,
"max": 3.9658922989619896,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1953.468676686287,
"min": 238.66816851496696,
"max": 2318.376360118389,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01709298083207816,
"min": 0.012725892418529838,
"max": 0.01938430511460562,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05127894249623448,
"min": 0.025451784837059677,
"max": 0.057729656699423995,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.053835299362738925,
"min": 0.0240155881891648,
"max": 0.06281582166751226,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16150589808821678,
"min": 0.0480311763783296,
"max": 0.18844746500253678,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5487488171166758e-06,
"min": 3.5487488171166758e-06,
"max": 0.00029536530154489996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0646246451350028e-05,
"min": 1.0646246451350028e-05,
"max": 0.0008442091685969498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118288333333336,
"min": 0.10118288333333336,
"max": 0.19845510000000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035486500000001,
"min": 0.20749595000000004,
"max": 0.5814030500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.90258783333335e-05,
"min": 6.90258783333335e-05,
"max": 0.00492290949,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020707763500000048,
"min": 0.00020707763500000048,
"max": 0.014072012194999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672038525",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672040881"
},
"total": 2356.316210146,
"count": 1,
"self": 0.43927228299980925,
"children": {
"run_training.setup": {
"total": 0.11421333399999867,
"count": 1,
"self": 0.11421333399999867
},
"TrainerController.start_learning": {
"total": 2355.762724529,
"count": 1,
"self": 3.961137758014047,
"children": {
"TrainerController._reset_env": {
"total": 9.767841917999988,
"count": 1,
"self": 9.767841917999988
},
"TrainerController.advance": {
"total": 2341.911276410986,
"count": 232048,
"self": 4.191503512968666,
"children": {
"env_step": {
"total": 1842.5933339340258,
"count": 232048,
"self": 1552.41775907605,
"children": {
"SubprocessEnvManager._take_step": {
"total": 287.4579074270172,
"count": 232048,
"self": 15.209896114091066,
"children": {
"TorchPolicy.evaluate": {
"total": 272.2480113129261,
"count": 223021,
"self": 67.700040016005,
"children": {
"TorchPolicy.sample_actions": {
"total": 204.5479712969211,
"count": 223021,
"self": 204.5479712969211
}
}
}
}
},
"workers": {
"total": 2.7176674309585565,
"count": 232048,
"self": 0.0,
"children": {
"worker_root": {
"total": 2347.6242551190153,
"count": 232048,
"is_parallel": true,
"self": 1071.3202156510204,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002389055999969969,
"count": 1,
"is_parallel": true,
"self": 0.0003653279999866754,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020237279999832936,
"count": 2,
"is_parallel": true,
"self": 0.0020237279999832936
}
}
},
"UnityEnvironment.step": {
"total": 0.050986517000012554,
"count": 1,
"is_parallel": true,
"self": 0.00031972099998256454,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018566400001418515,
"count": 1,
"is_parallel": true,
"self": 0.00018566400001418515
},
"communicator.exchange": {
"total": 0.04972647499999994,
"count": 1,
"is_parallel": true,
"self": 0.04972647499999994
},
"steps_from_proto": {
"total": 0.0007546570000158681,
"count": 1,
"is_parallel": true,
"self": 0.00026012099999661586,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004945360000192522,
"count": 2,
"is_parallel": true,
"self": 0.0004945360000192522
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1276.304039467995,
"count": 232047,
"is_parallel": true,
"self": 36.320098051045306,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.73894362395691,
"count": 232047,
"is_parallel": true,
"self": 79.73894362395691
},
"communicator.exchange": {
"total": 1062.5312170680304,
"count": 232047,
"is_parallel": true,
"self": 1062.5312170680304
},
"steps_from_proto": {
"total": 97.71378072496236,
"count": 232047,
"is_parallel": true,
"self": 39.866146799956994,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.84763392500537,
"count": 464094,
"is_parallel": true,
"self": 57.84763392500537
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 495.12643896399135,
"count": 232048,
"self": 6.329463074920341,
"children": {
"process_trajectory": {
"total": 155.80979657307046,
"count": 232048,
"self": 154.13521218507094,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6745843879995164,
"count": 10,
"self": 1.6745843879995164
}
}
},
"_update_policy": {
"total": 332.98717931600055,
"count": 97,
"self": 278.29191855399284,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.695260762007706,
"count": 2910,
"self": 54.695260762007706
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.949999366654083e-07,
"count": 1,
"self": 9.949999366654083e-07
},
"TrainerController._save_models": {
"total": 0.1224674469999627,
"count": 1,
"self": 0.0025382799999533745,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11992916700000933,
"count": 1,
"self": 0.11992916700000933
}
}
}
}
}
}
}