{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4056639671325684,
"min": 1.4056639671325684,
"max": 1.4293872117996216,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72328.4375,
"min": 68543.8984375,
"max": 76990.7265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 87.81882770870338,
"min": 87.81882770870338,
"max": 410.10655737704917,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49442.0,
"min": 48796.0,
"max": 50092.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999976.0,
"min": 49692.0,
"max": 1999976.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999976.0,
"min": 49692.0,
"max": 1999976.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.390286684036255,
"min": 0.15689384937286377,
"max": 2.4416329860687256,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1345.7314453125,
"min": 18.984155654907227,
"max": 1345.7314453125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7414432935562267,
"min": 1.9192102312549086,
"max": 3.9121699317081555,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2106.4325742721558,
"min": 232.22443798184395,
"max": 2111.3564236164093,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7414432935562267,
"min": 1.9192102312549086,
"max": 3.9121699317081555,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2106.4325742721558,
"min": 232.22443798184395,
"max": 2111.3564236164093,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01980091526042492,
"min": 0.012774458215183889,
"max": 0.02234072516730521,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05940274578127476,
"min": 0.025548916430367778,
"max": 0.05940274578127476,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05613370695047909,
"min": 0.021330346601704755,
"max": 0.06340244878083467,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16840112085143727,
"min": 0.04266069320340951,
"max": 0.176739589497447,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1769489410499974e-06,
"min": 3.1769489410499974e-06,
"max": 0.000295263376578875,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.530846823149992e-06,
"min": 9.530846823149992e-06,
"max": 0.0008438844187051999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10105895,
"min": 0.10105895,
"max": 0.19842112499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30317685,
"min": 0.20725280000000001,
"max": 0.5812948,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.284160499999996e-05,
"min": 6.284160499999996e-05,
"max": 0.004921214137500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001885248149999999,
"min": 0.0001885248149999999,
"max": 0.014066610519999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677782754",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677785266"
},
"total": 2511.42860536,
"count": 1,
"self": 0.43892888700020194,
"children": {
"run_training.setup": {
"total": 0.10754592900002535,
"count": 1,
"self": 0.10754592900002535
},
"TrainerController.start_learning": {
"total": 2510.8821305439997,
"count": 1,
"self": 4.397031714982404,
"children": {
"TrainerController._reset_env": {
"total": 9.05831488900003,
"count": 1,
"self": 9.05831488900003
},
"TrainerController.advance": {
"total": 2497.3126689790174,
"count": 231481,
"self": 4.94189146405688,
"children": {
"env_step": {
"total": 1956.7444109559701,
"count": 231481,
"self": 1632.4236603979762,
"children": {
"SubprocessEnvManager._take_step": {
"total": 321.39318466001436,
"count": 231481,
"self": 17.41269280703034,
"children": {
"TorchPolicy.evaluate": {
"total": 303.980491852984,
"count": 223029,
"self": 76.12553832892587,
"children": {
"TorchPolicy.sample_actions": {
"total": 227.85495352405815,
"count": 223029,
"self": 227.85495352405815
}
}
}
}
},
"workers": {
"total": 2.927565897979605,
"count": 231481,
"self": 0.0,
"children": {
"worker_root": {
"total": 2501.8131924069803,
"count": 231481,
"is_parallel": true,
"self": 1178.6100293509787,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008929340000349839,
"count": 1,
"is_parallel": true,
"self": 0.0003449760000648894,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005479579999700945,
"count": 2,
"is_parallel": true,
"self": 0.0005479579999700945
}
}
},
"UnityEnvironment.step": {
"total": 0.06313757800000985,
"count": 1,
"is_parallel": true,
"self": 0.0003440060000343692,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002323179999734748,
"count": 1,
"is_parallel": true,
"self": 0.0002323179999734748
},
"communicator.exchange": {
"total": 0.056727948999991895,
"count": 1,
"is_parallel": true,
"self": 0.056727948999991895
},
"steps_from_proto": {
"total": 0.005833305000010114,
"count": 1,
"is_parallel": true,
"self": 0.00032622600008380687,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005507078999926307,
"count": 2,
"is_parallel": true,
"self": 0.005507078999926307
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1323.2031630560016,
"count": 231480,
"is_parallel": true,
"self": 38.80776033002144,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 85.33021053000755,
"count": 231480,
"is_parallel": true,
"self": 85.33021053000755
},
"communicator.exchange": {
"total": 1104.3136101489647,
"count": 231480,
"is_parallel": true,
"self": 1104.3136101489647
},
"steps_from_proto": {
"total": 94.75158204700796,
"count": 231480,
"is_parallel": true,
"self": 40.46966278298851,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.28191926401945,
"count": 462960,
"is_parallel": true,
"self": 54.28191926401945
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 535.6263665589906,
"count": 231481,
"self": 6.967511726027169,
"children": {
"process_trajectory": {
"total": 171.31696850396355,
"count": 231481,
"self": 169.98329018596377,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3336783179997838,
"count": 10,
"self": 1.3336783179997838
}
}
},
"_update_policy": {
"total": 357.34188632899986,
"count": 97,
"self": 299.02812011799784,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.31376621100202,
"count": 2910,
"self": 58.31376621100202
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.460001481580548e-07,
"count": 1,
"self": 8.460001481580548e-07
},
"TrainerController._save_models": {
"total": 0.11411411499966562,
"count": 1,
"self": 0.0020381369999995513,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11207597799966607,
"count": 1,
"self": 0.11207597799966607
}
}
}
}
}
}
}