{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1581305265426636,
"min": 1.1581305265426636,
"max": 2.8782541751861572,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 11108.7880859375,
"min": 11108.7880859375,
"max": 29539.5234375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.321924209594727,
"min": 0.4501103162765503,
"max": 12.321924209594727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2402.775146484375,
"min": 87.32140350341797,
"max": 2488.74560546875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06827242503070509,
"min": 0.06030330126226295,
"max": 0.07599485026969655,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27308970012282036,
"min": 0.2412132050490518,
"max": 0.36840799069442953,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2155262331898306,
"min": 0.11668531904134022,
"max": 0.296727734380493,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8621049327593224,
"min": 0.4667412761653609,
"max": 1.468994970122973,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.545454545454547,
"min": 3.090909090909091,
"max": 24.545454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1080.0,
"min": 136.0,
"max": 1335.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.545454545454547,
"min": 3.090909090909091,
"max": 24.545454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1080.0,
"min": 136.0,
"max": 1335.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691180965",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691181373"
},
"total": 408.0440758259999,
"count": 1,
"self": 0.3224431800001639,
"children": {
"run_training.setup": {
"total": 0.03897239699995225,
"count": 1,
"self": 0.03897239699995225
},
"TrainerController.start_learning": {
"total": 407.6826602489998,
"count": 1,
"self": 0.5437071829965134,
"children": {
"TrainerController._reset_env": {
"total": 4.034002839000095,
"count": 1,
"self": 4.034002839000095
},
"TrainerController.advance": {
"total": 402.96459743700325,
"count": 18204,
"self": 0.26049890101853634,
"children": {
"env_step": {
"total": 402.7040985359847,
"count": 18204,
"self": 288.16335626095383,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.279799371016,
"count": 18204,
"self": 1.6616863460326385,
"children": {
"TorchPolicy.evaluate": {
"total": 112.61811302498336,
"count": 18204,
"self": 112.61811302498336
}
}
},
"workers": {
"total": 0.2609429040148825,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 406.8504196630013,
"count": 18204,
"is_parallel": true,
"self": 204.64677485500806,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002026320999902964,
"count": 1,
"is_parallel": true,
"self": 0.0005743859996982792,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001451935000204685,
"count": 10,
"is_parallel": true,
"self": 0.001451935000204685
}
}
},
"UnityEnvironment.step": {
"total": 0.036847523999995246,
"count": 1,
"is_parallel": true,
"self": 0.00041479199967398017,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005174140001145133,
"count": 1,
"is_parallel": true,
"self": 0.0005174140001145133
},
"communicator.exchange": {
"total": 0.03458672100009608,
"count": 1,
"is_parallel": true,
"self": 0.03458672100009608
},
"steps_from_proto": {
"total": 0.0013285970001106762,
"count": 1,
"is_parallel": true,
"self": 0.000344322000273678,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009842749998369982,
"count": 10,
"is_parallel": true,
"self": 0.0009842749998369982
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 202.20364480799321,
"count": 18203,
"is_parallel": true,
"self": 9.922728076983276,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.01637647000166,
"count": 18203,
"is_parallel": true,
"self": 5.01637647000166
},
"communicator.exchange": {
"total": 155.28674794300764,
"count": 18203,
"is_parallel": true,
"self": 155.28674794300764
},
"steps_from_proto": {
"total": 31.97779231800064,
"count": 18203,
"is_parallel": true,
"self": 5.6191752820479905,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.358617035952648,
"count": 182030,
"is_parallel": true,
"self": 26.358617035952648
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00017292500001531153,
"count": 1,
"self": 0.00017292500001531153,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 401.4413459419525,
"count": 228797,
"is_parallel": true,
"self": 5.043709702887554,
"children": {
"process_trajectory": {
"total": 221.16978317206667,
"count": 228797,
"is_parallel": true,
"self": 220.3793899460668,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7903932259998783,
"count": 4,
"is_parallel": true,
"self": 0.7903932259998783
}
}
},
"_update_policy": {
"total": 175.22785306699825,
"count": 90,
"is_parallel": true,
"self": 47.7203158620066,
"children": {
"TorchPPOOptimizer.update": {
"total": 127.50753720499165,
"count": 4587,
"is_parallel": true,
"self": 127.50753720499165
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1401798649999364,
"count": 1,
"self": 0.0006311509998795373,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13954871400005686,
"count": 1,
"self": 0.13954871400005686
}
}
}
}
}
}
}