{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9355095624923706,
"min": 0.9355095624923706,
"max": 2.8586983680725098,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8963.1171875,
"min": 8963.1171875,
"max": 29338.8203125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.792675971984863,
"min": 0.2781604826450348,
"max": 12.792675971984863,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2494.57177734375,
"min": 53.963130950927734,
"max": 2577.061767578125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07127476943134181,
"min": 0.06178741210226458,
"max": 0.07691625120136122,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28509907772536724,
"min": 0.24714964840905831,
"max": 0.3843904335802768,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19609053710512087,
"min": 0.11743096349413926,
"max": 0.2742423076986098,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7843621484204835,
"min": 0.469723853976557,
"max": 1.289528708831937,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.181818181818183,
"min": 3.1363636363636362,
"max": 25.204545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1108.0,
"min": 138.0,
"max": 1378.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.181818181818183,
"min": 3.1363636363636362,
"max": 25.204545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1108.0,
"min": 138.0,
"max": 1378.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679939099",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679939622"
},
"total": 523.337391701,
"count": 1,
"self": 0.8419446849999304,
"children": {
"run_training.setup": {
"total": 0.1803332830000386,
"count": 1,
"self": 0.1803332830000386
},
"TrainerController.start_learning": {
"total": 522.315113733,
"count": 1,
"self": 0.6819346729979543,
"children": {
"TrainerController._reset_env": {
"total": 9.293987587999993,
"count": 1,
"self": 9.293987587999993
},
"TrainerController.advance": {
"total": 512.105258727002,
"count": 18222,
"self": 0.33815786600086994,
"children": {
"env_step": {
"total": 511.76710086100115,
"count": 18222,
"self": 370.3070932840057,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.14105436699418,
"count": 18222,
"self": 2.5511634209974545,
"children": {
"TorchPolicy.evaluate": {
"total": 138.58989094599673,
"count": 18222,
"self": 138.58989094599673
}
}
},
"workers": {
"total": 0.3189532100012684,
"count": 18222,
"self": 0.0,
"children": {
"worker_root": {
"total": 520.5711483179914,
"count": 18222,
"is_parallel": true,
"self": 246.05900077799805,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0069030290000000605,
"count": 1,
"is_parallel": true,
"self": 0.004511496000020543,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023915329999795176,
"count": 10,
"is_parallel": true,
"self": 0.0023915329999795176
}
}
},
"UnityEnvironment.step": {
"total": 0.05235226400003512,
"count": 1,
"is_parallel": true,
"self": 0.00047178300002315154,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003095449999932498,
"count": 1,
"is_parallel": true,
"self": 0.0003095449999932498
},
"communicator.exchange": {
"total": 0.04987540199999785,
"count": 1,
"is_parallel": true,
"self": 0.04987540199999785
},
"steps_from_proto": {
"total": 0.0016955340000208707,
"count": 1,
"is_parallel": true,
"self": 0.0003813500001115244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013141839999093463,
"count": 10,
"is_parallel": true,
"self": 0.0013141839999093463
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 274.51214753999335,
"count": 18221,
"is_parallel": true,
"self": 10.901334580005539,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.962669892001088,
"count": 18221,
"is_parallel": true,
"self": 5.962669892001088
},
"communicator.exchange": {
"total": 221.08619323799525,
"count": 18221,
"is_parallel": true,
"self": 221.08619323799525
},
"steps_from_proto": {
"total": 36.56194982999148,
"count": 18221,
"is_parallel": true,
"self": 7.516236065021133,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.045713764970344,
"count": 182210,
"is_parallel": true,
"self": 29.045713764970344
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015669999993406236,
"count": 1,
"self": 0.00015669999993406236,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 508.22050142001126,
"count": 447437,
"is_parallel": true,
"self": 11.483790258038027,
"children": {
"process_trajectory": {
"total": 283.7604445959734,
"count": 447437,
"is_parallel": true,
"self": 281.9131805019734,
"children": {
"RLTrainer._checkpoint": {
"total": 1.8472640940000247,
"count": 4,
"is_parallel": true,
"self": 1.8472640940000247
}
}
},
"_update_policy": {
"total": 212.97626656599982,
"count": 90,
"is_parallel": true,
"self": 79.09837288599863,
"children": {
"TorchPPOOptimizer.update": {
"total": 133.8778936800012,
"count": 4581,
"is_parallel": true,
"self": 133.8778936800012
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.23377604500001326,
"count": 1,
"self": 0.0012073009999085116,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23256874400010474,
"count": 1,
"self": 0.23256874400010474
}
}
}
}
}
}
}