{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0101943016052246,
"min": 1.0101943016052246,
"max": 2.839812994003296,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9645.3349609375,
"min": 9645.3349609375,
"max": 29269.953125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.793662071228027,
"min": 0.25998616218566895,
"max": 11.810311317443848,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2299.76416015625,
"min": 50.43731689453125,
"max": 2409.303466796875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07223837455254112,
"min": 0.062364808355271764,
"max": 0.07469677052946352,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28895349821016447,
"min": 0.25223274173913524,
"max": 0.3631257636713118,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20370641891278474,
"min": 0.11659122642367055,
"max": 0.2686379820108414,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.814825675651139,
"min": 0.4663649056946822,
"max": 1.3431899100542068,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 22.5,
"min": 3.227272727272727,
"max": 23.4,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 990.0,
"min": 142.0,
"max": 1287.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 22.5,
"min": 3.227272727272727,
"max": 23.4,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 990.0,
"min": 142.0,
"max": 1287.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1724939431",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/SnowballTarget.yaml --env=./ml-agents/training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1v2 --no-graphics",
"mlagents_version": "1.0.0",
"mlagents_envs_version": "1.0.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.21.2",
"end_time_seconds": "1724939974"
},
"total": 543.5991986669999,
"count": 1,
"self": 0.47785627600001135,
"children": {
"run_training.setup": {
"total": 0.054553988999941794,
"count": 1,
"self": 0.054553988999941794
},
"TrainerController.start_learning": {
"total": 543.0667884019999,
"count": 1,
"self": 0.7619434519945116,
"children": {
"TrainerController._reset_env": {
"total": 2.189774151999927,
"count": 1,
"self": 2.189774151999927
},
"TrainerController.advance": {
"total": 540.0193874870056,
"count": 18201,
"self": 0.3617795149887115,
"children": {
"env_step": {
"total": 539.6576079720169,
"count": 18201,
"self": 352.7570594860017,
"children": {
"SubprocessEnvManager._take_step": {
"total": 186.5166578969986,
"count": 18201,
"self": 1.8277789890200893,
"children": {
"TorchPolicy.evaluate": {
"total": 184.6888789079785,
"count": 18201,
"self": 184.6888789079785
}
}
},
"workers": {
"total": 0.38389058901657336,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 541.3840490399941,
"count": 18201,
"is_parallel": true,
"self": 265.6341495700058,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025089739999657468,
"count": 1,
"is_parallel": true,
"self": 0.0008173450003141625,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016916289996515843,
"count": 10,
"is_parallel": true,
"self": 0.0016916289996515843
}
}
},
"UnityEnvironment.step": {
"total": 0.04095943800007262,
"count": 1,
"is_parallel": true,
"self": 0.0006672340000477561,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004918080001061753,
"count": 1,
"is_parallel": true,
"self": 0.0004918080001061753
},
"communicator.exchange": {
"total": 0.037323893999882785,
"count": 1,
"is_parallel": true,
"self": 0.037323893999882785
},
"steps_from_proto": {
"total": 0.0024765020000359073,
"count": 1,
"is_parallel": true,
"self": 0.0004399499998726242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002036552000163283,
"count": 10,
"is_parallel": true,
"self": 0.002036552000163283
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 275.74989946998835,
"count": 18200,
"is_parallel": true,
"self": 12.180957115959018,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.594429396015357,
"count": 18200,
"is_parallel": true,
"self": 6.594429396015357
},
"communicator.exchange": {
"total": 211.94975153501014,
"count": 18200,
"is_parallel": true,
"self": 211.94975153501014
},
"steps_from_proto": {
"total": 45.02476142300384,
"count": 18200,
"is_parallel": true,
"self": 8.377023530966653,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.647737892037185,
"count": 182000,
"is_parallel": true,
"self": 36.647737892037185
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013666499989994918,
"count": 1,
"self": 0.00013666499989994918,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 533.394791664952,
"count": 806319,
"is_parallel": true,
"self": 17.17544031684156,
"children": {
"process_trajectory": {
"total": 291.53462325311034,
"count": 806319,
"is_parallel": true,
"self": 290.83716802711024,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6974552260001019,
"count": 4,
"is_parallel": true,
"self": 0.6974552260001019
}
}
},
"_update_policy": {
"total": 224.68472809500008,
"count": 90,
"is_parallel": true,
"self": 77.74309676400003,
"children": {
"TorchPPOOptimizer.update": {
"total": 146.94163133100005,
"count": 4584,
"is_parallel": true,
"self": 146.94163133100005
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09554664600000251,
"count": 1,
"self": 0.0009135520001564146,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0946330939998461,
"count": 1,
"self": 0.0946330939998461
}
}
}
}
}
}
}