{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7644171118736267,
"min": 0.7624673247337341,
"max": 2.814574718475342,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7265.0205078125,
"min": 7265.0205078125,
"max": 28731.177734375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.95201587677002,
"min": 0.4296029508113861,
"max": 12.95201587677002,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2525.64306640625,
"min": 83.34297180175781,
"max": 2625.00244140625,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07240919993737228,
"min": 0.062080395746092754,
"max": 0.07675540651942749,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28963679974948914,
"min": 0.24832158298437101,
"max": 0.3688545707811821,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18016676619356753,
"min": 0.14203216430877208,
"max": 0.29044583249910205,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7206670647742701,
"min": 0.5681286572350883,
"max": 1.3526703341334474,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.704545454545453,
"min": 4.2727272727272725,
"max": 25.704545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1131.0,
"min": 188.0,
"max": 1409.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.704545454545453,
"min": 4.2727272727272725,
"max": 25.704545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1131.0,
"min": 188.0,
"max": 1409.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1744649311",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1744649919"
},
"total": 607.6525821520002,
"count": 1,
"self": 0.6334756010001001,
"children": {
"run_training.setup": {
"total": 0.0321631119999779,
"count": 1,
"self": 0.0321631119999779
},
"TrainerController.start_learning": {
"total": 606.986943439,
"count": 1,
"self": 0.619475616983209,
"children": {
"TrainerController._reset_env": {
"total": 3.9626099520000366,
"count": 1,
"self": 3.9626099520000366
},
"TrainerController.advance": {
"total": 602.3125116680167,
"count": 18192,
"self": 0.6462097890118912,
"children": {
"env_step": {
"total": 404.2212283800031,
"count": 18192,
"self": 343.00665611100897,
"children": {
"SubprocessEnvManager._take_step": {
"total": 60.8254497989995,
"count": 18192,
"self": 2.3360801449997552,
"children": {
"TorchPolicy.evaluate": {
"total": 58.489369653999745,
"count": 18192,
"self": 58.489369653999745
}
}
},
"workers": {
"total": 0.38912246999461786,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 604.8095673110018,
"count": 18192,
"is_parallel": true,
"self": 309.1052410960165,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007284846999993988,
"count": 1,
"is_parallel": true,
"self": 0.004839723000031881,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024451239999621066,
"count": 10,
"is_parallel": true,
"self": 0.0024451239999621066
}
}
},
"UnityEnvironment.step": {
"total": 0.04905004899995902,
"count": 1,
"is_parallel": true,
"self": 0.0009295229999111143,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046595200001320336,
"count": 1,
"is_parallel": true,
"self": 0.00046595200001320336
},
"communicator.exchange": {
"total": 0.04503060000001824,
"count": 1,
"is_parallel": true,
"self": 0.04503060000001824
},
"steps_from_proto": {
"total": 0.00262397400001646,
"count": 1,
"is_parallel": true,
"self": 0.000619567000171628,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002004406999844832,
"count": 10,
"is_parallel": true,
"self": 0.002004406999844832
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 295.7043262149853,
"count": 18191,
"is_parallel": true,
"self": 14.939513467010897,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.929747530978602,
"count": 18191,
"is_parallel": true,
"self": 7.929747530978602
},
"communicator.exchange": {
"total": 228.88002635500146,
"count": 18191,
"is_parallel": true,
"self": 228.88002635500146
},
"steps_from_proto": {
"total": 43.95503886199435,
"count": 18191,
"is_parallel": true,
"self": 8.605523949992346,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.34951491200201,
"count": 181910,
"is_parallel": true,
"self": 35.34951491200201
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 197.44507349900164,
"count": 18192,
"self": 0.8084506009979577,
"children": {
"process_trajectory": {
"total": 34.224002449002455,
"count": 18192,
"self": 33.71029331000261,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5137091389998432,
"count": 4,
"self": 0.5137091389998432
}
}
},
"_update_policy": {
"total": 162.41262044900122,
"count": 90,
"self": 59.772446366001304,
"children": {
"TorchPPOOptimizer.update": {
"total": 102.64017408299992,
"count": 4587,
"self": 102.64017408299992
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5400000847876072e-06,
"count": 1,
"self": 1.5400000847876072e-06
},
"TrainerController._save_models": {
"total": 0.0923446619999595,
"count": 1,
"self": 0.0017552629999499914,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09058939900000951,
"count": 1,
"self": 0.09058939900000951
}
}
}
}
}
}
}