{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8334105610847473,
"min": 0.8334105610847473,
"max": 2.853639602661133,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7920.73388671875,
"min": 7920.73388671875,
"max": 29129.953125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.146039009094238,
"min": 0.5185803174972534,
"max": 13.146039009094238,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2563.4775390625,
"min": 100.60458374023438,
"max": 2667.7265625,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07235299806936882,
"min": 0.06164741838790409,
"max": 0.07727318956344342,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28941199227747527,
"min": 0.24658967355161637,
"max": 0.3863659478172171,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18580407422839426,
"min": 0.1619860933746631,
"max": 0.2801778262152391,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.743216296913577,
"min": 0.6479443734986524,
"max": 1.4008891310761957,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.84090909090909,
"min": 4.7727272727272725,
"max": 26.12727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1137.0,
"min": 210.0,
"max": 1437.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.84090909090909,
"min": 4.7727272727272725,
"max": 26.12727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1137.0,
"min": 210.0,
"max": 1437.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749147550",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749148147"
},
"total": 597.690357207,
"count": 1,
"self": 1.0140573240000776,
"children": {
"run_training.setup": {
"total": 0.03941825800006882,
"count": 1,
"self": 0.03941825800006882
},
"TrainerController.start_learning": {
"total": 596.6368816249999,
"count": 1,
"self": 0.66290315999413,
"children": {
"TrainerController._reset_env": {
"total": 4.501070288000051,
"count": 1,
"self": 4.501070288000051
},
"TrainerController.advance": {
"total": 591.3906533030055,
"count": 18192,
"self": 0.6827128540040803,
"children": {
"env_step": {
"total": 408.50426029600465,
"count": 18192,
"self": 347.0057050760214,
"children": {
"SubprocessEnvManager._take_step": {
"total": 61.11038957798371,
"count": 18192,
"self": 2.331219755970892,
"children": {
"TorchPolicy.evaluate": {
"total": 58.77916982201282,
"count": 18192,
"self": 58.77916982201282
}
}
},
"workers": {
"total": 0.3881656419995352,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 594.4259291279816,
"count": 18192,
"is_parallel": true,
"self": 295.5986663469996,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008537749999959487,
"count": 1,
"is_parallel": true,
"self": 0.006056523000097513,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002481226999861974,
"count": 10,
"is_parallel": true,
"self": 0.002481226999861974
}
}
},
"UnityEnvironment.step": {
"total": 0.04968119400007254,
"count": 1,
"is_parallel": true,
"self": 0.0009206950001043879,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005386029999954189,
"count": 1,
"is_parallel": true,
"self": 0.0005386029999954189
},
"communicator.exchange": {
"total": 0.04574494400003459,
"count": 1,
"is_parallel": true,
"self": 0.04574494400003459
},
"steps_from_proto": {
"total": 0.0024769519999381373,
"count": 1,
"is_parallel": true,
"self": 0.00046864500006904564,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020083069998690917,
"count": 10,
"is_parallel": true,
"self": 0.0020083069998690917
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 298.827262780982,
"count": 18191,
"is_parallel": true,
"self": 14.991797496016034,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.829110263988014,
"count": 18191,
"is_parallel": true,
"self": 7.829110263988014
},
"communicator.exchange": {
"total": 232.54226062699013,
"count": 18191,
"is_parallel": true,
"self": 232.54226062699013
},
"steps_from_proto": {
"total": 43.46409439398781,
"count": 18191,
"is_parallel": true,
"self": 8.49944307403598,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.96465131995183,
"count": 181910,
"is_parallel": true,
"self": 34.96465131995183
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 182.20368015299675,
"count": 18192,
"self": 0.8686401059889022,
"children": {
"process_trajectory": {
"total": 33.8664401010069,
"count": 18192,
"self": 33.37377141300681,
"children": {
"RLTrainer._checkpoint": {
"total": 0.49266868800009433,
"count": 4,
"self": 0.49266868800009433
}
}
},
"_update_policy": {
"total": 147.46859994600095,
"count": 90,
"self": 54.24056014999269,
"children": {
"TorchPPOOptimizer.update": {
"total": 93.22803979600826,
"count": 4587,
"self": 93.22803979600826
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.14800013761851e-06,
"count": 1,
"self": 1.14800013761851e-06
},
"TrainerController._save_models": {
"total": 0.08225372600008996,
"count": 1,
"self": 0.001197478999984014,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08105624700010594,
"count": 1,
"self": 0.08105624700010594
}
}
}
}
}
}
}