{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.943366289138794,
"min": 0.943366289138794,
"max": 2.870500326156616,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8965.7529296875,
"min": 8965.7529296875,
"max": 29302.068359375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.241118431091309,
"min": 0.2853982150554657,
"max": 12.241118431091309,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2387.01806640625,
"min": 55.367252349853516,
"max": 2466.23193359375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06350853447357248,
"min": 0.06350853447357248,
"max": 0.07461988224405978,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2540341378942899,
"min": 0.2540341378942899,
"max": 0.37309941122029894,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19869183573652718,
"min": 0.11283515747773515,
"max": 0.2920500871597552,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7947673429461087,
"min": 0.4513406299109406,
"max": 1.460250435798776,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.363636363636363,
"min": 2.9318181818181817,
"max": 24.363636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1072.0,
"min": 129.0,
"max": 1330.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.363636363636363,
"min": 2.9318181818181817,
"max": 24.363636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1072.0,
"min": 129.0,
"max": 1330.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749477224",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749477682"
},
"total": 457.992815326,
"count": 1,
"self": 0.4324181030000318,
"children": {
"run_training.setup": {
"total": 0.022872946000006777,
"count": 1,
"self": 0.022872946000006777
},
"TrainerController.start_learning": {
"total": 457.537524277,
"count": 1,
"self": 0.4386859890008168,
"children": {
"TrainerController._reset_env": {
"total": 3.0417546130000233,
"count": 1,
"self": 3.0417546130000233
},
"TrainerController.advance": {
"total": 453.9737900959991,
"count": 18192,
"self": 0.4678481229967133,
"children": {
"env_step": {
"total": 327.55633908899944,
"count": 18192,
"self": 249.64961655899572,
"children": {
"SubprocessEnvManager._take_step": {
"total": 77.66004072700048,
"count": 18192,
"self": 1.3911260709941757,
"children": {
"TorchPolicy.evaluate": {
"total": 76.2689146560063,
"count": 18192,
"self": 76.2689146560063
}
}
},
"workers": {
"total": 0.24668180300324138,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 456.04168236699576,
"count": 18192,
"is_parallel": true,
"self": 237.78905363899787,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005303465999986656,
"count": 1,
"is_parallel": true,
"self": 0.0038473739999744794,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014560920000121769,
"count": 10,
"is_parallel": true,
"self": 0.0014560920000121769
}
}
},
"UnityEnvironment.step": {
"total": 0.035686438999960046,
"count": 1,
"is_parallel": true,
"self": 0.000548775999902773,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047312700002066777,
"count": 1,
"is_parallel": true,
"self": 0.00047312700002066777
},
"communicator.exchange": {
"total": 0.032888095000032536,
"count": 1,
"is_parallel": true,
"self": 0.032888095000032536
},
"steps_from_proto": {
"total": 0.0017764410000040698,
"count": 1,
"is_parallel": true,
"self": 0.00034798800004409713,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014284529999599727,
"count": 10,
"is_parallel": true,
"self": 0.0014284529999599727
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 218.2526287279979,
"count": 18191,
"is_parallel": true,
"self": 10.21255703698381,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.706546877003461,
"count": 18191,
"is_parallel": true,
"self": 5.706546877003461
},
"communicator.exchange": {
"total": 168.92423779001416,
"count": 18191,
"is_parallel": true,
"self": 168.92423779001416
},
"steps_from_proto": {
"total": 33.409287023996455,
"count": 18191,
"is_parallel": true,
"self": 6.184598684976095,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.22468833902036,
"count": 181910,
"is_parallel": true,
"self": 27.22468833902036
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 125.94960288400296,
"count": 18192,
"self": 0.5522230460022683,
"children": {
"process_trajectory": {
"total": 27.881557177000616,
"count": 18192,
"self": 27.46049718000063,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4210599969999862,
"count": 4,
"self": 0.4210599969999862
}
}
},
"_update_policy": {
"total": 97.51582266100007,
"count": 90,
"self": 39.400832950003235,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.11498971099684,
"count": 4587,
"self": 58.11498971099684
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6770000001997687e-06,
"count": 1,
"self": 1.6770000001997687e-06
},
"TrainerController._save_models": {
"total": 0.08329190200004177,
"count": 1,
"self": 0.0008603110001104142,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08243159099993136,
"count": 1,
"self": 0.08243159099993136
}
}
}
}
}
}
}