{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8993549942970276,
"min": 0.8993549942970276,
"max": 2.8404741287231445,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8547.4697265625,
"min": 8547.4697265625,
"max": 28995.560546875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.714906692504883,
"min": 0.4425339698791504,
"max": 12.714906692504883,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2479.40673828125,
"min": 85.85159301757812,
"max": 2558.693359375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07275756926123994,
"min": 0.06106921812394884,
"max": 0.07375606545218605,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.29103027704495976,
"min": 0.24427687249579536,
"max": 0.356165221374135,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2009584371219663,
"min": 0.1543384936601654,
"max": 0.2619113650976443,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8038337484878652,
"min": 0.6173539746406616,
"max": 1.3095568254882215,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.84090909090909,
"min": 4.386363636363637,
"max": 24.886363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1093.0,
"min": 193.0,
"max": 1362.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.84090909090909,
"min": 4.386363636363637,
"max": 24.886363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1093.0,
"min": 193.0,
"max": 1362.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1766639050",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/Miniconda3/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1766639515"
},
"total": 464.48152809300063,
"count": 1,
"self": 0.3720024399999602,
"children": {
"run_training.setup": {
"total": 0.03608389100008935,
"count": 1,
"self": 0.03608389100008935
},
"TrainerController.start_learning": {
"total": 464.0734417620006,
"count": 1,
"self": 0.5874694390513469,
"children": {
"TrainerController._reset_env": {
"total": 2.4294347179993565,
"count": 1,
"self": 2.4294347179993565
},
"TrainerController.advance": {
"total": 460.9627637919484,
"count": 18192,
"self": 0.6129393270566652,
"children": {
"env_step": {
"total": 316.6584301390476,
"count": 18192,
"self": 224.22886758510595,
"children": {
"SubprocessEnvManager._take_step": {
"total": 92.08414454888589,
"count": 18192,
"self": 2.183041628830324,
"children": {
"TorchPolicy.evaluate": {
"total": 89.90110292005556,
"count": 18192,
"self": 89.90110292005556
}
}
},
"workers": {
"total": 0.3454180050557625,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 462.9187864920041,
"count": 18192,
"is_parallel": true,
"self": 274.21723872403436,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012808530000256724,
"count": 1,
"is_parallel": true,
"self": 0.0003464800010988256,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009343729989268468,
"count": 10,
"is_parallel": true,
"self": 0.0009343729989268468
}
}
},
"UnityEnvironment.step": {
"total": 0.02879238500008796,
"count": 1,
"is_parallel": true,
"self": 0.0003846119998343056,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002654589998201118,
"count": 1,
"is_parallel": true,
"self": 0.0002654589998201118
},
"communicator.exchange": {
"total": 0.027232481999817537,
"count": 1,
"is_parallel": true,
"self": 0.027232481999817537
},
"steps_from_proto": {
"total": 0.0009098320006160066,
"count": 1,
"is_parallel": true,
"self": 0.00021705000017391285,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006927820004420937,
"count": 10,
"is_parallel": true,
"self": 0.0006927820004420937
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 188.7015477679697,
"count": 18191,
"is_parallel": true,
"self": 7.957730951069607,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.664177416991151,
"count": 18191,
"is_parallel": true,
"self": 4.664177416991151
},
"communicator.exchange": {
"total": 152.44799271099964,
"count": 18191,
"is_parallel": true,
"self": 152.44799271099964
},
"steps_from_proto": {
"total": 23.631646688909314,
"count": 18191,
"is_parallel": true,
"self": 5.268294112743206,
"children": {
"_process_rank_one_or_two_observation": {
"total": 18.363352576166108,
"count": 181910,
"is_parallel": true,
"self": 18.363352576166108
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 143.69139432584416,
"count": 18192,
"self": 0.7607257198051229,
"children": {
"process_trajectory": {
"total": 32.95966574803515,
"count": 18192,
"self": 32.51156138403621,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4481043639989366,
"count": 4,
"self": 0.4481043639989366
}
}
},
"_update_policy": {
"total": 109.97100285800389,
"count": 90,
"self": 39.09569811705205,
"children": {
"TorchPPOOptimizer.update": {
"total": 70.87530474095183,
"count": 4587,
"self": 70.87530474095183
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2310010788496584e-06,
"count": 1,
"self": 1.2310010788496584e-06
},
"TrainerController._save_models": {
"total": 0.09377258200038341,
"count": 1,
"self": 0.0005878800002392381,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09318470200014417,
"count": 1,
"self": 0.09318470200014417
}
}
}
}
}
}
}