{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1696356534957886,
"min": 1.1586006879806519,
"max": 2.8567614555358887,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 11116.216796875,
"min": 11116.216796875,
"max": 29161.8203125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 80.743896484375,
"min": 0.37964004278182983,
"max": 80.743896484375,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 15745.0595703125,
"min": 73.6501693725586,
"max": 15833.3828125,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06701226236170851,
"min": 0.05924823112587796,
"max": 0.07278921084590348,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26804904944683405,
"min": 0.2591948176844858,
"max": 0.3639460542295174,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.46281355677866465,
"min": 0.1529703062614335,
"max": 0.6265163567720674,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.8512542271146586,
"min": 0.611881225045734,
"max": 3.132581783860337,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.6940097306000015e-05,
"min": 2.6940097306000015e-05,
"max": 0.000972940002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00010776038922400006,
"min": 0.00010776038922400006,
"max": 0.004617200038280001,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.636363636363637,
"min": 3.4318181818181817,
"max": 26.613636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1128.0,
"min": 151.0,
"max": 1420.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.636363636363637,
"min": 3.4318181818181817,
"max": 26.613636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1128.0,
"min": 151.0,
"max": 1420.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1760348875",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1760349445"
},
"total": 570.327507424,
"count": 1,
"self": 1.0985130230001232,
"children": {
"run_training.setup": {
"total": 0.04536594999990484,
"count": 1,
"self": 0.04536594999990484
},
"TrainerController.start_learning": {
"total": 569.183628451,
"count": 1,
"self": 0.6299554249917492,
"children": {
"TrainerController._reset_env": {
"total": 5.05053794899959,
"count": 1,
"self": 5.05053794899959
},
"TrainerController.advance": {
"total": 563.3698811720087,
"count": 18192,
"self": 0.6448444780307909,
"children": {
"env_step": {
"total": 406.57764273400517,
"count": 18192,
"self": 352.571894519976,
"children": {
"SubprocessEnvManager._take_step": {
"total": 53.61874586097201,
"count": 18192,
"self": 1.9992169238839779,
"children": {
"TorchPolicy.evaluate": {
"total": 51.619528937088035,
"count": 18192,
"self": 51.619528937088035
}
}
},
"workers": {
"total": 0.3870023530571416,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 566.2498391761146,
"count": 18192,
"is_parallel": true,
"self": 261.542008055093,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00426478200006386,
"count": 1,
"is_parallel": true,
"self": 0.0015539629976046854,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002710819002459175,
"count": 10,
"is_parallel": true,
"self": 0.002710819002459175
}
}
},
"UnityEnvironment.step": {
"total": 0.04402731299978768,
"count": 1,
"is_parallel": true,
"self": 0.0007118049998098286,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004511699999056873,
"count": 1,
"is_parallel": true,
"self": 0.0004511699999056873
},
"communicator.exchange": {
"total": 0.04069060400070157,
"count": 1,
"is_parallel": true,
"self": 0.04069060400070157
},
"steps_from_proto": {
"total": 0.0021737339993705973,
"count": 1,
"is_parallel": true,
"self": 0.00043069300045317505,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017430409989174223,
"count": 10,
"is_parallel": true,
"self": 0.0017430409989174223
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 304.7078311210216,
"count": 18191,
"is_parallel": true,
"self": 13.761532958168573,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.448107377951601,
"count": 18191,
"is_parallel": true,
"self": 7.448107377951601
},
"communicator.exchange": {
"total": 236.13964651195238,
"count": 18191,
"is_parallel": true,
"self": 236.13964651195238
},
"steps_from_proto": {
"total": 47.358544272949075,
"count": 18191,
"is_parallel": true,
"self": 8.386712790981619,
"children": {
"_process_rank_one_or_two_observation": {
"total": 38.971831481967456,
"count": 181910,
"is_parallel": true,
"self": 38.971831481967456
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 156.14739395997276,
"count": 18192,
"self": 0.8271200900107942,
"children": {
"process_trajectory": {
"total": 30.195439903956867,
"count": 18192,
"self": 29.715409851956792,
"children": {
"RLTrainer._checkpoint": {
"total": 0.48003005200007465,
"count": 4,
"self": 0.48003005200007465
}
}
},
"_update_policy": {
"total": 125.1248339660051,
"count": 90,
"self": 46.94673122595941,
"children": {
"TorchPPOOptimizer.update": {
"total": 78.17810274004569,
"count": 4587,
"self": 78.17810274004569
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.7279999156016856e-06,
"count": 1,
"self": 1.7279999156016856e-06
},
"TrainerController._save_models": {
"total": 0.13325217700003122,
"count": 1,
"self": 0.0035736320005526068,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12967854499947862,
"count": 1,
"self": 0.12967854499947862
}
}
}
}
}
}
}