{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.862925112247467,
"min": 0.8514000773429871,
"max": 2.8537819385528564,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8201.240234375,
"min": 8201.240234375,
"max": 29131.40625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.890761375427246,
"min": 0.4507320523262024,
"max": 12.93065357208252,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2513.698486328125,
"min": 87.4420166015625,
"max": 2637.853271484375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06899987731052462,
"min": 0.06335313329662884,
"max": 0.07794964194341161,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2759995092420985,
"min": 0.25341253318651535,
"max": 0.38974820971705804,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.203451812632528,
"min": 0.11853546478132736,
"max": 0.3078170570088368,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.813807250530112,
"min": 0.47414185912530943,
"max": 1.539085285044184,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.15909090909091,
"min": 3.409090909090909,
"max": 25.363636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1107.0,
"min": 150.0,
"max": 1383.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.15909090909091,
"min": 3.409090909090909,
"max": 25.363636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1107.0,
"min": 150.0,
"max": 1383.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749097700",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749098203"
},
"total": 502.7600226950001,
"count": 1,
"self": 0.43546422000008533,
"children": {
"run_training.setup": {
"total": 0.02532069800008685,
"count": 1,
"self": 0.02532069800008685
},
"TrainerController.start_learning": {
"total": 502.2992377769999,
"count": 1,
"self": 0.45337052400986977,
"children": {
"TrainerController._reset_env": {
"total": 3.3068410969999604,
"count": 1,
"self": 3.3068410969999604
},
"TrainerController.advance": {
"total": 498.44259935999,
"count": 18192,
"self": 0.5116183670138525,
"children": {
"env_step": {
"total": 359.38550039898973,
"count": 18192,
"self": 273.9696637589925,
"children": {
"SubprocessEnvManager._take_step": {
"total": 85.1527901769955,
"count": 18192,
"self": 1.5288716779853075,
"children": {
"TorchPolicy.evaluate": {
"total": 83.62391849901019,
"count": 18192,
"self": 83.62391849901019
}
}
},
"workers": {
"total": 0.2630464630017286,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 500.6703331160038,
"count": 18192,
"is_parallel": true,
"self": 261.00389441301945,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0050309380001181125,
"count": 1,
"is_parallel": true,
"self": 0.0033545329997650697,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016764050003530429,
"count": 10,
"is_parallel": true,
"self": 0.0016764050003530429
}
}
},
"UnityEnvironment.step": {
"total": 0.041854987000078836,
"count": 1,
"is_parallel": true,
"self": 0.00069297700019888,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044875999992655125,
"count": 1,
"is_parallel": true,
"self": 0.00044875999992655125
},
"communicator.exchange": {
"total": 0.0386116800000309,
"count": 1,
"is_parallel": true,
"self": 0.0386116800000309
},
"steps_from_proto": {
"total": 0.0021015699999225035,
"count": 1,
"is_parallel": true,
"self": 0.0004200719997697888,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016814980001527147,
"count": 10,
"is_parallel": true,
"self": 0.0016814980001527147
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 239.66643870298435,
"count": 18191,
"is_parallel": true,
"self": 11.355764032957723,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.57951185899401,
"count": 18191,
"is_parallel": true,
"self": 6.57951185899401
},
"communicator.exchange": {
"total": 183.4546589510187,
"count": 18191,
"is_parallel": true,
"self": 183.4546589510187
},
"steps_from_proto": {
"total": 38.27650386001392,
"count": 18191,
"is_parallel": true,
"self": 7.105403192085532,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.171100667928386,
"count": 181910,
"is_parallel": true,
"self": 31.171100667928386
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 138.54548059398644,
"count": 18192,
"self": 0.5697710669790013,
"children": {
"process_trajectory": {
"total": 31.996537240008138,
"count": 18192,
"self": 31.542733656008068,
"children": {
"RLTrainer._checkpoint": {
"total": 0.45380358400007026,
"count": 4,
"self": 0.45380358400007026
}
}
},
"_update_policy": {
"total": 105.9791722869993,
"count": 90,
"self": 43.891343187995744,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.08782909900356,
"count": 4587,
"self": 62.08782909900356
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.220000265486306e-07,
"count": 1,
"self": 9.220000265486306e-07
},
"TrainerController._save_models": {
"total": 0.09642587400003322,
"count": 1,
"self": 0.0008416100001795712,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09558426399985365,
"count": 1,
"self": 0.09558426399985365
}
}
}
}
}
}
}