{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6723476052284241,
"min": 0.6723476052284241,
"max": 2.8632023334503174,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6389.99169921875,
"min": 6389.99169921875,
"max": 29227.5703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.976069450378418,
"min": 0.24375399947166443,
"max": 12.976069450378418,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2530.33349609375,
"min": 47.28827667236328,
"max": 2616.23193359375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06700006679873731,
"min": 0.06006346760579047,
"max": 0.07771163879406945,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26800026719494924,
"min": 0.2526639623124627,
"max": 0.36488507549574045,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1748400707090018,
"min": 0.10442580072113368,
"max": 0.27325200169694186,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6993602828360072,
"min": 0.4177032028845347,
"max": 1.3662600084847094,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.886363636363637,
"min": 2.977272727272727,
"max": 25.886363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1139.0,
"min": 131.0,
"max": 1396.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.886363636363637,
"min": 2.977272727272727,
"max": 25.886363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1139.0,
"min": 131.0,
"max": 1396.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739544452",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739544875"
},
"total": 422.99211140099993,
"count": 1,
"self": 0.4391055879999044,
"children": {
"run_training.setup": {
"total": 0.02256464100003086,
"count": 1,
"self": 0.02256464100003086
},
"TrainerController.start_learning": {
"total": 422.530441172,
"count": 1,
"self": 0.3234187110047628,
"children": {
"TrainerController._reset_env": {
"total": 2.931380503000014,
"count": 1,
"self": 2.931380503000014
},
"TrainerController.advance": {
"total": 419.18859194499515,
"count": 18192,
"self": 0.3590904749934225,
"children": {
"env_step": {
"total": 297.4501084250057,
"count": 18192,
"self": 226.6158425880024,
"children": {
"SubprocessEnvManager._take_step": {
"total": 70.63581350699928,
"count": 18192,
"self": 1.2374014479997868,
"children": {
"TorchPolicy.evaluate": {
"total": 69.3984120589995,
"count": 18192,
"self": 69.3984120589995
}
}
},
"workers": {
"total": 0.1984523300039882,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 421.01412313199467,
"count": 18192,
"is_parallel": true,
"self": 222.20636863199115,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0052979499999992186,
"count": 1,
"is_parallel": true,
"self": 0.0037912189998223766,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001506731000176842,
"count": 10,
"is_parallel": true,
"self": 0.001506731000176842
}
}
},
"UnityEnvironment.step": {
"total": 0.03516615799998135,
"count": 1,
"is_parallel": true,
"self": 0.0005822209999450934,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004011830000081318,
"count": 1,
"is_parallel": true,
"self": 0.0004011830000081318
},
"communicator.exchange": {
"total": 0.032253732000015134,
"count": 1,
"is_parallel": true,
"self": 0.032253732000015134
},
"steps_from_proto": {
"total": 0.001929022000012992,
"count": 1,
"is_parallel": true,
"self": 0.0003885259999947266,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015404960000182655,
"count": 10,
"is_parallel": true,
"self": 0.0015404960000182655
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 198.8077545000035,
"count": 18191,
"is_parallel": true,
"self": 9.766726515999437,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.3492122759954555,
"count": 18191,
"is_parallel": true,
"self": 5.3492122759954555
},
"communicator.exchange": {
"total": 152.54842960300806,
"count": 18191,
"is_parallel": true,
"self": 152.54842960300806
},
"steps_from_proto": {
"total": 31.14338610500056,
"count": 18191,
"is_parallel": true,
"self": 5.576297554992493,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.567088550008066,
"count": 181910,
"is_parallel": true,
"self": 25.567088550008066
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 121.37939304499605,
"count": 18192,
"self": 0.40992698799720984,
"children": {
"process_trajectory": {
"total": 26.7134874869991,
"count": 18192,
"self": 26.124393160999034,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5890943260000654,
"count": 4,
"self": 0.5890943260000654
}
}
},
"_update_policy": {
"total": 94.25597856999974,
"count": 90,
"self": 37.93550616500414,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.3204724049956,
"count": 4587,
"self": 56.3204724049956
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.306000058320933e-06,
"count": 1,
"self": 1.306000058320933e-06
},
"TrainerController._save_models": {
"total": 0.08704870700000811,
"count": 1,
"self": 0.0009595279999530248,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08608917900005508,
"count": 1,
"self": 0.08608917900005508
}
}
}
}
}
}
}