{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7869852781295776,
"min": 0.7756290435791016,
"max": 2.841956377029419,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7479.50830078125,
"min": 7479.50830078125,
"max": 29010.69140625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.176697731018066,
"min": 0.17971068620681763,
"max": 13.176697731018066,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2569.4560546875,
"min": 34.86387252807617,
"max": 2646.81787109375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06788724704300894,
"min": 0.06045923870785604,
"max": 0.07309876575143825,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27154898817203577,
"min": 0.2617024634508233,
"max": 0.3654938287571913,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19455917891772354,
"min": 0.12023388191044111,
"max": 0.2837839346452087,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7782367156708941,
"min": 0.48093552764176445,
"max": 1.3747278358422075,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.931818181818183,
"min": 3.0454545454545454,
"max": 26.163636363636364,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1141.0,
"min": 134.0,
"max": 1439.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.931818181818183,
"min": 3.0454545454545454,
"max": 26.163636363636364,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1141.0,
"min": 134.0,
"max": 1439.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1775571146",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./content/ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1775571747"
},
"total": 600.4748929870002,
"count": 1,
"self": 0.5404181410008277,
"children": {
"run_training.setup": {
"total": 0.035422892999577016,
"count": 1,
"self": 0.035422892999577016
},
"TrainerController.start_learning": {
"total": 599.8990519529998,
"count": 1,
"self": 0.5978882220206287,
"children": {
"TrainerController._reset_env": {
"total": 3.3453025439998783,
"count": 1,
"self": 3.3453025439998783
},
"TrainerController.advance": {
"total": 595.857713988979,
"count": 18192,
"self": 0.6008996329951515,
"children": {
"env_step": {
"total": 444.00582477896523,
"count": 18192,
"self": 348.18976595098593,
"children": {
"SubprocessEnvManager._take_step": {
"total": 95.46325864298524,
"count": 18192,
"self": 1.8778774829916074,
"children": {
"TorchPolicy.evaluate": {
"total": 93.58538115999363,
"count": 18192,
"self": 93.58538115999363
}
}
},
"workers": {
"total": 0.3528001849940665,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 596.8310099079963,
"count": 18192,
"is_parallel": true,
"self": 294.1976169819941,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004622466000000713,
"count": 1,
"is_parallel": true,
"self": 0.003229035000913427,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013934309990872862,
"count": 10,
"is_parallel": true,
"self": 0.0013934309990872862
}
}
},
"UnityEnvironment.step": {
"total": 0.040052429999832384,
"count": 1,
"is_parallel": true,
"self": 0.000692773000082525,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047612600019419915,
"count": 1,
"is_parallel": true,
"self": 0.00047612600019419915
},
"communicator.exchange": {
"total": 0.036828816999786795,
"count": 1,
"is_parallel": true,
"self": 0.036828816999786795
},
"steps_from_proto": {
"total": 0.0020547139997688646,
"count": 1,
"is_parallel": true,
"self": 0.000388615999781905,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016660979999869596,
"count": 10,
"is_parallel": true,
"self": 0.0016660979999869596
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 302.6333929260022,
"count": 18191,
"is_parallel": true,
"self": 13.603995400935673,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.602406884026095,
"count": 18191,
"is_parallel": true,
"self": 7.602406884026095
},
"communicator.exchange": {
"total": 232.6001724280004,
"count": 18191,
"is_parallel": true,
"self": 232.6001724280004
},
"steps_from_proto": {
"total": 48.82681821304004,
"count": 18191,
"is_parallel": true,
"self": 8.361242842112915,
"children": {
"_process_rank_one_or_two_observation": {
"total": 40.46557537092713,
"count": 181910,
"is_parallel": true,
"self": 40.46557537092713
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 151.25098957701857,
"count": 18192,
"self": 0.7340248320506362,
"children": {
"process_trajectory": {
"total": 35.11844556796905,
"count": 18192,
"self": 34.5333300309685,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5851155370005472,
"count": 4,
"self": 0.5851155370005472
}
}
},
"_update_policy": {
"total": 115.39851917699889,
"count": 90,
"self": 46.69837440901574,
"children": {
"TorchPPOOptimizer.update": {
"total": 68.70014476798315,
"count": 4587,
"self": 68.70014476798315
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2030000107188243e-06,
"count": 1,
"self": 1.2030000107188243e-06
},
"TrainerController._save_models": {
"total": 0.09814599500032273,
"count": 1,
"self": 0.0009039230003509147,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09724207199997181,
"count": 1,
"self": 0.09724207199997181
}
}
}
}
}
}
}