{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9508161544799805,
"min": 0.9376689195632935,
"max": 2.857456922531128,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9036.556640625,
"min": 8994.1201171875,
"max": 29168.919921875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.987527847290039,
"min": 0.39481472969055176,
"max": 12.987527847290039,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2532.56787109375,
"min": 76.59405517578125,
"max": 2628.041015625,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06435796104484168,
"min": 0.0625649938573695,
"max": 0.07481702238426305,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2574318441793667,
"min": 0.250259975429478,
"max": 0.37408511192131527,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1943555437028408,
"min": 0.1325449599375419,
"max": 0.32993191334546784,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7774221748113632,
"min": 0.5301798397501676,
"max": 1.5098914772856469,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.681818181818183,
"min": 3.659090909090909,
"max": 25.681818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1130.0,
"min": 161.0,
"max": 1408.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.681818181818183,
"min": 3.659090909090909,
"max": 25.681818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1130.0,
"min": 161.0,
"max": 1408.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1756581904",
"python_version": "3.10.12 (main, Aug 15 2025, 14:32:43) [GCC 11.4.0]",
"command_line_arguments": "/content/venv/bin/mlagents-learn /content/ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1756582324"
},
"total": 419.14861921500005,
"count": 1,
"self": 0.4392074849999972,
"children": {
"run_training.setup": {
"total": 0.020040388000097664,
"count": 1,
"self": 0.020040388000097664
},
"TrainerController.start_learning": {
"total": 418.68937134199996,
"count": 1,
"self": 0.34937508898451597,
"children": {
"TrainerController._reset_env": {
"total": 2.8146098520001033,
"count": 1,
"self": 2.8146098520001033
},
"TrainerController.advance": {
"total": 415.4435061970155,
"count": 18192,
"self": 0.3734602419719977,
"children": {
"env_step": {
"total": 297.5605676760433,
"count": 18192,
"self": 228.96820766005794,
"children": {
"SubprocessEnvManager._take_step": {
"total": 68.37179934297023,
"count": 18192,
"self": 1.257339452990891,
"children": {
"TorchPolicy.evaluate": {
"total": 67.11445988997934,
"count": 18192,
"self": 67.11445988997934
}
}
},
"workers": {
"total": 0.22056067301514304,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 417.43982083197807,
"count": 18192,
"is_parallel": true,
"self": 216.19833347197323,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005191462999846408,
"count": 1,
"is_parallel": true,
"self": 0.0038381799995477195,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013532830002986884,
"count": 10,
"is_parallel": true,
"self": 0.0013532830002986884
}
}
},
"UnityEnvironment.step": {
"total": 0.03474213000004056,
"count": 1,
"is_parallel": true,
"self": 0.0005786469998838584,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004448449999472359,
"count": 1,
"is_parallel": true,
"self": 0.0004448449999472359
},
"communicator.exchange": {
"total": 0.03187212200009526,
"count": 1,
"is_parallel": true,
"self": 0.03187212200009526
},
"steps_from_proto": {
"total": 0.0018465160001142067,
"count": 1,
"is_parallel": true,
"self": 0.0003962319999573083,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014502840001568984,
"count": 10,
"is_parallel": true,
"self": 0.0014502840001568984
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 201.24148736000484,
"count": 18191,
"is_parallel": true,
"self": 9.61199964400771,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.320303811997519,
"count": 18191,
"is_parallel": true,
"self": 5.320303811997519
},
"communicator.exchange": {
"total": 154.58015152198118,
"count": 18191,
"is_parallel": true,
"self": 154.58015152198118
},
"steps_from_proto": {
"total": 31.72903238201843,
"count": 18191,
"is_parallel": true,
"self": 5.730231346904475,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.998801035113956,
"count": 181910,
"is_parallel": true,
"self": 25.998801035113956
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 117.50947827900018,
"count": 18192,
"self": 0.4482774370028437,
"children": {
"process_trajectory": {
"total": 26.48108122299618,
"count": 18192,
"self": 26.07982493299687,
"children": {
"RLTrainer._checkpoint": {
"total": 0.40125628999930996,
"count": 4,
"self": 0.40125628999930996
}
}
},
"_update_policy": {
"total": 90.58011961900115,
"count": 90,
"self": 36.65294283300682,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.927176785994334,
"count": 4587,
"self": 53.927176785994334
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.041999894368928e-06,
"count": 1,
"self": 1.041999894368928e-06
},
"TrainerController._save_models": {
"total": 0.08187916199995016,
"count": 1,
"self": 0.0007911749999038875,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08108798700004627,
"count": 1,
"self": 0.08108798700004627
}
}
}
}
}
}
}