{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6563659310340881,
"min": 0.6563659310340881,
"max": 2.8665053844451904,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6238.10205078125,
"min": 6238.10205078125,
"max": 29261.287109375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.306512832641602,
"min": 0.4980175495147705,
"max": 13.306512832641602,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2594.77001953125,
"min": 96.61540222167969,
"max": 2694.50927734375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06700554414019994,
"min": 0.059859575980511844,
"max": 0.07539219555274387,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26802217656079974,
"min": 0.23943830392204737,
"max": 0.3608257872632142,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2160660255627305,
"min": 0.11940814892752277,
"max": 0.27352607637351634,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.864264102250922,
"min": 0.4776325957100911,
"max": 1.3302956699740653,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.0,
"min": 3.477272727272727,
"max": 26.672727272727272,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1144.0,
"min": 153.0,
"max": 1467.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.0,
"min": 3.477272727272727,
"max": 26.672727272727272,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1144.0,
"min": 153.0,
"max": 1467.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1750229669",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget.x86_64 --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1750230091"
},
"total": 422.22124321800004,
"count": 1,
"self": 0.4376328609999973,
"children": {
"run_training.setup": {
"total": 0.027707028000008904,
"count": 1,
"self": 0.027707028000008904
},
"TrainerController.start_learning": {
"total": 421.75590332900003,
"count": 1,
"self": 0.35357783101051155,
"children": {
"TrainerController._reset_env": {
"total": 2.4008558020000237,
"count": 1,
"self": 2.4008558020000237
},
"TrainerController.advance": {
"total": 418.9239204169894,
"count": 18192,
"self": 0.3734884909763423,
"children": {
"env_step": {
"total": 301.5315417960044,
"count": 18192,
"self": 232.65576527100302,
"children": {
"SubprocessEnvManager._take_step": {
"total": 68.65859459100557,
"count": 18192,
"self": 1.2337480690023312,
"children": {
"TorchPolicy.evaluate": {
"total": 67.42484652200324,
"count": 18192,
"self": 67.42484652200324
}
}
},
"workers": {
"total": 0.21718193399578922,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 420.2703504909874,
"count": 18192,
"is_parallel": true,
"self": 216.3137494039787,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00262341200004812,
"count": 1,
"is_parallel": true,
"self": 0.000700210999752926,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019232010002951938,
"count": 10,
"is_parallel": true,
"self": 0.0019232010002951938
}
}
},
"UnityEnvironment.step": {
"total": 0.03467166299992641,
"count": 1,
"is_parallel": true,
"self": 0.0005654519999325203,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003907010000148148,
"count": 1,
"is_parallel": true,
"self": 0.0003907010000148148
},
"communicator.exchange": {
"total": 0.03196972700004608,
"count": 1,
"is_parallel": true,
"self": 0.03196972700004608
},
"steps_from_proto": {
"total": 0.0017457829999329988,
"count": 1,
"is_parallel": true,
"self": 0.00034962600022936385,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001396156999703635,
"count": 10,
"is_parallel": true,
"self": 0.001396156999703635
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 203.95660108700872,
"count": 18191,
"is_parallel": true,
"self": 9.580517625027483,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.317501171994195,
"count": 18191,
"is_parallel": true,
"self": 5.317501171994195
},
"communicator.exchange": {
"total": 157.7676894689988,
"count": 18191,
"is_parallel": true,
"self": 157.7676894689988
},
"steps_from_proto": {
"total": 31.290892820988233,
"count": 18191,
"is_parallel": true,
"self": 5.688911855982155,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.601980965006078,
"count": 181910,
"is_parallel": true,
"self": 25.601980965006078
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 117.01889013000869,
"count": 18192,
"self": 0.43461065501765006,
"children": {
"process_trajectory": {
"total": 25.857300552991546,
"count": 18192,
"self": 25.45414048899147,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4031600640000761,
"count": 4,
"self": 0.4031600640000761
}
}
},
"_update_policy": {
"total": 90.72697892199949,
"count": 90,
"self": 37.507185737004306,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.21979318499518,
"count": 4587,
"self": 53.21979318499518
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0660000953066628e-06,
"count": 1,
"self": 1.0660000953066628e-06
},
"TrainerController._save_models": {
"total": 0.07754821300000003,
"count": 1,
"self": 0.0009780539999155735,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07657015900008446,
"count": 1,
"self": 0.07657015900008446
}
}
}
}
}
}
}