{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9762593507766724,
"min": 0.9742963910102844,
"max": 2.8526597023010254,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9278.369140625,
"min": 9278.369140625,
"max": 29119.94921875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.249286651611328,
"min": 0.4922589063644409,
"max": 13.249286651611328,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2583.61083984375,
"min": 95.49822998046875,
"max": 2687.4931640625,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06584298044903086,
"min": 0.06142284990843002,
"max": 0.07325592897462889,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26337192179612345,
"min": 0.2463662321102924,
"max": 0.3602274457158523,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21594802881864938,
"min": 0.13885829486546344,
"max": 0.30538078581585604,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8637921152745975,
"min": 0.5554331794618538,
"max": 1.5269039290792803,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.0,
"min": 4.386363636363637,
"max": 26.436363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1144.0,
"min": 193.0,
"max": 1454.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.0,
"min": 4.386363636363637,
"max": 26.436363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1144.0,
"min": 193.0,
"max": 1454.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748525409",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748525870"
},
"total": 461.4248328780001,
"count": 1,
"self": 0.39069899899993743,
"children": {
"run_training.setup": {
"total": 0.03158664900001895,
"count": 1,
"self": 0.03158664900001895
},
"TrainerController.start_learning": {
"total": 461.0025472300001,
"count": 1,
"self": 0.3385848990028535,
"children": {
"TrainerController._reset_env": {
"total": 3.1970629930000314,
"count": 1,
"self": 3.1970629930000314
},
"TrainerController.advance": {
"total": 457.350499973997,
"count": 18192,
"self": 0.3690018110366964,
"children": {
"env_step": {
"total": 309.4406322829833,
"count": 18192,
"self": 230.21464812498755,
"children": {
"SubprocessEnvManager._take_step": {
"total": 79.01775360899308,
"count": 18192,
"self": 1.265770641989434,
"children": {
"TorchPolicy.evaluate": {
"total": 77.75198296700364,
"count": 18192,
"self": 77.75198296700364
}
}
},
"workers": {
"total": 0.20823054900267834,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 459.3821871489956,
"count": 18192,
"is_parallel": true,
"self": 257.4784654880036,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006745017999946867,
"count": 1,
"is_parallel": true,
"self": 0.005317889999901126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001427128000045741,
"count": 10,
"is_parallel": true,
"self": 0.001427128000045741
}
}
},
"UnityEnvironment.step": {
"total": 0.03611070700003438,
"count": 1,
"is_parallel": true,
"self": 0.0006132759999672999,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004530349999640748,
"count": 1,
"is_parallel": true,
"self": 0.0004530349999640748
},
"communicator.exchange": {
"total": 0.033235516000104326,
"count": 1,
"is_parallel": true,
"self": 0.033235516000104326
},
"steps_from_proto": {
"total": 0.0018088799999986804,
"count": 1,
"is_parallel": true,
"self": 0.0003585610003256079,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014503189996730725,
"count": 10,
"is_parallel": true,
"self": 0.0014503189996730725
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 201.903721660992,
"count": 18191,
"is_parallel": true,
"self": 9.755900229982558,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.445447548998914,
"count": 18191,
"is_parallel": true,
"self": 5.445447548998914
},
"communicator.exchange": {
"total": 154.7102695710006,
"count": 18191,
"is_parallel": true,
"self": 154.7102695710006
},
"steps_from_proto": {
"total": 31.99210431100994,
"count": 18191,
"is_parallel": true,
"self": 5.712036963980722,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.280067347029217,
"count": 181910,
"is_parallel": true,
"self": 26.280067347029217
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 147.540865879977,
"count": 18192,
"self": 0.44079943598103455,
"children": {
"process_trajectory": {
"total": 47.80012096299731,
"count": 18192,
"self": 47.27518342599751,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5249375369998006,
"count": 4,
"self": 0.5249375369998006
}
}
},
"_update_policy": {
"total": 99.29994548099864,
"count": 90,
"self": 39.66932629299606,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.63061918800258,
"count": 4587,
"self": 59.63061918800258
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2010000318696257e-06,
"count": 1,
"self": 1.2010000318696257e-06
},
"TrainerController._save_models": {
"total": 0.11639816300021266,
"count": 1,
"self": 0.0008668420000503829,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11553132100016228,
"count": 1,
"self": 0.11553132100016228
}
}
}
}
}
}
}